about summary refs log tree commit diff stats
path: root/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm
diff options
context:
space:
mode:
authorBenedikt Peetz <benedikt.peetz@b-peetz.de>2025-06-29 10:32:13 +0200
committerBenedikt Peetz <benedikt.peetz@b-peetz.de>2025-06-29 10:32:13 +0200
commit3d507acb42554b2551024ee3ca8490c203a1a9f8 (patch)
treececa79f3696cf9eab522be55c07c32e38de5edaf /pkgs/by-name/ri/river-mk-keymap/src/wayland/shm
parentflake.lock: Update (diff)
downloadnixos-config-3d507acb42554b2551024ee3ca8490c203a1a9f8.zip
pkgs/river-mk-keymap: Improve with key-chord support and which-key interface
Diffstat (limited to 'pkgs/by-name/ri/river-mk-keymap/src/wayland/shm')
-rw-r--r--pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/mod.rs21
-rw-r--r--pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/multi.rs437
-rw-r--r--pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/raw.rs290
-rw-r--r--pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/slot.rs596
4 files changed, 1344 insertions, 0 deletions
diff --git a/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/mod.rs b/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/mod.rs
new file mode 100644
index 00000000..65d3c590
--- /dev/null
+++ b/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/mod.rs
@@ -0,0 +1,21 @@
+#![allow(dead_code)]
+
+pub(crate) mod multi;
+pub(crate) mod raw;
+pub(crate) mod slot;
+
+use std::io;
+
+use wayland_client::globals::GlobalError;
+
+/// An error that may occur when creating a pool.
+///
+/// Both variants carry `#[from]` conversions, so `?` works on either a
+/// [`GlobalError`] or an [`io::Error`] in functions returning this type.
+#[derive(Debug, thiserror::Error)]
+pub enum CreatePoolError {
+    /// The [`wl_shm`] global is not bound.
+    #[error(transparent)]
+    Global(#[from] GlobalError),
+
+    /// Error while allocating the shared memory.
+    #[error(transparent)]
+    Create(#[from] io::Error),
+}
diff --git a/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/multi.rs b/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/multi.rs
new file mode 100644
index 00000000..0b1fdc1b
--- /dev/null
+++ b/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/multi.rs
@@ -0,0 +1,437 @@
+//! A pool implementation which automatically manages buffers.
+//!
+//! This pool is built on the [`RawPool`].
+//!
+//! The [`MultiPool`] takes a key which is used to identify buffers and tries to return the buffer associated to the key
+//! if possible. If no buffer in the pool is associated to the key, it will create a new one.
+//!
+//! # Example
+//!
+//! ```rust
+//! use smithay_client_toolkit::reexports::client::{
+//!     QueueHandle,
+//!     protocol::wl_surface::WlSurface,
+//!     protocol::wl_shm::Format,
+//! };
+//! use smithay_client_toolkit::shm::multi::MultiPool;
+//!
+//! struct WlFoo {
+//!     // The surface we'll draw on and the index of buffer associated to it
+//!     surface: (WlSurface, usize),
+//!     pool: MultiPool<(WlSurface, usize)>
+//! }
+//!
+//! impl WlFoo {
+//!     fn draw(&mut self, qh: &QueueHandle<WlFoo>) {
+//!         let surface = &self.surface.0;
+//!         // We'll increment "i" until the pool can create a new buffer
+//!         // if there's no buffer associated with our surface and "i", or if
+//!         // the buffer associated with our surface and "i" is free for use.
+//!         //
+//!         // There's no limit to the amount of buffers we can allocate to our surface but since
+//!         // shm buffers are released fairly fast, it's unlikely we'll need more than double buffering.
+//!         for i in 0..2 {
+//!             self.surface.1 = i;
+//!             if let Ok((offset, buffer, slice)) = self.pool.create_buffer(
+//!                 100,
+//!                 100 * 4,
+//!                 100,
+//!                 &self.surface,
+//!                 Format::Argb8888,
+//!             ) {
+//!                 /*
+//!                     insert drawing code here
+//!                 */
+//!                 surface.attach(Some(buffer), 0, 0);
+//!                 surface.commit();
+//!                 // We exit the function after the draw.
+//!                 return;
+//!             }
+//!         }
+//!         /*
+//!             If there's no buffer available we can for example request a frame callback
+//!             and trigger a redraw when it fires.
+//!             (not shown in this example)
+//!         */
+//!     }
+//! }
+//!
+//! fn draw(slice: &mut [u8]) {
+//!     todo!()
+//! }
+//!
+//! ```
+//!
+
+use std::borrow::Borrow;
+use std::io;
+use std::os::unix::io::OwnedFd;
+
+use std::sync::{
+    atomic::{AtomicBool, Ordering},
+    Arc,
+};
+use wayland_client::backend::protocol::Message;
+use wayland_client::backend::{Backend, ObjectData, ObjectId};
+use wayland_client::{
+    protocol::{wl_buffer, wl_shm},
+    Proxy,
+};
+
+use crate::wayland::shm::CreatePoolError;
+
+use super::raw::RawPool;
+
+/// Errors returned by [`MultiPool`] operations.
+#[derive(Debug, thiserror::Error)]
+pub(crate) enum PoolError {
+    /// The buffer for the requested key exists but the compositor has not released it yet.
+    #[error("buffer is currently used")]
+    InUse,
+    /// The buffer would overlap another slot in the pool.
+    #[error("buffer is overlapping another")]
+    Overlap,
+    /// No buffer is associated with the given key / index.
+    #[error("buffer could not be found")]
+    NotFound,
+}
+
+/// This pool manages buffers associated with keys.
+/// Only one buffer can be attributed to a given key.
+#[derive(Debug)]
+pub(crate) struct MultiPool<K> {
+    // One slot per key; slots are laid out sequentially in the backing memory.
+    buffer_list: Vec<BufferSlot<K>>,
+    // The raw shared-memory pool all slots are carved out of.
+    pub(crate) inner: RawPool,
+}
+
+/// A single keyed slot inside a [`MultiPool`].
+///
+/// Note: this previously derived `thiserror::Error`, which does not compile on a
+/// struct without an `#[error(...)]` display attribute — and this is not an error
+/// type to begin with, so the derive is removed.
+#[derive(Debug)]
+pub(crate) struct BufferSlot<K> {
+    // Set to `true` by the wl_buffer.release handler ([`BufferObjectData`]).
+    free: Arc<AtomicBool>,
+    // Reserved capacity of the slot in bytes (may exceed `used`).
+    size: usize,
+    // Bytes actually used by the current buffer.
+    used: usize,
+    // Byte offset of this slot inside the pool's memory.
+    offset: usize,
+    // The protocol object, created lazily; `None` after a resize destroys it.
+    buffer: Option<wl_buffer::WlBuffer>,
+    key: K,
+}
+
+impl<K> Drop for BufferSlot<K> {
+    /// Best-effort destruction of the wl_buffer; errors (buffer in use or
+    /// already gone) are deliberately ignored on drop.
+    fn drop(&mut self) {
+        self.destroy().ok();
+    }
+}
+
+impl<K> BufferSlot<K> {
+    /// Destroys the slot's wl_buffer, if any.
+    ///
+    /// Returns [`PoolError::NotFound`] when no buffer exists, and
+    /// [`PoolError::InUse`] when the compositor has not released it yet
+    /// (destroying a busy buffer could race with the compositor's reads).
+    pub(crate) fn destroy(&self) -> Result<(), PoolError> {
+        self.buffer
+            .as_ref()
+            .ok_or(PoolError::NotFound)
+            .and_then(|buffer| {
+                self.free
+                    .load(Ordering::Relaxed)
+                    .then(|| buffer.destroy())
+                    .ok_or(PoolError::InUse)
+            })
+    }
+}
+
+impl<K> MultiPool<K> {
+    /// Creates an empty pool backed by a fresh 4 KiB shared-memory file.
+    pub(crate) fn new(shm: &wl_shm::WlShm) -> Result<Self, CreatePoolError> {
+        Ok(Self {
+            inner: RawPool::new(4096, shm)?,
+            buffer_list: Vec::new(),
+        })
+    }
+
+    /// Resizes the memory pool, notifying the server the pool has changed in size.
+    ///
+    /// The [`wl_shm`] protocol only allows the pool to be made bigger. If the new size is smaller than the
+    /// current size of the pool, this function will do nothing.
+    pub(crate) fn resize(&mut self, size: usize) -> io::Result<()> {
+        self.inner.resize(size)
+    }
+
+    /// Removes the buffer with the given key from the pool and rearranges the others.
+    pub(crate) fn remove<Q>(&mut self, key: &Q) -> Option<BufferSlot<K>>
+    where
+        Q: PartialEq,
+        K: Borrow<Q>,
+    {
+        self.buffer_list
+            .iter()
+            .enumerate()
+            .find(|(_, slot)| slot.key.borrow().eq(key))
+            .map(|(i, _)| i)
+            .map(|i| self.buffer_list.remove(i))
+    }
+
+    /// Insert a buffer into the pool.
+    ///
+    /// Returns the index of the (existing or newly appended) slot for `key`,
+    /// or an error if the slot is busy or overlapping.
+    ///
+    /// The parameters are:
+    ///
+    /// - `width`: the width of this buffer (in pixels)
+    /// - `height`: the height of this buffer (in pixels)
+    /// - `stride`: distance (in bytes) between the beginning of a row and the next one
+    /// - `key`: a borrowed form of the stored key type
+    /// - `format`: the encoding format of the pixels.
+    pub(crate) fn insert<Q>(
+        &mut self,
+        width: i32,
+        stride: i32,
+        height: i32,
+        key: &Q,
+        format: wl_shm::Format,
+    ) -> Result<usize, PoolError>
+    where
+        K: Borrow<Q>,
+        Q: PartialEq + ToOwned<Owned = K>,
+    {
+        // Running byte offset while walking the slots in order.
+        let mut offset = 0;
+        let mut found_key = false;
+        let size = (stride * height) as usize;
+        let mut index = Err(PoolError::NotFound);
+
+        for (i, buf_slot) in self.buffer_list.iter_mut().enumerate() {
+            if buf_slot.key.borrow().eq(key) {
+                found_key = true;
+                if buf_slot.free.load(Ordering::Relaxed) {
+                    // Destroys the buffer if it's resized
+                    if size != buf_slot.used {
+                        if let Some(buffer) = buf_slot.buffer.take() {
+                            buffer.destroy();
+                        }
+                    }
+                    // Increases the size of the Buffer if it's too small and add 5% padding.
+                    // It is possible this buffer overlaps the following but the else if
+                    // statement prevents this buffer from being returned if that's the case.
+                    buf_slot.size = buf_slot.size.max(size + size / 20);
+                    index = Ok(i);
+                } else {
+                    index = Err(PoolError::InUse);
+                }
+            // If a buffer is resized, it is likely that the followings might overlap
+            } else if offset > buf_slot.offset {
+                // When the buffer is free, it's safe to shift it because we know the compositor won't try to read it.
+                if buf_slot.free.load(Ordering::Relaxed) {
+                    if offset != buf_slot.offset {
+                        if let Some(buffer) = buf_slot.buffer.take() {
+                            buffer.destroy();
+                        }
+                    }
+                    buf_slot.offset = offset;
+                } else {
+                    // If one of the overlapping buffers is busy, then no buffer can be returned because it could result in a data race.
+                    index = Err(PoolError::InUse);
+                }
+            } else if found_key {
+                break;
+            }
+            // Round each slot's footprint up to a 64-byte boundary before advancing.
+            let size = (buf_slot.size + 63) & !63;
+            offset += size;
+        }
+
+        // No slot for this key yet: grow the pool and append a new slot at the end.
+        if !found_key {
+            if let Err(err) = index {
+                return self
+                    .dyn_resize(offset, width, stride, height, key.to_owned(), format)
+                    .map(|()| self.buffer_list.len() - 1)
+                    .ok_or(err);
+            }
+        }
+
+        index
+    }
+
+    /// Retrieves the buffer associated with the given key.
+    ///
+    /// The parameters are:
+    ///
+    /// - `width`: the width of this buffer (in pixels)
+    /// - `height`: the height of this buffer (in pixels)
+    /// - `stride`: distance (in bytes) between the beginning of a row and the next one
+    /// - `key`: a borrowed form of the stored key type
+    /// - `format`: the encoding format of the pixels.
+    pub(crate) fn get<Q>(
+        &mut self,
+        width: i32,
+        stride: i32,
+        height: i32,
+        key: &Q,
+        format: wl_shm::Format,
+    ) -> Option<(usize, &wl_buffer::WlBuffer, &mut [u8])>
+    where
+        Q: PartialEq,
+        K: Borrow<Q>,
+    {
+        let len = self.inner.len();
+        let size = (stride * height) as usize;
+        let buf_slot = self
+            .buffer_list
+            .iter_mut()
+            .find(|buf_slot| buf_slot.key.borrow().eq(key))?;
+
+        // NOTE(review): this bails out when the slot is *at least* as large as the
+        // requested size, which looks inverted (one would expect `buf_slot.size < size`
+        // to be the failure case, as in `get_at`). Verify against upstream
+        // smithay-client-toolkit before relying on this path.
+        if buf_slot.size >= size {
+            return None;
+        }
+
+        buf_slot.used = size;
+        let offset = buf_slot.offset;
+        // Lazily (re)create the wl_buffer if the slot currently has none.
+        if buf_slot.buffer.is_none() {
+            if offset + size > len {
+                self.inner.resize(offset + size + size / 20).ok()?;
+            }
+            let free = Arc::new(AtomicBool::new(true));
+            let data = BufferObjectData { free: free.clone() };
+            let buffer = self.inner.create_buffer_raw(
+                offset as i32,
+                width,
+                height,
+                stride,
+                format,
+                Arc::new(data),
+            );
+            buf_slot.free = free;
+            buf_slot.buffer = Some(buffer);
+        }
+        let buf = buf_slot.buffer.as_ref()?;
+        // Mark busy until the compositor sends wl_buffer.release.
+        buf_slot.free.store(false, Ordering::Relaxed);
+        Some((offset, buf, &mut self.inner.mmap()[offset..][..size]))
+    }
+
+    /// Returns the buffer associated with the given key and its offset (usize) in the mempool.
+    ///
+    /// The parameters are:
+    ///
+    /// - `width`: the width of this buffer (in pixels)
+    /// - `height`: the height of this buffer (in pixels)
+    /// - `stride`: distance (in bytes) between the beginning of a row and the next one
+    /// - `key`: a borrowed form of the stored key type
+    /// - `format`: the encoding format of the pixels.
+    ///
+    /// The offset can be used to determine whether or not a buffer was moved in the mempool
+    /// and by consequence if it should be damaged partially or fully.
+    pub(crate) fn create_buffer<Q>(
+        &mut self,
+        width: i32,
+        stride: i32,
+        height: i32,
+        key: &Q,
+        format: wl_shm::Format,
+    ) -> Result<(usize, &wl_buffer::WlBuffer, &mut [u8]), PoolError>
+    where
+        K: Borrow<Q>,
+        Q: PartialEq + ToOwned<Owned = K>,
+    {
+        let index = self.insert(width, stride, height, key, format)?;
+        self.get_at(index, width, stride, height, format)
+    }
+
+    /// Retrieves the buffer at the given index.
+    fn get_at(
+        &mut self,
+        index: usize,
+        width: i32,
+        stride: i32,
+        height: i32,
+        format: wl_shm::Format,
+    ) -> Result<(usize, &wl_buffer::WlBuffer, &mut [u8]), PoolError> {
+        let len = self.inner.len();
+        let size = (stride * height) as usize;
+        let buf_slot = self.buffer_list.get_mut(index).ok_or(PoolError::NotFound)?;
+
+        // The request must fit in the slot's reserved capacity.
+        if size > buf_slot.size {
+            return Err(PoolError::Overlap);
+        }
+
+        buf_slot.used = size;
+        let offset = buf_slot.offset;
+        // Lazily (re)create the wl_buffer if the slot currently has none.
+        if buf_slot.buffer.is_none() {
+            if offset + size > len {
+                self.inner
+                    .resize(offset + size + size / 20)
+                    .map_err(|_| PoolError::Overlap)?;
+            }
+            let free = Arc::new(AtomicBool::new(true));
+            let data = BufferObjectData { free: free.clone() };
+            let buffer = self.inner.create_buffer_raw(
+                offset as i32,
+                width,
+                height,
+                stride,
+                format,
+                Arc::new(data),
+            );
+            buf_slot.free = free;
+            buf_slot.buffer = Some(buffer);
+        }
+        // Mark busy until the compositor sends wl_buffer.release.
+        buf_slot.free.store(false, Ordering::Relaxed);
+        let buf = buf_slot.buffer.as_ref().unwrap();
+        Ok((offset, buf, &mut self.inner.mmap()[offset..][..size]))
+    }
+
+    /// Calculate the offset and size of a buffer based on its stride.
+    fn offset(mut offset: i32, stride: i32, height: i32) -> (usize, usize) {
+        // total buffer size in bytes (stride already includes bytes per pixel)
+        let size = stride * height;
+        // 5% padding.
+        offset += offset / 20;
+        // Round the start of the slot up to a 64-byte boundary.
+        offset = (offset + 63) & !63;
+        (offset as usize, size as usize)
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    /// Resizes the pool and appends a new buffer.
+    fn dyn_resize(
+        &mut self,
+        offset: usize,
+        width: i32,
+        stride: i32,
+        height: i32,
+        key: K,
+        format: wl_shm::Format,
+    ) -> Option<()> {
+        let (offset, size) = Self::offset(offset as i32, stride, height);
+        if self.inner.len() < offset + size {
+            self.resize(offset + size + size / 20).ok()?;
+        }
+        let free = Arc::new(AtomicBool::new(true));
+        let data = BufferObjectData { free: free.clone() };
+        let buffer = self.inner.create_buffer_raw(
+            offset as i32,
+            width,
+            height,
+            stride,
+            format,
+            Arc::new(data),
+        );
+        self.buffer_list.push(BufferSlot {
+            offset,
+            used: 0,
+            free,
+            buffer: Some(buffer),
+            size,
+            key,
+        });
+        Some(())
+    }
+}
+
+/// Per-buffer protocol data: flips the shared `free` flag when the compositor
+/// releases the buffer (see the [`ObjectData`] impl below).
+struct BufferObjectData {
+    free: Arc<AtomicBool>,
+}
+
+impl ObjectData for BufferObjectData {
+    /// Handles events for the wl_buffer this data is attached to.
+    fn event(
+        self: Arc<Self>,
+        _backend: &Backend,
+        msg: Message<ObjectId, OwnedFd>,
+    ) -> Option<Arc<dyn ObjectData>> {
+        // Sanity-check that the event really comes from a wl_buffer and is
+        // opcode 0 (the only event the interface defines).
+        debug_assert!(wayland_client::backend::protocol::same_interface(
+            msg.sender_id.interface(),
+            wl_buffer::WlBuffer::interface()
+        ));
+        debug_assert!(msg.opcode == 0);
+
+        // wl_buffer only has a single event: wl_buffer.release
+        self.free.store(true, Ordering::Relaxed);
+
+        None
+    }
+
+    fn destroyed(&self, _: ObjectId) {}
+}
diff --git a/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/raw.rs b/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/raw.rs
new file mode 100644
index 00000000..a12afaa0
--- /dev/null
+++ b/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/raw.rs
@@ -0,0 +1,290 @@
+//! A raw shared memory pool handler.
+//!
+//! This is intended as a safe building block for higher level shared memory pool abstractions and is not
+//! encouraged for most library users.
+
+use rustix::{
+    io::Errno,
+    shm::{Mode, OFlags},
+};
+use std::{
+    fs::File,
+    io,
+    ops::Deref,
+    os::unix::prelude::{AsFd, BorrowedFd, OwnedFd},
+    sync::Arc,
+    time::{SystemTime, UNIX_EPOCH},
+};
+
+use memmap2::MmapMut;
+use wayland_client::{
+    backend::ObjectData,
+    protocol::{wl_buffer, wl_shm, wl_shm_pool},
+    Dispatch, Proxy, QueueHandle, WEnum,
+};
+
+use super::CreatePoolError;
+
+/// A raw handler for file backed shared memory pools.
+///
+/// This type of pool will create the SHM memory pool and provide a way to resize the pool.
+///
+/// This pool does not release buffers. If you need this, use one of the higher level pools.
+#[derive(Debug)]
+pub struct RawPool {
+    // Protocol object; destroyed automatically when the pool is dropped.
+    pool: DestroyOnDropPool,
+    // Current size of the backing file in bytes.
+    len: usize,
+    // The anonymous shared-memory file backing the pool.
+    mem_file: File,
+    // Writable mapping of `mem_file`; remapped on every resize.
+    mmap: MmapMut,
+}
+
+impl RawPool {
+    /// Creates a pool of `len` bytes: allocates an anonymous shared-memory
+    /// file, sends it to the compositor via `wl_shm.create_pool`, and maps it.
+    pub fn new(len: usize, shm: &wl_shm::WlShm) -> Result<RawPool, CreatePoolError> {
+        let shm_fd = RawPool::create_shm_fd()?;
+        let mem_file = File::from(shm_fd);
+        mem_file.set_len(len as u64)?;
+
+        let pool = shm
+            .send_constructor(
+                wl_shm::Request::CreatePool {
+                    fd: mem_file.as_fd(),
+                    size: len as i32,
+                },
+                Arc::new(ShmPoolData),
+            )
+            // If the backend is gone, fall back to an inert proxy instead of failing.
+            .unwrap_or_else(|_| Proxy::inert(shm.backend().clone()));
+        // SAFETY: `mem_file` is a freshly created anonymous shm file owned by this
+        // pool. `map_mut` is unsafe because behavior is undefined if the file is
+        // truncated while mapped; the pool only ever grows it (see `resize`).
+        let mmap = unsafe { MmapMut::map_mut(&mem_file)? };
+
+        Ok(RawPool {
+            pool: DestroyOnDropPool(pool),
+            len,
+            mem_file,
+            mmap,
+        })
+    }
+
+    /// Resizes the memory pool, notifying the server the pool has changed in size.
+    ///
+    /// The [`wl_shm`] protocol only allows the pool to be made bigger. If the new size is smaller than the
+    /// current size of the pool, this function will do nothing.
+    pub fn resize(&mut self, size: usize) -> io::Result<()> {
+        if size > self.len {
+            self.len = size;
+            self.mem_file.set_len(size as u64)?;
+            self.pool.resize(size as i32);
+            // SAFETY: the file was just grown to `size`; remapping a file this
+            // pool owns and never shrinks upholds map_mut's requirements.
+            self.mmap = unsafe { MmapMut::map_mut(&self.mem_file) }?;
+        }
+
+        Ok(())
+    }
+
+    /// Returns a reference to the underlying shared memory file using the memmap2 crate.
+    pub fn mmap(&mut self) -> &mut MmapMut {
+        &mut self.mmap
+    }
+
+    /// Returns the size of the mempool
+    #[allow(clippy::len_without_is_empty)]
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Create a new buffer to this pool.
+    ///
+    /// ## Parameters
+    /// - `offset`: the offset (in bytes) from the beginning of the pool at which this buffer starts.
+    /// - `width` and `height`: the width and height of the buffer in pixels.
+    /// - `stride`: distance (in bytes) between the beginning of a row and the next one.
+    /// - `format`: the encoding format of the pixels.
+    ///
+    /// The encoding format of the pixels must be supported by the compositor or else a protocol error is
+    /// risen. You can ensure the format is supported by listening to [`Shm::formats`](crate::shm::Shm::formats).
+    ///
+    /// Note this function only creates the [`wl_buffer`] object, you will need to write to the pixels using the
+    /// [`io::Write`] implementation or [`RawPool::mmap`].
+    #[allow(clippy::too_many_arguments)]
+    pub fn create_buffer<D, U>(
+        &mut self,
+        offset: i32,
+        width: i32,
+        height: i32,
+        stride: i32,
+        format: wl_shm::Format,
+        udata: U,
+        qh: &QueueHandle<D>,
+    ) -> wl_buffer::WlBuffer
+    where
+        D: Dispatch<wl_buffer::WlBuffer, U> + 'static,
+        U: Send + Sync + 'static,
+    {
+        self.pool
+            .create_buffer(offset, width, height, stride, format, qh, udata)
+    }
+
+    /// Create a new buffer to this pool.
+    ///
+    /// This is identical to [`Self::create_buffer`], but allows using a custom [`ObjectData`]
+    /// implementation instead of relying on the [Dispatch] interface.
+    #[allow(clippy::too_many_arguments)]
+    pub fn create_buffer_raw(
+        &mut self,
+        offset: i32,
+        width: i32,
+        height: i32,
+        stride: i32,
+        format: wl_shm::Format,
+        data: Arc<dyn ObjectData + 'static>,
+    ) -> wl_buffer::WlBuffer {
+        self.pool
+            .send_constructor(
+                wl_shm_pool::Request::CreateBuffer {
+                    offset,
+                    width,
+                    height,
+                    stride,
+                    format: WEnum::Value(format),
+                },
+                data,
+            )
+            // If the backend is gone, fall back to an inert proxy instead of failing.
+            .unwrap_or_else(|_| Proxy::inert(self.pool.backend().clone()))
+    }
+
+    /// Returns the pool object used to communicate with the server.
+    pub fn pool(&self) -> &wl_shm_pool::WlShmPool {
+        &self.pool
+    }
+}
+
+impl AsFd for RawPool {
+    /// Borrows the file descriptor of the shared-memory file backing this pool.
+    fn as_fd(&self) -> BorrowedFd<'_> {
+        AsFd::as_fd(&self.mem_file)
+    }
+}
+
+impl From<RawPool> for OwnedFd {
+    /// Consumes the pool and returns ownership of the backing file descriptor.
+    fn from(pool: RawPool) -> Self {
+        OwnedFd::from(pool.mem_file)
+    }
+}
+
+impl io::Write for RawPool {
+    /// Writes into the backing shared-memory file at its current cursor.
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        let file = &mut self.mem_file;
+        io::Write::write(file, buf)
+    }
+
+    /// Flushes the backing shared-memory file.
+    fn flush(&mut self) -> io::Result<()> {
+        let file = &mut self.mem_file;
+        io::Write::flush(file)
+    }
+}
+
+impl io::Seek for RawPool {
+    /// Moves the write cursor of the backing shared-memory file.
+    fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
+        let file = &mut self.mem_file;
+        io::Seek::seek(file, pos)
+    }
+}
+
+impl RawPool {
+    /// Creates an anonymous shared-memory file descriptor.
+    ///
+    /// On Linux this prefers `memfd_create`; elsewhere (or when memfd is
+    /// unsupported) it falls back to POSIX `shm_open` with an immediately
+    /// unlinked, timestamp-derived name.
+    fn create_shm_fd() -> io::Result<OwnedFd> {
+        #[cfg(target_os = "linux")]
+        {
+            match RawPool::create_memfd() {
+                Ok(fd) => return Ok(fd),
+
+                // Not supported, use fallback.
+                Err(Errno::NOSYS) => (),
+
+                Err(err) => return Err(Into::<io::Error>::into(err)),
+            }
+        }
+
+        // Derive a (hopefully unique) shm object name from the current time.
+        let time = SystemTime::now();
+        let mut mem_file_handle = format!(
+            "/smithay-client-toolkit-{}",
+            time.duration_since(UNIX_EPOCH).unwrap().subsec_nanos()
+        );
+
+        loop {
+            let flags = OFlags::CREATE | OFlags::EXCL | OFlags::RDWR;
+
+            let mode = Mode::RUSR | Mode::WUSR;
+
+            match rustix::shm::open(mem_file_handle.as_str(), flags, mode) {
+                // Unlink right away so the object vanishes once the fd is closed.
+                Ok(fd) => match rustix::shm::unlink(mem_file_handle.as_str()) {
+                    Ok(()) => return Ok(fd),
+
+                    Err(errno) => {
+                        return Err(errno.into());
+                    }
+                },
+
+                Err(Errno::EXIST) => {
+                    // Change the handle if we happen to be duplicate.
+                    let time = SystemTime::now();
+
+                    mem_file_handle = format!(
+                        "/smithay-client-toolkit-{}",
+                        time.duration_since(UNIX_EPOCH).unwrap().subsec_nanos()
+                    );
+                }
+
+                // Interrupted by a signal: just retry.
+                Err(Errno::INTR) => (),
+
+                Err(err) => return Err(err.into()),
+            }
+        }
+    }
+
+    /// Creates a memfd-backed fd (Linux only), retrying on EINTR.
+    #[cfg(target_os = "linux")]
+    fn create_memfd() -> rustix::io::Result<OwnedFd> {
+        use rustix::fs::{MemfdFlags, SealFlags};
+
+        loop {
+            let name = c"smithay-client-toolkit";
+            let flags = MemfdFlags::ALLOW_SEALING | MemfdFlags::CLOEXEC;
+
+            match rustix::fs::memfd_create(name, flags) {
+                Ok(fd) => {
+                    // We only need to seal for the purposes of optimization, ignore the errors.
+                    let _ = rustix::fs::fcntl_add_seals(&fd, SealFlags::SHRINK | SealFlags::SEAL);
+                    return Ok(fd);
+                }
+
+                // Interrupted by a signal: just retry.
+                Err(Errno::INTR) => (),
+
+                Err(err) => return Err(err),
+            }
+        }
+    }
+}
+
+/// Newtype around [`wl_shm_pool::WlShmPool`] that destroys the protocol
+/// object when dropped (see the `Drop` impl below).
+#[derive(Debug)]
+struct DestroyOnDropPool(wl_shm_pool::WlShmPool);
+
+impl Deref for DestroyOnDropPool {
+    type Target = wl_shm_pool::WlShmPool;
+
+    // Transparent access to the wrapped pool object.
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl Drop for DestroyOnDropPool {
+    /// Sends `wl_shm_pool.destroy` so the server can reclaim the pool.
+    fn drop(&mut self) {
+        self.0.destroy();
+    }
+}
+
+/// Zero-sized [`ObjectData`] for the wl_shm_pool proxy; the interface has no
+/// events, so no state is needed.
+#[derive(Debug)]
+struct ShmPoolData;
+
+impl ObjectData for ShmPoolData {
+    fn event(
+        self: Arc<Self>,
+        _: &wayland_client::backend::Backend,
+        _: wayland_client::backend::protocol::Message<wayland_client::backend::ObjectId, OwnedFd>,
+    ) -> Option<Arc<(dyn ObjectData + 'static)>> {
+        // The wl_shm_pool interface defines no events, so this can never fire.
+        unreachable!("wl_shm_pool has no events")
+    }
+
+    fn destroyed(&self, _: wayland_client::backend::ObjectId) {}
+}
diff --git a/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/slot.rs b/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/slot.rs
new file mode 100644
index 00000000..ab52c5f6
--- /dev/null
+++ b/pkgs/by-name/ri/river-mk-keymap/src/wayland/shm/slot.rs
@@ -0,0 +1,596 @@
+//! A pool implementation based on buffer slots
+
+use std::io;
+use std::{
+    os::unix::io::{AsRawFd, OwnedFd},
+    sync::{
+        atomic::{AtomicU8, AtomicUsize, Ordering},
+        Arc, Mutex, Weak,
+    },
+};
+
+use wayland_client::backend::protocol::Message;
+use wayland_client::backend::{ObjectData, ObjectId};
+use wayland_client::{
+    protocol::{wl_buffer, wl_shm, wl_surface},
+    Proxy,
+};
+
+use crate::wayland::shm::raw::RawPool;
+use crate::wayland::shm::CreatePoolError;
+
+/// An error that may occur when creating a buffer from a [`SlotPool`].
+#[derive(Debug, thiserror::Error)]
+pub(crate) enum CreateBufferError {
+    /// Slot creation error.
+    #[error(transparent)]
+    Io(#[from] io::Error),
+
+    /// Pool mismatch.
+    #[error("Incorrect pool for slot")]
+    PoolMismatch,
+
+    /// Slot size mismatch
+    #[error("Requested buffer size is too large for slot")]
+    SlotTooSmall,
+}
+
+/// An error that may occur when manually activating or deactivating a buffer.
+#[derive(Debug, thiserror::Error)]
+pub(crate) enum ActivateSlotError {
+    /// Buffer was already active
+    #[error("Buffer was already active")]
+    AlreadyActive,
+}
+
+/// A shared-memory pool that hands out fixed-size [`Slot`]s, tracked through a
+/// free list of unallocated byte ranges.
+#[derive(Debug)]
+pub(crate) struct SlotPool {
+    pub(crate) inner: RawPool,
+    // Shared (via Weak) with every Slot so a slot can return its memory to the
+    // free list even after the pool value itself has moved or been dropped.
+    free_list: Arc<Mutex<Vec<FreelistEntry>>>,
+}
+
+/// One contiguous run of unallocated bytes within the pool.
+#[derive(Debug)]
+struct FreelistEntry {
+    // Byte offset of the free region from the start of the pool.
+    offset: usize,
+    // Length of the free region in bytes.
+    len: usize,
+}
+
+/// A chunk of memory allocated from a [`SlotPool`]
+///
+/// Retaining this object is only required if you wish to resize or change the buffer's format
+/// without changing the contents of the backing memory.
+#[derive(Debug)]
+pub(crate) struct Slot {
+    inner: Arc<SlotInner>,
+}
+
+/// State shared by all [`Slot`] handles and [`BufferData`]s that refer to the
+/// same region of pool memory.
+#[derive(Debug)]
+struct SlotInner {
+    // Weak so an outstanding Slot does not keep a dropped pool's free list alive.
+    free_list: Weak<Mutex<Vec<FreelistEntry>>>,
+    // Byte offset of this slot within the pool.
+    offset: usize,
+    // Length of this slot in bytes (rounded up to a multiple of 64 at creation).
+    len: usize,
+    // Number of buffers in this slot that the server is currently holding.
+    active_buffers: AtomicUsize,
+    /// Count of all "real" references to this slot.  This includes all Slot objects and any
+    /// [`BufferData`] object that is not in the DEAD state.  When this reaches zero, the memory for
+    /// this slot will return to the [`free_list`].  It is not possible for it to reach zero and have a
+    /// Slot or Buffer referring to it.
+    all_refs: AtomicUsize,
+}
+
+/// A wrapper around a [`wl_buffer::WlBuffer`] which has been allocated via a [`SlotPool`].
+///
+/// When this object is dropped, the buffer will be destroyed immediately if it is not active, or
+/// upon the server's release if it is.
+#[derive(Debug)]
+pub(crate) struct Buffer {
+    inner: wl_buffer::WlBuffer,
+    // Height in pixels, as passed to create_buffer_in.
+    height: i32,
+    // Stride in bytes per row, as passed to create_buffer_in.
+    stride: i32,
+    // Keeps the backing slot memory alive for the lifetime of this buffer.
+    slot: Slot,
+}
+
+/// [`ObjectData`] for the [`WlBuffer`]
+#[derive(Debug)]
+struct BufferData {
+    inner: Arc<SlotInner>,
+    // One of the ACTIVE / INACTIVE / DESTROY_ON_RELEASE / DEAD constants below.
+    state: AtomicU8,
+}
+
+// These constants define the value of BufferData::state, since AtomicEnum does not exist.
+//
+// The encoding uses two bits: bit 0 (RELEASE_SET) means "not currently held by
+// the server" and bit 1 (DESTROY_SET) means "the owning Buffer was dropped", so
+// each state transition below is a single atomic fetch_or / fetch_and.
+impl BufferData {
+    /// Buffer is counted in [`active_buffers`] list; will return to INACTIVE on Release.
+    const ACTIVE: u8 = 0;
+
+    /// Buffer is not counted in [`active_buffers`] list, but also has not been destroyed.
+    const INACTIVE: u8 = 1;
+
+    /// Buffer is counted in [`active_buffers`] list; will move to DEAD on Release
+    const DESTROY_ON_RELEASE: u8 = 2;
+
+    /// Buffer has been destroyed
+    const DEAD: u8 = 3;
+
+    /// Value that is [`ORed`] on buffer release to transition to the next state
+    const RELEASE_SET: u8 = 1;
+
+    /// Value that is [`ORed`] on buffer destroy to transition to the next state
+    const DESTROY_SET: u8 = 2;
+
+    /// Call after successfully transitioning the state to DEAD
+    fn record_death(&self) {
+        // Constructing and immediately dropping a temporary Slot decrements
+        // SlotInner::all_refs; if that was the last reference, Slot::drop
+        // returns the slot's memory to the pool's free list.
+        drop(Slot {
+            inner: self.inner.clone(),
+        });
+    }
+}
+
+impl SlotPool {
+    /// Create a new slot pool of (at least) `len` bytes backed by shared memory
+    /// allocated through the given `wl_shm` global.
+    ///
+    /// The whole pool starts out as a single free-list entry.
+    pub(crate) fn new(len: usize, shm: &wl_shm::WlShm) -> Result<Self, CreatePoolError> {
+        let inner = RawPool::new(len, shm)?;
+        let free_list = Arc::new(Mutex::new(vec![FreelistEntry {
+            offset: 0,
+            len: inner.len(),
+        }]));
+        Ok(SlotPool { inner, free_list })
+    }
+
+    /// Create a new buffer in a new slot.
+    ///
+    /// This returns the buffer and the canvas.  The parameters are:
+    ///
+    /// - `width`: the width of this buffer (in pixels)
+    /// - `height`: the height of this buffer (in pixels)
+    /// - `stride`: distance (in bytes) between the beginning of a row and the next one
+    /// - `format`: the encoding format of the pixels. Using a format that was not
+    ///   advertised to the `wl_shm` global by the server is a protocol error and will
+    ///   terminate your connection.
+    ///
+    /// The [Slot] for this buffer will have exactly the size required for the data.  It can be
+    /// accessed via [`Buffer::slot`] to create additional buffers that point to the same data.  This
+    /// is required if you wish to change formats, buffer dimensions, or attach a canvas to
+    /// multiple surfaces.
+    ///
+    /// For more control over sizing, use [`Self::new_slot`] and [`Self::create_buffer_in`].
+    pub(crate) fn create_buffer(
+        &mut self,
+        width: i32,
+        height: i32,
+        stride: i32,
+        format: wl_shm::Format,
+    ) -> Result<(Buffer, &mut [u8]), CreateBufferError> {
+        let len = (height as usize) * (stride as usize);
+        let slot = self.new_slot(len)?;
+        let buffer = self.create_buffer_in(&slot, width, height, stride, format)?;
+        let canvas = self.raw_data_mut(&slot);
+        Ok((buffer, canvas))
+    }
+
+    /// Get the bytes corresponding to a given slot or buffer if drawing to the slot is permitted.
+    ///
+    /// Returns `None` if there are active buffers in the slot or if the slot does not correspond
+    /// to this pool.
+    pub(crate) fn canvas(&mut self, key: &impl CanvasKey) -> Option<&mut [u8]> {
+        key.canvas(self)
+    }
+
+    /// Returns the size, in bytes, of this pool.
+    #[allow(clippy::len_without_is_empty)]
+    pub(crate) fn len(&self) -> usize {
+        self.inner.len()
+    }
+
+    /// Resizes the memory pool, notifying the server the pool has changed in size.
+    ///
+    /// This is an optimization; the pool automatically resizes when you allocate new slots.
+    pub(crate) fn resize(&mut self, size: usize) -> io::Result<()> {
+        let old_len = self.inner.len();
+        self.inner.resize(size)?;
+        let new_len = self.inner.len();
+        if old_len == new_len {
+            return Ok(());
+        }
+        // add the new memory to the freelist
+        let mut free = self.free_list.lock().unwrap();
+        // If the last free entry already ends at the old end of the pool, just
+        // extend it instead of adding a new entry.
+        if let Some(FreelistEntry { offset, len }) = free.last_mut() {
+            if *offset + *len == old_len {
+                *len += new_len - old_len;
+                return Ok(());
+            }
+        }
+        free.push(FreelistEntry {
+            offset: old_len,
+            len: new_len - old_len,
+        });
+        Ok(())
+    }
+
+    /// Allocate `size` bytes from the free list (first fit), growing the pool
+    /// when no entry is large enough.  Returns the offset of the allocation.
+    fn alloc(&mut self, size: usize) -> io::Result<usize> {
+        let mut free = self.free_list.lock().unwrap();
+        // First-fit scan: shrink the first entry that can hold the request.
+        // (Entries may be left with len == 0; `free` cleans those up later.)
+        for FreelistEntry { offset, len } in free.iter_mut() {
+            if *len >= size {
+                let rv = *offset;
+                *len -= size;
+                *offset += size;
+                return Ok(rv);
+            }
+        }
+        // No entry fits: the allocation will start at the end of the pool, or
+        // earlier if the final free entry touches the end of the pool.
+        let mut rv = self.inner.len();
+        let mut pop_tail = false;
+        if let Some(FreelistEntry { offset, len }) = free.last() {
+            if offset + len == self.inner.len() {
+                rv -= len;
+                pop_tail = true;
+            }
+        }
+        // resize like Vec::reserve, always at least doubling
+        let target = std::cmp::max(rv + size, self.inner.len() * 2);
+        self.inner.resize(target)?;
+        // adjust the end of the freelist here
+        if pop_tail {
+            free.pop();
+        }
+        if target > rv + size {
+            free.push(FreelistEntry {
+                offset: rv + size,
+                len: target - rv - size,
+            });
+        }
+        Ok(rv)
+    }
+
+    /// Return the region `[offset, offset + len)` to the free list, merging it
+    /// with adjacent free entries and dropping empty entries.
+    ///
+    /// This is an associated function (not a method) so `Slot::drop` can call
+    /// it with only the upgraded `Weak` free list, after the pool may be gone.
+    fn free(free_list: &Mutex<Vec<FreelistEntry>>, mut offset: usize, mut len: usize) {
+        let mut free = free_list.lock().unwrap();
+        // Rebuild the (offset-ordered) list, coalescing as we go.
+        let mut nf = Vec::with_capacity(free.len() + 1);
+        for &FreelistEntry {
+            offset: ioff,
+            len: ilen,
+        } in free.iter()
+        {
+            // Existing entry ends exactly where ours starts: merge before ours.
+            if ioff + ilen == offset {
+                offset = ioff;
+                len += ilen;
+                continue;
+            }
+            // Existing entry starts exactly where ours ends: merge after ours.
+            if ioff == offset + len {
+                len += ilen;
+                continue;
+            }
+            // We have passed the insertion point: emit the merged entry once
+            // (len is zeroed so it is not emitted again below the loop).
+            if ioff > offset + len && len != 0 {
+                nf.push(FreelistEntry { offset, len });
+                len = 0;
+            }
+            // Keep non-empty existing entries; drop zero-length ones.
+            if ilen != 0 {
+                nf.push(FreelistEntry {
+                    offset: ioff,
+                    len: ilen,
+                });
+            }
+        }
+        // Entry belongs at (or past) the end of the list.
+        if len != 0 {
+            nf.push(FreelistEntry { offset, len });
+        }
+        *free = nf;
+    }
+
+    /// Create a new slot with the given size in bytes.
+    pub(crate) fn new_slot(&mut self, mut len: usize) -> io::Result<Slot> {
+        // Round the length up to a multiple of 64 bytes.
+        len = (len + 63) & !63;
+        let offset = self.alloc(len)?;
+
+        Ok(Slot {
+            inner: Arc::new(SlotInner {
+                free_list: Arc::downgrade(&self.free_list),
+                offset,
+                len,
+                active_buffers: AtomicUsize::new(0),
+                // The returned Slot itself holds the first "real" reference.
+                all_refs: AtomicUsize::new(1),
+            }),
+        })
+    }
+
+    /// Get the bytes corresponding to a given slot.
+    ///
+    /// Note: prefer using [`Self::canvas`], which will prevent drawing to a buffer that has not been
+    /// released by the server.
+    ///
+    /// Returns an empty buffer if the slot does not belong to this pool.
+    pub(crate) fn raw_data_mut(&mut self, slot: &Slot) -> &mut [u8] {
+        // Pointer identity of the free list ties a slot to its owning pool.
+        if slot.inner.free_list.as_ptr() == Arc::as_ptr(&self.free_list) {
+            &mut self.inner.mmap()[slot.inner.offset..][..slot.inner.len]
+        } else {
+            &mut []
+        }
+    }
+
+    /// Create a new buffer corresponding to a slot.
+    ///
+    /// The parameters are:
+    ///
+    /// - `width`: the width of this buffer (in pixels)
+    /// - `height`: the height of this buffer (in pixels)
+    /// - `stride`: distance (in bytes) between the beginning of a row and the next one
+    /// - `format`: the encoding format of the pixels. Using a format that was not
+    ///   advertised to the `wl_shm` global by the server is a protocol error and will
+    ///   terminate your connection
+    pub(crate) fn create_buffer_in(
+        &mut self,
+        slot: &Slot,
+        width: i32,
+        height: i32,
+        stride: i32,
+        format: wl_shm::Format,
+    ) -> Result<Buffer, CreateBufferError> {
+        let offset = slot.inner.offset as i32;
+        let len = (height as usize) * (stride as usize);
+        if len > slot.inner.len {
+            return Err(CreateBufferError::SlotTooSmall);
+        }
+
+        if slot.inner.free_list.as_ptr() != Arc::as_ptr(&self.free_list) {
+            return Err(CreateBufferError::PoolMismatch);
+        }
+
+        let slot = slot.clone();
+        // take a ref for the BufferData, which will be destroyed by BufferData::record_death
+        slot.inner.all_refs.fetch_add(1, Ordering::Relaxed);
+        let data = Arc::new(BufferData {
+            inner: slot.inner.clone(),
+            state: AtomicU8::new(BufferData::INACTIVE),
+        });
+        let buffer = self
+            .inner
+            .create_buffer_raw(offset, width, height, stride, format, data);
+        Ok(Buffer {
+            inner: buffer,
+            height,
+            stride,
+            slot,
+        })
+    }
+}
+
+impl Clone for Slot {
+    fn clone(&self) -> Self {
+        // Take an additional "real" reference to the slot's memory; it is
+        // released again in Slot::drop.
+        self.inner.all_refs.fetch_add(1, Ordering::Relaxed);
+        Self {
+            inner: Arc::clone(&self.inner),
+        }
+    }
+}
+
+impl Drop for Slot {
+    fn drop(&mut self) {
+        // If this was the last "real" reference, return the slot's memory to
+        // the pool's free list (if the pool's free list still exists).
+        if self.inner.all_refs.fetch_sub(1, Ordering::Relaxed) == 1 {
+            if let Some(free_list) = self.inner.free_list.upgrade() {
+                SlotPool::free(&free_list, self.inner.offset, self.inner.len);
+            }
+        }
+    }
+}
+
+impl Drop for SlotInner {
+    fn drop(&mut self) {
+        // Every reference taken by Slot::clone / create_buffer_in must have
+        // been released before the shared state itself is destroyed.
+        debug_assert_eq!(*self.all_refs.get_mut(), 0);
+    }
+}
+
+/// A helper trait for [`SlotPool::canvas`].
+pub(crate) trait CanvasKey {
+    /// Resolve this key to the writable bytes it covers in `pool`, or `None`
+    /// when drawing is not permitted (active buffers, or wrong pool).
+    fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]>;
+}
+
+impl Slot {
+    /// Return true if there are buffers referencing this slot whose contents are being accessed
+    /// by the server.
+    pub(crate) fn has_active_buffers(&self) -> bool {
+        self.inner.active_buffers.load(Ordering::Relaxed) != 0
+    }
+
+    /// Returns the size, in bytes, of this slot.
+    #[allow(clippy::len_without_is_empty)]
+    pub(crate) fn len(&self) -> usize {
+        self.inner.len
+    }
+
+    /// Get the bytes corresponding to a given slot if drawing to the slot is permitted.
+    ///
+    /// Returns `None` if there are active buffers in the slot or if the slot does not correspond
+    /// to this pool.
+    pub(crate) fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> {
+        if self.has_active_buffers() {
+            return None;
+        }
+        // Pointer identity of the free list ties the slot to its owning pool.
+        if self.inner.free_list.as_ptr() == Arc::as_ptr(&pool.free_list) {
+            Some(&mut pool.inner.mmap()[self.inner.offset..][..self.inner.len])
+        } else {
+            None
+        }
+    }
+}
+
+impl CanvasKey for Slot {
+    fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> {
+        // Delegate to the inherent method.
+        Slot::canvas(self, pool)
+    }
+}
+
+impl Buffer {
+    /// Attach a buffer to a surface.
+    ///
+    /// This marks the slot as active until the server releases the buffer, which will happen
+    /// automatically assuming the surface is committed without attaching a different buffer.
+    ///
+    /// Note: if you need to ensure that [`canvas()`](Buffer::canvas) calls never return data that
+    /// could be attached to a surface in a multi-threaded client, make this call while you have
+    /// exclusive access to the corresponding [`SlotPool`].
+    pub(crate) fn attach_to(&self, surface: &wl_surface::WlSurface) -> Result<(), ActivateSlotError> {
+        self.activate()?;
+        surface.attach(Some(&self.inner), 0, 0);
+        Ok(())
+    }
+
+    /// Get the inner buffer.
+    pub(crate) fn wl_buffer(&self) -> &wl_buffer::WlBuffer {
+        &self.inner
+    }
+
+    /// Height of this buffer in pixels.
+    pub(crate) fn height(&self) -> i32 {
+        self.height
+    }
+
+    /// Stride of this buffer in bytes per row.
+    pub(crate) fn stride(&self) -> i32 {
+        self.stride
+    }
+
+    /// Fetch the [`BufferData`] attached to the proxy, if it has the expected type.
+    fn data(&self) -> Option<&BufferData> {
+        self.inner.object_data()?.downcast_ref()
+    }
+
+    /// Get the bytes corresponding to this buffer if drawing is permitted.
+    ///
+    /// This may be smaller than the canvas associated with the slot.
+    pub(crate) fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> {
+        // Only height * stride bytes belong to this buffer, even if the slot is larger.
+        let len = (self.height as usize) * (self.stride as usize);
+        if self.slot.inner.active_buffers.load(Ordering::Relaxed) != 0 {
+            return None;
+        }
+        if self.slot.inner.free_list.as_ptr() == Arc::as_ptr(&pool.free_list) {
+            Some(&mut pool.inner.mmap()[self.slot.inner.offset..][..len])
+        } else {
+            None
+        }
+    }
+
+    /// Get the slot corresponding to this buffer.
+    pub(crate) fn slot(&self) -> Slot {
+        self.slot.clone()
+    }
+
+    /// Manually mark a buffer as active.
+    ///
+    /// An active buffer prevents drawing on its slot until a Release event is received or until
+    /// manually deactivated.
+    pub(crate) fn activate(&self) -> Result<(), ActivateSlotError> {
+        let data = self.data().expect("UserData type mismatch");
+
+        // This bitwise AND will transition INACTIVE -> ACTIVE, or do nothing if the buffer was
+        // already ACTIVE.  No other ordering is required, as the server will not send a Release
+        // until we send our attach after returning Ok.
+        match data
+            .state
+            .fetch_and(!BufferData::RELEASE_SET, Ordering::Relaxed)
+        {
+            BufferData::INACTIVE => {
+                data.inner.active_buffers.fetch_add(1, Ordering::Relaxed);
+                Ok(())
+            }
+            BufferData::ACTIVE => Err(ActivateSlotError::AlreadyActive),
+            // DESTROY_ON_RELEASE / DEAD are impossible while a Buffer exists.
+            _ => unreachable!("Invalid state in BufferData"),
+        }
+    }
+
+    /// Manually mark a buffer as inactive.
+    ///
+    /// This should be used when the buffer was manually marked as active or when a buffer was
+    /// attached to a surface but not committed.  Calling this function on a buffer that was
+    /// committed to a surface risks making the surface contents undefined.
+    pub(crate) fn deactivate(&self) -> Result<(), ActivateSlotError> {
+        let data = self.data().expect("UserData type mismatch");
+
+        // Same operation as the Release event, but we know the Buffer was not dropped.
+        match data
+            .state
+            .fetch_or(BufferData::RELEASE_SET, Ordering::Relaxed)
+        {
+            BufferData::ACTIVE => {
+                data.inner.active_buffers.fetch_sub(1, Ordering::Relaxed);
+                Ok(())
+            }
+            BufferData::INACTIVE => Err(ActivateSlotError::AlreadyActive),
+            // DESTROY_ON_RELEASE / DEAD are impossible while a Buffer exists.
+            _ => unreachable!("Invalid state in BufferData"),
+        }
+    }
+}
+
+impl CanvasKey for Buffer {
+    fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> {
+        // Delegate to the inherent method.
+        Buffer::canvas(self, pool)
+    }
+}
+
+impl Drop for Buffer {
+    fn drop(&mut self) {
+        if let Some(data) = self.data() {
+            // Setting DESTROY_SET moves ACTIVE -> DESTROY_ON_RELEASE and
+            // INACTIVE -> DEAD; the return value is the previous state.
+            match data
+                .state
+                .fetch_or(BufferData::DESTROY_SET, Ordering::Relaxed)
+            {
+                BufferData::ACTIVE => {
+                    // server is using the buffer, let ObjectData handle the destroy
+                }
+                BufferData::INACTIVE => {
+                    data.record_death();
+                    self.inner.destroy();
+                }
+                // DESTROY_ON_RELEASE / DEAD would mean the Buffer was dropped twice.
+                _ => unreachable!("Invalid state in BufferData"),
+            }
+        }
+    }
+}
+
+impl ObjectData for BufferData {
+    // Handles the only wl_buffer event, Release (opcode 0).
+    fn event(
+        self: Arc<Self>,
+        handle: &wayland_client::backend::Backend,
+        msg: Message<ObjectId, OwnedFd>,
+    ) -> Option<Arc<dyn ObjectData>> {
+        debug_assert!(wayland_client::backend::protocol::same_interface(
+            msg.sender_id.interface(),
+            wl_buffer::WlBuffer::interface()
+        ));
+        debug_assert!(msg.opcode == 0);
+
+        // Setting RELEASE_SET moves ACTIVE -> INACTIVE and
+        // DESTROY_ON_RELEASE -> DEAD; the return value is the previous state.
+        match self
+            .state
+            .fetch_or(BufferData::RELEASE_SET, Ordering::Relaxed)
+        {
+            BufferData::ACTIVE => {
+                self.inner.active_buffers.fetch_sub(1, Ordering::Relaxed);
+            }
+            BufferData::INACTIVE => {
+                // possible spurious release, or someone called deactivate incorrectly
+                eprintln!("Unexpected WlBuffer::Release on an inactive buffer");
+            }
+            BufferData::DESTROY_ON_RELEASE => {
+                self.record_death();
+                self.inner.active_buffers.fetch_sub(1, Ordering::Relaxed);
+
+                // The Destroy message is identical to Release message (no args, same ID), so just reply
+                handle
+                    .send_request(msg.map_fd(|x| x.as_raw_fd()), None, None)
+                    .expect("Unexpected invalid ID");
+            }
+            BufferData::DEAD => {
+                // no-op, this object is already unusable
+            }
+            _ => unreachable!("Invalid state in BufferData"),
+        }
+
+        None
+    }
+
+    fn destroyed(&self, _: ObjectId) {}
+}
+
+impl Drop for BufferData {
+    // Runs when the connection drops the proxy's ObjectData; cleans up any
+    // bookkeeping that a Release event would otherwise have performed.
+    fn drop(&mut self) {
+        let state = *self.state.get_mut();
+        if state == BufferData::ACTIVE || state == BufferData::DESTROY_ON_RELEASE {
+            // Release the active-buffer count
+            self.inner.active_buffers.fetch_sub(1, Ordering::Relaxed);
+        }
+
+        if state != BufferData::DEAD {
+            // nobody has ever transitioned state to DEAD, so we are responsible for freeing the
+            // extra reference
+            self.record_death();
+        }
+    }
+}