Diffstat (limited to 'tvix/nix-compat/src/wire')
-rw-r--r--  tvix/nix-compat/src/wire/bytes/mod.rs             | 283
-rw-r--r--  tvix/nix-compat/src/wire/bytes/reader/mod.rs      | 686
-rw-r--r--  tvix/nix-compat/src/wire/bytes/reader/trailer.rs  | 197
-rw-r--r--  tvix/nix-compat/src/wire/bytes/writer.rs          | 538
-rw-r--r--  tvix/nix-compat/src/wire/mod.rs                   |   5
5 files changed, 1709 insertions, 0 deletions
diff --git a/tvix/nix-compat/src/wire/bytes/mod.rs b/tvix/nix-compat/src/wire/bytes/mod.rs
new file mode 100644
index 000000000000..2ed071e37985
--- /dev/null
+++ b/tvix/nix-compat/src/wire/bytes/mod.rs
@@ -0,0 +1,283 @@
+use std::{
+    io::{Error, ErrorKind},
+    mem::MaybeUninit,
+    ops::RangeInclusive,
+};
+use tokio::io::{self, AsyncReadExt, AsyncWriteExt, ReadBuf};
+
+pub(crate) mod reader;
+pub use reader::BytesReader;
+mod writer;
+pub use writer::BytesWriter;
+
+/// 8 null bytes, used to write out padding.
+const EMPTY_BYTES: &[u8; 8] = &[0u8; 8];
+
+/// The length of the size field, in bytes, is always 8.
+const LEN_SIZE: usize = 8;
+
+/// Read a "bytes wire packet" from the AsyncRead.
+/// Rejects payloads whose size lies outside `allowed_size`.
+///
+/// The packet is made up of three parts:
+/// - a length header, u64, LE-encoded
+/// - the payload itself
+/// - null bytes to the next 8 byte boundary
+///
+/// Ensures the payload size fits into the `allowed_size` passed,
+/// and that the padding is actual null bytes.
+///
+/// On success, the returned `Vec<u8>` only contains the payload itself.
+/// On failure (for example if too large a bytes packet was sent), the reader
+/// becomes unusable.
+///
+/// This buffers the entire payload into memory,
+/// a streaming version is available at [crate::wire::bytes::BytesReader].
+pub async fn read_bytes<R: ?Sized>(
+    r: &mut R,
+    allowed_size: RangeInclusive<usize>,
+) -> io::Result<Vec<u8>>
+where
+    R: AsyncReadExt + Unpin,
+{
+    // read the length field
+    let len = r.read_u64_le().await?;
+    let len: usize = len
+        .try_into()
+        .ok()
+        .filter(|len| allowed_size.contains(len))
+        .ok_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::InvalidData,
+                "signalled package size not in allowed range",
+            )
+        })?;
+
+    // calculate the total length, including padding.
+    // byte packets are padded to 8 byte blocks each.
+    let padded_len = padding_len(len as u64) as u64 + (len as u64);
+    let mut limited_reader = r.take(padded_len);
+
+    let mut buf = Vec::new();
+
+    let s = limited_reader.read_to_end(&mut buf).await?;
+
+    // make sure we got exactly the number of bytes, and not less.
+    if s as u64 != padded_len {
+        return Err(io::ErrorKind::UnexpectedEof.into());
+    }
+
+    let (_content, padding) = buf.split_at(len);
+
+    // ensure the padding is all zeroes.
+    if padding.iter().any(|&b| b != 0) {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidData,
+            "padding is not all zeroes",
+        ));
+    }
+
+    // return the data without the padding
+    buf.truncate(len);
+    Ok(buf)
+}
+
+pub(crate) async fn read_bytes_buf<'a, const N: usize, R: ?Sized>(
+    reader: &mut R,
+    buf: &'a mut [MaybeUninit<u8>; N],
+    allowed_size: RangeInclusive<usize>,
+) -> io::Result<&'a [u8]>
+where
+    R: AsyncReadExt + Unpin,
+{
+    assert_eq!(N % 8, 0);
+    assert!(*allowed_size.end() <= N);
+
+    let len = reader.read_u64_le().await?;
+    let len: usize = len
+        .try_into()
+        .ok()
+        .filter(|len| allowed_size.contains(len))
+        .ok_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::InvalidData,
+                "signalled package size not in allowed range",
+            )
+        })?;
+
+    let buf_len = (len + 7) & !7;
+    let buf = {
+        let mut read_buf = ReadBuf::uninit(&mut buf[..buf_len]);
+
+        while read_buf.filled().len() < buf_len {
+            reader.read_buf(&mut read_buf).await?;
+        }
+
+        // ReadBuf::filled does not pass the underlying buffer's lifetime through,
+        // so we must make a trip to hell.
+        //
+        // SAFETY: `read_buf` is filled up to `buf_len`, and we verify that it is
+        // still pointing at the same underlying buffer.
+        unsafe {
+            assert_eq!(read_buf.filled().as_ptr(), buf.as_ptr() as *const u8);
+            assume_init_bytes(&buf[..buf_len])
+        }
+    };
+
+    if buf[len..buf_len].iter().any(|&b| b != 0) {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidData,
+            "padding is not all zeroes",
+        ));
+    }
+
+    Ok(&buf[..len])
+}
+
+/// SAFETY: The bytes have to actually be initialized.
+unsafe fn assume_init_bytes(slice: &[MaybeUninit<u8>]) -> &[u8] {
+    &*(slice as *const [MaybeUninit<u8>] as *const [u8])
+}
+
+/// Read a "bytes wire packet" of from the AsyncRead and tries to parse as string.
+/// Internally uses [read_bytes].
+/// Rejects reading more than `allowed_size` bytes of payload.
+pub async fn read_string<R>(r: &mut R, allowed_size: RangeInclusive<usize>) -> io::Result<String>
+where
+    R: AsyncReadExt + Unpin,
+{
+    let bytes = read_bytes(r, allowed_size).await?;
+    String::from_utf8(bytes).map_err(|e| Error::new(ErrorKind::InvalidData, e))
+}
+
+/// Writes a "bytes wire packet" to a (hopefully buffered) [AsyncWriteExt].
+///
+/// Accepts anything implementing AsRef<[u8]> as payload.
+///
+/// See [read_bytes] for a description of the format.
+///
+/// Note: if performance matters to you, make sure your
+/// [AsyncWriteExt] handle is buffered. This function is quite
+/// write-intensive.
+pub async fn write_bytes<W: AsyncWriteExt + Unpin, B: AsRef<[u8]>>(
+    w: &mut W,
+    b: B,
+) -> io::Result<()> {
+    // write the size packet.
+    w.write_u64_le(b.as_ref().len() as u64).await?;
+
+    // write the payload
+    w.write_all(b.as_ref()).await?;
+
+    // write padding if needed
+    let padding_len = padding_len(b.as_ref().len() as u64) as usize;
+    if padding_len != 0 {
+        w.write_all(&EMPTY_BYTES[..padding_len]).await?;
+    }
+    Ok(())
+}
+
+/// Computes the number of bytes we should add to len (a length in
+/// bytes) to be aligned on 64 bits (8 bytes).
+fn padding_len(len: u64) -> u8 {
+    let aligned = len.wrapping_add(7) & !7;
+    aligned.wrapping_sub(len) as u8
+}
+
+#[cfg(test)]
+mod tests {
+    use tokio_test::{assert_ok, io::Builder};
+
+    use super::*;
+    use hex_literal::hex;
+
+    /// The maximum length of bytes packets we're willing to accept in the test
+    /// cases.
+    const MAX_LEN: usize = 1024;
+
+    #[tokio::test]
+    async fn test_read_8_bytes() {
+        let mut mock = Builder::new()
+            .read(&8u64.to_le_bytes())
+            .read(&12345678u64.to_le_bytes())
+            .build();
+
+        assert_eq!(
+            &12345678u64.to_le_bytes(),
+            read_bytes(&mut mock, 0..=MAX_LEN).await.unwrap().as_slice()
+        );
+    }
+
+    #[tokio::test]
+    async fn test_read_9_bytes() {
+        let mut mock = Builder::new()
+            .read(&9u64.to_le_bytes())
+            .read(&hex!("01020304050607080900000000000000"))
+            .build();
+
+        assert_eq!(
+            hex!("010203040506070809"),
+            read_bytes(&mut mock, 0..=MAX_LEN).await.unwrap().as_slice()
+        );
+    }
+
+    #[tokio::test]
+    async fn test_read_0_bytes() {
+        // An empty bytes packet is essentially just the 0 length field.
+        // No data is read, and there's zero padding.
+        let mut mock = Builder::new().read(&0u64.to_le_bytes()).build();
+
+        assert_eq!(
+            hex!(""),
+            read_bytes(&mut mock, 0..=MAX_LEN).await.unwrap().as_slice()
+        );
+    }
+
+    #[tokio::test]
+    /// Ensure we don't read any further than the size field if the length
+    /// doesn't match the range we want to accept.
+    async fn test_read_reject_too_large() {
+        let mut mock = Builder::new().read(&100u64.to_le_bytes()).build();
+
+        read_bytes(&mut mock, 10..=10)
+            .await
+            .expect_err("expect this to fail");
+    }
+
+    #[tokio::test]
+    async fn test_write_bytes_no_padding() {
+        let input = hex!("6478696f34657661");
+        let len = input.len() as u64;
+        let mut mock = Builder::new()
+            .write(&len.to_le_bytes())
+            .write(&input)
+            .build();
+        assert_ok!(write_bytes(&mut mock, &input).await)
+    }
+
+    #[tokio::test]
+    async fn test_write_bytes_with_padding() {
+        let input = hex!("322e332e3137");
+        let len = input.len() as u64;
+        let mut mock = Builder::new()
+            .write(&len.to_le_bytes())
+            .write(&hex!("322e332e31370000"))
+            .build();
+        assert_ok!(write_bytes(&mut mock, &input).await)
+    }
+
+    #[tokio::test]
+    async fn test_write_string() {
+        let input = "Hello, World!";
+        let len = input.len() as u64;
+        let mut mock = Builder::new()
+            .write(&len.to_le_bytes())
+            .write(&hex!("48656c6c6f2c20576f726c6421000000"))
+            .build();
+        assert_ok!(write_bytes(&mut mock, &input).await)
+    }
+
+    #[test]
+    fn padding_len_u64_max() {
+        assert_eq!(padding_len(u64::MAX), 1);
+    }
+}
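
For illustration (not part of the diff), a minimal sketch of the buffered API above, assuming the crate is consumed as nix_compat and a tokio runtime with the rt and macros features: write_bytes emits the LE length field, the payload, and zero padding up to the next 8-byte boundary (padding_len(2) == 6), and read_bytes validates and strips all of it again.

    use nix_compat::wire::{read_bytes, write_bytes};

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let mut wire = Vec::new();
        write_bytes(&mut wire, b"ab").await?;
        // 8-byte LE length field, 2 payload bytes, 6 bytes of zero padding.
        assert_eq!(wire, [2, 0, 0, 0, 0, 0, 0, 0, b'a', b'b', 0, 0, 0, 0, 0, 0]);

        // read_bytes checks the length against the allowed range and verifies
        // that the padding is all zeroes, returning only the payload.
        let mut r = &wire[..];
        let payload = read_bytes(&mut r, 0..=16).await?;
        assert_eq!(payload, b"ab");
        Ok(())
    }
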
diff --git a/tvix/nix-compat/src/wire/bytes/reader/mod.rs b/tvix/nix-compat/src/wire/bytes/reader/mod.rs
new file mode 100644
index 000000000000..c0227f4e6cff
--- /dev/null
+++ b/tvix/nix-compat/src/wire/bytes/reader/mod.rs
@@ -0,0 +1,686 @@
+use std::{
+    future::Future,
+    io,
+    num::NonZeroU64,
+    ops::RangeBounds,
+    pin::Pin,
+    task::{self, ready, Poll},
+};
+use tokio::io::{AsyncBufRead, AsyncRead, AsyncReadExt, ReadBuf};
+
+use trailer::{read_trailer, ReadTrailer, Trailer};
+
+#[doc(hidden)]
+pub use self::trailer::Pad;
+pub(crate) use self::trailer::Tag;
+mod trailer;
+
+/// Reads a "bytes wire packet" from the underlying reader.
+/// The format is the same as in [crate::wire::bytes::read_bytes],
+/// however this structure provides an [AsyncRead] interface,
+/// avoiding the need to hold the entire payload in memory.
+///
+/// It is constructed by reading a size with [BytesReader::new],
+/// and yields payload data until the end of the packet is reached.
+///
+/// The final bytes are not returned until all padding has been successfully
+/// consumed as well, and the reader must be consumed to its full length.
+///
+/// If the data is not read all the way to the end, or an error is encountered,
+/// the underlying reader is no longer usable and might return garbage.
+#[derive(Debug)]
+#[allow(private_bounds)]
+pub struct BytesReader<R, T: Tag = Pad> {
+    state: State<R, T>,
+}
+
+/// Split the `user_len` into `body_len` and `tail_len`, which are respectively
+/// the non-terminal 8-byte blocks, and the ≤8 bytes of user data contained in
+/// the trailer block.
+#[inline(always)]
+fn split_user_len(user_len: NonZeroU64) -> (u64, u8) {
+    let n = user_len.get() - 1;
+    let body_len = n & !7;
+    let tail_len = (n & 7) as u8 + 1;
+    (body_len, tail_len)
+}
+
+#[derive(Debug)]
+enum State<R, T: Tag> {
+    /// Full 8-byte blocks are being read and released to the caller.
+    /// NOTE: The final 8-byte block is *always* part of the trailer.
+    Body {
+        reader: Option<R>,
+        consumed: u64,
+        /// The total length of all user data contained in both the body and trailer.
+        user_len: NonZeroU64,
+    },
+    /// The trailer is in the process of being read.
+    ReadTrailer(ReadTrailer<R, T>),
+    /// The trailer has been fully read and validated,
+    /// and data can now be released to the caller.
+    ReleaseTrailer { consumed: u8, data: Trailer },
+}
+
+impl<R> BytesReader<R>
+where
+    R: AsyncRead + Unpin,
+{
+    /// Constructs a new BytesReader, using the underlying passed reader.
+    pub async fn new<S: RangeBounds<u64>>(reader: R, allowed_size: S) -> io::Result<Self> {
+        BytesReader::new_internal(reader, allowed_size).await
+    }
+}
+
+#[allow(private_bounds)]
+impl<R, T: Tag> BytesReader<R, T>
+where
+    R: AsyncRead + Unpin,
+{
+    /// Constructs a new BytesReader, using the underlying passed reader.
+    pub(crate) async fn new_internal<S: RangeBounds<u64>>(
+        mut reader: R,
+        allowed_size: S,
+    ) -> io::Result<Self> {
+        let size = reader.read_u64_le().await?;
+
+        if !allowed_size.contains(&size) {
+            return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid size"));
+        }
+
+        Ok(Self {
+            state: match NonZeroU64::new(size) {
+                Some(size) => State::Body {
+                    reader: Some(reader),
+                    consumed: 0,
+                    user_len: size,
+                },
+                None => State::ReleaseTrailer {
+                    consumed: 0,
+                    data: read_trailer::<R, T>(reader, 0).await?,
+                },
+            },
+        })
+    }
+
+    /// Returns whether there is any remaining data to be read.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Remaining data length, i.e. not including data already read.
+    pub fn len(&self) -> u64 {
+        match self.state {
+            State::Body {
+                consumed, user_len, ..
+            } => user_len.get() - consumed,
+            State::ReadTrailer(ref fut) => fut.len() as u64,
+            State::ReleaseTrailer { consumed, ref data } => data.len() as u64 - consumed as u64,
+        }
+    }
+}
+
+#[allow(private_bounds)]
+impl<R: AsyncRead + Unpin, T: Tag> AsyncRead for BytesReader<R, T> {
+    fn poll_read(
+        mut self: Pin<&mut Self>,
+        cx: &mut task::Context,
+        buf: &mut ReadBuf,
+    ) -> Poll<io::Result<()>> {
+        let this = &mut self.state;
+
+        loop {
+            match this {
+                State::Body {
+                    reader,
+                    consumed,
+                    user_len,
+                } => {
+                    let (body_len, tail_len) = split_user_len(*user_len);
+                    let remaining = body_len - *consumed;
+
+                    let reader = if remaining == 0 {
+                        let reader = reader.take().unwrap();
+                        *this = State::ReadTrailer(read_trailer(reader, tail_len));
+                        continue;
+                    } else {
+                        Pin::new(reader.as_mut().unwrap())
+                    };
+
+                    let mut bytes_read = 0;
+                    ready!(with_limited(buf, remaining, |buf| {
+                        let ret = reader.poll_read(cx, buf);
+                        bytes_read = buf.initialized().len();
+                        ret
+                    }))?;
+
+                    *consumed += bytes_read as u64;
+
+                    return if bytes_read != 0 {
+                        Ok(())
+                    } else {
+                        Err(io::ErrorKind::UnexpectedEof.into())
+                    }
+                    .into();
+                }
+                State::ReadTrailer(fut) => {
+                    *this = State::ReleaseTrailer {
+                        consumed: 0,
+                        data: ready!(Pin::new(fut).poll(cx))?,
+                    };
+                }
+                State::ReleaseTrailer { consumed, data } => {
+                    let data = &data[*consumed as usize..];
+                    let data = &data[..usize::min(data.len(), buf.remaining())];
+
+                    buf.put_slice(data);
+                    *consumed += data.len() as u8;
+
+                    return Ok(()).into();
+                }
+            }
+        }
+    }
+}
+
+#[allow(private_bounds)]
+impl<R: AsyncBufRead + Unpin, T: Tag> AsyncBufRead for BytesReader<R, T> {
+    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<io::Result<&[u8]>> {
+        let this = &mut self.get_mut().state;
+
+        loop {
+            match this {
+                // This state comes *after* the following case,
+                // but we can't keep it in logical order because
+                // that would lengthen the borrow lifetime.
+                State::Body {
+                    reader,
+                    consumed,
+                    user_len,
+                } if {
+                    let (body_len, _) = split_user_len(*user_len);
+                    let remaining = body_len - *consumed;
+
+                    remaining == 0
+                } =>
+                {
+                    let reader = reader.take().unwrap();
+                    let (_, tail_len) = split_user_len(*user_len);
+
+                    *this = State::ReadTrailer(read_trailer(reader, tail_len));
+                }
+                State::Body {
+                    reader,
+                    consumed,
+                    user_len,
+                } => {
+                    let (body_len, _) = split_user_len(*user_len);
+                    let remaining = body_len - *consumed;
+
+                    let reader = Pin::new(reader.as_mut().unwrap());
+
+                    match ready!(reader.poll_fill_buf(cx))? {
+                        &[] => {
+                            return Err(io::ErrorKind::UnexpectedEof.into()).into();
+                        }
+                        mut buf => {
+                            if buf.len() as u64 > remaining {
+                                buf = &buf[..remaining as usize];
+                            }
+
+                            return Ok(buf).into();
+                        }
+                    }
+                }
+                State::ReadTrailer(fut) => {
+                    *this = State::ReleaseTrailer {
+                        consumed: 0,
+                        data: ready!(Pin::new(fut).poll(cx))?,
+                    };
+                }
+                State::ReleaseTrailer { consumed, data } => {
+                    return Ok(&data[*consumed as usize..]).into();
+                }
+            }
+        }
+    }
+
+    fn consume(mut self: Pin<&mut Self>, amt: usize) {
+        match &mut self.state {
+            State::Body {
+                reader,
+                consumed,
+                user_len,
+            } => {
+                let reader = Pin::new(reader.as_mut().unwrap());
+                let (body_len, _) = split_user_len(*user_len);
+
+                *consumed = consumed
+                    .checked_add(amt as u64)
+                    .filter(|&consumed| consumed <= body_len)
+                    .expect("consumed out of bounds");
+
+                reader.consume(amt);
+            }
+            State::ReadTrailer(_) => unreachable!(),
+            State::ReleaseTrailer { consumed, data } => {
+                *consumed = amt
+                    .checked_add(*consumed as usize)
+                    .filter(|&consumed| consumed <= data.len())
+                    .expect("consumed out of bounds") as u8;
+            }
+        }
+    }
+}
+
+/// Make a limited version of `buf`, consisting only of up to `n` bytes of the unfilled section, and call `f` with it.
+/// After `f` returns, we propagate the filled cursor advancement back to `buf`.
+fn with_limited<R>(buf: &mut ReadBuf, n: u64, f: impl FnOnce(&mut ReadBuf) -> R) -> R {
+    let mut nbuf = buf.take(n.try_into().unwrap_or(usize::MAX));
+    let ptr = nbuf.initialized().as_ptr();
+    let ret = f(&mut nbuf);
+
+    // SAFETY: `ReadBuf::take` only returns the *unfilled* section of `buf`,
+    // so anything filled is new, initialized data.
+    //
+    // We verify that `nbuf` still points to the same buffer,
+    // so we're sure it hasn't been swapped out.
+    unsafe {
+        // ensure our buffer hasn't been swapped out
+        assert_eq!(nbuf.initialized().as_ptr(), ptr);
+
+        let n = nbuf.filled().len();
+        buf.assume_init(n);
+        buf.advance(n);
+    }
+
+    ret
+}
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use crate::wire::bytes::{padding_len, write_bytes};
+    use hex_literal::hex;
+    use lazy_static::lazy_static;
+    use rstest::rstest;
+    use tokio::io::{AsyncReadExt, BufReader};
+    use tokio_test::io::Builder;
+
+    use super::*;
+
+    /// The maximum length of bytes packets we're willing to accept in the test
+    /// cases.
+    const MAX_LEN: u64 = 1024;
+
+    lazy_static! {
+        pub static ref LARGE_PAYLOAD: Vec<u8> = (0..255).collect::<Vec<u8>>().repeat(4 * 1024);
+    }
+
+    /// Helper function, calling the (simpler) write_bytes with the payload.
+    /// We use this to create data we want to read from the wire.
+    async fn produce_packet_bytes(payload: &[u8]) -> Vec<u8> {
+        let mut exp = vec![];
+        write_bytes(&mut exp, payload).await.unwrap();
+        exp
+    }
+
+    /// Read bytes packets of various length, and ensure read_to_end returns the
+    /// expected payload.
+    #[rstest]
+    #[case::empty(&[])] // empty bytes packet
+    #[case::size_1b(&[0xff])] // 1 bytes payload
+    #[case::size_8b(&hex!("0001020304050607"))] // 8 bytes payload (no padding)
+    #[case::size_9b(&hex!("000102030405060708"))] // 9 bytes payload (7 bytes padding)
+    #[case::size_1m(LARGE_PAYLOAD.as_slice())] // larger bytes packet
+    #[tokio::test]
+    async fn read_payload_correct(#[case] payload: &[u8]) {
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await)
+            .build();
+
+        let mut r = BytesReader::new(&mut mock, ..=LARGE_PAYLOAD.len() as u64)
+            .await
+            .unwrap();
+        let mut buf = Vec::new();
+        r.read_to_end(&mut buf).await.expect("must succeed");
+
+        assert_eq!(payload, &buf[..]);
+    }
+
+    /// Read bytes packets of various length, and ensure copy_buf reads the
+    /// expected payload.
+    #[rstest]
+    #[case::empty(&[])] // empty bytes packet
+    #[case::size_1b(&[0xff])] // 1 bytes payload
+    #[case::size_8b(&hex!("0001020304050607"))] // 8 bytes payload (no padding)
+    #[case::size_9b(&hex!("000102030405060708"))] // 9 bytes payload (7 bytes padding)
+    #[case::size_1m(LARGE_PAYLOAD.as_slice())] // larger bytes packet
+    #[tokio::test]
+    async fn read_payload_correct_readbuf(#[case] payload: &[u8]) {
+        let mut mock = BufReader::new(
+            Builder::new()
+                .read(&produce_packet_bytes(payload).await)
+                .build(),
+        );
+
+        let mut r = BytesReader::new(&mut mock, ..=LARGE_PAYLOAD.len() as u64)
+            .await
+            .unwrap();
+
+        let mut buf = Vec::new();
+        tokio::io::copy_buf(&mut r, &mut buf)
+            .await
+            .expect("copy_buf must succeed");
+
+        assert_eq!(payload, &buf[..]);
+    }
+
+    /// Fail if the bytes packet is larger than allowed
+    #[tokio::test]
+    async fn read_bigger_than_allowed_fail() {
+        let payload = LARGE_PAYLOAD.as_slice();
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[0..8]) // We stop reading after the size packet
+            .build();
+
+        assert_eq!(
+            BytesReader::new(&mut mock, ..2048)
+                .await
+                .unwrap_err()
+                .kind(),
+            io::ErrorKind::InvalidData
+        );
+    }
+
+    /// Fail if the bytes packet is smaller than allowed
+    #[tokio::test]
+    async fn read_smaller_than_allowed_fail() {
+        let payload = &[0x00, 0x01, 0x02];
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[0..8]) // We stop reading after the size packet
+            .build();
+
+        assert_eq!(
+            BytesReader::new(&mut mock, 1024..2048)
+                .await
+                .unwrap_err()
+                .kind(),
+            io::ErrorKind::InvalidData
+        );
+    }
+
+    /// Read the trailer immediately if there is no payload.
+    #[cfg(feature = "async")]
+    #[tokio::test]
+    async fn read_trailer_immediately() {
+        use crate::nar::wire::PadPar;
+
+        let mut mock = Builder::new()
+            .read(&[0; 8])
+            .read(&PadPar::PATTERN[8..])
+            .build();
+
+        BytesReader::<_, PadPar>::new_internal(&mut mock, ..)
+            .await
+            .unwrap();
+
+        // The mock reader will panic if dropped without reading all data.
+    }
+
+    /// Read the trailer even if we only read the exact payload size.
+    #[cfg(feature = "async")]
+    #[tokio::test]
+    async fn read_exact_trailer() {
+        use crate::nar::wire::PadPar;
+
+        let mut mock = Builder::new()
+            .read(&16u64.to_le_bytes())
+            .read(&[0x55; 16])
+            .read(&PadPar::PATTERN[8..])
+            .build();
+
+        let mut reader = BytesReader::<_, PadPar>::new_internal(&mut mock, ..)
+            .await
+            .unwrap();
+
+        let mut buf = [0; 16];
+        reader.read_exact(&mut buf).await.unwrap();
+        assert_eq!(buf, [0x55; 16]);
+
+        // The mock reader will panic if dropped without reading all data.
+    }
+
+    /// Fail if the padding is not all zeroes
+    #[tokio::test]
+    async fn read_fail_if_nonzero_padding() {
+        let payload = &[0x00, 0x01, 0x02];
+        let mut packet_bytes = produce_packet_bytes(payload).await;
+        // Flip some bits in the padding
+        packet_bytes[12] = 0xff;
+        let mut mock = Builder::new().read(&packet_bytes).build(); // We stop reading after the faulty bit
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+        let mut buf = Vec::new();
+
+        r.read_to_end(&mut buf).await.expect_err("must fail");
+    }
+
+    /// Start a 9 bytes payload packet, but have the underlying reader return
+    /// EOF in the middle of the size field (after 4 bytes).
+    /// Constructing the BytesReader should already fail with an unexpected
+    /// EOF error.
+    #[tokio::test]
+    async fn read_9b_eof_during_size() {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..4])
+            .build();
+
+        assert_eq!(
+            BytesReader::new(&mut mock, ..MAX_LEN)
+                .await
+                .expect_err("must fail")
+                .kind(),
+            io::ErrorKind::UnexpectedEof
+        );
+    }
+
+    /// Start a 9 bytes payload packet, but have the underlying reader return
+    /// EOF in the middle of the payload (4 bytes into the payload).
+    /// We should get an unexpected EOF error, after reading the first 4 bytes
+    /// (successfully).
+    #[tokio::test]
+    async fn read_9b_eof_during_payload() {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..8 + 4])
+            .build();
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+        let mut buf = [0; 9];
+
+        r.read_exact(&mut buf[..4]).await.expect("must succeed");
+
+        assert_eq!(
+            r.read_exact(&mut buf[4..=4])
+                .await
+                .expect_err("must fail")
+                .kind(),
+            std::io::ErrorKind::UnexpectedEof
+        );
+    }
+
+    /// Start a 9 bytes payload packet, but don't supply the necessary padding.
+    /// This is expected to always fail before returning the final data.
+    #[rstest]
+    #[case::before_padding(8 + 9)]
+    #[case::during_padding(8 + 9 + 2)]
+    #[case::after_padding(8 + 9 + padding_len(9) as usize - 1)]
+    #[tokio::test]
+    async fn read_9b_eof_after_payload(#[case] offset: usize) {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..offset])
+            .build();
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+
+        // read_exact of the payload *body* will succeed, but a subsequent read will
+        // return UnexpectedEof error.
+        assert_eq!(r.read_exact(&mut [0; 8]).await.unwrap(), 8);
+        assert_eq!(
+            r.read_exact(&mut [0]).await.unwrap_err().kind(),
+            std::io::ErrorKind::UnexpectedEof
+        );
+    }
+
+    /// Start a 9 bytes payload packet, but return an error after a certain position.
+    /// Ensure that error is propagated.
+    #[rstest]
+    #[case::during_size(4)]
+    #[case::before_payload(8)]
+    #[case::during_payload(8 + 4)]
+    #[case::before_padding(8 + 9)]
+    #[case::during_padding(8 + 9 + 2)]
+    #[tokio::test]
+    async fn propagate_error_from_reader(#[case] offset: usize) {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..offset])
+            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
+            .build();
+
+        // Either length reading or data reading can fail, depending on which test case we're in.
+        let err: io::Error = async {
+            let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await?;
+            let mut buf = Vec::new();
+
+            r.read_to_end(&mut buf).await?;
+
+            Ok(())
+        }
+        .await
+        .expect_err("must fail");
+
+        assert_eq!(
+            err.kind(),
+            std::io::ErrorKind::Other,
+            "error kind must match"
+        );
+
+        assert_eq!(
+            err.into_inner().unwrap().to_string(),
+            "foo",
+            "error payload must contain foo"
+        );
+    }
+
+    /// Start a 9 bytes payload packet, but return an error after a certain position.
+    /// Ensure that error is propagated (AsyncBufRead case)
+    #[rstest]
+    #[case::during_size(4)]
+    #[case::before_payload(8)]
+    #[case::during_payload(8 + 4)]
+    #[case::before_padding(8 + 9)]
+    #[case::during_padding(8 + 9 + 2)]
+    #[tokio::test]
+    async fn propagate_error_from_reader_buffered(#[case] offset: usize) {
+        let payload = &hex!("FF0102030405060708");
+        let mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..offset])
+            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
+            .build();
+        let mut mock = BufReader::new(mock);
+
+        // Either length reading or data reading can fail, depending on which test case we're in.
+        let err: io::Error = async {
+            let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await?;
+            let mut buf = Vec::new();
+
+            tokio::io::copy_buf(&mut r, &mut buf).await?;
+
+            Ok(())
+        }
+        .await
+        .expect_err("must fail");
+
+        assert_eq!(
+            err.kind(),
+            std::io::ErrorKind::Other,
+            "error kind must match"
+        );
+
+        assert_eq!(
+            err.into_inner().unwrap().to_string(),
+            "foo",
+            "error payload must contain foo"
+        );
+    }
+
+    /// If there's an error right after the padding, we don't propagate it, as
+    /// we're done reading. We just return EOF.
+    #[tokio::test]
+    async fn no_error_after_eof() {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await)
+            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
+            .build();
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+        let mut buf = Vec::new();
+
+        r.read_to_end(&mut buf).await.expect("must succeed");
+        assert_eq!(buf.as_slice(), payload);
+    }
+
+    /// If there's an error right after the padding, we don't propagate it, as
+    /// we're done reading. We just return EOF.
+    #[tokio::test]
+    async fn no_error_after_eof_buffered() {
+        let payload = &hex!("FF0102030405060708");
+        let mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await)
+            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
+            .build();
+        let mut mock = BufReader::new(mock);
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+        let mut buf = Vec::new();
+
+        tokio::io::copy_buf(&mut r, &mut buf)
+            .await
+            .expect("must succeed");
+        assert_eq!(buf.as_slice(), payload);
+    }
+
+    /// Introduce various stalls in various places of the packet, to ensure we
+    /// handle these cases properly, too.
+    #[rstest]
+    #[case::beginning(0)]
+    #[case::before_payload(8)]
+    #[case::during_payload(8 + 4)]
+    #[case::before_padding(8 + 9)]
+    #[case::during_padding(8 + 9 + 2)]
+    #[tokio::test]
+    async fn read_payload_correct_pending(#[case] offset: usize) {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..offset])
+            .wait(Duration::from_nanos(0))
+            .read(&produce_packet_bytes(payload).await[offset..])
+            .build();
+
+        let mut r = BytesReader::new(&mut mock, ..=LARGE_PAYLOAD.len() as u64)
+            .await
+            .unwrap();
+        let mut buf = Vec::new();
+        r.read_to_end(&mut buf).await.expect("must succeed");
+
+        assert_eq!(payload, &buf[..]);
+    }
+}
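
A usage sketch for the streaming reader (same nix_compat/tokio assumptions as above): the size field is consumed and validated up front by BytesReader::new, the payload is then streamed, and EOF is only reported once the padding has been read and checked.

    use nix_compat::wire::{write_bytes, BytesReader};
    use tokio::io::AsyncReadExt;

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let mut wire = Vec::new();
        write_bytes(&mut wire, b"streamed payload").await?;

        let mut src = &wire[..];
        // Reads and validates the 8-byte size field.
        let mut reader = BytesReader::new(&mut src, ..=1024).await?;

        // Streams the payload; reaching EOF also consumes and checks the padding.
        let mut payload = Vec::new();
        reader.read_to_end(&mut payload).await?;
        assert_eq!(payload, b"streamed payload");
        Ok(())
    }
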
diff --git a/tvix/nix-compat/src/wire/bytes/reader/trailer.rs b/tvix/nix-compat/src/wire/bytes/reader/trailer.rs
new file mode 100644
index 000000000000..3a5bb75e7103
--- /dev/null
+++ b/tvix/nix-compat/src/wire/bytes/reader/trailer.rs
@@ -0,0 +1,197 @@
+use std::{
+    fmt::Debug,
+    future::Future,
+    marker::PhantomData,
+    ops::Deref,
+    pin::Pin,
+    task::{self, ready, Poll},
+};
+
+use tokio::io::{self, AsyncRead, ReadBuf};
+
+/// Trailer represents up to 8 bytes of data read as part of the trailer block(s)
+#[derive(Debug)]
+pub(crate) struct Trailer {
+    data_len: u8,
+    buf: [u8; 8],
+}
+
+impl Deref for Trailer {
+    type Target = [u8];
+
+    fn deref(&self) -> &Self::Target {
+        &self.buf[..self.data_len as usize]
+    }
+}
+
+/// Tag defines a "trailer tag": specific, fixed bytes that must follow wire data.
+pub(crate) trait Tag {
+    /// The expected suffix
+    ///
+    /// The first 8 bytes may be ignored, and its length must be a multiple of 8.
+    const PATTERN: &'static [u8];
+
+    /// Suitably sized buffer for reading [Self::PATTERN]
+    ///
+    /// HACK: This is a workaround for const generics limitations.
+    type Buf: AsRef<[u8]> + AsMut<[u8]> + Debug + Unpin;
+
+    /// Make an instance of [Self::Buf]
+    fn make_buf() -> Self::Buf;
+}
+
+#[derive(Debug)]
+pub enum Pad {}
+
+impl Tag for Pad {
+    const PATTERN: &'static [u8] = &[0; 8];
+
+    type Buf = [u8; 8];
+
+    fn make_buf() -> Self::Buf {
+        [0; 8]
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct ReadTrailer<R, T: Tag> {
+    reader: R,
+    data_len: u8,
+    filled: u8,
+    buf: T::Buf,
+    _phantom: PhantomData<fn(T) -> T>,
+}
+
+/// read_trailer returns a [Future] that reads a trailer with a given [Tag] from `reader`
+pub(crate) fn read_trailer<R: AsyncRead + Unpin, T: Tag>(
+    reader: R,
+    data_len: u8,
+) -> ReadTrailer<R, T> {
+    assert!(data_len <= 8, "payload in trailer must be <= 8 bytes");
+
+    let buf = T::make_buf();
+    assert_eq!(buf.as_ref().len(), T::PATTERN.len());
+    assert_eq!(T::PATTERN.len() % 8, 0);
+
+    ReadTrailer {
+        reader,
+        data_len,
+        filled: if data_len != 0 { 0 } else { 8 },
+        buf,
+        _phantom: PhantomData,
+    }
+}
+
+impl<R, T: Tag> ReadTrailer<R, T> {
+    pub fn len(&self) -> u8 {
+        self.data_len
+    }
+}
+
+impl<R: AsyncRead + Unpin, T: Tag> Future for ReadTrailer<R, T> {
+    type Output = io::Result<Trailer>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
+        let this = &mut *self;
+
+        loop {
+            if this.filled >= this.data_len {
+                let check_range = || this.data_len as usize..this.filled as usize;
+
+                if this.buf.as_ref()[check_range()] != T::PATTERN[check_range()] {
+                    return Err(io::Error::new(
+                        io::ErrorKind::InvalidData,
+                        "invalid trailer",
+                    ))
+                    .into();
+                }
+            }
+
+            if this.filled as usize == T::PATTERN.len() {
+                let mut buf = [0; 8];
+                buf.copy_from_slice(&this.buf.as_ref()[..8]);
+
+                return Ok(Trailer {
+                    data_len: this.data_len,
+                    buf,
+                })
+                .into();
+            }
+
+            let mut buf = ReadBuf::new(this.buf.as_mut());
+            buf.advance(this.filled as usize);
+
+            ready!(Pin::new(&mut this.reader).poll_read(cx, &mut buf))?;
+
+            this.filled = {
+                let filled = buf.filled().len() as u8;
+
+                if filled == this.filled {
+                    return Err(io::ErrorKind::UnexpectedEof.into()).into();
+                }
+
+                filled
+            };
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use super::*;
+
+    #[tokio::test]
+    async fn unexpected_eof() {
+        let reader = tokio_test::io::Builder::new()
+            .read(&[0xed])
+            .wait(Duration::ZERO)
+            .read(&[0xef, 0x00])
+            .build();
+
+        assert_eq!(
+            read_trailer::<_, Pad>(reader, 2).await.unwrap_err().kind(),
+            io::ErrorKind::UnexpectedEof
+        );
+    }
+
+    #[tokio::test]
+    async fn invalid_padding() {
+        let reader = tokio_test::io::Builder::new()
+            .read(&[0xed])
+            .wait(Duration::ZERO)
+            .read(&[0xef, 0x01, 0x00])
+            .wait(Duration::ZERO)
+            .build();
+
+        assert_eq!(
+            read_trailer::<_, Pad>(reader, 2).await.unwrap_err().kind(),
+            io::ErrorKind::InvalidData
+        );
+    }
+
+    #[tokio::test]
+    async fn success() {
+        let reader = tokio_test::io::Builder::new()
+            .read(&[0xed])
+            .wait(Duration::ZERO)
+            .read(&[0xef, 0x00])
+            .wait(Duration::ZERO)
+            .read(&[0x00, 0x00, 0x00, 0x00, 0x00])
+            .build();
+
+        assert_eq!(
+            &*read_trailer::<_, Pad>(reader, 2).await.unwrap(),
+            &[0xed, 0xef]
+        );
+    }
+
+    #[tokio::test]
+    async fn no_padding() {
+        assert!(read_trailer::<_, Pad>(io::empty(), 0)
+            .await
+            .unwrap()
+            .is_empty());
+    }
+}
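
The Tag machinery is crate-internal, but to illustrate the contract it encodes, here is a hypothetical tag (PadMagic and its NIXMAGIC marker are invented for this sketch; the real non-Pad implementor is PadPar in the NAR code): ReadTrailer compares everything past the user data against PATTERN, so the first 8 bytes cover the trailer block itself and the remainder is a required suffix.

    // Hypothetical, for illustration only: a 16-byte trailer pattern.
    // The first 8 bytes are the zero padding of the trailer block (ignored up
    // to however much user data it carries); the next 8 bytes are a fixed
    // marker that must follow the packet on the wire.
    #[derive(Debug)]
    enum PadMagic {}

    impl Tag for PadMagic {
        const PATTERN: &'static [u8] = b"\0\0\0\0\0\0\0\0NIXMAGIC";

        type Buf = [u8; 16];

        fn make_buf() -> Self::Buf {
            [0; 16]
        }
    }
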
diff --git a/tvix/nix-compat/src/wire/bytes/writer.rs b/tvix/nix-compat/src/wire/bytes/writer.rs
new file mode 100644
index 000000000000..f5632771e961
--- /dev/null
+++ b/tvix/nix-compat/src/wire/bytes/writer.rs
@@ -0,0 +1,538 @@
+use pin_project_lite::pin_project;
+use std::task::{ready, Poll};
+
+use tokio::io::AsyncWrite;
+
+use super::{padding_len, EMPTY_BYTES, LEN_SIZE};
+
+pin_project! {
+    /// Writes a "bytes wire packet" to the underlying writer.
+    /// The format is the same as in [crate::wire::bytes::write_bytes],
+    /// however this structure provides an [AsyncWrite] interface,
+    /// avoiding the need to hold the entire payload in memory.
+    ///
+    /// It internally takes care of writing (non-payload) framing (size and
+    /// padding).
+    ///
+    /// During construction, the expected payload size needs to be provided.
+    ///
+    /// After writing the payload to it, the user MUST call flush (or shutdown),
+    /// which will validate the written payload size to match, and write the
+    /// necessary padding.
+    ///
+    /// In case flush is not called at the end, invalid data might be sent
+    /// silently.
+    ///
+    /// The underlying writer returning `Ok(0)` is considered an EOF situation,
+    /// which is stronger than the "typically means the underlying object is no
+    /// longer able to accept bytes" interpretation from the docs. If such a
+    /// situation occurs, an error is returned.
+    ///
+    /// The struct holds three fields, the underlying writer, the (expected)
+    /// payload length, and an enum, tracking the state.
+    pub struct BytesWriter<W>
+    where
+        W: AsyncWrite,
+    {
+        #[pin]
+        inner: W,
+        payload_len: u64,
+        state: BytesPacketPosition,
+    }
+}
+
+/// Models the position inside a "bytes wire packet" that the writer is in.
+/// It can be in three different stages, inside size, payload or padding fields.
+/// The number tracks the number of bytes written inside the specific field.
+/// There shall be no ambiguous states, at the end of a stage we immediately
+/// move to the beginning of the next one:
+/// - Size(LEN_SIZE) must be expressed as Payload(0)
+/// - Payload(self.payload_len) must be expressed as Padding(0)
+///
+/// Padding(padding_len) means we're at the end of the bytes wire packet.
+#[derive(Clone, Debug, PartialEq, Eq)]
+enum BytesPacketPosition {
+    Size(usize),
+    Payload(u64),
+    Padding(usize),
+}
+
+impl<W> BytesWriter<W>
+where
+    W: AsyncWrite,
+{
+    /// Constructs a new BytesWriter, using the underlying passed writer.
+    pub fn new(w: W, payload_len: u64) -> Self {
+        Self {
+            inner: w,
+            payload_len,
+            state: BytesPacketPosition::Size(0),
+        }
+    }
+}
+
+/// Returns an error if the passed usize is 0.
+#[inline]
+fn ensure_nonzero_bytes_written(bytes_written: usize) -> Result<usize, std::io::Error> {
+    if bytes_written == 0 {
+        Err(std::io::Error::new(
+            std::io::ErrorKind::WriteZero,
+            "underlying writer accepted 0 bytes",
+        ))
+    } else {
+        Ok(bytes_written)
+    }
+}
+
+impl<W> AsyncWrite for BytesWriter<W>
+where
+    W: AsyncWrite,
+{
+    fn poll_write(
+        self: std::pin::Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+        buf: &[u8],
+    ) -> Poll<Result<usize, std::io::Error>> {
+        // Use a loop, so we can deal with (multiple) state transitions.
+        let mut this = self.project();
+
+        loop {
+            match *this.state {
+                BytesPacketPosition::Size(LEN_SIZE) => unreachable!(),
+                BytesPacketPosition::Size(pos) => {
+                    let size_field = &this.payload_len.to_le_bytes();
+
+                    let bytes_written = ensure_nonzero_bytes_written(ready!(this
+                        .inner
+                        .as_mut()
+                        .poll_write(cx, &size_field[pos..]))?)?;
+
+                    let new_pos = pos + bytes_written;
+                    if new_pos == LEN_SIZE {
+                        *this.state = BytesPacketPosition::Payload(0);
+                    } else {
+                        *this.state = BytesPacketPosition::Size(new_pos);
+                    }
+                }
+                BytesPacketPosition::Payload(pos) => {
+                    // Ensure we still have space for more payload
+                    if pos + (buf.len() as u64) > *this.payload_len {
+                        return Poll::Ready(Err(std::io::Error::new(
+                            std::io::ErrorKind::InvalidData,
+                            "tried to write excess bytes",
+                        )));
+                    }
+                    let bytes_written = ready!(this.inner.as_mut().poll_write(cx, buf))?;
+                    ensure_nonzero_bytes_written(bytes_written)?;
+                    let new_pos = pos + (bytes_written as u64);
+                    if new_pos == *this.payload_len {
+                        *this.state = BytesPacketPosition::Padding(0)
+                    } else {
+                        *this.state = BytesPacketPosition::Payload(new_pos)
+                    }
+
+                    return Poll::Ready(Ok(bytes_written));
+                }
+                // If we're already in padding state, there should be no more payload left to write!
+                BytesPacketPosition::Padding(_pos) => {
+                    return Poll::Ready(Err(std::io::Error::new(
+                        std::io::ErrorKind::InvalidData,
+                        "tried to write excess bytes",
+                    )))
+                }
+            }
+        }
+    }
+
+    fn poll_flush(
+        self: std::pin::Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+    ) -> Poll<Result<(), std::io::Error>> {
+        let mut this = self.project();
+
+        loop {
+            match *this.state {
+                BytesPacketPosition::Size(LEN_SIZE) => unreachable!(),
+                BytesPacketPosition::Size(pos) => {
+                    // More bytes to write in the size field
+                    let size_field = &this.payload_len.to_le_bytes()[..];
+                    let bytes_written = ensure_nonzero_bytes_written(ready!(this
+                        .inner
+                        .as_mut()
+                        .poll_write(cx, &size_field[pos..]))?)?;
+                    let new_pos = pos + bytes_written;
+                    if new_pos == LEN_SIZE {
+                        // Size field written, now ready to receive payload
+                        *this.state = BytesPacketPosition::Payload(0);
+                    } else {
+                        *this.state = BytesPacketPosition::Size(new_pos);
+                    }
+                }
+                BytesPacketPosition::Payload(_pos) => {
+                    // If we're at position 0 and want to write 0 bytes of payload
+                    // in total, we can transition to padding.
+                    // Otherwise, break, as we're expecting more payload to
+                    // be written.
+                    if *this.payload_len == 0 {
+                        *this.state = BytesPacketPosition::Padding(0);
+                    } else {
+                        break;
+                    }
+                }
+                BytesPacketPosition::Padding(pos) => {
+                    // Write remaining padding, if there is padding to write.
+                    let total_padding_len = padding_len(*this.payload_len) as usize;
+
+                    if pos != total_padding_len {
+                        let bytes_written = ensure_nonzero_bytes_written(ready!(this
+                            .inner
+                            .as_mut()
+                            .poll_write(cx, &EMPTY_BYTES[pos..total_padding_len]))?)?;
+                        *this.state = BytesPacketPosition::Padding(pos + bytes_written);
+                    } else {
+                        // everything written, break
+                        break;
+                    }
+                }
+            }
+        }
+        // Flush the underlying writer.
+        this.inner.as_mut().poll_flush(cx)
+    }
+
+    fn poll_shutdown(
+        mut self: std::pin::Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+    ) -> Poll<Result<(), std::io::Error>> {
+        // Call flush.
+        ready!(self.as_mut().poll_flush(cx))?;
+
+        let this = self.project();
+
+        // After a flush, being inside the padding state, and at the end of the padding
+        // is the only way to prevent a dirty shutdown.
+        if let BytesPacketPosition::Padding(pos) = *this.state {
+            let padding_len = padding_len(*this.payload_len) as usize;
+            if padding_len == pos {
+                // Shutdown the underlying writer
+                return this.inner.poll_shutdown(cx);
+            }
+        }
+
+        // Shutdown the underlying writer, bubbling up any errors.
+        ready!(this.inner.poll_shutdown(cx))?;
+
+        // return an error about unclean shutdown
+        Poll::Ready(Err(std::io::Error::new(
+            std::io::ErrorKind::BrokenPipe,
+            "unclean shutdown",
+        )))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use crate::wire::bytes::write_bytes;
+    use hex_literal::hex;
+    use lazy_static::lazy_static;
+    use tokio::io::AsyncWriteExt;
+    use tokio_test::{assert_err, assert_ok, io::Builder};
+
+    use super::*;
+
+    lazy_static! {
+        pub static ref LARGE_PAYLOAD: Vec<u8> = (0..255).collect::<Vec<u8>>().repeat(4 * 1024);
+    }
+
+    /// Helper function, calling the (simpler) write_bytes with the payload.
+    /// We use this to create data we want to see on the wire.
+    async fn produce_exp_bytes(payload: &[u8]) -> Vec<u8> {
+        let mut exp = vec![];
+        write_bytes(&mut exp, payload).await.unwrap();
+        exp
+    }
+
+    /// Write an empty bytes packet.
+    #[tokio::test]
+    async fn write_empty() {
+        let payload = &[];
+        let mut mock = Builder::new()
+            .write(&produce_exp_bytes(payload).await)
+            .build();
+
+        let mut w = BytesWriter::new(&mut mock, 0);
+        assert_ok!(w.write_all(&[]).await, "write all data");
+        assert_ok!(w.flush().await, "flush");
+    }
+
+    /// Write an empty bytes packet, not calling write.
+    #[tokio::test]
+    async fn write_empty_only_flush() {
+        let payload = &[];
+        let mut mock = Builder::new()
+            .write(&produce_exp_bytes(payload).await)
+            .build();
+
+        let mut w = BytesWriter::new(&mut mock, 0);
+        assert_ok!(w.flush().await, "flush");
+    }
+
+    /// Write an empty bytes packet, not calling write or flush, only shutdown.
+    #[tokio::test]
+    async fn write_empty_only_shutdown() {
+        let payload = &[];
+        let mut mock = Builder::new()
+            .write(&produce_exp_bytes(payload).await)
+            .build();
+
+        let mut w = BytesWriter::new(&mut mock, 0);
+        assert_ok!(w.shutdown().await, "shutdown");
+    }
+
+    /// Write a 1 bytes packet
+    #[tokio::test]
+    async fn write_1b() {
+        let payload = &[0xff];
+
+        let mut mock = Builder::new()
+            .write(&produce_exp_bytes(payload).await)
+            .build();
+
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+        assert_ok!(w.write_all(payload).await);
+        assert_ok!(w.flush().await, "flush");
+    }
+
+    /// Write a 8 bytes payload (no padding)
+    #[tokio::test]
+    async fn write_8b() {
+        let payload = &hex!("0001020304050607");
+
+        let mut mock = Builder::new()
+            .write(&produce_exp_bytes(payload).await)
+            .build();
+
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+        assert_ok!(w.write_all(payload).await);
+        assert_ok!(w.flush().await, "flush");
+    }
+
+    /// Write a 9 bytes payload (7 bytes padding)
+    #[tokio::test]
+    async fn write_9b() {
+        let payload = &hex!("000102030405060708");
+
+        let mut mock = Builder::new()
+            .write(&produce_exp_bytes(payload).await)
+            .build();
+
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+        assert_ok!(w.write_all(payload).await);
+        assert_ok!(w.flush().await, "flush");
+    }
+
+    /// Write a 9 bytes packet very granularly, with a lot of flushing in between,
+    /// and a shutdown at the end.
+    #[tokio::test]
+    async fn write_9b_flush() {
+        let payload = &hex!("000102030405060708");
+        let exp_bytes = produce_exp_bytes(payload).await;
+
+        let mut mock = Builder::new().write(&exp_bytes).build();
+
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+        assert_ok!(w.flush().await);
+
+        assert_ok!(w.write_all(&payload[..4]).await);
+        assert_ok!(w.flush().await);
+
+        // empty write, cause why not
+        assert_ok!(w.write_all(&[]).await);
+        assert_ok!(w.flush().await);
+
+        assert_ok!(w.write_all(&payload[4..]).await);
+        assert_ok!(w.flush().await);
+        assert_ok!(w.shutdown().await);
+    }
+
+    /// Write a 9 bytes packet, but cause the sink to only accept half of the
+    /// padding, ensuring we correctly write (only) the rest of the padding later.
+    /// We write another 2 bytes of "bait", where a faulty implementation (pre
+    /// cl/11384) would put too many null bytes.
+    #[tokio::test]
+    async fn write_9b_write_padding_2steps() {
+        let payload = &hex!("000102030405060708");
+        let exp_bytes = produce_exp_bytes(payload).await;
+
+        let mut mock = Builder::new()
+            .write(&exp_bytes[0..8]) // size
+            .write(&exp_bytes[8..17]) // payload
+            .write(&exp_bytes[17..19]) // padding (2 of 7 bytes)
+            // insert a wait to prevent Mock from merging the two writes into one
+            .wait(Duration::from_nanos(1))
+            .write(&hex!("0000000000ffff")) // padding (5 of 7 bytes, plus 2 bytes of "bait")
+            .build();
+
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+        assert_ok!(w.write_all(&payload[..]).await);
+        assert_ok!(w.flush().await);
+        // Write bait
+        assert_ok!(mock.write_all(&hex!("ffff")).await);
+    }
+
+    /// Write a larger bytes packet
+    #[tokio::test]
+    async fn write_1m() {
+        let payload = LARGE_PAYLOAD.as_slice();
+        let exp_bytes = produce_exp_bytes(payload).await;
+
+        let mut mock = Builder::new().write(&exp_bytes).build();
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+
+        assert_ok!(w.write_all(payload).await);
+        assert_ok!(w.flush().await, "flush");
+    }
+
+    /// Not calling flush at the end, but shutdown is also ok if we wrote all
+    /// bytes we promised to write (as shutdown implies flush)
+    #[tokio::test]
+    async fn write_shutdown_without_flush_end() {
+        let payload = &[0xf0, 0xff];
+        let exp_bytes = produce_exp_bytes(payload).await;
+
+        let mut mock = Builder::new().write(&exp_bytes).build();
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+
+        // call flush to write the size field
+        assert_ok!(w.flush().await);
+
+        // write payload
+        assert_ok!(w.write_all(payload).await);
+
+        // call shutdown
+        assert_ok!(w.shutdown().await);
+    }
+
+    /// Writing more bytes than previously signalled should fail.
+    #[tokio::test]
+    async fn write_more_than_signalled_fail() {
+        let mut buf = Vec::new();
+        let mut w = BytesWriter::new(&mut buf, 2);
+
+        assert_err!(w.write_all(&hex!("000102")).await);
+    }
+
+    /// Writing more bytes than previously signalled, but in two parts,
+    /// should also fail.
+    #[tokio::test]
+    async fn write_more_than_signalled_split_fail() {
+        let mut buf = Vec::new();
+        let mut w = BytesWriter::new(&mut buf, 2);
+
+        // write two bytes
+        assert_ok!(w.write_all(&hex!("0001")).await);
+
+        // write the excess byte.
+        assert_err!(w.write_all(&hex!("02")).await);
+    }
+
+    /// Writing more bytes than previously signalled, but flushing after the
+    /// signalled amount should fail.
+    #[tokio::test]
+    async fn write_more_than_signalled_flush_fail() {
+        let mut buf = Vec::new();
+        let mut w = BytesWriter::new(&mut buf, 2);
+
+        // write two bytes, then flush
+        assert_ok!(w.write_all(&hex!("0001")).await);
+        assert_ok!(w.flush().await);
+
+        // write the excess byte.
+        assert_err!(w.write_all(&hex!("02")).await);
+    }
+
+    /// Calling shutdown while not having written all bytes that were promised
+    /// returns an error.
+    /// Note there's still cases of silent corruption if the user doesn't call
+    /// shutdown explicitly (only drops).
+    #[tokio::test]
+    async fn premature_shutdown() {
+        let payload = &[0xf0, 0xff];
+        let mut buf = Vec::new();
+        let mut w = BytesWriter::new(&mut buf, payload.len() as u64);
+
+        // call flush to write the size field
+        assert_ok!(w.flush().await);
+
+        // write half of the payload (!)
+        assert_ok!(w.write_all(&payload[0..1]).await);
+
+        // call shutdown, ensure it fails
+        assert_err!(w.shutdown().await);
+    }
+
+    /// Write to a Writer that fails to write during the size packet (after 4 bytes).
+    /// Ensure this error gets propagated on the first call to write.
+    #[tokio::test]
+    async fn inner_writer_fail_during_size_firstwrite() {
+        let payload = &[0xf0];
+
+        let mut mock = Builder::new()
+            .write(&1u32.to_le_bytes())
+            .write_error(std::io::Error::new(std::io::ErrorKind::Other, "🍿"))
+            .build();
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+
+        assert_err!(w.write_all(payload).await);
+    }
+
+    /// Write to a Writer that fails to write during the size packet (after 4 bytes).
+    /// Ensure this error gets propagated during an initial flush
+    #[tokio::test]
+    async fn inner_writer_fail_during_size_initial_flush() {
+        let payload = &[0xf0];
+
+        let mut mock = Builder::new()
+            .write(&1u32.to_le_bytes())
+            .write_error(std::io::Error::new(std::io::ErrorKind::Other, "🍿"))
+            .build();
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+
+        assert_err!(w.flush().await);
+    }
+
+    /// Write to a writer that fails to write during the payload (after 9 bytes).
+    /// Ensure this error gets propagated when we're writing this byte.
+    #[tokio::test]
+    async fn inner_writer_fail_during_write() {
+        let payload = &hex!("f0ff");
+
+        let mut mock = Builder::new()
+            .write(&2u64.to_le_bytes())
+            .write(&hex!("f0"))
+            .write_error(std::io::Error::new(std::io::ErrorKind::Other, "🍿"))
+            .build();
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+
+        assert_ok!(w.write(&hex!("f0")).await);
+        assert_err!(w.write(&hex!("ff")).await);
+    }
+
+    /// Write to a writer that fails to write during the padding (after 10 bytes).
+    /// Ensure this error gets propagated during a flush.
+    #[tokio::test]
+    async fn inner_writer_fail_during_padding_flush() {
+        let payload = &hex!("f0");
+
+        let mut mock = Builder::new()
+            .write(&1u64.to_le_bytes())
+            .write(&hex!("f0"))
+            .write(&hex!("00"))
+            .write_error(std::io::Error::new(std::io::ErrorKind::Other, "🍿"))
+            .build();
+        let mut w = BytesWriter::new(&mut mock, payload.len() as u64);
+
+        assert_ok!(w.write(&hex!("f0")).await);
+        assert_err!(w.flush().await);
+    }
+}
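
A usage sketch for the writer (same assumptions as the sketches above): the payload length is declared at construction time, and shutdown, which implies flush, writes the padding and turns an under-written payload into an error.

    use nix_compat::wire::BytesWriter;
    use tokio::io::AsyncWriteExt;

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let mut sink = Vec::new();
        // Declare the payload length up front; it becomes the size field.
        let mut w = BytesWriter::new(&mut sink, 9);

        w.write_all(b"123456789").await?;
        // shutdown implies flush: it writes the 7 bytes of padding, and fails
        // if fewer than the promised 9 payload bytes were written.
        w.shutdown().await?;

        assert_eq!(sink.len(), 8 + 9 + 7);
        Ok(())
    }
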
diff --git a/tvix/nix-compat/src/wire/mod.rs b/tvix/nix-compat/src/wire/mod.rs
new file mode 100644
index 000000000000..a197e3a1f451
--- /dev/null
+++ b/tvix/nix-compat/src/wire/mod.rs
@@ -0,0 +1,5 @@
+//! Module for parsing and emitting the wire format used by Nix, both in the
+//! nix-daemon protocol and in the NAR format.
+
+mod bytes;
+pub use bytes::*;
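
Finally, a small sketch of read_string, the UTF-8-validating variant that is also part of this re-exported surface (same assumptions as above):

    use nix_compat::wire;

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let mut buf = Vec::new();
        wire::write_bytes(&mut buf, "hello").await?;

        // read_string wraps read_bytes and rejects invalid UTF-8 payloads.
        let mut r = &buf[..];
        let s = wire::read_string(&mut r, 0..=64).await?;
        assert_eq!(s, "hello");
        Ok(())
    }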