Diffstat (limited to 'tvix')
-rw-r--r--  tvix/Cargo.lock | 666
-rw-r--r--  tvix/Cargo.nix | 1737
-rw-r--r--  tvix/Cargo.toml | 5
-rw-r--r--  tvix/README.md | 3
-rw-r--r--  tvix/boot/README.md | 2
-rw-r--r--  tvix/boot/default.nix | 6
-rw-r--r--  tvix/boot/tests/default.nix | 9
-rw-r--r--  tvix/build/Cargo.toml | 9
-rw-r--r--  tvix/build/src/bin/tvix-build.rs | 24
-rw-r--r--  tvix/build/src/buildservice/from_addr.rs | 35
-rw-r--r--  tvix/build/src/proto/mod.rs | 37
-rw-r--r--  tvix/castore/Cargo.toml | 11
-rw-r--r--  tvix/castore/default.nix | 17
-rw-r--r--  tvix/castore/src/blobservice/from_addr.rs | 43
-rw-r--r--  tvix/castore/src/blobservice/memory.rs | 28
-rw-r--r--  tvix/castore/src/blobservice/mod.rs | 2
-rw-r--r--  tvix/castore/src/blobservice/sled.rs | 150
-rw-r--r--  tvix/castore/src/blobservice/tests/mod.rs | 1
-rw-r--r--  tvix/castore/src/directoryservice/bigtable.rs | 4
-rw-r--r--  tvix/castore/src/directoryservice/closure_validator.rs | 58
-rw-r--r--  tvix/castore/src/directoryservice/from_addr.rs | 23
-rw-r--r--  tvix/castore/src/directoryservice/grpc.rs | 2
-rw-r--r--  tvix/castore/src/directoryservice/memory.rs | 9
-rw-r--r--  tvix/castore/src/directoryservice/mod.rs | 6
-rw-r--r--  tvix/castore/src/directoryservice/object_store.rs | 261
-rw-r--r--  tvix/castore/src/directoryservice/sled.rs | 101
-rw-r--r--  tvix/castore/src/directoryservice/tests/mod.rs | 3
-rw-r--r--  tvix/castore/src/directoryservice/traverse.rs | 153
-rw-r--r--  tvix/castore/src/directoryservice/utils.rs | 91
-rw-r--r--  tvix/castore/src/errors.rs | 7
-rw-r--r--  tvix/castore/src/fs/inodes.rs | 14
-rw-r--r--  tvix/castore/src/fs/virtiofs.rs | 1
-rw-r--r--  tvix/castore/src/import.rs | 361
-rw-r--r--  tvix/castore/src/import/archive.rs | 458
-rw-r--r--  tvix/castore/src/import/error.rs | 20
-rw-r--r--  tvix/castore/src/import/fs.rs | 185
-rw-r--r--  tvix/castore/src/import/mod.rs | 340
-rw-r--r--  tvix/castore/src/lib.rs | 3
-rw-r--r--  tvix/castore/src/path.rs | 446
-rw-r--r--  tvix/castore/src/proto/grpc_directoryservice_wrapper.rs | 83
-rw-r--r--  tvix/castore/src/proto/mod.rs | 88
-rw-r--r--  tvix/castore/src/proto/tests/directory.rs | 81
-rw-r--r--  tvix/castore/src/tests/import.rs | 2
-rw-r--r--  tvix/cli/Cargo.toml | 5
-rw-r--r--  tvix/cli/default.nix | 94
-rw-r--r--  tvix/cli/src/main.rs | 83
-rw-r--r--  tvix/crate-hashes.json | 5
-rw-r--r--  tvix/default.nix | 5
-rw-r--r--  tvix/docs/src/SUMMARY.md | 5
-rw-r--r--  tvix/docs/src/TODO.md | 142
-rw-r--r--  tvix/docs/src/nix-daemon/changelog.md | 202
-rw-r--r--  tvix/docs/src/nix-daemon/logging.md | 122
-rw-r--r--  tvix/docs/src/nix-daemon/operations.md | 894
-rw-r--r--  tvix/docs/src/nix-daemon/serialization.md | 305
-rw-r--r--  tvix/eval/docs/bindings.md | 133
-rw-r--r--  tvix/eval/src/builtins/impure.rs | 2
-rw-r--r--  tvix/eval/src/value/string.rs | 6
-rw-r--r--  tvix/eval/src/vm/mod.rs | 2
-rw-r--r--  tvix/eval/tests/nix_oracle.rs | 9
-rw-r--r--  tvix/glue/Cargo.toml | 10
-rw-r--r--  tvix/glue/benches/eval.rs | 24
-rw-r--r--  tvix/glue/src/builtins/derivation.rs | 84
-rw-r--r--  tvix/glue/src/builtins/errors.rs | 21
-rw-r--r--  tvix/glue/src/builtins/fetchers.rs | 377
-rw-r--r--  tvix/glue/src/builtins/import.rs | 45
-rw-r--r--  tvix/glue/src/builtins/mod.rs | 327
-rw-r--r--  tvix/glue/src/fetchers/decompression.rs (renamed from tvix/glue/src/decompression.rs) | 11
-rw-r--r--  tvix/glue/src/fetchers/mod.rs | 453
-rw-r--r--  tvix/glue/src/known_paths.rs | 114
-rw-r--r--  tvix/glue/src/lib.rs | 2
-rw-r--r--  tvix/glue/src/tests/mod.rs | 27
-rw-r--r--  tvix/glue/src/tests/tvix_tests/eval-okay-fetchtarball.exp | 1
-rw-r--r--  tvix/glue/src/tests/tvix_tests/eval-okay-fetchtarball.nix | 42
-rw-r--r--  tvix/glue/src/tests/tvix_tests/eval-okay-fetchurl.exp | 1
-rw-r--r--  tvix/glue/src/tests/tvix_tests/eval-okay-fetchurl.nix | 25
-rw-r--r--  tvix/glue/src/tvix_store_io.rs | 519
-rw-r--r--  tvix/nar-bridge-go/.gitignore (renamed from tvix/nar-bridge/.gitignore) | 0
-rw-r--r--  tvix/nar-bridge-go/README.md (renamed from tvix/nar-bridge/README.md) | 2
-rw-r--r--  tvix/nar-bridge-go/cmd/nar-bridge-http/main.go (renamed from tvix/nar-bridge/cmd/nar-bridge-http/main.go) | 4
-rw-r--r--  tvix/nar-bridge-go/cmd/nar-bridge-http/otel.go (renamed from tvix/nar-bridge/cmd/nar-bridge-http/otel.go) | 0
-rw-r--r--  tvix/nar-bridge-go/default.nix (renamed from tvix/nar-bridge/default.nix) | 2
-rw-r--r--  tvix/nar-bridge-go/go.mod (renamed from tvix/nar-bridge/go.mod) | 2
-rw-r--r--  tvix/nar-bridge-go/go.sum (renamed from tvix/nar-bridge/go.sum) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/http/nar_get.go (renamed from tvix/nar-bridge/pkg/http/nar_get.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/http/nar_put.go (renamed from tvix/nar-bridge/pkg/http/nar_put.go) | 2
-rw-r--r--  tvix/nar-bridge-go/pkg/http/narinfo.go (renamed from tvix/nar-bridge/pkg/http/narinfo.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/http/narinfo_get.go (renamed from tvix/nar-bridge/pkg/http/narinfo_get.go) | 61
-rw-r--r--  tvix/nar-bridge-go/pkg/http/narinfo_put.go (renamed from tvix/nar-bridge/pkg/http/narinfo_put.go) | 2
-rw-r--r--  tvix/nar-bridge-go/pkg/http/server.go (renamed from tvix/nar-bridge/pkg/http/server.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/http/util.go (renamed from tvix/nar-bridge/pkg/http/util.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/blob_upload.go (renamed from tvix/nar-bridge/pkg/importer/blob_upload.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/counting_writer.go (renamed from tvix/nar-bridge/pkg/importer/counting_writer.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/directory_upload.go (renamed from tvix/nar-bridge/pkg/importer/directory_upload.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/gen_pathinfo.go (renamed from tvix/nar-bridge/pkg/importer/gen_pathinfo.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/importer.go (renamed from tvix/nar-bridge/pkg/importer/importer.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/importer_test.go (renamed from tvix/nar-bridge/pkg/importer/importer_test.go) | 2
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/roundtrip_test.go (renamed from tvix/nar-bridge/pkg/importer/roundtrip_test.go) | 2
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/util_test.go (renamed from tvix/nar-bridge/pkg/importer/util_test.go) | 0
-rw-r--r--  tvix/nar-bridge-go/testdata/emptydirectory.nar (renamed from tvix/nar-bridge/testdata/emptydirectory.nar) | bin 96 -> 96 bytes
-rw-r--r--  tvix/nar-bridge-go/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar (renamed from tvix/nar-bridge/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar) | bin 464152 -> 464152 bytes
-rw-r--r--  tvix/nar-bridge-go/testdata/onebyteexecutable.nar (renamed from tvix/nar-bridge/testdata/onebyteexecutable.nar) | bin 152 -> 152 bytes
-rw-r--r--  tvix/nar-bridge-go/testdata/onebyteregular.nar (renamed from tvix/nar-bridge/testdata/onebyteregular.nar) | bin 120 -> 120 bytes
-rw-r--r--  tvix/nar-bridge-go/testdata/popdirectories.nar (renamed from tvix/nar-bridge/testdata/popdirectories.nar) | bin 600 -> 600 bytes
-rw-r--r--  tvix/nar-bridge-go/testdata/symlink.nar (renamed from tvix/nar-bridge/testdata/symlink.nar) | bin 136 -> 136 bytes
-rw-r--r--  tvix/nix-compat/Cargo.toml | 7
-rw-r--r--  tvix/nix-compat/src/aterm/escape.rs | 11
-rw-r--r--  tvix/nix-compat/src/aterm/parser.rs | 41
-rw-r--r--  tvix/nix-compat/src/derivation/mod.rs | 45
-rw-r--r--  tvix/nix-compat/src/derivation/parser.rs | 60
-rw-r--r--  tvix/nix-compat/src/derivation/tests/mod.rs | 194
-rw-r--r--  tvix/nix-compat/src/nar/mod.rs | 2
-rw-r--r--  tvix/nix-compat/src/nar/reader/async/mod.rs | 173
-rw-r--r--  tvix/nix-compat/src/nar/reader/async/read.rs | 69
-rw-r--r--  tvix/nix-compat/src/nar/reader/async/test.rs | 310
-rw-r--r--  tvix/nix-compat/src/nar/reader/mod.rs | 268
-rw-r--r--  tvix/nix-compat/src/nar/reader/read.rs | 32
-rw-r--r--  tvix/nix-compat/src/nar/reader/test.rs | 272
-rw-r--r--  tvix/nix-compat/src/nar/wire/mod.rs | 17
-rw-r--r--  tvix/nix-compat/src/nar/wire/tag.rs | 1
-rw-r--r--  tvix/nix-compat/src/narinfo/public_keys.rs | 37
-rw-r--r--  tvix/nix-compat/src/narinfo/signature.rs | 38
-rw-r--r--  tvix/nix-compat/src/nix_daemon/worker_protocol.rs | 73
-rw-r--r--  tvix/nix-compat/src/nixbase32.rs | 31
-rw-r--r--  tvix/nix-compat/src/nixhash/mod.rs | 26
-rw-r--r--  tvix/nix-compat/src/store_path/mod.rs | 67
-rw-r--r--  tvix/nix-compat/src/wire/bytes/mod.rs | 173
-rw-r--r--  tvix/nix-compat/src/wire/bytes/reader.rs | 464
-rw-r--r--  tvix/nix-compat/src/wire/bytes/reader/mod.rs | 684
-rw-r--r--  tvix/nix-compat/src/wire/bytes/reader/trailer.rs | 197
-rw-r--r--  tvix/nix-compat/src/wire/bytes/writer.rs | 18
-rw-r--r--  tvix/nix-compat/src/wire/mod.rs | 3
-rw-r--r--  tvix/nix-compat/src/wire/primitive.rs | 74
-rw-r--r--  tvix/shell.nix | 2
-rw-r--r--  tvix/store/Cargo.toml | 25
-rw-r--r--  tvix/store/default.nix | 14
-rw-r--r--  tvix/store/docs/api.md | 2
-rw-r--r--  tvix/store/src/bin/tvix-store.rs | 129
-rw-r--r--  tvix/store/src/import.rs | 40
-rw-r--r--  tvix/store/src/nar/import.rs | 352
-rw-r--r--  tvix/store/src/nar/mod.rs | 28
-rw-r--r--  tvix/store/src/nar/renderer.rs | 51
-rw-r--r--  tvix/store/src/pathinfoservice/bigtable.rs | 15
-rw-r--r--  tvix/store/src/pathinfoservice/combinators.rs | 111
-rw-r--r--  tvix/store/src/pathinfoservice/from_addr.rs | 86
-rw-r--r--  tvix/store/src/pathinfoservice/grpc.rs | 143
-rw-r--r--  tvix/store/src/pathinfoservice/lru.rs | 128
-rw-r--r--  tvix/store/src/pathinfoservice/memory.rs | 67
-rw-r--r--  tvix/store/src/pathinfoservice/mod.rs | 20
-rw-r--r--  tvix/store/src/pathinfoservice/nix_http.rs | 196
-rw-r--r--  tvix/store/src/pathinfoservice/sled.rs | 169
-rw-r--r--  tvix/store/src/pathinfoservice/tests/mod.rs | 10
-rw-r--r--  tvix/store/src/pathinfoservice/tests/utils.rs | 10
-rw-r--r--  tvix/store/src/proto/grpc_pathinfoservice_wrapper.rs | 25
-rw-r--r--  tvix/store/src/proto/tests/pathinfo.rs | 122
-rw-r--r--  tvix/store/src/tests/fixtures.rs | 10
-rw-r--r--  tvix/store/src/utils.rs | 17
-rw-r--r--  tvix/tools/crunch-v2/Cargo.lock | 23
-rw-r--r--  tvix/tools/crunch-v2/Cargo.toml | 2
-rw-r--r--  tvix/tools/crunch-v2/src/main.rs | 2
-rw-r--r--  tvix/tools/narinfo2parquet/Cargo.lock | 47
-rw-r--r--  tvix/tools/narinfo2parquet/Cargo.nix | 115
-rw-r--r--  tvix/website/landing-en.md | 2
162 files changed, 9618 insertions, 5493 deletions
diff --git a/tvix/Cargo.lock b/tvix/Cargo.lock
index 9666780c2b..dc5298c45b 100644
--- a/tvix/Cargo.lock
+++ b/tvix/Cargo.lock
@@ -18,6 +18,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 
 [[package]]
+name = "ahash"
+version = "0.8.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "version_check",
+ "zerocopy",
+]
+
+[[package]]
 name = "aho-corasick"
 version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -27,6 +39,12 @@ dependencies = [
 ]
 
 [[package]]
+name = "allocator-api2"
+version = "0.2.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
+
+[[package]]
 name = "android-tzdata"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -134,9 +152,9 @@ dependencies = [
 
 [[package]]
 name = "async-compression"
-version = "0.4.6"
+version = "0.4.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c"
+checksum = "4e9eabd7a98fe442131a17c316bd9349c43695e49e730c3c8e12cfb5f4da2693"
 dependencies = [
  "bzip2",
  "flate2",
@@ -145,6 +163,8 @@ dependencies = [
  "pin-project-lite",
  "tokio",
  "xz2",
+ "zstd",
+ "zstd-safe",
 ]
 
 [[package]]
@@ -205,17 +225,6 @@ dependencies = [
 ]
 
 [[package]]
-name = "async-recursion"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0"
-dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
- "syn 2.0.48",
-]
-
-[[package]]
 name = "async-signal"
 version = "0.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -250,8 +259,8 @@ version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -277,8 +286,8 @@ version = "0.1.77"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -301,42 +310,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
 dependencies = [
  "async-trait",
- "axum-core 0.3.4",
+ "axum-core",
  "bitflags 1.3.2",
  "bytes",
  "futures-util",
- "http 0.2.11",
- "http-body 0.4.6",
- "hyper 0.14.28",
- "itoa",
- "matchit",
- "memchr",
- "mime",
- "percent-encoding",
- "pin-project-lite",
- "rustversion",
- "serde",
- "sync_wrapper",
- "tower",
- "tower-layer",
- "tower-service",
-]
-
-[[package]]
-name = "axum"
-version = "0.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1236b4b292f6c4d6dc34604bb5120d85c3fe1d1aa596bd5cc52ca054d13e7b9e"
-dependencies = [
- "async-trait",
- "axum-core 0.4.3",
- "bytes",
- "futures-util",
- "http 1.1.0",
- "http-body 1.0.0",
- "http-body-util",
- "hyper 1.2.0",
- "hyper-util",
+ "http",
+ "http-body",
+ "hyper",
  "itoa",
  "matchit",
  "memchr",
@@ -345,15 +325,10 @@ dependencies = [
  "pin-project-lite",
  "rustversion",
  "serde",
- "serde_json",
- "serde_path_to_error",
- "serde_urlencoded",
  "sync_wrapper",
- "tokio",
  "tower",
  "tower-layer",
  "tower-service",
- "tracing",
 ]
 
 [[package]]
@@ -365,8 +340,8 @@ dependencies = [
  "async-trait",
  "bytes",
  "futures-util",
- "http 0.2.11",
- "http-body 0.4.6",
+ "http",
+ "http-body",
  "mime",
  "rustversion",
  "tower-layer",
@@ -374,27 +349,6 @@ dependencies = [
 ]
 
 [[package]]
-name = "axum-core"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3"
-dependencies = [
- "async-trait",
- "bytes",
- "futures-util",
- "http 1.1.0",
- "http-body 1.0.0",
- "http-body-util",
- "mime",
- "pin-project-lite",
- "rustversion",
- "sync_wrapper",
- "tower-layer",
- "tower-service",
- "tracing",
-]
-
-[[package]]
 name = "backtrace"
 version = "0.3.69"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -427,9 +381,9 @@ version = "0.2.9"
 source = "git+https://github.com/flokli/bigtable_rs?rev=0af404741dfc40eb9fa99cf4d4140a09c5c20df7#0af404741dfc40eb9fa99cf4d4140a09c5c20df7"
 dependencies = [
  "gcp_auth",
- "http 0.2.11",
+ "http",
  "log",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "prost-types",
  "prost-wkt",
@@ -439,7 +393,7 @@ dependencies = [
  "serde_with",
  "thiserror",
  "tokio",
- "tonic 0.11.0",
+ "tonic",
  "tonic-build",
  "tower",
 ]
@@ -670,8 +624,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442"
 dependencies = [
  "heck",
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -884,8 +838,8 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -907,8 +861,8 @@ checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f"
 dependencies = [
  "fnv",
  "ident_case",
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "strsim",
  "syn 2.0.48",
 ]
@@ -920,7 +874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
 dependencies = [
  "darling_core",
- "quote 1.0.35",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -1075,7 +1029,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c"
 dependencies = [
  "num-traits",
- "quote 1.0.35",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -1337,8 +1291,8 @@ version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -1397,10 +1351,10 @@ dependencies = [
  "base64",
  "chrono",
  "home",
- "hyper 0.14.28",
+ "hyper",
  "hyper-rustls",
  "ring",
- "rustls 0.21.10",
+ "rustls 0.21.12",
  "rustls-pemfile 1.0.4",
  "serde",
  "serde_json",
@@ -1462,35 +1416,16 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
 
 [[package]]
 name = "h2"
-version = "0.3.24"
+version = "0.3.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9"
+checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8"
 dependencies = [
  "bytes",
  "fnv",
  "futures-core",
  "futures-sink",
  "futures-util",
- "http 0.2.11",
- "indexmap 2.1.0",
- "slab",
- "tokio",
- "tokio-util",
- "tracing",
-]
-
-[[package]]
-name = "h2"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51ee2dd2e4f378392eeff5d51618cd9a63166a2513846bbc55f21cfacd9199d4"
-dependencies = [
- "bytes",
- "fnv",
- "futures-core",
- "futures-sink",
- "futures-util",
- "http 1.1.0",
+ "http",
  "indexmap 2.1.0",
  "slab",
  "tokio",
@@ -1515,6 +1450,10 @@ name = "hashbrown"
 version = "0.14.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
+dependencies = [
+ "ahash",
+ "allocator-api2",
+]
 
 [[package]]
 name = "heck"
@@ -1561,47 +1500,13 @@ dependencies = [
 ]
 
 [[package]]
-name = "http"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
-dependencies = [
- "bytes",
- "fnv",
- "itoa",
-]
-
-[[package]]
 name = "http-body"
 version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
 dependencies = [
  "bytes",
- "http 0.2.11",
- "pin-project-lite",
-]
-
-[[package]]
-name = "http-body"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643"
-dependencies = [
- "bytes",
- "http 1.1.0",
-]
-
-[[package]]
-name = "http-body-util"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d"
-dependencies = [
- "bytes",
- "futures-core",
- "http 1.1.0",
- "http-body 1.0.0",
+ "http",
  "pin-project-lite",
 ]
 
@@ -1633,9 +1538,9 @@ dependencies = [
  "futures-channel",
  "futures-core",
  "futures-util",
- "h2 0.3.24",
- "http 0.2.11",
- "http-body 0.4.6",
+ "h2",
+ "http",
+ "http-body",
  "httparse",
  "httpdate",
  "itoa",
@@ -1648,35 +1553,15 @@ dependencies = [
 ]
 
 [[package]]
-name = "hyper"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a"
-dependencies = [
- "bytes",
- "futures-channel",
- "futures-util",
- "h2 0.4.3",
- "http 1.1.0",
- "http-body 1.0.0",
- "httparse",
- "httpdate",
- "itoa",
- "pin-project-lite",
- "smallvec",
- "tokio",
-]
-
-[[package]]
 name = "hyper-rustls"
 version = "0.24.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
 dependencies = [
  "futures-util",
- "http 0.2.11",
- "hyper 0.14.28",
- "rustls 0.21.10",
+ "http",
+ "hyper",
+ "rustls 0.21.12",
  "rustls-native-certs 0.6.3",
  "tokio",
  "tokio-rustls 0.24.1",
@@ -1688,29 +1573,13 @@ version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
 dependencies = [
- "hyper 0.14.28",
+ "hyper",
  "pin-project-lite",
  "tokio",
  "tokio-io-timeout",
 ]
 
 [[package]]
-name = "hyper-util"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa"
-dependencies = [
- "bytes",
- "futures-util",
- "http 1.1.0",
- "http-body 1.0.0",
- "hyper 1.2.0",
- "pin-project-lite",
- "socket2",
- "tokio",
-]
-
-[[package]]
 name = "iana-time-zone"
 version = "0.1.60"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2000,6 +1869,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
 
 [[package]]
+name = "lru"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc"
+dependencies = [
+ "hashbrown 0.14.3",
+]
+
+[[package]]
 name = "lzma-sys"
 version = "0.1.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2104,9 +1982,9 @@ dependencies = [
 
 [[package]]
 name = "mio"
-version = "0.8.10"
+version = "0.8.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09"
+checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
 dependencies = [
  "libc",
  "log",
@@ -2198,8 +2076,6 @@ dependencies = [
  "serde",
  "serde_json",
  "sha2",
- "test-case",
- "test-generator",
  "thiserror",
  "tokio",
  "tokio-test",
@@ -2282,10 +2158,10 @@ dependencies = [
  "chrono",
  "futures",
  "humantime",
- "hyper 0.14.28",
+ "hyper",
  "itertools 0.12.0",
  "md-5",
- "parking_lot 0.12.1",
+ "parking_lot 0.12.2",
  "percent-encoding",
  "quick-xml",
  "rand",
@@ -2321,13 +2197,12 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
 
 [[package]]
 name = "opentelemetry"
-version = "0.21.0"
+version = "0.22.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a"
+checksum = "900d57987be3f2aeb70d385fff9b27fb74c5723cc9a52d904d4f9c807a0667bf"
 dependencies = [
  "futures-core",
  "futures-sink",
- "indexmap 2.1.0",
  "js-sys",
  "once_cell",
  "pin-project-lite",
@@ -2337,49 +2212,46 @@ dependencies = [
 
 [[package]]
 name = "opentelemetry-otlp"
-version = "0.14.0"
+version = "0.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f24cda83b20ed2433c68241f918d0f6fdec8b1d43b7a9590ab4420c5095ca930"
+checksum = "1a016b8d9495c639af2145ac22387dcb88e44118e45320d9238fbf4e7889abcb"
 dependencies = [
  "async-trait",
  "futures-core",
- "http 0.2.11",
+ "http",
  "opentelemetry",
  "opentelemetry-proto",
  "opentelemetry-semantic-conventions",
  "opentelemetry_sdk",
- "prost 0.11.9",
+ "prost",
  "thiserror",
  "tokio",
- "tonic 0.9.2",
+ "tonic",
 ]
 
 [[package]]
 name = "opentelemetry-proto"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2e155ce5cc812ea3d1dffbd1539aed653de4bf4882d60e6e04dcf0901d674e1"
+checksum = "3a8fddc9b68f5b80dae9d6f510b88e02396f006ad48cac349411fbecc80caae4"
 dependencies = [
  "opentelemetry",
  "opentelemetry_sdk",
- "prost 0.11.9",
- "tonic 0.9.2",
+ "prost",
+ "tonic",
 ]
 
 [[package]]
 name = "opentelemetry-semantic-conventions"
-version = "0.13.0"
+version = "0.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f5774f1ef1f982ef2a447f6ee04ec383981a3ab99c8e77a1a7b30182e65bbc84"
-dependencies = [
- "opentelemetry",
-]
+checksum = "f9ab5bd6c42fb9349dcf28af2ba9a0667f697f9bdcca045d39f2cec5543e2910"
 
 [[package]]
 name = "opentelemetry_sdk"
-version = "0.21.2"
+version = "0.22.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f16aec8a98a457a52664d69e0091bac3a0abd18ead9b641cb00202ba4e0efe4"
+checksum = "9e90c7113be649e31e9a0f8b5ee24ed7a16923b322c3c5ab6367469c049d6b7e"
 dependencies = [
  "async-trait",
  "crossbeam-channel",
@@ -2440,9 +2312,9 @@ dependencies = [
 
 [[package]]
 name = "parking_lot"
-version = "0.12.1"
+version = "0.12.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
+checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb"
 dependencies = [
  "lock_api",
  "parking_lot_core 0.9.9",
@@ -2512,8 +2384,8 @@ version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -2632,21 +2504,12 @@ version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5"
 dependencies = [
- "proc-macro2 1.0.76",
+ "proc-macro2",
  "syn 2.0.48",
 ]
 
 [[package]]
 name = "proc-macro2"
-version = "0.4.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759"
-dependencies = [
- "unicode-xid",
-]
-
-[[package]]
-name = "proc-macro2"
 version = "1.0.76"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
@@ -2676,22 +2539,12 @@ dependencies = [
 
 [[package]]
 name = "prost"
-version = "0.11.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd"
-dependencies = [
- "bytes",
- "prost-derive 0.11.9",
-]
-
-[[package]]
-name = "prost"
 version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a"
 dependencies = [
  "bytes",
- "prost-derive 0.12.3",
+ "prost-derive",
 ]
 
 [[package]]
@@ -2708,7 +2561,7 @@ dependencies = [
  "once_cell",
  "petgraph",
  "prettyplease",
- "prost 0.12.3",
+ "prost",
  "prost-types",
  "pulldown-cmark",
  "pulldown-cmark-to-cmark",
@@ -2720,27 +2573,14 @@ dependencies = [
 
 [[package]]
 name = "prost-derive"
-version = "0.11.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4"
-dependencies = [
- "anyhow",
- "itertools 0.10.5",
- "proc-macro2 1.0.76",
- "quote 1.0.35",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "prost-derive"
 version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e"
 dependencies = [
  "anyhow",
  "itertools 0.11.0",
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -2750,7 +2590,7 @@ version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e"
 dependencies = [
- "prost 0.12.3",
+ "prost",
 ]
 
 [[package]]
@@ -2761,7 +2601,7 @@ checksum = "4d8ef9c3f0f1dab910d2b7e2c24a8e4322e122eba6d7a1921eeebcebbc046c40"
 dependencies = [
  "chrono",
  "inventory",
- "prost 0.12.3",
+ "prost",
  "serde",
  "serde_derive",
  "serde_json",
@@ -2775,10 +2615,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5b31cae9a54ca84fee1504740a82eebf2479532905e106f63ca0c3bc8d780321"
 dependencies = [
  "heck",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "prost-types",
- "quote 1.0.35",
+ "quote",
 ]
 
 [[package]]
@@ -2788,7 +2628,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "435be4a8704091b4c5fb1d79799de7f2dbff53af05edf29385237f8cf7ab37ee"
 dependencies = [
  "chrono",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "prost-types",
  "prost-wkt",
@@ -2837,20 +2677,11 @@ dependencies = [
 
 [[package]]
 name = "quote"
-version = "0.6.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1"
-dependencies = [
- "proc-macro2 0.4.30",
-]
-
-[[package]]
-name = "quote"
 version = "1.0.35"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
 dependencies = [
- "proc-macro2 1.0.76",
+ "proc-macro2",
 ]
 
 [[package]]
@@ -3030,10 +2861,10 @@ dependencies = [
  "encoding_rs",
  "futures-core",
  "futures-util",
- "h2 0.3.24",
- "http 0.2.11",
- "http-body 0.4.6",
- "hyper 0.14.28",
+ "h2",
+ "http",
+ "http-body",
+ "hyper",
  "hyper-rustls",
  "ipnet",
  "js-sys",
@@ -3042,7 +2873,7 @@ dependencies = [
  "once_cell",
  "percent-encoding",
  "pin-project-lite",
- "rustls 0.21.10",
+ "rustls 0.21.12",
  "rustls-native-certs 0.6.3",
  "rustls-pemfile 1.0.4",
  "serde",
@@ -3117,8 +2948,8 @@ checksum = "04a9df72cc1f67020b0d63ad9bfe4a323e459ea7eb68e03bd9824db49f9a4c25"
 dependencies = [
  "cfg-if",
  "glob",
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "regex",
  "relative-path",
  "rustc_version",
@@ -3132,7 +2963,7 @@ version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "88530b681abe67924d42cca181d070e3ac20e0740569441a9e35a7cedd2b34a4"
 dependencies = [
- "quote 1.0.35",
+ "quote",
  "rand",
  "rustc_version",
  "syn 2.0.48",
@@ -3174,9 +3005,9 @@ dependencies = [
 
 [[package]]
 name = "rustls"
-version = "0.21.10"
+version = "0.21.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba"
+checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
 dependencies = [
  "log",
  "ring",
@@ -3186,9 +3017,9 @@ dependencies = [
 
 [[package]]
 name = "rustls"
-version = "0.22.2"
+version = "0.22.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41"
+checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432"
 dependencies = [
  "log",
  "ring",
@@ -3394,8 +3225,8 @@ version = "1.0.197"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -3411,16 +3242,6 @@ dependencies = [
 ]
 
 [[package]]
-name = "serde_path_to_error"
-version = "0.1.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6"
-dependencies = [
- "itoa",
- "serde",
-]
-
-[[package]]
 name = "serde_qs"
 version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3477,8 +3298,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6561dc161a9224638a31d876ccdfefbc1df91d3f3a8342eddb35f055d48c7655"
 dependencies = [
  "darling",
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -3588,8 +3409,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf"
 dependencies = [
  "heck",
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 1.0.109",
 ]
 
@@ -3643,8 +3464,8 @@ version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "104842d6278bf64aa9d2f182ba4bde31e8aec7a131d29b7f444bb9b344a09e2a"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "structmeta-derive",
  "syn 1.0.109",
 ]
@@ -3655,8 +3476,8 @@ version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "24420be405b590e2d746d83b01f09af673270cf80e9b003a5fa7b651c58c7d93"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 1.0.109",
 ]
 
@@ -3668,23 +3489,12 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
 
 [[package]]
 name = "syn"
-version = "0.15.44"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5"
-dependencies = [
- "proc-macro2 0.4.30",
- "quote 0.6.13",
- "unicode-xid",
-]
-
-[[package]]
-name = "syn"
 version = "1.0.109"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "unicode-ident",
 ]
 
@@ -3694,8 +3504,8 @@ version = "2.0.48"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "unicode-ident",
 ]
 
@@ -3758,57 +3568,13 @@ dependencies = [
 ]
 
 [[package]]
-name = "test-case"
-version = "3.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8"
-dependencies = [
- "test-case-macros",
-]
-
-[[package]]
-name = "test-case-core"
-version = "3.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adcb7fd841cd518e279be3d5a3eb0636409487998a4aff22f3de87b81e88384f"
-dependencies = [
- "cfg-if",
- "proc-macro2 1.0.76",
- "quote 1.0.35",
- "syn 2.0.48",
-]
-
-[[package]]
-name = "test-case-macros"
-version = "3.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb"
-dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
- "syn 2.0.48",
- "test-case-core",
-]
-
-[[package]]
-name = "test-generator"
-version = "0.3.0"
-source = "git+https://github.com/JamesGuthrie/test-generator.git?rev=82e799979980962aec1aa324ec6e0e4cad781f41#82e799979980962aec1aa324ec6e0e4cad781f41"
-dependencies = [
- "glob",
- "proc-macro2 0.4.30",
- "quote 0.6.13",
- "syn 0.15.44",
-]
-
-[[package]]
 name = "test-strategy"
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "62d6408d1406657be2f9d1701fbae379331d30d2f6e92050710edb0d34eeb480"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "structmeta",
  "syn 1.0.109",
 ]
@@ -3834,8 +3600,8 @@ version = "1.0.56"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -3935,11 +3701,10 @@ dependencies = [
 
 [[package]]
 name = "tokio-listener"
-version = "0.3.2"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96367e127b4cf47b92592a5154a563435fe28fe3fccf25917d4a34ee59c87303"
+checksum = "4134661e12ec11c6276be73544a43144a357b08dfab5c41fd226e15b5bc9a6b2"
 dependencies = [
- "axum 0.7.4",
  "document-features",
  "futures-core",
  "futures-util",
@@ -3948,7 +3713,7 @@ dependencies = [
  "socket2",
  "tokio",
  "tokio-util",
- "tonic 0.11.0",
+ "tonic",
  "tracing",
 ]
 
@@ -3958,8 +3723,8 @@ version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -3980,7 +3745,7 @@ version = "0.24.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
 dependencies = [
- "rustls 0.21.10",
+ "rustls 0.21.12",
  "tokio",
 ]
 
@@ -3990,7 +3755,7 @@ version = "0.25.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f"
 dependencies = [
- "rustls 0.22.2",
+ "rustls 0.22.4",
  "rustls-pki-types",
  "tokio",
 ]
@@ -4085,51 +3850,23 @@ dependencies = [
 
 [[package]]
 name = "tonic"
-version = "0.9.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a"
-dependencies = [
- "async-trait",
- "axum 0.6.20",
- "base64",
- "bytes",
- "futures-core",
- "futures-util",
- "h2 0.3.24",
- "http 0.2.11",
- "http-body 0.4.6",
- "hyper 0.14.28",
- "hyper-timeout",
- "percent-encoding",
- "pin-project",
- "prost 0.11.9",
- "tokio",
- "tokio-stream",
- "tower",
- "tower-layer",
- "tower-service",
- "tracing",
-]
-
-[[package]]
-name = "tonic"
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13"
 dependencies = [
  "async-stream",
  "async-trait",
- "axum 0.6.20",
+ "axum",
  "base64",
  "bytes",
- "h2 0.3.24",
- "http 0.2.11",
- "http-body 0.4.6",
- "hyper 0.14.28",
+ "h2",
+ "http",
+ "http-body",
+ "hyper",
  "hyper-timeout",
  "percent-encoding",
  "pin-project",
- "prost 0.12.3",
+ "prost",
  "rustls-native-certs 0.7.0",
  "rustls-pemfile 2.1.0",
  "rustls-pki-types",
@@ -4149,9 +3886,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "be4ef6dd70a610078cb4e338a0f79d06bc759ff1b22d2120c2ff02ae264ba9c2"
 dependencies = [
  "prettyplease",
- "proc-macro2 1.0.76",
+ "proc-macro2",
  "prost-build",
- "quote 1.0.35",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -4161,11 +3898,11 @@ version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7"
 dependencies = [
- "prost 0.12.3",
+ "prost",
  "prost-types",
  "tokio",
  "tokio-stream",
- "tonic 0.11.0",
+ "tonic",
 ]
 
 [[package]]
@@ -4218,8 +3955,8 @@ version = "0.1.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -4256,9 +3993,9 @@ dependencies = [
 
 [[package]]
 name = "tracing-opentelemetry"
-version = "0.22.0"
+version = "0.23.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c67ac25c5407e7b961fafc6f7e9aa5958fd297aada2d20fa2ae1737357e55596"
+checksum = "a9be14ba1bbe4ab79e9229f7f89fab8d120b865859f10527f31c033e599d2284"
 dependencies = [
  "js-sys",
  "once_cell",
@@ -4273,16 +4010,6 @@ dependencies = [
 ]
 
 [[package]]
-name = "tracing-serde"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1"
-dependencies = [
- "serde",
- "tracing-core",
-]
-
-[[package]]
 name = "tracing-subscriber"
 version = "0.3.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -4292,15 +4019,12 @@ dependencies = [
  "nu-ansi-term",
  "once_cell",
  "regex",
- "serde",
- "serde_json",
  "sharded-slab",
  "smallvec",
  "thread_local",
  "tracing",
  "tracing-core",
  "tracing-log",
- "tracing-serde",
 ]
 
 [[package]]
@@ -4316,13 +4040,13 @@ dependencies = [
  "bytes",
  "clap",
  "itertools 0.12.0",
- "prost 0.12.3",
+ "prost",
  "prost-build",
- "test-case",
+ "rstest",
  "thiserror",
  "tokio",
  "tokio-listener",
- "tonic 0.11.0",
+ "tonic",
  "tonic-build",
  "tonic-reflection",
  "tracing",
@@ -4335,6 +4059,7 @@ dependencies = [
 name = "tvix-castore"
 version = "0.1.0"
 dependencies = [
+ "async-compression",
  "async-process",
  "async-stream",
  "async-tempfile",
@@ -4351,10 +4076,10 @@ dependencies = [
  "lazy_static",
  "libc",
  "object_store",
- "parking_lot 0.12.1",
+ "parking_lot 0.12.2",
  "petgraph",
  "pin-project-lite",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "rstest",
  "rstest_reuse",
@@ -4367,8 +4092,9 @@ dependencies = [
  "tokio",
  "tokio-retry",
  "tokio-stream",
+ "tokio-tar",
  "tokio-util",
- "tonic 0.11.0",
+ "tonic",
  "tonic-build",
  "tonic-reflection",
  "tower",
@@ -4394,7 +4120,6 @@ dependencies = [
  "dirs",
  "nix-compat",
  "rustyline",
- "test-case",
  "thiserror",
  "tokio",
  "tracing",
@@ -4449,8 +4174,8 @@ dependencies = [
 name = "tvix-eval-builtin-macros"
 version = "0.0.1"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 1.0.109",
  "tvix-eval",
 ]
@@ -4460,7 +4185,6 @@ name = "tvix-glue"
 version = "0.1.0"
 dependencies = [
  "async-compression",
- "async-recursion",
  "bstr",
  "bytes",
  "criterion",
@@ -4469,6 +4193,7 @@ dependencies = [
  "hex-literal",
  "lazy_static",
  "magic",
+ "md-5",
  "nix 0.27.1",
  "nix-compat",
  "pin-project",
@@ -4477,9 +4202,9 @@ dependencies = [
  "rstest",
  "serde",
  "serde_json",
+ "sha1",
  "sha2",
  "tempfile",
- "test-case",
  "thiserror",
  "tokio",
  "tokio-tar",
@@ -4489,6 +4214,7 @@ dependencies = [
  "tvix-castore",
  "tvix-eval",
  "tvix-store",
+ "url",
  "walkdir",
  "wu-manber",
 ]
@@ -4507,8 +4233,8 @@ name = "tvix-store"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "async-compression",
  "async-process",
- "async-recursion",
  "async-stream",
  "bigtable_rs",
  "blake3",
@@ -4519,12 +4245,14 @@ dependencies = [
  "data-encoding",
  "futures",
  "lazy_static",
+ "lru",
  "nix-compat",
  "opentelemetry",
  "opentelemetry-otlp",
  "opentelemetry_sdk",
+ "parking_lot 0.12.2",
  "pin-project-lite",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "reqwest",
  "rstest",
@@ -4536,14 +4264,13 @@ dependencies = [
  "sha2",
  "sled",
  "tempfile",
- "test-case",
  "thiserror",
  "tokio",
  "tokio-listener",
  "tokio-retry",
  "tokio-stream",
  "tokio-util",
- "tonic 0.11.0",
+ "tonic",
  "tonic-build",
  "tonic-reflection",
  "tower",
@@ -4553,7 +4280,6 @@ dependencies = [
  "tvix-castore",
  "url",
  "walkdir",
- "xz2",
 ]
 
 [[package]]
@@ -4581,8 +4307,8 @@ version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ac73887f47b9312552aa90ef477927ff014d63d1920ca8037c6c1951eab64bb1"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
 ]
 
@@ -4635,12 +4361,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85"
 
 [[package]]
-name = "unicode-xid"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
-
-[[package]]
 name = "untrusted"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -4821,8 +4541,8 @@ dependencies = [
  "bumpalo",
  "log",
  "once_cell",
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
  "wasm-bindgen-shared",
 ]
@@ -4845,7 +4565,7 @@ version = "0.2.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999"
 dependencies = [
- "quote 1.0.35",
+ "quote",
  "wasm-bindgen-macro-support",
 ]
 
@@ -4855,8 +4575,8 @@ version = "0.2.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7"
 dependencies = [
- "proc-macro2 1.0.76",
- "quote 1.0.35",
+ "proc-macro2",
+ "quote",
  "syn 2.0.48",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
@@ -4893,9 +4613,9 @@ dependencies = [
 
 [[package]]
 name = "web-time"
-version = "0.2.4"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa30049b1c872b72c89866d458eae9f20380ab280ffd1b1e18df2d3e2d98cfe0"
+checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
 dependencies = [
  "js-sys",
  "wasm-bindgen",
@@ -5146,6 +4866,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
 
 [[package]]
+name = "zerocopy"
+version = "0.7.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087"
+dependencies = [
+ "zerocopy-derive",
+]
+
+[[package]]
+name = "zerocopy-derive"
+version = "0.7.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.48",
+]
+
+[[package]]
 name = "zeroize"
 version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/tvix/Cargo.nix b/tvix/Cargo.nix
index 1145b82544..f6c3108fa4 100644
--- a/tvix/Cargo.nix
+++ b/tvix/Cargo.nix
@@ -1,4 +1,4 @@
-# This file was @generated by crate2nix 0.13.0 with the command:
+# This file was @generated by crate2nix 0.14.0 with the command:
 #   "generate" "--all-features"
 # See https://github.com/kolloch/crate2nix for more info.
 
@@ -193,6 +193,49 @@ rec {
           "rustc-dep-of-std" = [ "core" "compiler_builtins" ];
         };
       };
+      "ahash" = rec {
+        crateName = "ahash";
+        version = "0.8.11";
+        edition = "2018";
+        sha256 = "04chdfkls5xmhp1d48gnjsmglbqibizs3bpbj6rsj604m10si7g8";
+        authors = [
+          "Tom Kaitchuck <Tom.Kaitchuck@gmail.com>"
+        ];
+        dependencies = [
+          {
+            name = "cfg-if";
+            packageId = "cfg-if";
+          }
+          {
+            name = "once_cell";
+            packageId = "once_cell";
+            usesDefaultFeatures = false;
+            target = { target, features }: (!(("arm" == target."arch" or null) && ("none" == target."os" or null)));
+            features = [ "alloc" ];
+          }
+          {
+            name = "zerocopy";
+            packageId = "zerocopy";
+            usesDefaultFeatures = false;
+            features = [ "simd" ];
+          }
+        ];
+        buildDependencies = [
+          {
+            name = "version_check";
+            packageId = "version_check";
+          }
+        ];
+        features = {
+          "atomic-polyfill" = [ "dep:atomic-polyfill" "once_cell/atomic-polyfill" ];
+          "compile-time-rng" = [ "const-random" ];
+          "const-random" = [ "dep:const-random" ];
+          "default" = [ "std" "runtime-rng" ];
+          "getrandom" = [ "dep:getrandom" ];
+          "runtime-rng" = [ "getrandom" ];
+          "serde" = [ "dep:serde" ];
+        };
+      };
       "aho-corasick" = rec {
         crateName = "aho-corasick";
         version = "1.1.2";
@@ -218,6 +261,21 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" "perf-literal" "std" ];
       };
+      "allocator-api2" = rec {
+        crateName = "allocator-api2";
+        version = "0.2.18";
+        edition = "2018";
+        sha256 = "0kr6lfnxvnj164j1x38g97qjlhb7akppqzvgfs0697140ixbav2w";
+        authors = [
+          "Zakarum <zaq.dev@icloud.com>"
+        ];
+        features = {
+          "default" = [ "std" ];
+          "serde" = [ "dep:serde" ];
+          "std" = [ "alloc" ];
+        };
+        resolvedDefaultFeatures = [ "alloc" ];
+      };
       "android-tzdata" = rec {
         crateName = "android-tzdata";
         version = "0.1.1";
@@ -454,9 +512,9 @@ rec {
       };
       "async-compression" = rec {
         crateName = "async-compression";
-        version = "0.4.6";
+        version = "0.4.9";
         edition = "2018";
-        sha256 = "0b6874q56g1cx8ivs9j89d757rsh9kyrrwlp1852094jjrmg85m1";
+        sha256 = "14r6vbsbbkqjiqy0qwwywjakdi29jfyidhqp389l5r4gm7bsp7jf";
         authors = [
           "Wim Looman <wim@nemo157.com>"
           "Allen Bui <fairingrey@gmail.com>"
@@ -496,6 +554,27 @@ rec {
             packageId = "xz2";
             optional = true;
           }
+          {
+            name = "zstd";
+            packageId = "zstd";
+            rename = "libzstd";
+            optional = true;
+            usesDefaultFeatures = false;
+          }
+          {
+            name = "zstd-safe";
+            packageId = "zstd-safe";
+            optional = true;
+            usesDefaultFeatures = false;
+          }
+        ];
+        devDependencies = [
+          {
+            name = "tokio";
+            packageId = "tokio";
+            usesDefaultFeatures = false;
+            features = [ "io-util" "macros" "rt-multi-thread" "io-std" ];
+          }
         ];
         features = {
           "all" = [ "all-implementations" "all-algorithms" ];
@@ -518,7 +597,7 @@ rec {
           "zstd-safe" = [ "dep:zstd-safe" ];
           "zstdmt" = [ "zstd" "zstd-safe/zstdmt" ];
         };
-        resolvedDefaultFeatures = [ "bzip2" "flate2" "gzip" "tokio" "xz" "xz2" ];
+        resolvedDefaultFeatures = [ "bzip2" "flate2" "gzip" "libzstd" "tokio" "xz" "xz2" "zstd" "zstd-safe" ];
       };
       "async-io" = rec {
         crateName = "async-io";
@@ -698,35 +777,6 @@ rec {
         ];
 
       };
-      "async-recursion" = rec {
-        crateName = "async-recursion";
-        version = "1.0.5";
-        edition = "2018";
-        sha256 = "1l2vlgyaa9a2dd0y1vbqyppzsvpdr1y4rar4gn1qi68pl5dmmmaz";
-        procMacro = true;
-        authors = [
-          "Robert Usher <266585+dcchut@users.noreply.github.com>"
-        ];
-        dependencies = [
-          {
-            name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "quote";
-            packageId = "quote 1.0.35";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "syn";
-            packageId = "syn 2.0.48";
-            usesDefaultFeatures = false;
-            features = [ "full" "parsing" "printing" "proc-macro" "clone-impls" ];
-          }
-        ];
-
-      };
       "async-signal" = rec {
         crateName = "async-signal";
         version = "0.2.5";
@@ -833,11 +883,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -907,11 +957,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -944,7 +994,7 @@ rec {
         ];
 
       };
-      "axum 0.6.20" = rec {
+      "axum" = rec {
         crateName = "axum";
         version = "0.6.20";
         edition = "2021";
@@ -956,7 +1006,7 @@ rec {
           }
           {
             name = "axum-core";
-            packageId = "axum-core 0.3.4";
+            packageId = "axum-core";
           }
           {
             name = "bitflags";
@@ -974,15 +1024,15 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "http-body";
-            packageId = "http-body 0.4.6";
+            packageId = "http-body";
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "stream" ];
           }
           {
@@ -1072,184 +1122,7 @@ rec {
           "ws" = [ "tokio" "dep:tokio-tungstenite" "dep:sha1" "dep:base64" ];
         };
       };
-      "axum 0.7.4" = rec {
-        crateName = "axum";
-        version = "0.7.4";
-        edition = "2021";
-        sha256 = "17kv7v8m981cqmfbv5m538fzxhw51l9bajv06kfddi7njarb8dhj";
-        dependencies = [
-          {
-            name = "async-trait";
-            packageId = "async-trait";
-          }
-          {
-            name = "axum-core";
-            packageId = "axum-core 0.4.3";
-          }
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-            features = [ "alloc" ];
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 1.0.0";
-          }
-          {
-            name = "http-body-util";
-            packageId = "http-body-util";
-          }
-          {
-            name = "hyper";
-            packageId = "hyper 1.2.0";
-            optional = true;
-          }
-          {
-            name = "hyper-util";
-            packageId = "hyper-util";
-            optional = true;
-            features = [ "tokio" "server" "server-auto" ];
-          }
-          {
-            name = "itoa";
-            packageId = "itoa";
-          }
-          {
-            name = "matchit";
-            packageId = "matchit";
-          }
-          {
-            name = "memchr";
-            packageId = "memchr";
-          }
-          {
-            name = "mime";
-            packageId = "mime";
-          }
-          {
-            name = "percent-encoding";
-            packageId = "percent-encoding";
-          }
-          {
-            name = "pin-project-lite";
-            packageId = "pin-project-lite";
-          }
-          {
-            name = "serde";
-            packageId = "serde";
-          }
-          {
-            name = "serde_json";
-            packageId = "serde_json";
-            optional = true;
-            features = [ "raw_value" ];
-          }
-          {
-            name = "serde_path_to_error";
-            packageId = "serde_path_to_error";
-            optional = true;
-          }
-          {
-            name = "serde_urlencoded";
-            packageId = "serde_urlencoded";
-            optional = true;
-          }
-          {
-            name = "sync_wrapper";
-            packageId = "sync_wrapper";
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            rename = "tokio";
-            optional = true;
-            features = [ "time" ];
-          }
-          {
-            name = "tower";
-            packageId = "tower";
-            usesDefaultFeatures = false;
-            features = [ "util" ];
-          }
-          {
-            name = "tower-layer";
-            packageId = "tower-layer";
-          }
-          {
-            name = "tower-service";
-            packageId = "tower-service";
-          }
-          {
-            name = "tracing";
-            packageId = "tracing";
-            optional = true;
-            usesDefaultFeatures = false;
-          }
-        ];
-        buildDependencies = [
-          {
-            name = "rustversion";
-            packageId = "rustversion";
-          }
-        ];
-        devDependencies = [
-          {
-            name = "rustversion";
-            packageId = "rustversion";
-          }
-          {
-            name = "serde";
-            packageId = "serde";
-            features = [ "derive" ];
-          }
-          {
-            name = "serde_json";
-            packageId = "serde_json";
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            rename = "tokio";
-            features = [ "macros" "rt" "rt-multi-thread" "net" "test-util" ];
-          }
-          {
-            name = "tower";
-            packageId = "tower";
-            rename = "tower";
-            features = [ "util" "timeout" "limit" "load-shed" "steer" "filter" ];
-          }
-          {
-            name = "tracing";
-            packageId = "tracing";
-          }
-        ];
-        features = {
-          "__private_docs" = [ "tower/full" "dep:tower-http" ];
-          "default" = [ "form" "http1" "json" "matched-path" "original-uri" "query" "tokio" "tower-log" "tracing" ];
-          "form" = [ "dep:serde_urlencoded" ];
-          "http1" = [ "dep:hyper" "hyper?/http1" ];
-          "http2" = [ "dep:hyper" "hyper?/http2" ];
-          "json" = [ "dep:serde_json" "dep:serde_path_to_error" ];
-          "macros" = [ "dep:axum-macros" ];
-          "multipart" = [ "dep:multer" ];
-          "query" = [ "dep:serde_urlencoded" ];
-          "tokio" = [ "dep:hyper-util" "dep:tokio" "tokio/net" "tokio/rt" "tower/make" "tokio/macros" ];
-          "tower-log" = [ "tower/log" ];
-          "tracing" = [ "dep:tracing" "axum-core/tracing" ];
-          "ws" = [ "dep:hyper" "tokio" "dep:tokio-tungstenite" "dep:sha1" "dep:base64" ];
-        };
-        resolvedDefaultFeatures = [ "default" "form" "http1" "json" "matched-path" "original-uri" "query" "tokio" "tower-log" "tracing" ];
-      };
-      "axum-core 0.3.4" = rec {
+      "axum-core" = rec {
         crateName = "axum-core";
         version = "0.3.4";
         edition = "2021";
@@ -1271,11 +1144,11 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "http-body";
-            packageId = "http-body 0.4.6";
+            packageId = "http-body";
           }
           {
             name = "mime";
@@ -1309,85 +1182,6 @@ rec {
           "tracing" = [ "dep:tracing" ];
         };
       };
-      "axum-core 0.4.3" = rec {
-        crateName = "axum-core";
-        version = "0.4.3";
-        edition = "2021";
-        sha256 = "1qx28wg4j6qdcdrisqwyaavlzc0zvbsrcwa99zf9456lfbyn6p51";
-        dependencies = [
-          {
-            name = "async-trait";
-            packageId = "async-trait";
-          }
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-            features = [ "alloc" ];
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 1.0.0";
-          }
-          {
-            name = "http-body-util";
-            packageId = "http-body-util";
-          }
-          {
-            name = "mime";
-            packageId = "mime";
-          }
-          {
-            name = "pin-project-lite";
-            packageId = "pin-project-lite";
-          }
-          {
-            name = "sync_wrapper";
-            packageId = "sync_wrapper";
-          }
-          {
-            name = "tower-layer";
-            packageId = "tower-layer";
-          }
-          {
-            name = "tower-service";
-            packageId = "tower-service";
-          }
-          {
-            name = "tracing";
-            packageId = "tracing";
-            optional = true;
-            usesDefaultFeatures = false;
-          }
-        ];
-        buildDependencies = [
-          {
-            name = "rustversion";
-            packageId = "rustversion";
-          }
-        ];
-        devDependencies = [
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-            features = [ "alloc" ];
-          }
-        ];
-        features = {
-          "__private_docs" = [ "dep:tower-http" ];
-          "tracing" = [ "dep:tracing" ];
-        };
-        resolvedDefaultFeatures = [ "tracing" ];
-      };
       "backtrace" = rec {
         crateName = "backtrace";
         version = "0.3.69";
@@ -1497,7 +1291,7 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "log";
@@ -1505,7 +1299,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "prost-types";
@@ -1540,7 +1334,7 @@ rec {
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             features = [ "tls" "transport" ];
           }
           {
@@ -2214,11 +2008,11 @@ rec {
           }
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -2792,11 +2586,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -2850,11 +2644,11 @@ rec {
           }
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "strsim";
@@ -2889,7 +2683,7 @@ rec {
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -3328,7 +3122,7 @@ rec {
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -4107,11 +3901,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -4284,7 +4078,7 @@ rec {
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "client" "runtime" "http2" ];
           }
           {
@@ -4299,7 +4093,7 @@ rec {
           }
           {
             name = "rustls";
-            packageId = "rustls 0.21.10";
+            packageId = "rustls 0.21.12";
           }
           {
             name = "rustls-pemfile";
@@ -4477,83 +4271,11 @@ rec {
         ];
 
       };
-      "h2 0.3.24" = rec {
+      "h2" = rec {
         crateName = "h2";
-        version = "0.3.24";
+        version = "0.3.26";
         edition = "2018";
-        sha256 = "1jf9488b66nayxzp3iw3b2rb64y49hdbbywnv9wfwrsv14i48b5v";
-        authors = [
-          "Carl Lerche <me@carllerche.com>"
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "fnv";
-            packageId = "fnv";
-          }
-          {
-            name = "futures-core";
-            packageId = "futures-core";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "futures-sink";
-            packageId = "futures-sink";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "http";
-            packageId = "http 0.2.11";
-          }
-          {
-            name = "indexmap";
-            packageId = "indexmap 2.1.0";
-            features = [ "std" ];
-          }
-          {
-            name = "slab";
-            packageId = "slab";
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "io-util" ];
-          }
-          {
-            name = "tokio-util";
-            packageId = "tokio-util";
-            features = [ "codec" "io" ];
-          }
-          {
-            name = "tracing";
-            packageId = "tracing";
-            usesDefaultFeatures = false;
-            features = [ "std" ];
-          }
-        ];
-        devDependencies = [
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "rt-multi-thread" "macros" "sync" "net" ];
-          }
-        ];
-        features = { };
-      };
-      "h2 0.4.3" = rec {
-        crateName = "h2";
-        version = "0.4.3";
-        edition = "2021";
-        sha256 = "1m4rj76zl77jany6p10k4mm1cqwsrlc1dmgmxwp3jy7kwk92vvji";
+        sha256 = "1s7msnfv7xprzs6xzfj5sg6p8bjcdpcqcmjjbkd345cyi1x55zl1";
         authors = [
           "Carl Lerche <me@carllerche.com>"
           "Sean McArthur <sean@seanmonstar.com>"
@@ -4584,7 +4306,7 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 1.1.0";
+            packageId = "http";
           }
           {
             name = "indexmap";
@@ -4668,6 +4390,21 @@ rec {
         authors = [
           "Amanieu d'Antras <amanieu@gmail.com>"
         ];
+        dependencies = [
+          {
+            name = "ahash";
+            packageId = "ahash";
+            optional = true;
+            usesDefaultFeatures = false;
+          }
+          {
+            name = "allocator-api2";
+            packageId = "allocator-api2";
+            optional = true;
+            usesDefaultFeatures = false;
+            features = [ "alloc" ];
+          }
+        ];
         features = {
           "ahash" = [ "dep:ahash" ];
           "alloc" = [ "dep:alloc" ];
@@ -4682,7 +4419,7 @@ rec {
           "rustc-dep-of-std" = [ "nightly" "core" "compiler_builtins" "alloc" "rustc-internal-api" ];
           "serde" = [ "dep:serde" ];
         };
-        resolvedDefaultFeatures = [ "inline-more" "raw" ];
+        resolvedDefaultFeatures = [ "ahash" "allocator-api2" "default" "inline-more" "raw" ];
       };
       "heck" = rec {
         crateName = "heck";
@@ -4757,7 +4494,7 @@ rec {
         ];
 
       };
-      "http 0.2.11" = rec {
+      "http" = rec {
         crateName = "http";
         version = "0.2.11";
         edition = "2018";
@@ -4783,36 +4520,7 @@ rec {
         ];
 
       };
-      "http 1.1.0" = rec {
-        crateName = "http";
-        version = "1.1.0";
-        edition = "2018";
-        sha256 = "0n426lmcxas6h75c2cp25m933pswlrfjz10v91vc62vib2sdvf91";
-        authors = [
-          "Alex Crichton <alex@alexcrichton.com>"
-          "Carl Lerche <me@carllerche.com>"
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "fnv";
-            packageId = "fnv";
-          }
-          {
-            name = "itoa";
-            packageId = "itoa";
-          }
-        ];
-        features = {
-          "default" = [ "std" ];
-        };
-        resolvedDefaultFeatures = [ "default" "std" ];
-      };
-      "http-body 0.4.6" = rec {
+      "http-body" = rec {
         crateName = "http-body";
         version = "0.4.6";
         edition = "2018";
@@ -4829,63 +4537,7 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
-          }
-          {
-            name = "pin-project-lite";
-            packageId = "pin-project-lite";
-          }
-        ];
-
-      };
-      "http-body 1.0.0" = rec {
-        crateName = "http-body";
-        version = "1.0.0";
-        edition = "2018";
-        sha256 = "0hyn8n3iadrbwq8y0p1rl1275s4nm49bllw5wji29g4aa3dqbb0w";
-        authors = [
-          "Carl Lerche <me@carllerche.com>"
-          "Lucio Franco <luciofranco14@gmail.com>"
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-        ];
-
-      };
-      "http-body-util" = rec {
-        crateName = "http-body-util";
-        version = "0.1.1";
-        edition = "2018";
-        sha256 = "07agldas2qgcfc05ckiarlmf9vzragbda823nqhrqrc6mjrghx84";
-        authors = [
-          "Carl Lerche <me@carllerche.com>"
-          "Lucio Franco <luciofranco14@gmail.com>"
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-core";
-            packageId = "futures-core";
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 1.0.0";
+            packageId = "http";
           }
           {
             name = "pin-project-lite";
@@ -4927,7 +4579,7 @@ rec {
         ];
 
       };
-      "hyper 0.14.28" = rec {
+      "hyper" = rec {
         crateName = "hyper";
         version = "0.14.28";
         edition = "2018";
@@ -4956,16 +4608,16 @@ rec {
           }
           {
             name = "h2";
-            packageId = "h2 0.3.24";
+            packageId = "h2";
             optional = true;
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "http-body";
-            packageId = "http-body 0.4.6";
+            packageId = "http-body";
           }
           {
             name = "httparse";
@@ -5034,104 +4686,6 @@ rec {
         };
         resolvedDefaultFeatures = [ "client" "default" "full" "h2" "http1" "http2" "runtime" "server" "socket2" "stream" "tcp" ];
       };
-      "hyper 1.2.0" = rec {
-        crateName = "hyper";
-        version = "1.2.0";
-        edition = "2021";
-        sha256 = "0fi6k7hz5fmdph0a5r8hw50d7h2n9zxkizmafcmb65f67bblhr8q";
-        authors = [
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-channel";
-            packageId = "futures-channel";
-            optional = true;
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            optional = true;
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "h2";
-            packageId = "h2 0.4.3";
-            optional = true;
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 1.0.0";
-          }
-          {
-            name = "httparse";
-            packageId = "httparse";
-            optional = true;
-          }
-          {
-            name = "httpdate";
-            packageId = "httpdate";
-            optional = true;
-          }
-          {
-            name = "itoa";
-            packageId = "itoa";
-            optional = true;
-          }
-          {
-            name = "pin-project-lite";
-            packageId = "pin-project-lite";
-            optional = true;
-          }
-          {
-            name = "smallvec";
-            packageId = "smallvec";
-            optional = true;
-            features = [ "const_generics" "const_new" ];
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "sync" ];
-          }
-        ];
-        devDependencies = [
-          {
-            name = "futures-channel";
-            packageId = "futures-channel";
-            features = [ "sink" ];
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-            features = [ "sink" ];
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "fs" "macros" "net" "io-std" "io-util" "rt" "rt-multi-thread" "sync" "time" "test-util" ];
-          }
-        ];
-        features = {
-          "client" = [ "dep:want" "dep:pin-project-lite" "dep:smallvec" ];
-          "ffi" = [ "dep:libc" "dep:http-body-util" ];
-          "full" = [ "client" "http1" "http2" "server" ];
-          "http1" = [ "dep:futures-channel" "dep:futures-util" "dep:httparse" "dep:itoa" ];
-          "http2" = [ "dep:futures-channel" "dep:futures-util" "dep:h2" ];
-          "server" = [ "dep:httpdate" "dep:pin-project-lite" "dep:smallvec" ];
-          "tracing" = [ "dep:tracing" ];
-        };
-        resolvedDefaultFeatures = [ "default" "http1" "http2" "server" ];
-      };
       "hyper-rustls" = rec {
         crateName = "hyper-rustls";
         version = "0.24.2";
@@ -5145,17 +4699,17 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             usesDefaultFeatures = false;
             features = [ "client" ];
           }
           {
             name = "rustls";
-            packageId = "rustls 0.21.10";
+            packageId = "rustls 0.21.12";
             usesDefaultFeatures = false;
           }
           {
@@ -5176,12 +4730,12 @@ rec {
         devDependencies = [
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "full" ];
           }
           {
             name = "rustls";
-            packageId = "rustls 0.21.10";
+            packageId = "rustls 0.21.12";
             usesDefaultFeatures = false;
             features = [ "tls12" ];
           }
@@ -5218,7 +4772,7 @@ rec {
         dependencies = [
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "client" ];
           }
           {
@@ -5237,7 +4791,7 @@ rec {
         devDependencies = [
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "client" "http1" "tcp" ];
           }
           {
@@ -5248,82 +4802,6 @@ rec {
         ];
 
       };
-      "hyper-util" = rec {
-        crateName = "hyper-util";
-        version = "0.1.3";
-        edition = "2021";
-        sha256 = "1akngan7j0n2n0wd25c6952mvqbkj9gp1lcwzyxjc0d37l8yyf6a";
-        authors = [
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 1.0.0";
-          }
-          {
-            name = "hyper";
-            packageId = "hyper 1.2.0";
-          }
-          {
-            name = "pin-project-lite";
-            packageId = "pin-project-lite";
-          }
-          {
-            name = "socket2";
-            packageId = "socket2";
-            optional = true;
-            features = [ "all" ];
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            optional = true;
-            features = [ "net" "rt" "time" ];
-          }
-        ];
-        devDependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "hyper";
-            packageId = "hyper 1.2.0";
-            features = [ "full" ];
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "macros" "test-util" ];
-          }
-        ];
-        features = {
-          "client" = [ "hyper/client" "dep:tracing" "dep:futures-channel" "dep:tower" "dep:tower-service" ];
-          "client-legacy" = [ "client" ];
-          "full" = [ "client" "client-legacy" "server" "server-auto" "service" "http1" "http2" "tokio" ];
-          "http1" = [ "hyper/http1" ];
-          "http2" = [ "hyper/http2" ];
-          "server" = [ "hyper/server" ];
-          "server-auto" = [ "server" "http1" "http2" ];
-          "service" = [ "dep:tower" "dep:tower-service" ];
-          "tokio" = [ "dep:tokio" "dep:socket2" ];
-        };
-        resolvedDefaultFeatures = [ "default" "http1" "http2" "server" "server-auto" "tokio" ];
-      };
       "iana-time-zone" = rec {
         crateName = "iana-time-zone";
         version = "0.1.60";
@@ -6155,6 +5633,28 @@ rec {
         };
         resolvedDefaultFeatures = [ "std" ];
       };
+      "lru" = rec {
+        crateName = "lru";
+        version = "0.12.3";
+        edition = "2015";
+        sha256 = "1p5hryc967wdh56q9wzb2x9gdqy3yd0sqmnb2fcf7z28wrsjw9nk";
+        authors = [
+          "Jerome Froelich <jeromefroelic@hotmail.com>"
+        ];
+        dependencies = [
+          {
+            name = "hashbrown";
+            packageId = "hashbrown 0.14.3";
+            optional = true;
+          }
+        ];
+        features = {
+          "default" = [ "hashbrown" ];
+          "hashbrown" = [ "dep:hashbrown" ];
+          "nightly" = [ "hashbrown" "hashbrown/nightly" ];
+        };
+        resolvedDefaultFeatures = [ "default" "hashbrown" ];
+      };
       "lzma-sys" = rec {
         crateName = "lzma-sys";
         version = "0.1.20";
@@ -6424,9 +5924,9 @@ rec {
       };
       "mio" = rec {
         crateName = "mio";
-        version = "0.8.10";
+        version = "0.8.11";
         edition = "2018";
-        sha256 = "02gyaxvaia9zzi4drrw59k9s0j6pa5d1y2kv7iplwjipdqlhngcg";
+        sha256 = "034byyl0ardml5yliy1hmvx8arkmn9rv479pid794sm07ia519m4";
         authors = [
           "Carl Lerche <me@carllerche.com>"
           "Thomas de Zeeuw <thomasdezeeuw@gmail.com>"
@@ -6791,14 +6291,6 @@ rec {
             packageId = "serde_json";
           }
           {
-            name = "test-case";
-            packageId = "test-case";
-          }
-          {
-            name = "test-generator";
-            packageId = "test-generator";
-          }
-          {
             name = "tokio-test";
             packageId = "tokio-test";
           }
@@ -6809,11 +6301,12 @@ rec {
         ];
         features = {
           "async" = [ "tokio" ];
+          "default" = [ "async" "wire" ];
           "pin-project-lite" = [ "dep:pin-project-lite" ];
           "tokio" = [ "dep:tokio" ];
           "wire" = [ "tokio" "pin-project-lite" ];
         };
-        resolvedDefaultFeatures = [ "async" "pin-project-lite" "tokio" "wire" ];
+        resolvedDefaultFeatures = [ "async" "default" "pin-project-lite" "tokio" "wire" ];
       };
       "nom" = rec {
         crateName = "nom";
@@ -7016,7 +6509,7 @@ rec {
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             optional = true;
             usesDefaultFeatures = false;
           }
@@ -7032,7 +6525,7 @@ rec {
           }
           {
             name = "parking_lot";
-            packageId = "parking_lot 0.12.1";
+            packageId = "parking_lot 0.12.2";
           }
           {
             name = "percent-encoding";
@@ -7110,7 +6603,7 @@ rec {
         devDependencies = [
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "server" ];
           }
           {
@@ -7179,9 +6672,9 @@ rec {
       };
       "opentelemetry" = rec {
         crateName = "opentelemetry";
-        version = "0.21.0";
+        version = "0.22.0";
         edition = "2021";
-        sha256 = "12jfmyx8k9q2sjlx4wp76ddzaf94i7lnkliv1c9mj164bnd36chy";
+        sha256 = "1gv70rx8172g9n82v9f97ircax7v4ydzyprq1nvsxwp3gfc5f3ch";
         dependencies = [
           {
             name = "futures-core";
@@ -7192,10 +6685,6 @@ rec {
             packageId = "futures-sink";
           }
           {
-            name = "indexmap";
-            packageId = "indexmap 2.1.0";
-          }
-          {
             name = "js-sys";
             packageId = "js-sys";
             target = { target, features }: (("wasm32" == target."arch" or null) && (!("wasi" == target."os" or null)));
@@ -7212,6 +6701,7 @@ rec {
           {
             name = "thiserror";
             packageId = "thiserror";
+            usesDefaultFeatures = false;
           }
           {
             name = "urlencoding";
@@ -7229,9 +6719,9 @@ rec {
       };
       "opentelemetry-otlp" = rec {
         crateName = "opentelemetry-otlp";
-        version = "0.14.0";
+        version = "0.15.0";
         edition = "2021";
-        sha256 = "0c59bh4wa824mf89ayivsjqwipkg1y6r27r4d0y47lhfna1xlk7j";
+        sha256 = "1jxbi5w4xgwg4gcj0lz4310y926bglw25b2546pkkilmjj6nn08s";
         dependencies = [
           {
             name = "async-trait";
@@ -7243,8 +6733,9 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
             optional = true;
+            usesDefaultFeatures = false;
           }
           {
             name = "opentelemetry";
@@ -7267,45 +6758,45 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.11.9";
+            packageId = "prost";
             optional = true;
           }
           {
             name = "thiserror";
             packageId = "thiserror";
+            usesDefaultFeatures = false;
           }
           {
             name = "tokio";
             packageId = "tokio";
             optional = true;
+            usesDefaultFeatures = false;
             features = [ "sync" "rt" ];
           }
           {
             name = "tonic";
-            packageId = "tonic 0.9.2";
+            packageId = "tonic";
             optional = true;
+            usesDefaultFeatures = false;
           }
         ];
         devDependencies = [
           {
             name = "tokio";
             packageId = "tokio";
+            usesDefaultFeatures = false;
             features = [ "macros" "rt-multi-thread" ];
           }
         ];
         features = {
           "default" = [ "grpc-tonic" "trace" ];
-          "grpc-sys" = [ "grpcio" "opentelemetry-proto/gen-grpcio" ];
           "grpc-tonic" = [ "tonic" "prost" "http" "tokio" "opentelemetry-proto/gen-tonic" ];
-          "grpcio" = [ "dep:grpcio" ];
           "gzip-tonic" = [ "tonic/gzip" ];
           "http" = [ "dep:http" ];
           "http-proto" = [ "prost" "opentelemetry-http" "opentelemetry-proto/gen-tonic-messages" "http" "trace" "metrics" ];
           "integration-testing" = [ "tonic" "prost" "tokio/full" "trace" ];
           "logs" = [ "opentelemetry/logs" "opentelemetry_sdk/logs" "opentelemetry-proto/logs" ];
           "metrics" = [ "opentelemetry/metrics" "opentelemetry_sdk/metrics" "opentelemetry-proto/metrics" ];
-          "openssl" = [ "grpcio/openssl" ];
-          "openssl-vendored" = [ "grpcio/openssl-vendored" ];
           "opentelemetry-http" = [ "dep:opentelemetry-http" ];
           "prost" = [ "dep:prost" ];
           "reqwest" = [ "dep:reqwest" ];
@@ -7314,8 +6805,6 @@ rec {
           "reqwest-rustls" = [ "reqwest" "reqwest/rustls-tls-native-roots" ];
           "serde" = [ "dep:serde" ];
           "serialize" = [ "serde" ];
-          "surf" = [ "dep:surf" ];
-          "surf-client" = [ "surf" "opentelemetry-http/surf" ];
           "tls" = [ "tonic/tls" ];
           "tls-roots" = [ "tls" "tonic/tls-roots" ];
           "tokio" = [ "dep:tokio" ];
@@ -7326,9 +6815,9 @@ rec {
       };
       "opentelemetry-proto" = rec {
         crateName = "opentelemetry-proto";
-        version = "0.4.0";
+        version = "0.5.0";
         edition = "2021";
-        sha256 = "1qblsq0hkksdw3k60bc8yi5xwlynmqwibggz3lyyl4n8bk75bqd2";
+        sha256 = "1r5a1k4fryqijhsar36ld806yf82isw11xfnx7d80nwgnv4xv3rs";
         dependencies = [
           {
             name = "opentelemetry";
@@ -7342,53 +6831,47 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.11.9";
+            packageId = "prost";
             optional = true;
           }
           {
             name = "tonic";
-            packageId = "tonic 0.9.2";
+            packageId = "tonic";
             optional = true;
             usesDefaultFeatures = false;
             features = [ "codegen" "prost" ];
           }
         ];
         features = {
-          "full" = [ "gen-tonic" "gen-grpcio" "trace" "logs" "metrics" "zpages" "with-serde" ];
-          "gen-grpcio" = [ "grpcio" "prost" ];
+          "full" = [ "gen-tonic" "trace" "logs" "metrics" "zpages" "with-serde" ];
           "gen-tonic" = [ "gen-tonic-messages" "tonic/transport" ];
           "gen-tonic-messages" = [ "tonic" "prost" ];
-          "grpcio" = [ "dep:grpcio" ];
+          "hex" = [ "dep:hex" ];
           "logs" = [ "opentelemetry/logs" "opentelemetry_sdk/logs" ];
           "metrics" = [ "opentelemetry/metrics" "opentelemetry_sdk/metrics" ];
           "prost" = [ "dep:prost" ];
+          "schemars" = [ "dep:schemars" ];
           "serde" = [ "dep:serde" ];
           "tonic" = [ "dep:tonic" ];
           "trace" = [ "opentelemetry/trace" "opentelemetry_sdk/trace" ];
-          "with-serde" = [ "serde" ];
+          "with-schemars" = [ "schemars" ];
+          "with-serde" = [ "serde" "hex" ];
           "zpages" = [ "trace" ];
         };
         resolvedDefaultFeatures = [ "gen-tonic" "gen-tonic-messages" "prost" "tonic" "trace" ];
       };
       "opentelemetry-semantic-conventions" = rec {
         crateName = "opentelemetry-semantic-conventions";
-        version = "0.13.0";
+        version = "0.14.0";
         edition = "2021";
-        sha256 = "115wbgk840dklyhpg3lwp4x1m643qd7f0vkz8hmfz0pry4g4yxzm";
-        dependencies = [
-          {
-            name = "opentelemetry";
-            packageId = "opentelemetry";
-            usesDefaultFeatures = false;
-          }
-        ];
+        sha256 = "04197racbkpj75fh9jnwkdznjzv6l2ljpbr8ryfk9f9gqkb5pazr";
 
       };
       "opentelemetry_sdk" = rec {
         crateName = "opentelemetry_sdk";
-        version = "0.21.2";
+        version = "0.22.1";
         edition = "2021";
-        sha256 = "1r7gw2j2n800rd0vdnga32yhlfmc3c4y0sadcr97licam74aw5ig";
+        sha256 = "0zkbkl29qik7cfmwbhr2ncink8fp9vi5x2qgk8gf6jg67c8wg44y";
         dependencies = [
           {
             name = "async-trait";
@@ -7441,11 +6924,12 @@ rec {
             packageId = "rand";
             optional = true;
             usesDefaultFeatures = false;
-            features = [ "std" "std_rng" ];
+            features = [ "std" "std_rng" "small_rng" ];
           }
           {
             name = "thiserror";
             packageId = "thiserror";
+            usesDefaultFeatures = false;
           }
           {
             name = "tokio";
@@ -7603,11 +7087,11 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" ];
       };
-      "parking_lot 0.12.1" = rec {
+      "parking_lot 0.12.2" = rec {
         crateName = "parking_lot";
-        version = "0.12.1";
-        edition = "2018";
-        sha256 = "13r2xk7mnxfc5g0g6dkdxqdqad99j7s7z8zhzz4npw5r0g0v4hip";
+        version = "0.12.2";
+        edition = "2021";
+        sha256 = "1ys2dzz6cysjmwyivwxczl1ljpcf5cj4qmhdj07d5bkc9z5g0jky";
         authors = [
           "Amanieu d'Antras <amanieu@gmail.com>"
         ];
@@ -7792,11 +7276,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -8115,7 +7599,7 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
             usesDefaultFeatures = false;
           }
           {
@@ -8128,7 +7612,7 @@ rec {
         devDependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
             usesDefaultFeatures = false;
           }
           {
@@ -8142,26 +7626,7 @@ rec {
           "verbatim" = [ "syn/parsing" ];
         };
       };
-      "proc-macro2 0.4.30" = rec {
-        crateName = "proc-macro2";
-        version = "0.4.30";
-        edition = "2015";
-        sha256 = "0nd71fl24sys066jrha6j7i34nfkjv44yzw8yww9742wmc8j0gfg";
-        authors = [
-          "Alex Crichton <alex@alexcrichton.com>"
-        ];
-        dependencies = [
-          {
-            name = "unicode-xid";
-            packageId = "unicode-xid";
-          }
-        ];
-        features = {
-          "default" = [ "proc-macro" ];
-        };
-        resolvedDefaultFeatures = [ "default" "proc-macro" ];
-      };
-      "proc-macro2 1.0.76" = rec {
+      "proc-macro2" = rec {
         crateName = "proc-macro2";
         version = "1.0.76";
         edition = "2021";
@@ -8267,35 +7732,7 @@ rec {
         };
         resolvedDefaultFeatures = [ "alloc" "bit-set" "default" "fork" "lazy_static" "regex-syntax" "rusty-fork" "std" "tempfile" "timeout" ];
       };
-      "prost 0.11.9" = rec {
-        crateName = "prost";
-        version = "0.11.9";
-        edition = "2021";
-        sha256 = "1kc1hva2h894hc0zf6r4r8fsxfpazf7xn5rj3jya9sbrsyhym0hb";
-        authors = [
-          "Dan Burkert <dan@danburkert.com>"
-          "Lucio Franco <luciofranco14@gmail.com"
-          "Tokio Contributors <team@tokio.rs>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "prost-derive";
-            packageId = "prost-derive 0.11.9";
-            optional = true;
-          }
-        ];
-        features = {
-          "default" = [ "prost-derive" "std" ];
-          "prost-derive" = [ "dep:prost-derive" ];
-        };
-        resolvedDefaultFeatures = [ "default" "prost-derive" "std" ];
-      };
-      "prost 0.12.3" = rec {
+      "prost" = rec {
         crateName = "prost";
         version = "0.12.3";
         edition = "2021";
@@ -8313,7 +7750,7 @@ rec {
           }
           {
             name = "prost-derive";
-            packageId = "prost-derive 0.12.3";
+            packageId = "prost-derive";
             optional = true;
           }
         ];
@@ -8374,7 +7811,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
             usesDefaultFeatures = false;
           }
           {
@@ -8425,45 +7862,7 @@ rec {
         };
         resolvedDefaultFeatures = [ "cleanup-markdown" "default" "format" "prettyplease" "pulldown-cmark" "pulldown-cmark-to-cmark" "syn" ];
       };
-      "prost-derive 0.11.9" = rec {
-        crateName = "prost-derive";
-        version = "0.11.9";
-        edition = "2021";
-        sha256 = "1d3mw2s2jba1f7wcjmjd6ha2a255p2rmynxhm1nysv9w1z8xilp5";
-        procMacro = true;
-        authors = [
-          "Dan Burkert <dan@danburkert.com>"
-          "Lucio Franco <luciofranco14@gmail.com>"
-          "Tokio Contributors <team@tokio.rs>"
-        ];
-        dependencies = [
-          {
-            name = "anyhow";
-            packageId = "anyhow";
-          }
-          {
-            name = "itertools";
-            packageId = "itertools 0.10.5";
-            usesDefaultFeatures = false;
-            features = [ "use_alloc" ];
-          }
-          {
-            name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
-          }
-          {
-            name = "quote";
-            packageId = "quote 1.0.35";
-          }
-          {
-            name = "syn";
-            packageId = "syn 1.0.109";
-            features = [ "extra-traits" ];
-          }
-        ];
-
-      };
-      "prost-derive 0.12.3" = rec {
+      "prost-derive" = rec {
         crateName = "prost-derive";
         version = "0.12.3";
         edition = "2021";
@@ -8487,11 +7886,11 @@ rec {
           }
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -8514,7 +7913,7 @@ rec {
         dependencies = [
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
             usesDefaultFeatures = false;
             features = [ "prost-derive" ];
           }
@@ -8546,7 +7945,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "serde";
@@ -8582,7 +7981,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "prost-build";
@@ -8594,7 +7993,7 @@ rec {
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
         ];
 
@@ -8616,7 +8015,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "prost-wkt";
@@ -8638,7 +8037,7 @@ rec {
         buildDependencies = [
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "prost-build";
@@ -8756,28 +8155,7 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" "overlapped-lists" "serde" "serialize" ];
       };
-      "quote 0.6.13" = rec {
-        crateName = "quote";
-        version = "0.6.13";
-        edition = "2015";
-        sha256 = "1qgqq48jymp5h4y082aanf25hrw6bpb678xh3zw993qfhxmkpqkc";
-        authors = [
-          "David Tolnay <dtolnay@gmail.com>"
-        ];
-        dependencies = [
-          {
-            name = "proc-macro2";
-            packageId = "proc-macro2 0.4.30";
-            usesDefaultFeatures = false;
-          }
-        ];
-        features = {
-          "default" = [ "proc-macro" ];
-          "proc-macro" = [ "proc-macro2/proc-macro" ];
-        };
-        resolvedDefaultFeatures = [ "default" "proc-macro" ];
-      };
-      "quote 1.0.35" = rec {
+      "quote" = rec {
         crateName = "quote";
         version = "1.0.35";
         edition = "2018";
@@ -8788,7 +8166,7 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
             usesDefaultFeatures = false;
           }
         ];
@@ -9312,21 +8690,21 @@ rec {
           }
           {
             name = "h2";
-            packageId = "h2 0.3.24";
+            packageId = "h2";
             target = { target, features }: (!("wasm32" == target."arch" or null));
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "http-body";
-            packageId = "http-body 0.4.6";
+            packageId = "http-body";
             target = { target, features }: (!("wasm32" == target."arch" or null));
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             usesDefaultFeatures = false;
             target = { target, features }: (!("wasm32" == target."arch" or null));
             features = [ "tcp" "http1" "http2" "client" "runtime" ];
@@ -9375,7 +8753,7 @@ rec {
           }
           {
             name = "rustls";
-            packageId = "rustls 0.21.10";
+            packageId = "rustls 0.21.12";
             optional = true;
             target = { target, features }: (!("wasm32" == target."arch" or null));
             features = [ "dangerous_configuration" ];
@@ -9475,7 +8853,7 @@ rec {
         devDependencies = [
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             usesDefaultFeatures = false;
             target = { target, features }: (!("wasm32" == target."arch" or null));
             features = [ "tcp" "stream" "http1" "http2" "client" "server" "runtime" ];
@@ -9717,11 +9095,11 @@ rec {
           }
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "regex";
@@ -9764,7 +9142,7 @@ rec {
         dependencies = [
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "rand";
@@ -9937,11 +9315,11 @@ rec {
         };
         resolvedDefaultFeatures = [ "alloc" "default" "event" "fs" "net" "pipe" "process" "std" "termios" "time" "use-libc-auxv" ];
       };
-      "rustls 0.21.10" = rec {
+      "rustls 0.21.12" = rec {
         crateName = "rustls";
-        version = "0.21.10";
+        version = "0.21.12";
         edition = "2021";
-        sha256 = "1fmpzk3axnhkd99saqkvraifdfms4pkyi56lkihf8n877j0sdmgr";
+        sha256 = "0gjdg2a9r81sdwkyw3n5yfbkrr6p9gyk3xr2kcsr3cs83x6s2miz";
         dependencies = [
           {
             name = "log";
@@ -9978,11 +9356,11 @@ rec {
         };
         resolvedDefaultFeatures = [ "dangerous_configuration" "default" "log" "logging" "tls12" ];
       };
-      "rustls 0.22.2" = rec {
+      "rustls 0.22.4" = rec {
         crateName = "rustls";
-        version = "0.22.2";
+        version = "0.22.4";
         edition = "2021";
-        sha256 = "0hcxyhq6ynvws9v5b2h81s1nwmijmya7a3vyyyhsy1wqpmb9jz78";
+        sha256 = "0cl4q6w0x1cl5ldjsgbbiiqhkz6qg5vxl5dkn9wwsyxc44vzfkmz";
         dependencies = [
           {
             name = "log";
@@ -10564,13 +9942,13 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
             usesDefaultFeatures = false;
             features = [ "proc-macro" ];
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
             usesDefaultFeatures = false;
             features = [ "proc-macro" ];
           }
@@ -10622,27 +10000,7 @@ rec {
           "preserve_order" = [ "indexmap" "std" ];
           "std" = [ "serde/std" ];
         };
-        resolvedDefaultFeatures = [ "alloc" "default" "raw_value" "std" ];
-      };
-      "serde_path_to_error" = rec {
-        crateName = "serde_path_to_error";
-        version = "0.1.16";
-        edition = "2021";
-        sha256 = "19hlz2359l37ifirskpcds7sxg0gzpqvfilibs7whdys0128i6dg";
-        authors = [
-          "David Tolnay <dtolnay@gmail.com>"
-        ];
-        dependencies = [
-          {
-            name = "itoa";
-            packageId = "itoa";
-          }
-          {
-            name = "serde";
-            packageId = "serde";
-          }
-        ];
-
+        resolvedDefaultFeatures = [ "alloc" "default" "std" ];
       };
       "serde_qs" = rec {
         crateName = "serde_qs";
@@ -10856,11 +10214,11 @@ rec {
           }
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -11105,7 +10463,6 @@ rec {
           "drain_keep_rest" = [ "drain_filter" ];
           "serde" = [ "dep:serde" ];
         };
-        resolvedDefaultFeatures = [ "const_generics" "const_new" ];
       };
       "smol_str" = rec {
         crateName = "smol_str";
@@ -11191,11 +10548,11 @@ rec {
           }
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -11331,11 +10688,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "structmeta-derive";
@@ -11367,11 +10724,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -11400,39 +10757,6 @@ rec {
           "default" = [ "std" "i128" ];
         };
       };
-      "syn 0.15.44" = rec {
-        crateName = "syn";
-        version = "0.15.44";
-        edition = "2015";
-        sha256 = "1id5g6x6zihv3j7hwrw3m1jp636bg8dpi671r7zy3jvpkavb794w";
-        authors = [
-          "David Tolnay <dtolnay@gmail.com>"
-        ];
-        dependencies = [
-          {
-            name = "proc-macro2";
-            packageId = "proc-macro2 0.4.30";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "quote";
-            packageId = "quote 0.6.13";
-            optional = true;
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "unicode-xid";
-            packageId = "unicode-xid";
-          }
-        ];
-        features = {
-          "default" = [ "derive" "parsing" "printing" "clone-impls" "proc-macro" ];
-          "printing" = [ "quote" ];
-          "proc-macro" = [ "proc-macro2/proc-macro" "quote/proc-macro" ];
-          "quote" = [ "dep:quote" ];
-        };
-        resolvedDefaultFeatures = [ "clone-impls" "default" "derive" "full" "parsing" "printing" "proc-macro" "quote" ];
-      };
       "syn 1.0.109" = rec {
         crateName = "syn";
         version = "1.0.109";
@@ -11444,12 +10768,12 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
             usesDefaultFeatures = false;
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
             optional = true;
             usesDefaultFeatures = false;
           }
@@ -11478,12 +10802,12 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
             usesDefaultFeatures = false;
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
             optional = true;
             usesDefaultFeatures = false;
           }
@@ -11632,129 +10956,6 @@ rec {
         ];
 
       };
-      "test-case" = rec {
-        crateName = "test-case";
-        version = "3.3.1";
-        edition = "2021";
-        sha256 = "1a380yzm6787737cw7s09jqmkn9035hghahradl2ikdg2gfm09gb";
-        authors = [
-          "Marcin Sas-Szymanski <marcin.sas-szymanski@anixe.pl>"
-          "Wojciech Polak <frondeus@gmail.com>"
-          "ลukasz Biel <lukasz.p.biel@gmail.com>"
-        ];
-        dependencies = [
-          {
-            name = "test-case-macros";
-            packageId = "test-case-macros";
-            usesDefaultFeatures = false;
-          }
-        ];
-        features = {
-          "regex" = [ "dep:regex" ];
-          "with-regex" = [ "regex" "test-case-macros/with-regex" ];
-        };
-      };
-      "test-case-core" = rec {
-        crateName = "test-case-core";
-        version = "3.3.1";
-        edition = "2021";
-        sha256 = "0krqi0gbi1yyycigyjlak63r8h1n0vms7mg3kckqwlfd87c7zjxd";
-        authors = [
-          "Marcin Sas-Szymanski <marcin.sas-szymanski@anixe.pl>"
-          "Wojciech Polak <frondeus@gmail.com>"
-          "ลukasz Biel <lukasz.p.biel@gmail.com>"
-        ];
-        dependencies = [
-          {
-            name = "cfg-if";
-            packageId = "cfg-if";
-          }
-          {
-            name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
-          }
-          {
-            name = "quote";
-            packageId = "quote 1.0.35";
-          }
-          {
-            name = "syn";
-            packageId = "syn 2.0.48";
-            features = [ "full" "extra-traits" ];
-          }
-        ];
-        features = { };
-      };
-      "test-case-macros" = rec {
-        crateName = "test-case-macros";
-        version = "3.3.1";
-        edition = "2021";
-        sha256 = "1yvgky3qax73bic6m368q04xc955p4a91mddd6b5fk7d04mfg2aw";
-        procMacro = true;
-        authors = [
-          "Marcin Sas-Szymanski <marcin.sas-szymanski@anixe.pl>"
-          "Wojciech Polak <frondeus@gmail.com>"
-          "ลukasz Biel <lukasz.p.biel@gmail.com>"
-        ];
-        dependencies = [
-          {
-            name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
-          }
-          {
-            name = "quote";
-            packageId = "quote 1.0.35";
-          }
-          {
-            name = "syn";
-            packageId = "syn 2.0.48";
-            features = [ "full" "extra-traits" "parsing" ];
-          }
-          {
-            name = "test-case-core";
-            packageId = "test-case-core";
-            usesDefaultFeatures = false;
-          }
-        ];
-        features = {
-          "with-regex" = [ "test-case-core/with-regex" ];
-        };
-      };
-      "test-generator" = rec {
-        crateName = "test-generator";
-        version = "0.3.0";
-        edition = "2018";
-        workspace_member = null;
-        src = pkgs.fetchgit {
-          url = "https://github.com/JamesGuthrie/test-generator.git";
-          rev = "82e799979980962aec1aa324ec6e0e4cad781f41";
-          sha256 = "08brp3qqa55hijc7xby3lam2cc84hvx1zzfqv6lj7smlczh8k32y";
-        };
-        procMacro = true;
-        authors = [
-          "Frank Rehberger <frehberg@gmail.com>"
-        ];
-        dependencies = [
-          {
-            name = "glob";
-            packageId = "glob";
-          }
-          {
-            name = "proc-macro2";
-            packageId = "proc-macro2 0.4.30";
-          }
-          {
-            name = "quote";
-            packageId = "quote 0.6.13";
-          }
-          {
-            name = "syn";
-            packageId = "syn 0.15.44";
-            features = [ "full" ];
-          }
-        ];
-
-      };
       "test-strategy" = rec {
         crateName = "test-strategy";
         version = "0.2.1";
@@ -11767,11 +10968,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "structmeta";
@@ -11826,11 +11027,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -12165,16 +11366,11 @@ rec {
       };
       "tokio-listener" = rec {
         crateName = "tokio-listener";
-        version = "0.3.2";
+        version = "0.4.2";
         edition = "2021";
-        sha256 = "00vkr1cywd2agn8jbkzwwf7y4ps3cfjm8l9ab697px2cgc97wdln";
+        sha256 = "1cm6r5dmpq96s8gw9dgsinq5g8s466j48dg7dckwc4gc28g6cd21";
         dependencies = [
           {
-            name = "axum";
-            packageId = "axum 0.7.4";
-            rename = "axum07";
-          }
-          {
             name = "document-features";
             packageId = "document-features";
           }
@@ -12218,7 +11414,7 @@ rec {
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             rename = "tonic";
             optional = true;
           }
@@ -12235,12 +11431,14 @@ rec {
           }
         ];
         features = {
-          "axum07" = [ "dep:hyper1" "dep:hyper-util" "dep:futures-util" "dep:tower-service" "dep:tower" ];
+          "axum07" = [ "dep:hyper1" "dep:hyper-util" "dep:futures-util" "dep:tower-service" "dep:tower" "dep:axum07" ];
           "clap" = [ "dep:clap" ];
           "default" = [ "user_facing_default" "tokio-util" ];
           "hyper014" = [ "dep:hyper014" ];
           "inetd" = [ "dep:futures-util" ];
+          "multi-listener" = [ "dep:futures-util" ];
           "nix" = [ "dep:nix" ];
+          "sd_listen" = [ "socket2" ];
           "serde" = [ "dep:serde" "serde_with" ];
           "serde_with" = [ "dep:serde_with" ];
           "socket2" = [ "dep:socket2" ];
@@ -12265,11 +11463,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -12319,7 +11517,7 @@ rec {
         dependencies = [
           {
             name = "rustls";
-            packageId = "rustls 0.21.10";
+            packageId = "rustls 0.21.12";
             usesDefaultFeatures = false;
           }
           {
@@ -12351,7 +11549,7 @@ rec {
         dependencies = [
           {
             name = "rustls";
-            packageId = "rustls 0.22.2";
+            packageId = "rustls 0.22.4";
             usesDefaultFeatures = false;
           }
           {
@@ -12695,7 +11893,7 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" "serde" ];
       };
-      "tonic 0.11.0" = rec {
+      "tonic" = rec {
         crateName = "tonic";
         version = "0.11.0";
         edition = "2021";
@@ -12716,7 +11914,7 @@ rec {
           }
           {
             name = "axum";
-            packageId = "axum 0.6.20";
+            packageId = "axum";
             optional = true;
             usesDefaultFeatures = false;
           }
@@ -12730,20 +11928,20 @@ rec {
           }
           {
             name = "h2";
-            packageId = "h2 0.3.24";
+            packageId = "h2";
             optional = true;
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "http-body";
-            packageId = "http-body 0.4.6";
+            packageId = "http-body";
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             optional = true;
             features = [ "full" ];
           }
@@ -12762,7 +11960,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
             optional = true;
             usesDefaultFeatures = false;
             features = [ "std" ];
@@ -12841,139 +12039,6 @@ rec {
         };
         resolvedDefaultFeatures = [ "channel" "codegen" "default" "prost" "tls" "tls-roots" "tls-roots-common" "transport" ];
       };
-      "tonic 0.9.2" = rec {
-        crateName = "tonic";
-        version = "0.9.2";
-        edition = "2021";
-        sha256 = "0nlx35lvah5hdcp6lg1d6dlprq0zz8ijj6f727szfcv479m6d0ih";
-        authors = [
-          "Lucio Franco <luciofranco14@gmail.com>"
-        ];
-        dependencies = [
-          {
-            name = "async-trait";
-            packageId = "async-trait";
-            optional = true;
-          }
-          {
-            name = "axum";
-            packageId = "axum 0.6.20";
-            optional = true;
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "base64";
-            packageId = "base64";
-          }
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-core";
-            packageId = "futures-core";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "h2";
-            packageId = "h2 0.3.24";
-            optional = true;
-          }
-          {
-            name = "http";
-            packageId = "http 0.2.11";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 0.4.6";
-          }
-          {
-            name = "hyper";
-            packageId = "hyper 0.14.28";
-            optional = true;
-            features = [ "full" ];
-          }
-          {
-            name = "hyper-timeout";
-            packageId = "hyper-timeout";
-            optional = true;
-          }
-          {
-            name = "percent-encoding";
-            packageId = "percent-encoding";
-          }
-          {
-            name = "pin-project";
-            packageId = "pin-project";
-          }
-          {
-            name = "prost";
-            packageId = "prost 0.11.9";
-            optional = true;
-            usesDefaultFeatures = false;
-            features = [ "std" ];
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            optional = true;
-            features = [ "net" "time" "macros" ];
-          }
-          {
-            name = "tokio-stream";
-            packageId = "tokio-stream";
-          }
-          {
-            name = "tower";
-            packageId = "tower";
-            optional = true;
-            usesDefaultFeatures = false;
-            features = [ "balance" "buffer" "discover" "limit" "load" "make" "timeout" "util" ];
-          }
-          {
-            name = "tower-layer";
-            packageId = "tower-layer";
-          }
-          {
-            name = "tower-service";
-            packageId = "tower-service";
-          }
-          {
-            name = "tracing";
-            packageId = "tracing";
-          }
-        ];
-        devDependencies = [
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "rt" "macros" ];
-          }
-          {
-            name = "tower";
-            packageId = "tower";
-            features = [ "full" ];
-          }
-        ];
-        features = {
-          "channel" = [ "dep:h2" "dep:hyper" "dep:tokio" "dep:tower" "dep:hyper-timeout" ];
-          "codegen" = [ "dep:async-trait" ];
-          "default" = [ "transport" "codegen" "prost" ];
-          "gzip" = [ "dep:flate2" ];
-          "prost" = [ "dep:prost" ];
-          "tls" = [ "dep:rustls-pemfile" "transport" "dep:tokio-rustls" "dep:async-stream" ];
-          "tls-roots" = [ "tls-roots-common" "dep:rustls-native-certs" ];
-          "tls-roots-common" = [ "tls" ];
-          "tls-webpki-roots" = [ "tls-roots-common" "dep:webpki-roots" ];
-          "transport" = [ "dep:axum" "channel" ];
-        };
-        resolvedDefaultFeatures = [ "channel" "codegen" "default" "prost" "transport" ];
-      };
       "tonic-build" = rec {
         crateName = "tonic-build";
         version = "0.11.0";
@@ -12989,7 +12054,7 @@ rec {
           }
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "prost-build";
@@ -12998,7 +12063,7 @@ rec {
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -13025,7 +12090,7 @@ rec {
         dependencies = [
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "prost-types";
@@ -13046,7 +12111,7 @@ rec {
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             usesDefaultFeatures = false;
             features = [ "codegen" "prost" ];
           }
@@ -13054,7 +12119,7 @@ rec {
         devDependencies = [
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             usesDefaultFeatures = false;
             features = [ "transport" ];
           }
@@ -13268,11 +12333,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -13382,9 +12447,9 @@ rec {
       };
       "tracing-opentelemetry" = rec {
         crateName = "tracing-opentelemetry";
-        version = "0.22.0";
+        version = "0.23.0";
         edition = "2018";
-        sha256 = "15jmwmbp6wz15bx20bfsmabx53wmlnd7wvzwz9hvkrq7aifc4yn6";
+        sha256 = "1112kmckw0qwyckhbwarb230n4ldmfgzixr9jagbfjmy3fx19gm9";
         authors = [
           "Julian Tescher <julian@tescher.me>"
           "Tokio Contributors <team@tokio.rs>"
@@ -13472,6 +12537,7 @@ rec {
         features = {
           "async-trait" = [ "dep:async-trait" ];
           "default" = [ "tracing-log" "metrics" ];
+          "futures-util" = [ "dep:futures-util" ];
           "metrics" = [ "opentelemetry/metrics" "opentelemetry_sdk/metrics" "smallvec" ];
           "smallvec" = [ "dep:smallvec" ];
           "thiserror" = [ "dep:thiserror" ];
@@ -13479,30 +12545,6 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" "metrics" "smallvec" "tracing-log" ];
       };
-      "tracing-serde" = rec {
-        crateName = "tracing-serde";
-        version = "0.1.3";
-        edition = "2018";
-        sha256 = "1qfr0va69djvxqvjrx4vqq7p6myy414lx4w1f6amcn0hfwqj2sxw";
-        authors = [
-          "Tokio Contributors <team@tokio.rs>"
-        ];
-        dependencies = [
-          {
-            name = "serde";
-            packageId = "serde";
-          }
-          {
-            name = "tracing-core";
-            packageId = "tracing-core";
-          }
-        ];
-        features = {
-          "valuable" = [ "valuable_crate" "valuable-serde" "tracing-core/valuable" ];
-          "valuable-serde" = [ "dep:valuable-serde" ];
-          "valuable_crate" = [ "dep:valuable_crate" ];
-        };
-      };
       "tracing-subscriber" = rec {
         crateName = "tracing-subscriber";
         version = "0.3.18";
@@ -13537,16 +12579,6 @@ rec {
             features = [ "std" "unicode-case" "unicode-perl" ];
           }
           {
-            name = "serde";
-            packageId = "serde";
-            optional = true;
-          }
-          {
-            name = "serde_json";
-            packageId = "serde_json";
-            optional = true;
-          }
-          {
             name = "sharded-slab";
             packageId = "sharded-slab";
             optional = true;
@@ -13579,11 +12611,6 @@ rec {
             usesDefaultFeatures = false;
             features = [ "log-tracer" "std" ];
           }
-          {
-            name = "tracing-serde";
-            packageId = "tracing-serde";
-            optional = true;
-          }
         ];
         devDependencies = [
           {
@@ -13629,7 +12656,7 @@ rec {
           "valuable-serde" = [ "dep:valuable-serde" ];
           "valuable_crate" = [ "dep:valuable_crate" ];
         };
-        resolvedDefaultFeatures = [ "alloc" "ansi" "default" "env-filter" "fmt" "json" "matchers" "nu-ansi-term" "once_cell" "regex" "registry" "serde" "serde_json" "sharded-slab" "smallvec" "std" "thread_local" "tracing" "tracing-log" "tracing-serde" ];
+        resolvedDefaultFeatures = [ "alloc" "ansi" "default" "env-filter" "fmt" "matchers" "nu-ansi-term" "once_cell" "regex" "registry" "sharded-slab" "smallvec" "std" "thread_local" "tracing" "tracing-log" ];
       };
       "try-lock" = rec {
         crateName = "try-lock";
@@ -13674,7 +12701,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "thiserror";
@@ -13691,7 +12718,7 @@ rec {
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             features = [ "tls" "tls-roots" ];
           }
           {
@@ -13706,7 +12733,6 @@ rec {
           {
             name = "tracing-subscriber";
             packageId = "tracing-subscriber";
-            features = [ "json" ];
           }
           {
             name = "tvix-castore";
@@ -13729,8 +12755,8 @@ rec {
         ];
         devDependencies = [
           {
-            name = "test-case";
-            packageId = "test-case";
+            name = "rstest";
+            packageId = "rstest";
           }
         ];
         features = {
@@ -13750,6 +12776,11 @@ rec {
           else ./castore;
         dependencies = [
           {
+            name = "async-compression";
+            packageId = "async-compression";
+            features = [ "tokio" "zstd" ];
+          }
+          {
             name = "async-stream";
             packageId = "async-stream";
           }
@@ -13813,7 +12844,7 @@ rec {
           }
           {
             name = "parking_lot";
-            packageId = "parking_lot 0.12.1";
+            packageId = "parking_lot 0.12.2";
           }
           {
             name = "petgraph";
@@ -13825,7 +12856,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "serde";
@@ -13859,13 +12890,17 @@ rec {
             features = [ "fs" "net" ];
           }
           {
+            name = "tokio-tar";
+            packageId = "tokio-tar";
+          }
+          {
             name = "tokio-util";
             packageId = "tokio-util";
-            features = [ "io" "io-util" ];
+            features = [ "io" "io-util" "codec" ];
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
           }
           {
             name = "tonic-reflection";
@@ -13970,7 +13005,7 @@ rec {
           "tonic-reflection" = [ "dep:tonic-reflection" ];
           "virtiofs" = [ "fs" "dep:vhost" "dep:vhost-user-backend" "dep:virtio-queue" "dep:vm-memory" "dep:vmm-sys-util" "dep:virtio-bindings" "fuse-backend-rs?/vhost-user-fs" "fuse-backend-rs?/virtiofs" ];
         };
-        resolvedDefaultFeatures = [ "cloud" "default" "fs" "fuse" "tonic-reflection" "virtiofs" ];
+        resolvedDefaultFeatures = [ "cloud" "default" "fs" "fuse" "integration" "tonic-reflection" "virtiofs" ];
       };
       "tvix-cli" = rec {
         crateName = "tvix-cli";
@@ -14027,7 +13062,6 @@ rec {
           {
             name = "tracing-subscriber";
             packageId = "tracing-subscriber";
-            features = [ "json" ];
           }
           {
             name = "tvix-build";
@@ -14055,12 +13089,6 @@ rec {
             packageId = "wu-manber";
           }
         ];
-        devDependencies = [
-          {
-            name = "test-case";
-            packageId = "test-case";
-          }
-        ];
 
       };
       "tvix-eval" = rec {
@@ -14246,11 +13274,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -14283,10 +13311,6 @@ rec {
             features = [ "tokio" "gzip" "bzip2" "xz" ];
           }
           {
-            name = "async-recursion";
-            packageId = "async-recursion";
-          }
-          {
             name = "bstr";
             packageId = "bstr";
           }
@@ -14307,6 +13331,10 @@ rec {
             packageId = "magic";
           }
           {
+            name = "md-5";
+            packageId = "md-5";
+          }
+          {
             name = "nix-compat";
             packageId = "nix-compat";
           }
@@ -14329,6 +13357,10 @@ rec {
             packageId = "serde_json";
           }
           {
+            name = "sha1";
+            packageId = "sha1";
+          }
+          {
             name = "sha2";
             packageId = "sha2";
           }
@@ -14372,6 +13404,10 @@ rec {
             usesDefaultFeatures = false;
           }
           {
+            name = "url";
+            packageId = "url";
+          }
+          {
             name = "walkdir";
             packageId = "walkdir";
           }
@@ -14411,10 +13447,6 @@ rec {
             name = "tempfile";
             packageId = "tempfile";
           }
-          {
-            name = "test-case";
-            packageId = "test-case";
-          }
         ];
         features = {
           "default" = [ "nix_tests" ];
@@ -14472,8 +13504,9 @@ rec {
             packageId = "anyhow";
           }
           {
-            name = "async-recursion";
-            packageId = "async-recursion";
+            name = "async-compression";
+            packageId = "async-compression";
+            features = [ "tokio" "bzip2" "gzip" "xz" "zstd" ];
           }
           {
             name = "async-stream";
@@ -14519,6 +13552,10 @@ rec {
             packageId = "lazy_static";
           }
           {
+            name = "lru";
+            packageId = "lru";
+          }
+          {
             name = "nix-compat";
             packageId = "nix-compat";
             features = [ "async" ];
@@ -14540,12 +13577,16 @@ rec {
             features = [ "rt-tokio" ];
           }
           {
+            name = "parking_lot";
+            packageId = "parking_lot 0.12.2";
+          }
+          {
             name = "pin-project-lite";
             packageId = "pin-project-lite";
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "reqwest";
@@ -14604,7 +13645,7 @@ rec {
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             features = [ "tls" "tls-roots" ];
           }
           {
@@ -14627,7 +13668,7 @@ rec {
           {
             name = "tracing-subscriber";
             packageId = "tracing-subscriber";
-            features = [ "env-filter" "json" ];
+            features = [ "env-filter" ];
           }
           {
             name = "tvix-castore";
@@ -14641,10 +13682,6 @@ rec {
             name = "walkdir";
             packageId = "walkdir";
           }
-          {
-            name = "xz2";
-            packageId = "xz2";
-          }
         ];
         buildDependencies = [
           {
@@ -14674,10 +13711,6 @@ rec {
             packageId = "tempfile";
           }
           {
-            name = "test-case";
-            packageId = "test-case";
-          }
-          {
             name = "tokio-retry";
             packageId = "tokio-retry";
           }
@@ -14690,7 +13723,7 @@ rec {
           "tonic-reflection" = [ "dep:tonic-reflection" "tvix-castore/tonic-reflection" ];
           "virtiofs" = [ "tvix-castore/virtiofs" ];
         };
-        resolvedDefaultFeatures = [ "cloud" "default" "fuse" "otlp" "tonic-reflection" "virtiofs" ];
+        resolvedDefaultFeatures = [ "cloud" "default" "fuse" "integration" "otlp" "tonic-reflection" "virtiofs" ];
       };
       "typenum" = rec {
         crateName = "typenum";
@@ -14757,11 +13790,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -14872,18 +13905,6 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" ];
       };
-      "unicode-xid" = rec {
-        crateName = "unicode-xid";
-        version = "0.1.0";
-        edition = "2015";
-        sha256 = "1z57lqh4s18rr4x0j4fw4fmp9hf9346h0kmdgqsqx0fhjr3k0wpw";
-        authors = [
-          "erick.tryzelaar <erick.tryzelaar@gmail.com>"
-          "kwantam <kwantam@gmail.com>"
-        ];
-        features = { };
-        resolvedDefaultFeatures = [ "default" ];
-      };
       "untrusted" = rec {
         crateName = "untrusted";
         version = "0.9.0";
@@ -15351,11 +14372,11 @@ rec {
           }
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -15417,7 +14438,7 @@ rec {
         dependencies = [
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "wasm-bindgen-macro-support";
@@ -15441,11 +14462,11 @@ rec {
         dependencies = [
           {
             name = "proc-macro2";
-            packageId = "proc-macro2 1.0.76";
+            packageId = "proc-macro2";
           }
           {
             name = "quote";
-            packageId = "quote 1.0.35";
+            packageId = "quote";
           }
           {
             name = "syn";
@@ -15991,23 +15012,25 @@ rec {
       };
       "web-time" = rec {
         crateName = "web-time";
-        version = "0.2.4";
+        version = "1.1.0";
         edition = "2021";
-        sha256 = "1q6gk0nkwbfz30g1pz8g52mq00zjx7m5im36k3474aw73jdh8c5a";
+        sha256 = "1fx05yqx83dhx628wb70fyy10yjfq1jpl20qfqhdkymi13rq0ras";
         dependencies = [
           {
             name = "js-sys";
             packageId = "js-sys";
-            target = { target, features }: ((builtins.elem "wasm" target."family") && (!(("emscripten" == target."os" or null) || ("wasi" == target."os" or null))));
+            target = { target, features }: ((builtins.elem "wasm" target."family") && ("unknown" == target."os" or null));
           }
           {
             name = "wasm-bindgen";
             packageId = "wasm-bindgen";
             usesDefaultFeatures = false;
-            target = { target, features }: ((builtins.elem "wasm" target."family") && (!(("emscripten" == target."os" or null) || ("wasi" == target."os" or null))));
+            target = { target, features }: ((builtins.elem "wasm" target."family") && ("unknown" == target."os" or null));
           }
         ];
-
+        features = {
+          "serde" = [ "dep:serde" ];
+        };
       };
       "which 4.4.2" = rec {
         crateName = "which";
@@ -17061,6 +16084,67 @@ rec {
         ];
 
       };
+      "zerocopy" = rec {
+        crateName = "zerocopy";
+        version = "0.7.34";
+        edition = "2018";
+        sha256 = "11xhrwixm78m6ca1jdxf584wdwvpgg7q00vg21fhwl0psvyf71xf";
+        authors = [
+          "Joshua Liebow-Feeser <joshlf@google.com>"
+        ];
+        dependencies = [
+          {
+            name = "zerocopy-derive";
+            packageId = "zerocopy-derive";
+            optional = true;
+          }
+          {
+            name = "zerocopy-derive";
+            packageId = "zerocopy-derive";
+            target = { target, features }: false;
+          }
+        ];
+        devDependencies = [
+          {
+            name = "zerocopy-derive";
+            packageId = "zerocopy-derive";
+          }
+        ];
+        features = {
+          "__internal_use_only_features_that_work_on_stable" = [ "alloc" "derive" "simd" ];
+          "byteorder" = [ "dep:byteorder" ];
+          "default" = [ "byteorder" ];
+          "derive" = [ "zerocopy-derive" ];
+          "simd-nightly" = [ "simd" ];
+          "zerocopy-derive" = [ "dep:zerocopy-derive" ];
+        };
+        resolvedDefaultFeatures = [ "simd" ];
+      };
+      "zerocopy-derive" = rec {
+        crateName = "zerocopy-derive";
+        version = "0.7.34";
+        edition = "2018";
+        sha256 = "0fqvglw01w3hp7xj9gdk1800x9j7v58s9w8ijiyiz2a7krb39s8m";
+        procMacro = true;
+        authors = [
+          "Joshua Liebow-Feeser <joshlf@google.com>"
+        ];
+        dependencies = [
+          {
+            name = "proc-macro2";
+            packageId = "proc-macro2";
+          }
+          {
+            name = "quote";
+            packageId = "quote";
+          }
+          {
+            name = "syn";
+            packageId = "syn 2.0.48";
+          }
+        ];
+
+      };
       "zeroize" = rec {
         crateName = "zeroize";
         version = "1.7.0";
@@ -17298,8 +16382,9 @@ rec {
             # because we compiled those test binaries in the former and not the latter.
             # So all paths will expect source tree to be there and not in the build top directly.
             # For example: $NIX_BUILD_TOP := /build in general, if you ask yourself.
-            # TODO(raitobezarius): I believe there could be more edge cases if `crate.sourceRoot`
-            # do exist but it's very hard to reason about them, so let's wait until the first bug report.
+            # NOTE: There could be edge cases if `crate.sourceRoot` does exist but
+            # it's very hard to reason about them.
+            # Open a bug if you run into this!
             mkdir -p source/
             cd source/
 
diff --git a/tvix/Cargo.toml b/tvix/Cargo.toml
index 6cd19831dc..847d9aceec 100644
--- a/tvix/Cargo.toml
+++ b/tvix/Cargo.toml
@@ -30,6 +30,11 @@ members = [
   "store",
 ]
 
+[workspace.lints.clippy]
+# Allow blocks_in_conditions due to false positives with #[tracing::instrument(…)]:
+# https://github.com/rust-lang/rust-clippy/issues/12281
+blocks_in_conditions = "allow"
+
 # Add a profile to all targets that enables release optimisations, but
 # retains debug symbols. This is great for use with
 # benchmarking/profiling tools.
diff --git a/tvix/README.md b/tvix/README.md
index bf96afa4ba..fb536bc229 100644
--- a/tvix/README.md
+++ b/tvix/README.md
@@ -61,8 +61,7 @@ This folder contains the following components:
 * `//tvix/castore` - subtree storage/transfer in a content-addressed fashion
 * `//tvix/cli` - preliminary REPL & CLI implementation for Tvix
 * `//tvix/eval` - an implementation of the Nix programming language
-* `//tvix/nar-bridge`
-  * `nar-bridge-http`: A HTTP webserver providing a Nix HTTP Binary Cache interface in front of a tvix-store
+* `//tvix/nar-bridge-go` - an HTTP webserver providing a Nix HTTP Binary Cache interface in front of a tvix-store
 * `//tvix/nix-compat` - a Rust library for compatibility with C++ Nix, features like encodings and hashing schemes and formats
 * `//tvix/serde` - a Rust library for using the Nix language for app configuration
 * `//tvix/store` - a "filesystem" linking Nix store paths and metadata with the content-addressed layer
diff --git a/tvix/boot/README.md b/tvix/boot/README.md
index 13a4855060..9c7b722a7a 100644
--- a/tvix/boot/README.md
+++ b/tvix/boot/README.md
@@ -43,7 +43,7 @@ Potentially copy some data into tvix-store (via nar-bridge):
 
 ```
 mg run //tvix:store -- daemon &
-$(mg build //tvix:nar-bridge)/bin/nar-bridge-http &
+$(mg build //tvix:nar-bridge-go)/bin/nar-bridge-http &
 rm -Rf ~/.cache/nix; nix copy --to http://localhost:9000\?compression\=none $(mg build //third_party/nixpkgs:hello)
 pkill nar-bridge-http; pkill tvix-store
 ```
diff --git a/tvix/boot/default.nix b/tvix/boot/default.nix
index 0f2edc3085..85995ffbf2 100644
--- a/tvix/boot/default.nix
+++ b/tvix/boot/default.nix
@@ -67,7 +67,7 @@ rec {
   # Start a `tvix-store` virtiofs daemon from $PATH, then a cloud-hypervisor
   # pointed to it.
   # Supports the following env vars (and defaults)
-  # CH_NUM_CPUS=1
+  # CH_NUM_CPUS=2
   # CH_MEM_SIZE=512M
   # CH_CMDLINE=""
   runVM = pkgs.writers.writeBashBin "run-tvix-vm" ''
@@ -89,7 +89,7 @@ rec {
     # Wait for the socket to exist.
     until [ -e $tempdir/tvix.sock ]; do sleep 0.1; done
 
-    CH_NUM_CPUS="''${CH_NUM_CPUS:-1}"
+    CH_NUM_CPUS="''${CH_NUM_CPUS:-2}"
     CH_MEM_SIZE="''${CH_MEM_SIZE:-512M}"
     CH_CMDLINE="''${CH_CMDLINE:-}"
 
@@ -102,7 +102,7 @@ rec {
      --kernel ${kernel.dev}/vmlinux \
      --initramfs ${initrd} \
      --cmdline "console=ttyS0 $CH_CMDLINE" \
-     --fs tag=tvix,socket=$tempdir/tvix.sock,num_queues=1,queue_size=512
+     --fs tag=tvix,socket=$tempdir/tvix.sock,num_queues=''${CH_NUM_CPUS},queue_size=512
   '';
 
   meta.ci.targets = [
diff --git a/tvix/boot/tests/default.nix b/tvix/boot/tests/default.nix
index d16dba79f1..5c7f97a1ce 100644
--- a/tvix/boot/tests/default.nix
+++ b/tvix/boot/tests/default.nix
@@ -109,18 +109,13 @@ depot.nix.readTree.drvTargets
     path = ../../docs;
     importPathName = "docs";
   });
-  docs-sled = (mkBootTest {
-    blobServiceAddr = "sled://$PWD/blobs.sled";
+  docs-persistent = (mkBootTest {
+    blobServiceAddr = "objectstore+file://$PWD/blobs";
     directoryServiceAddr = "sled://$PWD/directories.sled";
     pathInfoServiceAddr = "sled://$PWD/pathinfo.sled";
     path = ../../docs;
     importPathName = "docs";
   });
-  docs-objectstore-local = (mkBootTest {
-    blobServiceAddr = "objectstore+file://$PWD/blobs";
-    path = ../../docs;
-    importPathName = "docs";
-  });
 
   closure-tvix = (mkBootTest {
     blobServiceAddr = "objectstore+file://$PWD/blobs";
diff --git a/tvix/build/Cargo.toml b/tvix/build/Cargo.toml
index bda2d136c7..cf25465cca 100644
--- a/tvix/build/Cargo.toml
+++ b/tvix/build/Cargo.toml
@@ -10,11 +10,11 @@ itertools = "0.12.0"
 prost = "0.12.1"
 thiserror = "1.0.56"
 tokio = { version = "1.32.0" }
-tokio-listener = { version = "0.3.2", features = [ "tonic011" ] }
+tokio-listener = { version = "0.4.1", features = [ "tonic011" ] }
 tonic = { version = "0.11.0", features = ["tls", "tls-roots"] }
 tvix-castore = { path = "../castore" }
 tracing = "0.1.37"
-tracing-subscriber = { version = "0.3.16", features = ["json"] }
+tracing-subscriber = "0.3.16"
 url = "2.4.0"
 
 [dependencies.tonic-reflection]
@@ -30,4 +30,7 @@ default = []
 tonic-reflection = ["dep:tonic-reflection"]
 
 [dev-dependencies]
-test-case = "3.3.1"
+rstest = "0.19.0"
+
+[lints]
+workspace = true
diff --git a/tvix/build/src/bin/tvix-build.rs b/tvix/build/src/bin/tvix-build.rs
index ed36c8933c..07d7e30dfd 100644
--- a/tvix/build/src/bin/tvix-build.rs
+++ b/tvix/build/src/bin/tvix-build.rs
@@ -23,10 +23,6 @@ use tvix_castore::proto::FILE_DESCRIPTOR_SET as CASTORE_FILE_DESCRIPTOR_SET;
 #[derive(Parser)]
 #[command(author, version, about, long_about = None)]
 struct Cli {
-    /// Whether to log in JSON
-    #[arg(long)]
-    json: bool,
-
     #[arg(long)]
     log_level: Option<Level>,
 
@@ -58,23 +54,13 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     // configure log settings
     let level = cli.log_level.unwrap_or(Level::INFO);
 
-    let subscriber = tracing_subscriber::registry()
+    tracing_subscriber::registry()
         .with(
-            cli.json.then_some(
-                tracing_subscriber::fmt::Layer::new()
-                    .with_writer(std::io::stderr.with_max_level(level))
-                    .json(),
-            ),
+            tracing_subscriber::fmt::Layer::new()
+                .with_writer(std::io::stderr.with_max_level(level))
+                .pretty(),
         )
-        .with(
-            (!cli.json).then_some(
-                tracing_subscriber::fmt::Layer::new()
-                    .with_writer(std::io::stderr.with_max_level(level))
-                    .pretty(),
-            ),
-        );
-
-    tracing::subscriber::set_global_default(subscriber).expect("Unable to set global subscriber");
+        .init();
 
     match cli.command {
         Commands::Daemon {
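With the `--json` flag gone, the subscriber setup collapses to a single pretty-printing layer. A minimal, self-contained sketch of the resulting pattern (log level hard-coded here for brevity); `.init()` replaces the explicit `set_global_default` call and, with default features, also installs the `log` compatibility shim:

```rust
use tracing::Level;
use tracing_subscriber::fmt::writer::MakeWriterExt;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;

fn main() {
    tracing_subscriber::registry()
        .with(
            // Pretty-print events to stderr, filtering out anything
            // more verbose than INFO.
            tracing_subscriber::fmt::Layer::new()
                .with_writer(std::io::stderr.with_max_level(Level::INFO))
                .pretty(),
        )
        .init();

    tracing::info!("subscriber installed");
}
```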
diff --git a/tvix/build/src/buildservice/from_addr.rs b/tvix/build/src/buildservice/from_addr.rs
index f5c4e6a490..cc5403edef 100644
--- a/tvix/build/src/buildservice/from_addr.rs
+++ b/tvix/build/src/buildservice/from_addr.rs
@@ -50,38 +50,41 @@ mod tests {
     use std::sync::Arc;
 
     use super::from_addr;
-    use test_case::test_case;
+    use rstest::rstest;
     use tvix_castore::{
         blobservice::{BlobService, MemoryBlobService},
         directoryservice::{DirectoryService, MemoryDirectoryService},
     };
 
+    #[rstest]
     /// This uses an unsupported scheme.
-    #[test_case("http://foo.example/test", false; "unsupported scheme")]
+    #[case::unsupported_scheme("http://foo.example/test", false)]
     /// This configures dummy
-    #[test_case("dummy://", true; "valid dummy")]
+    #[case::valid_dummy("dummy://", true)]
     /// Correct scheme to connect to a unix socket.
-    #[test_case("grpc+unix:///path/to/somewhere", true; "grpc valid unix socket")]
+    #[case::grpc_valid_unix_socket("grpc+unix:///path/to/somewhere", true)]
     /// Correct scheme for unix socket, but setting a host too, which is invalid.
-    #[test_case("grpc+unix://host.example/path/to/somewhere", false; "grpc invalid unix socket and host")]
+    #[case::grpc_invalid_unix_socket_and_host("grpc+unix://host.example/path/to/somewhere", false)]
     /// Correct scheme to connect to localhost, with port 12345
-    #[test_case("grpc+http://[::1]:12345", true; "grpc valid IPv6 localhost port 12345")]
+    #[case::grpc_valid_ipv6_localhost_port_12345("grpc+http://[::1]:12345", true)]
     /// Correct scheme to connect to localhost over http, without specifying a port.
-    #[test_case("grpc+http://localhost", true; "grpc valid http host without port")]
+    #[case::grpc_valid_http_host_without_port("grpc+http://localhost", true)]
     /// Correct scheme to connect to localhost over http, without specifying a port.
-    #[test_case("grpc+https://localhost", true; "grpc valid https host without port")]
+    #[case::grpc_valid_https_host_without_port("grpc+https://localhost", true)]
     /// Correct scheme to connect to localhost over http, but with additional path, which is invalid.
-    #[test_case("grpc+http://localhost/some-path", false; "grpc valid invalid host and path")]
+    #[case::grpc_invalid_host_and_path("grpc+http://localhost/some-path", false)]
     #[tokio::test]
-    async fn test_from_addr(uri_str: &str, is_ok: bool) {
+    async fn test_from_addr(#[case] uri_str: &str, #[case] exp_succeed: bool) {
         let blob_service: Arc<dyn BlobService> = Arc::from(MemoryBlobService::default());
         let directory_service: Arc<dyn DirectoryService> =
             Arc::from(MemoryDirectoryService::default());
-        assert_eq!(
-            from_addr(uri_str, blob_service, directory_service)
-                .await
-                .is_ok(),
-            is_ok
-        )
+
+        let resp = from_addr(uri_str, blob_service, directory_service).await;
+
+        if exp_succeed {
+            resp.expect("should succeed");
+        } else {
+            assert!(resp.is_err(), "should fail");
+        }
     }
 }
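The `test_case` → `rstest` migration above (and in `proto/mod.rs` below) follows a mechanical pattern: every `#[test_case(args; "name")]` becomes a named `#[case::name(args)]` attribute, and the test parameters gain `#[case]` markers. A minimal sketch of the pattern with an illustrative predicate (not from the tree):

```rust
use rstest::rstest;

#[rstest]
#[case::ok_empty("", true)]
#[case::fail_trailing_slash("foo/", false)]
fn rejects_trailing_slash(#[case] input: &str, #[case] expected: bool) {
    // rstest expands each #[case] into its own test, named after the case label.
    assert_eq!(!input.ends_with('/'), expected);
}
```

rstest also composes with `#[tokio::test]`, which is what lets `test_from_addr` stay an async test after the migration.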
diff --git a/tvix/build/src/proto/mod.rs b/tvix/build/src/proto/mod.rs
index c7831795c3..e359b5b5b7 100644
--- a/tvix/build/src/proto/mod.rs
+++ b/tvix/build/src/proto/mod.rs
@@ -236,28 +236,27 @@ impl build_request::BuildConstraints {
 
 #[cfg(test)]
 mod tests {
-    use test_case::test_case;
-
-    use crate::proto::is_clean_relative_path;
-
-    use super::is_clean_path;
-
-    #[test_case("foo/bar/", false; "fail trailing slash")]
-    #[test_case("foo/../bar", false; "fail dotdot")]
-    #[test_case("foo/./bar", false; "fail singledot")]
-    #[test_case("foo//bar", false; "fail unnecessary slashes")]
-    #[test_case("//foo/bar", false; "fail absolute unnecessary slashes")]
-    #[test_case("", true; "ok empty")]
-    #[test_case("foo/bar", true; "ok relative")]
-    #[test_case("/", true; "ok absolute")]
-    #[test_case("/foo/bar", true; "ok absolute2")]
-    fn test_is_clean_path(s: &str, expected: bool) {
+    use super::{is_clean_path, is_clean_relative_path};
+    use rstest::rstest;
+
+    #[rstest]
+    #[case::fail_trailing_slash("foo/bar/", false)]
+    #[case::fail_dotdot("foo/../bar", false)]
+    #[case::fail_singledot("foo/./bar", false)]
+    #[case::fail_unnecessary_slashes("foo//bar", false)]
+    #[case::fail_absolute_unnecessary_slashes("//foo/bar", false)]
+    #[case::ok_empty("", true)]
+    #[case::ok_relative("foo/bar", true)]
+    #[case::ok_absolute("/", true)]
+    #[case::ok_absolute2("/foo/bar", true)]
+    fn test_is_clean_path(#[case] s: &str, #[case] expected: bool) {
         assert_eq!(is_clean_path(s), expected);
     }
 
-    #[test_case("/", false; "fail absolute")]
-    #[test_case("foo/bar", true; "ok relative")]
-    fn test_is_clean_relative_path(s: &str, expected: bool) {
+    #[rstest]
+    #[case::fail_absolute("/", false)]
+    #[case::ok_relative("foo/bar", true)]
+    fn test_is_clean_relative_path(#[case] s: &str, #[case] expected: bool) {
         assert_eq!(is_clean_relative_path(s), expected);
     }
 
diff --git a/tvix/castore/Cargo.toml b/tvix/castore/Cargo.toml
index b68922b7ce..4cbc29053b 100644
--- a/tvix/castore/Cargo.toml
+++ b/tvix/castore/Cargo.toml
@@ -4,6 +4,7 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
+async-compression = { version = "0.4.9", features = ["tokio", "zstd"]}
 async-stream = "0.3.5"
 async-tempfile = "0.4.0"
 blake3 = { version = "1.3.1", features = ["rayon", "std", "traits-preview"] }
@@ -21,7 +22,8 @@ prost = "0.12.1"
 sled = { version = "0.34.7" }
 thiserror = "1.0.38"
 tokio-stream = { version = "0.1.14", features = ["fs", "net"] }
-tokio-util = { version = "0.7.9", features = ["io", "io-util"] }
+tokio-util = { version = "0.7.9", features = ["io", "io-util", "codec"] }
+tokio-tar = "0.3.1"
 tokio = { version = "1.32.0", features = ["fs", "macros", "net", "rt", "rt-multi-thread", "signal"] }
 tonic = "0.11.0"
 tower = "0.4.13"
@@ -111,3 +113,10 @@ virtiofs = [
 ]
 fuse = ["fs"]
 tonic-reflection = ["dep:tonic-reflection"]
+# Whether to run the integration tests.
+# Requires the following packages in $PATH:
+# cbtemulator, google-cloud-bigtable-tool
+integration = []
+
+[lints]
+workspace = true
diff --git a/tvix/castore/default.nix b/tvix/castore/default.nix
index edc20ac79d..641d883760 100644
--- a/tvix/castore/default.nix
+++ b/tvix/castore/default.nix
@@ -1,12 +1,23 @@
 { depot, pkgs, ... }:
 
-depot.tvix.crates.workspaceMembers.tvix-castore.build.override {
+(depot.tvix.crates.workspaceMembers.tvix-castore.build.override {
   runTests = true;
   testPreRun = ''
     export SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt;
-    export PATH="$PATH:${pkgs.lib.makeBinPath [pkgs.cbtemulator pkgs.google-cloud-bigtable-tool]}"
   '';
 
   # enable some optional features.
   features = [ "default" "cloud" ];
-}
+}).overrideAttrs (_: {
+  meta.ci.targets = [ "integration-tests" ];
+  passthru.integration-tests = depot.tvix.crates.workspaceMembers.tvix-castore.build.override {
+    runTests = true;
+    testPreRun = ''
+      export SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt;
+      export PATH="$PATH:${pkgs.lib.makeBinPath [pkgs.cbtemulator pkgs.google-cloud-bigtable-tool]}"
+    '';
+
+    # enable some optional features.
+    features = [ "default" "cloud" "integration" ];
+  };
+})
diff --git a/tvix/castore/src/blobservice/from_addr.rs b/tvix/castore/src/blobservice/from_addr.rs
index 3e3f943e59..8898bbfb95 100644
--- a/tvix/castore/src/blobservice/from_addr.rs
+++ b/tvix/castore/src/blobservice/from_addr.rs
@@ -2,15 +2,12 @@ use url::Url;
 
 use crate::{proto::blob_service_client::BlobServiceClient, Error};
 
-use super::{
-    BlobService, GRPCBlobService, MemoryBlobService, ObjectStoreBlobService, SledBlobService,
-};
+use super::{BlobService, GRPCBlobService, MemoryBlobService, ObjectStoreBlobService};
 
 /// Constructs a new instance of a [BlobService] from an URI.
 ///
 /// The following schemes are supported by the following services:
 /// - `memory://` ([MemoryBlobService])
-/// - `sled://` ([SledBlobService])
 /// - `grpc+*://` ([GRPCBlobService])
 /// - `objectstore+*://` ([ObjectStoreBlobService])
 ///
@@ -27,27 +24,6 @@ pub async fn from_addr(uri: &str) -> Result<Box<dyn BlobService>, crate::Error>
             }
             Box::<MemoryBlobService>::default()
         }
-        "sled" => {
-            // sled doesn't support host, and a path can be provided (otherwise
-            // it'll live in memory only).
-            if url.has_host() {
-                return Err(Error::StorageError("no host allowed".to_string()));
-            }
-
-            if url.path() == "/" {
-                return Err(Error::StorageError(
-                    "cowardly refusing to open / with sled".to_string(),
-                ));
-            }
-
-            // TODO: expose other parameters as URL parameters?
-
-            Box::new(if url.path().is_empty() {
-                SledBlobService::new_temporary().map_err(|e| Error::StorageError(e.to_string()))?
-            } else {
-                SledBlobService::new(url.path()).map_err(|e| Error::StorageError(e.to_string()))?
-            })
-        }
         scheme if scheme.starts_with("grpc+") => {
             // schemes starting with grpc+ go to the GRPCPathInfoService.
             //   That's normally grpc+unix for unix sockets, and grpc+http(s) for the HTTP counterparts.
@@ -83,28 +59,11 @@ pub async fn from_addr(uri: &str) -> Result<Box<dyn BlobService>, crate::Error>
 #[cfg(test)]
 mod tests {
     use super::from_addr;
-    use lazy_static::lazy_static;
     use rstest::rstest;
-    use tempfile::TempDir;
-
-    lazy_static! {
-        static ref TMPDIR_SLED_1: TempDir = TempDir::new().unwrap();
-        static ref TMPDIR_SLED_2: TempDir = TempDir::new().unwrap();
-    }
 
     #[rstest]
     /// This uses an unsupported scheme.
     #[case::unsupported_scheme("http://foo.example/test", false)]
-    /// This configures sled in temporary mode.
-    #[case::sled_temporary("sled://", true)]
-    /// This configures sled with /, which should fail.
-    #[case::sled_invalid_root("sled:///", false)]
-    /// This configures sled with a host, not path, which should fail.
-    #[case::sled_invalid_host("sled://foo.example", false)]
-    /// This configures sled with a valid path path, which should succeed.
-    #[case::sled_valid_path(&format!("sled://{}", &TMPDIR_SLED_1.path().to_str().unwrap()), true)]
-    /// This configures sled with a host, and a valid path path, which should fail.
-    #[case::sled_invalid_host_with_valid_path(&format!("sled://foo.example{}", &TMPDIR_SLED_2.path().to_str().unwrap()), false)]
     /// This correctly sets the scheme, and doesn't set a path.
     #[case::memory_valid("memory://", true)]
     /// This sets a memory url host to `foo`
diff --git a/tvix/castore/src/blobservice/memory.rs b/tvix/castore/src/blobservice/memory.rs
index 25eec334de..873d06b461 100644
--- a/tvix/castore/src/blobservice/memory.rs
+++ b/tvix/castore/src/blobservice/memory.rs
@@ -1,9 +1,7 @@
+use parking_lot::RwLock;
 use std::io::{self, Cursor, Write};
 use std::task::Poll;
-use std::{
-    collections::HashMap,
-    sync::{Arc, RwLock},
-};
+use std::{collections::HashMap, sync::Arc};
 use tonic::async_trait;
 use tracing::instrument;
 
@@ -19,13 +17,13 @@ pub struct MemoryBlobService {
 impl BlobService for MemoryBlobService {
     #[instrument(skip_all, ret, err, fields(blob.digest=%digest))]
     async fn has(&self, digest: &B3Digest) -> io::Result<bool> {
-        let db = self.db.read().unwrap();
+        let db = self.db.read();
         Ok(db.contains_key(digest))
     }
 
     #[instrument(skip_all, err, fields(blob.digest=%digest))]
     async fn open_read(&self, digest: &B3Digest) -> io::Result<Option<Box<dyn BlobReader>>> {
-        let db = self.db.read().unwrap();
+        let db = self.db.read();
 
         match db.get(digest).map(|x| Cursor::new(x.clone())) {
             Some(result) => Ok(Some(Box::new(result))),
@@ -109,24 +107,16 @@ impl BlobWriter for MemoryBlobWriter {
         } else {
             let (buf, hasher) = self.writers.take().unwrap();
 
-            // We know self.hasher is doing blake3 hashing, so this won't fail.
             let digest: B3Digest = hasher.finalize().as_bytes().into();
 
             // Only insert if the blob doesn't already exist.
-            let db = self.db.read().map_err(|e| {
-                io::Error::new(io::ErrorKind::BrokenPipe, format!("RwLock poisoned: {}", e))
-            })?;
+            let mut db = self.db.upgradable_read();
             if !db.contains_key(&digest) {
-                // drop the read lock, so we can open for writing.
-                drop(db);
-
                 // open the database for writing.
-                let mut db = self.db.write().map_err(|e| {
-                    io::Error::new(io::ErrorKind::BrokenPipe, format!("RwLock poisoned: {}", e))
-                })?;
-
-                // and put buf in there. This will move buf out.
-                db.insert(digest.clone(), buf);
+                db.with_upgraded(|db| {
+                    // and put buf in there. This will move buf out.
+                    db.insert(digest.clone(), buf);
+                });
             }
 
             self.digest = Some(digest.clone());
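The `MemoryBlobWriter` change swaps `std::sync::RwLock` for `parking_lot::RwLock`: parking_lot locks cannot be poisoned (so the error mapping disappears), and the drop-read-then-reacquire-write dance becomes an upgradable read. A minimal sketch of that pattern, with hypothetical key/value types:

```rust
use parking_lot::RwLock;
use std::collections::HashMap;

// Insert only if the key is absent: hold an upgradable read lock for the
// check, and upgrade to a write lock only for the insert itself.
fn insert_if_absent(db: &RwLock<HashMap<String, Vec<u8>>>, key: String, value: Vec<u8>) {
    let mut guard = db.upgradable_read();
    if !guard.contains_key(&key) {
        guard.with_upgraded(|db| {
            db.insert(key, value);
        });
    }
}
```

An upgradable read coexists with plain readers but excludes writers and other upgradable readers, so the check-then-insert is race-free without ever releasing the lock.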
diff --git a/tvix/castore/src/blobservice/mod.rs b/tvix/castore/src/blobservice/mod.rs
index 4ba56a4af7..50acd40bf7 100644
--- a/tvix/castore/src/blobservice/mod.rs
+++ b/tvix/castore/src/blobservice/mod.rs
@@ -11,7 +11,6 @@ mod grpc;
 mod memory;
 mod naive_seeker;
 mod object_store;
-mod sled;
 
 #[cfg(test)]
 pub mod tests;
@@ -22,7 +21,6 @@ pub use self::from_addr::from_addr;
 pub use self::grpc::GRPCBlobService;
 pub use self::memory::MemoryBlobService;
 pub use self::object_store::ObjectStoreBlobService;
-pub use self::sled::SledBlobService;
 
 /// The base trait all BlobService services need to implement.
 /// It provides functions to check whether a given blob exists,
diff --git a/tvix/castore/src/blobservice/sled.rs b/tvix/castore/src/blobservice/sled.rs
deleted file mode 100644
index 3dd4bff7bc..0000000000
--- a/tvix/castore/src/blobservice/sled.rs
+++ /dev/null
@@ -1,150 +0,0 @@
-use super::{BlobReader, BlobService, BlobWriter};
-use crate::{B3Digest, Error};
-use std::{
-    io::{self, Cursor, Write},
-    path::Path,
-    task::Poll,
-};
-use tonic::async_trait;
-use tracing::instrument;
-
-#[derive(Clone)]
-pub struct SledBlobService {
-    db: sled::Db,
-}
-
-impl SledBlobService {
-    pub fn new<P: AsRef<Path>>(p: P) -> Result<Self, sled::Error> {
-        let config = sled::Config::default()
-            .use_compression(false) // is a required parameter
-            .path(p);
-        let db = config.open()?;
-
-        Ok(Self { db })
-    }
-
-    pub fn new_temporary() -> Result<Self, sled::Error> {
-        let config = sled::Config::default().temporary(true);
-        let db = config.open()?;
-
-        Ok(Self { db })
-    }
-}
-
-#[async_trait]
-impl BlobService for SledBlobService {
-    #[instrument(skip(self), fields(blob.digest=%digest))]
-    async fn has(&self, digest: &B3Digest) -> io::Result<bool> {
-        match self.db.contains_key(digest.as_slice()) {
-            Ok(has) => Ok(has),
-            Err(e) => Err(io::Error::new(io::ErrorKind::Other, e.to_string())),
-        }
-    }
-
-    #[instrument(skip(self), fields(blob.digest=%digest))]
-    async fn open_read(&self, digest: &B3Digest) -> io::Result<Option<Box<dyn BlobReader>>> {
-        match self.db.get(digest.as_slice()) {
-            Ok(None) => Ok(None),
-            Ok(Some(data)) => Ok(Some(Box::new(Cursor::new(data[..].to_vec())))),
-            Err(e) => Err(io::Error::new(io::ErrorKind::Other, e.to_string())),
-        }
-    }
-
-    #[instrument(skip(self))]
-    async fn open_write(&self) -> Box<dyn BlobWriter> {
-        Box::new(SledBlobWriter::new(self.db.clone()))
-    }
-}
-
-pub struct SledBlobWriter {
-    db: sled::Db,
-
-    /// Contains the buffer Vec and hasher, or None if already closed
-    writers: Option<(Vec<u8>, blake3::Hasher)>,
-
-    /// The digest that has been returned, if we successfully closed.
-    digest: Option<B3Digest>,
-}
-
-impl SledBlobWriter {
-    pub fn new(db: sled::Db) -> Self {
-        Self {
-            db,
-            writers: Some((Vec::new(), blake3::Hasher::new())),
-            digest: None,
-        }
-    }
-}
-
-impl tokio::io::AsyncWrite for SledBlobWriter {
-    fn poll_write(
-        mut self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-        b: &[u8],
-    ) -> std::task::Poll<Result<usize, io::Error>> {
-        Poll::Ready(match &mut self.writers {
-            None => Err(io::Error::new(
-                io::ErrorKind::NotConnected,
-                "already closed",
-            )),
-            Some((ref mut buf, ref mut hasher)) => {
-                let bytes_written = buf.write(b)?;
-                hasher.write(&b[..bytes_written])
-            }
-        })
-    }
-
-    fn poll_flush(
-        mut self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-    ) -> std::task::Poll<Result<(), io::Error>> {
-        Poll::Ready(match &mut self.writers {
-            None => Err(io::Error::new(
-                io::ErrorKind::NotConnected,
-                "already closed",
-            )),
-            Some(_) => Ok(()),
-        })
-    }
-
-    fn poll_shutdown(
-        self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-    ) -> std::task::Poll<Result<(), io::Error>> {
-        // shutdown is "instantaneous", we only write to a Vec<u8> as buffer.
-        Poll::Ready(Ok(()))
-    }
-}
-
-#[async_trait]
-impl BlobWriter for SledBlobWriter {
-    async fn close(&mut self) -> io::Result<B3Digest> {
-        if self.writers.is_none() {
-            match &self.digest {
-                Some(digest) => Ok(digest.clone()),
-                None => Err(io::Error::new(
-                    io::ErrorKind::NotConnected,
-                    "already closed",
-                )),
-            }
-        } else {
-            let (buf, hasher) = self.writers.take().unwrap();
-
-            let digest: B3Digest = hasher.finalize().as_bytes().into();
-
-            // Only insert if the blob doesn't already exist.
-            if !self.db.contains_key(digest.as_slice()).map_err(|e| {
-                Error::StorageError(format!("Unable to check if we have blob {}: {}", digest, e))
-            })? {
-                // put buf in there. This will move buf out.
-                self.db
-                    .insert(digest.as_slice(), buf)
-                    .map_err(|e| Error::StorageError(format!("unable to insert blob: {}", e)))?;
-            }
-
-            self.digest = Some(digest.clone());
-
-            Ok(digest)
-        }
-    }
-}
diff --git a/tvix/castore/src/blobservice/tests/mod.rs b/tvix/castore/src/blobservice/tests/mod.rs
index 30c4e97634..0280faebb1 100644
--- a/tvix/castore/src/blobservice/tests/mod.rs
+++ b/tvix/castore/src/blobservice/tests/mod.rs
@@ -25,7 +25,6 @@ use self::utils::make_grpc_blob_service_client;
 #[case::grpc(make_grpc_blob_service_client().await)]
 #[case::memory(blobservice::from_addr("memory://").await.unwrap())]
 #[case::objectstore_memory(blobservice::from_addr("objectstore+memory://").await.unwrap())]
-#[case::sled(blobservice::from_addr("sled://").await.unwrap())]
 pub fn blob_services(#[case] blob_service: impl BlobService) {}
 
 /// Using [BlobService::has] on a non-existing blob should return false.
diff --git a/tvix/castore/src/directoryservice/bigtable.rs b/tvix/castore/src/directoryservice/bigtable.rs
index bee2fb15ae..1194c6ddc9 100644
--- a/tvix/castore/src/directoryservice/bigtable.rs
+++ b/tvix/castore/src/directoryservice/bigtable.rs
@@ -115,7 +115,7 @@ impl BigtableDirectoryService {
             .stdout(Stdio::piped())
             .kill_on_drop(true)
             .spawn()
-            .expect("failed to spwan emulator");
+            .expect("failed to spawn emulator");
 
         Retry::spawn(
             ExponentialBackoff::from_millis(20)
@@ -343,7 +343,7 @@ impl DirectoryService for BigtableDirectoryService {
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>> {
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
         traverse_directory(self.clone(), root_directory_digest)
     }
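The signature change to `BoxStream<'static, …>` (repeated in the other `DirectoryService` impls below) makes explicit that the returned stream owns all of its state, so callers may keep polling it after the `&self` borrow ends. A minimal sketch of the idea, with hypothetical types:

```rust
use futures::stream::{self, BoxStream, StreamExt};

struct Numbers(Vec<u32>);

impl Numbers {
    // Cloning the data into the stream makes it 'static: it no longer
    // borrows &self, so it can e.g. be moved into a spawned task.
    fn stream(&self) -> BoxStream<'static, u32> {
        stream::iter(self.0.clone()).boxed()
    }
}
```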
 
diff --git a/tvix/castore/src/directoryservice/closure_validator.rs b/tvix/castore/src/directoryservice/closure_validator.rs
index 461fc907bd..b9746a5a05 100644
--- a/tvix/castore/src/directoryservice/closure_validator.rs
+++ b/tvix/castore/src/directoryservice/closure_validator.rs
@@ -4,7 +4,7 @@ use bstr::ByteSlice;
 
 use petgraph::{
     graph::{DiGraph, NodeIndex},
-    visit::Bfs,
+    visit::{Bfs, Walker},
 };
 use tracing::instrument;
 
@@ -13,6 +13,8 @@ use crate::{
     B3Digest, Error,
 };
 
+type DirectoryGraph = DiGraph<Directory, ()>;
+
 /// This can be used to validate a Directory closure (DAG of connected
 /// Directories), and their insertion order.
 ///
@@ -37,7 +39,7 @@ use crate::{
 pub struct ClosureValidator {
     // A directed graph, using Directory as node weight, without edge weights.
     // Edges point from parents to children.
-    graph: DiGraph<Directory, ()>,
+    graph: DirectoryGraph,
 
     // A lookup table from directory digest to node index.
     digest_to_node_ix: HashMap<B3Digest, NodeIndex>,
@@ -122,11 +124,54 @@ impl ClosureValidator {
     /// In case no elements have been inserted, returns an empty list.
     #[instrument(level = "trace", skip_all, err)]
     pub(crate) fn finalize(self) -> Result<Vec<Directory>, Error> {
+        let (graph, _) = match self.finalize_raw()? {
+            None => return Ok(vec![]),
+            Some(v) => v,
+        };
+        // Dissolve the graph, returning the nodes as a Vec.
+        // As the graph was populated in a valid DFS PostOrder, we can return
+        // nodes in that same order.
+        let (nodes, _edges) = graph.into_nodes_edges();
+        Ok(nodes.into_iter().map(|x| x.weight).collect())
+    }
+
+    /// Ensure that all inserted Directories are connected, then return a
+    /// (deduplicated) and validated list of directories, in from-root-to-leaves
+    /// order.
+    /// In case no elements have been inserted, returns an empty list.
+    #[instrument(level = "trace", skip_all, err)]
+    pub(crate) fn finalize_root_to_leaves(self) -> Result<Vec<Directory>, Error> {
+        let (graph, root) = match self.finalize_raw()? {
+            None => return Ok(vec![]),
+            Some(v) => v,
+        };
+
+        // do a BFS traversal of the graph, starting with the root node to get
+        // all nodes reachable from there.
+        let traversal = Bfs::new(&graph, root);
+
+        let order = traversal.iter(&graph).collect::<Vec<_>>();
+
+        let (nodes, _edges) = graph.into_nodes_edges();
+
+        // Convert to option, so that we can take individual nodes out without messing up the
+        // indices
+        let mut nodes = nodes.into_iter().map(Some).collect::<Vec<_>>();
+
+        Ok(order
+            .iter()
+            .map(|i| nodes[i.index()].take().unwrap().weight)
+            .collect())
+    }
+
+    /// Internal implementation of closure validation
+    #[instrument(level = "trace", skip_all, err)]
+    fn finalize_raw(self) -> Result<Option<(DirectoryGraph, NodeIndex)>, Error> {
         // If no nodes were inserted, an empty list is returned.
         let last_directory_ix = if let Some(x) = self.last_directory_ix {
             x
         } else {
-            return Ok(vec![]);
+            return Ok(None);
         };
 
         // do a BFS traversal of the graph, starting with the root node to get
@@ -136,6 +181,7 @@ impl ClosureValidator {
         let mut visited_directory_count = 0;
         #[cfg(debug_assertions)]
         let mut visited_directory_ixs = HashSet::new();
+        #[cfg_attr(not(debug_assertions), allow(unused))]
         while let Some(directory_ix) = traversal.next(&self.graph) {
             #[cfg(debug_assertions)]
             visited_directory_ixs.insert(directory_ix);
@@ -171,11 +217,7 @@ impl ClosureValidator {
             }
         }
 
-        // Dissolve the graph, returning the nodes as a Vec.
-        // As the graph was populated in a valid DFS PostOrder, we can return
-        // nodes in that same order.
-        let (nodes, _edges) = self.graph.into_nodes_edges();
-        Ok(nodes.into_iter().map(|x| x.weight).collect())
+        Ok(Some((self.graph, last_directory_ix)))
     }
 }
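The new `finalize_root_to_leaves` builds its ordering with petgraph's `Walker` trait, which turns the `Bfs` visitor into an iterator of node indices. A minimal sketch of that traversal on a toy graph:

```rust
use petgraph::graph::DiGraph;
use petgraph::visit::{Bfs, Walker};

fn main() {
    let mut graph: DiGraph<&str, ()> = DiGraph::new();
    let root = graph.add_node("root");
    let a = graph.add_node("a");
    let b = graph.add_node("b");
    graph.add_edge(root, a, ());
    graph.add_edge(a, b, ());

    // Walker::iter yields NodeIndex values in breadth-first,
    // root-to-leaves order.
    let order: Vec<_> = Bfs::new(&graph, root).iter(&graph).collect();
    assert_eq!(order, vec![root, a, b]);
}
```

Wrapping the node weights in `Option` afterwards is just a trick to move individual weights out of the `Vec` without shifting the indices that the collected order refers to.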
 
diff --git a/tvix/castore/src/directoryservice/from_addr.rs b/tvix/castore/src/directoryservice/from_addr.rs
index 31158d3a38..ee675ca68a 100644
--- a/tvix/castore/src/directoryservice/from_addr.rs
+++ b/tvix/castore/src/directoryservice/from_addr.rs
@@ -2,7 +2,10 @@ use url::Url;
 
 use crate::{proto::directory_service_client::DirectoryServiceClient, Error};
 
-use super::{DirectoryService, GRPCDirectoryService, MemoryDirectoryService, SledDirectoryService};
+use super::{
+    DirectoryService, GRPCDirectoryService, MemoryDirectoryService, ObjectStoreDirectoryService,
+    SledDirectoryService,
+};
 
 /// Constructs a new instance of a [DirectoryService] from an URI.
 ///
@@ -63,6 +66,18 @@ pub async fn from_addr(uri: &str) -> Result<Box<dyn DirectoryService>, crate::Er
             let client = DirectoryServiceClient::new(crate::tonic::channel_from_url(&url).await?);
             Box::new(GRPCDirectoryService::from_client(client))
         }
+        scheme if scheme.starts_with("objectstore+") => {
+            // We need to convert the URL to string, strip the prefix there, and then
+            // parse it back as url, as Url::set_scheme() rejects some of the transitions we want to do.
+            let trimmed_url = {
+                let s = url.to_string();
+                Url::parse(s.strip_prefix("objectstore+").unwrap()).unwrap()
+            };
+            Box::new(
+                ObjectStoreDirectoryService::parse_url(&trimmed_url)
+                    .map_err(|e| Error::StorageError(e.to_string()))?,
+            )
+        }
         #[cfg(feature = "cloud")]
         "bigtable" => {
             use super::bigtable::BigtableParameters;
@@ -144,7 +159,7 @@ mod tests {
     #[case::grpc_invalid_host_and_path("grpc+http://localhost/some-path", false)]
     /// A valid example for Bigtable
     #[cfg_attr(
-        feature = "cloud",
+        all(feature = "cloud", feature = "integration"),
         case::bigtable_valid_url(
             "bigtable://instance-1?project_id=project-1&table_name=table-1&family_name=cf1",
             true
@@ -152,7 +167,7 @@ mod tests {
     )]
     /// A valid example for Bigtable, specifying a custom channel size and timeout
     #[cfg_attr(
-        feature = "cloud",
+        all(feature = "cloud", feature = "integration"),
         case::bigtable_valid_url(
             "bigtable://instance-1?project_id=project-1&table_name=table-1&family_name=cf1&channel_size=10&timeout=10",
             true
@@ -160,7 +175,7 @@ mod tests {
     )]
     /// A invalid Bigtable example (missing fields)
     #[cfg_attr(
-        feature = "cloud",
+        all(feature = "cloud", feature = "integration"),
         case::bigtable_invalid_url("bigtable://instance-1", false)
     )]
     #[tokio::test]
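For illustration, a standalone sketch of the scheme-stripping round-trip used above; the URL is hypothetical:

```rust
use url::Url;

// Round-trip through a string; Url::set_scheme() would reject a direct
// transition such as "objectstore+memory" -> "memory".
fn strip_scheme_prefix(url: &Url, prefix: &str) -> Option<Url> {
    let s = url.to_string();
    Url::parse(s.strip_prefix(prefix)?).ok()
}

fn main() {
    let url = Url::parse("objectstore+memory:///").unwrap();
    let inner = strip_scheme_prefix(&url, "objectstore+").unwrap();
    assert_eq!(inner.scheme(), "memory");
}
```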
diff --git a/tvix/castore/src/directoryservice/grpc.rs b/tvix/castore/src/directoryservice/grpc.rs
index 7402fe1b56..fe935629bf 100644
--- a/tvix/castore/src/directoryservice/grpc.rs
+++ b/tvix/castore/src/directoryservice/grpc.rs
@@ -107,7 +107,7 @@ impl DirectoryService for GRPCDirectoryService {
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>> {
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
         let mut grpc_client = self.grpc_client.clone();
         let root_directory_digest = root_directory_digest.clone();
 
diff --git a/tvix/castore/src/directoryservice/memory.rs b/tvix/castore/src/directoryservice/memory.rs
index 2cbbbd1b16..3b2795c396 100644
--- a/tvix/castore/src/directoryservice/memory.rs
+++ b/tvix/castore/src/directoryservice/memory.rs
@@ -1,7 +1,8 @@
 use crate::{proto, B3Digest, Error};
 use futures::stream::BoxStream;
 use std::collections::HashMap;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use tokio::sync::RwLock;
 use tonic::async_trait;
 use tracing::{instrument, warn};
 
@@ -17,7 +18,7 @@ pub struct MemoryDirectoryService {
 impl DirectoryService for MemoryDirectoryService {
     #[instrument(skip(self, digest), fields(directory.digest = %digest))]
     async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
-        let db = self.db.read()?;
+        let db = self.db.read().await;
 
         match db.get(digest) {
             // The directory was not found, return
@@ -62,7 +63,7 @@ impl DirectoryService for MemoryDirectoryService {
         }
 
         // store it
-        let mut db = self.db.write()?;
+        let mut db = self.db.write().await;
         db.insert(digest.clone(), directory);
 
         Ok(digest)
@@ -72,7 +73,7 @@ impl DirectoryService for MemoryDirectoryService {
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>> {
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
         traverse_directory(self.clone(), root_directory_digest)
     }
 
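Switching from `std::sync::RwLock` to `tokio::sync::RwLock` removes the poisoning error path (hence the `From<PoisonError>` removal in errors.rs further down) and makes the guards safe to hold across `.await`. A minimal sketch of the resulting pattern:

```rust
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

/// Sketch: a shared async map in the style of MemoryDirectoryService.
/// tokio's RwLock has no poisoning, so read()/write() return guards directly.
#[derive(Clone, Default)]
struct Db(Arc<RwLock<HashMap<String, Vec<u8>>>>);

impl Db {
    async fn get(&self, key: &str) -> Option<Vec<u8>> {
        self.0.read().await.get(key).cloned()
    }

    async fn put(&self, key: String, value: Vec<u8>) {
        self.0.write().await.insert(key, value);
    }
}
```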
diff --git a/tvix/castore/src/directoryservice/mod.rs b/tvix/castore/src/directoryservice/mod.rs
index cf6bea39d8..3f180ef162 100644
--- a/tvix/castore/src/directoryservice/mod.rs
+++ b/tvix/castore/src/directoryservice/mod.rs
@@ -6,6 +6,7 @@ mod closure_validator;
 mod from_addr;
 mod grpc;
 mod memory;
+mod object_store;
 mod simple_putter;
 mod sled;
 #[cfg(test)]
@@ -17,6 +18,7 @@ pub use self::closure_validator::ClosureValidator;
 pub use self::from_addr::from_addr;
 pub use self::grpc::GRPCDirectoryService;
 pub use self::memory::MemoryDirectoryService;
+pub use self::object_store::ObjectStoreDirectoryService;
 pub use self::simple_putter::SimplePutter;
 pub use self::sled::SledDirectoryService;
 pub use self::traverse::descend_to;
@@ -64,7 +66,7 @@ pub trait DirectoryService: Send + Sync {
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>>;
+    ) -> BoxStream<'static, Result<proto::Directory, Error>>;
 
     /// Allows persisting a closure of [proto::Directory], which is a graph of
     /// connected Directory messages.
@@ -87,7 +89,7 @@ where
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>> {
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
         self.as_ref().get_recursive(root_directory_digest)
     }
 
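The `'static` bound on the returned stream says the stream owns everything it captures, so implementations clone their handles rather than borrow from `&self`. A minimal illustration of the pattern:

```rust
use futures::stream::{self, BoxStream, StreamExt};

struct Service {
    items: Vec<u32>,
}

impl Service {
    // Clone what the stream needs up front so it can outlive `&self`.
    fn get_all(&self) -> BoxStream<'static, u32> {
        stream::iter(self.items.clone()).boxed()
    }
}
```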
diff --git a/tvix/castore/src/directoryservice/object_store.rs b/tvix/castore/src/directoryservice/object_store.rs
new file mode 100644
index 0000000000..64ce335edb
--- /dev/null
+++ b/tvix/castore/src/directoryservice/object_store.rs
@@ -0,0 +1,261 @@
+use std::collections::HashSet;
+use std::sync::Arc;
+
+use data_encoding::HEXLOWER;
+use futures::future::Either;
+use futures::stream::BoxStream;
+use futures::SinkExt;
+use futures::StreamExt;
+use futures::TryFutureExt;
+use futures::TryStreamExt;
+use object_store::{path::Path, ObjectStore};
+use prost::Message;
+use tokio::io::AsyncWriteExt;
+use tokio_util::codec::LengthDelimitedCodec;
+use tonic::async_trait;
+use tracing::{instrument, trace, warn, Level};
+use url::Url;
+
+use super::{ClosureValidator, DirectoryPutter, DirectoryService};
+use crate::{proto, B3Digest, Error};
+
+/// Stores directory closures in an object store.
+/// Notably, this makes use of the option to disallow accessing child directories except when
+/// fetching them recursively via the top-level directory: each batched write
+/// (using `put_multiple_start`) is stored as a single object.
+/// Directories are stored in a length-delimited format with a 1MiB limit. The length field is a
+/// u32 and the directories are stored in root-to-leaves topological order, the same way they will
+/// be returned to the client in get_recursive.
+#[derive(Clone)]
+pub struct ObjectStoreDirectoryService {
+    object_store: Arc<dyn ObjectStore>,
+    base_path: Path,
+}
+
+#[instrument(level = Level::TRACE, skip_all, fields(base_path = %base_path, blob.digest = %digest), ret(Display))]
+fn derive_dirs_path(base_path: &Path, digest: &B3Digest) -> Path {
+    base_path
+        .child("dirs")
+        .child("b3")
+        .child(HEXLOWER.encode(&digest.as_slice()[..2]))
+        .child(HEXLOWER.encode(digest.as_slice()))
+}
+
+#[allow(clippy::identity_op)]
+const MAX_FRAME_LENGTH: usize = 1 * 1024 * 1024; // 1 MiB
+
+impl ObjectStoreDirectoryService {
+    /// Constructs a new [ObjectStoreDirectoryService] from a [Url] supported by
+    /// [object_store].
+    /// Any path suffix becomes the base path of the object store.
+    /// Additional options, the same as in [object_store::parse_url_opts],
+    /// can be passed.
+    pub fn parse_url_opts<I, K, V>(url: &Url, options: I) -> Result<Self, object_store::Error>
+    where
+        I: IntoIterator<Item = (K, V)>,
+        K: AsRef<str>,
+        V: Into<String>,
+    {
+        let (object_store, path) = object_store::parse_url_opts(url, options)?;
+
+        Ok(Self {
+            object_store: Arc::new(object_store),
+            base_path: path,
+        })
+    }
+
+    /// Like [Self::parse_url_opts], except without the options.
+    pub fn parse_url(url: &Url) -> Result<Self, object_store::Error> {
+        Self::parse_url_opts(url, Vec::<(String, String)>::new())
+    }
+}
+
+#[async_trait]
+impl DirectoryService for ObjectStoreDirectoryService {
+    /// These are the same steps as for get_recursive anyway, so we just call get_recursive,
+    /// return the first element of the stream, and drop the rest of the request.
+    #[instrument(skip(self, digest), fields(directory.digest = %digest))]
+    async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
+        self.get_recursive(digest).take(1).next().await.transpose()
+    }
+
+    #[instrument(skip(self, directory), fields(directory.digest = %directory.digest()))]
+    async fn put(&self, directory: proto::Directory) -> Result<B3Digest, Error> {
+        if !directory.directories.is_empty() {
+            return Err(Error::InvalidRequest(
+                    "only put_multiple_start is supported by the ObjectStoreDirectoryService for directories with children".into(),
+            ));
+        }
+
+        let mut handle = self.put_multiple_start();
+        handle.put(directory).await?;
+        handle.close().await
+    }
+
+    #[instrument(skip_all, fields(directory.digest = %root_directory_digest))]
+    fn get_recursive(
+        &self,
+        root_directory_digest: &B3Digest,
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
+        // The Directory digests we're expecting to receive.
+        let mut expected_directory_digests: HashSet<B3Digest> =
+            HashSet::from([root_directory_digest.clone()]);
+
+        let dir_path = derive_dirs_path(&self.base_path, root_directory_digest);
+        let object_store = self.object_store.clone();
+
+        Box::pin(
+            (async move {
+                let stream = match object_store.get(&dir_path).await {
+                    Ok(v) => v.into_stream(),
+                    Err(object_store::Error::NotFound { .. }) => {
+                        return Ok(Either::Left(futures::stream::empty()))
+                    }
+                    Err(e) => return Err(std::io::Error::from(e).into()),
+                };
+
+                // get a reader of the response body.
+                let r = tokio_util::io::StreamReader::new(stream);
+                let decompressed_stream = async_compression::tokio::bufread::ZstdDecoder::new(r);
+
+                // the subdirectories are stored in a length delimited format
+                let delimited_stream = LengthDelimitedCodec::builder()
+                    .max_frame_length(MAX_FRAME_LENGTH)
+                    .length_field_type::<u32>()
+                    .new_read(decompressed_stream);
+
+                let dirs_stream = delimited_stream.map_err(Error::from).and_then(move |buf| {
+                    futures::future::ready((|| {
+                        let mut hasher = blake3::Hasher::new();
+                        let digest: B3Digest = hasher.update(&buf).finalize().as_bytes().into();
+
+                        // Ensure we only decode directory objects whose digests we expect
+                        let was_expected = expected_directory_digests.remove(&digest);
+                        if !was_expected {
+                            return Err(crate::Error::StorageError(format!(
+                                "received unexpected directory {}",
+                                digest
+                            )));
+                        }
+
+                        let directory = proto::Directory::decode(&*buf).map_err(|e| {
+                            warn!("unable to parse directory {}: {}", digest, e);
+                            Error::StorageError(e.to_string())
+                        })?;
+
+                        for directory in &directory.directories {
+                            // Allow the children to appear next
+                            expected_directory_digests.insert(
+                                B3Digest::try_from(directory.digest.clone())
+                                    .map_err(|e| Error::StorageError(e.to_string()))?,
+                            );
+                        }
+
+                        Ok(directory)
+                    })())
+                });
+
+                Ok(Either::Right(dirs_stream))
+            })
+            .try_flatten_stream(),
+        )
+    }
+
+    #[instrument(skip_all)]
+    fn put_multiple_start(&self) -> Box<(dyn DirectoryPutter + 'static)>
+    where
+        Self: Clone,
+    {
+        Box::new(ObjectStoreDirectoryPutter::new(
+            self.object_store.clone(),
+            self.base_path.clone(),
+        ))
+    }
+}
+
+struct ObjectStoreDirectoryPutter {
+    object_store: Arc<dyn ObjectStore>,
+    base_path: Path,
+
+    directory_validator: Option<ClosureValidator>,
+}
+
+impl ObjectStoreDirectoryPutter {
+    fn new(object_store: Arc<dyn ObjectStore>, base_path: Path) -> Self {
+        Self {
+            object_store,
+            base_path,
+            directory_validator: Some(Default::default()),
+        }
+    }
+}
+
+#[async_trait]
+impl DirectoryPutter for ObjectStoreDirectoryPutter {
+    #[instrument(level = "trace", skip_all, fields(directory.digest=%directory.digest()), err)]
+    async fn put(&mut self, directory: proto::Directory) -> Result<(), Error> {
+        match self.directory_validator {
+            None => return Err(Error::StorageError("already closed".to_string())),
+            Some(ref mut validator) => {
+                validator.add(directory)?;
+            }
+        }
+
+        Ok(())
+    }
+
+    #[instrument(level = "trace", skip_all, ret, err)]
+    async fn close(&mut self) -> Result<B3Digest, Error> {
+        let validator = match self.directory_validator.take() {
+            None => return Err(Error::InvalidRequest("already closed".to_string())),
+            Some(validator) => validator,
+        };
+
+        // retrieve the validated directories.
+        // It is important that they are in topological order (root first),
+        // since that's how we want to retrieve them from the object store in the end.
+        let directories = validator.finalize_root_to_leaves()?;
+
+        // Get the root digest
+        let root_digest = directories
+            .first()
+            .ok_or_else(|| Error::InvalidRequest("got no directories".to_string()))?
+            .digest();
+
+        let dir_path = derive_dirs_path(&self.base_path, &root_digest);
+
+        match self.object_store.head(&dir_path).await {
+            // directory tree already exists, nothing to do
+            Ok(_) => {
+                trace!("directory tree already exists");
+            }
+
+            // directory tree does not yet exist, compress and upload.
+            Err(object_store::Error::NotFound { .. }) => {
+                trace!("uploading directory tree");
+
+                let object_store_writer =
+                    object_store::buffered::BufWriter::new(self.object_store.clone(), dir_path);
+                let compressed_writer =
+                    async_compression::tokio::write::ZstdEncoder::new(object_store_writer);
+                let mut directories_sink = LengthDelimitedCodec::builder()
+                    .max_frame_length(MAX_FRAME_LENGTH)
+                    .length_field_type::<u32>()
+                    .new_write(compressed_writer);
+
+                for directory in directories {
+                    directories_sink
+                        .send(directory.encode_to_vec().into())
+                        .await?;
+                }
+
+                let mut compressed_writer = directories_sink.into_inner();
+                compressed_writer.shutdown().await?;
+            }
+            // other error
+            Err(err) => Err(std::io::Error::from(err))?,
+        }
+
+        Ok(root_digest)
+    }
+}
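A hedged sketch of the object layout described above: zstd over u32 length-delimited frames, written root first. The helper below round-trips arbitrary frames; it uses the codec's default maximum frame length, where the real service enforces `MAX_FRAME_LENGTH` and writes protobuf-encoded Directory messages as the frames:

```rust
use futures::{SinkExt, TryStreamExt};
use tokio::io::AsyncWriteExt;
use tokio_util::codec::LengthDelimitedCodec;

async fn roundtrip(frames: Vec<bytes::Bytes>) -> std::io::Result<Vec<bytes::Bytes>> {
    let mut buf = Vec::new();
    {
        // Encode: u32 length-delimited frames into a zstd stream.
        let zstd = async_compression::tokio::write::ZstdEncoder::new(&mut buf);
        let mut sink = LengthDelimitedCodec::builder()
            .length_field_type::<u32>()
            .new_write(zstd);
        for frame in frames {
            sink.send(frame).await?;
        }
        // Finalize the zstd frame before reading back.
        sink.into_inner().shutdown().await?;
    }

    // Decode: decompress and split the frames again.
    let zstd = async_compression::tokio::bufread::ZstdDecoder::new(&buf[..]);
    LengthDelimitedCodec::builder()
        .length_field_type::<u32>()
        .new_read(zstd)
        .map_ok(|frame| frame.freeze())
        .try_collect()
        .await
}
```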
diff --git a/tvix/castore/src/directoryservice/sled.rs b/tvix/castore/src/directoryservice/sled.rs
index e4a4c2bbed..9490a49c00 100644
--- a/tvix/castore/src/directoryservice/sled.rs
+++ b/tvix/castore/src/directoryservice/sled.rs
@@ -37,12 +37,23 @@ impl SledDirectoryService {
 impl DirectoryService for SledDirectoryService {
     #[instrument(skip(self, digest), fields(directory.digest = %digest))]
     async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
-        match self.db.get(digest.as_slice()) {
+        let resp = tokio::task::spawn_blocking({
+            let db = self.db.clone();
+            let digest = digest.clone();
+            move || db.get(digest.as_slice())
+        })
+        .await?
+        .map_err(|e| {
+            warn!("failed to retrieve directory: {}", e);
+            Error::StorageError(format!("failed to retrieve directory: {}", e))
+        })?;
+
+        match resp {
             // The directory was not found, return
-            Ok(None) => Ok(None),
+            None => Ok(None),
 
             // The directory was found, try to parse the data as Directory message
-            Ok(Some(data)) => match Directory::decode(&*data) {
+            Some(data) => match Directory::decode(&*data) {
                 Ok(directory) => {
                     // Validate the retrieved Directory indeed has the
                     // digest we expect it to have, to detect corruptions.
@@ -70,35 +81,38 @@ impl DirectoryService for SledDirectoryService {
                     Err(Error::StorageError(e.to_string()))
                 }
             },
-            // some storage error?
-            Err(e) => Err(Error::StorageError(e.to_string())),
         }
     }
 
     #[instrument(skip(self, directory), fields(directory.digest = %directory.digest()))]
     async fn put(&self, directory: proto::Directory) -> Result<B3Digest, Error> {
-        let digest = directory.digest();
-
-        // validate the directory itself.
-        if let Err(e) = directory.validate() {
-            return Err(Error::InvalidRequest(format!(
-                "directory {} failed validation: {}",
-                digest, e,
-            )));
-        }
-        // store it
-        let result = self.db.insert(digest.as_slice(), directory.encode_to_vec());
-        if let Err(e) = result {
-            return Err(Error::StorageError(e.to_string()));
-        }
-        Ok(digest)
+        tokio::task::spawn_blocking({
+            let db = self.db.clone();
+            move || {
+                let digest = directory.digest();
+
+                // validate the directory itself.
+                if let Err(e) = directory.validate() {
+                    return Err(Error::InvalidRequest(format!(
+                        "directory {} failed validation: {}",
+                        digest, e,
+                    )));
+                }
+                // store it
+                db.insert(digest.as_slice(), directory.encode_to_vec())
+                    .map_err(|e| Error::StorageError(e.to_string()))?;
+
+                Ok(digest)
+            }
+        })
+        .await?
     }
 
     #[instrument(skip_all, fields(directory.digest = %root_directory_digest))]
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>> {
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
         traverse_directory(self.clone(), root_directory_digest)
     }
 
@@ -143,25 +157,32 @@ impl DirectoryPutter for SledDirectoryPutter {
         match self.directory_validator.take() {
             None => Err(Error::InvalidRequest("already closed".to_string())),
             Some(validator) => {
-                // retrieve the validated directories.
-                let directories = validator.finalize()?;
-
-                // Get the root digest, which is at the end (cf. insertion order)
-                let root_digest = directories
-                    .last()
-                    .ok_or_else(|| Error::InvalidRequest("got no directories".to_string()))?
-                    .digest();
-
-                let mut batch = sled::Batch::default();
-                for directory in directories {
-                    batch.insert(directory.digest().as_slice(), directory.encode_to_vec());
-                }
-
-                self.tree
-                    .apply_batch(batch)
-                    .map_err(|e| Error::StorageError(format!("unable to apply batch: {}", e)))?;
-
-                Ok(root_digest)
+                // Insert all directories as a batch.
+                tokio::task::spawn_blocking({
+                    let tree = self.tree.clone();
+                    move || {
+                        // retrieve the validated directories.
+                        let directories = validator.finalize()?;
+
+                        // Get the root digest, which is at the end (cf. insertion order)
+                        let root_digest = directories
+                            .last()
+                            .ok_or_else(|| Error::InvalidRequest("got no directories".to_string()))?
+                            .digest();
+
+                        let mut batch = sled::Batch::default();
+                        for directory in directories {
+                            batch.insert(directory.digest().as_slice(), directory.encode_to_vec());
+                        }
+
+                        tree.apply_batch(batch).map_err(|e| {
+                            Error::StorageError(format!("unable to apply batch: {}", e))
+                        })?;
+
+                        Ok(root_digest)
+                    }
+                })
+                .await?
             }
         }
     }
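The recurring pattern in this file: sled's API is synchronous, so every call hops onto tokio's blocking pool with cloned handles. A minimal sketch, assuming the castore `Error` type with its `From<JoinError>` impl:

```rust
use tvix_castore::Error;

// The first `?` converts a JoinError (panicked blocking task) via
// From<JoinError>; sled::Error is then mapped to a StorageError,
// matching the diff above.
async fn get_blocking(db: sled::Db, key: Vec<u8>) -> Result<Option<sled::IVec>, Error> {
    tokio::task::spawn_blocking(move || db.get(key))
        .await?
        .map_err(|e| Error::StorageError(e.to_string()))
}
```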
diff --git a/tvix/castore/src/directoryservice/tests/mod.rs b/tvix/castore/src/directoryservice/tests/mod.rs
index 50c8a5c6d3..cc3c5b788a 100644
--- a/tvix/castore/src/directoryservice/tests/mod.rs
+++ b/tvix/castore/src/directoryservice/tests/mod.rs
@@ -26,7 +26,8 @@ use self::utils::make_grpc_directory_service_client;
 #[case::grpc(make_grpc_directory_service_client().await)]
 #[case::memory(directoryservice::from_addr("memory://").await.unwrap())]
 #[case::sled(directoryservice::from_addr("sled://").await.unwrap())]
-#[cfg_attr(feature = "cloud", case::bigtable(directoryservice::from_addr("bigtable://instance-1?project_id=project-1&table_name=table-1&family_name=cf1").await.unwrap()))]
+#[case::objectstore(directoryservice::from_addr("objectstore+memory://").await.unwrap())]
+#[cfg_attr(all(feature = "cloud", feature = "integration"), case::bigtable(directoryservice::from_addr("bigtable://instance-1?project_id=project-1&table_name=table-1&family_name=cf1").await.unwrap()))]
 pub fn directory_services(#[case] directory_service: impl DirectoryService) {}
 
 /// Ensures asking for a directory that doesn't exist returns a Ok(None).
diff --git a/tvix/castore/src/directoryservice/traverse.rs b/tvix/castore/src/directoryservice/traverse.rs
index 573581edbd..17a51ae2bb 100644
--- a/tvix/castore/src/directoryservice/traverse.rs
+++ b/tvix/castore/src/directoryservice/traverse.rs
@@ -1,95 +1,72 @@
 use super::DirectoryService;
-use crate::{proto::NamedNode, B3Digest, Error};
-use std::os::unix::ffi::OsStrExt;
+use crate::{
+    proto::{node::Node, NamedNode},
+    B3Digest, Error, Path,
+};
 use tracing::{instrument, warn};
 
 /// This descends from a (root) node to the given (sub)path, returning the Node
 /// at that path, or none, if there's nothing at that path.
-#[instrument(skip(directory_service))]
+#[instrument(skip(directory_service, path), fields(%path))]
 pub async fn descend_to<DS>(
     directory_service: DS,
-    root_node: crate::proto::node::Node,
-    path: &std::path::Path,
-) -> Result<Option<crate::proto::node::Node>, Error>
+    root_node: Node,
+    path: impl AsRef<Path> + std::fmt::Display,
+) -> Result<Option<Node>, Error>
 where
     DS: AsRef<dyn DirectoryService>,
 {
-    // strip a possible `/` prefix from the path.
-    let path = {
-        if path.starts_with("/") {
-            path.strip_prefix("/").unwrap()
-        } else {
-            path
-        }
-    };
-
-    let mut cur_node = root_node;
-    let mut it = path.components();
-
-    loop {
-        match it.next() {
-            None => {
-                // the (remaining) path is empty, return the node we're current at.
-                return Ok(Some(cur_node));
+    let mut parent_node = root_node;
+    for component in path.as_ref().components() {
+        match parent_node {
+            Node::File(_) | Node::Symlink(_) => {
+                // There's still some path left, but the parent node is not a directory.
+                // This means the path doesn't exist, as we can't reach it.
+                return Ok(None);
             }
-            Some(first_component) => {
-                match cur_node {
-                    crate::proto::node::Node::File(_) | crate::proto::node::Node::Symlink(_) => {
-                        // There's still some path left, but the current node is no directory.
-                        // This means the path doesn't exist, as we can't reach it.
-                        return Ok(None);
-                    }
-                    crate::proto::node::Node::Directory(directory_node) => {
-                        let digest: B3Digest = directory_node.digest.try_into().map_err(|_e| {
-                            Error::StorageError("invalid digest length".to_string())
+            Node::Directory(directory_node) => {
+                let digest: B3Digest = directory_node
+                    .digest
+                    .try_into()
+                    .map_err(|_e| Error::StorageError("invalid digest length".to_string()))?;
+
+                // fetch the linked node from the directory_service.
+                let directory =
+                    directory_service
+                        .as_ref()
+                        .get(&digest)
+                        .await?
+                        .ok_or_else(|| {
+                            // If we didn't get the directory node that's linked, that's a store inconsistency, bail out!
+                            warn!("directory {} does not exist", digest);
+
+                            Error::StorageError(format!("directory {} does not exist", digest))
                         })?;
 
-                        // fetch the linked node from the directory_service
-                        match directory_service.as_ref().get(&digest).await? {
-                            // If we didn't get the directory node that's linked, that's a store inconsistency, bail out!
-                            None => {
-                                warn!("directory {} does not exist", digest);
-
-                                return Err(Error::StorageError(format!(
-                                    "directory {} does not exist",
-                                    digest
-                                )));
-                            }
-                            Some(directory) => {
-                                // look for first_component in the [Directory].
-                                // FUTUREWORK: as the nodes() iterator returns in a sorted fashion, we
-                                // could stop as soon as e.name is larger than the search string.
-                                let child_node = directory.nodes().find(|n| {
-                                    n.get_name() == first_component.as_os_str().as_bytes()
-                                });
-
-                                match child_node {
-                                    // child node not found means there's no such element inside the directory.
-                                    None => {
-                                        return Ok(None);
-                                    }
-                                    // child node found, return to top-of loop to find the next
-                                    // node in the path.
-                                    Some(child_node) => {
-                                        cur_node = child_node;
-                                    }
-                                }
-                            }
-                        }
-                    }
+                // look for the component in the [Directory].
+                // FUTUREWORK: as the nodes() iterator returns in a sorted fashion, we
+                // could stop as soon as e.name is larger than the search string.
+                if let Some(child_node) = directory.nodes().find(|n| n.get_name() == component) {
+                    // child node found, update parent_node to it and continue.
+                    parent_node = child_node;
+                } else {
+                    // child node not found means there's no such element inside the directory.
+                    return Ok(None);
                 }
             }
         }
     }
+
+    // We traversed the entire path, so this must be the node.
+    Ok(Some(parent_node))
 }
 
 #[cfg(test)]
 mod tests {
-    use std::path::PathBuf;
-
     use crate::{
         directoryservice,
         fixtures::{DIRECTORY_COMPLICATED, DIRECTORY_WITH_KEEP},
+        PathBuf,
     };
 
     use super::descend_to;
@@ -132,7 +109,7 @@ mod tests {
             let resp = descend_to(
                 &directory_service,
                 node_directory_complicated.clone(),
-                &PathBuf::from(""),
+                "".parse::<PathBuf>().unwrap(),
             )
             .await
             .expect("must succeed");
@@ -145,7 +122,7 @@ mod tests {
             let resp = descend_to(
                 &directory_service,
                 node_directory_complicated.clone(),
-                &PathBuf::from("keep"),
+                "keep".parse::<PathBuf>().unwrap(),
             )
             .await
             .expect("must succeed");
@@ -158,7 +135,7 @@ mod tests {
             let resp = descend_to(
                 &directory_service,
                 node_directory_complicated.clone(),
-                &PathBuf::from("keep/.keep"),
+                "keep/.keep".parse::<PathBuf>().unwrap(),
             )
             .await
             .expect("must succeed");
@@ -166,25 +143,12 @@ mod tests {
             assert_eq!(Some(node_file_keep.clone()), resp);
         }
 
-        // traversal to `keep/.keep` should return the node for the .keep file
-        {
-            let resp = descend_to(
-                &directory_service,
-                node_directory_complicated.clone(),
-                &PathBuf::from("/keep/.keep"),
-            )
-            .await
-            .expect("must succeed");
-
-            assert_eq!(Some(node_file_keep), resp);
-        }
-
         // traversal to `void` should return None (doesn't exist)
         {
             let resp = descend_to(
                 &directory_service,
                 node_directory_complicated.clone(),
-                &PathBuf::from("void"),
+                "void".parse::<PathBuf>().unwrap(),
             )
             .await
             .expect("must succeed");
@@ -192,12 +156,12 @@ mod tests {
             assert_eq!(None, resp);
         }
 
-        // traversal to `void` should return None (doesn't exist)
+        // traversal to `v/oid` should return None (doesn't exist)
         {
             let resp = descend_to(
                 &directory_service,
                 node_directory_complicated.clone(),
-                &PathBuf::from("//v/oid"),
+                "v/oid".parse::<PathBuf>().unwrap(),
             )
             .await
             .expect("must succeed");
@@ -211,25 +175,12 @@ mod tests {
             let resp = descend_to(
                 &directory_service,
                 node_directory_complicated.clone(),
-                &PathBuf::from("keep/.keep/foo"),
+                "keep/.keep/foo".parse::<PathBuf>().unwrap(),
             )
             .await
             .expect("must succeed");
 
             assert_eq!(None, resp);
         }
-
-        // traversal to a subpath of '/' should return the root node.
-        {
-            let resp = descend_to(
-                &directory_service,
-                node_directory_complicated.clone(),
-                &PathBuf::from("/"),
-            )
-            .await
-            .expect("must succeed");
-
-            assert_eq!(Some(node_directory_complicated), resp);
-        }
     }
 }
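A hypothetical caller of the new signature; paths are parsed into the castore `PathBuf` up front, presumably why the removed absolute-path tests (`/keep/.keep`, `//v/oid`, `/`) are gone, as leading slashes no longer parse:

```rust
use tvix_castore::directoryservice::{self, DirectoryService};
use tvix_castore::proto::node::Node;
use tvix_castore::{Error, PathBuf};

// Hedged sketch: root_node is assumed to come from elsewhere (e.g. a PathInfo).
async fn lookup(
    directory_service: Box<dyn DirectoryService>,
    root_node: Node,
) -> Result<(), Error> {
    let path: PathBuf = "keep/.keep".parse().expect("valid castore path");
    match directoryservice::descend_to(&directory_service, root_node, path).await? {
        Some(node) => println!("found: {:?}", node),
        None => println!("nothing at that subpath"),
    }
    Ok(())
}
```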
diff --git a/tvix/castore/src/directoryservice/utils.rs b/tvix/castore/src/directoryservice/utils.rs
index 01c521076c..a0ba395ecd 100644
--- a/tvix/castore/src/directoryservice/utils.rs
+++ b/tvix/castore/src/directoryservice/utils.rs
@@ -2,14 +2,16 @@ use super::DirectoryService;
 use crate::proto;
 use crate::B3Digest;
 use crate::Error;
-use async_stream::stream;
+use async_stream::try_stream;
 use futures::stream::BoxStream;
 use std::collections::{HashSet, VecDeque};
+use tracing::instrument;
 use tracing::warn;
 
 /// Traverses a [proto::Directory] from the root to the children.
 ///
 /// This is mostly BFS, but directories are only returned once.
+#[instrument(skip(directory_service))]
 pub fn traverse_directory<'a, DS: DirectoryService + 'static>(
     directory_service: DS,
     root_directory_digest: &B3Digest,
@@ -23,60 +25,53 @@ pub fn traverse_directory<'a, DS: DirectoryService + 'static>(
     // We omit sending the same directories multiple times.
     let mut sent_directory_digests: HashSet<B3Digest> = HashSet::new();
 
-    let stream = stream! {
+    Box::pin(try_stream! {
         while let Some(current_directory_digest) = worklist_directory_digests.pop_front() {
-            match directory_service.get(&current_directory_digest).await {
+            let current_directory = directory_service.get(&current_directory_digest).await.map_err(|e| {
+                warn!("failed to look up directory");
+                Error::StorageError(format!(
+                    "unable to look up directory {}: {}",
+                    current_directory_digest, e
+                ))
+            })?.ok_or_else(|| {
                 // if it's not there, we have an inconsistent store!
-                Ok(None) => {
-                    warn!("directory {} does not exist", current_directory_digest);
-                    yield Err(Error::StorageError(format!(
-                        "directory {} does not exist",
-                        current_directory_digest
-                    )));
-                }
-                Err(e) => {
-                    warn!("failed to look up directory");
-                    yield Err(Error::StorageError(format!(
-                        "unable to look up directory {}: {}",
-                        current_directory_digest, e
-                    )));
-                }
+                warn!("directory {} does not exist", current_directory_digest);
+                Error::StorageError(format!(
+                    "directory {} does not exist",
+                    current_directory_digest
+                ))
 
-                // if we got it
-                Ok(Some(current_directory)) => {
-                    // validate, we don't want to send invalid directories.
-                    if let Err(e) = current_directory.validate() {
-                        warn!("directory failed validation: {}", e.to_string());
-                        yield Err(Error::StorageError(format!(
-                            "invalid directory: {}",
-                            current_directory_digest
-                        )));
-                    }
+            })?;
 
-                    // We're about to send this directory, so let's avoid sending it again if a
-                    // descendant has it.
-                    sent_directory_digests.insert(current_directory_digest);
+            // validate, we don't want to send invalid directories.
+            current_directory.validate().map_err(|e| {
+               warn!("directory failed validation: {}", e.to_string());
+               Error::StorageError(format!(
+                   "invalid directory: {}",
+                   current_directory_digest
+               ))
+            })?;
 
-                    // enqueue all child directory digests to the work queue, as
-                    // long as they're not part of the worklist or already sent.
-                    // This panics if the digest looks invalid, it's supposed to be checked first.
-                    for child_directory_node in &current_directory.directories {
-                        // TODO: propagate error
-                        let child_digest: B3Digest = child_directory_node.digest.clone().try_into().unwrap();
+            // We're about to send this directory, so let's avoid sending it again if a
+            // descendant has it.
+            sent_directory_digests.insert(current_directory_digest);
 
-                        if worklist_directory_digests.contains(&child_digest)
-                            || sent_directory_digests.contains(&child_digest)
-                        {
-                            continue;
-                        }
-                        worklist_directory_digests.push_back(child_digest);
-                    }
+            // enqueue all child directory digests to the work queue, as
+            // long as they're not part of the worklist or already sent.
+            // This panics if the digest looks invalid, it's supposed to be checked first.
+            for child_directory_node in &current_directory.directories {
+                // TODO: propagate error
+                let child_digest: B3Digest = child_directory_node.digest.clone().try_into().unwrap();
 
-                    yield Ok(current_directory);
+                if worklist_directory_digests.contains(&child_digest)
+                    || sent_directory_digests.contains(&child_digest)
+                {
+                    continue;
                 }
-            };
-        }
-    };
+                worklist_directory_digests.push_back(child_digest);
+            }
 
-    Box::pin(stream)
+            yield current_directory;
+        }
+    })
 }
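`try_stream!` is what makes the flattening above possible: `?` inside the block terminates the stream by yielding one final `Err` item, instead of requiring explicit `yield Err(...)` arms. A self-contained sketch:

```rust
use async_stream::try_stream;
use futures::stream::BoxStream;

fn check(n: u32) -> Result<u32, String> {
    if n == 13 {
        Err(format!("unlucky number {}", n))
    } else {
        Ok(n)
    }
}

// Each successful value is yielded as Ok(n); the first failure from `?`
// becomes the stream's last item.
fn numbers_up_to(limit: u32) -> BoxStream<'static, Result<u32, String>> {
    Box::pin(try_stream! {
        for n in 0..limit {
            yield check(n)?;
        }
    })
}
```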
diff --git a/tvix/castore/src/errors.rs b/tvix/castore/src/errors.rs
index e807a19b9e..8343d0774a 100644
--- a/tvix/castore/src/errors.rs
+++ b/tvix/castore/src/errors.rs
@@ -1,4 +1,3 @@
-use std::sync::PoisonError;
 use thiserror::Error;
 use tokio::task::JoinError;
 use tonic::Status;
@@ -13,12 +12,6 @@ pub enum Error {
     StorageError(String),
 }
 
-impl<T> From<PoisonError<T>> for Error {
-    fn from(value: PoisonError<T>) -> Self {
-        Error::StorageError(value.to_string())
-    }
-}
-
 impl From<JoinError> for Error {
     fn from(value: JoinError) -> Self {
         Error::StorageError(value.to_string())
diff --git a/tvix/castore/src/fs/inodes.rs b/tvix/castore/src/fs/inodes.rs
index c22bd4b2eb..bdd4595434 100644
--- a/tvix/castore/src/fs/inodes.rs
+++ b/tvix/castore/src/fs/inodes.rs
@@ -57,16 +57,18 @@ impl InodeData {
                     children.len() as u64
                 }
             },
-            mode: match self {
-                InodeData::Regular(_, _, false) => libc::S_IFREG | 0o444, // no-executable files
-                InodeData::Regular(_, _, true) => libc::S_IFREG | 0o555,  // executable files
-                InodeData::Symlink(_) => libc::S_IFLNK | 0o444,
-                InodeData::Directory(_) => libc::S_IFDIR | 0o555,
-            },
+            mode: self.as_fuse_type() | self.mode(),
             ..Default::default()
         }
     }
 
+    fn mode(&self) -> u32 {
+        match self {
+            InodeData::Regular(_, _, false) | InodeData::Symlink(_) => 0o444,
+            InodeData::Regular(_, _, true) | InodeData::Directory(_) => 0o555,
+        }
+    }
+
     pub fn as_fuse_entry(&self, inode: u64) -> fuse_backend_rs::api::filesystem::Entry {
         fuse_backend_rs::api::filesystem::Entry {
             inode,
diff --git a/tvix/castore/src/fs/virtiofs.rs b/tvix/castore/src/fs/virtiofs.rs
index 846270d285..d63e2f2bdd 100644
--- a/tvix/castore/src/fs/virtiofs.rs
+++ b/tvix/castore/src/fs/virtiofs.rs
@@ -34,6 +34,7 @@ enum Error {
     /// Invalid descriptor chain.
     InvalidDescriptorChain,
     /// Failed to handle filesystem requests.
+    #[allow(dead_code)]
     HandleRequests(fuse_backend_rs::Error),
     /// Failed to construct new vhost user daemon.
     NewDaemon,
diff --git a/tvix/castore/src/import.rs b/tvix/castore/src/import.rs
deleted file mode 100644
index e16bda1f64..0000000000
--- a/tvix/castore/src/import.rs
+++ /dev/null
@@ -1,361 +0,0 @@
-use crate::blobservice::BlobService;
-use crate::directoryservice::DirectoryPutter;
-use crate::directoryservice::DirectoryService;
-use crate::proto::node::Node;
-use crate::proto::Directory;
-use crate::proto::DirectoryNode;
-use crate::proto::FileNode;
-use crate::proto::SymlinkNode;
-use crate::Error as CastoreError;
-use async_stream::stream;
-use futures::pin_mut;
-use futures::{Stream, StreamExt};
-use std::fs::FileType;
-use tracing::Level;
-
-#[cfg(target_family = "unix")]
-use std::os::unix::ffi::OsStrExt;
-
-use std::{
-    collections::HashMap,
-    fmt::Debug,
-    os::unix::prelude::PermissionsExt,
-    path::{Path, PathBuf},
-};
-use tracing::instrument;
-use walkdir::DirEntry;
-use walkdir::WalkDir;
-
-#[cfg(debug_assertions)]
-use std::collections::HashSet;
-
-#[derive(Debug, thiserror::Error)]
-pub enum Error {
-    #[error("failed to upload directory at {0}: {1}")]
-    UploadDirectoryError(PathBuf, CastoreError),
-
-    #[error("invalid encoding encountered for entry {0:?}")]
-    InvalidEncoding(PathBuf),
-
-    #[error("unable to stat {0}: {1}")]
-    UnableToStat(PathBuf, std::io::Error),
-
-    #[error("unable to open {0}: {1}")]
-    UnableToOpen(PathBuf, std::io::Error),
-
-    #[error("unable to read {0}: {1}")]
-    UnableToRead(PathBuf, std::io::Error),
-
-    #[error("unsupported file {0} type: {1:?}")]
-    UnsupportedFileType(PathBuf, FileType),
-}
-
-impl From<CastoreError> for Error {
-    fn from(value: CastoreError) -> Self {
-        match value {
-            CastoreError::InvalidRequest(_) => panic!("tvix bug"),
-            CastoreError::StorageError(_) => panic!("error"),
-        }
-    }
-}
-
-impl From<Error> for std::io::Error {
-    fn from(value: Error) -> Self {
-        std::io::Error::new(std::io::ErrorKind::Other, value)
-    }
-}
-
-/// Walk the filesystem at a given path and returns a level-keyed list of directory entries.
-///
-/// This is how [`ingest_path`] assembles the set of entries to pass on [`ingest_entries`].
-/// This low-level function can be used if additional filtering or processing is required on the
-/// entries.
-///
-/// Level here is in the context of graph theory, e.g. 2-level nodes
-/// are nodes that are at depth 2.
-///
-/// This function will walk the filesystem using `walkdir` and will consume
-/// `O(#number of entries)` space.
-#[instrument(fields(path), err)]
-pub fn walk_path_for_ingestion<P>(path: P) -> Result<Vec<Vec<DirEntry>>, Error>
-where
-    P: AsRef<Path> + std::fmt::Debug,
-{
-    let mut entries_per_depths: Vec<Vec<DirEntry>> = vec![Vec::new()];
-    for entry in WalkDir::new(path.as_ref())
-        .follow_links(false)
-        .follow_root_links(false)
-        .contents_first(false)
-        .sort_by_file_name()
-        .into_iter()
-    {
-        // Entry could be a NotFound, if the root path specified does not exist.
-        let entry = entry.map_err(|e| {
-            Error::UnableToOpen(
-                PathBuf::from(path.as_ref()),
-                e.into_io_error().expect("walkdir err must be some"),
-            )
-        })?;
-
-        if entry.depth() >= entries_per_depths.len() {
-            debug_assert!(
-                entry.depth() == entries_per_depths.len(),
-                "Received unexpected entry with depth {} during descent, previously at {}",
-                entry.depth(),
-                entries_per_depths.len()
-            );
-
-            entries_per_depths.push(vec![entry]);
-        } else {
-            entries_per_depths[entry.depth()].push(entry);
-        }
-    }
-
-    Ok(entries_per_depths)
-}
-
-/// Convert a leveled-key vector of filesystem entries into a stream of
-/// [DirEntry] in a way that honors the Merkle invariant, i.e. from bottom to top.
-pub fn leveled_entries_to_stream(
-    entries_per_depths: Vec<Vec<DirEntry>>,
-) -> impl Stream<Item = DirEntry> {
-    stream! {
-        for level in entries_per_depths.into_iter().rev() {
-            for entry in level.into_iter() {
-                yield entry;
-            }
-        }
-    }
-}
-
-/// Ingests the contents at a given path into the tvix store, interacting with a [BlobService] and
-/// [DirectoryService]. It returns the root node or an error.
-///
-/// It does not follow symlinks at the root, they will be ingested as actual symlinks.
-#[instrument(skip(blob_service, directory_service), fields(path), err)]
-pub async fn ingest_path<'a, BS, DS, P>(
-    blob_service: BS,
-    directory_service: DS,
-    path: P,
-) -> Result<Node, Error>
-where
-    P: AsRef<Path> + std::fmt::Debug,
-    BS: AsRef<dyn BlobService>,
-    DS: AsRef<dyn DirectoryService>,
-{
-    // produce the leveled-key vector of DirEntry.
-    let entries_per_depths = walk_path_for_ingestion(path)?;
-    let direntry_stream = leveled_entries_to_stream(entries_per_depths);
-    pin_mut!(direntry_stream);
-
-    ingest_entries(blob_service, directory_service, direntry_stream).await
-}
-
-/// The Merkle invariant checker is an internal structure to perform bookkeeping of all directory
-/// entries we are ingesting and verifying we are ingesting them in the right order.
-///
-/// That is, whenever we process an entry `L`, we would like to verify if we didn't process earlier
-/// an entry `P` such that `P` is an **ancestor** of `L`.
-///
-/// If such a thing happened, it means that we have processed something like:
-///
-///```no_trust
-///        A
-///       / \
-///      B   C
-///     / \   \
-///    G  F    P <--------- processed before this one
-///           / \                                  |
-///          D  E                                  |
-///              \                                 |
-///               L  <-----------------------------+
-/// ```
-///
-/// This is exactly what must never happen.
-///
-/// Note: this checker is local, it can only see what happens on our side, not on the remote side,
-/// i.e. the different remote services.
-#[derive(Default)]
-#[cfg(debug_assertions)]
-struct MerkleInvariantChecker {
-    seen: HashSet<PathBuf>,
-}
-
-#[cfg(debug_assertions)]
-impl MerkleInvariantChecker {
-    /// See a directory entry and remember it.
-    fn see(&mut self, node: &DirEntry) {
-        self.seen.insert(node.path().to_owned());
-    }
-
-    /// Returns a potential ancestor already seen for that directory entry.
-    fn find_ancestor<'a>(&self, node: &'a DirEntry) -> Option<&'a Path> {
-        node.path().ancestors().find(|p| self.seen.contains(*p))
-    }
-}
-
-/// Ingests elements from the given stream of [`DirEntry`] into a the passed [`BlobService`] and
-/// [`DirectoryService`].
-/// It does not follow symlinks at the root, they will be ingested as actual symlinks.
-#[instrument(skip_all, ret(level = Level::TRACE), err)]
-pub async fn ingest_entries<'a, BS, DS, S>(
-    blob_service: BS,
-    directory_service: DS,
-    #[allow(unused_mut)] mut direntry_stream: S,
-) -> Result<Node, Error>
-where
-    BS: AsRef<dyn BlobService>,
-    DS: AsRef<dyn DirectoryService>,
-    S: Stream<Item = DirEntry> + std::marker::Unpin,
-{
-    #[cfg(debug_assertions)]
-    let mut invariant_checker: MerkleInvariantChecker = Default::default();
-
-    #[cfg(debug_assertions)]
-    let mut direntry_stream = direntry_stream.inspect(|e| {
-        // If we find an ancestor before we see this entry, this means that the caller
-        // broke the contract, refer to the documentation of the invariant checker to
-        // understand the reasoning here.
-        if let Some(ancestor) = invariant_checker.find_ancestor(e) {
-            panic!(
-                "Tvix bug: merkle invariant checker discovered that {} was processed before {}!",
-                ancestor.display(),
-                e.path().display()
-            );
-        }
-
-        invariant_checker.see(e);
-    });
-
-    // For a given path, this holds the [Directory] structs as they are populated.
-    let mut directories: HashMap<PathBuf, Directory> = HashMap::default();
-    let mut maybe_directory_putter: Option<Box<dyn DirectoryPutter>> = None;
-
-    // We need to process a directory's children before processing
-    // the directory itself in order to have all the data needed
-    // to compute the hash.
-
-    let root_node = loop {
-        let entry = match direntry_stream.next().await {
-            Some(entry) => entry,
-            None => {
-                // The last entry of the stream must have depth 0, after which
-                // we break the loop manually.
-                panic!("Tvix bug: unexpected end of stream");
-            }
-        };
-        let file_type = entry.file_type();
-
-        let node = if file_type.is_dir() {
-            // If the entry is a directory, we traversed all its children (and
-            // populated it in `directories`).
-            // If we don't have it in there, it's an empty directory.
-            let directory = directories
-                .remove(entry.path())
-                // In that case, it contained no children
-                .unwrap_or_default();
-
-            let directory_size = directory.size();
-            let directory_digest = directory.digest();
-
-            // Use the directory_putter to upload the directory.
-            // If we don't have one yet (as that's the first one to upload),
-            // initialize the putter.
-            maybe_directory_putter
-                .get_or_insert_with(|| directory_service.as_ref().put_multiple_start())
-                .put(directory)
-                .await?;
-
-            Node::Directory(DirectoryNode {
-                name: entry.file_name().as_bytes().to_owned().into(),
-                digest: directory_digest.into(),
-                size: directory_size,
-            })
-        } else if file_type.is_symlink() {
-            let target: bytes::Bytes = std::fs::read_link(entry.path())
-                .map_err(|e| Error::UnableToStat(entry.path().to_path_buf(), e))?
-                .as_os_str()
-                .as_bytes()
-                .to_owned()
-                .into();
-
-            Node::Symlink(SymlinkNode {
-                name: entry.file_name().as_bytes().to_owned().into(),
-                target,
-            })
-        } else if file_type.is_file() {
-            let metadata = entry
-                .metadata()
-                .map_err(|e| Error::UnableToStat(entry.path().to_path_buf(), e.into()))?;
-
-            let mut file = tokio::fs::File::open(entry.path())
-                .await
-                .map_err(|e| Error::UnableToOpen(entry.path().to_path_buf(), e))?;
-
-            let mut writer = blob_service.as_ref().open_write().await;
-
-            if let Err(e) = tokio::io::copy(&mut file, &mut writer).await {
-                return Err(Error::UnableToRead(entry.path().to_path_buf(), e));
-            };
-
-            let digest = writer
-                .close()
-                .await
-                .map_err(|e| Error::UnableToRead(entry.path().to_path_buf(), e))?;
-
-            Node::File(FileNode {
-                name: entry.file_name().as_bytes().to_vec().into(),
-                digest: digest.into(),
-                size: metadata.len(),
-                // If it's executable by the user, it'll become executable.
-                // This matches nix's dump() function behaviour.
-                executable: metadata.permissions().mode() & 64 != 0,
-            })
-        } else {
-            return Err(Error::UnsupportedFileType(
-                entry.path().to_path_buf(),
-                file_type,
-            ));
-        };
-
-        if entry.depth() == 0 {
-            break node;
-        } else {
-            // calculate the parent path, and make sure we register the node there.
-            // NOTE: entry.depth() > 0
-            let parent_path = entry.path().parent().unwrap().to_path_buf();
-
-            // record node in parent directory, creating a new [proto:Directory] if not there yet.
-            let parent_directory = directories.entry(parent_path).or_default();
-            match node {
-                Node::Directory(e) => parent_directory.directories.push(e),
-                Node::File(e) => parent_directory.files.push(e),
-                Node::Symlink(e) => parent_directory.symlinks.push(e),
-            }
-        }
-    };
-
-    // if there were directories uploaded, make sure we flush the putter, so
-    // they're all persisted to the backend.
-    if let Some(mut directory_putter) = maybe_directory_putter {
-        let root_directory_digest = directory_putter.close().await?;
-
-        #[cfg(debug_assertions)]
-        {
-            if let Node::Directory(directory_node) = &root_node {
-                debug_assert_eq!(
-                    root_directory_digest,
-                    directory_node
-                        .digest
-                        .to_vec()
-                        .try_into()
-                        .expect("invalid digest len")
-                )
-            } else {
-                unreachable!("Tvix bug: directory putter initialized but no root directory node");
-            }
-        }
-    };
-
-    Ok(root_node)
-}
diff --git a/tvix/castore/src/import/archive.rs b/tvix/castore/src/import/archive.rs
new file mode 100644
index 0000000000..0ebb4a2361
--- /dev/null
+++ b/tvix/castore/src/import/archive.rs
@@ -0,0 +1,458 @@
+//! Imports from an archive (tarballs)
+
+use std::collections::HashMap;
+use std::io::{Cursor, Write};
+use std::sync::Arc;
+
+use petgraph::graph::{DiGraph, NodeIndex};
+use petgraph::visit::{DfsPostOrder, EdgeRef};
+use petgraph::Direction;
+use tokio::io::AsyncRead;
+use tokio::sync::Semaphore;
+use tokio::task::JoinSet;
+use tokio_stream::StreamExt;
+use tokio_tar::Archive;
+use tokio_util::io::InspectReader;
+use tracing::{instrument, warn, Level};
+
+use crate::blobservice::BlobService;
+use crate::directoryservice::DirectoryService;
+use crate::import::{ingest_entries, IngestionEntry, IngestionError};
+use crate::proto::node::Node;
+use crate::B3Digest;
+
+type TarPathBuf = std::path::PathBuf;
+
+/// Files no larger than this threshold, in bytes, are uploaded to the [BlobService] in the
+/// background.
+///
+/// This is a u32 since we acquire a weighted semaphore using the size of the blob.
+/// [Semaphore::acquire_many_owned] takes a u32, so we need to ensure the size of
+/// the blob can be represented using a u32 and will not cause an overflow.
+const CONCURRENT_BLOB_UPLOAD_THRESHOLD: u32 = 1024 * 1024;
+
+/// The maximum number of bytes allowed to be buffered in memory to perform async blob uploads.
+const MAX_TARBALL_BUFFER_SIZE: usize = 128 * 1024 * 1024;
+
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+    #[error("unable to construct stream of entries: {0}")]
+    Entries(std::io::Error),
+
+    #[error("unable to read next entry: {0}")]
+    NextEntry(std::io::Error),
+
+    #[error("unable to read path for entry: {0}")]
+    PathRead(std::io::Error),
+
+    #[error("unable to convert path {0} for entry: {1}")]
+    PathConvert(TarPathBuf, std::io::Error),
+
+    #[error("unable to read size field for {0}: {1}")]
+    Size(TarPathBuf, std::io::Error),
+
+    #[error("unable to read mode field for {0}: {1}")]
+    Mode(TarPathBuf, std::io::Error),
+
+    #[error("unable to read link name field for {0}: {1}")]
+    LinkName(TarPathBuf, std::io::Error),
+
+    #[error("unable to read blob contents for {0}: {1}")]
+    BlobRead(TarPathBuf, std::io::Error),
+
+    // FUTUREWORK: proper error for blob finalize
+    #[error("unable to finalize blob {0}: {1}")]
+    BlobFinalize(TarPathBuf, std::io::Error),
+
+    #[error("unsupported tar entry {0} type: {1:?}")]
+    EntryType(TarPathBuf, tokio_tar::EntryType),
+
+    #[error("symlink missing target {0}")]
+    MissingSymlinkTarget(TarPathBuf),
+
+    #[error("unexpected number of top level directory entries")]
+    UnexpectedNumberOfTopLevelEntries,
+}
+
+/// Ingests elements from the given tar [`Archive`] into the passed [`BlobService`] and
+/// [`DirectoryService`].
+#[instrument(skip_all, ret(level = Level::TRACE), err)]
+pub async fn ingest_archive<BS, DS, R>(
+    blob_service: BS,
+    directory_service: DS,
+    mut archive: Archive<R>,
+) -> Result<Node, IngestionError<Error>>
+where
+    BS: BlobService + Clone + 'static,
+    DS: DirectoryService,
+    R: AsyncRead + Unpin,
+{
+    // Since tarballs can have entries in arbitrary order, we need to
+    // buffer all of the directory metadata so we can reorder directory
+    // contents and entries to meet the requirements of the castore.
+
+    // In the first phase, collect up all the regular files and symlinks.
+    let mut nodes = IngestionEntryGraph::new();
+
+    let semaphore = Arc::new(Semaphore::new(MAX_TARBALL_BUFFER_SIZE));
+    let mut async_blob_uploads: JoinSet<Result<(), Error>> = JoinSet::new();
+
+    let mut entries_iter = archive.entries().map_err(Error::Entries)?;
+    while let Some(mut entry) = entries_iter.try_next().await.map_err(Error::NextEntry)? {
+        let tar_path: TarPathBuf = entry.path().map_err(Error::PathRead)?.into();
+
+        // construct a castore PathBuf, which we use in the produced IngestionEntry.
+        let path = crate::path::PathBuf::from_host_path(tar_path.as_path(), true)
+            .map_err(|e| Error::PathConvert(tar_path.clone(), e))?;
+
+        let header = entry.header();
+        let entry = match header.entry_type() {
+            tokio_tar::EntryType::Regular
+            | tokio_tar::EntryType::GNUSparse
+            | tokio_tar::EntryType::Continuous => {
+                let header_size = header
+                    .size()
+                    .map_err(|e| Error::Size(tar_path.clone(), e))?;
+
+                // If the blob is small enough, read it off the wire, compute the digest,
+                // and upload it to the [BlobService] in the background.
+                let (size, digest) = if header_size <= CONCURRENT_BLOB_UPLOAD_THRESHOLD as u64 {
+                    let mut buffer = Vec::with_capacity(header_size as usize);
+                    let mut hasher = blake3::Hasher::new();
+                    let mut reader = InspectReader::new(&mut entry, |bytes| {
+                        hasher.write_all(bytes).unwrap();
+                    });
+
+                    // Ensure that we don't buffer into memory until we've acquired a permit.
+                    // This prevents consuming too much memory when performing concurrent
+                    // blob uploads.
+                    let permit = semaphore
+                        .clone()
+                        // This cast is safe because we ensure header_size is at
+                        // most CONCURRENT_BLOB_UPLOAD_THRESHOLD, which is a u32.
+                        .acquire_many_owned(header_size as u32)
+                        .await
+                        .unwrap();
+                    let size = tokio::io::copy(&mut reader, &mut buffer)
+                        .await
+                        .map_err(|e| Error::Size(tar_path.clone(), e))?;
+
+                    let digest: B3Digest = hasher.finalize().as_bytes().into();
+
+                    {
+                        let blob_service = blob_service.clone();
+                        let digest = digest.clone();
+                        async_blob_uploads.spawn({
+                            let tar_path = tar_path.clone();
+                            async move {
+                                let mut writer = blob_service.open_write().await;
+
+                                tokio::io::copy(&mut Cursor::new(buffer), &mut writer)
+                                    .await
+                                    .map_err(|e| Error::BlobRead(tar_path.clone(), e))?;
+
+                                let blob_digest = writer
+                                    .close()
+                                    .await
+                                    .map_err(|e| Error::BlobFinalize(tar_path, e))?;
+
+                                assert_eq!(digest, blob_digest, "Tvix bug: blob digest mismatch");
+
+                                // Make sure we hold the permit until we finish writing the blob
+                                // to the [BlobService].
+                                drop(permit);
+                                Ok(())
+                            }
+                        });
+                    }
+
+                    (size, digest)
+                } else {
+                    let mut writer = blob_service.open_write().await;
+
+                    let size = tokio::io::copy(&mut entry, &mut writer)
+                        .await
+                        .map_err(|e| Error::BlobRead(tar_path.clone(), e))?;
+
+                    let digest = writer
+                        .close()
+                        .await
+                        .map_err(|e| Error::BlobFinalize(tar_path.clone(), e))?;
+
+                    (size, digest)
+                };
+
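+                // Bit 0o100 (64) of the mode is the user-executable bit; this
+                // mirrors the check done for filesystem ingestion in fs.rs.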
+                let executable = entry
+                    .header()
+                    .mode()
+                    .map_err(|e| Error::Mode(tar_path, e))?
+                    & 64
+                    != 0;
+
+                IngestionEntry::Regular {
+                    path,
+                    size,
+                    executable,
+                    digest,
+                }
+            }
+            tokio_tar::EntryType::Symlink => IngestionEntry::Symlink {
+                target: entry
+                    .link_name()
+                    .map_err(|e| Error::LinkName(tar_path.clone(), e))?
+                    .ok_or_else(|| Error::MissingSymlinkTarget(tar_path.clone()))?
+                    .into_owned()
+                    .into_os_string()
+                    .into_encoded_bytes(),
+                path,
+            },
+            // Push a bogus directory marker so we can make sure this directory
+            // gets created. We don't know the digest and size until after reading
+            // the full tarball.
+            tokio_tar::EntryType::Directory => IngestionEntry::Dir { path },
+
+            tokio_tar::EntryType::XGlobalHeader | tokio_tar::EntryType::XHeader => continue,
+
+            entry_type => return Err(Error::EntryType(tar_path, entry_type).into()),
+        };
+
+        nodes.add(entry)?;
+    }
+
+    while let Some(result) = async_blob_uploads.join_next().await {
+        result.expect("task panicked")?;
+    }
+
+    let root_node = ingest_entries(
+        directory_service,
+        futures::stream::iter(nodes.finalize()?.into_iter().map(Ok)),
+    )
+    .await?;
+
+    Ok(root_node)
+}
+
+/// Keep track of the directory structure of a file tree being ingested. This is used
+/// for ingestion sources which do not provide any ordering or uniqueness guarantees
+/// like tarballs.
+///
+/// If multiple entries with the same path are ingested and not both of them are
+/// directories, the newer entry replaces the older one, disconnecting the old
+/// node's children from the graph.
+///
+/// Once all nodes are ingested, a call to [IngestionEntryGraph::finalize] will return
+/// a list of entries computed by performing a DFS post-order traversal of the graph
+/// from the top-level directory entry.
+///
+/// This expects the directory structure to contain a single top-level directory entry.
+/// An error is returned if this is not the case and ingestion will fail.
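+///
+/// A sketch of the intended usage (entries may arrive in any order; field
+/// values are placeholders):
+/// ```ignore
+/// let mut graph = IngestionEntryGraph::new();
+/// // Adding "a/b/c" implicitly inserts the parent directories "a/b" and "a".
+/// graph.add(file_a_b_c)?;
+/// // DFS post order from the root yields: ["a/b/c", "a/b", "a"].
+/// let entries = graph.finalize()?;
+/// ```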
+struct IngestionEntryGraph {
+    graph: DiGraph<IngestionEntry, ()>,
+    path_to_index: HashMap<crate::path::PathBuf, NodeIndex>,
+    root_node: Option<NodeIndex>,
+}
+
+impl Default for IngestionEntryGraph {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl IngestionEntryGraph {
+    /// Creates a new ingestion entry graph.
+    pub fn new() -> Self {
+        IngestionEntryGraph {
+            graph: DiGraph::new(),
+            path_to_index: HashMap::new(),
+            root_node: None,
+        }
+    }
+
+    /// Adds a new entry to the graph. Parent directories are automatically inserted.
+    /// If a node already exists in the graph at the same path and not both the old
+    /// and new nodes are directories, the node is replaced and disconnected from
+    /// its children.
+    pub fn add(&mut self, entry: IngestionEntry) -> Result<NodeIndex, Error> {
+        let path = entry.path().to_owned();
+
+        let index = match self.path_to_index.get(entry.path()) {
+            Some(&index) => {
+                // If either the old entry or new entry are not directories, we'll replace the old
+                // entry.
+                if !entry.is_dir() || !self.get_node(index).is_dir() {
+                    self.replace_node(index, entry);
+                }
+
+                index
+            }
+            None => self.graph.add_node(entry),
+        };
+
+        // for archives, a path with 1 component is the root node
+        if path.components().count() == 1 {
+            // We expect archives to contain a single root node; a second root
+            // entry with a different path name is unsupported.
+            if let Some(root_node) = self.root_node {
+                if self.get_node(root_node).path() != path.as_ref() {
+                    return Err(Error::UnexpectedNumberOfTopLevelEntries);
+                }
+            }
+
+            self.root_node = Some(index)
+        } else if let Some(parent_path) = path.parent() {
+            // Recursively add the parent node until it hits the root node.
+            let parent_index = self.add(IngestionEntry::Dir {
+                path: parent_path.to_owned(),
+            })?;
+
+            // Insert an edge from the parent directory to the child entry.
+            self.graph.add_edge(parent_index, index, ());
+        }
+
+        self.path_to_index.insert(path, index);
+
+        Ok(index)
+    }
+
+    /// Traverses the graph in DFS post order and collects the entries into a [Vec<IngestionEntry>].
+    ///
+    /// Unreachable parts of the graph are not included in the result.
+    pub fn finalize(self) -> Result<Vec<IngestionEntry>, Error> {
+        // There must be a root node.
+        let Some(root_node_index) = self.root_node else {
+            return Err(Error::UnexpectedNumberOfTopLevelEntries);
+        };
+
+        // The root node must be a directory.
+        if !self.get_node(root_node_index).is_dir() {
+            return Err(Error::UnexpectedNumberOfTopLevelEntries);
+        }
+
+        let mut traversal = DfsPostOrder::new(&self.graph, root_node_index);
+        let mut nodes = Vec::with_capacity(self.graph.node_count());
+        while let Some(node_index) = traversal.next(&self.graph) {
+            nodes.push(self.get_node(node_index).clone());
+        }
+
+        Ok(nodes)
+    }
+
+    /// Replaces the node with the specified entry. The node's children are disconnected.
+    ///
+    /// This should never be called if both the old and new nodes are directories.
+    fn replace_node(&mut self, index: NodeIndex, new_entry: IngestionEntry) {
+        let entry = self
+            .graph
+            .node_weight_mut(index)
+            .expect("Tvix bug: missing node entry");
+
+        debug_assert!(!(entry.is_dir() && new_entry.is_dir()));
+
+        // Replace the node itself.
+        warn!(
+            "saw duplicate entry in archive at path {:?}. old: {:?} new: {:?}",
+            entry.path(),
+            &entry,
+            &new_entry
+        );
+        *entry = new_entry;
+
+        // Remove any outgoing edges to disconnect the old node's children.
+        let edges = self
+            .graph
+            .edges_directed(index, Direction::Outgoing)
+            .map(|edge| edge.id())
+            .collect::<Vec<_>>();
+        for edge in edges {
+            self.graph.remove_edge(edge);
+        }
+    }
+
+    fn get_node(&self, index: NodeIndex) -> &IngestionEntry {
+        self.graph
+            .node_weight(index)
+            .expect("Tvix bug: missing node entry")
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use crate::import::IngestionEntry;
+    use crate::B3Digest;
+
+    use super::{Error, IngestionEntryGraph};
+
+    use lazy_static::lazy_static;
+    use rstest::rstest;
+
+    lazy_static! {
+        pub static ref EMPTY_DIGEST: B3Digest = blake3::hash(&[]).as_bytes().into();
+        pub static ref DIR_A: IngestionEntry = IngestionEntry::Dir {
+            path: "a".parse().unwrap()
+        };
+        pub static ref DIR_B: IngestionEntry = IngestionEntry::Dir {
+            path: "b".parse().unwrap()
+        };
+        pub static ref DIR_A_B: IngestionEntry = IngestionEntry::Dir {
+            path: "a/b".parse().unwrap()
+        };
+        pub static ref FILE_A: IngestionEntry = IngestionEntry::Regular {
+            path: "a".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_DIGEST.clone(),
+        };
+        pub static ref FILE_A_B: IngestionEntry = IngestionEntry::Regular {
+            path: "a/b".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_DIGEST.clone(),
+        };
+        pub static ref FILE_A_B_C: IngestionEntry = IngestionEntry::Regular {
+            path: "a/b/c".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_DIGEST.clone(),
+        };
+    }
+
+    #[rstest]
+    #[case::implicit_directories(&[&*FILE_A_B_C], &[&*FILE_A_B_C, &*DIR_A_B, &*DIR_A])]
+    #[case::explicit_directories(&[&*DIR_A, &*DIR_A_B, &*FILE_A_B_C], &[&*FILE_A_B_C, &*DIR_A_B, &*DIR_A])]
+    #[case::inaccessible_tree(&[&*DIR_A, &*DIR_A_B, &*FILE_A_B], &[&*FILE_A_B, &*DIR_A])]
+    fn node_ingestion_success(
+        #[case] in_entries: &[&IngestionEntry],
+        #[case] exp_entries: &[&IngestionEntry],
+    ) {
+        let mut nodes = IngestionEntryGraph::new();
+
+        for entry in in_entries {
+            nodes.add((*entry).clone()).expect("failed to add entry");
+        }
+
+        let entries = nodes.finalize().expect("invalid entries");
+
+        let exp_entries: Vec<IngestionEntry> =
+            exp_entries.iter().map(|entry| (*entry).clone()).collect();
+
+        assert_eq!(entries, exp_entries);
+    }
+
+    #[rstest]
+    #[case::no_top_level_entries(&[], Error::UnexpectedNumberOfTopLevelEntries)]
+    #[case::multiple_top_level_dirs(&[&*DIR_A, &*DIR_B], Error::UnexpectedNumberOfTopLevelEntries)]
+    #[case::top_level_file_entry(&[&*FILE_A], Error::UnexpectedNumberOfTopLevelEntries)]
+    fn node_ingestion_error(#[case] in_entries: &[&IngestionEntry], #[case] exp_error: Error) {
+        let mut nodes = IngestionEntryGraph::new();
+
+        let result = (|| {
+            for entry in in_entries {
+                nodes.add((*entry).clone())?;
+            }
+            nodes.finalize()
+        })();
+
+        let error = result.expect_err("expected error");
+        assert_eq!(error.to_string(), exp_error.to_string());
+    }
+}
diff --git a/tvix/castore/src/import/error.rs b/tvix/castore/src/import/error.rs
new file mode 100644
index 0000000000..e3fba617e0
--- /dev/null
+++ b/tvix/castore/src/import/error.rs
@@ -0,0 +1,20 @@
+use super::PathBuf;
+
+use crate::Error as CastoreError;
+
+/// Represents all error types that can be emitted by ingest_entries.
+/// It can represent errors uploading individual Directories and finalizing
+/// the upload.
+/// It also contains a generic error kind that carries ingestion-method
+/// specific errors.
+#[derive(Debug, thiserror::Error)]
+pub enum IngestionError<E: std::fmt::Display> {
+    #[error("error from producer: {0}")]
+    Producer(#[from] E),
+
+    #[error("failed to upload directory at {0}: {1}")]
+    UploadDirectoryError(PathBuf, CastoreError),
+
+    #[error("failed to finalize directory upload: {0}")]
+    FinalizeDirectoryUpload(CastoreError),
+}
diff --git a/tvix/castore/src/import/fs.rs b/tvix/castore/src/import/fs.rs
new file mode 100644
index 0000000000..9d3ecfe6ab
--- /dev/null
+++ b/tvix/castore/src/import/fs.rs
@@ -0,0 +1,185 @@
+//! Import from a real filesystem.
+
+use futures::stream::BoxStream;
+use futures::StreamExt;
+use std::fs::FileType;
+use std::os::unix::ffi::OsStringExt;
+use std::os::unix::fs::MetadataExt;
+use std::os::unix::fs::PermissionsExt;
+use tracing::instrument;
+use walkdir::DirEntry;
+use walkdir::WalkDir;
+
+use crate::blobservice::BlobService;
+use crate::directoryservice::DirectoryService;
+use crate::proto::node::Node;
+use crate::B3Digest;
+
+use super::ingest_entries;
+use super::IngestionEntry;
+use super::IngestionError;
+
+/// Ingests the contents at a given path into the tvix store, interacting with a [BlobService] and
+/// [DirectoryService]. It returns the root node or an error.
+///
+/// It does not follow symlinks at the root; they will be ingested as actual symlinks.
+///
+/// This function will walk the filesystem using `walkdir` and will consume
+/// `O(#number of entries)` space.
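+///
+/// A minimal usage sketch (hypothetical `blob_service` and `directory_service`
+/// values, assuming an async context):
+/// ```ignore
+/// let root_node = ingest_path(blob_service, directory_service, "/path/to/import").await?;
+/// ```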
+#[instrument(skip(blob_service, directory_service), fields(path), err)]
+pub async fn ingest_path<BS, DS, P>(
+    blob_service: BS,
+    directory_service: DS,
+    path: P,
+) -> Result<Node, IngestionError<Error>>
+where
+    P: AsRef<std::path::Path> + std::fmt::Debug,
+    BS: BlobService + Clone,
+    DS: DirectoryService,
+{
+    let iter = WalkDir::new(path.as_ref())
+        .follow_links(false)
+        .follow_root_links(false)
+        .contents_first(true)
+        .into_iter();
+
+    let entries = dir_entries_to_ingestion_stream(blob_service, iter, path.as_ref());
+    ingest_entries(directory_service, entries).await
+}
+
+/// Converts an iterator of [walkdir::DirEntry]s into a stream of ingestion entries.
+/// This can then be fed into [ingest_entries] to ingest all the entries into the castore.
+///
+/// The produced stream is buffered, so uploads can happen concurrently.
+///
+/// The root is the [Path] in the filesystem that is being ingested into the castore.
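+///
+/// A sketch mirroring what [ingest_path] does internally (hypothetical
+/// `blob_service` and `directory_service` values):
+/// ```ignore
+/// let iter = WalkDir::new(root)
+///     .follow_links(false)
+///     .contents_first(true)
+///     .into_iter();
+/// let entries = dir_entries_to_ingestion_stream(blob_service, iter, root);
+/// let root_node = ingest_entries(directory_service, entries).await?;
+/// ```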
+pub fn dir_entries_to_ingestion_stream<'a, BS, I>(
+    blob_service: BS,
+    iter: I,
+    root: &'a std::path::Path,
+) -> BoxStream<'a, Result<IngestionEntry, Error>>
+where
+    BS: BlobService + Clone + 'a,
+    I: Iterator<Item = Result<DirEntry, walkdir::Error>> + Send + 'a,
+{
+    let prefix = root.parent().unwrap_or_else(|| std::path::Path::new(""));
+
+    Box::pin(
+        futures::stream::iter(iter)
+            .map(move |x| {
+                let blob_service = blob_service.clone();
+                async move {
+                    match x {
+                        Ok(dir_entry) => {
+                            dir_entry_to_ingestion_entry(blob_service, &dir_entry, prefix).await
+                        }
+                        Err(e) => Err(Error::Stat(
+                            prefix.to_path_buf(),
+                            e.into_io_error().expect("walkdir err must be some"),
+                        )),
+                    }
+                }
+            })
+            .buffered(50),
+    )
+}
+
+/// Converts a [walkdir::DirEntry] into an [IngestionEntry], uploading blobs to the
+/// provided [BlobService].
+///
+/// The prefix path is stripped from the path of each entry. This is usually the parent path
+/// of the path being ingested so that the last element of the stream only has one component.
+pub async fn dir_entry_to_ingestion_entry<BS>(
+    blob_service: BS,
+    entry: &DirEntry,
+    prefix: &std::path::Path,
+) -> Result<IngestionEntry, Error>
+where
+    BS: BlobService,
+{
+    let file_type = entry.file_type();
+
+    let fs_path = entry
+        .path()
+        .strip_prefix(prefix)
+        .expect("Tvix bug: failed to strip root path prefix");
+
+    // convert to castore PathBuf
+    let path = crate::path::PathBuf::from_host_path(fs_path, false)
+        .unwrap_or_else(|e| panic!("Tvix bug: walkdir direntry cannot be parsed: {}", e));
+
+    if file_type.is_dir() {
+        Ok(IngestionEntry::Dir { path })
+    } else if file_type.is_symlink() {
+        let target = std::fs::read_link(entry.path())
+            .map_err(|e| Error::Stat(entry.path().to_path_buf(), e))?
+            .into_os_string()
+            .into_vec();
+
+        Ok(IngestionEntry::Symlink { path, target })
+    } else if file_type.is_file() {
+        let metadata = entry
+            .metadata()
+            .map_err(|e| Error::Stat(entry.path().to_path_buf(), e.into()))?;
+
+        let digest = upload_blob(blob_service, entry.path().to_path_buf()).await?;
+
+        Ok(IngestionEntry::Regular {
+            path,
+            size: metadata.size(),
+            // If it's executable by the user, it'll become executable.
+            // This matches nix's dump() function behaviour.
+            executable: metadata.permissions().mode() & 64 != 0,
+            digest,
+        })
+    } else {
+        return Err(Error::FileType(fs_path.to_path_buf(), file_type));
+    }
+}
+
+/// Uploads the file at the provided [Path] to the [BlobService].
+#[instrument(skip(blob_service), fields(path), err)]
+async fn upload_blob<BS>(
+    blob_service: BS,
+    path: impl AsRef<std::path::Path>,
+) -> Result<B3Digest, Error>
+where
+    BS: BlobService,
+{
+    let mut file = match tokio::fs::File::open(path.as_ref()).await {
+        Ok(file) => file,
+        Err(e) => return Err(Error::Open(path.as_ref().to_path_buf(), e)),
+    };
+
+    let mut writer = blob_service.open_write().await;
+
+    if let Err(e) = tokio::io::copy(&mut file, &mut writer).await {
+        return Err(Error::BlobRead(path.as_ref().to_path_buf(), e));
+    };
+
+    let digest = writer
+        .close()
+        .await
+        .map_err(|e| Error::BlobFinalize(path.as_ref().to_path_buf(), e))?;
+
+    Ok(digest)
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+    #[error("unsupported file type at {0}: {1:?}")]
+    FileType(std::path::PathBuf, FileType),
+
+    #[error("unable to stat {0}: {1}")]
+    Stat(std::path::PathBuf, std::io::Error),
+
+    #[error("unable to open {0}: {1}")]
+    Open(std::path::PathBuf, std::io::Error),
+
+    #[error("unable to read {0}: {1}")]
+    BlobRead(std::path::PathBuf, std::io::Error),
+
+    // TODO: proper error for blob finalize
+    #[error("unable to finalize blob {0}: {1}")]
+    BlobFinalize(std::path::PathBuf, std::io::Error),
+}
diff --git a/tvix/castore/src/import/mod.rs b/tvix/castore/src/import/mod.rs
new file mode 100644
index 0000000000..e8b27e469c
--- /dev/null
+++ b/tvix/castore/src/import/mod.rs
@@ -0,0 +1,340 @@
+//! The main library function here is [ingest_entries], receiving a stream of
+//! [IngestionEntry].
+//!
+//! Specific implementations, such as ingesting from the filesystem, live in
+//! child modules.
+
+use crate::directoryservice::DirectoryPutter;
+use crate::directoryservice::DirectoryService;
+use crate::path::{Path, PathBuf};
+use crate::proto::node::Node;
+use crate::proto::Directory;
+use crate::proto::DirectoryNode;
+use crate::proto::FileNode;
+use crate::proto::SymlinkNode;
+use crate::B3Digest;
+use futures::{Stream, StreamExt};
+
+use tracing::Level;
+
+use std::collections::HashMap;
+use tracing::instrument;
+
+mod error;
+pub use error::IngestionError;
+
+pub mod archive;
+pub mod fs;
+
+/// Ingests [IngestionEntry] from the given stream into the passed [DirectoryService].
+/// On success, returns the root [Node].
+///
+/// The stream must have the following invariants:
+/// - All children entries must come before their parents.
+/// - The last entry must be the root node which must have a single path component.
+/// - Every entry should have a unique path and only consist of normal components.
+///   This means no Windows path prefixes, absolute paths, `.`, or `..`.
+/// - All referenced directories must have an associated directory entry in the stream.
+///   This means if there is a file entry for `foo/bar`, there must also be a `foo` directory
+///   entry.
+///
+/// Internally we maintain a [HashMap] of [PathBuf] to partially populated [Directory] at that
+/// path. Once we receive an [IngestionEntry] for the directory itself, we remove it from the
+/// map and upload it to the [DirectoryService] through a lazily created [DirectoryPutter].
+///
+/// On success, returns the root node.
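+///
+/// For example, ingesting a tree containing a single file `foo/bar` requires
+/// the file entry to come before its parent directory (a sketch; `blob_digest`
+/// is a placeholder for a previously uploaded blob):
+/// ```ignore
+/// let entries = futures::stream::iter(vec![
+///     Ok::<_, std::io::Error>(IngestionEntry::Regular {
+///         path: "foo/bar".parse().unwrap(),
+///         size: 42,
+///         executable: false,
+///         digest: blob_digest,
+///     }),
+///     Ok(IngestionEntry::Dir { path: "foo".parse().unwrap() }),
+/// ]);
+/// let root_node = ingest_entries(directory_service, entries).await?;
+/// ```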
+#[instrument(skip_all, ret(level = Level::TRACE), err)]
+pub async fn ingest_entries<DS, S, E>(
+    directory_service: DS,
+    mut entries: S,
+) -> Result<Node, IngestionError<E>>
+where
+    DS: DirectoryService,
+    S: Stream<Item = Result<IngestionEntry, E>> + Send + std::marker::Unpin,
+    E: std::error::Error,
+{
+    // For a given path, this holds the [Directory] structs as they are populated.
+    let mut directories: HashMap<PathBuf, Directory> = HashMap::default();
+    let mut maybe_directory_putter: Option<Box<dyn DirectoryPutter>> = None;
+
+    let root_node = loop {
+        let mut entry = entries
+            .next()
+            .await
+            // The last entry of the stream must have 1 path component, after which
+            // we break the loop manually.
+            .expect("Tvix bug: unexpected end of stream")?;
+
+        let name = entry
+            .path()
+            .file_name()
+            // If this is the root node, it will have an empty name.
+            .unwrap_or_default()
+            .to_owned()
+            .into();
+
+        let node = match &mut entry {
+            IngestionEntry::Dir { .. } => {
+                // If the entry is a directory, we traversed all its children (and
+                // populated it in `directories`).
+                // If we don't have it in directories, it's a directory without
+                // children.
+                let directory = directories
+                    .remove(entry.path())
+                    // In that case, it contained no children
+                    .unwrap_or_default();
+
+                let directory_size = directory.size();
+                let directory_digest = directory.digest();
+
+                // Use the directory_putter to upload the directory.
+                // If we don't have one yet (as that's the first one to upload),
+                // initialize the putter.
+                maybe_directory_putter
+                    .get_or_insert_with(|| directory_service.put_multiple_start())
+                    .put(directory)
+                    .await
+                    .map_err(|e| {
+                        IngestionError::UploadDirectoryError(entry.path().to_owned(), e)
+                    })?;
+
+                Node::Directory(DirectoryNode {
+                    name,
+                    digest: directory_digest.into(),
+                    size: directory_size,
+                })
+            }
+            IngestionEntry::Symlink { ref target, .. } => Node::Symlink(SymlinkNode {
+                name,
+                target: target.to_owned().into(),
+            }),
+            IngestionEntry::Regular {
+                size,
+                executable,
+                digest,
+                ..
+            } => Node::File(FileNode {
+                name,
+                digest: digest.to_owned().into(),
+                size: *size,
+                executable: *executable,
+            }),
+        };
+
+        let parent = entry
+            .path()
+            .parent()
+            .expect("Tvix bug: got entry with root node");
+
+        if parent == crate::Path::ROOT {
+            break node;
+        } else {
+            // record node in parent directory, creating a new [Directory] if not there yet.
+            directories.entry(parent.to_owned()).or_default().add(node);
+        }
+    };
+
+    assert!(
+        entries.count().await == 0,
+        "Tvix bug: left over elements in the stream"
+    );
+
+    assert!(
+        directories.is_empty(),
+        "Tvix bug: left over directories after processing ingestion stream"
+    );
+
+    // if there were directories uploaded, make sure we flush the putter, so
+    // they're all persisted to the backend.
+    if let Some(mut directory_putter) = maybe_directory_putter {
+        #[cfg_attr(not(debug_assertions), allow(unused))]
+        let root_directory_digest = directory_putter
+            .close()
+            .await
+            .map_err(IngestionError::FinalizeDirectoryUpload)?;
+
+        #[cfg(debug_assertions)]
+        {
+            if let Node::Directory(directory_node) = &root_node {
+                debug_assert_eq!(
+                    root_directory_digest,
+                    directory_node
+                        .digest
+                        .to_vec()
+                        .try_into()
+                        .expect("invalid digest len")
+                )
+            } else {
+                unreachable!("Tvix bug: directory putter initialized but no root directory node");
+            }
+        }
+    };
+
+    Ok(root_node)
+}
+
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub enum IngestionEntry {
+    Regular {
+        path: PathBuf,
+        size: u64,
+        executable: bool,
+        digest: B3Digest,
+    },
+    Symlink {
+        path: PathBuf,
+        target: Vec<u8>,
+    },
+    Dir {
+        path: PathBuf,
+    },
+}
+
+impl IngestionEntry {
+    fn path(&self) -> &Path {
+        match self {
+            IngestionEntry::Regular { path, .. } => path,
+            IngestionEntry::Symlink { path, .. } => path,
+            IngestionEntry::Dir { path } => path,
+        }
+    }
+
+    fn is_dir(&self) -> bool {
+        matches!(self, IngestionEntry::Dir { .. })
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use rstest::rstest;
+
+    use crate::fixtures::{DIRECTORY_COMPLICATED, DIRECTORY_WITH_KEEP, EMPTY_BLOB_DIGEST};
+    use crate::proto::node::Node;
+    use crate::proto::{Directory, DirectoryNode, FileNode, SymlinkNode};
+    use crate::{directoryservice::MemoryDirectoryService, fixtures::DUMMY_DIGEST};
+
+    use super::ingest_entries;
+    use super::IngestionEntry;
+
+    #[rstest]
+    #[case::single_file(vec![IngestionEntry::Regular {
+        path: "foo".parse().unwrap(),
+        size: 42,
+        executable: true,
+        digest: DUMMY_DIGEST.clone(),
+    }],
+        Node::File(FileNode { name: "foo".into(), digest: DUMMY_DIGEST.clone().into(), size: 42, executable: true }
+    ))]
+    #[case::single_symlink(vec![IngestionEntry::Symlink {
+        path: "foo".parse().unwrap(),
+        target: b"blub".into(),
+    }],
+        Node::Symlink(SymlinkNode { name: "foo".into(), target: "blub".into()})
+    )]
+    #[case::single_dir(vec![IngestionEntry::Dir {
+        path: "foo".parse().unwrap(),
+    }],
+        Node::Directory(DirectoryNode { name: "foo".into(), digest: Directory::default().digest().into(), size: Directory::default().size()})
+    )]
+    #[case::dir_with_keep(vec![
+        IngestionEntry::Regular {
+            path: "foo/.keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+        IngestionEntry::Dir {
+            path: "foo".parse().unwrap(),
+        },
+    ],
+        Node::Directory(DirectoryNode { name: "foo".into(), digest: DIRECTORY_WITH_KEEP.digest().into(), size: DIRECTORY_WITH_KEEP.size() })
+    )]
+    /// This is intentionally a bit unsorted, though it still satisfies all
+    /// requirements we have on the order of elements in the stream.
+    #[case::directory_complicated(vec![
+        IngestionEntry::Regular {
+            path: "blub/.keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+        IngestionEntry::Regular {
+            path: "blub/keep/.keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+        IngestionEntry::Dir {
+            path: "blub/keep".parse().unwrap(),
+        },
+        IngestionEntry::Symlink {
+            path: "blub/aa".parse().unwrap(),
+            target: b"/nix/store/somewhereelse".into(),
+        },
+        IngestionEntry::Dir {
+            path: "blub".parse().unwrap(),
+        },
+    ],
+        Node::Directory(DirectoryNode { name: "blub".into(), digest: DIRECTORY_COMPLICATED.digest().into(), size:DIRECTORY_COMPLICATED.size() })
+    )]
+    #[tokio::test]
+    async fn test_ingestion(#[case] entries: Vec<IngestionEntry>, #[case] exp_root_node: Node) {
+        let directory_service = MemoryDirectoryService::default();
+
+        let root_node = ingest_entries(
+            directory_service.clone(),
+            futures::stream::iter(entries.into_iter().map(Ok::<_, std::io::Error>)),
+        )
+        .await
+        .expect("must succeed");
+
+        assert_eq!(exp_root_node, root_node, "root node should match");
+    }
+
+    #[rstest]
+    #[should_panic]
+    #[case::empty_entries(vec![])]
+    #[should_panic]
+    #[case::missing_intermediate_dir(vec![
+        IngestionEntry::Regular {
+            path: "blub/.keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+    ])]
+    #[should_panic]
+    #[case::leaf_after_parent(vec![
+        IngestionEntry::Dir {
+            path: "blub".parse().unwrap(),
+        },
+        IngestionEntry::Regular {
+            path: "blub/.keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+    ])]
+    #[should_panic]
+    #[case::root_in_entry(vec![
+        IngestionEntry::Regular {
+            path: ".keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+        IngestionEntry::Dir {
+            path: "".parse().unwrap(),
+        },
+    ])]
+    #[tokio::test]
+    async fn test_ingestion_fail(#[case] entries: Vec<IngestionEntry>) {
+        let directory_service = MemoryDirectoryService::default();
+
+        let _ = ingest_entries(
+            directory_service.clone(),
+            futures::stream::iter(entries.into_iter().map(Ok::<_, std::io::Error>)),
+        )
+        .await;
+    }
+}
diff --git a/tvix/castore/src/lib.rs b/tvix/castore/src/lib.rs
index 1a7ac6b4b4..bdc533a8c5 100644
--- a/tvix/castore/src/lib.rs
+++ b/tvix/castore/src/lib.rs
@@ -9,6 +9,9 @@ pub mod fixtures;
 #[cfg(feature = "fs")]
 pub mod fs;
 
+mod path;
+pub use path::{Path, PathBuf};
+
 pub mod import;
 pub mod proto;
 pub mod tonic;
diff --git a/tvix/castore/src/path.rs b/tvix/castore/src/path.rs
new file mode 100644
index 0000000000..fcc2bd01fb
--- /dev/null
+++ b/tvix/castore/src/path.rs
@@ -0,0 +1,446 @@
+//! Contains data structures to deal with Paths in the tvix-castore model.
+
+use std::{
+    borrow::Borrow,
+    fmt::{self, Debug, Display},
+    mem,
+    ops::Deref,
+    str::FromStr,
+};
+
+use bstr::ByteSlice;
+
+use crate::proto::validate_node_name;
+
+/// Represents a Path in the castore model.
+/// These are always relative, and platform-independent, which distinguishes
+/// them from the ones provided in the standard library.
+#[derive(Eq, Hash, PartialEq)]
+#[repr(transparent)] // SAFETY: Representation has to match [u8]
+pub struct Path {
+    // As node names in the castore model cannot contain slashes,
+    // we use slashes as component separators here.
+    inner: [u8],
+}
+
+#[allow(dead_code)]
+impl Path {
+    // SAFETY: The empty path is valid.
+    pub const ROOT: &'static Path = unsafe { Path::from_bytes_unchecked(&[]) };
+
+    /// Convert a byte slice to a path, without checking validity.
+    const unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Path {
+        // SAFETY: &[u8] and &Path have the same representation.
+        unsafe { mem::transmute(bytes) }
+    }
+
+    fn from_bytes(bytes: &[u8]) -> Option<&Path> {
+        if !bytes.is_empty() {
+            // Ensure all components are valid castore node names.
+            for component in bytes.split_str(b"/") {
+                validate_node_name(component).ok()?;
+            }
+        }
+
+        // SAFETY: The path is either empty, or all of its components were
+        // verified to be valid node names above.
+        Some(unsafe { Path::from_bytes_unchecked(bytes) })
+    }
+
+    pub fn into_boxed_bytes(self: Box<Path>) -> Box<[u8]> {
+        // SAFETY: Box<Path> and Box<[u8]> have the same representation.
+        unsafe { mem::transmute(self) }
+    }
+
+    /// Returns the path without its final component, if there is one.
+    ///
+    /// Note that the parent of a bare file name is [Path::ROOT].
+    /// [Path::ROOT] is the only path without a parent.
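+    ///
+    /// For example (mirroring the unit tests below):
+    /// ```ignore
+    /// let p: PathBuf = "foo/bar".parse().unwrap();
+    /// let parent: PathBuf = "foo".parse().unwrap();
+    /// assert_eq!(Some(&*parent), p.parent());
+    /// assert!(Path::ROOT.parent().is_none());
+    /// ```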
+    pub fn parent(&self) -> Option<&Path> {
+        // The root does not have a parent.
+        if self.inner.is_empty() {
+            return None;
+        }
+
+        Some(
+            if let Some((parent, _file_name)) = self.inner.rsplit_once_str(b"/") {
+                // SAFETY: The parent of a valid Path is a valid Path.
+                unsafe { Path::from_bytes_unchecked(parent) }
+            } else {
+                // The parent of a bare file name is the root.
+                Path::ROOT
+            },
+        )
+    }
+
+    /// Creates a PathBuf with `name` adjoined to self.
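+    ///
+    /// For example (mirroring the unit tests below):
+    /// ```ignore
+    /// let p: PathBuf = "a".parse().unwrap();
+    /// assert_eq!("a/b".parse::<PathBuf>().unwrap(), p.try_join(b"b")?);
+    /// ```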
+    pub fn try_join(&self, name: &[u8]) -> Result<PathBuf, std::io::Error> {
+        let mut v = PathBuf::with_capacity(self.inner.len() + name.len() + 1);
+        v.inner.extend_from_slice(&self.inner);
+        v.try_push(name)?;
+
+        Ok(v)
+    }
+
+    /// Produces an iterator over the components of the path, which are
+    /// individual byte slices.
+    /// In case the path is empty, an empty iterator is returned.
+    pub fn components(&self) -> impl Iterator<Item = &[u8]> {
+        let mut iter = self.inner.split_str(&b"/");
+
+        // We don't want to return an empty element, so consume it if it's the only one.
+        if self.inner.is_empty() {
+            let _ = iter.next();
+        }
+
+        iter
+    }
+
+    /// Returns the final component of the Path, if there is one.
+    pub fn file_name(&self) -> Option<&[u8]> {
+        self.components().last()
+    }
+
+    pub fn as_bytes(&self) -> &[u8] {
+        &self.inner
+    }
+}
+
+impl Debug for Path {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        Debug::fmt(self.inner.as_bstr(), f)
+    }
+}
+
+impl Display for Path {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        Display::fmt(self.inner.as_bstr(), f)
+    }
+}
+
+impl AsRef<Path> for Path {
+    fn as_ref(&self) -> &Path {
+        self
+    }
+}
+
+/// Represents an owned PathBuf in the castore model.
+/// These are always relative, and platform-independent, which distinguishes
+/// them from the ones provided in the standard library.
+#[derive(Clone, Default, Eq, Hash, PartialEq)]
+pub struct PathBuf {
+    inner: Vec<u8>,
+}
+
+impl Deref for PathBuf {
+    type Target = Path;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: PathBuf always contains a valid Path.
+        unsafe { Path::from_bytes_unchecked(&self.inner) }
+    }
+}
+
+impl AsRef<Path> for PathBuf {
+    fn as_ref(&self) -> &Path {
+        self
+    }
+}
+
+impl ToOwned for Path {
+    type Owned = PathBuf;
+
+    fn to_owned(&self) -> Self::Owned {
+        PathBuf {
+            inner: self.inner.to_owned(),
+        }
+    }
+}
+
+impl Borrow<Path> for PathBuf {
+    fn borrow(&self) -> &Path {
+        self
+    }
+}
+
+impl From<Box<Path>> for PathBuf {
+    fn from(value: Box<Path>) -> Self {
+        // SAFETY: Box<Path> is always a valid path.
+        unsafe { PathBuf::from_bytes_unchecked(value.into_boxed_bytes().into_vec()) }
+    }
+}
+
+impl From<&Path> for PathBuf {
+    fn from(value: &Path) -> Self {
+        value.to_owned()
+    }
+}
+
+impl FromStr for PathBuf {
+    type Err = std::io::Error;
+
+    fn from_str(s: &str) -> Result<PathBuf, Self::Err> {
+        Ok(Path::from_bytes(s.as_bytes())
+            .ok_or(std::io::ErrorKind::InvalidData)?
+            .to_owned())
+    }
+}
+
+impl Debug for PathBuf {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        Debug::fmt(&**self, f)
+    }
+}
+
+impl Display for PathBuf {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        Display::fmt(&**self, f)
+    }
+}
+
+impl PathBuf {
+    pub fn new() -> PathBuf {
+        Self::default()
+    }
+
+    pub fn with_capacity(capacity: usize) -> PathBuf {
+        // SAFETY: The empty path is a valid path.
+        Self {
+            inner: Vec::with_capacity(capacity),
+        }
+    }
+
+    /// Adjoins `name` to self.
+    pub fn try_push(&mut self, name: &[u8]) -> Result<(), std::io::Error> {
+        validate_node_name(name).map_err(|_| std::io::ErrorKind::InvalidData)?;
+
+        if !self.inner.is_empty() {
+            self.inner.push(b'/');
+        }
+
+        self.inner.extend_from_slice(name);
+
+        Ok(())
+    }
+
+    /// Convert a byte vector to a PathBuf, without checking validity.
+    unsafe fn from_bytes_unchecked(bytes: Vec<u8>) -> PathBuf {
+        PathBuf { inner: bytes }
+    }
+
+    /// Convert from a [&std::path::Path] to [Self].
+    ///
+    /// - Self uses `/` as path separator.
+    /// - Absolute paths are always rejected, as are those with custom prefixes.
+    /// - Repeated separators are deduplicated.
+    /// - Occurrences of `.` are normalized away.
+    /// - A trailing slash is normalized away.
+    ///
+    /// A `canonicalize_dotdot` boolean controls whether `..` will get
+    /// canonicalized if possible, or should return an error.
+    ///
+    /// For more exotic paths, this conversion might produce different results
+    /// on different platforms, due to different underlying byte
+    /// representations, which is why it's restricted to unix for now.
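+    ///
+    /// For example (mirroring the unit tests below):
+    /// ```ignore
+    /// // "./a//b/." normalizes to "a/b".
+    /// let p = PathBuf::from_host_path(std::path::Path::new("./a//b/."), false)?;
+    /// assert_eq!("a/b".parse::<PathBuf>().unwrap(), p);
+    ///
+    /// // ".." is only canonicalized when requested: "a/../b" becomes "b".
+    /// let q = PathBuf::from_host_path(std::path::Path::new("a/../b"), true)?;
+    /// assert_eq!("b".parse::<PathBuf>().unwrap(), q);
+    /// ```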
+    #[cfg(unix)]
+    pub fn from_host_path(
+        host_path: &std::path::Path,
+        canonicalize_dotdot: bool,
+    ) -> Result<Self, std::io::Error> {
+        let mut p = PathBuf::with_capacity(host_path.as_os_str().len());
+
+        for component in host_path.components() {
+            match component {
+                std::path::Component::Prefix(_) | std::path::Component::RootDir => {
+                    return Err(std::io::Error::new(
+                        std::io::ErrorKind::InvalidData,
+                        "found disallowed prefix or rootdir",
+                    ))
+                }
+                std::path::Component::CurDir => continue, // ignore
+                std::path::Component::ParentDir => {
+                    if canonicalize_dotdot {
+                        // Try popping the last element from the path being constructed.
+                        // FUTUREWORK: pop method?
+                        p = p
+                            .parent()
+                            .ok_or_else(|| {
+                                std::io::Error::new(
+                                    std::io::ErrorKind::InvalidData,
+                                    "found .. going too far up",
+                                )
+                            })?
+                            .to_owned();
+                    } else {
+                        return Err(std::io::Error::new(
+                            std::io::ErrorKind::InvalidData,
+                            "found disallowed ..",
+                        ));
+                    }
+                }
+                std::path::Component::Normal(s) => {
+                    // append the new component to the path being constructed.
+                    p.try_push(s.as_encoded_bytes()).map_err(|_| {
+                        std::io::Error::new(
+                            std::io::ErrorKind::InvalidData,
+                            "encountered invalid node in sub_path component",
+                        )
+                    })?
+                }
+            }
+        }
+
+        Ok(p)
+    }
+
+    pub fn into_boxed_path(self) -> Box<Path> {
+        // SAFETY: Box<[u8]> and Box<Path> have the same representation,
+        // and PathBuf always contains a valid Path.
+        unsafe { mem::transmute(self.inner.into_boxed_slice()) }
+    }
+
+    pub fn into_bytes(self) -> Vec<u8> {
+        self.inner
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::{Path, PathBuf};
+    use bstr::ByteSlice;
+    use rstest::rstest;
+
+    // TODO: add some manual tests including invalid UTF-8 (hard to express
+    // with rstest)
+
+    #[rstest]
+    #[case::empty("", 0)]
+    #[case("a", 1)]
+    #[case("a/b", 2)]
+    #[case("a/b/c", 3)]
+    // add two slightly more cursed variants.
+    // Technically nothing prevents us from representing this with castore,
+    // but maybe we want to disallow constructing paths like this as it's a
+    // bad idea.
+    #[case::cursed("C:\\a/b", 2)]
+    #[case::cursed("\\\\tvix-store", 1)]
+    pub fn from_str(#[case] s: &str, #[case] num_components: usize) {
+        let p: PathBuf = s.parse().expect("must parse");
+
+        assert_eq!(s.as_bytes(), p.as_bytes(), "inner bytes mismatch");
+        assert_eq!(
+            num_components,
+            p.components().count(),
+            "number of components mismatch"
+        );
+    }
+
+    #[rstest]
+    #[case::absolute("/a/b")]
+    #[case::two_forward_slashes_start("//a/b")]
+    #[case::two_forward_slashes_middle("a/b//c/d")]
+    #[case::trailing_slash("a/b/")]
+    #[case::dot(".")]
+    #[case::dotdot("..")]
+    #[case::dot_start("./a")]
+    #[case::dotdot_start("../a")]
+    #[case::dot_middle("a/./b")]
+    #[case::dotdot_middle("a/../b")]
+    #[case::dot_end("a/b/.")]
+    #[case::dotdot_end("a/b/..")]
+    #[case::null("fo\0o")]
+    pub fn from_str_fail(#[case] s: &str) {
+        s.parse::<PathBuf>().expect_err("must fail");
+    }
+
+    #[rstest]
+    #[case("foo", "")]
+    #[case("foo/bar", "foo")]
+    #[case("foo2/bar2", "foo2")]
+    #[case("foo/bar/baz", "foo/bar")]
+    pub fn parent(#[case] p: PathBuf, #[case] exp_parent: PathBuf) {
+        assert_eq!(Some(&*exp_parent), p.parent());
+    }
+
+    #[rstest]
+    pub fn no_parent() {
+        assert!(Path::ROOT.parent().is_none());
+    }
+
+    #[rstest]
+    #[case("a", "b", "a/b")]
+    #[case("a", "b", "a/b")]
+    pub fn join_push(#[case] mut p: PathBuf, #[case] name: &str, #[case] exp_p: PathBuf) {
+        assert_eq!(exp_p, p.try_join(name.as_bytes()).expect("join failed"));
+        p.try_push(name.as_bytes()).expect("push failed");
+        assert_eq!(exp_p, p);
+    }
+
+    #[rstest]
+    #[case("a", "/")]
+    #[case("a", "")]
+    #[case("a", "b/c")]
+    #[case("", "/")]
+    #[case("", "")]
+    #[case("", "b/c")]
+    #[case("", ".")]
+    #[case("", "..")]
+    pub fn join_push_fail(#[case] mut p: PathBuf, #[case] name: &str) {
+        p.try_join(name.as_bytes())
+            .expect_err("join succeeded unexpectedly");
+        p.try_push(name.as_bytes())
+            .expect_err("push succeeded unexpectedly");
+    }
+
+    #[rstest]
+    #[case::empty("", vec![])]
+    #[case("a", vec!["a"])]
+    #[case("a/b", vec!["a", "b"])]
+    #[case("a/b/c", vec!["a","b", "c"])]
+    pub fn components(#[case] p: PathBuf, #[case] exp_components: Vec<&str>) {
+        assert_eq!(
+            exp_components,
+            p.components()
+                .map(|x| x.to_str().unwrap())
+                .collect::<Vec<_>>()
+        );
+    }
+
+    #[rstest]
+    #[case::empty("", "", false)]
+    #[case::path("a", "a", false)]
+    #[case::path2("a/b", "a/b", false)]
+    #[case::double_slash_middle("a//b", "a/b", false)]
+    #[case::dot(".", "", false)]
+    #[case::dot_start("./a/b", "a/b", false)]
+    #[case::dot_middle("a/./b", "a/b", false)]
+    #[case::dot_end("a/b/.", "a/b", false)]
+    #[case::trailing_slash("a/b/", "a/b", false)]
+    #[case::dotdot_canonicalize("a/..", "", true)]
+    #[case::dotdot_canonicalize2("a/../b", "b", true)]
+    #[cfg_attr(unix, case::faux_prefix("\\\\nix-store", "\\\\nix-store", false))]
+    #[cfg_attr(unix, case::faux_letter("C:\\foo.txt", "C:\\foo.txt", false))]
+    pub fn from_host_path(
+        #[case] host_path: std::path::PathBuf,
+        #[case] exp_path: PathBuf,
+        #[case] canonicalize_dotdot: bool,
+    ) {
+        let p = PathBuf::from_host_path(&host_path, canonicalize_dotdot).expect("must succeed");
+
+        assert_eq!(exp_path, p);
+    }
+
+    #[rstest]
+    #[case::absolute("/", false)]
+    #[case::dotdot_root("..", false)]
+    #[case::dotdot_root_canonicalize("..", true)]
+    #[case::dotdot_root_no_canonicalize("a/..", false)]
+    #[case::invalid_name("foo/bar\0", false)]
+    // #[cfg_attr(windows, case::prefix("\\\\nix-store", false))]
+    // #[cfg_attr(windows, case::letter("C:\\foo.txt", false))]
+    pub fn from_host_path_fail(
+        #[case] host_path: std::path::PathBuf,
+        #[case] canonicalize_dotdot: bool,
+    ) {
+        PathBuf::from_host_path(&host_path, canonicalize_dotdot).expect_err("must fail");
+    }
+}
diff --git a/tvix/castore/src/proto/grpc_directoryservice_wrapper.rs b/tvix/castore/src/proto/grpc_directoryservice_wrapper.rs
index 7d741a3f07..5c1428690c 100644
--- a/tvix/castore/src/proto/grpc_directoryservice_wrapper.rs
+++ b/tvix/castore/src/proto/grpc_directoryservice_wrapper.rs
@@ -1,12 +1,12 @@
 use crate::directoryservice::ClosureValidator;
 use crate::proto;
 use crate::{directoryservice::DirectoryService, B3Digest};
-use futures::StreamExt;
+use futures::stream::BoxStream;
+use futures::TryStreamExt;
 use std::ops::Deref;
-use tokio::sync::mpsc::channel;
-use tokio_stream::wrappers::ReceiverStream;
+use tokio_stream::once;
 use tonic::{async_trait, Request, Response, Status, Streaming};
-use tracing::{debug, instrument, warn};
+use tracing::{instrument, warn};
 
 pub struct GRPCDirectoryServiceWrapper<T> {
     directory_service: T,
@@ -23,63 +23,52 @@ impl<T> proto::directory_service_server::DirectoryService for GRPCDirectoryServi
 where
     T: Deref<Target = dyn DirectoryService> + Send + Sync + 'static,
 {
-    type GetStream = ReceiverStream<tonic::Result<proto::Directory, Status>>;
+    type GetStream = BoxStream<'static, tonic::Result<proto::Directory, Status>>;
 
     #[instrument(skip_all)]
-    async fn get(
-        &self,
+    async fn get<'a>(
+        &'a self,
         request: Request<proto::GetDirectoryRequest>,
     ) -> Result<Response<Self::GetStream>, Status> {
-        let (tx, rx) = channel(5);
-
         let req_inner = request.into_inner();
 
-        // look at the digest in the request and put it in the top of the queue.
-        match &req_inner.by_what {
-            None => return Err(Status::invalid_argument("by_what needs to be specified")),
-            Some(proto::get_directory_request::ByWhat::Digest(ref digest)) => {
+        let by_what = &req_inner
+            .by_what
+            .ok_or_else(|| Status::invalid_argument("invalid by_what"))?;
+
+        match by_what {
+            proto::get_directory_request::ByWhat::Digest(ref digest) => {
                 let digest: B3Digest = digest
                     .clone()
                     .try_into()
                     .map_err(|_e| Status::invalid_argument("invalid digest length"))?;
 
-                if !req_inner.recursive {
-                    let e: Result<proto::Directory, Status> = match self
-                        .directory_service
-                        .get(&digest)
-                        .await
-                    {
-                        Ok(Some(directory)) => Ok(directory),
-                        Ok(None) => {
-                            Err(Status::not_found(format!("directory {} not found", digest)))
-                        }
-                        Err(e) => {
-                            warn!(err = %e, directory.digest=%digest, "failed to get directory");
-                            Err(e.into())
-                        }
-                    };
-
-                    if tx.send(e).await.is_err() {
-                        debug!("receiver dropped");
+                Ok(tonic::Response::new({
+                    if !req_inner.recursive {
+                        let directory = self
+                            .directory_service
+                            .get(&digest)
+                            .await
+                            .map_err(|e| {
+                                warn!(err = %e, directory.digest=%digest, "failed to get directory");
+                                tonic::Status::new(tonic::Code::Internal, e.to_string())
+                            })?
+                            .ok_or_else(|| {
+                                Status::not_found(format!("directory {} not found", digest))
+                            })?;
+
+                        Box::pin(once(Ok(directory)))
+                    } else {
+                        // If recursive was requested, traverse via get_recursive.
+                        Box::pin(
+                            self.directory_service.get_recursive(&digest).map_err(|e| {
+                                tonic::Status::new(tonic::Code::Internal, e.to_string())
+                            }),
+                        )
                     }
-                } else {
-                    // If recursive was requested, traverse via get_recursive.
-                    let mut directories_it = self.directory_service.get_recursive(&digest);
-
-                    while let Some(e) = directories_it.next().await {
-                        // map err in res from Error to Status
-                        let res = e.map_err(|e| Status::internal(e.to_string()));
-                        if tx.send(res).await.is_err() {
-                            debug!("receiver dropped");
-                            break;
-                        }
-                    }
-                }
+                }))
             }
         }
-
-        let receiver_stream = ReceiverStream::new(rx);
-        Ok(Response::new(receiver_stream))
     }
 
     #[instrument(skip_all)]
diff --git a/tvix/castore/src/proto/mod.rs b/tvix/castore/src/proto/mod.rs
index 97ef183588..5374e3ae5a 100644
--- a/tvix/castore/src/proto/mod.rs
+++ b/tvix/castore/src/proto/mod.rs
@@ -66,7 +66,7 @@ pub enum ValidateStatBlobResponseError {
 
 /// Checks a Node name for validity as an intermediate node.
 /// We disallow slashes, null bytes, '.', '..' and the empty string.
-fn validate_node_name(name: &[u8]) -> Result<(), ValidateNodeError> {
+pub(crate) fn validate_node_name(name: &[u8]) -> Result<(), ValidateNodeError> {
     if name.is_empty()
         || name == b".."
         || name == b"."
@@ -179,6 +179,42 @@ impl Ord for node::Node {
     }
 }
 
+impl PartialOrd for FileNode {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for FileNode {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.get_name().cmp(other.get_name())
+    }
+}
+
+impl PartialOrd for SymlinkNode {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for SymlinkNode {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.get_name().cmp(other.get_name())
+    }
+}
+
+impl PartialOrd for DirectoryNode {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for DirectoryNode {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.get_name().cmp(other.get_name())
+    }
+}
+
 /// Accepts a name, and a mutable reference to the previous name.
 /// If the passed name is larger than the previous one, the reference is updated.
 /// If it's not, an error is returned.
@@ -303,6 +339,56 @@ impl Directory {
             i_symlinks: self.symlinks.iter().peekable(),
         };
     }
+
+    /// Adds the specified [node::Node] to the [Directory], preserving sorted entries.
+    /// This assumes the [Directory] to be sorted prior to adding the node.
+    ///
+    /// Inserting an element that already exists with the same name in the directory is not
+    /// supported.
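+    ///
+    /// A short sketch (field values elided via `Default`):
+    /// ```ignore
+    /// let mut d = Directory::default();
+    /// d.add(node::Node::Directory(DirectoryNode { name: "b".into(), ..Default::default() }));
+    /// d.add(node::Node::Directory(DirectoryNode { name: "a".into(), ..Default::default() }));
+    /// // d.directories is now sorted by name: ["a", "b"].
+    /// ```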
+    pub fn add(&mut self, node: node::Node) {
+        debug_assert!(
+            !self.files.iter().any(|x| x.get_name() == node.get_name()),
+            "name already exists in files"
+        );
+        debug_assert!(
+            !self
+                .directories
+                .iter()
+                .any(|x| x.get_name() == node.get_name()),
+            "name already exists in directories"
+        );
+        debug_assert!(
+            !self
+                .symlinks
+                .iter()
+                .any(|x| x.get_name() == node.get_name()),
+            "name already exists in symlinks"
+        );
+
+        match node {
+            node::Node::File(node) => {
+                let pos = self
+                    .files
+                    .binary_search(&node)
+                    .expect_err("Tvix bug: dir entry with name already exists");
+                self.files.insert(pos, node);
+            }
+            node::Node::Directory(node) => {
+                let pos = self
+                    .directories
+                    .binary_search(&node)
+                    .expect_err("Tvix bug: dir entry with name already exists");
+                self.directories.insert(pos, node);
+            }
+            node::Node::Symlink(node) => {
+                let pos = self
+                    .symlinks
+                    .binary_search(&node)
+                    .expect_err("Tvix bug: dir entry with name already exists");
+                self.symlinks.insert(pos, node);
+            }
+        }
+    }
 }
 
 impl StatBlobResponse {
diff --git a/tvix/castore/src/proto/tests/directory.rs b/tvix/castore/src/proto/tests/directory.rs
index 5fda394775..81b73a048d 100644
--- a/tvix/castore/src/proto/tests/directory.rs
+++ b/tvix/castore/src/proto/tests/directory.rs
@@ -1,5 +1,6 @@
 use crate::proto::{
-    Directory, DirectoryNode, FileNode, SymlinkNode, ValidateDirectoryError, ValidateNodeError,
+    node, Directory, DirectoryNode, FileNode, SymlinkNode, ValidateDirectoryError,
+    ValidateNodeError,
 };
 
 use hex_literal::hex;
@@ -371,3 +372,81 @@ fn validate_overflow() {
         _ => panic!("unexpected error"),
     }
 }
+
+#[test]
+fn add_nodes_to_directory() {
+    let mut d = Directory {
+        ..Default::default()
+    };
+
+    d.add(node::Node::Directory(DirectoryNode {
+        name: "b".into(),
+        digest: DUMMY_DIGEST.to_vec().into(),
+        size: 1,
+    }));
+    d.add(node::Node::Directory(DirectoryNode {
+        name: "a".into(),
+        digest: DUMMY_DIGEST.to_vec().into(),
+        size: 1,
+    }));
+    d.add(node::Node::Directory(DirectoryNode {
+        name: "z".into(),
+        digest: DUMMY_DIGEST.to_vec().into(),
+        size: 1,
+    }));
+
+    d.add(node::Node::File(FileNode {
+        name: "f".into(),
+        digest: DUMMY_DIGEST.to_vec().into(),
+        size: 1,
+        executable: true,
+    }));
+    d.add(node::Node::File(FileNode {
+        name: "c".into(),
+        digest: DUMMY_DIGEST.to_vec().into(),
+        size: 1,
+        executable: true,
+    }));
+    d.add(node::Node::File(FileNode {
+        name: "g".into(),
+        digest: DUMMY_DIGEST.to_vec().into(),
+        size: 1,
+        executable: true,
+    }));
+
+    d.add(node::Node::Symlink(SymlinkNode {
+        name: "t".into(),
+        target: "a".into(),
+    }));
+    d.add(node::Node::Symlink(SymlinkNode {
+        name: "o".into(),
+        target: "a".into(),
+    }));
+    d.add(node::Node::Symlink(SymlinkNode {
+        name: "e".into(),
+        target: "a".into(),
+    }));
+
+    d.validate().expect("directory should be valid");
+}
+
+#[test]
+#[cfg_attr(not(debug_assertions), ignore)]
+#[should_panic = "name already exists in directories"]
+fn add_duplicate_node_to_directory_panics() {
+    let mut d = Directory {
+        ..Default::default()
+    };
+
+    d.add(node::Node::Directory(DirectoryNode {
+        name: "a".into(),
+        digest: DUMMY_DIGEST.to_vec().into(),
+        size: 1,
+    }));
+    d.add(node::Node::File(FileNode {
+        name: "a".into(),
+        digest: DUMMY_DIGEST.to_vec().into(),
+        size: 1,
+        executable: true,
+    }));
+}
diff --git a/tvix/castore/src/tests/import.rs b/tvix/castore/src/tests/import.rs
index b44b71cd78..8b3bd5ce0f 100644
--- a/tvix/castore/src/tests/import.rs
+++ b/tvix/castore/src/tests/import.rs
@@ -1,7 +1,7 @@
 use crate::blobservice::{self, BlobService};
 use crate::directoryservice;
 use crate::fixtures::*;
-use crate::import::ingest_path;
+use crate::import::fs::ingest_path;
 use crate::proto;
 
 use std::sync::Arc;
diff --git a/tvix/cli/Cargo.toml b/tvix/cli/Cargo.toml
index d15165383b..1fa2351822 100644
--- a/tvix/cli/Cargo.toml
+++ b/tvix/cli/Cargo.toml
@@ -21,10 +21,7 @@ rustyline = "10.0.0"
 thiserror = "1.0.38"
 tokio = "1.28.0"
 tracing = { version = "0.1.37", features = ["max_level_trace", "release_max_level_info"] }
-tracing-subscriber = { version = "0.3.16", features = ["json"] }
+tracing-subscriber = "0.3.16"
 
 [dependencies.wu-manber]
 git = "https://github.com/tvlfyi/wu-manber.git"
-
-[dev-dependencies]
-test-case = "3.3.1"
diff --git a/tvix/cli/default.nix b/tvix/cli/default.nix
index 8782d34bf6..62e93cc213 100644
--- a/tvix/cli/default.nix
+++ b/tvix/cli/default.nix
@@ -1,23 +1,11 @@
 { depot, pkgs, lib, ... }:
 
-let
-  mkNixpkgsEvalCheck = attrset: expectedPath: {
-    label = ":nix: evaluate nixpkgs.${attrset} in tvix";
-    needsOutput = true;
-
-    command = pkgs.writeShellScript "tvix-eval-${builtins.replaceStrings [".drv"] ["-drv"] attrset}" ''
-      TVIX_OUTPUT=$(result/bin/tvix -E '(import ${pkgs.path} {}).${attrset}')
-      EXPECTED='${/* the verbatim expected Tvix output: */ "=> \"${builtins.unsafeDiscardStringContext expectedPath}\" :: string"}'
-
-      echo "Tvix output: ''${TVIX_OUTPUT}"
-      if [ "$TVIX_OUTPUT" != "$EXPECTED" ]; then
-        echo "Correct would have been ''${EXPECTED}"
-        exit 1
-      fi
+(depot.tvix.crates.workspaceMembers.tvix-cli.build.override {
+  runTests = true;
+}).overrideAttrs (finalAttrs: previousAttrs:
 
-      echo "Output was correct."
-    '';
-  };
+let
+  tvix-cli = finalAttrs.finalPackage;
 
   benchmark-gnutime-format-string =
     description:
@@ -30,24 +18,16 @@ let
       };
     });
 
-in
-
-(depot.tvix.crates.workspaceMembers.tvix-cli.build.override {
-  runTests = true;
-}).overrideAttrs (finalAttrs: previousAttrs:
-
-let
-  tvix-cli = finalAttrs.finalPackage;
-
   # You can run the benchmark with a simple `nix-build`, like:
   #
-  #  nix run -f . tvix.cli.meta.ci.extraSteps.benchmark-nixpkgs-cross-hello-outpath
+  #  nix-build -A tvix.cli.meta.ci.extraSteps.benchmark-nixpkgs-cross-hello-outpath
   #
   # TODO(amjoseph): store these results someplace more durable, like git trailers
   #
   mkExprBenchmark = { expr, description }:
     let name = "tvix-cli-benchmark-${description}"; in
-    (pkgs.writeShellScriptBin name ''
+    (pkgs.runCommand name { } ''
+      export SSL_CERT_FILE=${pkgs.cacert.out}/etc/ssl/certs/ca-bundle.crt
       ${lib.escapeShellArgs [
         "${pkgs.time}/bin/time"
         "--format" "${benchmark-gnutime-format-string description}"
@@ -55,15 +35,8 @@ let
         "--no-warnings"
         "-E" expr
       ]}
-    '').overrideAttrs (finalAttrs: previousAttrs: {
-      passthru = (previousAttrs.passthru or { }) // {
-        ci = {
-          label = ":nix: benchmark ${description} in tvix";
-          needsOutput = true;
-          command = "${finalAttrs.finalPackage}/bin/${finalAttrs.meta.mainProgram}";
-        };
-      };
-    });
+      touch $out
+    '');
 
   mkNixpkgsBenchmark = attrpath:
     mkExprBenchmark {
@@ -71,6 +44,28 @@ let
       expr = "(import ${pkgs.path} {}).${attrpath}";
     };
 
+  # Constructs a Derivation invoking tvix-cli inside a build, and ensures the
+  # calculated tvix output path matches what's passed in externally.
+  mkNixpkgsEvalTest = attrpath: expectedPath:
+    let
+      name = "tvix-eval-test-${builtins.replaceStrings [".drv"] ["-drv"] attrpath}";
+    in
+    (pkgs.runCommand name { } ''
+      export SSL_CERT_FILE=${pkgs.cacert.out}/etc/ssl/certs/ca-bundle.crt
+      TVIX_OUTPUT=$(${tvix-cli}/bin/tvix -E '(import ${pkgs.path} {}).${attrpath}')
+      EXPECTED='${/* the verbatim expected Tvix output: */ "=> \"${builtins.unsafeDiscardStringContext expectedPath}\" :: string"}'
+
+      echo "Tvix output: ''${TVIX_OUTPUT}"
+      if [ "$TVIX_OUTPUT" != "$EXPECTED" ]; then
+        echo "Correct would have been ''${EXPECTED}"
+        exit 1
+      fi
+
+      echo "Output was correct."
+      touch $out
+    '');
+
+
   benchmarks = {
     benchmark-hello = (mkNixpkgsBenchmark "hello.outPath");
     benchmark-cross-hello = (mkNixpkgsBenchmark "pkgsCross.aarch64-multiplatform.hello.outPath");
@@ -79,21 +74,22 @@ let
     # Example used for benchmarking LightSpan::Delayed in commit bf286a54bc2ac5eeb78c3d5c5ae66e9af24d74d4
     benchmark-nixpkgs-attrnames = (mkExprBenchmark { expr = "builtins.length (builtins.attrNames (import ${pkgs.path} {}))"; description = "nixpkgs-attrnames"; });
   };
+
+  evalTests = {
+    eval-nixpkgs-stdenv-drvpath = (mkNixpkgsEvalTest "stdenv.drvPath" pkgs.stdenv.drvPath);
+    eval-nixpkgs-stdenv-outpath = (mkNixpkgsEvalTest "stdenv.outPath" pkgs.stdenv.outPath);
+    eval-nixpkgs-hello-outpath = (mkNixpkgsEvalTest "hello.outPath" pkgs.hello.outPath);
+    eval-nixpkgs-firefox-outpath = (mkNixpkgsEvalTest "firefox.outPath" pkgs.firefox.outPath);
+    eval-nixpkgs-firefox-drvpath = (mkNixpkgsEvalTest "firefox.drvPath" pkgs.firefox.drvPath);
+    eval-nixpkgs-cross-stdenv-outpath = (mkNixpkgsEvalTest "pkgsCross.aarch64-multiplatform.stdenv.outPath" pkgs.pkgsCross.aarch64-multiplatform.stdenv.outPath);
+    eval-nixpkgs-cross-hello-outpath = (mkNixpkgsEvalTest "pkgsCross.aarch64-multiplatform.hello.outPath" pkgs.pkgsCross.aarch64-multiplatform.hello.outPath);
+  };
 in
 {
   meta = {
-    ci.extraSteps = {
-      eval-nixpkgs-stdenv-drvpath = (mkNixpkgsEvalCheck "stdenv.drvPath" pkgs.stdenv.drvPath);
-      eval-nixpkgs-stdenv-outpath = (mkNixpkgsEvalCheck "stdenv.outPath" pkgs.stdenv.outPath);
-      eval-nixpkgs-hello-outpath = (mkNixpkgsEvalCheck "hello.outPath" pkgs.hello.outPath);
-      eval-nixpkgs-firefox-outpath = (mkNixpkgsEvalCheck "firefox.outPath" pkgs.firefox.outPath);
-      eval-nixpkgs-firefox-drvpath = (mkNixpkgsEvalCheck "firefox.drvPath" pkgs.firefox.drvPath);
-      eval-nixpkgs-cross-stdenv-outpath = (mkNixpkgsEvalCheck "pkgsCross.aarch64-multiplatform.stdenv.outPath" pkgs.pkgsCross.aarch64-multiplatform.stdenv.outPath);
-      eval-nixpkgs-cross-hello-outpath = (mkNixpkgsEvalCheck "pkgsCross.aarch64-multiplatform.hello.outPath" pkgs.pkgsCross.aarch64-multiplatform.hello.outPath);
-    };
-    ci.targets = builtins.attrNames benchmarks;
+    ci.targets = (builtins.attrNames benchmarks) ++ (builtins.attrNames evalTests);
   };
 
-  # Expose benchmarks as standard CI targets.
-  passthru = benchmarks;
+  # Expose benchmarks and evalTests as standard CI targets.
+  passthru = benchmarks // evalTests;
 })
diff --git a/tvix/cli/src/main.rs b/tvix/cli/src/main.rs
index 436e895863..d66d2ce4cb 100644
--- a/tvix/cli/src/main.rs
+++ b/tvix/cli/src/main.rs
@@ -5,6 +5,7 @@ use std::{fs, path::PathBuf};
 use tracing::Level;
 use tracing_subscriber::fmt::writer::MakeWriterExt;
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
+use tracing_subscriber::{EnvFilter, Layer};
 use tvix_build::buildservice;
 use tvix_eval::builtins::impure_builtins;
 use tvix_eval::observer::{DisassemblingObserver, TracingObserver};
@@ -79,27 +80,23 @@ struct Args {
     build_service_addr: String,
 }
 
-/// Interprets the given code snippet, printing out warnings, errors
-/// and the result itself. The return value indicates whether
-/// evaluation succeeded.
-fn interpret(code: &str, path: Option<PathBuf>, args: &Args, explain: bool) -> bool {
-    let tokio_runtime = tokio::runtime::Runtime::new().expect("failed to setup tokio runtime");
-
-    let (blob_service, directory_service, path_info_service) = tokio_runtime
-        .block_on({
-            let blob_service_addr = args.blob_service_addr.clone();
-            let directory_service_addr = args.directory_service_addr.clone();
-            let path_info_service_addr = args.path_info_service_addr.clone();
-            async move {
-                tvix_store::utils::construct_services(
-                    blob_service_addr,
-                    directory_service_addr,
-                    path_info_service_addr,
-                )
-                .await
-            }
-        })
-        .expect("unable to setup {blob|directory|pathinfo}service before interpreter setup");
+fn init_io_handle(tokio_runtime: &tokio::runtime::Runtime, args: &Args) -> Rc<TvixStoreIO> {
+    let (blob_service, directory_service, path_info_service, nar_calculation_service) =
+        tokio_runtime
+            .block_on({
+                let blob_service_addr = args.blob_service_addr.clone();
+                let directory_service_addr = args.directory_service_addr.clone();
+                let path_info_service_addr = args.path_info_service_addr.clone();
+                async move {
+                    tvix_store::utils::construct_services(
+                        blob_service_addr,
+                        directory_service_addr,
+                        path_info_service_addr,
+                    )
+                    .await
+                }
+            })
+            .expect("unable to setup {blob|directory|pathinfo}service before interpreter setup");
 
     let build_service = tokio_runtime
         .block_on({
@@ -116,14 +113,26 @@ fn interpret(code: &str, path: Option<PathBuf>, args: &Args, explain: bool) -> b
         })
         .expect("unable to setup buildservice before interpreter setup");
 
-    let tvix_store_io = Rc::new(TvixStoreIO::new(
+    Rc::new(TvixStoreIO::new(
         blob_service.clone(),
         directory_service.clone(),
         path_info_service.into(),
+        nar_calculation_service.into(),
         build_service.into(),
         tokio_runtime.handle().clone(),
-    ));
+    ))
+}
 
+/// Interprets the given code snippet, printing out warnings, errors
+/// and the result itself. The return value indicates whether
+/// evaluation succeeded.
+fn interpret(
+    tvix_store_io: Rc<TvixStoreIO>,
+    code: &str,
+    path: Option<PathBuf>,
+    args: &Args,
+    explain: bool,
+) -> bool {
     let mut eval = tvix_eval::Evaluation::new(
         Box::new(TvixIO::new(tvix_store_io.clone() as Rc<dyn EvalIO>)) as Box<dyn EvalIO>,
         true,
@@ -229,24 +238,34 @@ fn main() {
     let subscriber = tracing_subscriber::registry().with(
         tracing_subscriber::fmt::Layer::new()
             .with_writer(std::io::stderr.with_max_level(level))
-            .pretty(),
+            .compact()
+            .with_filter(
+                EnvFilter::builder()
+                    .with_default_directive(level.into())
+                    .from_env()
+                    .expect("invalid RUST_LOG"),
+            ),
     );
     subscriber
         .try_init()
         .expect("unable to set up tracing subscriber");
 
+    let tokio_runtime = tokio::runtime::Runtime::new().expect("failed to setup tokio runtime");
+
+    let io_handle = init_io_handle(&tokio_runtime, &args);
+
     if let Some(file) = &args.script {
-        run_file(file.clone(), &args)
+        run_file(io_handle, file.clone(), &args)
     } else if let Some(expr) = &args.expr {
-        if !interpret(expr, None, &args, false) {
+        if !interpret(io_handle, expr, None, &args, false) {
             std::process::exit(1);
         }
     } else {
-        run_prompt(&args)
+        run_prompt(io_handle, &args)
     }
 }
 
-fn run_file(mut path: PathBuf, args: &Args) {
+fn run_file(io_handle: Rc<TvixStoreIO>, mut path: PathBuf, args: &Args) {
     if path.is_dir() {
         path.push("default.nix");
     }
@@ -255,7 +274,7 @@ fn run_file(mut path: PathBuf, args: &Args) {
     let success = if args.compile_only {
         lint(&contents, Some(path), args)
     } else {
-        interpret(&contents, Some(path), args, false)
+        interpret(io_handle, &contents, Some(path), args, false)
     };
 
     if !success {
@@ -279,7 +298,7 @@ fn state_dir() -> Option<PathBuf> {
     path
 }
 
-fn run_prompt(args: &Args) {
+fn run_prompt(io_handle: Rc<TvixStoreIO>, args: &Args) {
     let mut rl = Editor::<()>::new().expect("should be able to launch rustyline");
 
     if args.compile_only {
@@ -310,9 +329,9 @@ fn run_prompt(args: &Args) {
                 rl.add_history_entry(&line);
 
                 if let Some(without_prefix) = line.strip_prefix(":d ") {
-                    interpret(without_prefix, None, args, true);
+                    interpret(Rc::clone(&io_handle), without_prefix, None, args, true);
                 } else {
-                    interpret(&line, None, args, false);
+                    interpret(Rc::clone(&io_handle), &line, None, args, false);
                 }
             }
             Err(ReadlineError::Interrupted) | Err(ReadlineError::Eof) => break,
diff --git a/tvix/crate-hashes.json b/tvix/crate-hashes.json
index e0cb5e6df9..2c1e740cb9 100644
--- a/tvix/crate-hashes.json
+++ b/tvix/crate-hashes.json
@@ -1,5 +1,4 @@
 {
-  "bigtable_rs 0.2.9 (git+https://github.com/flokli/bigtable_rs?rev=0af404741dfc40eb9fa99cf4d4140a09c5c20df7#0af404741dfc40eb9fa99cf4d4140a09c5c20df7)": "1njjam1lx2xlnm7a41lga8601vmjgqz0fvc77x24gd04pc7avxll",
-  "test-generator 0.3.0 (git+https://github.com/JamesGuthrie/test-generator.git?rev=82e799979980962aec1aa324ec6e0e4cad781f41#82e799979980962aec1aa324ec6e0e4cad781f41)": "08brp3qqa55hijc7xby3lam2cc84hvx1zzfqv6lj7smlczh8k32y",
-  "wu-manber 0.1.0 (git+https://github.com/tvlfyi/wu-manber.git#0d5b22bea136659f7de60b102a7030e0daaa503d)": "1zhk83lbq99xzyjwphv2qrb8f8qgfqwa5bbbvyzm0z0bljsjv0pd"
+  "git+https://github.com/flokli/bigtable_rs?rev=0af404741dfc40eb9fa99cf4d4140a09c5c20df7#0.2.9": "1njjam1lx2xlnm7a41lga8601vmjgqz0fvc77x24gd04pc7avxll",
+  "git+https://github.com/tvlfyi/wu-manber.git#wu-manber@0.1.0": "1zhk83lbq99xzyjwphv2qrb8f8qgfqwa5bbbvyzm0z0bljsjv0pd"
 }
\ No newline at end of file
diff --git a/tvix/default.nix b/tvix/default.nix
index f965959c91..a3a4d35df6 100644
--- a/tvix/default.nix
+++ b/tvix/default.nix
@@ -89,7 +89,6 @@ let
           (lib.nameValuePair "${crateName}-${crates.internal.crates.${crateName}.version}" crates.internal.crates.${crateName}.src.outputHash)
         ) [
         "bigtable_rs"
-        "test-generator"
         "wu-manber"
       ]);
   };
@@ -225,9 +224,7 @@ in
       rustPlatform.cargoSetupHook
     ];
 
-    # Allow blocks_in_conditions due to false positives with #[tracing::instrument(โ€ฆ)]:
-    # https://github.com/rust-lang/rust-clippy/issues/12281
-    buildPhase = "cargo clippy --tests --all-features --benches --examples -- -Dwarnings -A clippy::blocks_in_conditions | tee $out";
+    buildPhase = "cargo clippy --tests --all-features --benches --examples -- -Dwarnings | tee $out";
   };
 
   meta.ci.targets = [
diff --git a/tvix/docs/src/SUMMARY.md b/tvix/docs/src/SUMMARY.md
index f9b069d42c..954abae338 100644
--- a/tvix/docs/src/SUMMARY.md
+++ b/tvix/docs/src/SUMMARY.md
@@ -2,8 +2,13 @@
 
 # Tvix
 - [Architecture & data flow](./architecture.md)
+- [TODOs](./TODO.md)
 
 # Nix
 - [Specification of the Nix Language](./language-spec.md)
 - [Nix language version history](./lang-version.md)
 - [Value Pointer Equality](./value-pointer-equality.md)
+- [Daemon protocol changelog](./nix-daemon/changelog.md)
+- [Daemon protocol logging](./nix-daemon/logging.md)
+- [Daemon protocol operations](./nix-daemon/operations.md)
+- [Daemon protocol serialization](./nix-daemon/serialization.md)
\ No newline at end of file
diff --git a/tvix/docs/src/TODO.md b/tvix/docs/src/TODO.md
new file mode 100644
index 0000000000..8fb22ea822
--- /dev/null
+++ b/tvix/docs/src/TODO.md
@@ -0,0 +1,142 @@
+# TODO
+
+This contains a rough collection of ideas for the TODO list, to keep track
+of them somewhere.
+
+Of course, there's no guarantee these things will get addressed, but it helps
+to dump the backlog somewhere.
+
+Feel free to add new ideas. Before picking something, ask in `#tvix-dev` to make
+sure no one is already working on it, or has some specific design in mind.
+
+## Cleanups
+### Nix language test suite
+ - Think about how to merge, but "categorize" `tvix_tests` in `glue` and `eval`.
+   We currently only have this split because they need a different feature set
+   / builtins.
+ - move some of the rstest cases in `tvix-glue` to the `.nix`/`.exp` mechanism.
+   Some of them need test fixtures, which cannot be represented in git (special
+   file types in the import tests for example). Needs some support from the test
+   suite to create these fixtures on demand.
+ - extend `verify-lang-tests/default.nix` mechanism to validate `tvix-eval` and
+   `tvix-glue` test cases (or the common structure above).
+ - absorb `eval/tests/nix_oracle.rs` into `tvix_tests`, or figure out why it's
+   not possible (and document it). It looks like the only difference is that
+   Nix is invoked with a different level of `--strict`, but the toplevel
+   doc-comment suggests it's generic?
+
+### Error cleanup
+ - Currently, all services use tvix_castore::Error, which only has two kinds
+   (invalid request, storage error), containing an (owned) string.
+   This is quite primitive. We should have individual error types for BS, DS, PS.
+   Maybe these should have some generics to still be able to carry errors from
+   the underlying backend, similar to `IngestionError`.
+
+## Fixes towards correctness
+ - `builtins.toXML` is missing string context. See b/398.
+ - `builtins.toXML` self-closing tags need to be configurable in a more granular
+   fashion, requires third-party crate support. See b/399.
+ - `rnix` only supports string source files, but `NixString` uses bytes (and Nix
+   source code might not be valid UTF-8).
+
+## Documentation
+Extend the other pages in here. Some ideas on what should be tackled:
+ - Document what Tvix is, and what it is not yet. What it is now, what it is not
+   (yet), explaining some of the architectural choices (castore, more hermetic
+   `Build` repr), while still being compatible. Explain how it's possible to
+   plug in other frontends, and use `tvix-{[ca]store,build}` even without Nixlang.
+   And how `nix-compat` is a useful crate for all sorts of formats and data
+   types of Nix.
+ - Update the Architecture diagram to model the current state of things.
+   There's no gRPC between Coordinator and Evaluator.
+ - Add a dedicated section/page explaining the separation between tvix-glue and
+   tvix-eval, and how more annoying builtins get injected into tvix-eval through
+   tvix-glue.
+   Maybe restructure to only explain the component structure potentially
+   crossing process boundaries (those with gRPC), and make the rest more crate
+   and trait-focused?
+ - Restructure docs on castore vs store, this seems to be duplicated a bit and
+   is probably still not too clear.
+ - Describe store composition(s) in more detail. There's some notes on granular
+   fetching which probably can be repurposed.
+ - Absorb the rest of //tvix/website into this.
+
+## Features
+
+### CLI
+ - `nix repl` can set variables and effectively mutates a global scope. We
+  should update the existing / add another repl that allows the same. We don't
+  want to mutate the evaluator, but should construct a new one, passing in the
+  root scope returned from the previous evaluation.
+
+### Fetchers
+Some more fetcher-related builtins need work:
+ - `fetchGit`
+ - `fetchTree` (hairy, seems there's no proper spec and the URL syntax seems
+   subject to change/underdocumented)
+
+### Convert builtins:fetchurl to Fetches
+We need to convert `builtins:fetchurl`-style calls to `builtins.derivation`
+into fetches, not Derivations (tracked in `KnownPaths`).
+
+### Derivation -> Build
+While we have some support for `structuredAttrs` and `fetchClosure` (at least
+enough to calculate output hashes, aka produce identical ATerm), the code
+populating the `Build` struct doesn't exist yet.
+
+Similarly, we also don't properly populate the build environment for
+`fetchClosure` yet. (Note there already is `ExportedPathInfo`, so once
+`structuredAttrs` is there, this should be easy.)
+
+### Builders
+Once builds are proven to work with real-world builds, and the corner cases
+there are ruled out, adding other types of builders might be interesting.
+
+ - bwrap
+ - gVisor
+ - Cloud Hypervisor (using similar technique as `//tvix//boot`).
+
+Long-term, we want to extend the traits and gRPC protocol to expose more
+telemetry, logs etc., but this requires a lot of design work.
+
+### Store composition
+ - Combinators: list-by-priority, first-come-first-serve, cache
+ - How to describe hierarchies. The URL format is too one-dimensional, but we
+   might get quite far with a similar "substituters" concept that Nix uses, to
+   construct the composed stores.
+
+### Store Config
+There's already serde for some store options (bigtable uses `serde_qs`).
+We might also have common options global over all backends, like chunking
+parameters for chunking blobservices. Think about where this would fit in.
+ - Rework the URL syntax for object_store. We should support the default s3/gcs
+   URLs at least.
+
+### BlobService
+ - On the trait side, currently there's no way to distinguish reading a
+   known-chunk vs blob, so we might be calling `.chunks()` unnecessarily often.
+   At least for the `object_store` backend, this might be a problem.
+ - While `object_store` recently got support for `Content-Type`
+   (https://github.com/apache/arrow-rs/pull/5650), there's no support on the
+   local filesystem yet. We'd need to add support for this (through xattrs).
+
+### DirectoryService
+ - Add an `object_store` variant, storing a Directory *closure* keyed by the
+   root `Directory` digest. This won't allow indexing intermediate Directory
+   nodes, but once we have `DirectoryService` composition, it shouldn't be an
+   issue.
+ - [redb](https://www.redb.org/) backend
+
+### PathInfoService
+ - [redb](https://www.redb.org/) backend
+ - sqlite backend (different schema than the Nix one, we need the root nodes data!)
+
+### Nix Daemon protocol
+- Some work ongoing on the worker operation parsing (griff, picnoir)
+
+### O11Y
+ - gRPC trace propagation (cl/10532)
+ - `tracing-tracy` (cl/10952)
+ - `[tracing-]indicatif` for progress/log reporting (flokli's stash)
+ - unification into `tvix-tracing` crate, currently a lot of boilerplate
+   in `tvix-store` CLI entrypoint, and half of the boilerplate copied over to
+   `tvix-cli`.
diff --git a/tvix/docs/src/nix-daemon/changelog.md b/tvix/docs/src/nix-daemon/changelog.md
new file mode 100644
index 0000000000..bc99dc6af0
--- /dev/null
+++ b/tvix/docs/src/nix-daemon/changelog.md
@@ -0,0 +1,202 @@
+# Changelog
+
+## Nix version protocol
+
+| Nix version     | Protocol |
+| --------------- | -------- |
+| 0.11            | 1.02     |
+| 0.12            | 1.04     |
+| 0.13            | 1.05     |
+| 0.14            | 1.05     |
+| 0.15            | 1.05     |
+| 0.16            | 1.06     |
+| 1.0             | 1.10     |
+| 1.1             | 1.11     |
+| 1.2             | 1.12     |
+| 1.3 - 1.5.3     | 1.13     |
+| 1.6 - 1.10      | 1.14     |
+| 1.11 - 1.11.16  | 1.15     |
+| 2.0 - 2.0.4     | 1.20     |
+| 2.1 - 2.3.18    | 1.21     |
+| 2.4 - 2.6.1     | 1.32     |
+| 2.7.0           | 1.33     |
+| 2.8.0 - 2.14.1  | 1.34     |
+| 2.15.0 - 2.19.4 | 1.35     |
+| 2.20.0 - 2.22.0 | 1.37     |
+
+In commit [be64fbb501][be64fbb501], support was dropped for protocol versions older than 1.10.
+This happened when the protocol was between 1.17 and 1.18, and was released with Nix 2.0.
+This means that no version of Nix 2.x can talk to Nix 0.x.
+
+## Operation History
+
+| Op              | Id | Commit         | Protocol | Nix Version | Notes |
+| --------------- | -- | -------------- | -------- | ----------- | ----- |
+| *Quit           | 0  | [a711689368][a711689368] || 0.11 | Became dead code in [7951c3c54][7951c3c54] (Nix 0.11) and removed in [d3c61d83b][d3c61d83b] (Nix 1.8) |
+| IsValidPath     | 1  | [a711689368][a711689368] || 0.11 ||
+| HasSubstitutes  | 3  | [0565b5f2b3][0565b5f2b3] || 0.11 ||
+| QueryPathHash   | 4  | [0565b5f2b3][0565b5f2b3] || 0.11 | Obsolete [e0204f8d46][e0204f8d46]<br>Nix 2.0 Protocol 1.16 |
+| QueryReferences | 5  | [0565b5f2b3][0565b5f2b3] || 0.11 | Obsolete [e0204f8d46][e0204f8d46]<br>Nix 2.0 Protocol 1.16 |
+| QueryReferrers  | 6  | [0565b5f2b3][0565b5f2b3] || 0.11 ||
+| AddToStore      | 7  | [0263279071][0263279071] || 0.11 ||
+| AddTextToStore  | 8  | [0263279071][0263279071] || 0.11 | Obsolete [c602ebfb34][c602ebfb34]<br>Nix 2.4 Protocol 1.25 |
+| BuildPaths      | 9  | [0565b5f2b3][0565b5f2b3] || 0.11 ||
+| EnsurePath      | 10 | [0565b5f2b3][0565b5f2b3] || 0.11 ||
+| AddTempRoot     | 11 | [e25fad691a][e25fad691a] || 0.11 ||
+| AddIndirectRoot | 12 | [74033a844f][74033a844f] || 0.11 ||
+| SyncWithGC      | 13 | [e25fad691a][e25fad691a] || 0.11 | Obsolete [9947f1646a][9947f1646a]<br> Nix 2.5.0 Protocol 1.32 |
+| FindRoots       | 14 | [29cf434a35][29cf434a35] || 0.11 ||
+| *CollectGarbage | 15 | [a9c4f66cfb][a9c4f66cfb] || 0.11 | Removed [a72709afd8][a72709afd8]<br>Nix 0.12 Protocol 1.02 |
+| ExportPath      | 16 | [0f5da8a83c][0f5da8a83c] || 0.11 | Obsolete [538a64e8c3][538a64e8c3]<br>Nix 2.0 Protocol 1.17 |
+| *ImportPath     | 17 | [0f5da8a83c][0f5da8a83c] || 0.11 | Removed [273b288a7e][273b288a7e]<br>Nix 1.0 Protocol 1.09 |
+| QueryDeriver    | 18 | [6d1a1191b0][6d1a1191b0] || 0.11 | Obsolete [e0204f8d46][e0204f8d46]<br>Nix 2.0 Protocol 1.16 |
+| SetOptions      | 19 | [f3441e6122][f3441e6122] || 0.11 ||
+| CollectGarbage              | 20 | [a72709afd8][a72709afd8] | 1.02  | 0.12   ||
+| QuerySubstitutablePathInfo  | 21 | [03427e76f1][03427e76f1] | 1.02  | 0.12   ||
+| QueryDerivationOutputs      | 22 | [e42401ee7b][e42401ee7b] | 1.05  | 1.0    | Obsolete [d38f860c3e][d38f860c3e]<br>Nix 2.4 Protocol 1.22* |
+| QueryAllValidPaths          | 23 | [24035b98b1][24035b98b1] | 1.05  | 1.0    ||
+| *QueryFailedPaths            | 24 | [f92c9a0ac5][f92c9a0ac5] | 1.05  | 1.0    | Removed [8cffec848][8cffec848]<br>Nix 2.0 Protocol 1.16 |
+| *ClearFailedPaths            | 25 | [f92c9a0ac5][f92c9a0ac5] | 1.05  | 1.0    | Removed [8cffec848][8cffec848]<br>Nix 2.0 Protocol 1.16 |
+| QueryPathInfo               | 26 | [1db6259076][1db6259076] | 1.06  | 1.0    ||
+| ImportPaths                 | 27 | [273b288a7e][273b288a7e] | 1.09  | 1.0    | Obsolete [538a64e8c3][538a64e8c3]<br>Nix 2.0 Protocol 1.17 |
+| QueryDerivationOutputNames  | 28 | [af2e53fd48][af2e53fd48]<br>([194d21f9f6][194d21f9f6]) | 1.08      | 1.0 | Obsolete<br>[045b07200c][045b07200c]<br>Nix 2.4 Protocol 1.21 |
+| QueryPathFromHashPart       | 29 | [ccc52adfb2][ccc52adfb2] | 1.11  | 1.1    ||
+| QuerySubstitutablePathInfos | 30 | [eb3036da87][eb3036da87] | 1.12* | 1.2    ||
+| QueryValidPaths             | 31 | [58ef4d9a95][58ef4d9a95] | 1.12  | 1.2    ||
+| QuerySubstitutablePaths     | 32 | [09a6321aeb][09a6321aeb] | 1.12  | 1.2    ||
+| QueryValidDerivers          | 33 | [2754a07ead][2754a07ead] | 1.13* | 1.3    ||
+| OptimiseStore               | 34 | [8fb8c26b6d][8fb8c26b6d] | 1.14  | 1.8    ||
+| VerifyStore                 | 35 | [b755752f76][b755752f76] | 1.14  | 1.9    ||
+| BuildDerivation             | 36 | [71a5161365][71a5161365] | 1.14  | 1.10   ||
+| AddSignatures               | 37 | [d0f5719c2a][d0f5719c2a] | 1.16  | 2.0    ||
+| NarFromPath                 | 38 | [b4b5e9ce2f][b4b5e9ce2f] | 1.17  | 2.0    ||
+| AddToStoreNar               | 39 | [584f8a62de][584f8a62de] | 1.17  | 2.0    ||
+| QueryMissing                | 40 | [ba20730b3f][ba20730b3f] | 1.19* | 2.0    ||
+| QueryDerivationOutputMap    | 41 | [d38f860c3e][d38f860c3e] | 1.22* | 2.4    ||
+| RegisterDrvOutput           | 42 | [58cdab64ac][58cdab64ac] | 1.27  | 2.4    ||
+| QueryRealisation            | 43 | [58cdab64ac][58cdab64ac] | 1.27  | 2.4    ||
+| AddMultipleToStore          | 44 | [fe1f34fa60][fe1f34fa60] | 1.32* | 2.4    ||
+| AddBuildLog                 | 45 | [4dda1f92aa][4dda1f92aa] | 1.32  | 2.6.0  ||
+| BuildPathsWithResults       | 46 | [a4604f1928][a4604f1928] | 1.34* | 2.8.0  ||
+| AddPermRoot                 | 47 | [226b0f3956][226b0f3956] | 1.36* | 2.20.0 ||
+
+Notes: Ops that start with * have been removed.
+A protocol version that ends with * was bumped while adding that operation. Otherwise, the protocol version refers to the version current at the time the operation was added (so only from the next protocol version onward can you assume the operation is present/removed/obsolete, since it was added/removed/obsoleted between protocol versions).
+
+## Protocol version change log
+
+- 1.01 [f3441e6122][f3441e6122] Initial Version
+- 1.02 [c370755583][c370755583] Use build hook
+- 1.03 [db4f4a8425][db4f4a8425] Backward compatibility check
+- 1.04 [96598e7b06][96598e7b06] SetOptions buildVerbosity
+- 1.05 [60ec75048a][60ec75048a] SetOptions useAtime & maxAtime
+- 1.06 [6846ed8b44][6846ed8b44] SetOptions buildCores
+- 1.07 [bdf089f463][bdf089f463] QuerySubstitutablePathInfo narSize
+- 1.08 [b1eb252172][b1eb252172] STDERR_ERROR exit status
+- 1.09 [e0bd307802][e0bd307802] ImportPath not supported on versions older than 1.09
+- 1.10 [db5b86ef13][db5b86ef13] SetOptions build-use-substitutes
+- 1.11 [4bc4da331a][4bc4da331a] open connection reserveSpace
+- 1.12 [eb3036da87][eb3036da87] Implement QuerySubstitutablePathInfos
+- 1.13 [2754a07ead][2754a07ead] Implement QueryValidDerivers
+- 1.14 [a583a2bc59][a583a2bc59] open connection cpu affinity
+- 1.15 [d1e3bf01bc][d1e3bf01bc] BuildPaths buildMode
+- 1.16 [9cee600c88][9cee600c88] QueryPathInfo ultimate & sigs
+- 1.17 [ddea253ff8][ddea253ff8] QueryPathInfo returns valid bool
+- 1.18 [4b8f1b0ec0][4b8f1b0ec0] Select between AddToStoreNar and ImportPaths
+- 1.19 [ba20730b3f][ba20730b3f] Implement QueryMissing
+- 1.20 [cfc8132391][cfc8132391] Don't send activity and result logs to old clients
+- 1.21 [6185d25e52][6185d25e52] AddToStoreNar uses TunnelLogger for data
+- 1.22 [d38f860c3e][d38f860c3e] Implement QueryDerivationOutputMap and obsolete QueryDerivationOutputs
+- 1.23 [4c0077a07d][4c0077a07d] AddToStoreNar uses FramedSink/-Source for data
+- 1.24 [5ccd94501d][5ccd94501d] Allow trustless building of CA derivations
+- 1.25 [e34fe47d0c][e34fe47d0c] New implementation of AddToStore
+- 1.26 [c43e882f54][c43e882f54] STDERR_ERROR serialize exception
+- 1.27 [3a63fc6cd5][3a63fc6cd5] QueryValidPaths substitute flag
+- 1.28 [27b5747ca7][27b5747ca7] BuildDerivation returns builtOutputs
+- 1.29 [9d309de0de][9d309de0de] BuildDerivation returns timesBuilt, isNonDeterministic, startTime & stopTime
+- 1.30 [e5951a6b2f][e5951a6b2f] Bump version number for DerivedPath changes
+- 1.31 [a8416866cf][a8416866cf] RegisterDrvOutput & QueryRealisation send realisations as JSON
+- 1.32 [fe1f34fa60][fe1f34fa60] Implement AddMultipleToStore
+- 1.33 [35dbdbedd4][35dbdbedd4] open connection sends nix version
+- 1.34 [a4604f1928][a4604f1928] Implement BuildPathsWithResults
+- 1.35 [9207f94582][9207f94582] open connection sends trusted option
+- 1.36 [226b0f3956][226b0f3956] Implement AddPermRoot
+- 1.37 [1e3d811840][1e3d811840] Serialize BuildResult send cpuUser & cpuSystem
+
+
+
+[0263279071]: https://github.com/NixOS/nix/commit/0263279071
+[03427e76f1]: https://github.com/NixOS/nix/commit/03427e76f1
+[045b07200c]: https://github.com/NixOS/nix/commit/045b07200c
+[0565b5f2b3]: https://github.com/NixOS/nix/commit/0565b5f2b3
+[09a6321aeb]: https://github.com/NixOS/nix/commit/09a6321aeb
+[0f5da8a83c]: https://github.com/NixOS/nix/commit/0f5da8a83c
+[194d21f9f6]: https://github.com/NixOS/nix/commit/194d21f9f6
+[1db6259076]: https://github.com/NixOS/nix/commit/1db6259076
+[1e3d811840]: https://github.com/NixOS/nix/commit/1e3d811840
+[24035b98b1]: https://github.com/NixOS/nix/commit/24035b98b1
+[226b0f3956]: https://github.com/NixOS/nix/commit/226b0f3956
+[273b288a7e]: https://github.com/NixOS/nix/commit/273b288a7e
+[2754a07ead]: https://github.com/NixOS/nix/commit/2754a07ead
+[27b5747ca7]: https://github.com/NixOS/nix/commit/27b5747ca7
+[29cf434a35]: https://github.com/NixOS/nix/commit/29cf434a35
+[35dbdbedd4]: https://github.com/NixOS/nix/commit/35dbdbedd4
+[3a63fc6cd5]: https://github.com/NixOS/nix/commit/3a63fc6cd5
+[4b8f1b0ec0]: https://github.com/NixOS/nix/commit/4b8f1b0ec0
+[4bc4da331a]: https://github.com/NixOS/nix/commit/4bc4da331a
+[4c0077a07d]: https://github.com/NixOS/nix/commit/4c0077a07d
+[4dda1f92aa]: https://github.com/NixOS/nix/commit/4dda1f92aa
+[538a64e8c3]: https://github.com/NixOS/nix/commit/538a64e8c3
+[584f8a62de]: https://github.com/NixOS/nix/commit/584f8a62de
+[58cdab64ac]: https://github.com/NixOS/nix/commit/58cdab64ac
+[58ef4d9a95]: https://github.com/NixOS/nix/commit/58ef4d9a95
+[5ccd94501d]: https://github.com/NixOS/nix/commit/5ccd94501d
+[60ec75048a]: https://github.com/NixOS/nix/commit/60ec75048a
+[6185d25e52]: https://github.com/NixOS/nix/commit/6185d25e52
+[6846ed8b44]: https://github.com/NixOS/nix/commit/6846ed8b44
+[6d1a1191b0]: https://github.com/NixOS/nix/commit/6d1a1191b0
+[71a5161365]: https://github.com/NixOS/nix/commit/71a5161365
+[74033a844f]: https://github.com/NixOS/nix/commit/74033a844f
+[7951c3c54]: https://github.com/NixOS/nix/commit/7951c3c54
+[8cffec848]: https://github.com/NixOS/nix/commit/8cffec848
+[8fb8c26b6d]: https://github.com/NixOS/nix/commit/8fb8c26b6d
+[9207f94582]: https://github.com/NixOS/nix/commit/9207f94582
+[96598e7b06]: https://github.com/NixOS/nix/commit/96598e7b06
+[9947f1646a]: https://github.com/NixOS/nix/commit/9947f1646a
+[9cee600c88]: https://github.com/NixOS/nix/commit/9cee600c88
+[9d309de0de]: https://github.com/NixOS/nix/commit/9d309de0de
+[a4604f1928]: https://github.com/NixOS/nix/commit/a4604f1928
+[a583a2bc59]: https://github.com/NixOS/nix/commit/a583a2bc59
+[a711689368]: https://github.com/NixOS/nix/commit/a711689368
+[a72709afd8]: https://github.com/NixOS/nix/commit/a72709afd8
+[a8416866cf]: https://github.com/NixOS/nix/commit/a8416866cf
+[a9c4f66cfb]: https://github.com/NixOS/nix/commit/a9c4f66cfb
+[af2e53fd48]: https://github.com/NixOS/nix/commit/af2e53fd48
+[b1eb252172]: https://github.com/NixOS/nix/commit/b1eb252172
+[b4b5e9ce2f]: https://github.com/NixOS/nix/commit/b4b5e9ce2f
+[b755752f76]: https://github.com/NixOS/nix/commit/b755752f76
+[ba20730b3f]: https://github.com/NixOS/nix/commit/ba20730b3f
+[bdf089f463]: https://github.com/NixOS/nix/commit/bdf089f463
+[be64fbb501]: https://github.com/NixOS/nix/commit/be64fbb501
+[c370755583]: https://github.com/NixOS/nix/commit/c370755583
+[c43e882f54]: https://github.com/NixOS/nix/commit/c43e882f54
+[c602ebfb34]: https://github.com/NixOS/nix/commit/c602ebfb34
+[ccc52adfb2]: https://github.com/NixOS/nix/commit/ccc52adfb2
+[cfc8132391]: https://github.com/NixOS/nix/commit/cfc8132391
+[d0f5719c2a]: https://github.com/NixOS/nix/commit/d0f5719c2a
+[d1e3bf01bc]: https://github.com/NixOS/nix/commit/d1e3bf01bc
+[d38f860c3e]: https://github.com/NixOS/nix/commit/d38f860c3e
+[d3c61d83b]: https://github.com/NixOS/nix/commit/d3c61d83b
+[db4f4a8425]: https://github.com/NixOS/nix/commit/db4f4a8425
+[db5b86ef13]: https://github.com/NixOS/nix/commit/db5b86ef13
+[ddea253ff8]: https://github.com/NixOS/nix/commit/ddea253ff8
+[e0204f8d46]: https://github.com/NixOS/nix/commit/e0204f8d46
+[e0bd307802]: https://github.com/NixOS/nix/commit/e0bd307802
+[e25fad691a]: https://github.com/NixOS/nix/commit/e25fad691a
+[e34fe47d0c]: https://github.com/NixOS/nix/commit/e34fe47d0c
+[e42401ee7b]: https://github.com/NixOS/nix/commit/e42401ee7b
+[e5951a6b2f]: https://github.com/NixOS/nix/commit/e5951a6b2f
+[eb3036da87]: https://github.com/NixOS/nix/commit/eb3036da87
+[f3441e6122]: https://github.com/NixOS/nix/commit/f3441e6122
+[f92c9a0ac5]: https://github.com/NixOS/nix/commit/f92c9a0ac5
+[fe1f34fa60]: https://github.com/NixOS/nix/commit/fe1f34fa60
diff --git a/tvix/docs/src/nix-daemon/logging.md b/tvix/docs/src/nix-daemon/logging.md
new file mode 100644
index 0000000000..70fe0882fa
--- /dev/null
+++ b/tvix/docs/src/nix-daemon/logging.md
@@ -0,0 +1,122 @@
+# Logging
+
+Because the daemon protocol only has one sender stream and one receiver stream,
+logging messages need to be carefully interleaved with requests and responses.
+Usually this means that after the operation and all of its inputs (the request)
+have been read, logging hijacks the sender stream (in the server case) and uses
+it to send typed logging messages while the request is being processed. When
+the response has been generated, it sends `STDERR_LAST` to mark that what
+follows is the response data to the request. If the request failed, a
+`STDERR_ERROR` message is sent with the error and no response is sent.
+
+While not in this state (i.e. between request reading and response sending),
+all messages and activities are buffered until the next time the logger can
+send data.
+
+The logging messages supported are:
+- `STDERR_LAST`
+- `STDERR_ERROR`
+- `STDERR_NEXT`
+- `STDERR_READ`
+- `STDERR_WRITE`
+- `STDERR_START_ACTIVITY`
+- `STDERR_STOP_ACTIVITY`
+- `STDERR_RESULT`
+
+
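+As a rough illustration (not the tvix implementation), a minimal client-side
+log loop might look like the Rust sketch below. The opcodes are the hardcoded
+values documented in the sections that follow; `read_u64`/`read_string` are
+hypothetical helpers assumed to follow the wire encoding in
+[serialization.md](./serialization.md) (little-endian u64s, length-prefixed
+strings zero-padded to 8 bytes).
+
+```rust
+use std::io::{self, Read};
+
+const STDERR_LAST: u64 = 0x616c7473;
+const STDERR_ERROR: u64 = 0x63787470;
+const STDERR_NEXT: u64 = 0x6f6c6d67;
+
+fn read_u64(r: &mut impl Read) -> io::Result<u64> {
+    let mut buf = [0u8; 8];
+    r.read_exact(&mut buf)?;
+    Ok(u64::from_le_bytes(buf))
+}
+
+fn read_string(r: &mut impl Read) -> io::Result<String> {
+    let len = read_u64(r)? as usize;
+    let mut buf = vec![0u8; len];
+    r.read_exact(&mut buf)?;
+    // skip the zero padding up to the next 8-byte boundary
+    let mut pad = vec![0u8; (8 - len % 8) % 8];
+    r.read_exact(&mut pad)?;
+    String::from_utf8(buf).map_err(io::Error::other)
+}
+
+/// Drain log messages until `STDERR_LAST` (the response follows) or
+/// `STDERR_ERROR` (no response follows).
+fn process_logs(conn: &mut impl Read) -> io::Result<()> {
+    loop {
+        match read_u64(conn)? {
+            STDERR_LAST => return Ok(()),
+            // pre-1.26 framing: msg (the trailing exitStatus is elided here)
+            STDERR_ERROR => return Err(io::Error::other(read_string(conn)?)),
+            STDERR_NEXT => eprintln!("{}", read_string(conn)?),
+            other => todo!("READ/WRITE/activity message {other:#x}"),
+        }
+    }
+}
+```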
+### `STDERR_LAST`
+Marks the end of the logs, normal processing can resume.
+
+- 0x616c7473 :: [UInt64][se-UInt64] (hardcoded)
+
+### `STDERR_ERROR`
+This also marks the end of this log "session" and so it
+has the same effect as `STDERR_LAST`.
+On the client the error is thrown as an exception and no response is read.
+
+#### If protocol version is 1.26 or newer
+- 0x63787470 :: [UInt64][se-UInt64] (hardcoded)
+- error :: [Error][se-Error]
+
+#### If protocol version is older than 1.26
+- 0x63787470 :: [UInt64][se-UInt64] (hardcoded)
+- msg :: [String][se-String]
+- exitStatus :: [Int][se-Int]
+
+
+### `STDERR_NEXT`
+Normal string log message.
+
+- 0x6f6c6d67 :: [UInt64][se-UInt64] (hardcoded)
+- msg :: [String][se-String]
+
+
+### `STDERR_READ`
+Reader interface used by ImportPaths and AddToStoreNar (between 1.21 and 1.23).
+It works by sending a desired buffer length, and then reading a byte buffer of
+that length from the receiver stream. If it receives 0 bytes, it treats this as
+an unexpected EOF.
+
+- 0x64617461 :: [UInt64][se-UInt64] (hardcoded)
+- desiredLen :: [Size][se-Size]
+
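+A hedged sketch of the server side of this exchange, assuming hypothetical
+`write_u64` and `read_bytes` helpers (length-prefixed, zero-padded
+[Bytes][se-Bytes]) following [serialization.md](./serialization.md):
+
+```rust
+fn source_read<S: std::io::Read + std::io::Write>(
+    conn: &mut S,
+    desired_len: u64,
+) -> std::io::Result<Vec<u8>> {
+    write_u64(conn, 0x64617461)?;  // STDERR_READ
+    write_u64(conn, desired_len)?; // desiredLen :: Size
+    let buf = read_bytes(conn)?;   // the reply arrives on the receiver stream
+    if buf.is_empty() {
+        // receiving 0 bytes is treated as an unexpected EOF
+        return Err(std::io::Error::new(
+            std::io::ErrorKind::UnexpectedEof,
+            "unexpected EOF",
+        ));
+    }
+    Ok(buf)
+}
+```
+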
+### `STDERR_WRITE`
+Writer interface used by ExportPath. Simply writes a buffer.
+
+- 0x64617416 :: [UInt64][se-UInt64] (hardcoded)
+- buffer :: [Bytes][se-Bytes]
+
+### `STDERR_START_ACTIVITY`
+Begins an activity. In other tracing frameworks this would be called a span.
+
+Implemented in protocol 1.20. To stay backwards compatible with older versions
+of the protocol, instead of sending a `STDERR_START_ACTIVITY`, the level is
+checked against the enabled logging level and the text field is sent as a
+simple log message with `STDERR_NEXT`.
+
+- 0x53545254 :: [UInt64][se-UInt64] (hardcoded)
+- act :: [UInt64][se-UInt64]
+- level :: [Verbosity][se-Verbosity]
+- type :: [ActivityType][se-ActivityType]
+- text :: [String][se-String]
+- fields :: [List][se-List] of [Field][se-Field]
+- parent :: [UInt64][se-UInt64]
+
+
+`act` is generated atomically as `nextId++ + (getPid() << 32)`.
+
+
+### `STDERR_STOP_ACTIVITY`
+Stops the given activity; no more results should be sent for that activity id.
+Just sends the activity id.
+
+Implemented in protocol 1.20. For backwards compatibility with older versions
+of the protocol, this message is simply dropped instead of being sent.
+
+- 0x53544f50 :: [UInt64][se-UInt64] (hardcoded)
+- act :: [UInt64][se-UInt64]
+
+
+### `STDERR_RESULT`
+Sends results for a given activity.
+
+Implemented in protocol 1.20. For backwards compatibility with older versions
+of the protocol, this message is simply dropped instead of being sent.
+
+- 0x52534c54 :: [UInt64][se-UInt64] (hardcoded)
+- act :: [UInt64][se-UInt64]
+- type :: [ResultType][se-ResultType]
+- fields :: [List][se-List] of [Field][se-Field]
+
+
+
+
+[se-UInt64]: ./serialization.md#uint64
+[se-Int]: ./serialization.md#int
+[se-Size]: ./serialization.md#size
+[se-Verbosity]: ./serialization.md#verbosity
+[se-ActivityType]: ./serialization.md#activitytype
+[se-ResultType]: ./serialization.md#resulttype
+[se-Bytes]: ./serialization.md#bytes
+[se-String]: ./serialization.md#string
+[se-List]: ./serialization.md#list-of-x
+[se-Error]: ./serialization.md#error
+[se-Field]: ./serialization.md#field
\ No newline at end of file
diff --git a/tvix/docs/src/nix-daemon/operations.md b/tvix/docs/src/nix-daemon/operations.md
new file mode 100644
index 0000000000..0683ab0709
--- /dev/null
+++ b/tvix/docs/src/nix-daemon/operations.md
@@ -0,0 +1,894 @@
+
+# TOC
+
+| Operation                                                   | Id |
+| ----------------------------------------------------------- | -- |
+| [IsValidPath](#isvalidpath)                                 | 1  |
+| [HasSubstitutes](#hassubstitutes)                           | 3  |
+| [QueryReferrers](#queryreferrers)                           | 6  |
+| [AddToStore](#addtostore)                                   | 7  |
+| [BuildPaths](#buildpaths)                                   | 9  |
+| [EnsurePath](#ensurepath)                                   | 10 |
+| [AddTempRoot](#addtemproot)                                 | 11 |
+| [AddIndirectRoot](#addindirectroot)                         | 12 |
+| [FindRoots](#findroots)                                     | 14 |
+| [SetOptions](#setoptions)                                   | 19 |
+| [CollectGarbage](#collectgarbage)                           | 20 |
+| [QuerySubstitutablePathInfo](#querysubstitutablepathinfo)   | 21 |
+| [QueryAllValidPaths](#queryallvalidpaths)                   | 23 |
+| [QueryPathInfo](#querypathinfo)                             | 26 |
+| [QueryPathFromHashPart](#querypathfromhashpart)             | 29 |
+| [QuerySubstitutablePathInfos](#querysubstitutablepathinfos) | 30 |
+| [QueryValidPaths](#queryvalidpaths)                         | 31 |
+| [QuerySubstitutablePaths](#querysubstitutablepaths)         | 32 |
+| [QueryValidDerivers](#queryvalidderivers)                   | 33 |
+| [OptimiseStore](#optimisestore)                             | 34 |
+| [VerifyStore](#verifystore)                                 | 35 |
+| [BuildDerivation](#buildderivation)                         | 36 |
+| [AddSignatures](#addsignatures)                             | 37 |
+| [NarFromPath](#narfrompath)                                 | 38 |
+| [AddToStoreNar](#addtostorenar)                             | 39 |
+| [QueryMissing](#querymissing)                               | 40 |
+| [QueryDerivationOutputMap](#queryderivationoutputmap)       | 41 |
+| [RegisterDrvOutput](#registerdrvoutput)                     | 42 |
+| [QueryRealisation](#queryrealisation)                       | 43 |
+| [AddMultipleToStore](#addmultipletostore)                   | 44 |
+| [AddBuildLog](#addbuildlog)                                 | 45 |
+| [BuildPathsWithResults](#buildpathswithresults)             | 46 |
+| [AddPermRoot](#addpermroot)                                 | 47 |
+
+
+## Obsolete operations
+
+| Operation                                                 | Id |
+| --------------------------------------------------------- | -- |
+| [QueryPathHash](#querypathhash)                           | 4  |
+| [QueryReferences](#queryreferences)                       | 5  |
+| [AddTextToStore](#addtexttostore)                         | 8  |
+| [SyncWithGC](#syncwithgc)                                 | 13 |
+| [ExportPath](#exportpath)                                 | 16 |
+| [QueryDeriver](#queryderiver)                             | 18 |
+| [QueryDerivationOutputs](#queryderivationoutputs)         | 22 |
+| [ImportPaths](#importpaths)                               | 27 |
+| [QueryDerivationOutputNames](#queryderivationoutputnames) | 28 |
+
+
+## Removed operations
+
+| Operation                                         | Id |
+| ------------------------------------------------- | -- |
+| [Quit](#quit-removed)                             | 0  |
+| [Old CollectGarbage](#old-collectgarbage-removed) | 15 |
+| [ImportPath](#importpath-removed)                 | 17 |
+| [QueryFailedPaths](#queryfailedpaths-removed)     | 24 |
+| [ClearFailedPaths](#clearfailedpaths-removed)     | 25 |
+
+
+
+## Quit (removed)
+
+**Id:** 0<br>
+**Introduced:** Nix 0.11<br>
+**Removed:** Became dead code in Nix 0.11 and removed in Nix 1.8
+
+
+## IsValidPath
+
+**Id:** 1<br>
+**Introduced:** Nix 0.11<br>
+
+As the name says, checks that a store path is valid, i.e. present in the store.
+
+This is a pretty core operation used everywhere.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+isValid :: [Bool][se-Bool]
+
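+As a rough illustration of the request/response framing (not the tvix
+implementation), a hypothetical client call in Rust could look like this,
+assuming an already-handshaked connection, the little-endian, zero-padded
+encoding described in [serialization.md](./serialization.md), and the
+`process_logs`/`read_u64` helpers sketched in [logging.md](./logging.md):
+
+```rust
+use std::io::{self, Read, Write};
+
+fn write_u64(w: &mut impl Write, n: u64) -> io::Result<()> {
+    w.write_all(&n.to_le_bytes())
+}
+
+// Length-prefixed string, zero-padded to the next 8-byte boundary.
+fn write_string(w: &mut impl Write, s: &str) -> io::Result<()> {
+    write_u64(w, s.len() as u64)?;
+    w.write_all(s.as_bytes())?;
+    w.write_all(&vec![0u8; (8 - s.len() % 8) % 8])
+}
+
+fn is_valid_path<S: Read + Write>(conn: &mut S, path: &str) -> io::Result<bool> {
+    write_u64(conn, 1)?;       // opcode: IsValidPath
+    write_string(conn, path)?; // path :: StorePath
+    process_logs(conn)?;       // drain STDERR_* messages until STDERR_LAST
+    Ok(read_u64(conn)? != 0)   // isValid :: Bool
+}
+```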
+
+## HasSubstitutes
+
+**Id:** 3<br>
+**Introduced:** Nix 0.11<br>
+
+Checks if we can substitute the input path from a substituter. Uses
+QuerySubstitutablePaths under the hood :/
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+hasSubstitutes :: [Bool][se-Bool]
+
+
+## QueryPathHash
+
+**Id:** 4<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.16, Nix 2.0<br>
+
+Retrieves the base16 NAR hash of a given store path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+hash :: [String][se-String] (base16-encoded NAR hash without algorithm prefix)
+
+
+## QueryReferences
+
+**Id:** 5<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.16, Nix 2.0<br>
+
+Retrieves the references of a given path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+references :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QueryReferrers
+
+**Id:** 6<br>
+**Introduced:** Nix 0.11<br>
+
+Retrieves the referrers of a given path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+referrers :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## AddToStore
+
+**Id:** 7<br>
+**Introduced:** Nix 0.11<br>
+
+Add a new path to the store.
+
+### Before protocol version 1.25
+#### Inputs
+- baseName :: [String][se-String]
+- fixed :: [Bool64][se-Bool64]
+- recursive :: [FileIngestionMethod][se-FileIngestionMethod]
+- hashAlgo :: [String][se-String]
+- NAR dump
+
+If fixed is `true`, hashAlgo is forced to `sha256` and recursive is forced to
+`Recursive`.
+
+Only `Flat` and `Recursive` values are supported for the recursive input
+parameter.
+
+#### Outputs
+path :: [StorePath][se-StorePath]
+
+### Protocol version 1.25 or newer
+#### Inputs
+- name :: [String][se-String]
+- camStr :: [ContentAddressMethodWithAlgo][se-ContentAddressMethodWithAlgo]
+- refs :: [List][se-List] of [StorePath][se-StorePath]
+- repairBool :: [Bool64][se-Bool64]
+- [Framed][se-Framed] NAR dump
+
+#### Outputs
+info :: [ValidPathInfo][se-ValidPathInfo]
+
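+A hedged sketch of writing the [Framed][se-Framed] payload, assuming the
+framing is a zero-terminated sequence of size-prefixed chunks (see
+serialization.md) and reusing the `write_u64` helper sketched above:
+
+```rust
+use std::io::{self, Read, Write};
+
+fn write_framed<S: Write>(conn: &mut S, mut payload: impl Read) -> io::Result<()> {
+    let mut buf = [0u8; 64 * 1024];
+    loop {
+        let n = payload.read(&mut buf)?;
+        write_u64(conn, n as u64)?; // chunk length
+        if n == 0 {
+            return Ok(()); // a zero-length chunk terminates the frame
+        }
+        conn.write_all(&buf[..n])?;
+    }
+}
+```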
+
+## AddTextToStore
+
+**Id:** 8<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.25, Nix 2.4
+
+Add a text file as a store path.
+
+This was obsoleted by adding the functionality implemented by this operation
+to [AddToStore](#addtostore). It corresponds to calling
+[AddToStore](#addtostore) with `camStr` set to `text:sha256` and `text`
+wrapped as a NAR.
+
+### Inputs
+- suffix :: [String][se-String]
+- text :: [Bytes][se-Bytes]
+- refs :: [List][se-List] of [StorePath][se-StorePath]
+
+### Outputs
+path :: [StorePath][se-StorePath]
+
+
+## BuildPaths
+
+**Id:** 9<br>
+**Introduced:** Nix 0.11<br>
+
+Build (or substitute) a list of derivations.
+
+### Inputs
+paths :: [List][se-List] of [DerivedPath][se-DerivedPath]
+
+#### Protocol 1.15 or newer
+mode :: [BuildMode][se-BuildMode]
+
+Checks that the connection is trusted before allowing Repair mode.
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## EnsurePath
+
+**Id:** 10<br>
+**Introduced:** Nix 0.11<br>
+
+Checks if a path is valid. Note: it may be made valid by running a substitute.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## AddTempRoot
+
+**Id:** 11<br>
+**Introduced:** Nix 0.11<br>
+
+Creates a temporary GC root for the given store path.
+
+Temporary GC roots are valid only for the life of the connection and are used
+primarily to prevent the GC from pulling the rug out from under the client and
+deleting store paths that the client is actively doing something with.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## AddIndirectRoot
+
+**Id:** 12<br>
+**Introduced:** Nix 0.11<br>
+
+Add an indirect root, which is a weak reference to the user-facing symlink
+created by [AddPermRoot](#addpermroot).
+
+Only ever sent to the nix daemon on the local unix socket.
+
+### Inputs
+path :: [String][se-String]
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## SyncWithGC
+
+**Id:** 13<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.32, Nix 2.5.0
+
+Acquire the global GC lock, then immediately release it.  This function must be
+called after registering a new permanent root, but before exiting.  Otherwise,
+it is possible that a running garbage collector doesn't see the new root and
+deletes the stuff we've just built.  By acquiring the lock briefly, we ensure
+that either:
+
+- The collector is already running, and so we block until the
+    collector is finished.  The collector will know about our
+    *temporary* locks, which should include whatever it is we
+    want to register as a permanent lock.
+- The collector isn't running, or it's just started but hasn't
+    acquired the GC lock yet.  In that case we get and release
+    the lock right away, then exit.  The collector scans the
+    permanent root and sees ours.
+
+In either case the permanent root is seen by the collector.
+
+Was made obsolete by using [AddTempRoot](#addtemproot) to accomplish the same
+thing.
+
+
+## FindRoots
+
+**Id:** 14<br>
+**Introduced:** Nix 0.11<br>
+
+Find the GC roots.
+
+### Outputs
+roots :: [Map][se-Map] of [String][se-String] to [StorePath][se-StorePath]
+
+The key is the link pointing to the given store path.
+
+
+## Old CollectGarbage (removed)
+
+**Id:** 15<br>
+**Introduced:** Nix 0.11<br>
+**Removed:** Protocol 1.02, Nix 0.12<br>
+
+
+## ExportPath
+
+**Id:** 16<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.17, Nix 2.0<br>
+
+Export a store path in the binary format `nix-store --import` expects. See the
+implementation in [export-import.cc](https://github.com/NixOS/nix/blob/db3bf180a569cb20db42c5e4669d2277be6f46b6/src/libstore/export-import.cc#L29) for more details.
+
+### Inputs
+- path :: [StorePath][se-StorePath]
+- sign :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+
+### Outputs
+Uses `STDERR_WRITE` to send the dump in export format.
+
+After the dump it outputs:
+
+1 :: [Int][se-Int] (hardcoded)
+
+
+## ImportPath (removed)
+
+**Id:** 17<br>
+**Introduced:** Nix 0.11<br>
+**Removed:** Protocol 1.09, Nix 1.0<br>
+
+
+## QueryDeriver
+
+**Id:** 18<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.16, Nix 2.0<br>
+
+Returns the store path of the derivation for a given store path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+deriver :: [OptStorePath][se-OptStorePath]
+
+
+## SetOptions
+
+**Id:** 19<br>
+**Introduced:** Nix 0.11<br>
+
+Sends client options to the remote side.
+
+Only ever used right after the handshake.
+
+### Inputs
+
+- keepFailed :: [Int][se-Int]
+- keepGoing :: [Int][se-Int]
+- tryFallback :: [Int][se-Int]
+- verbosity :: [Verbosity][se-Verbosity]
+- maxBuildJobs :: [Int][se-Int]
+- maxSilentTime :: [Int][se-Int]
+- useBuildHook :: [Bool][se-Bool] (ignored and hardcoded to true in client)
+- verboseBuild :: [Verbosity][se-Verbosity]
+- logType :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+- printBuildTrace :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+- buildCores :: [Int][se-Int]
+- useSubstitutes :: [Int][se-Int]
+
+#### Protocol 1.12 or newer
+otherSettings :: [Map][se-Map] of [String][se-String] to [String][se-String]
+
+
+## CollectGarbage
+
+**Id:** 20<br>
+**Introduced:** Protocol 1.02, Nix 0.12<br>
+
+Performs garbage collection.
+
+### Inputs
+- action :: [GCAction][se-GCAction]
+- pathsToDelete :: [List][se-List] of [StorePath][se-StorePath]
+- ignoreLiveness :: [Bool64][se-Bool64]
+- maxFreed :: [UInt64][se-UInt64]
+- removed :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+- removed :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+- removed :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+
+### Outputs
+- pathsDeleted :: [List][se-List] of [String][se-String]
+- bytesFreed :: [UInt64][se-UInt64]
+- 0 :: [UInt64][se-UInt64] (hardcoded)
+
+Depending on the action, pathsDeleted is the GC roots, or the paths that would
+be or have been deleted.
+
+
+## QuerySubstitutablePathInfo
+
+**Id:** 21<br>
+**Introduced:** Protocol 1.02, Nix 0.12<br>
+
+Retrieves the substitutable path info for a given path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+found :: [Bool][se-Bool]
+
+#### If found is true
+- deriver :: [OptStorePath][se-OptStorePath]
+- references :: [List][se-List] of [StorePath][se-StorePath]
+- downloadSize :: [UInt64][se-UInt64]
+- narSize :: [UInt64][se-UInt64]
+
+
+## QueryDerivationOutputs
+
+**Id:** 22<br>
+**Introduced:** Protocol 1.05, Nix 1.0<br>
+**Obsolete:** Protocol 1.22*, Nix 2.4<br>
+
+Retrieves all the outputs paths of a given derivation.
+
+### Inputs
+path :: [StorePath][se-StorePath] (must point to a derivation)
+
+### Outputs
+derivationOutputs :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QueryAllValidPaths
+
+**Id:** 23<br>
+**Introduced:** Protocol 1.05, Nix 1.0<br>
+
+Retrieves all the valid paths contained in the store.
+
+### Outputs
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QueryFailedPaths (removed)
+
+**Id:** 24<br>
+**Introduced:** Protocol 1.05, Nix 1.0<br>
+**Removed:** Protocol 1.16, Nix 2.0<br>
+
+Failed build caching API only ever used by Hydra.
+
+
+## ClearFailedPaths (removed)
+
+**Id:** 25<br>
+**Introduced:** Protocol 1.05, Nix 1.0<br>
+**Removed:** Protocol 1.16, Nix 2.0<br>
+
+Failed build caching API only ever used by Hydra.
+
+
+## QueryPathInfo
+
+**Id:** 26<br>
+**Introduced:** Protocol 1.06, Nix 1.0<br>
+
+Retrieves the pathInfo for a given path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+
+#### If protocol version is 1.17 or newer
+success :: [Bool64][se-Bool64]
+
+##### If success is true
+pathInfo :: [UnkeyedValidPathInfo][se-UnkeyedValidPathInfo]
+
+#### If protocol version is older than 1.17
+If the path info is not found, an error is returned via `STDERR_ERROR`.
+
+pathInfo :: [UnkeyedValidPathInfo][se-UnkeyedValidPathInfo]
+
+
+## ImportPaths
+
+**Id:** 27<br>
+**Introduced:** Protocol 1.09, Nix 1.0<br>
+**Obsolete:** Protocol 1.17, Nix 2.0<br>
+
+Older way of adding a store path to the remote store.
+
+It was obsoleted and replaced by AddToStoreNar because it sends the NAR
+before the metadata about the store path, so the receiving side would
+typically have to store the NAR in memory or temporarily on disk before
+processing it.
+
+### Inputs
+List of NAR dumps coming from the ExportPaths operation.
+
+### Outputs
+importedPaths :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QueryDerivationOutputNames
+
+**Id:** 28<br>
+**Introduced:** Protocol 1.08, Nix 1.0<br>
+**Obsolete:** Protocol 1.21, Nix 2.4<br>
+
+Retrieves the names of the outputs of a given derivation, e.g. out, dev, etc.
+
+### Inputs
+path :: [StorePath][se-StorePath] (must be a derivation path)
+
+### Outputs
+names :: [List][se-List] of [String][se-String]
+
+
+## QueryPathFromHashPart
+
+**Id:** 29<br>
+**Introduced:** Protocol 1.11, Nix 1.1<br>
+
+Retrieves a store path from its hash part (the nixbase32 prefix of the store
+path basename). Returns "" if no path was found.
+
+### Inputs
+hashPart :: [String][se-String] (the nixbase32 hash part of a store path)
+
+### Outputs
+path :: [OptStorePath][se-OptStorePath]
+
+
+## QuerySubstitutablePathInfos
+
+**Id:** 30<br>
+**Introduced:** Protocol 1.12*, Nix 1.2<br>
+
+Retrieves the substitutable path infos for a set of store paths.
+
+### Inputs
+#### If protocol version is 1.22 or newer
+paths :: [Map][se-Map] of [StorePath][se-StorePath] to [OptContentAddress][se-OptContentAddress] 
+
+#### If protocol version is older than 1.22
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+### Outputs
+infos :: [List][se-List] of [SubstitutablePathInfo][se-SubstitutablePathInfo]
+
+
+## QueryValidPaths
+
+**Id:** 31<br>
+**Introduced:** Protocol 1.12, Nix 1.2<br>
+
+Takes a list of store paths and returns a new list containing only the valid
+store paths.
+
+### Inputs
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+#### If protocol version is 1.27 or newer
+substitute :: [Bool][se-Bool] (defaults to false if not sent)
+
+### Outputs
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QuerySubstitutablePaths
+
+**Id:** 32<br>
+**Introduced:** Protocol 1.12, Nix 1.2<br>
+
+Takes a set of store paths and returns the filtered subset of paths that can
+be substituted.
+
+In versions of the protocol prior to 1.12, [HasSubstitutes](#hassubstitutes)
+was used to implement the functionality that this operation provides.
+
+### Inputs
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+### Outputs
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QueryValidDerivers
+
+**Id:** 33<br>
+**Introduced:** Protocol 1.13*, Nix 1.3<br>
+
+Retrieves the derivers of a given path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+derivers :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## OptimiseStore
+
+**Id:** 34<br>
+**Introduced:** Protocol 1.14, Nix 1.8<br>
+
+Optimises the store by hard-linking files with the same content.
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## VerifyStore
+
+**Id:** 35<br>
+**Introduced:** Protocol 1.14, Nix 1.9<br>
+
+Verifies the store: either only the database and the existence of paths, or
+additionally the entire contents of store paths against their NAR hashes.
+
+### Inputs
+- checkContents :: [Bool64][se-Bool64]
+- repair :: [Bool64][se-Bool64]
+
+### Outputs
+errors :: [Bool][se-Bool]
+
+
+## BuildDerivation
+
+**Id:** 36<br>
+**Introduced:** Protocol 1.14, Nix 1.10<br>
+
+Main build operation used when remote building.
+
+When functioning as a remote builder this operation is used instead of
+BuildPaths, so that the client doesn't have to send the entire tree of
+derivations to the remote side before building can start. Instead, a reduced
+version of the derivation to be built is sent as part of the input, and only
+that derivation is built.
+
+The paths required by the build need to be part of the remote store
+(by copying with AddToStoreNar or substituting) before this operation is
+called.
+
+### Inputs
+- drvPath :: [StorePath][se-StorePath]
+- drv :: [BasicDerivation][se-BasicDerivation]
+- buildMode :: [BuildMode][se-BuildMode]
+
+### Outputs
+buildResult :: [BuildResult][se-BuildResult]
+
+
+## AddSignatures
+
+**Id:** 37<br>
+**Introduced:** Protocol 1.16, Nix 2.0<br>
+
+Adds signatures to a given store path.
+
+### Inputs
+- path :: [StorePath][se-StorePath]
+- signatures :: [List][se-List] of [String][se-String]
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## NarFromPath
+
+**Id:** 38<br>
+**Introduced:** Protocol 1.17, Nix 2.0<br>
+
+Main way of getting the contents of a store path to the client.
+
+As the name suggests this is done by sending a NAR file.
+
+It replaced the now-obsolete ExportPath operation and is used by newer clients
+to implement the export functionality for the CLI. It is also used when remote
+building to transfer build results from the remote builder to the client.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+NAR dumped straight to the stream.
+
+
+## AddToStoreNar
+
+**Id:** 39<br>
+**Introduced:** Protocol 1.17, Nix 2.0<br>
+
+Adds a store path to the remote store, given its metadata and a NAR dump of
+its contents.
+
+### Inputs
+- path :: [StorePath][se-StorePath]
+- deriver :: [OptStorePath][se-OptStorePath]
+- narHash :: [String][se-String] SHA256 NAR hash base 16
+- references :: [List][se-List] of [StorePath][se-StorePath]
+- registrationTime :: [Time][se-Time]
+- narSize :: [UInt64][se-UInt64]
+- ultimate :: [Bool64][se-Bool64]
+- signatures :: [List][se-List] of [String][se-String]
+- ca :: [OptContentAddress][se-OptContentAddress]
+- repair :: [Bool64][se-Bool64]
+- dontCheckSigs :: [Bool64][se-Bool64]
+
+#### If protocol version is 1.23 or newer
+[Framed][se-Framed] NAR dump
+
+#### If protocol version is at least 1.21 but older than 1.23
+NAR dump sent using `STDERR_READ`
+
+#### If protocol version is older than 1.21
+NAR dump sent raw on stream
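+
+As a Rust sketch, the choice of transport by protocol minor version (the
+names are illustrative, not tvix API):
+
+```rust
+/// How the NAR bytes are carried for AddToStoreNar.
+enum NarTransport {
+    Framed,     // 1.23 and newer: length-prefixed chunks, empty chunk ends
+    StderrRead, // at least 1.21, older than 1.23: pulled via STDERR_READ
+    Raw,        // older than 1.21: NAR parsed directly off the stream
+}
+
+fn nar_transport(minor: u64) -> NarTransport {
+    if minor >= 23 {
+        NarTransport::Framed
+    } else if minor >= 21 {
+        NarTransport::StderrRead
+    } else {
+        NarTransport::Raw
+    }
+}
+```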
+
+
+## QueryMissing
+
+**Id:** 40<br>
+**Introduced:** Protocol 1.19*, Nix 2.0<br>
+
+Asks the daemon what would be required to realise the given targets: which
+paths would be built, which would be substituted, which are unknown, and the
+estimated download and NAR sizes.
+
+### Inputs
+targets :: [List][se-List] of [DerivedPath][se-DerivedPath]
+
+### Outputs
+- willBuild :: [List][se-List] of [StorePath][se-StorePath]
+- willSubstitute :: [List][se-List] of [StorePath][se-StorePath]
+- unknown :: [List][se-List] of [StorePath][se-StorePath]
+- downloadSize :: [UInt64][se-UInt64]
+- narSize :: [UInt64][se-UInt64]
+
+
+## QueryDerivationOutputMap
+
+**Id:** 41<br>
+**Introduced:** Protocol 1.22*, Nix 2.4<br>
+
+Retrieves an associative map outputName -> storePath for a given derivation.
+
+### Inputs
+path :: [StorePath][se-StorePath]  (must be a derivation path)
+
+### Outputs
+outputs :: [Map][se-Map] of [String][se-String] to [OptStorePath][se-OptStorePath]
+
+
+## RegisterDrvOutput
+
+**Id:** 42<br>
+**Introduced:** Protocol 1.27, Nix 2.4<br>
+
+Registers a derivation output.
+
+### Inputs
+#### If protocol is 1.31 or newer
+realisation :: [Realisation][se-Realisation]
+
+#### If protocol is older than 1.31
+- outputId :: [DrvOutput][se-DrvOutput]
+- outputPath :: [StorePath][se-StorePath]
+
+
+## QueryRealisation
+
+**Id:** 43<br>
+**Introduced:** Protocol 1.27, Nix 2.4<br>
+
+Retrieves the realisations attached to a given derivation output id.
+
+### Inputs
+outputId :: [DrvOutput][se-DrvOutput]
+
+### Outputs
+#### If protocol is 1.31 or newer
+realisations :: [List][se-List] of [Realisation][se-Realisation]
+
+#### If protocol is older than 1.31
+outPaths :: [List][se-List] of [BaseStorePath][se-BaseStorePath]
+
+
+## AddMultipleToStore
+
+**Id:** 44<br>
+**Introduced:** Protocol 1.32*, Nix 2.4<br>
+
+A pipelined version of [AddToStoreNar](#addtostorenar) where you can add
+multiple paths in one go.
+
+Added because the protocol doesn't support pipelining, so on a high-latency
+connection, waiting for the request/response round trip of
+[AddToStoreNar](#addtostorenar) for each small NAR was costly.
+
+### Inputs
+- repair :: [Bool64][se-Bool64]
+- dontCheckSigs :: [Bool64][se-Bool64]
+- [Framed][se-Framed] stream containing the NAR dumps to add
+
+
+## AddBuildLog
+
+**Id:** 45<br>
+**Introduced:** Protocol 1.32, Nix 2.6.0<br>
+
+Attaches build logs to a given build.
+
+### Inputs
+- path :: [String][se-String] (might be [BaseStorePath][se-BaseStorePath])
+- [Framed][se-Framed] stream of log lines
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## BuildPathsWithResults
+
+**Id:** 46<br>
+**Introduced:** Protocol 1.34*, Nix 2.8.0<br>
+
+Builds (or substitutes) a list of derived paths and returns a list of results.
+
+### Inputs
+- drvs :: [List][se-List] of [DerivedPath][se-DerivedPath]
+- mode :: [BuildMode][se-BuildMode]
+
+### Outputs
+results :: [List][se-List] of [KeyedBuildResult][se-KeyedBuildResult]
+
+
+## AddPermRoot
+
+**Id:** 47<br>
+**Introduced:** Protocol 1.36*, Nix 2.20.0<br>
+
+Creates a permanent GC root for the given store path.
+
+### Inputs
+- storePath :: [StorePath][se-StorePath]
+- gcRoot :: [String][se-String]
+
+### Outputs
+gcRoot :: [String][se-String]
+
+
+
+[se-Int]: ./serialization.md#int
+[se-UInt8]: ./serialization.md#uint8
+[se-UInt64]: ./serialization.md#uint64
+[se-Bool]: ./serialization.md#bool
+[se-Bool64]: ./serialization.md#bool64
+[se-Time]: ./serialization.md#time
+[se-FileIngestionMethod]: ./serialization.md#fileingestionmethod
+[se-BuildMode]: ./serialization.md#buildmode
+[se-Verbosity]: ./serialization.md#verbosity
+[se-GCAction]: ./serialization.md#gcaction
+[se-Bytes]: ./serialization.md#bytes
+[se-String]: ./serialization.md#string
+[se-StorePath]: ./serialization.md#storepath
+[se-BaseStorePath]: ./serialization.md#basestorepath
+[se-OptStorePath]: ./serialization.md#optstorepath
+[se-ContentAddressMethodWithAlgo]: ./serialization.md#contentaddressmethodwithalgo
+[se-OptContentAddress]: ./serialization.md#optcontentaddress
+[se-DerivedPath]: ./serialization.md#derivedpath
+[se-DrvOutput]: ./serialization.md#drvoutput
+[se-Realisation]: ./serialization.md#realisation
+[se-List]: ./serialization.md#list-of-x
+[se-Map]: ./serialization.md#map-of-x-to-y
+[se-SubstitutablePathInfo]: ./serialization.md#substitutablepathinfo
+[se-ValidPathInfo]: ./serialization.md#validpathinfo
+[se-UnkeyedValidPathInfo]: ./serialization.md#unkeyedvalidpathinfo
+[se-BuildResult]: ./serialization.md#buildresult
+[se-KeyedBuildResult]: ./serialization.md#keyedbuildresult
+[se-BasicDerivation]: ./serialization.md#basicderivation
+[se-Framed]: ./serialization.md#framed
\ No newline at end of file
diff --git a/tvix/docs/src/nix-daemon/serialization.md b/tvix/docs/src/nix-daemon/serialization.md
new file mode 100644
index 0000000000..a2694a4dea
--- /dev/null
+++ b/tvix/docs/src/nix-daemon/serialization.md
@@ -0,0 +1,305 @@
+
+### UInt64
+Little endian byte order
+
+### Bytes
+
+- len :: [UInt64](#uint64)
+- len bytes of content
+- padding with zeros to the next 8-byte boundary
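+
+A minimal Rust sketch of writing a Bytes field (illustrative, not the tvix
+implementation):
+
+```rust
+use std::io::{self, Write};
+
+fn write_bytes(w: &mut impl Write, data: &[u8]) -> io::Result<()> {
+    w.write_all(&(data.len() as u64).to_le_bytes())?; // len :: UInt64 (LE)
+    w.write_all(data)?;                               // content
+    let padding = (8 - data.len() % 8) % 8;           // zero-pad to 8 bytes
+    w.write_all(&[0u8; 7][..padding])
+}
+```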
+
+
+## Int serializers
+
+### Int
+[UInt64](#uint64) cast to C `unsigned int` with upper bounds checking.
+
+### Int64
+[UInt64](#uint64) cast to C `int64_t` with upper bounds checking.
+
+### UInt8
+[UInt64](#uint64) cast to C `uint8_t` with upper bounds checking.
+
+### Size
+[UInt64](#uint64) cast to C `size_t` with upper bounds checking.
+
+### Time
+[UInt64](#uint64) cast to C `time_t` with upper bounds checking.
+
+### Bool
+Sent as an [Int](#int) where 0 is false and everything else is true.
+
+### Bool64
+Sent as a [UInt64](#uint64) where 0 is false and everything else is true.
+
+### FileIngestionMethod
+A [UInt8](#uint8) enum with the following possible values:
+
+| Name      | Int |
+| --------- | --- |
+| Flat      |  0  |
+| Recursive |  1  |
+
+### BuildMode
+An [Int](#int) enum with the following possible values:
+
+| Name   | Int |
+| ------ | --- |
+| Normal |  0  |
+| Repair |  1  |
+| Check  |  2  |
+
+### Verbosity
+An [Int](#int) enum with the following possible values:
+
+| Name      | Int |
+| --------- | --- |
+| Error     |  0  |
+| Warn      |  1  |
+| Notice    |  2  |
+| Info      |  3  |
+| Talkative |  4  |
+| Chatty    |  5  |
+| Debug     |  6  |
+| Vomit     |  7  |
+
+### GCAction
+An [Int](#int) enum with the following possible values:
+
+| Name           | Int |
+| -------------- | --- |
+| ReturnLive     |  0  |
+| ReturnDead     |  1  |
+| DeleteDead     |  2  |
+| DeleteSpecific |  3  |
+
+### BuildStatus
+An [Int](#int) enum with the following possible values:
+
+| Name                   | Int |
+| ---------------------- | --- |
+| Built                  |  0  |
+| Substituted            |  1  |
+| AlreadyValid           |  2  |
+| PermanentFailure       |  3  |
+| InputRejected          |  4  |
+| OutputRejected         |  5  |
+| TransientFailure       |  6  |
+| CachedFailure          |  7  |
+| TimedOut               |  8  |
+| MiscFailure            |  9  |
+| DependencyFailed       | 10  |
+| LogLimitExceeded       | 11  |
+| NotDeterministic       | 12  |
+| ResolvesToAlreadyValid | 13  |
+| NoSubstituters         | 14  |
+
+### ActivityType
+An [Int](#int) enum with the following possible values:
+
+| Name          | Int |
+| ------------- | --- |
+| Unknown       |   0 |
+| CopyPath      | 100 |
+| FileTransfer  | 101 |
+| Realise       | 102 |
+| CopyPaths     | 103 |
+| Builds        | 104 |
+| Build         | 105 |
+| OptimiseStore | 106 |
+| VerifyPaths   | 107 |
+| Substitute    | 108 |
+| QueryPathInfo | 109 |
+| PostBuildHook | 110 |
+| BuildWaiting  | 111 |
+| FetchTree     | 112 |
+
+### ResultType
+An [Int](#int) enum with the following possible values:
+
+| Name             | Int |
+| ---------------- | --- |
+| FileLinked       | 100 |
+| BuildLogLine     | 101 |
+| UntrustedPath    | 102 |
+| CorruptedPath    | 103 |
+| SetPhase         | 104 |
+| Progress         | 105 |
+| SetExpected      | 106 |
+| PostBuildLogLine | 107 |
+| FetchStatus      | 108 |
+
+### FieldType
+An [Int](#int) enum with the following possible values:
+
+| Name   | Int |
+| ------ | --- |
+| Int    |  0  |
+| String |  1  |
+
+
+## Bytes serializers
+
+### String
+Simply a [Bytes](#bytes) whose content sometimes carries UTF-8 string-like
+semantics, depending on context.
+
+### StorePath
+String representation of a full store path.
+
+### BaseStorePath
+String representation of the basename of a store path. That is the store path
+without the /nix/store prefix.
+
+### OptStorePath
+Optional store path.
+
+If there is no store path, this is serialized as the empty string; otherwise
+it is the same as [StorePath](#storepath).
+
+### ContentAddressMethodWithAlgo
+One of the following strings:
+- text:`hash algorithm`
+- fixed:r:`hash algorithm`
+- fixed:`hash algorithm`
+
+### DerivedPath
+#### If protocol is 1.30 or newer
+Parsed using `DerivedPath::parseLegacy(store, s)`.
+
+#### If protocol is older than 1.30
+Parsed using `parsePathWithOutputs(store, s).toDerivedPath()`.
+
+### ContentAddress
+String with the format:
+- [ContentAddressMethodWithAlgo](#contentaddressmethodwithalgo):`hash`
+
+### OptContentAddress
+Optional version of [ContentAddress](#contentaddress) where empty string means
+no content address.
+
+### DrvOutput
+String with format:
+- `hash with any prefix`!`output name`
+
+### Realisation
+A JSON object sent as a string.
+
+The JSON object has the following keys:
+| Key                   | Value                   |
+| --------------------- | ----------------------- |
+| id                    | [DrvOutput](#drvoutput) |
+| outPath               | [StorePath](#storepath) |
+| signatures            | Array of String         |
+| dependentRealisations | Object where key is [DrvOutput](#drvoutput) and value is [StorePath](#storepath) |
+
+
+## Complex serializers
+
+### List of x
+A list is encoded as a [Size](#size) length n, followed by n encodings of x.
+
+### Map of x to y
+A map is encoded as a [Size](#size) length n, followed by n encodings of pairs of x and y.
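+
+Reusing the `write_bytes` sketch from the [Bytes](#bytes) section above, a
+list of strings could be encoded like this (illustrative only):
+
+```rust
+use std::io::{self, Write};
+
+fn write_string_list(w: &mut impl Write, items: &[&str]) -> io::Result<()> {
+    w.write_all(&(items.len() as u64).to_le_bytes())?; // Size
+    for s in items {
+        write_bytes(w, s.as_bytes())?; // each String is a Bytes
+    }
+    Ok(())
+}
+```
+
+A map would be encoded the same way, writing each key/value pair after the
+length.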
+
+
+### BuildResult
+- status :: [BuildStatus](#buildstatus)
+- errorMsg :: [String](#string)
+
+#### Protocol 1.29 or newer
+- timesBuilt :: [Int](#int)
+- isNonDeterministic :: [Bool64](#bool64)
+- startTime :: [Time](#time)
+- stopTime :: [Time](#time)
+
+#### Protocol 1.37 or newer
+- cpuUser :: [OptMicroseconds](#optmicroseconds)
+- cpuSystem :: [OptMicroseconds](#optmicroseconds)
+
+#### Protocol 1.28 or newer
+builtOutputs :: [Map](#map-of-x-to-y) of [DrvOutput](#drvoutput) to [Realisation](#realisation)
+
+### KeyedBuildResult
+- path :: [DerivedPath](#derivedpath)
+- result :: [BuildResult](#buildresult)
+
+### OptMicroseconds
+Optional microseconds.
+
+- tag :: [UInt8](#uint8)
+
+#### If tag is 1
+- seconds :: [Int64](#int64)
+
+
+### SubstitutablePathInfo
+- storePath :: [StorePath](#storepath)
+- deriver :: [OptStorePath](#optstorepath)
+- references :: [List](#list-of-x) of [StorePath](#storepath)
+- downloadSize :: [UInt64](#uint64)
+- narSize :: [UInt64](#uint64)
+
+
+### UnkeyedValidPathInfo
+- deriver :: [OptStorePath](#optstorepath)
+- narHash :: [String](#string) SHA256 NAR hash base16 encoded
+- references :: [List](#list-of-x) of [StorePath](#storepath)
+- registrationTime :: [Time](#time)
+- narSize :: [UInt64](#uint64)
+
+#### If protocol version is 1.16 or above
+- ultimate :: [Bool64](#bool64)
+- signatures :: [List](#list-of-x) of [String](#string)
+- ca :: [OptContentAddress](#optcontentaddress)
+
+
+### ValidPathInfo
+- path :: [StorePath](#storepath)
+- info :: [UnkeyedValidPathInfo](#unkeyedvalidpathinfo)
+
+### DerivationOutput
+- path :: [String](#string)
+- hashAlgo :: [String](#string)
+- hash :: [String](#string)
+
+### BasicDerivation
+- outputs :: [Map](#map-of-x-to-y) of [String](#string) to [DerivationOutput](#derivationoutput)
+- inputSrcs :: [List](#list-of-x) of [StorePath](#storepath)
+- platform :: [String](#string)
+- builder :: [String](#string)
+- args :: [List](#list-of-x) of [String](#string)
+- env :: [Map](#map-of-x-to-y) of [String](#string) to [String](#string)
+
+### TraceLine
+- havePos :: [Size](#size) (hardcoded to 0)
+- hint :: [String](#string)
+
+### Error
+- type :: [String](#string) (hardcoded to `Error`)
+- level :: [Verbosity](#verbosity)
+- name :: [String](#string) (removed and hardcoded to `Error`)
+- msg :: [String](#string)
+- havePos :: [Size](#size) (hardcoded to 0)
+- traces :: [List](#list-of-x) of [TraceLine](#traceline)
+
+## Field
+- type :: [FieldType](#fieldtype)
+
+### If type is Int
+- value :: [UInt64](#uint64)
+
+### If type is String
+- value :: [String](#string)
+
+
+## Framed
+
+In protocol 1.23, [AddToStoreNar](./operations.md#addtostorenar) introduced
+framed streaming for sending the NAR dump; later versions of the protocol also
+use this framing for other operations.
+
+At its core the framed streaming is just a series of [Bytes](#bytes) of
+varying length and terminated by an empty [Bytes](#bytes).
+
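+A Rust sketch of a reader for such a stream (illustrative, not the tvix
+implementation; each frame is decoded per the [Bytes](#bytes) definition in
+this document):
+
+```rust
+use std::io::{self, Read};
+
+fn read_bytes(r: &mut impl Read) -> io::Result<Vec<u8>> {
+    let mut len_buf = [0u8; 8];
+    r.read_exact(&mut len_buf)?; // len :: UInt64 (LE)
+    let len = u64::from_le_bytes(len_buf) as usize;
+    let mut data = vec![0u8; len];
+    r.read_exact(&mut data)?; // content
+    let mut pad = [0u8; 8];
+    r.read_exact(&mut pad[..(8 - len % 8) % 8])?; // discard zero padding
+    Ok(data)
+}
+
+fn read_framed(r: &mut impl Read) -> io::Result<Vec<u8>> {
+    let mut out = Vec::new();
+    loop {
+        let frame = read_bytes(r)?;
+        if frame.is_empty() {
+            return Ok(out); // empty Bytes terminates the stream
+        }
+        out.extend_from_slice(&frame);
+    }
+}
+```
+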
+This method of sending data has the advantage of not having to parse the data
+to find where it ends. Older versions of the protocol would potentially parse
+the NAR twice.
\ No newline at end of file
diff --git a/tvix/eval/docs/bindings.md b/tvix/eval/docs/bindings.md
new file mode 100644
index 0000000000..2b062cb13d
--- /dev/null
+++ b/tvix/eval/docs/bindings.md
@@ -0,0 +1,133 @@
+Compilation of bindings
+=======================
+
+Compilation of Nix bindings is one of the most mind-bending parts of Nix
+evaluation. The implementation of just the compilation is currently almost 1000
+lines of code, excluding the various insane test cases we dreamt up for it.
+
+## What is a binding?
+
+In short, any attribute set or `let`-expression. Tvix currently does not treat
+formals in function parameters (e.g. `{ name ? "fred" }: ...`) the same as these
+bindings.
+
+They have two very difficult features:
+
+1. Keys can mutually refer to each other in `rec` sets or `let`-bindings,
+   including out of definition order.
+2. Attribute sets can be nested, and parts of one attribute set can be defined
+   in multiple separate bindings.
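+
+Both can be seen in a small example:
+
+```nix
+let
+  a.b = 1;     # `a` is defined across...
+  a.c = d;     # ...multiple separate bindings
+  d = a.b + 1; # and refers to `a` out of definition order
+in a.c         # => 2
+```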
+
+Tvix resolves as much of this logic statically (i.e. at compile-time) as
+possible, but the procedure is quite complicated.
+
+## High-level concept
+
+The idea behind the way we compile bindings is to fully resolve nesting
+statically, and use the usual mechanisms (i.e. recursion/thunking/value
+capturing) for resolving dynamic values.
+
+This is done by compiling bindings in several phases:
+
+1. An initial compilation phase *only* for plain inherit statements (i.e.
+   `inherit name;`), *not* for namespaced inherits (i.e. `inherit (from)
+   name;`).
+
+2. A declaration-only phase, in which we use the compiler's scope tracking logic
+   to calculate the physical runtime stack indices (further referred to as
+   "stack slots" or just "slots") that all values will end up in.
+
+   In this phase, whenever we encounter a nested attribute set, it is merged
+   into a custom data structure that acts like a synthetic AST node.
+
+   This can be imagined similar to a rewrite like this:
+
+   ```nix
+   # initial code:
+   {
+       a.b = 1;
+       a.c = 2;
+   }
+
+   # rewritten form:
+   {
+       a = {
+           b = 1;
+           c = 2;
+       };
+   }
+   ```
+
+   The rewrite applies to attribute sets and `let`-bindings alike.
+
+   At the end of this phase, we know the stack slots of all namespaces for
+   inheriting from, all values inherited from them, and all values (and
+   optionally keys) of bindings at the current level.
+
+   Only statically known keys are actually merged, so any dynamic keys that
+   conflict will lead to a "key already defined" error at runtime (see the
+   example at the end of this section).
+
+3. A compilation phase, in which all values (and, when necessary, keys) are
+   actually compiled. In this phase the custom data structure used for merging
+   is encountered when compiling values.
+
+   As this data structure acts like an AST node, the process begins recursively
+   for each nested attribute set.
+
+At the end of this process we have bytecode that leaves the required values (and
+optionally keys) on the stack. In the case of attribute sets, a final operation
+is emitted that constructs the actual attribute set structure at runtime. For
+`let`-bindings a final operation is emitted that removes these locals from the
+stack when the scope ends.
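+
+As an example of the dynamic-key caveat from phase 2, a dynamic key is never
+merged statically, so a collision with a statically merged attribute set only
+surfaces once the bindings are evaluated:
+
+```nix
+{
+  a.b = 1;
+  ${"a"}.c = 2; # not merged at compile time; evaluation fails with a
+                # "key already defined" style error
+}
+```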
+
+## Moving parts
+
+WARNING: This documents the *current* implementation. If you only care about the
+conceptual aspects, see above.
+
+There's a few types involved:
+
+* `PeekableAttrs`: peekable iterator over an attribute path (e.g. `a.b.c`)
+* `BindingsKind`: enum defining the kind of bindings (attrs/recattrs/let)
+* `AttributeSet`: struct holding the bindings kind, the AST nodes with inherits
+  (both namespaced and not), and an internal representation of bindings
+  (essentially a vector of tuples of the peekable attrs and the expression to
+  compile for the value).
+* `Binding`: enum describing the kind of binding (namespaced inherit, attribute
+  set, plain binding of *any other value type*)
+* `KeySlot`: enum describing the location in which a key slot is placed at
+  runtime (nowhere, statically known value in a slot, dynamic value in a slot)
+* `TrackedBinding`: struct representing statically known information about a
+  single binding (its key slot, value slot and `Binding`)
+* `TrackedBindings`: vector of tracked bindings, which implements logic for
+  merging attribute sets together
+
+And quite a few methods on `Compiler`:
+
+* `compile_bindings`: entry point for compiling anything that looks like a
+  binding, this calls out to the functions below.
+* `compile_plain_inherits`: takes all inherits of a bindings node and compiles
+  the ones that are trivial to compile (i.e. just plain inherits without a
+  namespace). The `rnix` parser does not represent namespaced/plain inherits in
+  different nodes, so this function also aggregates the namespaced inherits and
+  returns them for further use.
+* `declare_namespaced_inherits`: passes over all namespaced inherits and
+  declares them on the locals stack, as well as inserts them into the provided
+  `TrackedBindings`
+* `declare_bindings`: declares all regular key/value bindings in a bindings
+  scope, but without actually compiling their keys or values.
+
+  There's a lot of heavy lifting going on here:
+
+  1. It invokes the various pieces of logic responsible for merging nested
+     attribute sets together, creating intermediate data structures in the value
+     slots of bindings that can be recursively processed the same way.
+  2. It decides on the key slots of expressions based on the kind of bindings,
+     and the type of expression providing the key.
+* `bind_values`: runs the actual compilation of values. Notably this function is
+  responsible for recursively compiling merged attribute sets when it encounters
+  a `Binding::Set` (on which it invokes `compile_bindings` itself).
+
+In addition, several methods (such as `compile_attr_set`, `compile_let_in`,
+...) invoke the binding-kind-specific logic and then call out to the
+functions above.
diff --git a/tvix/eval/src/builtins/impure.rs b/tvix/eval/src/builtins/impure.rs
index 18403fe5d8..c82b910f5f 100644
--- a/tvix/eval/src/builtins/impure.rs
+++ b/tvix/eval/src/builtins/impure.rs
@@ -37,7 +37,7 @@ mod impure_builtins {
             Ok(p) => p,
         };
         let r = generators::request_open_file(&co, path).await;
-        Ok(hash_nix_string(algo.to_str()?, r).map(Value::from)?)
+        hash_nix_string(algo.to_str()?, r).map(Value::from)
     }
 
     #[builtin("pathExists")]
diff --git a/tvix/eval/src/value/string.rs b/tvix/eval/src/value/string.rs
index dd027895fd..ceb43f1ea5 100644
--- a/tvix/eval/src/value/string.rs
+++ b/tvix/eval/src/value/string.rs
@@ -530,11 +530,7 @@ impl<'a> From<&'a NixString> for &'a BStr {
     }
 }
 
-impl From<NixString> for String {
-    fn from(s: NixString) -> Self {
-        s.to_string()
-    }
-}
+// No impl From<NixString> for String, that one quotes.
 
 impl From<NixString> for BString {
     fn from(s: NixString) -> Self {
diff --git a/tvix/eval/src/vm/mod.rs b/tvix/eval/src/vm/mod.rs
index c10b79cd99..5c244cc3ca 100644
--- a/tvix/eval/src/vm/mod.rs
+++ b/tvix/eval/src/vm/mod.rs
@@ -1148,7 +1148,7 @@ where
                     let mut captured_with_stack = frame
                         .upvalues
                         .with_stack()
-                        .map(Clone::clone)
+                        .cloned()
                         // ... or make an empty one if there isn't one already.
                         .unwrap_or_else(|| Vec::with_capacity(self.with_stack.len()));
 
diff --git a/tvix/eval/tests/nix_oracle.rs b/tvix/eval/tests/nix_oracle.rs
index 6bab75cfd9..5a5cc0a822 100644
--- a/tvix/eval/tests/nix_oracle.rs
+++ b/tvix/eval/tests/nix_oracle.rs
@@ -30,7 +30,14 @@ fn nix_eval(expr: &str, strictness: Strictness) -> String {
         .arg(format!("({expr})"))
         .env(
             "NIX_REMOTE",
-            format!("local?root={}", store_dir.path().display()),
+            format!(
+                "local?root={}",
+                store_dir
+                    .path()
+                    .canonicalize()
+                    .expect("valid path")
+                    .display()
+            ),
         )
         .output()
         .unwrap();
diff --git a/tvix/glue/Cargo.toml b/tvix/glue/Cargo.toml
index eab07b7eb4..0afdefeaaa 100644
--- a/tvix/glue/Cargo.toml
+++ b/tvix/glue/Cargo.toml
@@ -4,7 +4,7 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
-async-recursion = "1.0.5"
+async-compression = { version = "0.4.9", features = ["tokio", "gzip", "bzip2", "xz"]}
 bstr = "1.6.0"
 bytes = "1.4.0"
 data-encoding = "2.3.3"
@@ -25,12 +25,11 @@ thiserror = "1.0.38"
 serde = "1.0.195"
 serde_json = "1.0"
 sha2 = "0.10.8"
+sha1 = "0.10.6"
+md-5 = "0.10.6"
+url = "2.4.0"
 walkdir = "2.4.0"
 
-[dependencies.async-compression]
-version = "0.4.6"
-features = ["tokio", "gzip", "bzip2", "xz"]
-
 [dependencies.wu-manber]
 git = "https://github.com/tvlfyi/wu-manber.git"
 
@@ -42,7 +41,6 @@ nix = { version = "0.27.1", features = [ "fs" ] }
 pretty_assertions = "1.4.0"
 rstest = "0.19.0"
 tempfile = "3.8.1"
-test-case = "3.3.1"
 
 [features]
 default = ["nix_tests"]
diff --git a/tvix/glue/benches/eval.rs b/tvix/glue/benches/eval.rs
index dfb4fabe44..202278c1aa 100644
--- a/tvix/glue/benches/eval.rs
+++ b/tvix/glue/benches/eval.rs
@@ -2,10 +2,6 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion};
 use lazy_static::lazy_static;
 use std::{env, rc::Rc, sync::Arc, time::Duration};
 use tvix_build::buildservice::DummyBuildService;
-use tvix_castore::{
-    blobservice::{BlobService, MemoryBlobService},
-    directoryservice::{DirectoryService, MemoryDirectoryService},
-};
 use tvix_eval::{builtins::impure_builtins, EvalIO};
 use tvix_glue::{
     builtins::{add_derivation_builtins, add_fetcher_builtins, add_import_builtins},
@@ -13,16 +9,9 @@ use tvix_glue::{
     tvix_io::TvixIO,
     tvix_store_io::TvixStoreIO,
 };
-use tvix_store::pathinfoservice::{MemoryPathInfoService, PathInfoService};
+use tvix_store::utils::construct_services;
 
 lazy_static! {
-    static ref BLOB_SERVICE: Arc<dyn BlobService> = Arc::new(MemoryBlobService::default());
-    static ref DIRECTORY_SERVICE: Arc<dyn DirectoryService> =
-        Arc::new(MemoryDirectoryService::default());
-    static ref PATH_INFO_SERVICE: Arc<dyn PathInfoService> = Arc::new(MemoryPathInfoService::new(
-        BLOB_SERVICE.clone(),
-        DIRECTORY_SERVICE.clone(),
-    ));
     static ref TOKIO_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Runtime::new().unwrap();
 }
 
@@ -30,12 +19,17 @@ fn interpret(code: &str) {
     // TODO: this is a bit annoying.
     // It'd be nice if we could set this up once and then run evaluate() with a
     // piece of code. b/262
+    let (blob_service, directory_service, path_info_service, nar_calculation_service) =
+        TOKIO_RUNTIME
+            .block_on(async { construct_services("memory://", "memory://", "memory://").await })
+            .unwrap();
 
     // We assemble a complete store in memory.
     let tvix_store_io = Rc::new(TvixStoreIO::new(
-        BLOB_SERVICE.clone(),
-        DIRECTORY_SERVICE.clone(),
-        PATH_INFO_SERVICE.clone(),
+        blob_service,
+        directory_service,
+        path_info_service.into(),
+        nar_calculation_service.into(),
         Arc::<DummyBuildService>::default(),
         TOKIO_RUNTIME.handle().clone(),
     ));
diff --git a/tvix/glue/src/builtins/derivation.rs b/tvix/glue/src/builtins/derivation.rs
index 1a8d18943e..a7742ae40a 100644
--- a/tvix/glue/src/builtins/derivation.rs
+++ b/tvix/glue/src/builtins/derivation.rs
@@ -457,55 +457,59 @@ pub(crate) mod derivation_builtins {
         drv.validate(false)
             .map_err(DerivationError::InvalidDerivation)?;
 
-        // Calculate the derivation_or_fod_hash for the current derivation.
-        // This one is still intermediate (so not added to known_paths)
-        let derivation_or_fod_hash_tmp = drv.derivation_or_fod_hash(|drv_path| {
-            known_paths
-                .get_hash_derivation_modulo(&drv_path.to_owned())
-                .unwrap_or_else(|| panic!("{} not found", drv_path))
-                .to_owned()
-        });
+        // Calculate the hash_derivation_modulo for the current derivation.
+        debug_assert!(
+            drv.outputs.values().all(|output| { output.path.is_none() }),
+            "outputs should still be unset"
+        );
 
         // Mutate the Derivation struct and set output paths
-        drv.calculate_output_paths(name, &derivation_or_fod_hash_tmp)
-            .map_err(DerivationError::InvalidDerivation)?;
+        drv.calculate_output_paths(
+            name,
+            // This one is still intermediate (so not added to known_paths),
+            // as the outputs are still unset.
+            &drv.hash_derivation_modulo(|drv_path| {
+                *known_paths
+                    .get_hash_derivation_modulo(&drv_path.to_owned())
+                    .unwrap_or_else(|| panic!("{} not found", drv_path))
+            }),
+        )
+        .map_err(DerivationError::InvalidDerivation)?;
 
         let drv_path = drv
             .calculate_derivation_path(name)
             .map_err(DerivationError::InvalidDerivation)?;
 
-        // TODO: avoid cloning
-        known_paths.add(drv_path.clone(), drv.clone());
-
-        let mut new_attrs: Vec<(String, NixString)> = drv
-            .outputs
-            .into_iter()
-            .map(|(name, output)| {
-                (
-                    name.clone(),
+        // Assemble the attrset to return from this builtin.
+        let out = Value::Attrs(Box::new(NixAttrs::from_iter(
+            drv.outputs
+                .iter()
+                .map(|(name, output)| {
+                    (
+                        name.clone(),
+                        NixString::new_context_from(
+                            NixContextElement::Single {
+                                name: name.clone(),
+                                derivation: drv_path.to_absolute_path(),
+                            }
+                            .into(),
+                            output.path.as_ref().unwrap().to_absolute_path(),
+                        ),
+                    )
+                })
+                .chain(std::iter::once((
+                    "drvPath".to_owned(),
                     NixString::new_context_from(
-                        NixContextElement::Single {
-                            name,
-                            derivation: drv_path.to_absolute_path(),
-                        }
-                        .into(),
-                        output.path.unwrap().to_absolute_path(),
+                        NixContextElement::Derivation(drv_path.to_absolute_path()).into(),
+                        drv_path.to_absolute_path(),
                     ),
-                )
-            })
-            .collect();
-
-        new_attrs.push((
-            "drvPath".to_string(),
-            NixString::new_context_from(
-                NixContextElement::Derivation(drv_path.to_absolute_path()).into(),
-                drv_path.to_absolute_path(),
-            ),
-        ));
-
-        Ok(Value::Attrs(Box::new(NixAttrs::from_iter(
-            new_attrs.into_iter(),
-        ))))
+                ))),
+        )));
+
+        // Register the Derivation in known_paths.
+        known_paths.add_derivation(drv_path, drv);
+
+        Ok(out)
     }
 
     #[builtin("toFile")]
diff --git a/tvix/glue/src/builtins/errors.rs b/tvix/glue/src/builtins/errors.rs
index 53351cf902..f6d5745c56 100644
--- a/tvix/glue/src/builtins/errors.rs
+++ b/tvix/glue/src/builtins/errors.rs
@@ -3,8 +3,10 @@ use nix_compat::{
     nixhash::{self, NixHash},
     store_path::BuildStorePathError,
 };
+use reqwest::Url;
 use std::rc::Rc;
 use thiserror::Error;
+use tvix_castore::import;
 
 /// Errors related to derivation construction
 #[derive(Debug, Error)]
@@ -33,7 +35,7 @@ impl From<DerivationError> for tvix_eval::ErrorKind {
 pub enum FetcherError {
     #[error("hash mismatch in file downloaded from {url}:\n  wanted: {wanted}\n     got: {got}")]
     HashMismatch {
-        url: String,
+        url: Url,
         wanted: NixHash,
         got: NixHash,
     },
@@ -41,17 +43,20 @@ pub enum FetcherError {
     #[error("Invalid hash type '{0}' for fetcher")]
     InvalidHashType(&'static str),
 
-    #[error("Error in store path for fetcher output: {0}")]
-    StorePath(#[from] BuildStorePathError),
+    #[error("Unable to parse URL: {0}")]
+    InvalidUrl(#[from] url::ParseError),
 
     #[error(transparent)]
     Http(#[from] reqwest::Error),
-}
 
-impl From<FetcherError> for tvix_eval::ErrorKind {
-    fn from(err: FetcherError) -> Self {
-        tvix_eval::ErrorKind::TvixError(Rc::new(err))
-    }
+    #[error(transparent)]
+    Io(#[from] std::io::Error),
+
+    #[error(transparent)]
+    Import(#[from] tvix_castore::import::IngestionError<import::archive::Error>),
+
+    #[error("Error calculating store path for fetcher output: {0}")]
+    StorePath(#[from] BuildStorePathError),
 }
 
 /// Errors related to `builtins.path` and `builtins.filterSource`,
diff --git a/tvix/glue/src/builtins/fetchers.rs b/tvix/glue/src/builtins/fetchers.rs
index cbb57532f6..c7602c03e8 100644
--- a/tvix/glue/src/builtins/fetchers.rs
+++ b/tvix/glue/src/builtins/fetchers.rs
@@ -1,196 +1,128 @@
-//! Contains builtins that fetch paths from the Internet
+//! Contains builtins that fetch paths from the Internet, or local filesystem.
 
-use crate::tvix_store_io::TvixStoreIO;
-use bstr::ByteSlice;
-use nix_compat::nixhash::{self, CAHash};
-use nix_compat::store_path::{build_ca_path, StorePathRef};
+use super::utils::select_string;
+use crate::{
+    fetchers::{url_basename, Fetch},
+    tvix_store_io::TvixStoreIO,
+};
+use nix_compat::nixhash;
+use nix_compat::nixhash::NixHash;
 use std::rc::Rc;
+use tracing::info;
 use tvix_eval::builtin_macros::builtins;
+use tvix_eval::generators::Gen;
 use tvix_eval::generators::GenCo;
-use tvix_eval::{CatchableErrorKind, ErrorKind, NixContextElement, NixString, Value};
-
-use super::utils::select_string;
-use super::{DerivationError, FetcherError};
-
-/// Attempts to mimic `nix::libutil::baseNameOf`
-fn url_basename(s: &str) -> &str {
-    if s.is_empty() {
-        return "";
-    }
-
-    let mut last = s.len() - 1;
-    if s.chars().nth(last).unwrap() == '/' && last > 0 {
-        last -= 1;
-    }
-
-    if last == 0 {
-        return "";
-    }
+use tvix_eval::{CatchableErrorKind, ErrorKind, Value};
 
-    let pos = match s[..=last].rfind('/') {
-        Some(pos) => {
-            if pos == last - 1 {
-                0
-            } else {
-                pos
-            }
-        }
-        None => 0,
-    };
-
-    &s[(pos + 1)..=last]
-}
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-enum HashMode {
-    Flat,
-    Recursive,
+struct NixFetchArgs {
+    url_str: String,
+    name: Option<String>,
+    sha256: Option<[u8; 32]>,
 }
 
-/// Struct representing the arguments passed to fetcher functions
-#[derive(Debug, PartialEq, Eq)]
-struct FetchArgs {
-    url: String,
-    name: String,
-    hash: Option<CAHash>,
-}
-
-impl FetchArgs {
-    pub fn new(
-        url: String,
-        name: Option<String>,
-        sha256: Option<String>,
-        mode: HashMode,
-    ) -> nixhash::NixHashResult<Self> {
-        Ok(Self {
-            name: name.unwrap_or_else(|| url_basename(&url).to_owned()),
-            url,
-            hash: sha256
-                .map(|h| {
-                    let hash = nixhash::from_str(&h, Some("sha256"))?;
-                    Ok(match mode {
-                        HashMode::Flat => Some(nixhash::CAHash::Flat(hash)),
-                        HashMode::Recursive => Some(nixhash::CAHash::Nar(hash)),
-                    })
-                })
-                .transpose()?
-                .flatten(),
-        })
-    }
-
-    fn store_path(&self) -> Result<Option<StorePathRef>, ErrorKind> {
-        let Some(h) = &self.hash else {
-            return Ok(None);
-        };
-        build_ca_path(&self.name, h, Vec::<String>::new(), false)
-            .map(Some)
-            .map_err(|e| FetcherError::from(e).into())
+// `fetchurl` and `fetchTarball` accept a single argument, which can either be the URL (as string),
+// or an attrset, where `url`, `sha256` and `name` keys are allowed.
+async fn extract_fetch_args(
+    co: &GenCo,
+    args: Value,
+) -> Result<Result<NixFetchArgs, CatchableErrorKind>, ErrorKind> {
+    if let Ok(url_str) = args.to_str() {
+        // Get the raw bytes, not the ToString repr.
+        let url_str =
+            String::from_utf8(url_str.as_bytes().to_vec()).map_err(|_| ErrorKind::Utf8)?;
+        return Ok(Ok(NixFetchArgs {
+            url_str,
+            name: None,
+            sha256: None,
+        }));
     }
 
-    async fn extract(
-        co: &GenCo,
-        args: Value,
-        default_name: Option<&str>,
-        mode: HashMode,
-    ) -> Result<Result<Self, CatchableErrorKind>, ErrorKind> {
-        if let Ok(url) = args.to_str() {
-            return Ok(Ok(FetchArgs::new(
-                url.to_str()?.to_owned(),
-                None,
-                None,
-                mode,
-            )
-            .map_err(DerivationError::InvalidOutputHash)?));
-        }
+    let attrs = args.to_attrs().map_err(|_| ErrorKind::TypeError {
+        expected: "attribute set or contextless string",
+        actual: args.type_of(),
+    })?;
 
-        let attrs = args.to_attrs().map_err(|_| ErrorKind::TypeError {
-            expected: "attribute set or string",
-            actual: args.type_of(),
-        })?;
-
-        let url = match select_string(co, &attrs, "url").await? {
-            Ok(s) => s.ok_or_else(|| ErrorKind::AttributeNotFound { name: "url".into() })?,
-            Err(cek) => return Ok(Err(cek)),
-        };
-        let name = match select_string(co, &attrs, "name").await? {
-            Ok(s) => s.or_else(|| default_name.map(|s| s.to_owned())),
-            Err(cek) => return Ok(Err(cek)),
-        };
-        let sha256 = match select_string(co, &attrs, "sha256").await? {
-            Ok(s) => s,
-            Err(cek) => return Ok(Err(cek)),
-        };
-
-        Ok(Ok(
-            FetchArgs::new(url, name, sha256, mode).map_err(DerivationError::InvalidOutputHash)?
-        ))
-    }
-}
+    let url_str = match select_string(co, &attrs, "url").await? {
+        Ok(s) => s.ok_or_else(|| ErrorKind::AttributeNotFound { name: "url".into() })?,
+        Err(cek) => return Ok(Err(cek)),
+    };
+    let name = match select_string(co, &attrs, "name").await? {
+        Ok(s) => s,
+        Err(cek) => return Ok(Err(cek)),
+    };
+    let sha256_str = match select_string(co, &attrs, "sha256").await? {
+        Ok(s) => s,
+        Err(cek) => return Ok(Err(cek)),
+    };
 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-enum FetchMode {
-    Url,
-    Tarball,
-}
+    // TODO: disallow other attrset keys, to match Nix' behaviour.
 
-impl From<FetchMode> for HashMode {
-    fn from(value: FetchMode) -> Self {
-        match value {
-            FetchMode::Url => HashMode::Flat,
-            FetchMode::Tarball => HashMode::Recursive,
-        }
-    }
-}
+    // parse the sha256 string into a digest.
+    let sha256 = match sha256_str {
+        Some(sha256_str) => {
+            let nixhash = nixhash::from_str(&sha256_str, Some("sha256"))
+                // TODO: DerivationError::InvalidOutputHash should be moved to ErrorKind::InvalidHash and used here instead
+                .map_err(|e| ErrorKind::TvixError(Rc::new(e)))?;
 
-impl FetchMode {
-    fn default_name(self) -> Option<&'static str> {
-        match self {
-            FetchMode::Url => None,
-            FetchMode::Tarball => Some("source"),
+            Some(nixhash.digest_as_bytes().try_into().expect("is sha256"))
         }
-    }
-}
-
-fn string_from_store_path(store_path: StorePathRef) -> NixString {
-    NixString::new_context_from(
-        NixContextElement::Plain(store_path.to_absolute_path()).into(),
-        store_path.to_absolute_path(),
-    )
-}
-
-async fn fetch(
-    state: Rc<TvixStoreIO>,
-    co: GenCo,
-    args: Value,
-    mode: FetchMode,
-) -> Result<Value, ErrorKind> {
-    let args = match FetchArgs::extract(&co, args, mode.default_name(), mode.into()).await? {
-        Ok(args) => args,
-        Err(cek) => return Ok(cek.into()),
+        None => None,
     };
 
-    if let Some(store_path) = args.store_path()? {
-        if state.store_path_exists(store_path).await? {
-            return Ok(string_from_store_path(store_path).into());
-        }
-    }
-
-    let hash = args.hash.as_ref().map(|h| h.hash());
-    let store_path = Rc::clone(&state).tokio_handle.block_on(state.fetch_url(
-        &args.url,
-        &args.name,
-        hash.as_deref(),
-    ))?;
-
-    Ok(string_from_store_path(store_path.as_ref()).into())
+    Ok(Ok(NixFetchArgs {
+        url_str,
+        name,
+        sha256,
+    }))
 }
 
 #[allow(unused_variables)] // for the `state` arg, for now
 #[builtins(state = "Rc<TvixStoreIO>")]
 pub(crate) mod fetcher_builtins {
+    use crate::builtins::FetcherError;
+    use url::Url;
+
     use super::*;
 
-    use tvix_eval::generators::Gen;
+    /// Consumes a fetch.
+    /// If there is enough info to calculate the store path without fetching,
+    /// queue the fetch to be fetched lazily, and return the store path.
+    /// If there's not enough info to calculate it, do the fetch now, and then
+    /// return the store path.
+    fn fetch_lazy(state: Rc<TvixStoreIO>, name: String, fetch: Fetch) -> Result<Value, ErrorKind> {
+        match fetch
+            .store_path(&name)
+            .map_err(|e| ErrorKind::TvixError(Rc::new(e)))?
+        {
+            Some(store_path) => {
+                // Move the fetch to KnownPaths, so it can be actually fetched later.
+                let sp = state
+                    .known_paths
+                    .borrow_mut()
+                    .add_fetch(fetch, &name)
+                    .expect("Tvix bug: should only fail if the store path cannot be calculated");
+
+                debug_assert_eq!(
+                    sp, store_path,
+                    "calculated store path by KnownPaths should match"
+                );
+
+                // Emit the calculated Store Path.
+                Ok(Value::Path(Box::new(store_path.to_absolute_path().into())))
+            }
+            None => {
+                // If we don't have enough info, do the fetch now.
+                info!(?fetch, "triggering required fetch");
+
+                let (store_path, _root_node) = state
+                    .tokio_handle
+                    .block_on(async { state.fetcher.ingest_and_persist(&name, fetch).await })
+                    .map_err(|e| ErrorKind::TvixError(Rc::new(e)))?;
+
+                Ok(Value::Path(Box::new(store_path.to_absolute_path().into())))
+            }
+        }
+    }
 
     #[builtin("fetchurl")]
     async fn builtin_fetchurl(
@@ -198,7 +130,25 @@ pub(crate) mod fetcher_builtins {
         co: GenCo,
         args: Value,
     ) -> Result<Value, ErrorKind> {
-        fetch(state, co, args, FetchMode::Url).await
+        let args = match extract_fetch_args(&co, args).await? {
+            Ok(args) => args,
+            Err(cek) => return Ok(Value::from(cek)),
+        };
+
+        // Derive the name from the URL basename if not set explicitly.
+        let name = args
+            .name
+            .unwrap_or_else(|| url_basename(&args.url_str).to_owned());
+
+        // Parse the URL.
+        let url = Url::parse(&args.url_str)
+            .map_err(|e| ErrorKind::TvixError(Rc::new(FetcherError::InvalidUrl(e))))?;
+
+        fetch_lazy(
+            state,
+            name,
+            Fetch::URL(url, args.sha256.map(NixHash::Sha256)),
+        )
     }
 
     #[builtin("fetchTarball")]
@@ -207,7 +157,22 @@ pub(crate) mod fetcher_builtins {
         co: GenCo,
         args: Value,
     ) -> Result<Value, ErrorKind> {
-        fetch(state, co, args, FetchMode::Tarball).await
+        let args = match extract_fetch_args(&co, args).await? {
+            Ok(args) => args,
+            Err(cek) => return Ok(Value::from(cek)),
+        };
+
+        // Name defaults to "source" if not set explicitly.
+        const DEFAULT_NAME_FETCH_TARBALL: &str = "source";
+        let name = args
+            .name
+            .unwrap_or_else(|| DEFAULT_NAME_FETCH_TARBALL.to_owned());
+
+        // Parse the URL.
+        let url = Url::parse(&args.url_str)
+            .map_err(|e| ErrorKind::TvixError(Rc::new(FetcherError::InvalidUrl(e))))?;
+
+        fetch_lazy(state, name, Fetch::Tarball(url, args.sha256))
     }
 
     #[builtin("fetchGit")]
@@ -219,71 +184,3 @@ pub(crate) mod fetcher_builtins {
         Err(ErrorKind::NotImplemented("fetchGit"))
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use std::str::FromStr;
-
-    use nix_compat::store_path::StorePath;
-
-    use super::*;
-
-    #[test]
-    fn fetchurl_store_path() {
-        let url = "https://raw.githubusercontent.com/aaptel/notmuch-extract-patch/f732a53e12a7c91a06755ebfab2007adc9b3063b/notmuch-extract-patch";
-        let sha256 = "0nawkl04sj7psw6ikzay7kydj3dhd0fkwghcsf5rzaw4bmp4kbax";
-        let args = FetchArgs::new(url.into(), None, Some(sha256.into()), HashMode::Flat).unwrap();
-
-        assert_eq!(
-            args.store_path().unwrap().unwrap().to_owned(),
-            StorePath::from_str("06qi00hylriyfm0nl827crgjvbax84mz-notmuch-extract-patch").unwrap()
-        )
-    }
-
-    #[test]
-    fn fetch_tarball_store_path() {
-        let url = "https://github.com/NixOS/nixpkgs/archive/91050ea1e57e50388fa87a3302ba12d188ef723a.tar.gz";
-        let sha256 = "1hf6cgaci1n186kkkjq106ryf8mmlq9vnwgfwh625wa8hfgdn4dm";
-        let args = FetchArgs::new(
-            url.into(),
-            Some("source".into()),
-            Some(sha256.into()),
-            HashMode::Recursive,
-        )
-        .unwrap();
-
-        assert_eq!(
-            args.store_path().unwrap().unwrap().to_owned(),
-            StorePath::from_str("7adgvk5zdfq4pwrhsm3n9lzypb12gw0g-source").unwrap()
-        )
-    }
-
-    mod url_basename {
-        use super::*;
-
-        #[test]
-        fn empty_path() {
-            assert_eq!(url_basename(""), "");
-        }
-
-        #[test]
-        fn path_on_root() {
-            assert_eq!(url_basename("/dir"), "dir");
-        }
-
-        #[test]
-        fn relative_path() {
-            assert_eq!(url_basename("dir/foo"), "foo");
-        }
-
-        #[test]
-        fn root_with_trailing_slash() {
-            assert_eq!(url_basename("/"), "");
-        }
-
-        #[test]
-        fn trailing_slash() {
-            assert_eq!(url_basename("/dir/"), "dir");
-        }
-    }
-}
diff --git a/tvix/glue/src/builtins/import.rs b/tvix/glue/src/builtins/import.rs
index 800f8ddc17..4a15afa814 100644
--- a/tvix/glue/src/builtins/import.rs
+++ b/tvix/glue/src/builtins/import.rs
@@ -1,9 +1,8 @@
 //! Implements builtins used to import paths in the store.
 
 use crate::builtins::errors::ImportError;
-use futures::pin_mut;
 use std::path::Path;
-use tvix_castore::import::leveled_entries_to_stream;
+use tvix_castore::import::ingest_entries;
 use tvix_eval::{
     builtin_macros::builtins,
     generators::{self, GenCo},
@@ -18,17 +17,15 @@ async fn filtered_ingest(
     path: &Path,
     filter: Option<&Value>,
 ) -> Result<tvix_castore::proto::node::Node, ErrorKind> {
-    // produce the leveled-key vector of DirEntry.
-    let mut entries_per_depths: Vec<Vec<walkdir::DirEntry>> = vec![Vec::new()];
+    let mut entries: Vec<walkdir::DirEntry> = vec![];
     let mut it = walkdir::WalkDir::new(path)
         .follow_links(false)
         .follow_root_links(false)
         .contents_first(false)
-        .sort_by_file_name()
         .into_iter();
 
     // Skip root node.
-    entries_per_depths[0].push(
+    entries.push(
         it.next()
             .ok_or_else(|| ErrorKind::IO {
                 path: Some(path.to_path_buf()),
@@ -85,32 +82,22 @@ async fn filtered_ingest(
             continue;
         }
 
-        if entry.depth() >= entries_per_depths.len() {
-            debug_assert!(
-                entry.depth() == entries_per_depths.len(),
-                "Received unexpected entry with depth {} during descent, previously at {}",
-                entry.depth(),
-                entries_per_depths.len()
-            );
-
-            entries_per_depths.push(vec![entry]);
-        } else {
-            entries_per_depths[entry.depth()].push(entry);
-        }
-
-        // FUTUREWORK: determine when it's the right moment to flush a level to the ingester.
+        entries.push(entry);
     }
 
-    let direntry_stream = leveled_entries_to_stream(entries_per_depths);
-    pin_mut!(direntry_stream);
+    let dir_entries = entries.into_iter().rev().map(Ok);
 
     state.tokio_handle.block_on(async {
-        state
-            .ingest_entries(direntry_stream)
+        let entries = tvix_castore::import::fs::dir_entries_to_ingestion_stream(
+            &state.blob_service,
+            dir_entries,
+            path,
+        );
+        ingest_entries(&state.directory_service, entries)
             .await
-            .map_err(|err| ErrorKind::IO {
+            .map_err(|e| ErrorKind::IO {
                 path: Some(path.to_path_buf()),
-                error: err.into(),
+                error: Rc::new(std::io::Error::new(std::io::ErrorKind::Other, e)),
             })
     })
 }
@@ -191,7 +178,7 @@ mod import_builtins {
             CAHash::Nar(NixHash::Sha256(state.tokio_handle.block_on(async {
                 Ok::<_, tvix_eval::ErrorKind>(
                     state
-                        .path_info_service
+                        .nar_calculation_service
                         .as_ref()
                         .calculate_nar(&root_node)
                         .await
@@ -223,7 +210,7 @@ mod import_builtins {
         };
 
         let obtained_hash = ca.hash().clone().into_owned();
-        let (path_info, output_path) = state.tokio_handle.block_on(async {
+        let (path_info, _hash, output_path) = state.tokio_handle.block_on(async {
             state
                 .node_to_path_info(name.as_ref(), path.as_ref(), ca, root_node)
                 .await
@@ -268,7 +255,7 @@ mod import_builtins {
             .tokio_handle
             .block_on(async {
                 let (_, nar_sha256) = state
-                    .path_info_service
+                    .nar_calculation_service
                     .as_ref()
                     .calculate_nar(&root_node)
                     .await?;
diff --git a/tvix/glue/src/builtins/mod.rs b/tvix/glue/src/builtins/mod.rs
index fec309cbf0..3d6263286d 100644
--- a/tvix/glue/src/builtins/mod.rs
+++ b/tvix/glue/src/builtins/mod.rs
@@ -56,8 +56,8 @@ mod tests {
 
     use super::{add_derivation_builtins, add_fetcher_builtins, add_import_builtins};
     use nix_compat::store_path::hash_placeholder;
+    use rstest::rstest;
     use tempfile::TempDir;
-    use test_case::test_case;
     use tvix_build::buildservice::DummyBuildService;
     use tvix_eval::{EvalIO, EvaluationResult};
     use tvix_store::utils::construct_services;
@@ -68,7 +68,7 @@ mod tests {
     fn eval(str: &str) -> EvaluationResult {
         // We assemble a complete store in memory.
         let runtime = tokio::runtime::Runtime::new().expect("Failed to build a Tokio runtime");
-        let (blob_service, directory_service, path_info_service) = runtime
+        let (blob_service, directory_service, path_info_service, nar_calculation_service) = runtime
             .block_on(async { construct_services("memory://", "memory://", "memory://").await })
             .expect("Failed to construct store services in memory");
 
@@ -76,6 +76,7 @@ mod tests {
             blob_service,
             directory_service,
             path_info_service.into(),
+            nar_calculation_service.into(),
             Arc::<DummyBuildService>::default(),
             runtime.handle().clone(),
         ));
@@ -119,26 +120,27 @@ mod tests {
 
     /// construct some calls to builtins.derivation and compare produced output
     /// paths.
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/17wgs52s7kcamcyin4ja58njkf91ipq8-foo"; "r:sha256")]
-    #[test_case(r#"(builtins.derivation { name = "foo2"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/gi0p8vd635vpk1nq029cz3aa3jkhar5k-foo2"; "r:sha256 other name")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha1"; outputHash = "sha1-VUCRC+16gU5lcrLYHlPSUyx0Y/Q="; }).outPath"#, "/nix/store/p5sammmhpa84ama7ymkbgwwzrilva24x-foo"; "r:sha1")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "md5"; outputHash = "md5-07BzhNET7exJ6qYjitX/AA=="; }).outPath"#, "/nix/store/gmmxgpy1jrzs86r5y05wy6wiy2m15xgi-foo"; "r:md5")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha512"; outputHash = "sha512-DPkYCnZKuoY6Z7bXLwkYvBMcZ3JkLLLc5aNPCnAvlHDdwr8SXBIZixmVwjPDS0r9NGxUojNMNQqUilG26LTmtg=="; }).outPath"#, "/nix/store/lfi2bfyyap88y45mfdwi4j99gkaxaj19-foo"; "r:sha512")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "4374173a8cbe88de152b609f96f46e958bcf65762017474eec5a05ec2bd61530"; }).outPath"#, "/nix/store/17wgs52s7kcamcyin4ja58njkf91ipq8-foo"; "r:sha256 base16")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "0c0msqmyq1asxi74f5r0frjwz2wmdvs9d7v05caxx25yihx1fx23"; }).outPath"#, "/nix/store/17wgs52s7kcamcyin4ja58njkf91ipq8-foo"; "r:sha256 nixbase32")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/17wgs52s7kcamcyin4ja58njkf91ipq8-foo"; "r:sha256 base64")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "sha256-fgIr3TyFGDAXP5+qoAaiMKDg/a1MlT6Fv/S/DaA24S8="; }).outPath"#, "/nix/store/xm1l9dx4zgycv9qdhcqqvji1z88z534b-foo"; "r:sha256 base64 nopad")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "flat"; outputHashAlgo = "sha256"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/q4pkwkxdib797fhk22p0k3g1q32jmxvf-foo"; "sha256")]
-    #[test_case(r#"(builtins.derivation { name = "foo2"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "flat"; outputHashAlgo = "sha256"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/znw17xlmx9r6gw8izjkqxkl6s28sza4l-foo2"; "sha256 other name")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "flat"; outputHashAlgo = "sha1"; outputHash = "sha1-VUCRC+16gU5lcrLYHlPSUyx0Y/Q="; }).outPath"#, "/nix/store/zgpnjjmga53d8srp8chh3m9fn7nnbdv6-foo"; "sha1")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "flat"; outputHashAlgo = "md5"; outputHash = "md5-07BzhNET7exJ6qYjitX/AA=="; }).outPath"#, "/nix/store/jfhcwnq1852ccy9ad9nakybp2wadngnd-foo"; "md5")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "flat"; outputHashAlgo = "sha512"; outputHash = "sha512-DPkYCnZKuoY6Z7bXLwkYvBMcZ3JkLLLc5aNPCnAvlHDdwr8SXBIZixmVwjPDS0r9NGxUojNMNQqUilG26LTmtg=="; }).outPath"#, "/nix/store/as736rr116ian9qzg457f96j52ki8bm3-foo"; "sha512")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/17wgs52s7kcamcyin4ja58njkf91ipq8-foo"; "r:sha256 outputHashAlgo omitted")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/q4pkwkxdib797fhk22p0k3g1q32jmxvf-foo"; "r:sha256 outputHashAlgo and outputHashMode omitted")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; }).outPath"#, "/nix/store/xpcvxsx5sw4rbq666blz6sxqlmsqphmr-foo"; "outputHash* omitted")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; outputs = ["foo" "bar"]; system = "x86_64-linux"; }).outPath"#, "/nix/store/hkwdinvz2jpzgnjy9lv34d2zxvclj4s3-foo-foo"; "multiple outputs")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; args = ["--foo" "42" "--bar"]; system = "x86_64-linux"; }).outPath"#, "/nix/store/365gi78n2z7vwc1bvgb98k0a9cqfp6as-foo"; "args")]
-    #[test_case(r#"
+    #[rstest]
+    #[case::r_sha256(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/17wgs52s7kcamcyin4ja58njkf91ipq8-foo")]
+    #[case::r_sha256_other_name(r#"(builtins.derivation { name = "foo2"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/gi0p8vd635vpk1nq029cz3aa3jkhar5k-foo2")]
+    #[case::r_sha1(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha1"; outputHash = "sha1-VUCRC+16gU5lcrLYHlPSUyx0Y/Q="; }).outPath"#, "/nix/store/p5sammmhpa84ama7ymkbgwwzrilva24x-foo")]
+    #[case::r_md5(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "md5"; outputHash = "md5-07BzhNET7exJ6qYjitX/AA=="; }).outPath"#, "/nix/store/gmmxgpy1jrzs86r5y05wy6wiy2m15xgi-foo")]
+    #[case::r_sha512(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha512"; outputHash = "sha512-DPkYCnZKuoY6Z7bXLwkYvBMcZ3JkLLLc5aNPCnAvlHDdwr8SXBIZixmVwjPDS0r9NGxUojNMNQqUilG26LTmtg=="; }).outPath"#, "/nix/store/lfi2bfyyap88y45mfdwi4j99gkaxaj19-foo")]
+    #[case::r_sha256_base16(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "4374173a8cbe88de152b609f96f46e958bcf65762017474eec5a05ec2bd61530"; }).outPath"#, "/nix/store/17wgs52s7kcamcyin4ja58njkf91ipq8-foo")]
+    #[case::r_sha256_nixbase32(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "0c0msqmyq1asxi74f5r0frjwz2wmdvs9d7v05caxx25yihx1fx23"; }).outPath"#, "/nix/store/17wgs52s7kcamcyin4ja58njkf91ipq8-foo")]
+    #[case::r_sha256_base64(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/17wgs52s7kcamcyin4ja58njkf91ipq8-foo")]
+    #[case::r_sha256_base64_nopad(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "sha256-fgIr3TyFGDAXP5+qoAaiMKDg/a1MlT6Fv/S/DaA24S8="; }).outPath"#, "/nix/store/xm1l9dx4zgycv9qdhcqqvji1z88z534b-foo")]
+    #[case::sha256(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "flat"; outputHashAlgo = "sha256"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/q4pkwkxdib797fhk22p0k3g1q32jmxvf-foo")]
+    #[case::sha256_other_name(r#"(builtins.derivation { name = "foo2"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "flat"; outputHashAlgo = "sha256"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/znw17xlmx9r6gw8izjkqxkl6s28sza4l-foo2")]
+    #[case::sha1(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "flat"; outputHashAlgo = "sha1"; outputHash = "sha1-VUCRC+16gU5lcrLYHlPSUyx0Y/Q="; }).outPath"#, "/nix/store/zgpnjjmga53d8srp8chh3m9fn7nnbdv6-foo")]
+    #[case::md5(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "flat"; outputHashAlgo = "md5"; outputHash = "md5-07BzhNET7exJ6qYjitX/AA=="; }).outPath"#, "/nix/store/jfhcwnq1852ccy9ad9nakybp2wadngnd-foo")]
+    #[case::sha512(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "flat"; outputHashAlgo = "sha512"; outputHash = "sha512-DPkYCnZKuoY6Z7bXLwkYvBMcZ3JkLLLc5aNPCnAvlHDdwr8SXBIZixmVwjPDS0r9NGxUojNMNQqUilG26LTmtg=="; }).outPath"#, "/nix/store/as736rr116ian9qzg457f96j52ki8bm3-foo")]
+    #[case::r_sha256_outputhashalgo_omitted(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/17wgs52s7kcamcyin4ja58njkf91ipq8-foo")]
+    #[case::r_sha256_outputhashalgo_and_outputhashmode_omitted(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#, "/nix/store/q4pkwkxdib797fhk22p0k3g1q32jmxvf-foo")]
+    #[case::outputhash_omitted(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; }).outPath"#, "/nix/store/xpcvxsx5sw4rbq666blz6sxqlmsqphmr-foo")]
+    #[case::multiple_outputs(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; outputs = ["foo" "bar"]; system = "x86_64-linux"; }).outPath"#, "/nix/store/hkwdinvz2jpzgnjy9lv34d2zxvclj4s3-foo-foo")]
+    #[case::args(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; args = ["--foo" "42" "--bar"]; system = "x86_64-linux"; }).outPath"#, "/nix/store/365gi78n2z7vwc1bvgb98k0a9cqfp6as-foo")]
+    #[case::full(r#"
                    let
                      bar = builtins.derivation {
                        name = "bar";
@@ -155,34 +157,34 @@ mod tests {
                      system = ":";
                      inherit bar;
                    }).outPath
-        "#, "/nix/store/5vyvcwah9l9kf07d52rcgdk70g2f4y13-foo"; "full")]
-    #[test_case(r#"(builtins.derivation { "name" = "foo"; passAsFile = ["bar"]; bar = "baz"; system = ":"; builder = ":";}).outPath"#, "/nix/store/25gf0r1ikgmh4vchrn8qlc4fnqlsa5a1-foo"; "passAsFile")]
+        "#, "/nix/store/5vyvcwah9l9kf07d52rcgdk70g2f4y13-foo")]
+    #[case::pass_as_file(r#"(builtins.derivation { "name" = "foo"; passAsFile = ["bar"]; bar = "baz"; system = ":"; builder = ":";}).outPath"#, "/nix/store/25gf0r1ikgmh4vchrn8qlc4fnqlsa5a1-foo")]
     // __ignoreNulls = true, but nothing set to null
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = true; }).drvPath"#, "/nix/store/xa96w6d7fxrlkk60z1fmx2ffp2wzmbqx-foo.drv"; "ignoreNulls no arg drvPath")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = true; }).outPath"#, "/nix/store/pk2agn9za8r9bxsflgh1y7fyyrmwcqkn-foo"; "ignoreNulls no arg outPath")]
+    #[case::ignore_nulls_true_no_arg_drvpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = true; }).drvPath"#, "/nix/store/xa96w6d7fxrlkk60z1fmx2ffp2wzmbqx-foo.drv")]
+    #[case::ignore_nulls_true_no_arg_outpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = true; }).outPath"#, "/nix/store/pk2agn9za8r9bxsflgh1y7fyyrmwcqkn-foo")]
     // __ignoreNulls = true, with a null arg, same paths
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = true; ignoreme = null; }).drvPath"#, "/nix/store/xa96w6d7fxrlkk60z1fmx2ffp2wzmbqx-foo.drv"; "ignoreNulls drvPath")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = true; ignoreme = null; }).outPath"#, "/nix/store/pk2agn9za8r9bxsflgh1y7fyyrmwcqkn-foo"; "ignoreNulls outPath")]
+    #[case::ignore_nulls_true_drvpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = true; ignoreme = null; }).drvPath"#, "/nix/store/xa96w6d7fxrlkk60z1fmx2ffp2wzmbqx-foo.drv")]
+    #[case::ignore_nulls_true_outpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = true; ignoreme = null; }).outPath"#, "/nix/store/pk2agn9za8r9bxsflgh1y7fyyrmwcqkn-foo")]
     // __ignoreNulls = false
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = false; }).drvPath"#, "/nix/store/xa96w6d7fxrlkk60z1fmx2ffp2wzmbqx-foo.drv"; "ignoreNulls false no arg drvPath")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = false; }).outPath"#, "/nix/store/pk2agn9za8r9bxsflgh1y7fyyrmwcqkn-foo"; "ignoreNulls false no arg arg outPath")]
+    #[case::ignore_nulls_false_no_arg_drvpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = false; }).drvPath"#, "/nix/store/xa96w6d7fxrlkk60z1fmx2ffp2wzmbqx-foo.drv")]
+    #[case::ignore_nulls_false_no_arg_outpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = false; }).outPath"#, "/nix/store/pk2agn9za8r9bxsflgh1y7fyyrmwcqkn-foo")]
     // __ignoreNulls = false, with a null arg
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = false; foo = null; }).drvPath"#, "/nix/store/xwkwbajfiyhdqmksrbzm0s4g4ib8d4ms-foo.drv"; "ignoreNulls false arg drvPath")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = false; foo = null; }).outPath"#, "/nix/store/2n2jqm6l7r2ahi19m58pl896ipx9cyx6-foo"; "ignoreNulls false arg arg outPath")]
+    #[case::ignore_nulls_false_arg_path_drvpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = false; foo = null; }).drvPath"#, "/nix/store/xwkwbajfiyhdqmksrbzm0s4g4ib8d4ms-foo.drv")]
+    #[case::ignore_nulls_false_arg_path_outpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = false; foo = null; }).outPath"#, "/nix/store/2n2jqm6l7r2ahi19m58pl896ipx9cyx6-foo")]
     // structured attrs set to false will render an empty string inside env
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = false; foo = "bar"; }).drvPath"#, "/nix/store/qs39krwr2lsw6ac910vqx4pnk6m63333-foo.drv"; "structuredAttrs-false-drvPath")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = false; foo = "bar"; }).outPath"#, "/nix/store/9yy3764rdip3fbm8ckaw4j9y7vh4d231-foo"; "structuredAttrs-false-outPath")]
+    #[case::structured_attrs_false_drvpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = false; foo = "bar"; }).drvPath"#, "/nix/store/qs39krwr2lsw6ac910vqx4pnk6m63333-foo.drv")]
+    #[case::structured_attrs_false_outpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = false; foo = "bar"; }).outPath"#, "/nix/store/9yy3764rdip3fbm8ckaw4j9y7vh4d231-foo")]
     // simple structured attrs
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = true; foo = "bar"; }).drvPath"#, "/nix/store/k6rlb4k10cb9iay283037ml1nv3xma2f-foo.drv"; "structuredAttrs-simple-drvPath")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = true; foo = "bar"; }).outPath"#, "/nix/store/6lmv3hyha1g4cb426iwjyifd7nrdv1xn-foo"; "structuredAttrs-simple-outPath")]
+    #[case::structured_attrs_simple_drvpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = true; foo = "bar"; }).drvPath"#, "/nix/store/k6rlb4k10cb9iay283037ml1nv3xma2f-foo.drv")]
+    #[case::structured_attrs_simple_outpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = true; foo = "bar"; }).outPath"#, "/nix/store/6lmv3hyha1g4cb426iwjyifd7nrdv1xn-foo")]
     // structured attrs with outputsCheck
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = true; foo = "bar"; outputChecks = {out = {maxClosureSize = 256 * 1024 * 1024; disallowedRequisites = [ "dev" ];};}; }).drvPath"#, "/nix/store/fx9qzpchh5wchchhy39bwsml978d6wp1-foo.drv"; "structuredAttrs-outputChecks-drvPath")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = true; foo = "bar"; outputChecks = {out = {maxClosureSize = 256 * 1024 * 1024; disallowedRequisites = [ "dev" ];};}; }).outPath"#, "/nix/store/pcywah1nwym69rzqdvpp03sphfjgyw1l-foo"; "structuredAttrs-outputChecks-outPath")]
+    #[case::structured_attrs_output_checks_drvpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = true; foo = "bar"; outputChecks = {out = {maxClosureSize = 256 * 1024 * 1024; disallowedRequisites = [ "dev" ];};}; }).drvPath"#, "/nix/store/fx9qzpchh5wchchhy39bwsml978d6wp1-foo.drv")]
+    #[case::structured_attrs_output_checks_outpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __structuredAttrs = true; foo = "bar"; outputChecks = {out = {maxClosureSize = 256 * 1024 * 1024; disallowedRequisites = [ "dev" ];};}; }).outPath"#, "/nix/store/pcywah1nwym69rzqdvpp03sphfjgyw1l-foo")]
     // structured attrs and __ignoreNulls. ignoreNulls is inactive (so foo ends up in __json, yet __ignoreNulls itself is not present).
-    #[test_case(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = false; foo = null; __structuredAttrs = true; }).drvPath"#, "/nix/store/rldskjdcwa3p7x5bqy3r217va1jsbjsc-foo.drv"; "structuredAttrs-and-ignore-nulls-drvPath")]
+    #[case::structured_attrs_and_ignore_nulls_drvpath(r#"(builtins.derivation { name = "foo"; system = ":"; builder = ":"; __ignoreNulls = false; foo = null; __structuredAttrs = true; }).drvPath"#, "/nix/store/rldskjdcwa3p7x5bqy3r217va1jsbjsc-foo.drv")]
     // structured attrs, setting outputs.
-    #[test_case(r#"(builtins.derivation { name = "test"; system = "aarch64-linux"; builder = "/bin/sh"; __structuredAttrs = true; outputs = [ "out"]; }).drvPath"#, "/nix/store/6sgawp30zibsh525p7c948xxd22y2ngy-test.drv"; "structuredAttrs-outputs-drvPath")]
-    fn test_outpath(code: &str, expected_path: &str) {
+    #[case::structured_attrs_outputs_drvpath(r#"(builtins.derivation { name = "test"; system = "aarch64-linux"; builder = "/bin/sh"; __structuredAttrs = true; outputs = [ "out"]; }).drvPath"#, "/nix/store/6sgawp30zibsh525p7c948xxd22y2ngy-test.drv")]
+    fn test_outpath(#[case] code: &str, #[case] expected_path: &str) {
         let value = eval(code).value.expect("must succeed");
 
         match value {
@@ -194,10 +196,11 @@ mod tests {
     }
 
     /// construct some calls to builtins.derivation that should be rejected
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "sha256-00"; }).outPath"#; "invalid outputhash")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha1"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#; "sha1 and sha256")]
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; outputs = ["foo" "foo"]; system = "x86_64-linux"; }).outPath"#; "duplicate output names")]
-    fn test_outpath_invalid(code: &str) {
+    #[rstest]
+    #[case::invalid_outputhash(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "sha256-00"; }).outPath"#)]
+    #[case::sha1_and_sha256(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha1"; outputHash = "sha256-Q3QXOoy+iN4VK2CflvRulYvPZXYgF0dO7FoF7CvWFTA="; }).outPath"#)]
+    #[case::duplicate_output_names(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; outputs = ["foo" "foo"]; system = "x86_64-linux"; }).outPath"#)]
+    fn test_outpath_invalid(#[case] code: &str) {
         let resp = eval(code);
         assert!(resp.value.is_none(), "Value should be None");
         assert!(
@@ -285,7 +288,8 @@ mod tests {
         }
     }
 
-    #[test_case(r#"
+    #[rstest]
+    #[case::input_in_args(r#"
                    let
                      bar = builtins.derivation {
                        name = "bar";
@@ -302,8 +306,8 @@ mod tests {
                      args = [ "${bar}" ];
                      system = ":";
                    }).drvPath
-        "#, "/nix/store/50yl2gmmljyl0lzyrp1mcyhn53vhjhkd-foo.drv"; "input in `args`")]
-    fn test_inputs_derivation_from_context(code: &str, expected_drvpath: &str) {
+        "#, "/nix/store/50yl2gmmljyl0lzyrp1mcyhn53vhjhkd-foo.drv")]
+    fn test_inputs_derivation_from_context(#[case] code: &str, #[case] expected_drvpath: &str) {
         let eval_result = eval(code);
 
         let value = eval_result.value.expect("must succeed");
@@ -331,8 +335,12 @@ mod tests {
     }
 
     /// constructs calls to builtins.derivation that should succeed, but produce warnings
-    #[test_case(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "sha256-fgIr3TyFGDAXP5+qoAaiMKDg/a1MlT6Fv/S/DaA24S8===="; }).outPath"#, "/nix/store/xm1l9dx4zgycv9qdhcqqvji1z88z534b-foo"; "r:sha256 wrong padding")]
-    fn builtins_derivation_hash_wrong_padding_warn(code: &str, expected_path: &str) {
+    #[rstest]
+    #[case::r_sha256_wrong_padding(r#"(builtins.derivation { name = "foo"; builder = "/bin/sh"; system = "x86_64-linux"; outputHashMode = "recursive"; outputHashAlgo = "sha256"; outputHash = "sha256-fgIr3TyFGDAXP5+qoAaiMKDg/a1MlT6Fv/S/DaA24S8===="; }).outPath"#, "/nix/store/xm1l9dx4zgycv9qdhcqqvji1z88z534b-foo")]
+    fn builtins_derivation_hash_wrong_padding_warn(
+        #[case] code: &str,
+        #[case] expected_path: &str,
+    ) {
         let eval_result = eval(code);
 
         let value = eval_result.value.expect("must succeed");
@@ -353,35 +361,117 @@ mod tests {
     /// Invokes `builtins.filterSource` on various carefully-crafted subdirs, and
     /// ensures the resulting store paths match what Nix produces.
     /// @fixtures is replaced with the fixtures directory.
+    #[rstest]
     #[cfg(target_family = "unix")]
-    #[test_case(r#"(builtins.filterSource (p: t: true) @fixtures)"#, "/nix/store/bqh6kd0x3vps2rzagzpl7qmbbgnx19cp-import_fixtures"; "complicated directory: filter nothing")]
-    #[test_case(r#"(builtins.filterSource (p: t: false) @fixtures)"#, "/nix/store/giq6czz24lpjg97xxcxk6rg950lcpib1-import_fixtures"; "complicated directory: filter everything")]
-    #[test_case(r#"(builtins.filterSource (p: t: t != "directory") @fixtures/a_dir)"#, "/nix/store/8vbqaxapywkvv1hacdja3pi075r14d43-a_dir"; "simple directory with one file: filter directories")]
-    #[test_case(r#"(builtins.filterSource (p: t: t != "regular") @fixtures/a_dir)"#, "/nix/store/zphlqc93s2iq4xm393l06hzf8hp85r4z-a_dir"; "simple directory with one file: filter files")]
-    #[test_case(r#"(builtins.filterSource (p: t: t != "symlink") @fixtures/a_dir)"#, "/nix/store/8vbqaxapywkvv1hacdja3pi075r14d43-a_dir"; "simple directory with one file: filter symlinks")]
-    #[test_case(r#"(builtins.filterSource (p: t: true) @fixtures/a_dir)"#, "/nix/store/8vbqaxapywkvv1hacdja3pi075r14d43-a_dir"; "simple directory with one file: filter nothing")]
-    #[test_case(r#"(builtins.filterSource (p: t: false) @fixtures/a_dir)"#, "/nix/store/zphlqc93s2iq4xm393l06hzf8hp85r4z-a_dir"; "simple directory with one file: filter everything")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "directory") @fixtures/b_dir"#, "/nix/store/xzsfzdgrxg93icaamjm8zq1jq6xvf2fz-b_dir"; "simple directory with one directory: filter directories")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "regular") @fixtures/b_dir"#, "/nix/store/8rjx64mm7173xp60rahv7cl3ixfkv3rf-b_dir"; "simple directory with one directory: filter files")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "symlink") @fixtures/b_dir"#, "/nix/store/8rjx64mm7173xp60rahv7cl3ixfkv3rf-b_dir"; "simple directory with one directory: filter symlinks")]
-    #[test_case(r#"builtins.filterSource (p: t: true) @fixtures/b_dir"#, "/nix/store/8rjx64mm7173xp60rahv7cl3ixfkv3rf-b_dir"; "simple directory with one directory: filter nothing")]
-    #[test_case(r#"builtins.filterSource (p: t: false) @fixtures/b_dir"#, "/nix/store/xzsfzdgrxg93icaamjm8zq1jq6xvf2fz-b_dir"; "simple directory with one directory: filter everything")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "directory") @fixtures/c_dir"#, "/nix/store/riigfmmzzrq65zqiffcjk5sbqr9c9h09-c_dir"; "simple directory with one symlink to a file: filter directory")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "regular") @fixtures/c_dir"#, "/nix/store/riigfmmzzrq65zqiffcjk5sbqr9c9h09-c_dir"; "simple directory with one symlink to a file: filter files")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "symlink") @fixtures/c_dir"#, "/nix/store/y5g1fz04vzjvf422q92qmv532axj5q26-c_dir"; "simple directory with one symlink to a file: filter symlinks")]
-    #[test_case(r#"builtins.filterSource (p: t: true) @fixtures/c_dir"#, "/nix/store/riigfmmzzrq65zqiffcjk5sbqr9c9h09-c_dir"; "simple directory with one symlink to a file: filter nothing")]
-    #[test_case(r#"builtins.filterSource (p: t: false) @fixtures/c_dir"#, "/nix/store/y5g1fz04vzjvf422q92qmv532axj5q26-c_dir"; "simple directory with one symlink to a file: filter everything")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "directory") @fixtures/d_dir"#, "/nix/store/f2d1aixwiqy4lbzrd040ala2s4m2z199-d_dir"; "simple directory with dangling symlink: filter directory")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "regular") @fixtures/d_dir"#, "/nix/store/f2d1aixwiqy4lbzrd040ala2s4m2z199-d_dir"; "simple directory with dangling symlink: filter file")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "symlink") @fixtures/d_dir"#, "/nix/store/7l371xax8kknhpska4wrmyll1mzlhzvl-d_dir"; "simple directory with dangling symlink: filter symlinks")]
-    #[test_case(r#"builtins.filterSource (p: t: true) @fixtures/d_dir"#, "/nix/store/f2d1aixwiqy4lbzrd040ala2s4m2z199-d_dir"; "simple directory with dangling symlink: filter nothing")]
-    #[test_case(r#"builtins.filterSource (p: t: false) @fixtures/d_dir"#, "/nix/store/7l371xax8kknhpska4wrmyll1mzlhzvl-d_dir"; "simple directory with dangling symlink: filter everything")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "directory") @fixtures/symlink_to_a_dir"#, "/nix/store/apmdprm8fwl2zrjpbyfcd99zrnhvf47q-symlink_to_a_dir"; "simple symlinked directory with one file: filter directories")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "regular") @fixtures/symlink_to_a_dir"#, "/nix/store/apmdprm8fwl2zrjpbyfcd99zrnhvf47q-symlink_to_a_dir"; "simple symlinked directory with one file: filter file")]
-    #[test_case(r#"builtins.filterSource (p: t: t != "symlink") @fixtures/symlink_to_a_dir"#, "/nix/store/apmdprm8fwl2zrjpbyfcd99zrnhvf47q-symlink_to_a_dir"; "simple symlinked directory with one file: filter symlinks")]
-    #[test_case(r#"builtins.filterSource (p: t: true) @fixtures/symlink_to_a_dir"#, "/nix/store/apmdprm8fwl2zrjpbyfcd99zrnhvf47q-symlink_to_a_dir"; "simple symlinked directory with one file: filter nothing")]
-    #[test_case(r#"builtins.filterSource (p: t: false) @fixtures/symlink_to_a_dir"#, "/nix/store/apmdprm8fwl2zrjpbyfcd99zrnhvf47q-symlink_to_a_dir"; "simple symlinked directory with one file: filter everything")]
-    fn builtins_filter_source_succeed(code: &str, expected_outpath: &str) {
+    #[case::complicated_filter_nothing(
+        r#"(builtins.filterSource (p: t: true) @fixtures)"#,
+        "/nix/store/bqh6kd0x3vps2rzagzpl7qmbbgnx19cp-import_fixtures"
+    )]
+    #[case::complicated_filter_everything(
+        r#"(builtins.filterSource (p: t: false) @fixtures)"#,
+        "/nix/store/giq6czz24lpjg97xxcxk6rg950lcpib1-import_fixtures"
+    )]
+    #[case::simple_dir_with_one_file_filter_dirs(
+        r#"(builtins.filterSource (p: t: t != "directory") @fixtures/a_dir)"#,
+        "/nix/store/8vbqaxapywkvv1hacdja3pi075r14d43-a_dir"
+    )]
+    #[case::simple_dir_with_one_file_filter_files(
+        r#"(builtins.filterSource (p: t: t != "regular") @fixtures/a_dir)"#,
+        "/nix/store/zphlqc93s2iq4xm393l06hzf8hp85r4z-a_dir"
+    )]
+    #[case::simple_dir_with_one_file_filter_symlinks(
+        r#"(builtins.filterSource (p: t: t != "symlink") @fixtures/a_dir)"#,
+        "/nix/store/8vbqaxapywkvv1hacdja3pi075r14d43-a_dir"
+    )]
+    #[case::simple_dir_with_one_file_filter_nothing(
+        r#"(builtins.filterSource (p: t: true) @fixtures/a_dir)"#,
+        "/nix/store/8vbqaxapywkvv1hacdja3pi075r14d43-a_dir"
+    )]
+    #[case::simple_dir_with_one_file_filter_everything(
+        r#"(builtins.filterSource (p: t: false) @fixtures/a_dir)"#,
+        "/nix/store/zphlqc93s2iq4xm393l06hzf8hp85r4z-a_dir"
+    )]
+    #[case::simple_dir_with_one_dir_filter_dirs(
+        r#"builtins.filterSource (p: t: t != "directory") @fixtures/b_dir"#,
+        "/nix/store/xzsfzdgrxg93icaamjm8zq1jq6xvf2fz-b_dir"
+    )]
+    #[case::simple_dir_with_one_dir_filter_files(
+        r#"builtins.filterSource (p: t: t != "regular") @fixtures/b_dir"#,
+        "/nix/store/8rjx64mm7173xp60rahv7cl3ixfkv3rf-b_dir"
+    )]
+    #[case::simple_dir_with_one_dir_filter_symlinks(
+        r#"builtins.filterSource (p: t: t != "symlink") @fixtures/b_dir"#,
+        "/nix/store/8rjx64mm7173xp60rahv7cl3ixfkv3rf-b_dir"
+    )]
+    #[case::simple_dir_with_one_dir_filter_nothing(
+        r#"builtins.filterSource (p: t: true) @fixtures/b_dir"#,
+        "/nix/store/8rjx64mm7173xp60rahv7cl3ixfkv3rf-b_dir"
+    )]
+    #[case::simple_dir_with_one_dir_filter_everything(
+        r#"builtins.filterSource (p: t: false) @fixtures/b_dir"#,
+        "/nix/store/xzsfzdgrxg93icaamjm8zq1jq6xvf2fz-b_dir"
+    )]
+    #[case::simple_dir_with_one_symlink_to_file_filter_dirs(
+        r#"builtins.filterSource (p: t: t != "directory") @fixtures/c_dir"#,
+        "/nix/store/riigfmmzzrq65zqiffcjk5sbqr9c9h09-c_dir"
+    )]
+    #[case::simple_dir_with_one_symlink_to_file_filter_files(
+        r#"builtins.filterSource (p: t: t != "regular") @fixtures/c_dir"#,
+        "/nix/store/riigfmmzzrq65zqiffcjk5sbqr9c9h09-c_dir"
+    )]
+    #[case::simple_dir_with_one_symlink_to_file_filter_symlinks(
+        r#"builtins.filterSource (p: t: t != "symlink") @fixtures/c_dir"#,
+        "/nix/store/y5g1fz04vzjvf422q92qmv532axj5q26-c_dir"
+    )]
+    #[case::simple_dir_with_one_symlink_to_file_filter_nothing(
+        r#"builtins.filterSource (p: t: true) @fixtures/c_dir"#,
+        "/nix/store/riigfmmzzrq65zqiffcjk5sbqr9c9h09-c_dir"
+    )]
+    #[case::simple_dir_with_one_symlink_to_file_filter_everything(
+        r#"builtins.filterSource (p: t: false) @fixtures/c_dir"#,
+        "/nix/store/y5g1fz04vzjvf422q92qmv532axj5q26-c_dir"
+    )]
+    #[case::simple_dir_with_dangling_symlink_filter_dirs(
+        r#"builtins.filterSource (p: t: t != "directory") @fixtures/d_dir"#,
+        "/nix/store/f2d1aixwiqy4lbzrd040ala2s4m2z199-d_dir"
+    )]
+    #[case::simple_dir_with_dangling_symlink_filter_files(
+        r#"builtins.filterSource (p: t: t != "regular") @fixtures/d_dir"#,
+        "/nix/store/f2d1aixwiqy4lbzrd040ala2s4m2z199-d_dir"
+    )]
+    #[case::simple_dir_with_dangling_symlink_filter_symlinks(
+        r#"builtins.filterSource (p: t: t != "symlink") @fixtures/d_dir"#,
+        "/nix/store/7l371xax8kknhpska4wrmyll1mzlhzvl-d_dir"
+    )]
+    #[case::simple_dir_with_dangling_symlink_filter_nothing(
+        r#"builtins.filterSource (p: t: true) @fixtures/d_dir"#,
+        "/nix/store/f2d1aixwiqy4lbzrd040ala2s4m2z199-d_dir"
+    )]
+    #[case::simple_dir_with_dangling_symlink_filter_everything(
+        r#"builtins.filterSource (p: t: false) @fixtures/d_dir"#,
+        "/nix/store/7l371xax8kknhpska4wrmyll1mzlhzvl-d_dir"
+    )]
+    #[case::simple_symlinked_dir_with_one_file_filter_dirs(
+        r#"builtins.filterSource (p: t: t != "directory") @fixtures/symlink_to_a_dir"#,
+        "/nix/store/apmdprm8fwl2zrjpbyfcd99zrnhvf47q-symlink_to_a_dir"
+    )]
+    #[case::simple_symlinked_dir_with_one_file_filter_files(
+        r#"builtins.filterSource (p: t: t != "regular") @fixtures/symlink_to_a_dir"#,
+        "/nix/store/apmdprm8fwl2zrjpbyfcd99zrnhvf47q-symlink_to_a_dir"
+    )]
+    #[case::simple_symlinked_dir_with_one_file_filter_symlinks(
+        r#"builtins.filterSource (p: t: t != "symlink") @fixtures/symlink_to_a_dir"#,
+        "/nix/store/apmdprm8fwl2zrjpbyfcd99zrnhvf47q-symlink_to_a_dir"
+    )]
+    #[case::simple_symlinked_dir_with_one_file_filter_nothing(
+        r#"builtins.filterSource (p: t: true) @fixtures/symlink_to_a_dir"#,
+        "/nix/store/apmdprm8fwl2zrjpbyfcd99zrnhvf47q-symlink_to_a_dir"
+    )]
+    #[case::simple_symlinked_dir_with_one_file_filter_everything(
+        r#"builtins.filterSource (p: t: false) @fixtures/symlink_to_a_dir"#,
+        "/nix/store/apmdprm8fwl2zrjpbyfcd99zrnhvf47q-symlink_to_a_dir"
+    )]
+    fn builtins_filter_source_succeed(#[case] code: &str, #[case] expected_outpath: &str) {
         // populate the fixtures dir
         let temp = TempDir::new().expect("create temporary directory");
         let p = temp.path().join("import_fixtures");
@@ -440,16 +530,17 @@ mod tests {
     }
 
     // Space is an illegal character.
-    #[test_case(
+    #[rstest]
+    #[case(
         r#"(builtins.path { name = "valid-name"; path = @fixtures + "/te st"; recursive = true; })"#,
         true
     )]
     // Space is still an illegal character.
-    #[test_case(
+    #[case(
         r#"(builtins.path { name = "invalid name"; path = @fixtures + "/te st"; recursive = true; })"#,
         false
     )]
-    fn builtins_path_recursive_rename(code: &str, success: bool) {
+    fn builtins_path_recursive_rename(#[case] code: &str, #[case] success: bool) {
         // populate the fixtures dir
         let temp = TempDir::new().expect("create temporary directory");
         let p = temp.path().join("import_fixtures");
@@ -485,17 +576,18 @@ mod tests {
     }
 
     // Space is an illegal character.
-    #[test_case(
+    #[rstest]
+    #[case(
         r#"(builtins.path { name = "valid-name"; path = @fixtures + "/te st"; recursive = false; })"#,
         true
     )]
     // Space is still an illegal character.
-    #[test_case(
+    #[case(
         r#"(builtins.path { name = "invalid name"; path = @fixtures + "/te st"; recursive = false; })"#,
         false
     )]
     // The non-recursive variant passes explicitly `recursive = false;`
-    fn builtins_path_nonrecursive_rename(code: &str, success: bool) {
+    fn builtins_path_nonrecursive_rename(#[case] code: &str, #[case] success: bool) {
         // populate the fixtures dir
         let temp = TempDir::new().expect("create temporary directory");
         let p = temp.path().join("import_fixtures");
@@ -530,23 +622,24 @@ mod tests {
         }
     }
 
-    #[test_case(
+    #[rstest]
+    #[case(
         r#"(builtins.path { name = "valid-name"; path = @fixtures + "/te st"; recursive = false; sha256 = "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU="; })"#,
         true
     )]
-    #[test_case(
+    #[case(
         r#"(builtins.path { name = "valid-name"; path = @fixtures + "/te st"; recursive = true; sha256 = "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU="; })"#,
         false
     )]
-    #[test_case(
+    #[case(
         r#"(builtins.path { name = "valid-name"; path = @fixtures + "/te st"; recursive = true; sha256 = "sha256-d6xi4mKdjkX2JFicDIv5niSzpyI0m/Hnm8GGAIU04kY="; })"#,
         true
     )]
-    #[test_case(
+    #[case(
         r#"(builtins.path { name = "valid-name"; path = @fixtures + "/te st"; recursive = false; sha256 = "sha256-d6xi4mKdjkX2JFicDIv5niSzpyI0m/Hnm8GGAIU04kY="; })"#,
         false
     )]
-    fn builtins_path_fod_locking(code: &str, success: bool) {
+    fn builtins_path_fod_locking(#[case] code: &str, #[case] exp_success: bool) {
         // populate the fixtures dir
         let temp = TempDir::new().expect("create temporary directory");
         let p = temp.path().join("import_fixtures");
@@ -566,7 +659,7 @@ mod tests {
 
         let value = eval_result.value;
 
-        if success {
+        if exp_success {
             assert!(
                 value.is_some(),
                 "expected successful evaluation on legal rename and valid FOD sha256"
@@ -576,15 +669,16 @@ mod tests {
         }
     }
 
-    #[test_case(
+    #[rstest]
+    #[case(
         r#"(builtins.path { name = "valid-path"; path = @fixtures + "/te st dir"; filter = _: _: true; })"#,
         "/nix/store/i28jmi4fwym4fw3flkrkp2mdxx50pdy0-valid-path"
     )]
-    #[test_case(
+    #[case(
         r#"(builtins.path { name = "valid-path"; path = @fixtures + "/te st dir"; filter = _: _: false; })"#,
         "/nix/store/pwza2ij9gk1fmzhbjnynmfv2mq2sgcap-valid-path"
     )]
-    fn builtins_path_filter(code: &str, expected_outpath: &str) {
+    fn builtins_path_filter(#[case] code: &str, #[case] expected_outpath: &str) {
         // populate the fixtures dir
         let temp = TempDir::new().expect("create temporary directory");
         let p = temp.path().join("import_fixtures");
@@ -617,18 +711,36 @@ mod tests {
 
     // All tests filter out some unsupported (not representable in castore) nodes,
     // confirming that invalid but filtered-out nodes don't prevent ingestion of a path.
+    #[rstest]
     #[cfg(target_family = "unix")]
     // There is a set of invalid filetypes.
-    // We write a filter function for most subsets, excluding one that filters all of them.
-    // We expect these cases to make the evaluation fail as there are still invalid files present
-    // after the filtering.
-    #[test_case(r#"(builtins.filterSource (p: t: t == "unknown") @fixtures)"#, false; "complicated directory: filter unsupported types")]
-    #[test_case(r#"(builtins.filterSource (p: t: (builtins.baseNameOf p) != "a_charnode") @fixtures)"#, false; "complicated directory: filter character device nodes")]
-    #[test_case(r#"(builtins.filterSource (p: t: (builtins.baseNameOf p) != "a_socket") @fixtures)"#, false; "complicated directory: filter sockets")]
-    #[test_case(r#"(builtins.filterSource (p: t: (builtins.baseNameOf p) != "a_fifo") @fixtures)"#, false; "complicated directory: filter FIFOs")]
+    // We write various filter functions filtering them out, but usually leaving
+    // some behind.
+    // In case there are still invalid filetypes left after the filtering, we
+    // expect the evaluation to fail.
+    #[case::fail_kept_unknowns(
+        r#"(builtins.filterSource (p: t: t == "unknown") @fixtures)"#,
+        false
+    )]
     // We filter all invalid filetypes, so the evaluation has to succeed.
-    #[test_case(r#"(builtins.filterSource (p: t: t != "unknown") @fixtures)"#, true; "complicated directory: filter out unsupported types")]
-    fn builtins_filter_source_unsupported_files(code: &str, success: bool) {
+    #[case::succeed_filter_unknowns(
+        r#"(builtins.filterSource (p: t: t != "unknown") @fixtures)"#,
+        true
+    )]
+    #[case::fail_kept_charnode(
+        r#"(builtins.filterSource (p: t: (builtins.baseNameOf p) != "a_charnode") @fixtures)"#,
+        false
+    )]
+    #[case::fail_kept_socket(
+        r#"(builtins.filterSource (p: t: (builtins.baseNameOf p) != "a_socket") @fixtures)"#,
+        false
+    )]
+    #[case::fail_kept_fifo(
+        r#"(builtins.filterSource (p: t: (builtins.baseNameOf p) != "a_fifo") @fixtures)"#,
+        false
+    )]
+    fn builtins_filter_source_unsupported_files(#[case] code: &str, #[case] exp_success: bool) {
+        use nix::errno::Errno;
         use nix::sys::stat;
         use nix::unistd;
         use std::os::unix::net::UnixListener;
@@ -655,12 +767,21 @@ mod tests {
             stat::Mode::S_IRWXU,
             0,
         )
+        .inspect_err(|e| {
+            if *e == Errno::EPERM {
+                eprintln!(
+                    "\
+Missing permissions to create a character device node with mknod(2).
+Please run this test as root or set CAP_MKNOD."
+                );
+            }
+        })
         .expect("Failed to create a character device node");
 
         let code_replaced = code.replace("@fixtures", &temp.path().to_string_lossy());
         let eval_result = eval(&code_replaced);
 
-        if success {
+        if exp_success {
             assert!(
                 eval_result.value.is_some(),
                 "unexpected failure on a directory of unsupported file types but all filtered: {:?}",
diff --git a/tvix/glue/src/decompression.rs b/tvix/glue/src/fetchers/decompression.rs
index 7e526932e7..f96fa60e34 100644
--- a/tvix/glue/src/decompression.rs
+++ b/tvix/glue/src/fetchers/decompression.rs
@@ -183,7 +183,7 @@ mod tests {
 
     use async_compression::tokio::bufread::GzipEncoder;
     use futures::TryStreamExt;
-    use test_case::test_case;
+    use rstest::rstest;
     use tokio::io::{AsyncReadExt, BufReader};
     use tokio_tar::Archive;
 
@@ -203,11 +203,12 @@ mod tests {
         assert_eq!(data[..], round_tripped[..]);
     }
 
-    #[test_case(include_bytes!("tests/blob.tar.gz"); "gzip")]
-    #[test_case(include_bytes!("tests/blob.tar.bz2"); "bzip2")]
-    #[test_case(include_bytes!("tests/blob.tar.xz"); "xz")]
+    #[rstest]
+    #[case::gzip(include_bytes!("../tests/blob.tar.gz"))]
+    #[case::bzip2(include_bytes!("../tests/blob.tar.bz2"))]
+    #[case::xz(include_bytes!("../tests/blob.tar.xz"))]
     #[tokio::test]
-    async fn compressed_tar(data: &[u8]) {
+    async fn compressed_tar(#[case] data: &[u8]) {
         let reader = DecompressedReader::new(BufReader::new(data));
         let mut archive = Archive::new(reader);
         let mut entries: Vec<_> = archive.entries().unwrap().try_collect().await.unwrap();
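
These tests rely on DecompressedReader transparently detecting the compression
format; the type itself is not shown in this hunk. As a simplified illustration of
the general technique (not the actual tvix implementation), such readers usually
tell the formats apart by the leading magic bytes of the stream:

// Simplified sketch, not the actual DecompressedReader.
fn sniff_compression(header: &[u8]) -> &'static str {
    match header {
        [0x1f, 0x8b, ..] => "gzip",                       // RFC 1952 magic
        [b'B', b'Z', b'h', ..] => "bzip2",                // "BZh"
        [0xfd, b'7', b'z', b'X', b'Z', 0x00, ..] => "xz", // "\xfd7zXZ\0"
        _ => "none",
    }
}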
diff --git a/tvix/glue/src/fetchers/mod.rs b/tvix/glue/src/fetchers/mod.rs
new file mode 100644
index 0000000000..1b2e1ee20c
--- /dev/null
+++ b/tvix/glue/src/fetchers/mod.rs
@@ -0,0 +1,453 @@
+use futures::TryStreamExt;
+use md5::Md5;
+use nix_compat::{
+    nixhash::{CAHash, HashAlgo, NixHash},
+    store_path::{build_ca_path, BuildStorePathError, StorePathRef},
+};
+use sha1::Sha1;
+use sha2::{digest::Output, Digest, Sha256, Sha512};
+use tokio::io::{AsyncBufRead, AsyncRead, AsyncWrite};
+use tokio_util::io::InspectReader;
+use tracing::warn;
+use tvix_castore::{
+    blobservice::BlobService,
+    directoryservice::DirectoryService,
+    proto::{node::Node, FileNode},
+};
+use tvix_store::{nar::NarCalculationService, pathinfoservice::PathInfoService, proto::PathInfo};
+use url::Url;
+
+use crate::builtins::FetcherError;
+
+mod decompression;
+use decompression::DecompressedReader;
+
+/// Represents the options for doing a fetch.
+#[derive(Clone, Eq, PartialEq)]
+pub enum Fetch {
+    /// Fetch a literal file from the given URL, with an optional expected
+    /// NixHash of it.
+    /// TODO: check if this is *always* sha256, and if so, make it [u8; 32].
+    URL(Url, Option<NixHash>),
+
+    /// Fetch a tarball from the given URL and unpack.
+    /// The file must be a tape archive (.tar), optionally compressed with gzip,
+    /// bzip2 or xz.
+    /// The top-level path component of the files in the tarball is removed,
+    /// so it is best if the tarball contains a single directory at top level.
+    /// Optionally, a sha256 digest can be provided to verify the unpacked
+    /// contents against.
+    Tarball(Url, Option<[u8; 32]>),
+
+    /// TODO
+    Git(),
+}
+
+// Drops potentially sensitive username and password from a URL.
+fn redact_url(url: &Url) -> Url {
+    let mut url = url.to_owned();
+    if !url.username().is_empty() {
+        let _ = url.set_username("redacted");
+    }
+
+    if url.password().is_some() {
+        let _ = url.set_password(Some("redacted"));
+    }
+
+    url
+}
+
+impl std::fmt::Debug for Fetch {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Fetch::URL(url, exp_hash) => {
+                let url = redact_url(url);
+                if let Some(exp_hash) = exp_hash {
+                    write!(f, "URL [url: {}, exp_hash: Some({})]", &url, exp_hash)
+                } else {
+                    write!(f, "URL [url: {}, exp_hash: None]", &url)
+                }
+            }
+            Fetch::Tarball(url, exp_digest) => {
+                let url = redact_url(url);
+                if let Some(exp_digest) = exp_digest {
+                    write!(
+                        f,
+                        "Tarball [url: {}, exp_hash: Some({})]",
+                        url,
+                        NixHash::Sha256(*exp_digest)
+                    )
+                } else {
+                    write!(f, "Tarball [url: {}, exp_hash: None]", url)
+                }
+            }
+            Fetch::Git() => todo!(),
+        }
+    }
+}
+
+impl Fetch {
+    /// If the [Fetch] contains an expected hash upfront, returns the resulting
+    /// store path.
+    /// This doesn't do any fetching.
+    pub fn store_path<'a>(
+        &self,
+        name: &'a str,
+    ) -> Result<Option<StorePathRef<'a>>, BuildStorePathError> {
+        let ca_hash = match self {
+            Fetch::URL(_, Some(nixhash)) => CAHash::Flat(nixhash.clone()),
+            Fetch::Tarball(_, Some(nar_sha256)) => CAHash::Nar(NixHash::Sha256(*nar_sha256)),
+            _ => return Ok(None),
+        };
+
+        // calculate the store path of this fetch
+        build_ca_path(name, &ca_hash, Vec::<String>::new(), false).map(Some)
+    }
+}
+
+/// Knows how to fetch a given [Fetch].
+pub struct Fetcher<BS, DS, PS, NS> {
+    http_client: reqwest::Client,
+    blob_service: BS,
+    directory_service: DS,
+    path_info_service: PS,
+    nar_calculation_service: NS,
+}
+
+impl<BS, DS, PS, NS> Fetcher<BS, DS, PS, NS> {
+    pub fn new(
+        blob_service: BS,
+        directory_service: DS,
+        path_info_service: PS,
+        nar_calculation_service: NS,
+    ) -> Self {
+        Self {
+            http_client: reqwest::Client::new(),
+            blob_service,
+            directory_service,
+            path_info_service,
+            nar_calculation_service,
+        }
+    }
+
+    /// Constructs an HTTP request to the passed URL, and returns an AsyncBufRead reading from its body.
+    /// In case the URL uses the file:// scheme, tokio::fs is used to open it instead.
+    async fn download(&self, url: Url) -> Result<Box<dyn AsyncBufRead + Unpin>, FetcherError> {
+        match url.scheme() {
+            "file" => {
+                let f = tokio::fs::File::open(url.to_file_path().map_err(|_| {
+                    // "Returns Err if the host is neither empty nor "localhost"
+                    // (except on Windows, where file: URLs may have a non-local host)"
+                    FetcherError::Io(std::io::Error::new(
+                        std::io::ErrorKind::Other,
+                        "invalid host for file:// scheme",
+                    ))
+                })?)
+                .await?;
+                Ok(Box::new(tokio::io::BufReader::new(f)))
+            }
+            _ => {
+                let resp = self.http_client.get(url).send().await?;
+                Ok(Box::new(tokio_util::io::StreamReader::new(
+                    resp.bytes_stream().map_err(|e| {
+                        let e = e.without_url();
+                        warn!(%e, "failed to get response body");
+                        std::io::Error::new(std::io::ErrorKind::BrokenPipe, e)
+                    }),
+                )))
+            }
+        }
+    }
+}
+
+/// Copies all data from the passed reader to the passed writer.
+/// Afterwards, it returns the resulting [Digest], as well as the number of
+/// bytes copied.
+/// The exact hash function used is generic over all [Digest] implementations.
+async fn hash<D: Digest + std::io::Write>(
+    mut r: impl AsyncRead + Unpin,
+    mut w: impl AsyncWrite + Unpin,
+) -> std::io::Result<(Output<D>, u64)> {
+    let mut hasher = D::new();
+    let bytes_copied = tokio::io::copy(
+        &mut InspectReader::new(&mut r, |d| hasher.write_all(d).unwrap()),
+        &mut w,
+    )
+    .await?;
+    Ok((hasher.finalize(), bytes_copied))
+}
+
+impl<BS, DS, PS, NS> Fetcher<BS, DS, PS, NS>
+where
+    BS: BlobService + Clone + 'static,
+    DS: DirectoryService + Clone,
+    PS: PathInfoService,
+    NS: NarCalculationService,
+{
+    /// Ingests the data from a specified [Fetch].
+    /// On success, returns the root node, a content digest and length.
+    /// Returns an error if there was a failure during fetching, or the contents
+    /// didn't match the hash previously communicated inside the [Fetch].
+    pub async fn ingest(&self, fetch: Fetch) -> Result<(Node, CAHash, u64), FetcherError> {
+        match fetch {
+            Fetch::URL(url, exp_hash) => {
+                // Construct an AsyncRead reading from the data as it's downloaded.
+                let mut r = self.download(url.clone()).await?;
+
+                // Construct an AsyncWrite to write into the BlobService.
+                let mut blob_writer = self.blob_service.open_write().await;
+
+                // Copy the contents from the download reader to the blob writer.
+                // Calculate the digest of the file received, using the algorithm of
+                // the communicated expected hash (or sha256 if none was provided).
+                let (actual_hash, blob_size) = match exp_hash
+                    .as_ref()
+                    .map(NixHash::algo)
+                    .unwrap_or_else(|| HashAlgo::Sha256)
+                {
+                    HashAlgo::Sha256 => hash::<Sha256>(&mut r, &mut blob_writer).await.map(
+                        |(digest, bytes_written)| (NixHash::Sha256(digest.into()), bytes_written),
+                    )?,
+                    HashAlgo::Md5 => hash::<Md5>(&mut r, &mut blob_writer).await.map(
+                        |(digest, bytes_written)| (NixHash::Md5(digest.into()), bytes_written),
+                    )?,
+                    HashAlgo::Sha1 => hash::<Sha1>(&mut r, &mut blob_writer).await.map(
+                        |(digest, bytes_written)| (NixHash::Sha1(digest.into()), bytes_written),
+                    )?,
+                    HashAlgo::Sha512 => hash::<Sha512>(&mut r, &mut blob_writer).await.map(
+                        |(digest, bytes_written)| {
+                            (NixHash::Sha512(Box::new(digest.into())), bytes_written)
+                        },
+                    )?,
+                };
+
+                if let Some(exp_hash) = exp_hash {
+                    if exp_hash != actual_hash {
+                        return Err(FetcherError::HashMismatch {
+                            url,
+                            wanted: exp_hash,
+                            got: actual_hash,
+                        });
+                    }
+                }
+
+                // Construct and return the FileNode describing the downloaded contents.
+                Ok((
+                    Node::File(FileNode {
+                        name: vec![].into(),
+                        digest: blob_writer.close().await?.into(),
+                        size: blob_size,
+                        executable: false,
+                    }),
+                    CAHash::Flat(actual_hash),
+                    blob_size,
+                ))
+            }
+            Fetch::Tarball(url, exp_nar_sha256) => {
+                // Construct an AsyncRead reading from the data as it's downloaded.
+                let r = self.download(url.clone()).await?;
+
+                // Pop compression.
+                let r = DecompressedReader::new(r);
+                // Open the archive.
+                let archive = tokio_tar::Archive::new(r);
+
+                // Ingest the archive, get the root node
+                let node = tvix_castore::import::archive::ingest_archive(
+                    self.blob_service.clone(),
+                    self.directory_service.clone(),
+                    archive,
+                )
+                .await?;
+
+                // If an expected NAR sha256 was provided, compare with the one
+                // calculated from our root node.
+                // Even if no expected NAR sha256 has been provided, we need
+                // the actual one later.
+                let (nar_size, actual_nar_sha256) = self
+                    .nar_calculation_service
+                    .calculate_nar(&node)
+                    .await
+                    .map_err(|e| {
+                        // convert the generic Store error to an IO error.
+                        FetcherError::Io(e.into())
+                    })?;
+
+                if let Some(exp_nar_sha256) = exp_nar_sha256 {
+                    if exp_nar_sha256 != actual_nar_sha256 {
+                        return Err(FetcherError::HashMismatch {
+                            url,
+                            wanted: NixHash::Sha256(exp_nar_sha256),
+                            got: NixHash::Sha256(actual_nar_sha256),
+                        });
+                    }
+                }
+
+                Ok((
+                    node,
+                    CAHash::Nar(NixHash::Sha256(actual_nar_sha256)),
+                    nar_size,
+                ))
+            }
+            Fetch::Git() => todo!(),
+        }
+    }
+
+    /// Ingests the data from a specified [Fetch], persists the returned node
+    /// in the PathInfoService, and returns the calculated StorePath, as well as
+    /// the root node pointing to the contents.
+    /// The root node can be used to descend into the data without doing another
+    /// lookup in the PathInfoService.
+    pub async fn ingest_and_persist<'a>(
+        &self,
+        name: &'a str,
+        fetch: Fetch,
+    ) -> Result<(StorePathRef<'a>, Node), FetcherError> {
+        // Fetch file, return the (unnamed) (File)Node of its contents, ca hash and filesize.
+        let (node, ca_hash, size) = self.ingest(fetch).await?;
+
+        // Calculate the store path to return later, which is done with the ca_hash.
+        let store_path = build_ca_path(name, &ca_hash, Vec::<String>::new(), false)?;
+
+        // Rename the node name to match the Store Path.
+        let node = node.rename(store_path.to_string().into());
+
+        // If the resulting hash is not a CAHash::Nar, we also need to invoke
+        // `calculate_nar` to calculate this representation, as it's required in
+        // the [PathInfo].
+        let (nar_size, nar_sha256) = match &ca_hash {
+            CAHash::Flat(_nix_hash) => self
+                .nar_calculation_service
+                .calculate_nar(&node)
+                .await
+                .map_err(|e| FetcherError::Io(e.into()))?,
+            CAHash::Nar(NixHash::Sha256(nar_sha256)) => (size, *nar_sha256),
+            CAHash::Nar(_) => unreachable!("Tvix bug: fetch returned non-sha256 CAHash::Nar"),
+            CAHash::Text(_) => unreachable!("Tvix bug: fetch returned CAHash::Text"),
+        };
+
+        // Construct the PathInfo and persist it.
+        let path_info = PathInfo {
+            node: Some(tvix_castore::proto::Node { node: Some(node) }),
+            references: vec![],
+            narinfo: Some(tvix_store::proto::NarInfo {
+                nar_size,
+                nar_sha256: nar_sha256.to_vec().into(),
+                signatures: vec![],
+                reference_names: vec![],
+                deriver: None,
+                ca: Some(ca_hash.into()),
+            }),
+        };
+
+        let path_info = self
+            .path_info_service
+            .put(path_info)
+            .await
+            .map_err(|e| FetcherError::Io(e.into()))?;
+
+        Ok((store_path, path_info.node.unwrap().node.unwrap()))
+    }
+}
+
+/// Attempts to mimic `nix::libutil::baseNameOf`
+pub(crate) fn url_basename(s: &str) -> &str {
+    if s.is_empty() {
+        return "";
+    }
+
+    let mut last = s.len() - 1;
+    if s.chars().nth(last).unwrap() == '/' && last > 0 {
+        last -= 1;
+    }
+
+    if last == 0 {
+        return "";
+    }
+
+    let pos = match s[..=last].rfind('/') {
+        Some(pos) => {
+            if pos == last - 1 {
+                0
+            } else {
+                pos
+            }
+        }
+        None => 0,
+    };
+
+    &s[(pos + 1)..=last]
+}
+
+#[cfg(test)]
+mod tests {
+    mod fetch {
+        use nix_compat::nixbase32;
+
+        use crate::fetchers::Fetch;
+
+        use super::super::*;
+
+        #[test]
+        fn fetchurl_store_path() {
+            let url = Url::parse("https://raw.githubusercontent.com/aaptel/notmuch-extract-patch/f732a53e12a7c91a06755ebfab2007adc9b3063b/notmuch-extract-patch").unwrap();
+            let exp_hash = NixHash::Sha256(
+                nixbase32::decode_fixed("0nawkl04sj7psw6ikzay7kydj3dhd0fkwghcsf5rzaw4bmp4kbax")
+                    .unwrap(),
+            );
+
+            let fetch = Fetch::URL(url, Some(exp_hash));
+            assert_eq!(
+                "06qi00hylriyfm0nl827crgjvbax84mz-notmuch-extract-patch",
+                &fetch
+                    .store_path("notmuch-extract-patch")
+                    .unwrap()
+                    .unwrap()
+                    .to_string(),
+            )
+        }
+
+        #[test]
+        fn fetch_tarball_store_path() {
+            let url = Url::parse("https://github.com/NixOS/nixpkgs/archive/91050ea1e57e50388fa87a3302ba12d188ef723a.tar.gz").unwrap();
+            let exp_nixbase32 =
+                nixbase32::decode_fixed("1hf6cgaci1n186kkkjq106ryf8mmlq9vnwgfwh625wa8hfgdn4dm")
+                    .unwrap();
+            let fetch = Fetch::Tarball(url, Some(exp_nixbase32));
+
+            assert_eq!(
+                "7adgvk5zdfq4pwrhsm3n9lzypb12gw0g-source",
+                &fetch.store_path("source").unwrap().unwrap().to_string(),
+            )
+        }
+    }
+
+    mod url_basename {
+        use super::super::*;
+
+        #[test]
+        fn empty_path() {
+            assert_eq!(url_basename(""), "");
+        }
+
+        #[test]
+        fn path_on_root() {
+            assert_eq!(url_basename("/dir"), "dir");
+        }
+
+        #[test]
+        fn relative_path() {
+            assert_eq!(url_basename("dir/foo"), "foo");
+        }
+
+        #[test]
+        fn root_with_trailing_slash() {
+            assert_eq!(url_basename("/"), "");
+        }
+
+        #[test]
+        fn trailing_slash() {
+            assert_eq!(url_basename("/dir/"), "dir");
+        }
+    }
+}
diff --git a/tvix/glue/src/known_paths.rs b/tvix/glue/src/known_paths.rs
index 9cd9470fa9..290c9d5b69 100644
--- a/tvix/glue/src/known_paths.rs
+++ b/tvix/glue/src/known_paths.rs
@@ -8,9 +8,14 @@
 //! This data is required to find the derivation needed to actually trigger the
 //! build, if necessary.
 
-use nix_compat::{derivation::Derivation, store_path::StorePath};
+use nix_compat::{
+    derivation::Derivation,
+    store_path::{BuildStorePathError, StorePath, StorePathRef},
+};
 use std::collections::HashMap;
 
+use crate::fetchers::Fetch;
+
 /// Struct keeping track of all known Derivations in the current evaluation.
 /// This keeps both the Derivation struct, as well as the "Hash derivation
 /// modulo".
@@ -26,6 +31,9 @@ pub struct KnownPaths {
     /// Note that in the case of FODs, multiple drvs can produce the same output
     /// path. We use one of them.
     outputs_to_drvpath: HashMap<StorePath, StorePath>,
+
+    /// A map from output path to fetches (and their names).
+    outputs_to_fetches: HashMap<StorePath, (String, Fetch)>,
 }
 
 impl KnownPaths {
@@ -50,12 +58,12 @@ impl KnownPaths {
         self.outputs_to_drvpath.get(output_path)
     }
 
-    /// Insert a new Derivation into this struct.
+    /// Insert a new [Derivation] into this struct.
     /// The Derivation struct must pass validation, and its output paths need to
     /// be fully calculated.
     /// All input derivations this refers to must also be inserted to this
     /// struct.
-    pub fn add(&mut self, drv_path: StorePath, drv: Derivation) {
+    pub fn add_derivation(&mut self, drv_path: StorePath, drv: Derivation) {
         // check input derivations to have been inserted.
         #[cfg(debug_assertions)]
         {
@@ -65,7 +73,7 @@ impl KnownPaths {
         }
 
         // compute the hash derivation modulo
-        let hash_derivation_modulo = drv.derivation_or_fod_hash(|drv_path| {
+        let hash_derivation_modulo = drv.hash_derivation_modulo(|drv_path| {
             self.get_hash_derivation_modulo(&drv_path.to_owned())
                 .unwrap_or_else(|| panic!("{} not found", drv_path))
                 .to_owned()
@@ -95,11 +103,40 @@ impl KnownPaths {
             }
         }
     }
+
+    /// Insert a new [Fetch] into this struct, which *must* have an expected
+    /// hash (otherwise we wouldn't be able to calculate the store path).
+    /// Fetches without a known hash need to be fetched inside builtins.
+    pub fn add_fetch<'a>(
+        &mut self,
+        fetch: Fetch,
+        name: &'a str,
+    ) -> Result<StorePathRef<'a>, BuildStorePathError> {
+        let store_path = fetch
+            .store_path(name)?
+            .expect("Tvix bug: fetch must have an expected hash");
+        // insert the fetch.
+        self.outputs_to_fetches
+            .insert(store_path.to_owned(), (name.to_owned(), fetch));
+
+        Ok(store_path)
+    }
+
+    /// Return the name and fetch producing the passed output path.
+    /// Note there can also be (multiple) Derivations producing the same output path.
+    pub fn get_fetch_for_output_path(&self, output_path: &StorePath) -> Option<(String, Fetch)> {
+        self.outputs_to_fetches
+            .get(output_path)
+            .map(|(name, fetch)| (name.to_owned(), fetch.to_owned()))
+    }
 }
 
 #[cfg(test)]
 mod tests {
-    use nix_compat::{derivation::Derivation, store_path::StorePath};
+    use nix_compat::{derivation::Derivation, nixbase32, nixhash::NixHash, store_path::StorePath};
+    use url::Url;
+
+    use crate::fetchers::Fetch;
 
     use super::KnownPaths;
     use hex_literal::hex;
@@ -122,21 +159,33 @@ mod tests {
             StorePath::from_bytes(b"mp57d33657rf34lzvlbpfa1gjfv5gmpg-bar").expect("must parse");
         static ref FOO_OUT_PATH: StorePath =
             StorePath::from_bytes(b"fhaj6gmwns62s6ypkcldbaj2ybvkhx3p-foo").expect("must parse");
+
+        static ref FETCH_URL : Fetch = Fetch::URL(
+            Url::parse("https://raw.githubusercontent.com/aaptel/notmuch-extract-patch/f732a53e12a7c91a06755ebfab2007adc9b3063b/notmuch-extract-patch").unwrap(),
+            Some(NixHash::Sha256(nixbase32::decode_fixed("0nawkl04sj7psw6ikzay7kydj3dhd0fkwghcsf5rzaw4bmp4kbax").unwrap()))
+        );
+        static ref FETCH_URL_OUT_PATH: StorePath = StorePath::from_bytes(b"06qi00hylriyfm0nl827crgjvbax84mz-notmuch-extract-patch").unwrap();
+
+        static ref FETCH_TARBALL : Fetch = Fetch::Tarball(
+            Url::parse("https://github.com/NixOS/nixpkgs/archive/91050ea1e57e50388fa87a3302ba12d188ef723a.tar.gz").unwrap(),
+            Some(nixbase32::decode_fixed("1hf6cgaci1n186kkkjq106ryf8mmlq9vnwgfwh625wa8hfgdn4dm").unwrap())
+        );
+        static ref FETCH_TARBALL_OUT_PATH: StorePath = StorePath::from_bytes(b"7adgvk5zdfq4pwrhsm3n9lzypb12gw0g-source").unwrap();
     }
 
     /// ensure we don't allow adding a Derivation that depends on another,
     /// not-yet-added Derivation.
     #[test]
     #[should_panic]
-    fn reject_if_missing_input_drv() {
+    fn drv_reject_if_missing_input_drv() {
         let mut known_paths = KnownPaths::default();
 
         // FOO_DRV depends on BAR_DRV, which wasn't added.
-        known_paths.add(FOO_DRV_PATH.clone(), FOO_DRV.clone());
+        known_paths.add_derivation(FOO_DRV_PATH.clone(), FOO_DRV.clone());
     }
 
     #[test]
-    fn happy_path() {
+    fn drv_happy_path() {
         let mut known_paths = KnownPaths::default();
 
         // get_drv_by_drvpath should return None for non-existing Derivations,
@@ -149,7 +198,7 @@ mod tests {
         );
 
         // Add BAR_DRV
-        known_paths.add(BAR_DRV_PATH.clone(), BAR_DRV.clone());
+        known_paths.add_derivation(BAR_DRV_PATH.clone(), BAR_DRV.clone());
 
         // We should get it back
         assert_eq!(
@@ -173,7 +222,7 @@ mod tests {
 
         // Now insert FOO_DRV too. It shouldn't panic, as BAR_DRV is already
         // added.
-        known_paths.add(FOO_DRV_PATH.clone(), FOO_DRV.clone());
+        known_paths.add_derivation(FOO_DRV_PATH.clone(), FOO_DRV.clone());
 
         assert_eq!(
             Some(&FOO_DRV.clone()),
@@ -192,4 +241,49 @@ mod tests {
             known_paths.get_drv_path_for_output_path(&FOO_OUT_PATH)
         );
     }
+
+    #[test]
+    fn fetch_happy_path() {
+        let mut known_paths = KnownPaths::default();
+
+        // get_fetch_for_output_path should return None for new fetches.
+        assert!(known_paths
+            .get_fetch_for_output_path(&FETCH_TARBALL_OUT_PATH)
+            .is_none());
+
+        // add_fetch should return the properly calculated store paths.
+        assert_eq!(
+            *FETCH_TARBALL_OUT_PATH,
+            known_paths
+                .add_fetch(FETCH_TARBALL.clone(), "source")
+                .unwrap()
+                .to_owned()
+        );
+
+        assert_eq!(
+            *FETCH_URL_OUT_PATH,
+            known_paths
+                .add_fetch(FETCH_URL.clone(), "notmuch-extract-patch")
+                .unwrap()
+                .to_owned()
+        );
+
+        // We should be able to get these fetches out, when asking for their out path.
+        let (got_name, got_fetch) = known_paths
+            .get_fetch_for_output_path(&FETCH_URL_OUT_PATH)
+            .expect("must be some");
+
+        assert_eq!("notmuch-extract-patch", got_name);
+        assert_eq!(FETCH_URL.clone(), got_fetch);
+
+        // … multiple times.
+        let (got_name, got_fetch) = known_paths
+            .get_fetch_for_output_path(&FETCH_URL_OUT_PATH)
+            .expect("must be some");
+
+        assert_eq!("notmuch-extract-patch", got_name);
+        assert_eq!(FETCH_URL.clone(), got_fetch);
+    }
+
+    // TODO: add test panicking about missing digest
 }
diff --git a/tvix/glue/src/lib.rs b/tvix/glue/src/lib.rs
index f04d5ec3a0..2e5a3be103 100644
--- a/tvix/glue/src/lib.rs
+++ b/tvix/glue/src/lib.rs
@@ -1,11 +1,11 @@
 pub mod builtins;
+pub mod fetchers;
 pub mod known_paths;
 pub mod refscan;
 pub mod tvix_build;
 pub mod tvix_io;
 pub mod tvix_store_io;
 
-mod decompression;
 #[cfg(test)]
 mod tests;
 
diff --git a/tvix/glue/src/tests/mod.rs b/tvix/glue/src/tests/mod.rs
index e66f484e3d..9fe0c22270 100644
--- a/tvix/glue/src/tests/mod.rs
+++ b/tvix/glue/src/tests/mod.rs
@@ -3,17 +3,15 @@ use std::{rc::Rc, sync::Arc};
 use pretty_assertions::assert_eq;
 use std::path::PathBuf;
 use tvix_build::buildservice::DummyBuildService;
-use tvix_castore::{
-    blobservice::{BlobService, MemoryBlobService},
-    directoryservice::{DirectoryService, MemoryDirectoryService},
-};
 use tvix_eval::{EvalIO, Value};
-use tvix_store::pathinfoservice::{MemoryPathInfoService, PathInfoService};
+use tvix_store::utils::construct_services;
 
 use rstest::rstest;
 
 use crate::{
     builtins::{add_derivation_builtins, add_fetcher_builtins, add_import_builtins},
+    configure_nix_path,
+    tvix_io::TvixIO,
     tvix_store_io::TvixStoreIO,
 };
 
@@ -34,28 +32,31 @@ fn eval_test(code_path: PathBuf, expect_success: bool) {
         return;
     }
 
-    let blob_service = Arc::new(MemoryBlobService::default()) as Arc<dyn BlobService>;
-    let directory_service =
-        Arc::new(MemoryDirectoryService::default()) as Arc<dyn DirectoryService>;
-    let path_info_service = Box::new(MemoryPathInfoService::new(
-        blob_service.clone(),
-        directory_service.clone(),
-    )) as Box<dyn PathInfoService>;
     let tokio_runtime = tokio::runtime::Runtime::new().unwrap();
+    let (blob_service, directory_service, path_info_service, nar_calculation_service) =
+        tokio_runtime
+            .block_on(async { construct_services("memory://", "memory://", "memory://").await })
+            .unwrap();
 
     let tvix_store_io = Rc::new(TvixStoreIO::new(
         blob_service,
         directory_service,
         path_info_service.into(),
+        nar_calculation_service.into(),
         Arc::new(DummyBuildService::default()),
         tokio_runtime.handle().clone(),
     ));
-    let mut eval = tvix_eval::Evaluation::new(tvix_store_io.clone() as Rc<dyn EvalIO>, true);
+    // Wrap with TvixIO, so <nix/fetchurl.nix can be imported.
+    let mut eval = tvix_eval::Evaluation::new(
+        Box::new(TvixIO::new(tvix_store_io.clone() as Rc<dyn EvalIO>)) as Box<dyn EvalIO>,
+        true,
+    );
 
     eval.strict = true;
     add_derivation_builtins(&mut eval, tvix_store_io.clone());
     add_fetcher_builtins(&mut eval, tvix_store_io.clone());
     add_import_builtins(&mut eval, tvix_store_io.clone());
+    configure_nix_path(&mut eval, &None);
 
     let result = eval.evaluate(code, Some(code_path.clone()));
     let failed = match result.value {
diff --git a/tvix/glue/src/tests/tvix_tests/eval-okay-fetchtarball.exp b/tvix/glue/src/tests/tvix_tests/eval-okay-fetchtarball.exp
new file mode 100644
index 0000000000..c7332c0503
--- /dev/null
+++ b/tvix/glue/src/tests/tvix_tests/eval-okay-fetchtarball.exp
@@ -0,0 +1 @@
+[ /nix/store/7adgvk5zdfq4pwrhsm3n9lzypb12gw0g-source /nix/store/7adgvk5zdfq4pwrhsm3n9lzypb12gw0g-source /nix/store/7adgvk5zdfq4pwrhsm3n9lzypb12gw0g-source /nix/store/7adgvk5zdfq4pwrhsm3n9lzypb12gw0g-source /nix/store/7adgvk5zdfq4pwrhsm3n9lzypb12gw0g-source /nix/store/md9dsn2zwa6aj7zzalvjwwwx82whcyva-some-name ]
diff --git a/tvix/glue/src/tests/tvix_tests/eval-okay-fetchtarball.nix b/tvix/glue/src/tests/tvix_tests/eval-okay-fetchtarball.nix
new file mode 100644
index 0000000000..e454f12444
--- /dev/null
+++ b/tvix/glue/src/tests/tvix_tests/eval-okay-fetchtarball.nix
@@ -0,0 +1,42 @@
+[
+  # (fetchTarball "url") cannot be tested, as that one has to fetch from the
+  # internet to calculate the path.
+
+  # with url and sha256
+  (builtins.fetchTarball {
+    url = "https://github.com/NixOS/nixpkgs/archive/91050ea1e57e50388fa87a3302ba12d188ef723a.tar.gz";
+    sha256 = "1hf6cgaci1n186kkkjq106ryf8mmlq9vnwgfwh625wa8hfgdn4dm";
+  })
+
+  # with url and sha256 (as SRI)
+  (builtins.fetchTarball {
+    url = "https://github.com/NixOS/nixpkgs/archive/91050ea1e57e50388fa87a3302ba12d188ef723a.tar.gz";
+    sha256 = "sha256-tRHbnoNI8SIM5O5xuxOmtSLnswEByzmnQcGGyNRjxsE=";
+  })
+
+  # with another url, it actually doesn't matter (no .gz suffix)
+  (builtins.fetchTarball {
+    url = "https://github.com/NixOS/nixpkgs/archive/91050ea1e57e50388fa87a3302ba12d188ef723a.tar";
+    sha256 = "sha256-tRHbnoNI8SIM5O5xuxOmtSLnswEByzmnQcGGyNRjxsE=";
+  })
+
+  # also with an entirely different url, it doesn't change
+  (builtins.fetchTarball {
+    url = "https://test.example/owo";
+    sha256 = "sha256-tRHbnoNI8SIM5O5xuxOmtSLnswEByzmnQcGGyNRjxsE=";
+  })
+
+  # … because `name` defaults to "source", and that (and the sha256) affects the store path
+  (builtins.fetchTarball {
+    name = "source";
+    url = "https://test.example/owo";
+    sha256 = "sha256-tRHbnoNI8SIM5O5xuxOmtSLnswEByzmnQcGGyNRjxsE=";
+  })
+
+  # … so changing the name changes the store path.
+  (builtins.fetchTarball {
+    name = "some-name";
+    url = "https://test.example/owo";
+    sha256 = "sha256-tRHbnoNI8SIM5O5xuxOmtSLnswEByzmnQcGGyNRjxsE=";
+  })
+]
diff --git a/tvix/glue/src/tests/tvix_tests/eval-okay-fetchurl.exp b/tvix/glue/src/tests/tvix_tests/eval-okay-fetchurl.exp
new file mode 100644
index 0000000000..37a04d577c
--- /dev/null
+++ b/tvix/glue/src/tests/tvix_tests/eval-okay-fetchurl.exp
@@ -0,0 +1 @@
+[ /nix/store/y0r1p1cqmlvm0yqkz3gxvkc1p8kg2sz8-null /nix/store/06qi00hylriyfm0nl827crgjvbax84mz-notmuch-extract-patch /nix/store/06qi00hylriyfm0nl827crgjvbax84mz-notmuch-extract-patch /nix/store/06qi00hylriyfm0nl827crgjvbax84mz-notmuch-extract-patch ]
diff --git a/tvix/glue/src/tests/tvix_tests/eval-okay-fetchurl.nix b/tvix/glue/src/tests/tvix_tests/eval-okay-fetchurl.nix
new file mode 100644
index 0000000000..8a39101525
--- /dev/null
+++ b/tvix/glue/src/tests/tvix_tests/eval-okay-fetchurl.nix
@@ -0,0 +1,25 @@
+[
+  # (fetchurl "url") needs to immediately fetch, but our options without
+  # internet access are fairly limited.
+  # TODO: populate some fixtures at a known location instead.
+  (builtins.fetchurl "file:///dev/null")
+
+  # fetchurl with url and sha256
+  (builtins.fetchurl {
+    url = "https://raw.githubusercontent.com/aaptel/notmuch-extract-patch/f732a53e12a7c91a06755ebfab2007adc9b3063b/notmuch-extract-patch";
+    sha256 = "0nawkl04sj7psw6ikzay7kydj3dhd0fkwghcsf5rzaw4bmp4kbax";
+  })
+
+  # fetchurl with url and sha256 (as SRI)
+  (builtins.fetchurl {
+    url = "https://raw.githubusercontent.com/aaptel/notmuch-extract-patch/f732a53e12a7c91a06755ebfab2007adc9b3063b/notmuch-extract-patch";
+    sha256 = "sha256-Xa1Jbl2Eq5+L0ww+Ph1osA3Z/Dxe/RkN1/dITQCdXFk=";
+  })
+
+  # fetchurl with another url, but same name
+  (builtins.fetchurl {
+    url = "https://test.example/owo";
+    name = "notmuch-extract-patch";
+    sha256 = "sha256-Xa1Jbl2Eq5+L0ww+Ph1osA3Z/Dxe/RkN1/dITQCdXFk=";
+  })
+]
diff --git a/tvix/glue/src/tvix_store_io.rs b/tvix/glue/src/tvix_store_io.rs
index 10a5902785..7b8ef3ff0a 100644
--- a/tvix/glue/src/tvix_store_io.rs
+++ b/tvix/glue/src/tvix_store_io.rs
@@ -1,15 +1,11 @@
 //! This module provides an implementation of EvalIO talking to tvix-store.
 
-use async_recursion::async_recursion;
 use bytes::Bytes;
-use futures::Stream;
 use futures::{StreamExt, TryStreamExt};
 use nix_compat::nixhash::NixHash;
-use nix_compat::store_path::{build_ca_path, StorePathRef};
+use nix_compat::store_path::StorePathRef;
 use nix_compat::{nixhash::CAHash, store_path::StorePath};
 use sha2::{Digest, Sha256};
-use std::marker::Unpin;
-use std::rc::Rc;
 use std::{
     cell::RefCell,
     collections::BTreeSet,
@@ -18,21 +14,22 @@ use std::{
     sync::Arc,
 };
 use tokio_util::io::SyncIoBridge;
-use tracing::{error, instrument, warn, Level};
+use tracing::{error, info, instrument, warn, Level};
 use tvix_build::buildservice::BuildService;
-use tvix_eval::{ErrorKind, EvalIO, FileType, StdIO};
+use tvix_castore::proto::node::Node;
+use tvix_eval::{EvalIO, FileType, StdIO};
+use tvix_store::nar::NarCalculationService;
 use tvix_store::utils::AsyncIoBridge;
-use walkdir::DirEntry;
 
 use tvix_castore::{
     blobservice::BlobService,
     directoryservice::{self, DirectoryService},
-    proto::{node::Node, FileNode, NamedNode},
+    proto::NamedNode,
     B3Digest,
 };
 use tvix_store::{pathinfoservice::PathInfoService, proto::PathInfo};
 
-use crate::builtins::FetcherError;
+use crate::fetchers::Fetcher;
 use crate::known_paths::KnownPaths;
 use crate::tvix_build::derivation_to_build_request;
 
@@ -52,15 +49,26 @@ use crate::tvix_build::derivation_to_build_request;
 /// implementation of "Tvix Store IO" which does not necessarily bring the concept of blob service,
 /// directory service or path info service.
 pub struct TvixStoreIO {
-    blob_service: Arc<dyn BlobService>,
-    directory_service: Arc<dyn DirectoryService>,
-    // This is public so builtins can put PathInfos directly.
+    // This is public so helper functions can interact with the stores directly.
+    pub(crate) blob_service: Arc<dyn BlobService>,
+    pub(crate) directory_service: Arc<dyn DirectoryService>,
     pub(crate) path_info_service: Arc<dyn PathInfoService>,
+    pub(crate) nar_calculation_service: Arc<dyn NarCalculationService>,
+
     std_io: StdIO,
     #[allow(dead_code)]
     build_service: Arc<dyn BuildService>,
     pub(crate) tokio_handle: tokio::runtime::Handle,
-    http_client: reqwest::Client,
+
+    #[allow(clippy::type_complexity)]
+    pub(crate) fetcher: Fetcher<
+        Arc<dyn BlobService>,
+        Arc<dyn DirectoryService>,
+        Arc<dyn PathInfoService>,
+        Arc<dyn NarCalculationService>,
+    >,
+
+    // Paths we know how to produce, by building or fetching.
     pub(crate) known_paths: RefCell<KnownPaths>,
 }
 
@@ -69,17 +77,24 @@ impl TvixStoreIO {
         blob_service: Arc<dyn BlobService>,
         directory_service: Arc<dyn DirectoryService>,
         path_info_service: Arc<dyn PathInfoService>,
+        nar_calculation_service: Arc<dyn NarCalculationService>,
         build_service: Arc<dyn BuildService>,
         tokio_handle: tokio::runtime::Handle,
     ) -> Self {
         Self {
-            blob_service,
-            directory_service,
-            path_info_service,
+            blob_service: blob_service.clone(),
+            directory_service: directory_service.clone(),
+            path_info_service: path_info_service.clone(),
+            nar_calculation_service: nar_calculation_service.clone(),
             std_io: StdIO {},
             build_service,
             tokio_handle,
-            http_client: reqwest::Client::new(),
+            fetcher: Fetcher::new(
+                blob_service,
+                directory_service,
+                path_info_service,
+                nar_calculation_service,
+            ),
             known_paths: Default::default(),
         }
     }
@@ -91,7 +106,6 @@ impl TvixStoreIO {
     ///
     /// In case there is no PathInfo yet, this means we need to build it
     /// (which currently is stubbed out still).
-    #[async_recursion(?Send)]
     #[instrument(skip(self, store_path), fields(store_path=%store_path), ret(level = Level::TRACE), err)]
     async fn store_path_to_node(
         &self,
@@ -121,189 +135,212 @@ impl TvixStoreIO {
             // it for things like <nixpkgs> pointing to a store path.
             // In the future, these things will (need to) have PathInfo.
             None => {
-                // The store path doesn't exist yet, so we need to build it.
-                warn!("triggering build");
-
-                // Look up the derivation for this output path.
-                let (drv_path, drv) = {
-                    let known_paths = self.known_paths.borrow();
-                    match known_paths.get_drv_path_for_output_path(store_path) {
-                        Some(drv_path) => (
-                            drv_path.to_owned(),
-                            known_paths.get_drv_by_drvpath(drv_path).unwrap().to_owned(),
-                        ),
-                        None => {
-                            warn!(store_path=%store_path, "no drv found");
-                            // let StdIO take over
-                            return Ok(None);
-                        }
+                // The store path doesn't exist yet, so we need to fetch or build it.
+                // We check for fetches first, as we might have both native
+                // fetchers and FODs in KnownPaths, and prefer the former.
+
+                let maybe_fetch = self
+                    .known_paths
+                    .borrow()
+                    .get_fetch_for_output_path(store_path);
+
+                match maybe_fetch {
+                    Some((name, fetch)) => {
+                        info!(?fetch, "triggering lazy fetch");
+                        let (sp, root_node) = self
+                            .fetcher
+                            .ingest_and_persist(&name, fetch)
+                            .await
+                            .map_err(|e| {
+                            std::io::Error::new(std::io::ErrorKind::InvalidData, e)
+                        })?;
+
+                        debug_assert_eq!(
+                            sp.to_string(),
+                            store_path.to_string(),
+                            "store path returned from fetcher should match"
+                        );
+
+                        root_node
                     }
-                };
-
-                // derivation_to_build_request needs castore nodes for all inputs.
-                // Provide them, which means, here is where we recursively build
-                // all dependencies.
-                #[allow(clippy::mutable_key_type)]
-                let input_nodes: BTreeSet<Node> =
-                    futures::stream::iter(drv.input_derivations.iter())
-                        .map(|(input_drv_path, output_names)| {
-                            // look up the derivation object
-                            let input_drv = {
-                                let known_paths = self.known_paths.borrow();
-                                known_paths
-                                    .get_drv_by_drvpath(input_drv_path)
-                                    .unwrap_or_else(|| panic!("{} not found", input_drv_path))
-                                    .to_owned()
+                    None => {
+                        // Look up the derivation for this output path.
+                        let (drv_path, drv) = {
+                            let known_paths = self.known_paths.borrow();
+                            match known_paths.get_drv_path_for_output_path(store_path) {
+                                Some(drv_path) => (
+                                    drv_path.to_owned(),
+                                    known_paths.get_drv_by_drvpath(drv_path).unwrap().to_owned(),
+                                ),
+                                None => {
+                                    warn!(store_path=%store_path, "no drv found");
+                                    // let StdIO take over
+                                    return Ok(None);
+                                }
+                            }
+                        };
+
+                        warn!("triggering build");
+
+                        // derivation_to_build_request needs castore nodes for all inputs.
+                        // Provide them, which means, here is where we recursively build
+                        // all dependencies.
+                        #[allow(clippy::mutable_key_type)]
+                        let input_nodes: BTreeSet<Node> =
+                            futures::stream::iter(drv.input_derivations.iter())
+                                .map(|(input_drv_path, output_names)| {
+                                    // look up the derivation object
+                                    let input_drv = {
+                                        let known_paths = self.known_paths.borrow();
+                                        known_paths
+                                            .get_drv_by_drvpath(input_drv_path)
+                                            .unwrap_or_else(|| {
+                                                panic!("{} not found", input_drv_path)
+                                            })
+                                            .to_owned()
+                                    };
+
+                                    // convert output names to actual paths
+                                    let output_paths: Vec<StorePath> = output_names
+                                        .iter()
+                                        .map(|output_name| {
+                                            input_drv
+                                                .outputs
+                                                .get(output_name)
+                                                .expect("missing output_name")
+                                                .path
+                                                .as_ref()
+                                                .expect("missing output path")
+                                                .clone()
+                                        })
+                                        .collect();
+                                    // For each output, ask for the castore node.
+                                    // We're in a per-derivation context, so if they're
+                                    // not built yet they'll all get built together.
+                                    // If they don't need to be built, we can still
+                                    // substitute all of them in parallel - so we
+                                    // turn this into a stream of streams.
+                                    // It's up to the builder to deduplicate same build requests.
+                                    futures::stream::iter(output_paths.into_iter()).map(
+                                        |output_path| async move {
+                                            let node = self
+                                                .store_path_to_node(&output_path, Path::new(""))
+                                                .await?;
+
+                                            if let Some(node) = node {
+                                                Ok(node)
+                                            } else {
+                                                Err(io::Error::other("no node produced"))
+                                            }
+                                        },
+                                    )
+                                })
+                                .flatten()
+                                .buffer_unordered(10) // TODO: make configurable
+                                .try_collect()
+                                .await?;
+
+                        // TODO: check if input sources are sufficiently dealt with;
+                        // I think yes, they must be imported into the store by other
+                        // operations, so dealt with in the Some(…) match arm
+
+                        // synthesize the build request.
+                        let build_request = derivation_to_build_request(&drv, input_nodes)?;
+
+                        // create a build
+                        let build_result = self
+                            .build_service
+                            .as_ref()
+                            .do_build(build_request)
+                            .await
+                            .map_err(|e| std::io::Error::new(io::ErrorKind::Other, e))?;
+
+                        // TODO: refscan?
+
+                        // For each output, insert a PathInfo.
+                        for output in &build_result.outputs {
+                            let root_node = output.node.as_ref().expect("invalid root node");
+
+                            // calculate the nar representation
+                            let (nar_size, nar_sha256) = self
+                                .nar_calculation_service
+                                .calculate_nar(root_node)
+                                .await?;
+
+                            // assemble the PathInfo to persist
+                            let path_info = PathInfo {
+                                node: Some(tvix_castore::proto::Node {
+                                    node: Some(root_node.clone()),
+                                }),
+                                references: vec![], // TODO: refscan
+                                narinfo: Some(tvix_store::proto::NarInfo {
+                                    nar_size,
+                                    nar_sha256: Bytes::from(nar_sha256.to_vec()),
+                                    signatures: vec![],
+                                    reference_names: vec![], // TODO: refscan
+                                    deriver: Some(tvix_store::proto::StorePath {
+                                        name: drv_path
+                                            .name()
+                                            .strip_suffix(".drv")
+                                            .expect("missing .drv suffix")
+                                            .to_string(),
+                                        digest: drv_path.digest().to_vec().into(),
+                                    }),
+                                    ca: drv.fod_digest().map(
+                                        |fod_digest| -> tvix_store::proto::nar_info::Ca {
+                                            (&CAHash::Nar(nix_compat::nixhash::NixHash::Sha256(
+                                                fod_digest,
+                                            )))
+                                                .into()
+                                        },
+                                    ),
+                                }),
                             };
 
-                            // convert output names to actual paths
-                            let output_paths: Vec<StorePath> = output_names
-                                .iter()
-                                .map(|output_name| {
-                                    input_drv
-                                        .outputs
-                                        .get(output_name)
-                                        .expect("missing output_name")
-                                        .path
-                                        .as_ref()
-                                        .expect("missing output path")
-                                        .clone()
-                                })
-                                .collect();
-                            // For each output, ask for the castore node.
-                            // We're in a per-derivation context, so if they're
-                            // not built yet they'll all get built together.
-                            // If they don't need to build, we can however still
-                            // substitute all in parallel (if they don't need to
-                            // be built) - so we turn this into a stream of streams.
-                            // It's up to the builder to deduplicate same build requests.
-                            futures::stream::iter(output_paths.into_iter()).map(
-                                |output_path| async move {
-                                    let node = self
-                                        .store_path_to_node(&output_path, Path::new(""))
-                                        .await?;
-
-                                    if let Some(node) = node {
-                                        Ok(node)
-                                    } else {
-                                        Err(io::Error::other("no node produced"))
-                                    }
-                                },
-                            )
-                        })
-                        .flatten()
-                        .buffer_unordered(10) // TODO: make configurable
-                        .try_collect()
-                        .await?;
-
-                // TODO: check if input sources are sufficiently dealth with,
-                // I think yes, they must be imported into the store by other
-                // operations, so dealt with in the Some(…) match arm
-
-                // synthesize the build request.
-                let build_request = derivation_to_build_request(&drv, input_nodes)?;
-
-                // create a build
-                let build_result = self
-                    .build_service
-                    .as_ref()
-                    .do_build(build_request)
-                    .await
-                    .map_err(|e| std::io::Error::new(io::ErrorKind::Other, e))?;
-
-                // TODO: refscan?
-
-                // For each output, insert a PathInfo.
-                for output in &build_result.outputs {
-                    let root_node = output.node.as_ref().expect("invalid root node");
-
-                    // calculate the nar representation
-                    let (nar_size, nar_sha256) =
-                        self.path_info_service.calculate_nar(root_node).await?;
-
-                    // assemble the PathInfo to persist
-                    let path_info = PathInfo {
-                        node: Some(tvix_castore::proto::Node {
-                            node: Some(root_node.clone()),
-                        }),
-                        references: vec![], // TODO: refscan
-                        narinfo: Some(tvix_store::proto::NarInfo {
-                            nar_size,
-                            nar_sha256: Bytes::from(nar_sha256.to_vec()),
-                            signatures: vec![],
-                            reference_names: vec![], // TODO: refscan
-                            deriver: Some(tvix_store::proto::StorePath {
-                                name: drv_path
-                                    .name()
-                                    .strip_suffix(".drv")
-                                    .expect("missing .drv suffix")
-                                    .to_string(),
-                                digest: drv_path.digest().to_vec().into(),
-                            }),
-                            ca: drv.fod_digest().map(
-                                |fod_digest| -> tvix_store::proto::nar_info::Ca {
-                                    (&CAHash::Nar(nix_compat::nixhash::NixHash::Sha256(fod_digest)))
-                                        .into()
-                                },
-                            ),
-                        }),
-                    };
-
-                    self.path_info_service
-                        .put(path_info)
-                        .await
-                        .map_err(|e| std::io::Error::new(io::ErrorKind::Other, e))?;
-                }
+                            self.path_info_service
+                                .put(path_info)
+                                .await
+                                .map_err(|e| std::io::Error::new(io::ErrorKind::Other, e))?;
+                        }
 
-                // find the output for the store path requested
-                build_result
-                    .outputs
-                    .into_iter()
-                    .find(|output_node| {
-                        output_node.node.as_ref().expect("invalid node").get_name()
-                            == store_path.to_string().as_bytes()
-                    })
-                    .expect("build didn't produce the store path")
-                    .node
-                    .expect("invalid node")
+                        // find the output for the store path requested
+                        build_result
+                            .outputs
+                            .into_iter()
+                            .find(|output_node| {
+                                output_node.node.as_ref().expect("invalid node").get_name()
+                                    == store_path.to_string().as_bytes()
+                            })
+                            .expect("build didn't produce the store path")
+                            .node
+                            .expect("invalid node")
+                    }
+                }
             }
         };
 
         // now with the root_node and sub_path, descend to the node requested.
+        // We convert sub_path to the castore model here.
+        let sub_path = tvix_castore::PathBuf::from_host_path(sub_path, true)?;
+
         directoryservice::descend_to(&self.directory_service, root_node, sub_path)
             .await
             .map_err(|e| std::io::Error::new(io::ErrorKind::Other, e))
     }
 
-    /// This forwards the ingestion to the [`tvix_castore::import::ingest_entries`],
-    /// passing the blob_service and directory_service that's used.
-    /// The error is mapped to std::io::Error for simplicity.
-    pub(crate) async fn ingest_entries<S>(&self, entries_stream: S) -> io::Result<Node>
-    where
-        S: Stream<Item = DirEntry> + Unpin,
-    {
-        tvix_castore::import::ingest_entries(
-            &self.blob_service,
-            &self.directory_service,
-            entries_stream,
-        )
-        .await
-        .map_err(|err| std::io::Error::new(io::ErrorKind::Other, err))
-    }
-
     pub(crate) async fn node_to_path_info(
         &self,
         name: &str,
         path: &Path,
         ca: CAHash,
         root_node: Node,
-    ) -> io::Result<(PathInfo, StorePath)> {
+    ) -> io::Result<(PathInfo, NixHash, StorePath)> {
         // Ask the PathInfoService for the NAR size and sha256
         // We always need it no matter what is the actual hash mode
         // because the path info construct a narinfo which *always*
         // require a SHA256 of the NAR representation and the NAR size.
         let (nar_size, nar_sha256) = self
-            .path_info_service
+            .nar_calculation_service
             .as_ref()
             .calculate_nar(&root_node)
             .await?;
@@ -326,7 +363,11 @@ impl TvixStoreIO {
         let path_info =
             tvix_store::import::derive_nar_ca_path_info(nar_size, nar_sha256, Some(ca), root_node);
 
-        Ok((path_info, output_path.to_owned()))
+        Ok((
+            path_info,
+            NixHash::Sha256(nar_sha256),
+            output_path.to_owned(),
+        ))
     }
 
     pub(crate) async fn register_node_in_path_info_service(
@@ -336,7 +377,7 @@ impl TvixStoreIO {
         ca: CAHash,
         root_node: Node,
     ) -> io::Result<StorePath> {
-        let (path_info, output_path) = self.node_to_path_info(name, path, ca, root_node).await?;
+        let (path_info, _, output_path) = self.node_to_path_info(name, path, ca, root_node).await?;
         let _path_info = self.path_info_service.as_ref().put(path_info).await?;
 
         Ok(output_path)
@@ -370,88 +411,6 @@ impl TvixStoreIO {
             .await?
             .is_some())
     }
-
-    pub async fn fetch_url(
-        &self,
-        url: &str,
-        name: &str,
-        hash: Option<&NixHash>,
-    ) -> Result<StorePath, ErrorKind> {
-        let resp = self
-            .http_client
-            .get(url)
-            .send()
-            .await
-            .map_err(FetcherError::from)?;
-        let mut sha = Sha256::new();
-        let mut data = tokio_util::io::StreamReader::new(
-            resp.bytes_stream()
-                .inspect_ok(|data| {
-                    sha.update(data);
-                })
-                .map_err(|e| {
-                    let e = e.without_url();
-                    warn!(%e, "failed to get response body");
-                    io::Error::new(io::ErrorKind::BrokenPipe, e.to_string())
-                }),
-        );
-
-        let mut blob = self.blob_service.open_write().await;
-        let size = tokio::io::copy(&mut data, blob.as_mut()).await?;
-        let blob_digest = blob.close().await?;
-        let got = NixHash::Sha256(sha.finalize().into());
-
-        let hash = CAHash::Flat(if let Some(wanted) = hash {
-            if *wanted != got {
-                return Err(FetcherError::HashMismatch {
-                    url: url.to_owned(),
-                    wanted: wanted.clone(),
-                    got,
-                }
-                .into());
-            }
-            wanted.clone()
-        } else {
-            got
-        });
-
-        let path = build_ca_path(name, &hash, Vec::<String>::new(), false)
-            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
-        let node = Node::File(FileNode {
-            name: path.to_string().into(),
-            digest: blob_digest.into(),
-            size,
-            executable: false,
-        });
-
-        let (nar_size, nar_sha256) = self
-            .path_info_service
-            .calculate_nar(&node)
-            .await
-            .map_err(|e| ErrorKind::TvixError(Rc::new(e)))?;
-
-        let path_info = PathInfo {
-            node: Some(tvix_castore::proto::Node {
-                node: Some(node.clone()),
-            }),
-            references: vec![],
-            narinfo: Some(tvix_store::proto::NarInfo {
-                nar_size,
-                nar_sha256: nar_sha256.to_vec().into(),
-                signatures: vec![],
-                reference_names: vec![],
-                deriver: None, /* ? */
-                ca: Some((&hash).into()),
-            }),
-        };
-
-        self.path_info_service
-            .put(path_info)
-            .await
-            .map_err(|e| std::io::Error::new(io::ErrorKind::Other, e))?;
-
-        Ok(path.to_owned())
-    }
 }
 
 impl EvalIO for TvixStoreIO {
@@ -486,13 +445,13 @@ impl EvalIO for TvixStoreIO {
                 .tokio_handle
                 .block_on(async { self.store_path_to_node(&store_path, &sub_path).await })?
             {
-                // depending on the node type, treat read_to_string differently
+                // depending on the node type, treat open differently
                 match node {
                     Node::Directory(_) => {
                         // This would normally be a io::ErrorKind::IsADirectory (still unstable)
                         Err(io::Error::new(
                             io::ErrorKind::Unsupported,
-                            format!("tried to read directory at {:?} to string", path),
+                            format!("tried to open directory at {:?}", path),
                         ))
                     }
                     Node::File(file_node) => {
@@ -531,7 +490,7 @@ impl EvalIO for TvixStoreIO {
                     }
                     Node::Symlink(_symlink_node) => Err(io::Error::new(
                         io::ErrorKind::Unsupported,
-                        "read_to_string for symlinks is unsupported",
+                        "open for symlinks is unsupported",
                     ))?,
                 }
             } else {
@@ -622,6 +581,7 @@ impl EvalIO for TvixStoreIO {
                 &self.blob_service,
                 &self.directory_service,
                 &self.path_info_service,
+                &self.nar_calculation_service,
             )
             .await
         })?;
@@ -642,12 +602,8 @@ mod tests {
     use bstr::ByteSlice;
     use tempfile::TempDir;
     use tvix_build::buildservice::DummyBuildService;
-    use tvix_castore::{
-        blobservice::{BlobService, MemoryBlobService},
-        directoryservice::{DirectoryService, MemoryDirectoryService},
-    };
     use tvix_eval::{EvalIO, EvaluationResult};
-    use tvix_store::pathinfoservice::MemoryPathInfoService;
+    use tvix_store::utils::construct_services;
 
     use super::TvixStoreIO;
     use crate::builtins::{add_derivation_builtins, add_fetcher_builtins, add_import_builtins};
@@ -656,22 +612,19 @@ mod tests {
     /// Takes care of setting up the evaluator so it knows about the
     /// `derivation` builtin.
     fn eval(str: &str) -> EvaluationResult {
-        let blob_service = Arc::new(MemoryBlobService::default()) as Arc<dyn BlobService>;
-        let directory_service =
-            Arc::new(MemoryDirectoryService::default()) as Arc<dyn DirectoryService>;
-        let path_info_service = Arc::new(MemoryPathInfoService::new(
-            blob_service.clone(),
-            directory_service.clone(),
-        ));
-
-        let runtime = tokio::runtime::Runtime::new().unwrap();
+        let tokio_runtime = tokio::runtime::Runtime::new().unwrap();
+        let (blob_service, directory_service, path_info_service, nar_calculation_service) =
+            tokio_runtime
+                .block_on(async { construct_services("memory://", "memory://", "memory://").await })
+                .unwrap();
 
         let io = Rc::new(TvixStoreIO::new(
-            blob_service.clone(),
-            directory_service.clone(),
-            path_info_service,
+            blob_service,
+            directory_service,
+            path_info_service.into(),
+            nar_calculation_service.into(),
             Arc::<DummyBuildService>::default(),
-            runtime.handle().clone(),
+            tokio_runtime.handle().clone(),
         ));
         let mut eval = tvix_eval::Evaluation::new(io.clone() as Rc<dyn EvalIO>, true);
 
diff --git a/tvix/nar-bridge/.gitignore b/tvix/nar-bridge-go/.gitignore
index d70e1f8120..d70e1f8120 100644
--- a/tvix/nar-bridge/.gitignore
+++ b/tvix/nar-bridge-go/.gitignore
diff --git a/tvix/nar-bridge/README.md b/tvix/nar-bridge-go/README.md
index b14ee7af7b..81431daf38 100644
--- a/tvix/nar-bridge/README.md
+++ b/tvix/nar-bridge-go/README.md
@@ -1,4 +1,4 @@
-# //tvix/nar-bridge
+# //tvix/nar-bridge-go
 
 This exposes an HTTP binary cache interface (GET/HEAD/PUT requests) for a `tvix-
 store`.
diff --git a/tvix/nar-bridge/cmd/nar-bridge-http/main.go b/tvix/nar-bridge-go/cmd/nar-bridge-http/main.go
index 171ea7f5bd..cf2aaf4901 100644
--- a/tvix/nar-bridge/cmd/nar-bridge-http/main.go
+++ b/tvix/nar-bridge-go/cmd/nar-bridge-http/main.go
@@ -14,7 +14,7 @@ import (
 	"google.golang.org/grpc/credentials/insecure"
 
 	castorev1pb "code.tvl.fyi/tvix/castore-go"
-	narBridgeHttp "code.tvl.fyi/tvix/nar-bridge/pkg/http"
+	narBridgeHttp "code.tvl.fyi/tvix/nar-bridge-go/pkg/http"
 	storev1pb "code.tvl.fyi/tvix/store-go"
 	log "github.com/sirupsen/logrus"
 )
@@ -47,7 +47,7 @@ func main() {
 			log.Fatal("failed to read build info")
 		}
 
-		shutdown, err := setupOpenTelemetry(ctx, "nar-bridge", buildInfo.Main.Version)
+		shutdown, err := setupOpenTelemetry(ctx, "nar-bridge-http", buildInfo.Main.Version)
 		if err != nil {
 			log.WithError(err).Fatal("failed to setup OpenTelemetry")
 		}
diff --git a/tvix/nar-bridge/cmd/nar-bridge-http/otel.go b/tvix/nar-bridge-go/cmd/nar-bridge-http/otel.go
index c446c6ec1a..c446c6ec1a 100644
--- a/tvix/nar-bridge/cmd/nar-bridge-http/otel.go
+++ b/tvix/nar-bridge-go/cmd/nar-bridge-http/otel.go
diff --git a/tvix/nar-bridge/default.nix b/tvix/nar-bridge-go/default.nix
index c0247f279f..303d9c5041 100644
--- a/tvix/nar-bridge/default.nix
+++ b/tvix/nar-bridge-go/default.nix
@@ -3,7 +3,7 @@
 { depot, pkgs, lib, ... }:
 
 pkgs.buildGoModule {
-  name = "nar-bridge";
+  name = "nar-bridge-go";
   src = depot.third_party.gitignoreSource ./.;
 
   vendorHash = "sha256-7jugbC5sEGhppjiZgnoLP5A6kQSaHK9vE6cXVZBG22s=";
diff --git a/tvix/nar-bridge/go.mod b/tvix/nar-bridge-go/go.mod
index deb6943e23..3aa0694ff7 100644
--- a/tvix/nar-bridge/go.mod
+++ b/tvix/nar-bridge-go/go.mod
@@ -1,4 +1,4 @@
-module code.tvl.fyi/tvix/nar-bridge
+module code.tvl.fyi/tvix/nar-bridge-go
 
 require (
 	code.tvl.fyi/tvix/castore-go v0.0.0-20231105151352-990d6ba2175e
diff --git a/tvix/nar-bridge/go.sum b/tvix/nar-bridge-go/go.sum
index 39f77b9061..39f77b9061 100644
--- a/tvix/nar-bridge/go.sum
+++ b/tvix/nar-bridge-go/go.sum
diff --git a/tvix/nar-bridge/pkg/http/nar_get.go b/tvix/nar-bridge-go/pkg/http/nar_get.go
index 75797f8da9..75797f8da9 100644
--- a/tvix/nar-bridge/pkg/http/nar_get.go
+++ b/tvix/nar-bridge-go/pkg/http/nar_get.go
diff --git a/tvix/nar-bridge/pkg/http/nar_put.go b/tvix/nar-bridge-go/pkg/http/nar_put.go
index fdfa20f9c3..96bdd38b70 100644
--- a/tvix/nar-bridge/pkg/http/nar_put.go
+++ b/tvix/nar-bridge-go/pkg/http/nar_put.go
@@ -7,7 +7,7 @@ import (
 	"net/http"
 
 	castorev1pb "code.tvl.fyi/tvix/castore-go"
-	"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
+	"code.tvl.fyi/tvix/nar-bridge-go/pkg/importer"
 	"github.com/go-chi/chi/v5"
 	mh "github.com/multiformats/go-multihash/core"
 	nixhash "github.com/nix-community/go-nix/pkg/hash"
diff --git a/tvix/nar-bridge/pkg/http/narinfo.go b/tvix/nar-bridge-go/pkg/http/narinfo.go
index e5b99a9505..e5b99a9505 100644
--- a/tvix/nar-bridge/pkg/http/narinfo.go
+++ b/tvix/nar-bridge-go/pkg/http/narinfo.go
diff --git a/tvix/nar-bridge/pkg/http/narinfo_get.go b/tvix/nar-bridge-go/pkg/http/narinfo_get.go
index 98d85744d8..d43cb58078 100644
--- a/tvix/nar-bridge/pkg/http/narinfo_get.go
+++ b/tvix/nar-bridge-go/pkg/http/narinfo_get.go
@@ -96,37 +96,42 @@ func renderNarinfo(
 }
 
 func registerNarinfoGet(s *Server) {
-	// GET $outHash.narinfo looks up the PathInfo from the tvix-store,
-	// and then render a .narinfo file to the client.
-	// It will keep the PathInfo in the lookup map,
-	// so a subsequent GET /nar/ $narhash.nar request can find it.
-	s.handler.Get("/{outputhash:^["+nixbase32.Alphabet+"]{32}}.narinfo", func(w http.ResponseWriter, r *http.Request) {
-		defer r.Body.Close()
-
-		ctx := r.Context()
-		log := log.WithField("outputhash", chi.URLParamFromCtx(ctx, "outputhash"))
-
-		// parse the output hash sent in the request URL
-		outputHash, err := nixbase32.DecodeString(chi.URLParamFromCtx(ctx, "outputhash"))
-		if err != nil {
-			log.WithError(err).Error("unable to decode output hash from url")
-			w.WriteHeader(http.StatusBadRequest)
-			_, err := w.Write([]byte("unable to decode output hash from url"))
+	// GET/HEAD $outHash.narinfo looks up the PathInfo from the tvix-store,
+	// and, if it's a GET request, renders a .narinfo file to the client.
+	// In both cases it keeps the PathInfo in the lookup map,
+	// so a subsequent GET/HEAD /nar/$narhash.nar request can find it.
+	genNarinfoHandler := func(isHead bool) func(w http.ResponseWriter, r *http.Request) {
+		return func(w http.ResponseWriter, r *http.Request) {
+			defer r.Body.Close()
+
+			ctx := r.Context()
+			log := log.WithField("outputhash", chi.URLParamFromCtx(ctx, "outputhash"))
+
+			// parse the output hash sent in the request URL
+			outputHash, err := nixbase32.DecodeString(chi.URLParamFromCtx(ctx, "outputhash"))
 			if err != nil {
-				log.WithError(err).Errorf("unable to write error message to client")
+				log.WithError(err).Error("unable to decode output hash from url")
+				w.WriteHeader(http.StatusBadRequest)
+				_, err := w.Write([]byte("unable to decode output hash from url"))
+				if err != nil {
+					log.WithError(err).Errorf("unable to write error message to client")
+				}
+
+				return
 			}
 
-			return
-		}
-
-		err = renderNarinfo(ctx, log, s.pathInfoServiceClient, &s.narDbMu, s.narDb, outputHash, w, false)
-		if err != nil {
-			if errors.Is(err, fs.ErrNotExist) {
-				w.WriteHeader(http.StatusNotFound)
-			} else {
-				log.WithError(err).Warn("unable to render narinfo")
-				w.WriteHeader(http.StatusInternalServerError)
+			err = renderNarinfo(ctx, log, s.pathInfoServiceClient, &s.narDbMu, s.narDb, outputHash, w, isHead)
+			if err != nil {
+				if errors.Is(err, fs.ErrNotExist) {
+					w.WriteHeader(http.StatusNotFound)
+				} else {
+					log.WithError(err).Warn("unable to render narinfo")
+					w.WriteHeader(http.StatusInternalServerError)
+				}
 			}
 		}
-	})
+	}
+
+	s.handler.Get("/{outputhash:^["+nixbase32.Alphabet+"]{32}}.narinfo", genNarinfoHandler(false))
+	s.handler.Head("/{outputhash:^["+nixbase32.Alphabet+"]{32}}.narinfo", genNarinfoHandler(true))
 }
diff --git a/tvix/nar-bridge/pkg/http/narinfo_put.go b/tvix/nar-bridge-go/pkg/http/narinfo_put.go
index fd588bec86..0e2ae989c0 100644
--- a/tvix/nar-bridge/pkg/http/narinfo_put.go
+++ b/tvix/nar-bridge-go/pkg/http/narinfo_put.go
@@ -3,7 +3,7 @@ package http
 import (
 	"net/http"
 
-	"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
+	"code.tvl.fyi/tvix/nar-bridge-go/pkg/importer"
 	"github.com/go-chi/chi/v5"
 	"github.com/nix-community/go-nix/pkg/narinfo"
 	"github.com/nix-community/go-nix/pkg/nixbase32"
diff --git a/tvix/nar-bridge/pkg/http/server.go b/tvix/nar-bridge-go/pkg/http/server.go
index fbcb20be18..fbcb20be18 100644
--- a/tvix/nar-bridge/pkg/http/server.go
+++ b/tvix/nar-bridge-go/pkg/http/server.go
diff --git a/tvix/nar-bridge/pkg/http/util.go b/tvix/nar-bridge-go/pkg/http/util.go
index 60febea1f4..60febea1f4 100644
--- a/tvix/nar-bridge/pkg/http/util.go
+++ b/tvix/nar-bridge-go/pkg/http/util.go
diff --git a/tvix/nar-bridge/pkg/importer/blob_upload.go b/tvix/nar-bridge-go/pkg/importer/blob_upload.go
index c1255dd3ad..c1255dd3ad 100644
--- a/tvix/nar-bridge/pkg/importer/blob_upload.go
+++ b/tvix/nar-bridge-go/pkg/importer/blob_upload.go
diff --git a/tvix/nar-bridge/pkg/importer/counting_writer.go b/tvix/nar-bridge-go/pkg/importer/counting_writer.go
index d003a4b11b..d003a4b11b 100644
--- a/tvix/nar-bridge/pkg/importer/counting_writer.go
+++ b/tvix/nar-bridge-go/pkg/importer/counting_writer.go
diff --git a/tvix/nar-bridge/pkg/importer/directory_upload.go b/tvix/nar-bridge-go/pkg/importer/directory_upload.go
index 117f442fa5..117f442fa5 100644
--- a/tvix/nar-bridge/pkg/importer/directory_upload.go
+++ b/tvix/nar-bridge-go/pkg/importer/directory_upload.go
diff --git a/tvix/nar-bridge/pkg/importer/gen_pathinfo.go b/tvix/nar-bridge-go/pkg/importer/gen_pathinfo.go
index bdc298a9a3..bdc298a9a3 100644
--- a/tvix/nar-bridge/pkg/importer/gen_pathinfo.go
+++ b/tvix/nar-bridge-go/pkg/importer/gen_pathinfo.go
diff --git a/tvix/nar-bridge/pkg/importer/importer.go b/tvix/nar-bridge-go/pkg/importer/importer.go
index fce6c5f293..fce6c5f293 100644
--- a/tvix/nar-bridge/pkg/importer/importer.go
+++ b/tvix/nar-bridge-go/pkg/importer/importer.go
diff --git a/tvix/nar-bridge/pkg/importer/importer_test.go b/tvix/nar-bridge-go/pkg/importer/importer_test.go
index 8ff63b9257..313677084f 100644
--- a/tvix/nar-bridge/pkg/importer/importer_test.go
+++ b/tvix/nar-bridge-go/pkg/importer/importer_test.go
@@ -9,7 +9,7 @@ import (
 	"testing"
 
 	castorev1pb "code.tvl.fyi/tvix/castore-go"
-	"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
+	"code.tvl.fyi/tvix/nar-bridge-go/pkg/importer"
 	"github.com/stretchr/testify/require"
 )
 
diff --git a/tvix/nar-bridge/pkg/importer/roundtrip_test.go b/tvix/nar-bridge-go/pkg/importer/roundtrip_test.go
index 6d6fcb9ee2..c50d332d85 100644
--- a/tvix/nar-bridge/pkg/importer/roundtrip_test.go
+++ b/tvix/nar-bridge-go/pkg/importer/roundtrip_test.go
@@ -11,7 +11,7 @@ import (
 	"testing"
 
 	castorev1pb "code.tvl.fyi/tvix/castore-go"
-	"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
+	"code.tvl.fyi/tvix/nar-bridge-go/pkg/importer"
 	storev1pb "code.tvl.fyi/tvix/store-go"
 	"github.com/stretchr/testify/require"
 )
diff --git a/tvix/nar-bridge/pkg/importer/util_test.go b/tvix/nar-bridge-go/pkg/importer/util_test.go
index 06353cf582..06353cf582 100644
--- a/tvix/nar-bridge/pkg/importer/util_test.go
+++ b/tvix/nar-bridge-go/pkg/importer/util_test.go
diff --git a/tvix/nar-bridge/testdata/emptydirectory.nar b/tvix/nar-bridge-go/testdata/emptydirectory.nar
index baba558622..baba558622 100644
--- a/tvix/nar-bridge/testdata/emptydirectory.nar
+++ b/tvix/nar-bridge-go/testdata/emptydirectory.nar
Binary files differ
diff --git a/tvix/nar-bridge/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar b/tvix/nar-bridge-go/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar
index 6cb0b16e5d..6cb0b16e5d 100644
--- a/tvix/nar-bridge/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar
+++ b/tvix/nar-bridge-go/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar
Binary files differ
diff --git a/tvix/nar-bridge/testdata/onebyteexecutable.nar b/tvix/nar-bridge-go/testdata/onebyteexecutable.nar
index 6868219666..6868219666 100644
--- a/tvix/nar-bridge/testdata/onebyteexecutable.nar
+++ b/tvix/nar-bridge-go/testdata/onebyteexecutable.nar
Binary files differ
diff --git a/tvix/nar-bridge/testdata/onebyteregular.nar b/tvix/nar-bridge-go/testdata/onebyteregular.nar
index b8c94932bf..b8c94932bf 100644
--- a/tvix/nar-bridge/testdata/onebyteregular.nar
+++ b/tvix/nar-bridge-go/testdata/onebyteregular.nar
Binary files differ
diff --git a/tvix/nar-bridge/testdata/popdirectories.nar b/tvix/nar-bridge-go/testdata/popdirectories.nar
index 74313aca52..74313aca52 100644
--- a/tvix/nar-bridge/testdata/popdirectories.nar
+++ b/tvix/nar-bridge-go/testdata/popdirectories.nar
Binary files differ
diff --git a/tvix/nar-bridge/testdata/symlink.nar b/tvix/nar-bridge-go/testdata/symlink.nar
index 7990e4ad5b..7990e4ad5b 100644
--- a/tvix/nar-bridge/testdata/symlink.nar
+++ b/tvix/nar-bridge-go/testdata/symlink.nar
Binary files differ
diff --git a/tvix/nix-compat/Cargo.toml b/tvix/nix-compat/Cargo.toml
index ebec6d937d..876ac3ecad 100644
--- a/tvix/nix-compat/Cargo.toml
+++ b/tvix/nix-compat/Cargo.toml
@@ -44,16 +44,9 @@ lazy_static = "1.4.0"
 pretty_assertions = "1.4.0"
 rstest = "0.19.0"
 serde_json = "1.0"
-test-case = "3.3.1"
 tokio-test = "0.4.3"
 zstd = "^0.13.0"
 
-[dev-dependencies.test-generator]
-# This fork of test-generator adds support for cargo workspaces, see
-# also https://github.com/frehberg/test-generator/pull/14
-git = "https://github.com/JamesGuthrie/test-generator.git"
-rev = "82e799979980962aec1aa324ec6e0e4cad781f41"
-
 [[bench]]
 name = "derivation_parse_aterm"
 harness = false
diff --git a/tvix/nix-compat/src/aterm/escape.rs b/tvix/nix-compat/src/aterm/escape.rs
index 06b550bbf0..80a85d2103 100644
--- a/tvix/nix-compat/src/aterm/escape.rs
+++ b/tvix/nix-compat/src/aterm/escape.rs
@@ -16,12 +16,13 @@ pub fn escape_bytes<P: AsRef<[u8]>>(s: P) -> Vec<u8> {
 #[cfg(test)]
 mod tests {
     use super::escape_bytes;
-    use test_case::test_case;
+    use rstest::rstest;
 
-    #[test_case(b"", b""; "empty")]
-    #[test_case(b"\"", b"\\\""; "doublequote")]
-    #[test_case(b":", b":"; "colon")]
-    fn escape(input: &[u8], expected: &[u8]) {
+    #[rstest]
+    #[case::empty(b"", b"")]
+    #[case::doublequote(b"\"", b"\\\"")]
+    #[case::colon(b":", b":")]
+    fn escape(#[case] input: &[u8], #[case] expected: &[u8]) {
         assert_eq!(expected, escape_bytes(input))
     }
 }
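
The pattern above recurs throughout this change: test-case's inline `#[test_case(args; "name")]` syntax becomes rstest's one-attribute-per-case form, with the case name moving into the attribute path and the parameters annotated `#[case]`. A minimal standalone sketch of the resulting shape (hypothetical test, not from this crate):

```rust
use rstest::rstest;

// One #[case::name(...)] attribute per test case; arguments bind
// positionally to the #[case] parameters of the function.
#[rstest]
#[case::empty("", 0)]
#[case::hello("hello", 5)]
fn length_matches(#[case] input: &str, #[case] expected: usize) {
    assert_eq!(expected, input.len());
}
```
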
diff --git a/tvix/nix-compat/src/aterm/parser.rs b/tvix/nix-compat/src/aterm/parser.rs
index 72648d5ef5..a30cb40ab0 100644
--- a/tvix/nix-compat/src/aterm/parser.rs
+++ b/tvix/nix-compat/src/aterm/parser.rs
@@ -76,25 +76,31 @@ pub(crate) fn parse_str_list(i: &[u8]) -> IResult<&[u8], Vec<String>> {
 
 #[cfg(test)]
 mod tests {
-    use test_case::test_case;
+    use rstest::rstest;
 
-    #[test_case(br#""""#, b"", b""; "empty")]
-    #[test_case(br#""Hello World""#, b"Hello World", b""; "hello world")]
-    #[test_case(br#""\"""#, br#"""#, b""; "doublequote")]
-    #[test_case(br#"":""#, b":", b""; "colon")]
-    #[test_case(br#""\""Rest"#, br#"""#, b"Rest"; "doublequote rest")]
-    fn parse_bstr_field(input: &[u8], expected: &[u8], exp_rest: &[u8]) {
+    #[rstest]
+    #[case::empty(br#""""#, b"", b"")]
+    #[case::hello_world(br#""Hello World""#, b"Hello World", b"")]
+    #[case::doublequote(br#""\"""#, br#"""#, b"")]
+    #[case::colon(br#"":""#, b":", b"")]
+    #[case::doublequote_rest(br#""\""Rest"#, br#"""#, b"Rest")]
+    fn test_parse_bstr_field(
+        #[case] input: &[u8],
+        #[case] expected: &[u8],
+        #[case] exp_rest: &[u8],
+    ) {
         let (rest, parsed) = super::parse_bstr_field(input).expect("must parse");
         assert_eq!(exp_rest, rest, "expected remainder");
         assert_eq!(expected, parsed);
     }
 
-    #[test_case(br#""""#, "", b""; "empty")]
-    #[test_case(br#""Hello World""#, "Hello World", b""; "hello world")]
-    #[test_case(br#""\"""#, r#"""#, b""; "doublequote")]
-    #[test_case(br#"":""#, ":", b""; "colon")]
-    #[test_case(br#""\""Rest"#, r#"""#, b"Rest"; "doublequote rest")]
-    fn parse_string_field(input: &[u8], expected: &str, exp_rest: &[u8]) {
+    #[rstest]
+    #[case::empty(br#""""#, "", b"")]
+    #[case::hello_world(br#""Hello World""#, "Hello World", b"")]
+    #[case::doublequote(br#""\"""#, r#"""#, b"")]
+    #[case::colon(br#"":""#, ":", b"")]
+    #[case::doublequote_rest(br#""\""Rest"#, r#"""#, b"Rest")]
+    fn parse_string_field(#[case] input: &[u8], #[case] expected: &str, #[case] exp_rest: &[u8]) {
         let (rest, parsed) = super::parse_string_field(input).expect("must parse");
         assert_eq!(exp_rest, rest, "expected remainder");
         assert_eq!(expected, &parsed);
@@ -107,10 +113,11 @@ mod tests {
         super::parse_string_field(&input).expect_err("must fail");
     }
 
-    #[test_case(br#"["foo"]"#, vec!["foo".to_string()], b""; "single foo")]
-    #[test_case(b"[]", vec![], b""; "empty list")]
-    #[test_case(b"[]blub", vec![], b"blub"; "empty list with rest")]
-    fn parse_list(input: &[u8], expected: Vec<String>, exp_rest: &[u8]) {
+    #[rstest]
+    #[case::single_foo(br#"["foo"]"#, vec!["foo".to_string()], b"")]
+    #[case::empty_list(b"[]", vec![], b"")]
+    #[case::empty_list_with_rest(b"[]blub", vec![], b"blub")]
+    fn parse_list(#[case] input: &[u8], #[case] expected: Vec<String>, #[case] exp_rest: &[u8]) {
         let (rest, parsed) = super::parse_str_list(input).expect("must parse");
         assert_eq!(exp_rest, rest, "expected remainder");
         assert_eq!(expected, parsed);
diff --git a/tvix/nix-compat/src/derivation/mod.rs b/tvix/nix-compat/src/derivation/mod.rs
index 07da127ed0..6e12e3ea86 100644
--- a/tvix/nix-compat/src/derivation/mod.rs
+++ b/tvix/nix-compat/src/derivation/mod.rs
@@ -188,11 +188,12 @@ impl Derivation {
     ///    `fixed:out:${algo}:${digest}:${fodPath}` string is hashed instead of
     ///    the A-Term.
     ///
-    /// If the derivation is not a fixed derivation, it's up to the caller of
-    /// this function to provide a lookup function to lookup these calculation
-    /// results of parent derivations at `fn_get_derivation_or_fod_hash` (by
-    /// drv path).
-    pub fn derivation_or_fod_hash<F>(&self, fn_get_derivation_or_fod_hash: F) -> [u8; 32]
+    /// It's up to the caller of this function to provide an (infallible) lookup
+    /// function to query [hash_derivation_modulo] of direct input derivations,
+    /// by their [StorePathRef].
+    /// It is only called if the derivation is not a fixed-output
+    /// derivation.
+    pub fn hash_derivation_modulo<F>(&self, fn_lookup_hash_derivation_modulo: F) -> [u8; 32]
     where
         F: Fn(&StorePathRef) -> [u8; 32],
     {
@@ -200,16 +201,16 @@ impl Derivation {
         // Non-Fixed-output derivations return the sha256 digest of the ATerm
         // notation, but with all input_derivation paths replaced by a recursive
         // call to this function.
-        // We use fn_get_derivation_or_fod_hash here, so callers can precompute this.
+        // We call [fn_lookup_hash_derivation_modulo] rather than recursing
+        // ourselves, so callers can precompute this.
         self.fod_digest().unwrap_or({
-            // For each input_derivation, look up the
-            // derivation_or_fod_hash, and replace the derivation path with
-            // it's HEXLOWER digest.
+            // For each input_derivation, look up the hash derivation modulo,
+            // and replace the derivation path in the aterm with its HEXLOWER digest.
             let aterm_bytes = self.to_aterm_bytes_with_replacements(&BTreeMap::from_iter(
                 self.input_derivations
                     .iter()
                     .map(|(drv_path, output_names)| {
-                        let hash = fn_get_derivation_or_fod_hash(&drv_path.into());
+                        let hash = fn_lookup_hash_derivation_modulo(&drv_path.into());
 
                         (hash, output_names.to_owned())
                     }),
@@ -226,20 +227,22 @@ impl Derivation {
     /// and self.environment[$outputName] needs to be an empty string.
     ///
     /// Output path calculation requires knowledge of the
-    /// derivation_or_fod_hash [NixHash], which (in case of non-fixed-output
-    /// derivations) also requires knowledge of other hash_derivation_modulo
-    /// [NixHash]es.
+    /// [hash_derivation_modulo], which (in case of non-fixed-output
+    /// derivations) also requires knowledge of the [hash_derivation_modulo] of
+    /// input derivations (recursively).
     ///
-    /// We solve this by asking the caller of this function to provide the
-    /// hash_derivation_modulo of the current Derivation.
+    /// To avoid recursing and doing unnecessary calculation, we simply
+    /// ask the caller of this function to provide the result of the
+    /// [hash_derivation_modulo] call of the current [Derivation],
+    /// and leave it up to them to calculate it when needed.
     ///
-    /// On completion, self.environment[$outputName] and
-    /// self.outputs[$outputName].path are set to the calculated output path for all
+    /// On completion, `self.environment[$outputName]` and
+    /// `self.outputs[$outputName].path` are set to the calculated output path for all
     /// outputs.
     pub fn calculate_output_paths(
         &mut self,
         name: &str,
-        derivation_or_fod_hash: &[u8; 32],
+        hash_derivation_modulo: &[u8; 32],
     ) -> Result<(), DerivationError> {
         // The fingerprint and hash differs per output
         for (output_name, output) in self.outputs.iter_mut() {
@@ -250,14 +253,14 @@ impl Derivation {
 
             let path_name = output_path_name(name, output_name);
 
-            // For fixed output derivation we use the per-output info, otherwise we use the
-            // derivation hash.
+            // For fixed-output derivations we use [build_ca_path], otherwise we
+            // use [build_output_path] with [hash_derivation_modulo].
             let abs_store_path = if let Some(ref hwm) = output.ca_hash {
                 build_ca_path(&path_name, hwm, Vec::<String>::new(), false).map_err(|e| {
                     DerivationError::InvalidOutputDerivationPath(output_name.to_string(), e)
                 })?
             } else {
-                build_output_path(derivation_or_fod_hash, output_name, &path_name).map_err(|e| {
+                build_output_path(hash_derivation_modulo, output_name, &path_name).map_err(|e| {
                     DerivationError::InvalidOutputDerivationPath(
                         output_name.to_string(),
                         store_path::BuildStorePathError::InvalidStorePath(e),
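
Taken together, the renamed API reads as: compute `hash_derivation_modulo` once per derivation, supplying precomputed hashes of direct input derivations through the closure, then feed the result into `calculate_output_paths`. A rough caller-side sketch under the new names (the cache type and helper function are hypothetical; the method signatures are as in the diff):

```rust
use std::collections::HashMap;

use nix_compat::derivation::{Derivation, DerivationError};
use nix_compat::store_path::StorePathRef;

// Hypothetical helper: `parent_hashes` holds hash_derivation_modulo
// results for all direct input derivations, keyed by their store path
// string, typically filled in while those parents were processed.
fn fill_output_paths(
    name: &str,
    drv: &mut Derivation,
    parent_hashes: &HashMap<String, [u8; 32]>,
) -> Result<(), DerivationError> {
    // For fixed-output derivations the closure is never invoked.
    let hdm = drv.hash_derivation_modulo(|parent: &StorePathRef| {
        *parent_hashes
            .get(&parent.to_string())
            .expect("input derivation hash must be precomputed")
    });
    drv.calculate_output_paths(name, &hdm)
}
```
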
diff --git a/tvix/nix-compat/src/derivation/parser.rs b/tvix/nix-compat/src/derivation/parser.rs
index 769a7c5a96..2775294960 100644
--- a/tvix/nix-compat/src/derivation/parser.rs
+++ b/tvix/nix-compat/src/derivation/parser.rs
@@ -339,7 +339,8 @@ mod tests {
     use bstr::{BString, ByteSlice};
     use hex_literal::hex;
     use lazy_static::lazy_static;
-    use test_case::test_case;
+    use rstest::rstest;
+
     const DIGEST_SHA256: [u8; 32] =
         hex!("a5ce9c155ed09397614646c9717fc7cd94b1023d7b76b618d409e4fefd6e9d39");
 
@@ -418,9 +419,14 @@ mod tests {
     }
 
     /// Ensure parsing KVs works
-    #[test_case(b"[]", &BTreeMap::new(), b""; "empty")]
-    #[test_case(b"[(\"a\",\"1\"),(\"b\",\"2\")]", &EXP_AB_MAP, b""; "simple")]
-    fn parse_kv(input: &'static [u8], expected: &BTreeMap<String, BString>, exp_rest: &[u8]) {
+    #[rstest]
+    #[case::empty(b"[]", &BTreeMap::new(), b"")]
+    #[case::simple(b"[(\"a\",\"1\"),(\"b\",\"2\")]", &EXP_AB_MAP, b"")]
+    fn parse_kv(
+        #[case] input: &'static [u8],
+        #[case] expected: &BTreeMap<String, BString>,
+        #[case] exp_rest: &[u8],
+    ) {
         let (rest, parsed) = super::parse_kv::<BString, _>(crate::aterm::parse_bstr_field)(input)
             .expect("must parse");
         assert_eq!(exp_rest, rest, "expected remainder");
@@ -443,11 +449,12 @@ mod tests {
     }
 
     /// Ensure parsing input derivations works.
-    #[test_case(b"[]", &BTreeMap::new(); "empty")]
-    #[test_case(EXP_INPUT_DERIVATIONS_SIMPLE_ATERM.as_bytes(), &EXP_INPUT_DERIVATIONS_SIMPLE; "simple")]
+    #[rstest]
+    #[case::empty(b"[]", &BTreeMap::new())]
+    #[case::simple(EXP_INPUT_DERIVATIONS_SIMPLE_ATERM.as_bytes(), &EXP_INPUT_DERIVATIONS_SIMPLE)]
     fn parse_input_derivations(
-        input: &'static [u8],
-        expected: &BTreeMap<StorePath, BTreeSet<String>>,
+        #[case] input: &'static [u8],
+        #[case] expected: &BTreeMap<StorePath, BTreeSet<String>>,
     ) {
         let (rest, parsed) = super::parse_input_derivations(input).expect("must parse");
 
@@ -480,9 +487,10 @@ mod tests {
     }
 
     /// Ensure parsing input sources works
-    #[test_case(b"[]", &BTreeSet::new(); "empty")]
-    #[test_case(b"[\"/nix/store/55lwldka5nyxa08wnvlizyqw02ihy8ic-has-multi-out\",\"/nix/store/2vixb94v0hy2xc6p7mbnxxcyc095yyia-has-multi-out-lib\"]", &EXP_INPUT_SOURCES_SIMPLE; "simple")]
-    fn parse_input_sources(input: &'static [u8], expected: &BTreeSet<String>) {
+    #[rstest]
+    #[case::empty(b"[]", &BTreeSet::new())]
+    #[case::simple(b"[\"/nix/store/55lwldka5nyxa08wnvlizyqw02ihy8ic-has-multi-out\",\"/nix/store/2vixb94v0hy2xc6p7mbnxxcyc095yyia-has-multi-out-lib\"]", &EXP_INPUT_SOURCES_SIMPLE)]
+    fn parse_input_sources(#[case] input: &'static [u8], #[case] expected: &BTreeSet<String>) {
         let (rest, parsed) = super::parse_input_sources(input).expect("must parse");
 
         assert_eq!(
@@ -519,15 +527,16 @@ mod tests {
         }
     }
 
-    #[test_case(
+    #[rstest]
+    #[case::simple(
         br#"("out","/nix/store/5vyvcwah9l9kf07d52rcgdk70g2f4y13-foo","","")"#,
         ("out".to_string(), Output {
             path: Some(
                 StorePathRef::from_absolute_path("/nix/store/5vyvcwah9l9kf07d52rcgdk70g2f4y13-foo".as_bytes()).unwrap().to_owned()),
             ca_hash: None
-        }); "simple"
+        })
     )]
-    #[test_case(
+    #[case::fod(
         br#"("out","/nix/store/4q0pg5zpfmznxscq3avycvf9xdvx50n3-bar","r:sha256","08813cbee9903c62be4c5027726a418a300da4500b2d369d3af9286f4815ceba")"#,
         ("out".to_string(), Output {
             path: Some(
@@ -535,28 +544,33 @@ mod tests {
                 "/nix/store/4q0pg5zpfmznxscq3avycvf9xdvx50n3-bar".as_bytes()).unwrap().to_owned()),
             ca_hash: Some(from_algo_and_mode_and_digest("r:sha256",
                    data_encoding::HEXLOWER.decode(b"08813cbee9903c62be4c5027726a418a300da4500b2d369d3af9286f4815ceba").unwrap()            ).unwrap()),
-        }); "fod"
+        })
      )]
-    fn parse_output(input: &[u8], expected: (String, Output)) {
+    fn parse_output(#[case] input: &[u8], #[case] expected: (String, Output)) {
         let (rest, parsed) = super::parse_output(input).expect("must parse");
         assert!(rest.is_empty());
         assert_eq!(expected, parsed);
     }
 
-    #[test_case(
+    #[rstest]
+    #[case::multi_out(
         br#"[("lib","/nix/store/2vixb94v0hy2xc6p7mbnxxcyc095yyia-has-multi-out-lib","",""),("out","/nix/store/55lwldka5nyxa08wnvlizyqw02ihy8ic-has-multi-out","","")]"#,
-        &EXP_MULTI_OUTPUTS;
-        "multi-out"
+        &EXP_MULTI_OUTPUTS
     )]
-    fn parse_outputs(input: &[u8], expected: &BTreeMap<String, Output>) {
+    fn parse_outputs(#[case] input: &[u8], #[case] expected: &BTreeMap<String, Output>) {
         let (rest, parsed) = super::parse_outputs(input).expect("must parse");
         assert!(rest.is_empty());
         assert_eq!(*expected, parsed);
     }
 
-    #[test_case("sha256", &DIGEST_SHA256, CAHash::Flat(NIXHASH_SHA256.clone()); "sha256 flat")]
-    #[test_case("r:sha256", &DIGEST_SHA256, CAHash::Nar(NIXHASH_SHA256.clone()); "sha256 recursive")]
-    fn test_from_algo_and_mode_and_digest(algo_and_mode: &str, digest: &[u8], expected: CAHash) {
+    #[rstest]
+    #[case::sha256_flat("sha256", &DIGEST_SHA256, CAHash::Flat(NIXHASH_SHA256.clone()))]
+    #[case::sha256_recursive("r:sha256", &DIGEST_SHA256, CAHash::Nar(NIXHASH_SHA256.clone()))]
+    fn test_from_algo_and_mode_and_digest(
+        #[case] algo_and_mode: &str,
+        #[case] digest: &[u8],
+        #[case] expected: CAHash,
+    ) {
         assert_eq!(
             expected,
             from_algo_and_mode_and_digest(algo_and_mode, digest).unwrap()
diff --git a/tvix/nix-compat/src/derivation/tests/mod.rs b/tvix/nix-compat/src/derivation/tests/mod.rs
index 56bad7869d..48d4e8926a 100644
--- a/tvix/nix-compat/src/derivation/tests/mod.rs
+++ b/tvix/nix-compat/src/derivation/tests/mod.rs
@@ -6,51 +6,41 @@ use crate::derivation::Derivation;
 use crate::store_path::StorePath;
 use bstr::{BStr, BString};
 use hex_literal::hex;
+use rstest::rstest;
 use std::collections::BTreeSet;
-use std::fs::File;
-use std::io::Read;
-use std::path::Path;
+use std::fs;
+use std::path::{Path, PathBuf};
 use std::str::FromStr;
-use test_case::test_case;
-use test_generator::test_resources;
 
 const RESOURCES_PATHS: &str = "src/derivation/tests/derivation_tests";
 
-fn read_file(path: &str) -> BString {
-    let path = Path::new(path);
-    let mut file = File::open(path).unwrap();
-    let mut file_contents = Vec::new();
-
-    file.read_to_end(&mut file_contents).unwrap();
-
-    file_contents.into()
-}
-
-#[test_resources("src/derivation/tests/derivation_tests/ok/*.drv")]
-fn check_serialization(path_to_drv_file: &str) {
-    // skip JSON files known to fail parsing
-    if path_to_drv_file.ends_with("cp1252.drv") || path_to_drv_file.ends_with("latin1.drv") {
-        return;
-    }
-    let json_bytes = read_file(&format!("{}.json", path_to_drv_file));
+#[rstest]
+fn check_serialization(
+    #[files("src/derivation/tests/derivation_tests/ok/*.drv")]
+    #[exclude("(cp1252)|(latin1)")] // skip JSON files known to fail parsing
+    path_to_drv_file: PathBuf,
+) {
+    let json_bytes =
+        fs::read(path_to_drv_file.with_extension("drv.json")).expect("unable to read JSON");
     let derivation: Derivation =
         serde_json::from_slice(&json_bytes).expect("JSON was not well-formatted");
 
     let mut serialized_derivation = Vec::new();
     derivation.serialize(&mut serialized_derivation).unwrap();
 
-    let expected = read_file(path_to_drv_file);
+    let expected = fs::read(&path_to_drv_file).expect("unable to read .drv");
 
     assert_eq!(expected, BStr::new(&serialized_derivation));
 }
 
-#[test_resources("src/derivation/tests/derivation_tests/ok/*.drv")]
-fn validate(path_to_drv_file: &str) {
-    // skip JSON files known to fail parsing
-    if path_to_drv_file.ends_with("cp1252.drv") || path_to_drv_file.ends_with("latin1.drv") {
-        return;
-    }
-    let json_bytes = read_file(&format!("{}.json", path_to_drv_file));
+#[rstest]
+fn validate(
+    #[files("src/derivation/tests/derivation_tests/ok/*.drv")]
+    #[exclude("(cp1252)|(latin1)")] // skip JSON files known to fail parsing
+    path_to_drv_file: PathBuf,
+) {
+    let json_bytes =
+        fs::read(path_to_drv_file.with_extension("drv.json")).expect("unable to read JSON");
     let derivation: Derivation =
         serde_json::from_slice(&json_bytes).expect("JSON was not well-formatted");
 
@@ -59,17 +49,18 @@ fn validate(path_to_drv_file: &str) {
         .expect("derivation failed to validate")
 }
 
-#[test_resources("src/derivation/tests/derivation_tests/ok/*.drv")]
-fn check_to_aterm_bytes(path_to_drv_file: &str) {
-    // skip JSON files known to fail parsing
-    if path_to_drv_file.ends_with("cp1252.drv") || path_to_drv_file.ends_with("latin1.drv") {
-        return;
-    }
-    let json_bytes = read_file(&format!("{}.json", path_to_drv_file));
+#[rstest]
+fn check_to_aterm_bytes(
+    #[files("src/derivation/tests/derivation_tests/ok/*.drv")]
+    #[exclude("(cp1252)|(latin1)")] // skip JSON files known to fail parsing
+    path_to_drv_file: PathBuf,
+) {
+    let json_bytes =
+        fs::read(path_to_drv_file.with_extension("drv.json")).expect("unable to read JSON");
     let derivation: Derivation =
         serde_json::from_slice(&json_bytes).expect("JSON was not well-formatted");
 
-    let expected = read_file(path_to_drv_file);
+    let expected = fs::read(&path_to_drv_file).expect("unable to read .drv");
 
     assert_eq!(expected, BStr::new(&derivation.to_aterm_bytes()));
 }
@@ -77,22 +68,28 @@ fn check_to_aterm_bytes(path_to_drv_file: &str) {
 /// Reads in derivations in ATerm representation, parses with that parser,
 /// then compares the structs with the ones obtained by parsing the JSON
 /// representations.
-#[test_resources("src/derivation/tests/derivation_tests/ok/*.drv")]
-fn from_aterm_bytes(path_to_drv_file: &str) {
+#[rstest]
+fn from_aterm_bytes(
+    #[files("src/derivation/tests/derivation_tests/ok/*.drv")] path_to_drv_file: PathBuf,
+) {
     // Read in ATerm representation.
-    let aterm_bytes = read_file(path_to_drv_file);
+    let aterm_bytes = fs::read(&path_to_drv_file).expect("unable to read .drv");
     let parsed_drv = Derivation::from_aterm_bytes(&aterm_bytes).expect("must succeed");
 
     // For where we're able to load JSON fixtures, parse them and compare the structs.
     // For where we're not, compare the bytes manually.
-    if path_to_drv_file.ends_with("cp1252.drv") || path_to_drv_file.ends_with("latin1.drv") {
+    if path_to_drv_file.file_name().is_some_and(|s| {
+        s.as_encoded_bytes().ends_with(b"cp1252.drv")
+            || s.as_encoded_bytes().ends_with(b"latin1.drv")
+    }) {
         assert_eq!(
             &[0xc5, 0xc4, 0xd6][..],
             parsed_drv.environment.get("chars").unwrap(),
             "expected bytes to match",
         );
     } else {
-        let json_bytes = read_file(&format!("{}.json", path_to_drv_file));
+        let json_bytes =
+            fs::read(path_to_drv_file.with_extension("drv.json")).expect("unable to read JSON");
         let fixture_derivation: Derivation =
             serde_json::from_slice(&json_bytes).expect("JSON was not well-formatted");
 
@@ -112,7 +109,8 @@ fn from_aterm_bytes(path_to_drv_file: &str) {
 
 #[test]
 fn from_aterm_bytes_duplicate_map_key() {
-    let buf: Vec<u8> = read_file(&format!("{}/{}", RESOURCES_PATHS, "duplicate.drv")).into();
+    let buf: Vec<u8> =
+        fs::read(format!("{}/{}", RESOURCES_PATHS, "duplicate.drv")).expect("unable to read .drv");
 
     let err = Derivation::from_aterm_bytes(&buf).expect_err("must fail");
 
@@ -130,26 +128,31 @@ fn from_aterm_bytes_duplicate_map_key() {
 /// Ensure the parser detects and fails in this case.
 #[test]
 fn from_aterm_bytes_trailer() {
-    let mut buf: Vec<u8> = read_file(&format!(
+    let mut buf: Vec<u8> = fs::read(format!(
         "{}/ok/{}",
         RESOURCES_PATHS, "0hm2f1psjpcwg8fijsmr4wwxrx59s092-bar.drv"
     ))
-    .into();
+    .expect("unable to read .drv");
 
     buf.push(0x00);
 
     Derivation::from_aterm_bytes(&buf).expect_err("must fail");
 }
 
-#[test_case("bar","0hm2f1psjpcwg8fijsmr4wwxrx59s092-bar.drv"; "fixed_sha256")]
-#[test_case("foo", "4wvvbi4jwn0prsdxb7vs673qa5h9gr7x-foo.drv"; "simple-sha256")]
-#[test_case("bar", "ss2p4wmxijn652haqyd7dckxwl4c7hxx-bar.drv"; "fixed-sha1")]
-#[test_case("foo", "ch49594n9avinrf8ip0aslidkc4lxkqv-foo.drv"; "simple-sha1")]
-#[test_case("has-multi-out", "h32dahq0bx5rp1krcdx3a53asj21jvhk-has-multi-out.drv"; "multiple-outputs")]
-#[test_case("structured-attrs", "9lj1lkjm2ag622mh4h9rpy6j607an8g2-structured-attrs.drv"; "structured-attrs")]
-#[test_case("unicode", "52a9id8hx688hvlnz4d1n25ml1jdykz0-unicode.drv"; "unicode")]
-fn derivation_path(name: &str, expected_path: &str) {
-    let json_bytes = read_file(&format!("{}/ok/{}.json", RESOURCES_PATHS, expected_path));
+#[rstest]
+#[case::fixed_sha256("bar", "0hm2f1psjpcwg8fijsmr4wwxrx59s092-bar.drv")]
+#[case::simple_sha256("foo", "4wvvbi4jwn0prsdxb7vs673qa5h9gr7x-foo.drv")]
+#[case::fixed_sha1("bar", "ss2p4wmxijn652haqyd7dckxwl4c7hxx-bar.drv")]
+#[case::simple_sha1("foo", "ch49594n9avinrf8ip0aslidkc4lxkqv-foo.drv")]
+#[case::multiple_outputs("has-multi-out", "h32dahq0bx5rp1krcdx3a53asj21jvhk-has-multi-out.drv")]
+#[case::structured_attrs(
+    "structured-attrs",
+    "9lj1lkjm2ag622mh4h9rpy6j607an8g2-structured-attrs.drv"
+)]
+#[case::unicode("unicode", "52a9id8hx688hvlnz4d1n25ml1jdykz0-unicode.drv")]
+fn derivation_path(#[case] name: &str, #[case] expected_path: &str) {
+    let json_bytes = fs::read(format!("{}/ok/{}.json", RESOURCES_PATHS, expected_path))
+        .expect("unable to read JSON");
     let derivation: Derivation =
         serde_json::from_slice(&json_bytes).expect("JSON was not well-formatted");
 
@@ -161,7 +164,7 @@ fn derivation_path(name: &str, expected_path: &str) {
 
 /// This trims all output paths from a Derivation struct,
 /// by setting outputs[$outputName].path and environment[$outputName] to the empty string.
-fn derivation_with_trimmed_output_paths(derivation: &Derivation) -> Derivation {
+fn derivation_without_output_paths(derivation: &Derivation) -> Derivation {
     let mut trimmed_env = derivation.environment.clone();
     let mut trimmed_outputs = derivation.outputs.clone();
 
@@ -185,43 +188,49 @@ fn derivation_with_trimmed_output_paths(derivation: &Derivation) -> Derivation {
     }
 }
 
-#[test_case("0hm2f1psjpcwg8fijsmr4wwxrx59s092-bar.drv", hex!("724f3e3634fce4cbbbd3483287b8798588e80280660b9a63fd13a1bc90485b33"); "fixed_sha256")]
-#[test_case("ss2p4wmxijn652haqyd7dckxwl4c7hxx-bar.drv", hex!("c79aebd0ce3269393d4a1fde2cbd1d975d879b40f0bf40a48f550edc107fd5df");"fixed-sha1")]
-fn derivation_or_fod_hash(drv_path: &str, expected_digest: [u8; 32]) {
+#[rstest]
+#[case::fixed_sha256("0hm2f1psjpcwg8fijsmr4wwxrx59s092-bar.drv", hex!("724f3e3634fce4cbbbd3483287b8798588e80280660b9a63fd13a1bc90485b33"))]
+#[case::fixed_sha1("ss2p4wmxijn652haqyd7dckxwl4c7hxx-bar.drv", hex!("c79aebd0ce3269393d4a1fde2cbd1d975d879b40f0bf40a48f550edc107fd5df"))]
+fn hash_derivation_modulo_fixed(#[case] drv_path: &str, #[case] expected_digest: [u8; 32]) {
     // read in the fixture
-    let json_bytes = read_file(&format!("{}/ok/{}.json", RESOURCES_PATHS, drv_path));
+    let json_bytes =
+        fs::read(format!("{}/ok/{}.json", RESOURCES_PATHS, drv_path)).expect("unable to read JSON");
     let drv: Derivation = serde_json::from_slice(&json_bytes).expect("must deserialize");
 
-    let actual = drv.derivation_or_fod_hash(|_| panic!("must not be called"));
+    let actual = drv.hash_derivation_modulo(|_| panic!("must not be called"));
     assert_eq!(expected_digest, actual);
 }
 
 /// This reads a Derivation (in A-Term), trims out all fields containing
 /// calculated output paths, then triggers the output path calculation and
 /// compares the struct to match what was originally read in.
-#[test_case("bar","0hm2f1psjpcwg8fijsmr4wwxrx59s092-bar.drv"; "fixed_sha256")]
-#[test_case("foo", "4wvvbi4jwn0prsdxb7vs673qa5h9gr7x-foo.drv"; "simple-sha256")]
-#[test_case("bar", "ss2p4wmxijn652haqyd7dckxwl4c7hxx-bar.drv"; "fixed-sha1")]
-#[test_case("foo", "ch49594n9avinrf8ip0aslidkc4lxkqv-foo.drv"; "simple-sha1")]
-#[test_case("has-multi-out", "h32dahq0bx5rp1krcdx3a53asj21jvhk-has-multi-out.drv"; "multiple-outputs")]
-#[test_case("structured-attrs", "9lj1lkjm2ag622mh4h9rpy6j607an8g2-structured-attrs.drv"; "structured-attrs")]
-#[test_case("unicode", "52a9id8hx688hvlnz4d1n25ml1jdykz0-unicode.drv"; "unicode")]
-#[test_case("cp1252", "m1vfixn8iprlf0v9abmlrz7mjw1xj8kp-cp1252.drv"; "cp1252")]
-#[test_case("latin1", "x6p0hg79i3wg0kkv7699935f7rrj9jf3-latin1.drv"; "latin1")]
-fn output_paths(name: &str, drv_path_str: &str) {
+#[rstest]
+#[case::fixed_sha256("bar", "0hm2f1psjpcwg8fijsmr4wwxrx59s092-bar.drv")]
+#[case::simple_sha256("foo", "4wvvbi4jwn0prsdxb7vs673qa5h9gr7x-foo.drv")]
+#[case::fixed_sha1("bar", "ss2p4wmxijn652haqyd7dckxwl4c7hxx-bar.drv")]
+#[case::simple_sha1("foo", "ch49594n9avinrf8ip0aslidkc4lxkqv-foo.drv")]
+#[case::multiple_outputs("has-multi-out", "h32dahq0bx5rp1krcdx3a53asj21jvhk-has-multi-out.drv")]
+#[case::structured_attrs(
+    "structured-attrs",
+    "9lj1lkjm2ag622mh4h9rpy6j607an8g2-structured-attrs.drv"
+)]
+#[case::unicode("unicode", "52a9id8hx688hvlnz4d1n25ml1jdykz0-unicode.drv")]
+#[case::cp1252("cp1252", "m1vfixn8iprlf0v9abmlrz7mjw1xj8kp-cp1252.drv")]
+#[case::latin1("latin1", "x6p0hg79i3wg0kkv7699935f7rrj9jf3-latin1.drv")]
+fn output_paths(#[case] name: &str, #[case] drv_path_str: &str) {
     // read in the derivation
     let expected_derivation = Derivation::from_aterm_bytes(
-        read_file(&format!("{}/ok/{}", RESOURCES_PATHS, drv_path_str)).as_ref(),
+        &fs::read(format!("{}/ok/{}", RESOURCES_PATHS, drv_path_str)).expect("unable to read .drv"),
     )
     .expect("must succeed");
 
-    // create a version with trimmed output paths, simulating we constructed
-    // the struct.
-    let mut derivation = derivation_with_trimmed_output_paths(&expected_derivation);
+    // create a version without output paths, simulating we constructed the
+    // struct.
+    let mut derivation = derivation_without_output_paths(&expected_derivation);
 
-    // calculate the derivation_or_fod_hash of derivation
+    // calculate the hash_derivation_modulo of the Derivation
     // We don't expect the lookup function to be called for most derivations.
-    let calculated_derivation_or_fod_hash = derivation.derivation_or_fod_hash(|parent_drv_path| {
+    let actual_hash_derivation_modulo = derivation.hash_derivation_modulo(|parent_drv_path| {
         // 4wvvbi4jwn0prsdxb7vs673qa5h9gr7x-foo.drv may lookup /nix/store/0hm2f1psjpcwg8fijsmr4wwxrx59s092-bar.drv
         // ch49594n9avinrf8ip0aslidkc4lxkqv-foo.drv may lookup /nix/store/ss2p4wmxijn652haqyd7dckxwl4c7hxx-bar.drv
         if name == "foo"
@@ -234,20 +243,21 @@ fn output_paths(name: &str, drv_path_str: &str) {
             // drv_name, and calculating its drv replacement (on the non-stripped version)
             // In a real-world scenario you would have already done this during construction.
 
-            let json_bytes = read_file(&format!(
+            let json_bytes = fs::read(format!(
                 "{}/ok/{}.json",
                 RESOURCES_PATHS,
                 Path::new(&parent_drv_path.to_string())
                     .file_name()
                     .unwrap()
                     .to_string_lossy()
-            ));
+            ))
+            .expect("unable to read JSON");
 
             let drv: Derivation = serde_json::from_slice(&json_bytes).expect("must deserialize");
 
-            // calculate derivation_or_fod_hash for each parent.
+            // calculate hash_derivation_modulo for each parent.
             // This may not trigger subsequent requests, as both parents are FOD.
-            drv.derivation_or_fod_hash(|_| panic!("must not lookup"))
+            drv.hash_derivation_modulo(|_| panic!("must not lookup"))
         } else {
             // we only expect this to be called in the "foo" testcase, for the "bar derivations"
             panic!("may only be called for foo testcase on bar derivations");
@@ -255,7 +265,7 @@ fn output_paths(name: &str, drv_path_str: &str) {
     });
 
     derivation
-        .calculate_output_paths(name, &calculated_derivation_or_fod_hash)
+        .calculate_output_paths(name, &actual_hash_derivation_modulo)
         .unwrap();
 
     // The derivation should now look like it was before
@@ -333,15 +343,16 @@ fn output_path_construction() {
     // calculate bar output paths
     let bar_calc_result = bar_drv.calculate_output_paths(
         "bar",
-        &bar_drv.derivation_or_fod_hash(|_| panic!("is FOD, should not lookup")),
+        &bar_drv.hash_derivation_modulo(|_| panic!("is FOD, should not lookup")),
     );
     assert!(bar_calc_result.is_ok());
 
     // ensure it matches our bar fixture
-    let bar_json_bytes = read_file(&format!(
+    let bar_json_bytes = fs::read(format!(
         "{}/ok/{}.json",
         RESOURCES_PATHS, "0hm2f1psjpcwg8fijsmr4wwxrx59s092-bar.drv"
-    ));
+    ))
+    .expect("unable to read JSON");
     let bar_drv_expected: Derivation =
         serde_json::from_slice(&bar_json_bytes).expect("must deserialize");
     assert_eq!(bar_drv_expected, bar_drv);
@@ -349,8 +360,8 @@ fn output_path_construction() {
     // now construct foo, which requires bar_drv
     // Note how we refer to the output path, drv name and replacement_str (with calculated output paths) of bar.
     let bar_output_path = &bar_drv.outputs.get("out").expect("must exist").path;
-    let bar_drv_derivation_or_fod_hash =
-        bar_drv.derivation_or_fod_hash(|_| panic!("is FOD, should not lookup"));
+    let bar_drv_hash_derivation_modulo =
+        bar_drv.hash_derivation_modulo(|_| panic!("is FOD, should not lookup"));
 
     let bar_drv_path = bar_drv
         .calculate_derivation_path("bar")
@@ -397,20 +408,21 @@ fn output_path_construction() {
     // calculate foo output paths
     let foo_calc_result = foo_drv.calculate_output_paths(
         "foo",
-        &foo_drv.derivation_or_fod_hash(|drv_path| {
+        &foo_drv.hash_derivation_modulo(|drv_path| {
             if drv_path.to_string() != "0hm2f1psjpcwg8fijsmr4wwxrx59s092-bar.drv" {
                 panic!("lookup called with unexpected drv_path: {}", drv_path);
             }
-            bar_drv_derivation_or_fod_hash
+            bar_drv_hash_derivation_modulo
         }),
     );
     assert!(foo_calc_result.is_ok());
 
     // ensure it matches our foo fixture
-    let foo_json_bytes = read_file(&format!(
+    let foo_json_bytes = fs::read(format!(
         "{}/ok/{}.json",
         RESOURCES_PATHS, "4wvvbi4jwn0prsdxb7vs673qa5h9gr7x-foo.drv",
-    ));
+    ))
+    .expect("unable to read JSON");
     let foo_drv_expected: Derivation =
         serde_json::from_slice(&foo_json_bytes).expect("must deserialize");
     assert_eq!(foo_drv_expected, foo_drv);
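
The fixture-driven tests above rely on rstest's compile-time file globbing, which replaces the forked test-generator dependency dropped from Cargo.toml. The shape, reduced to its essentials (hypothetical test; the glob must resolve relative to the crate root at build time):

```rust
use std::{fs, path::PathBuf};

use rstest::rstest;

// rstest expands the glob at compile time, emitting one test per
// matching file and skipping anything the #[exclude] regex matches.
#[rstest]
fn fixture_parses(
    #[files("src/derivation/tests/derivation_tests/ok/*.drv")]
    #[exclude("(cp1252)|(latin1)")]
    path: PathBuf,
) {
    let bytes = fs::read(&path).expect("unable to read fixture");
    assert!(!bytes.is_empty());
}
```
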
diff --git a/tvix/nix-compat/src/nar/mod.rs b/tvix/nix-compat/src/nar/mod.rs
index 058977f4fc..c678d26ffb 100644
--- a/tvix/nix-compat/src/nar/mod.rs
+++ b/tvix/nix-compat/src/nar/mod.rs
@@ -1,4 +1,4 @@
-mod wire;
+pub(crate) mod wire;
 
 pub mod reader;
 pub mod writer;
diff --git a/tvix/nix-compat/src/nar/reader/async/mod.rs b/tvix/nix-compat/src/nar/reader/async/mod.rs
new file mode 100644
index 0000000000..0808fba38c
--- /dev/null
+++ b/tvix/nix-compat/src/nar/reader/async/mod.rs
@@ -0,0 +1,173 @@
+use std::{
+    mem::MaybeUninit,
+    pin::Pin,
+    task::{self, Poll},
+};
+
+use tokio::io::{self, AsyncBufRead, AsyncRead, ErrorKind::InvalidData};
+
+// Required reading for understanding this module.
+use crate::{
+    nar::{self, wire::PadPar},
+    wire::{self, BytesReader},
+};
+
+mod read;
+#[cfg(test)]
+mod test;
+
+pub type Reader<'a> = dyn AsyncBufRead + Unpin + Send + 'a;
+
+/// Start reading a NAR file from `reader`.
+pub async fn open<'a, 'r>(reader: &'a mut Reader<'r>) -> io::Result<Node<'a, 'r>> {
+    read::token(reader, &nar::wire::TOK_NAR).await?;
+    Node::new(reader).await
+}
+
+pub enum Node<'a, 'r: 'a> {
+    Symlink {
+        target: Vec<u8>,
+    },
+    File {
+        executable: bool,
+        reader: FileReader<'a, 'r>,
+    },
+    Directory(DirReader<'a, 'r>),
+}
+
+impl<'a, 'r: 'a> Node<'a, 'r> {
+    /// Start reading a [Node], matching the next [wire::Node].
+    ///
+    /// Reading the terminating [wire::TOK_PAR] is done immediately for [Node::Symlink],
+    /// but is otherwise left to [DirReader] or [BytesReader].
+    async fn new(reader: &'a mut Reader<'r>) -> io::Result<Self> {
+        Ok(match read::tag(reader).await? {
+            nar::wire::Node::Sym => {
+                let target = wire::read_bytes(reader, 1..=nar::wire::MAX_TARGET_LEN).await?;
+
+                if target.contains(&0) {
+                    return Err(InvalidData.into());
+                }
+
+                read::token(reader, &nar::wire::TOK_PAR).await?;
+
+                Node::Symlink { target }
+            }
+            tag @ (nar::wire::Node::Reg | nar::wire::Node::Exe) => Node::File {
+                executable: tag == nar::wire::Node::Exe,
+                reader: FileReader {
+                    inner: BytesReader::new_internal(reader, ..).await?,
+                },
+            },
+            nar::wire::Node::Dir => Node::Directory(DirReader::new(reader)),
+        })
+    }
+}
+
+/// File contents, readable through the [AsyncRead] trait.
+///
+/// It comes with some caveats:
+///  * You must always read the entire file, unless you intend to abandon the entire archive reader.
+///  * You must abandon the entire archive reader upon the first error.
+///
+/// It's fine to read exactly `reader.len()` bytes without ever seeing an explicit EOF.
+pub struct FileReader<'a, 'r> {
+    inner: BytesReader<&'a mut Reader<'r>, PadPar>,
+}
+
+impl<'a, 'r> FileReader<'a, 'r> {
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    pub fn len(&self) -> u64 {
+        self.inner.len()
+    }
+}
+
+impl<'a, 'r> AsyncRead for FileReader<'a, 'r> {
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut task::Context,
+        buf: &mut io::ReadBuf,
+    ) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.get_mut().inner).poll_read(cx, buf)
+    }
+}
+
+impl<'a, 'r> AsyncBufRead for FileReader<'a, 'r> {
+    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<io::Result<&[u8]>> {
+        Pin::new(&mut self.get_mut().inner).poll_fill_buf(cx)
+    }
+
+    fn consume(self: Pin<&mut Self>, amt: usize) {
+        Pin::new(&mut self.get_mut().inner).consume(amt)
+    }
+}
+
+/// A directory iterator, yielding a sequence of [Node]s.
+/// It must be fully consumed before reading further from the [DirReader] that produced it, if any.
+pub struct DirReader<'a, 'r> {
+    reader: &'a mut Reader<'r>,
+    /// Previous directory entry name.
+    /// We have to hang onto this to enforce name monotonicity.
+    prev_name: Vec<u8>,
+}
+
+pub struct Entry<'a, 'r> {
+    pub name: &'a [u8],
+    pub node: Node<'a, 'r>,
+}
+
+impl<'a, 'r> DirReader<'a, 'r> {
+    fn new(reader: &'a mut Reader<'r>) -> Self {
+        Self {
+            reader,
+            prev_name: vec![],
+        }
+    }
+
+    /// Read the next [Entry] from the directory.
+    ///
+    /// We explicitly don't implement [Iterator], since treating this as
+    /// a regular Rust iterator will surely lead you astray.
+    ///
+    ///  * You must always consume the entire iterator, unless you abandon the entire archive reader.
+    ///  * You must abandon the entire archive reader on the first error.
+    ///  * You must abandon the directory reader upon the first [None].
+    ///  * Even if you know the amount of elements up front, you must keep reading until you encounter [None].
+    pub async fn next(&mut self) -> io::Result<Option<Entry<'_, 'r>>> {
+        // COME FROM the previous iteration: if we've already read an entry,
+        // read its terminating TOK_PAR here.
+        if !self.prev_name.is_empty() {
+            read::token(self.reader, &nar::wire::TOK_PAR).await?;
+        }
+
+        if let nar::wire::Entry::None = read::tag(self.reader).await? {
+            return Ok(None);
+        }
+
+        let mut name = [MaybeUninit::uninit(); nar::wire::MAX_NAME_LEN + 1];
+        let name =
+            wire::read_bytes_buf(self.reader, &mut name, 1..=nar::wire::MAX_NAME_LEN).await?;
+
+        if name.contains(&0) || name.contains(&b'/') || name == b"." || name == b".." {
+            return Err(InvalidData.into());
+        }
+
+        // Enforce strict monotonicity of directory entry names.
+        if &self.prev_name[..] >= name {
+            return Err(InvalidData.into());
+        }
+
+        self.prev_name.clear();
+        self.prev_name.extend_from_slice(name);
+
+        read::token(self.reader, &nar::wire::TOK_NOD).await?;
+
+        Ok(Some(Entry {
+            name: &self.prev_name,
+            node: Node::new(self.reader).await?,
+        }))
+    }
+}
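
For consumers of this new async reader, the contract mirrors the sync one: drain every file to EOF and iterate every directory to `None` before touching the parent again. A rough usage sketch that walks a single directory level (standalone code; the module path `nix_compat::nar::reader::r#async` is assumed from this diff):

```rust
use tokio::io::{self, AsyncReadExt};

use nix_compat::nar::reader::r#async as nar_reader;

// Collect top-level entry names from a NAR held in memory. Nested
// directories would need the same treatment recursively; omitted here.
async fn list_top_level(nar: &[u8]) -> io::Result<Vec<Vec<u8>>> {
    let mut cursor = std::io::Cursor::new(nar);
    let mut names = Vec::new();

    if let nar_reader::Node::Directory(mut dir) = nar_reader::open(&mut cursor).await? {
        while let Some(entry) = dir.next().await? {
            names.push(entry.name.to_vec());
            match entry.node {
                nar_reader::Node::File { mut reader, .. } => {
                    // files must be read to the end, per the contract above
                    let mut sink = Vec::new();
                    reader.read_to_end(&mut sink).await?;
                }
                nar_reader::Node::Symlink { .. } => {}
                nar_reader::Node::Directory(_) => unimplemented!("recurse"),
            }
        }
    }

    Ok(names)
}
```
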
diff --git a/tvix/nix-compat/src/nar/reader/async/read.rs b/tvix/nix-compat/src/nar/reader/async/read.rs
new file mode 100644
index 0000000000..2adf894922
--- /dev/null
+++ b/tvix/nix-compat/src/nar/reader/async/read.rs
@@ -0,0 +1,69 @@
+use tokio::io::{
+    self, AsyncReadExt,
+    ErrorKind::{InvalidData, UnexpectedEof},
+};
+
+use crate::nar::wire::Tag;
+
+use super::Reader;
+
+/// Consume a known token from the reader.
+pub async fn token<const N: usize>(reader: &mut Reader<'_>, token: &[u8; N]) -> io::Result<()> {
+    let mut buf = [0u8; N];
+
+    // This implements something similar to [AsyncReadExt::read_exact], but verifies that
+    // the input data matches the token while we read it. These two slices respectively
+    // represent the remaining token to be verified, and the remaining input buffer.
+    let mut token = &token[..];
+    let mut buf = &mut buf[..];
+
+    while !token.is_empty() {
+        match reader.read(buf).await? {
+            0 => {
+                return Err(UnexpectedEof.into());
+            }
+            n => {
+                let (t, b);
+                (t, token) = token.split_at(n);
+                (b, buf) = buf.split_at_mut(n);
+
+                if t != b {
+                    return Err(InvalidData.into());
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+/// Consume a [Tag] from the reader.
+pub async fn tag<T: Tag>(reader: &mut Reader<'_>) -> io::Result<T> {
+    let mut buf = T::make_buf();
+    let buf = buf.as_mut();
+
+    // first read the known minimum length…
+    reader.read_exact(&mut buf[..T::MIN]).await?;
+
+    // then decide which tag we're expecting
+    let tag = T::from_u8(buf[T::OFF]).ok_or(InvalidData)?;
+    let (head, tail) = tag.as_bytes().split_at(T::MIN);
+
+    // make sure what we've read so far is valid
+    if buf[..T::MIN] != *head {
+        return Err(InvalidData.into());
+    }
+
+    // …then read the rest, if any
+    if !tail.is_empty() {
+        let rest = tail.len();
+        reader.read_exact(&mut buf[..rest]).await?;
+
+        // and make sure it's what we expect
+        if buf[..rest] != *tail {
+            return Err(InvalidData.into());
+        }
+    }
+
+    Ok(tag)
+}
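
Both helpers fail fast on mismatch rather than buffering a full compare. The core idea of `token`, restated as a synchronous, index-based sketch (hypothetical standalone function, not part of the crate):

```rust
use std::io::{self, Read};
use std::io::ErrorKind::{InvalidData, UnexpectedEof};

// Read a known token, verifying each chunk as it arrives, so a
// mismatching stream is rejected without waiting for all N bytes.
fn expect_token<const N: usize>(reader: &mut impl Read, token: &[u8; N]) -> io::Result<()> {
    let mut buf = [0u8; N];
    let mut pos = 0;

    while pos < N {
        match reader.read(&mut buf[pos..])? {
            0 => return Err(UnexpectedEof.into()),
            n => {
                if buf[pos..pos + n] != token[pos..pos + n] {
                    return Err(InvalidData.into());
                }
                pos += n;
            }
        }
    }

    Ok(())
}
```
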
diff --git a/tvix/nix-compat/src/nar/reader/async/test.rs b/tvix/nix-compat/src/nar/reader/async/test.rs
new file mode 100644
index 0000000000..7bc1f8942f
--- /dev/null
+++ b/tvix/nix-compat/src/nar/reader/async/test.rs
@@ -0,0 +1,310 @@
+use tokio::io::AsyncReadExt;
+
+mod nar {
+    pub use crate::nar::reader::r#async as reader;
+}
+
+#[tokio::test]
+async fn symlink() {
+    let mut f = std::io::Cursor::new(include_bytes!("../../tests/symlink.nar"));
+    let node = nar::reader::open(&mut f).await.unwrap();
+
+    match node {
+        nar::reader::Node::Symlink { target } => {
+            assert_eq!(
+                &b"/nix/store/somewhereelse"[..],
+                &target,
+                "target must match"
+            );
+        }
+        _ => panic!("unexpected type"),
+    }
+}
+
+#[tokio::test]
+async fn file() {
+    let mut f = std::io::Cursor::new(include_bytes!("../../tests/helloworld.nar"));
+    let node = nar::reader::open(&mut f).await.unwrap();
+
+    match node {
+        nar::reader::Node::File {
+            executable,
+            mut reader,
+        } => {
+            assert!(!executable);
+            let mut buf = vec![];
+            reader
+                .read_to_end(&mut buf)
+                .await
+                .expect("read must succeed");
+            assert_eq!(&b"Hello World!"[..], &buf);
+        }
+        _ => panic!("unexpected type"),
+    }
+}
+
+#[tokio::test]
+async fn complicated() {
+    let mut f = std::io::Cursor::new(include_bytes!("../../tests/complicated.nar"));
+    let node = nar::reader::open(&mut f).await.unwrap();
+
+    match node {
+        nar::reader::Node::Directory(mut dir_reader) => {
+            // first entry is .keep, an empty regular file.
+            must_read_file(
+                ".keep",
+                dir_reader
+                    .next()
+                    .await
+                    .expect("next must succeed")
+                    .expect("must be some"),
+            )
+            .await;
+
+            // second entry is aa, a symlink to /nix/store/somewhereelse
+            must_be_symlink(
+                "aa",
+                "/nix/store/somewhereelse",
+                dir_reader
+                    .next()
+                    .await
+                    .expect("next must be some")
+                    .expect("must be some"),
+            );
+
+            {
+                // third entry is a directory called "keep"
+                let entry = dir_reader
+                    .next()
+                    .await
+                    .expect("next must be some")
+                    .expect("must be some");
+
+                assert_eq!(b"keep", entry.name);
+
+                match entry.node {
+                    nar::reader::Node::Directory(mut subdir_reader) => {
+                        {
+                            // first entry is .keep, an empty regular file.
+                            let entry = subdir_reader
+                                .next()
+                                .await
+                                .expect("next must succeed")
+                                .expect("must be some");
+
+                            must_read_file(".keep", entry).await;
+                        }
+
+                        // we must read the None
+                        assert!(
+                            subdir_reader
+                                .next()
+                                .await
+                                .expect("next must succeed")
+                                .is_none(),
+                            "keep directory contains only .keep"
+                        );
+                    }
+                    _ => panic!("unexpected type for keep/.keep"),
+                }
+            };
+
+            // reading more entries yields None (and we actually must read until this)
+            assert!(dir_reader.next().await.expect("must succeed").is_none());
+        }
+        _ => panic!("unexpected type"),
+    }
+}
+
+#[tokio::test]
+#[should_panic]
+#[ignore = "TODO: async poisoning"]
+async fn file_read_abandoned() {
+    let mut f = std::io::Cursor::new(include_bytes!("../../tests/complicated.nar"));
+    let node = nar::reader::open(&mut f).await.unwrap();
+
+    match node {
+        nar::reader::Node::Directory(mut dir_reader) => {
+            // first entry is .keep, an empty regular file.
+            {
+                let entry = dir_reader
+                    .next()
+                    .await
+                    .expect("next must succeed")
+                    .expect("must be some");
+
+                assert_eq!(b".keep", entry.name);
+                // don't bother to finish reading it.
+            };
+
+            // this should panic (not return an error), because we are meant to abandon the archive reader now.
+            assert!(dir_reader.next().await.expect("must succeed").is_none());
+        }
+        _ => panic!("unexpected type"),
+    }
+}
+
+#[tokio::test]
+#[should_panic]
+#[ignore = "TODO: async poisoning"]
+async fn dir_read_abandoned() {
+    let mut f = std::io::Cursor::new(include_bytes!("../../tests/complicated.nar"));
+    let node = nar::reader::open(&mut f).await.unwrap();
+
+    match node {
+        nar::reader::Node::Directory(mut dir_reader) => {
+            // first entry is .keep, an empty regular file.
+            must_read_file(
+                ".keep",
+                dir_reader
+                    .next()
+                    .await
+                    .expect("next must succeed")
+                    .expect("must be some"),
+            )
+            .await;
+
+            // second entry is aa, a symlink to /nix/store/somewhereelse
+            must_be_symlink(
+                "aa",
+                "/nix/store/somewhereelse",
+                dir_reader
+                    .next()
+                    .await
+                    .expect("next must be some")
+                    .expect("must be some"),
+            );
+
+            {
+                // third entry is a directory called "keep"
+                let entry = dir_reader
+                    .next()
+                    .await
+                    .expect("next must be some")
+                    .expect("must be some");
+
+                assert_eq!(b"keep", entry.name);
+
+                match entry.node {
+                    nar::reader::Node::Directory(_) => {
+                        // don't finish using it, which poisons the archive reader
+                    }
+                    _ => panic!("unexpected type for keep/.keep"),
+                }
+            };
+
+            // this should panic, because we didn't finish reading the child subdirectory
+            assert!(dir_reader.next().await.expect("must succeed").is_none());
+        }
+        _ => panic!("unexpected type"),
+    }
+}
+
+#[tokio::test]
+#[should_panic]
+#[ignore = "TODO: async poisoning"]
+async fn dir_read_after_none() {
+    let mut f = std::io::Cursor::new(include_bytes!("../../tests/complicated.nar"));
+    let node = nar::reader::open(&mut f).await.unwrap();
+
+    match node {
+        nar::reader::Node::Directory(mut dir_reader) => {
+            // first entry is .keep, an empty regular file.
+            must_read_file(
+                ".keep",
+                dir_reader
+                    .next()
+                    .await
+                    .expect("next must succeed")
+                    .expect("must be some"),
+            )
+            .await;
+
+            // second entry is aa, a symlink to /nix/store/somewhereelse
+            must_be_symlink(
+                "aa",
+                "/nix/store/somewhereelse",
+                dir_reader
+                    .next()
+                    .await
+                    .expect("next must be some")
+                    .expect("must be some"),
+            );
+
+            {
+                // third entry is a directory called "keep"
+                let entry = dir_reader
+                    .next()
+                    .await
+                    .expect("next must be some")
+                    .expect("must be some");
+
+                assert_eq!(b"keep", entry.name);
+
+                match entry.node {
+                    nar::reader::Node::Directory(mut subdir_reader) => {
+                        // first entry is .keep, an empty regular file.
+                        must_read_file(
+                            ".keep",
+                            subdir_reader
+                                .next()
+                                .await
+                                .expect("next must succeed")
+                                .expect("must be some"),
+                        )
+                        .await;
+
+                        // we must read the None
+                        assert!(
+                            subdir_reader
+                                .next()
+                                .await
+                                .expect("next must succeed")
+                                .is_none(),
+                            "keep directory contains only .keep"
+                        );
+                    }
+                    _ => panic!("unexpected type for keep/.keep"),
+                }
+            };
+
+            // reading more entries yields None (and we actually must read until this)
+            assert!(dir_reader.next().await.expect("must succeed").is_none());
+
+            // this should panic, because we already got a none so we're meant to stop.
+            dir_reader.next().await.unwrap();
+            unreachable!()
+        }
+        _ => panic!("unexpected type"),
+    }
+}
+
+async fn must_read_file(name: &'static str, entry: nar::reader::Entry<'_, '_>) {
+    assert_eq!(name.as_bytes(), entry.name);
+
+    match entry.node {
+        nar::reader::Node::File {
+            executable,
+            mut reader,
+        } => {
+            assert!(!executable);
+            assert_eq!(reader.read(&mut [0]).await.unwrap(), 0);
+        }
+        _ => panic!("unexpected type for {}", name),
+    }
+}
+
+fn must_be_symlink(
+    name: &'static str,
+    exp_target: &'static str,
+    entry: nar::reader::Entry<'_, '_>,
+) {
+    assert_eq!(name.as_bytes(), entry.name);
+
+    match entry.node {
+        nar::reader::Node::Symlink { target } => {
+            assert_eq!(exp_target.as_bytes(), &target);
+        }
+        _ => panic!("unexpected type for {}", name),
+    }
+}
diff --git a/tvix/nix-compat/src/nar/reader/mod.rs b/tvix/nix-compat/src/nar/reader/mod.rs
index fa7ddc77f9..9e9237ead3 100644
--- a/tvix/nix-compat/src/nar/reader/mod.rs
+++ b/tvix/nix-compat/src/nar/reader/mod.rs
@@ -10,19 +10,50 @@ use std::io::{
     Read, Write,
 };
 
+#[cfg(not(debug_assertions))]
+use std::marker::PhantomData;
+
 // Required reading for understanding this module.
 use crate::nar::wire;
 
+#[cfg(feature = "async")]
+pub mod r#async;
+
 mod read;
 #[cfg(test)]
 mod test;
 
 pub type Reader<'a> = dyn BufRead + Send + 'a;
 
+struct ArchiveReader<'a, 'r> {
+    inner: &'a mut Reader<'r>,
+
+    /// In debug mode, also track when we need to abandon this archive reader.
+    /// The archive reader must be abandoned when:
+    ///   * An error is encountered at any point
+    ///   * A file or directory reader is dropped before being read entirely.
+    /// All of these checks vanish in release mode.
+    status: ArchiveReaderStatus<'a>,
+}
+
+macro_rules! try_or_poison {
+    ($it:expr, $ex:expr) => {
+        match $ex {
+            Ok(x) => x,
+            Err(e) => {
+                $it.status.poison();
+                return Err(e.into());
+            }
+        }
+    };
+}
 /// Start reading a NAR file from `reader`.
 pub fn open<'a, 'r>(reader: &'a mut Reader<'r>) -> io::Result<Node<'a, 'r>> {
     read::token(reader, &wire::TOK_NAR)?;
-    Node::new(reader)
+    Node::new(ArchiveReader {
+        inner: reader,
+        status: ArchiveReaderStatus::top(),
+    })
 }
 
 pub enum Node<'a, 'r> {
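
The `try_or_poison!` macro plus the debug-only `status` tracking give a cheap use-after-error guard: every fallible read marks the reader poisoned before propagating the error, and any later use trips an assertion in debug builds. Condensed into a standalone sketch (illustrative types only, not the crate's actual `ArchiveReader`):

```rust
use std::io::{self, BufRead};

// Wraps a reader and remembers whether an error was ever surfaced;
// using the reader again after an error is a caller bug, caught here.
struct Guarded<R> {
    inner: R,
    poisoned: bool,
}

impl<R: BufRead> Guarded<R> {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        assert!(!self.poisoned, "reader used after an earlier error");
        match self.inner.fill_buf() {
            Ok(buf) => Ok(buf),
            Err(e) => {
                self.poisoned = true;
                Err(e)
            }
        }
    }
}
```
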
@@ -41,21 +72,24 @@ impl<'a, 'r> Node<'a, 'r> {
     ///
     /// Reading the terminating [wire::TOK_PAR] is done immediately for [Node::Symlink],
     /// but is otherwise left to [DirReader] or [FileReader].
-    fn new(reader: &'a mut Reader<'r>) -> io::Result<Self> {
-        Ok(match read::tag(reader)? {
+    fn new(mut reader: ArchiveReader<'a, 'r>) -> io::Result<Self> {
+        Ok(match read::tag(reader.inner)? {
             wire::Node::Sym => {
-                let target = read::bytes(reader, wire::MAX_TARGET_LEN)?;
+                let target =
+                    try_or_poison!(reader, read::bytes(reader.inner, wire::MAX_TARGET_LEN));
 
                 if target.is_empty() || target.contains(&0) {
+                    reader.status.poison();
                     return Err(InvalidData.into());
                 }
 
-                read::token(reader, &wire::TOK_PAR)?;
+                try_or_poison!(reader, read::token(reader.inner, &wire::TOK_PAR));
+                reader.status.ready_parent(); // Immediately allow reading from parent again
 
                 Node::Symlink { target }
             }
             tag @ (wire::Node::Reg | wire::Node::Exe) => {
-                let len = read::u64(reader)?;
+                let len = try_or_poison!(&mut reader, read::u64(reader.inner));
 
                 Node::File {
                     executable: tag == wire::Node::Exe,
@@ -74,10 +108,8 @@ impl<'a, 'r> Node<'a, 'r> {
 ///  * You must abandon the entire archive reader upon the first error.
 ///
 /// It's fine to read exactly `reader.len()` bytes without ever seeing an explicit EOF.
-///
-/// TODO(edef): enforce these in `#[cfg(debug_assertions)]`
 pub struct FileReader<'a, 'r> {
-    reader: &'a mut Reader<'r>,
+    reader: ArchiveReader<'a, 'r>,
     len: u64,
     /// Truncated original file length for padding computation.
     /// We only care about the 3 least significant bits; semantically, this is a u3.
@@ -87,12 +119,13 @@ pub struct FileReader<'a, 'r> {
 impl<'a, 'r> FileReader<'a, 'r> {
     /// Instantiate a new reader, starting after [wire::TOK_REG] or [wire::TOK_EXE].
     /// We handle the terminating [wire::TOK_PAR] on semantic EOF.
-    fn new(reader: &'a mut Reader<'r>, len: u64) -> io::Result<Self> {
+    fn new(mut reader: ArchiveReader<'a, 'r>, len: u64) -> io::Result<Self> {
         // For zero-length files, we have to read the terminating TOK_PAR
         // immediately, since FileReader::read may never be called; we've
         // already reached semantic EOF by definition.
         if len == 0 {
-            read::token(reader, &wire::TOK_PAR)?;
+            read::token(reader.inner, &wire::TOK_PAR)?;
+            reader.status.ready_parent();
         }
 
         Ok(Self {
@@ -121,9 +154,12 @@ impl FileReader<'_, '_> {
             return Ok(&[]);
         }
 
-        let mut buf = self.reader.fill_buf()?;
+        self.reader.check_correct();
+
+        let mut buf = try_or_poison!(self.reader, self.reader.inner.fill_buf());
 
         if buf.is_empty() {
+            self.reader.status.poison();
             return Err(UnexpectedEof.into());
         }
 
@@ -141,12 +177,14 @@ impl FileReader<'_, '_> {
             return Ok(());
         }
 
+        self.reader.check_correct();
+
         self.len = self
             .len
             .checked_sub(n as u64)
             .expect("consumed bytes past EOF");
 
-        self.reader.consume(n);
+        self.reader.inner.consume(n);
 
         if self.is_empty() {
             self.finish()?;
@@ -159,7 +197,7 @@ impl FileReader<'_, '_> {
     pub fn copy(&mut self, mut dst: impl Write) -> io::Result<()> {
         while !self.is_empty() {
             let buf = self.fill_buf()?;
-            let n = dst.write(buf)?;
+            let n = try_or_poison!(self.reader, dst.write(buf));
             self.consume(n)?;
         }
 
@@ -173,14 +211,17 @@ impl Read for FileReader<'_, '_> {
             return Ok(0);
         }
 
+        self.reader.check_correct();
+
         if buf.len() as u64 > self.len {
             buf = &mut buf[..self.len as usize];
         }
 
-        let n = self.reader.read(buf)?;
+        let n = try_or_poison!(self.reader, self.reader.inner.read(buf));
         self.len -= n as u64;
 
         if n == 0 {
+            self.reader.status.poison();
             return Err(UnexpectedEof.into());
         }
 
@@ -200,36 +241,42 @@ impl FileReader<'_, '_> {
 
         if pad != 0 {
             let mut buf = [0; 8];
-            self.reader.read_exact(&mut buf[pad..])?;
+            try_or_poison!(self.reader, self.reader.inner.read_exact(&mut buf[pad..]));
 
             if buf != [0; 8] {
+                self.reader.status.poison();
                 return Err(InvalidData.into());
             }
         }
 
-        read::token(self.reader, &wire::TOK_PAR)
+        try_or_poison!(self.reader, read::token(self.reader.inner, &wire::TOK_PAR));
+
+        // Done with reading this file, allow going back up the chain of readers
+        self.reader.status.ready_parent();
+
+        Ok(())
     }
 }
 
 /// A directory iterator, yielding a sequence of [Node]s.
 /// It must be fully consumed before reading further from the [DirReader] that produced it, if any.
 pub struct DirReader<'a, 'r> {
-    reader: &'a mut Reader<'r>,
+    reader: ArchiveReader<'a, 'r>,
     /// Previous directory entry name.
     /// We have to hang onto this to enforce name monotonicity.
-    prev_name: Option<Vec<u8>>,
+    prev_name: Vec<u8>,
 }
 
 pub struct Entry<'a, 'r> {
-    pub name: Vec<u8>,
+    pub name: &'a [u8],
     pub node: Node<'a, 'r>,
 }
 
 impl<'a, 'r> DirReader<'a, 'r> {
-    fn new(reader: &'a mut Reader<'r>) -> Self {
+    fn new(reader: ArchiveReader<'a, 'r>) -> Self {
         Self {
             reader,
-            prev_name: None,
+            prev_name: vec![],
         }
     }
 
@@ -242,23 +289,28 @@ impl<'a, 'r> DirReader<'a, 'r> {
     ///  * You must abandon the entire archive reader on the first error.
     ///  * You must abandon the directory reader upon the first [None].
     ///  * Even if you know the number of elements up front, you must keep reading until you encounter [None].
-    ///
-    /// TODO(edef): enforce these in `#[cfg(debug_assertions)]`
     #[allow(clippy::should_implement_trait)]
-    pub fn next(&mut self) -> io::Result<Option<Entry>> {
+    pub fn next(&mut self) -> io::Result<Option<Entry<'_, 'r>>> {
+        self.reader.check_correct();
+
         // COME FROM the previous iteration: if we've already read an entry,
         // read its terminating TOK_PAR here.
-        if self.prev_name.is_some() {
-            read::token(self.reader, &wire::TOK_PAR)?;
+        if !self.prev_name.is_empty() {
+            try_or_poison!(self.reader, read::token(self.reader.inner, &wire::TOK_PAR));
         }
 
         // Determine if there are more entries to follow
-        if let wire::Entry::None = read::tag(self.reader)? {
+        if let wire::Entry::None = try_or_poison!(self.reader, read::tag(self.reader.inner)) {
             // We've reached the end of this directory.
+            self.reader.status.ready_parent();
             return Ok(None);
         }
 
-        let name = read::bytes(self.reader, wire::MAX_NAME_LEN)?;
+        let mut name = [0; wire::MAX_NAME_LEN + 1];
+        let name = try_or_poison!(
+            self.reader,
+            read::bytes_buf(self.reader.inner, &mut name, wire::MAX_NAME_LEN)
+        );
 
         if name.is_empty()
             || name.contains(&0)
@@ -266,28 +318,160 @@ impl<'a, 'r> DirReader<'a, 'r> {
             || name == b"."
             || name == b".."
         {
+            self.reader.status.poison();
             return Err(InvalidData.into());
         }
 
         // Enforce strict monotonicity of directory entry names.
-        match &mut self.prev_name {
-            None => {
-                self.prev_name = Some(name.clone());
+        if &self.prev_name[..] >= name {
+            self.reader.status.poison();
+            return Err(InvalidData.into());
+        }
+
+        self.prev_name.clear();
+        self.prev_name.extend_from_slice(name);
+
+        try_or_poison!(self.reader, read::token(self.reader.inner, &wire::TOK_NOD));
+
+        Ok(Some(Entry {
+            name: &self.prev_name,
+            // Don't need to worry about poisoning here: Node::new will do it for us if needed
+            node: Node::new(self.reader.child())?,
+        }))
+    }
+}
+
+/// We use a stack of statuses to:
+///   * Share poisoned state across all objects from the same underlying reader,
+///     so we can check they are abandoned when an error occurs
+///   * Make sure only the most recently created object is read from, and is fully exhausted
+///     before anything it was created from is used again.
+enum ArchiveReaderStatus<'a> {
+    #[cfg(not(debug_assertions))]
+    None(PhantomData<&'a ()>),
+    #[cfg(debug_assertions)]
+    StackTop { poisoned: bool, ready: bool },
+    #[cfg(debug_assertions)]
+    StackChild {
+        poisoned: &'a mut bool,
+        parent_ready: &'a mut bool,
+        ready: bool,
+    },
+}
+
+impl ArchiveReaderStatus<'_> {
+    fn top() -> Self {
+        #[cfg(debug_assertions)]
+        {
+            ArchiveReaderStatus::StackTop {
+                poisoned: false,
+                ready: true,
             }
-            Some(prev_name) => {
-                if *prev_name >= name {
-                    return Err(InvalidData.into());
-                }
+        }
 
-                name[..].clone_into(prev_name);
+        #[cfg(not(debug_assertions))]
+        ArchiveReaderStatus::None(PhantomData)
+    }
+
+    /// Poison all the objects sharing the same reader, to be used when an error occurs
+    fn poison(&mut self) {
+        match self {
+            #[cfg(not(debug_assertions))]
+            ArchiveReaderStatus::None(_) => {}
+            #[cfg(debug_assertions)]
+            ArchiveReaderStatus::StackTop { poisoned: x, .. } => *x = true,
+            #[cfg(debug_assertions)]
+            ArchiveReaderStatus::StackChild { poisoned: x, .. } => **x = true,
+        }
+    }
+
+    /// Mark the parent as ready, allowing it to be used again and preventing this reference to the reader from being used again.
+    fn ready_parent(&mut self) {
+        match self {
+            #[cfg(not(debug_assertions))]
+            ArchiveReaderStatus::None(_) => {}
+            #[cfg(debug_assertions)]
+            ArchiveReaderStatus::StackTop { ready, .. } => {
+                *ready = false;
+            }
+            #[cfg(debug_assertions)]
+            ArchiveReaderStatus::StackChild {
+                ready,
+                parent_ready,
+                ..
+            } => {
+                *ready = false;
+                **parent_ready = true;
             }
+        };
+    }
+
+    fn poisoned(&self) -> bool {
+        match self {
+            #[cfg(not(debug_assertions))]
+            ArchiveReaderStatus::None(_) => false,
+            #[cfg(debug_assertions)]
+            ArchiveReaderStatus::StackTop { poisoned, .. } => *poisoned,
+            #[cfg(debug_assertions)]
+            ArchiveReaderStatus::StackChild { poisoned, .. } => **poisoned,
         }
+    }
 
-        read::token(self.reader, &wire::TOK_NOD)?;
+    fn ready(&self) -> bool {
+        match self {
+            #[cfg(not(debug_assertions))]
+            ArchiveReaderStatus::None(_) => true,
+            #[cfg(debug_assertions)]
+            ArchiveReaderStatus::StackTop { ready, .. } => *ready,
+            #[cfg(debug_assertions)]
+            ArchiveReaderStatus::StackChild { ready, .. } => *ready,
+        }
+    }
+}
 
-        Ok(Some(Entry {
-            name,
-            node: Node::new(&mut self.reader)?,
-        }))
+impl<'a, 'r> ArchiveReader<'a, 'r> {
+    /// Create a new child reader from this one.
+    /// In debug mode, this reader will panic if used again before the new child is exhausted / calls `ready_parent`.
+    fn child(&mut self) -> ArchiveReader<'_, 'r> {
+        ArchiveReader {
+            inner: self.inner,
+            #[cfg(not(debug_assertions))]
+            status: ArchiveReaderStatus::None(PhantomData),
+            #[cfg(debug_assertions)]
+            status: match &mut self.status {
+                ArchiveReaderStatus::StackTop { poisoned, ready } => {
+                    *ready = false;
+                    ArchiveReaderStatus::StackChild {
+                        poisoned,
+                        parent_ready: ready,
+                        ready: true,
+                    }
+                }
+                ArchiveReaderStatus::StackChild {
+                    poisoned, ready, ..
+                } => {
+                    *ready = false;
+                    ArchiveReaderStatus::StackChild {
+                        poisoned,
+                        parent_ready: ready,
+                        ready: true,
+                    }
+                }
+            },
+        }
+    }
+
+    /// Check the reader is in the correct status.
+    /// Only does anything when debug assertions are on.
+    #[inline(always)]
+    fn check_correct(&self) {
+        assert!(
+            !self.status.poisoned(),
+            "Archive reader used after it was meant to be abandoned!"
+        );
+        assert!(
+            self.status.ready(),
+            "Non-ready archive reader used! (Should've been reading from something else)"
+        );
     }
 }
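
The `cfg` plumbing above can obscure the underlying discipline, so here is a self-contained sketch of the same parent/child readiness idea (a hypothetical `Guard` type, not the tvix implementation), with the checks unconditional instead of gated on `debug_assertions`:

```rust
/// Hypothetical, simplified stand-in for ArchiveReaderStatus.
struct Guard<'p> {
    parent_ready: Option<&'p mut bool>,
    ready: bool,
}

impl Guard<'_> {
    fn top() -> Self {
        Guard {
            parent_ready: None,
            ready: true,
        }
    }

    /// Hand the underlying resource to a child guard; the parent stays
    /// not-ready until the child calls `finish`.
    fn child(&mut self) -> Guard<'_> {
        self.check();
        self.ready = false;
        Guard {
            parent_ready: Some(&mut self.ready),
            ready: true,
        }
    }

    /// Mark this guard done and re-ready its parent.
    fn finish(mut self) {
        self.check();
        self.ready = false;
        if let Some(parent) = self.parent_ready.take() {
            *parent = true;
        }
    }

    fn check(&self) {
        assert!(self.ready, "non-ready guard used");
    }
}

fn main() {
    let mut top = Guard::top();
    top.child(); // child created, dropped immediately, never finished
    top.check(); // panics: the child never readied its parent
}
```

Dropping a guard without calling `finish` leaves its parent non-ready forever, so the parent's next use panics; this is exactly how an abandoned `FileReader` or `DirReader` is caught in debug builds.
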
diff --git a/tvix/nix-compat/src/nar/reader/read.rs b/tvix/nix-compat/src/nar/reader/read.rs
index 1ce1613764..9938581f2a 100644
--- a/tvix/nix-compat/src/nar/reader/read.rs
+++ b/tvix/nix-compat/src/nar/reader/read.rs
@@ -15,6 +15,38 @@ pub fn u64(reader: &mut Reader) -> io::Result<u64> {
     Ok(u64::from_le_bytes(buf))
 }
 
+/// Consume a byte string from the reader into a provided buffer,
+/// returning the data bytes.
+pub fn bytes_buf<'a, const N: usize>(
+    reader: &mut Reader,
+    buf: &'a mut [u8; N],
+    max_len: usize,
+) -> io::Result<&'a [u8]> {
+    assert_eq!(N % 8, 0);
+    assert!(max_len <= N);
+
+    // read the length, and reject excessively large values
+    let len = self::u64(reader)?;
+    if len > max_len as u64 {
+        return Err(InvalidData.into());
+    }
+    // we know the length fits in a usize now
+    let len = len as usize;
+
+    // read the data and padding into a buffer
+    let buf_len = (len + 7) & !7;
+    reader.read_exact(&mut buf[..buf_len])?;
+
+    // verify that the padding is all zeroes
+    for &b in &buf[len..buf_len] {
+        if b != 0 {
+            return Err(InvalidData.into());
+        }
+    }
+
+    Ok(&buf[..len])
+}
+
 /// Consume a byte string of up to `max_len` bytes from the reader.
 pub fn bytes(reader: &mut Reader, max_len: usize) -> io::Result<Vec<u8>> {
     assert!(max_len <= isize::MAX as usize);
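
For reference, the framing `bytes_buf` parses is: a little-endian `u64` length, the data itself, then zero bytes up to the next 8-byte boundary. A worked example with a hypothetical `frame` helper, using the same rounding expression as above:

```rust
/// Frame `data` as a NAR byte string: LE u64 length, payload, then zero
/// padding to an 8-byte boundary. Hypothetical inverse of `bytes_buf`.
fn frame(data: &[u8]) -> Vec<u8> {
    let mut out = Vec::new();
    out.extend_from_slice(&(data.len() as u64).to_le_bytes());
    out.extend_from_slice(data);
    let padded_len = (data.len() + 7) & !7; // same rounding as bytes_buf
    out.resize(8 + padded_len, 0);
    out
}

#[test]
fn frame_keep() {
    // ".keep" is 5 bytes, so it gets 3 zero bytes of padding.
    let framed = frame(b".keep");
    assert_eq!(&framed[..8], &5u64.to_le_bytes()[..]);
    assert_eq!(&framed[8..13], &b".keep"[..]);
    assert_eq!(&framed[13..], &[0u8, 0, 0][..]);
}
```
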
diff --git a/tvix/nix-compat/src/nar/reader/test.rs b/tvix/nix-compat/src/nar/reader/test.rs
index fd0d6a9f5a..63e4fb289f 100644
--- a/tvix/nix-compat/src/nar/reader/test.rs
+++ b/tvix/nix-compat/src/nar/reader/test.rs
@@ -46,75 +46,233 @@ fn complicated() {
     match node {
         nar::reader::Node::Directory(mut dir_reader) => {
             // first entry is .keep, an empty regular file.
-            let entry = dir_reader
-                .next()
-                .expect("next must succeed")
-                .expect("must be some");
-
-            assert_eq!(&b".keep"[..], &entry.name);
-
-            match entry.node {
-                nar::reader::Node::File {
-                    executable,
-                    mut reader,
-                } => {
-                    assert!(!executable);
-                    assert_eq!(reader.read(&mut [0]).unwrap(), 0);
+            must_read_file(
+                ".keep",
+                dir_reader
+                    .next()
+                    .expect("next must succeed")
+                    .expect("must be some"),
+            );
+
+            // second entry is aa, a symlink to /nix/store/somewhereelse
+            must_be_symlink(
+                "aa",
+                "/nix/store/somewhereelse",
+                dir_reader
+                    .next()
+                    .expect("next must be some")
+                    .expect("must be some"),
+            );
+
+            {
+                // third entry is a directory called "keep"
+                let entry = dir_reader
+                    .next()
+                    .expect("next must be some")
+                    .expect("must be some");
+
+                assert_eq!(b"keep", entry.name);
+
+                match entry.node {
+                    nar::reader::Node::Directory(mut subdir_reader) => {
+                        {
+                            // first entry is .keep, an empty regular file.
+                            let entry = subdir_reader
+                                .next()
+                                .expect("next must succeed")
+                                .expect("must be some");
+
+                            must_read_file(".keep", entry);
+                        }
+
+                        // we must read the None
+                        assert!(
+                            subdir_reader.next().expect("next must succeed").is_none(),
+                            "keep directory contains only .keep"
+                        );
+                    }
+                    _ => panic!("unexpected type for keep"),
                 }
-                _ => panic!("unexpected type for .keep"),
-            }
+            };
+
+            // reading more entries yields None (and we must actually read until we get it)
+            assert!(dir_reader.next().expect("must succeed").is_none());
+        }
+        _ => panic!("unexpected type"),
+    }
+}
+
+#[test]
+#[should_panic]
+fn file_read_abandoned() {
+    let mut f = std::io::Cursor::new(include_bytes!("../tests/complicated.nar"));
+    let node = nar::reader::open(&mut f).unwrap();
+
+    match node {
+        nar::reader::Node::Directory(mut dir_reader) => {
+            // first entry is .keep, an empty regular file.
+            {
+                let entry = dir_reader
+                    .next()
+                    .expect("next must succeed")
+                    .expect("must be some");
+
+                assert_eq!(b".keep", entry.name);
+                // don't bother to finish reading it.
+            };
+
+            // this should panic (not return an error), because we are meant to abandon the archive reader now.
+            assert!(dir_reader.next().expect("must succeed").is_none());
+        }
+        _ => panic!("unexpected type"),
+    }
+}
+
+#[test]
+#[should_panic]
+fn dir_read_abandoned() {
+    let mut f = std::io::Cursor::new(include_bytes!("../tests/complicated.nar"));
+    let node = nar::reader::open(&mut f).unwrap();
+
+    match node {
+        nar::reader::Node::Directory(mut dir_reader) => {
+            // first entry is .keep, an empty regular file.
+            must_read_file(
+                ".keep",
+                dir_reader
+                    .next()
+                    .expect("next must succeed")
+                    .expect("must be some"),
+            );
 
             // second entry is aa, a symlink to /nix/store/somewhereelse
-            let entry = dir_reader
-                .next()
-                .expect("next must be some")
-                .expect("must be some");
+            must_be_symlink(
+                "aa",
+                "/nix/store/somewhereelse",
+                dir_reader
+                    .next()
+                    .expect("next must be some")
+                    .expect("must be some"),
+            );
 
-            assert_eq!(&b"aa"[..], &entry.name);
+            {
+                // third entry is a directory called "keep"
+                let entry = dir_reader
+                    .next()
+                    .expect("next must be some")
+                    .expect("must be some");
 
-            match entry.node {
-                nar::reader::Node::Symlink { target } => {
-                    assert_eq!(&b"/nix/store/somewhereelse"[..], &target);
+                assert_eq!(b"keep", entry.name);
+
+                match entry.node {
+                    nar::reader::Node::Directory(_) => {
+                        // don't finish reading it, leaving the parent reader non-ready
+                    }
+                    _ => panic!("unexpected type for keep"),
                 }
-                _ => panic!("unexpected type for aa"),
-            }
-
-            // third entry is a directory called "keep"
-            let entry = dir_reader
-                .next()
-                .expect("next must be some")
-                .expect("must be some");
-
-            assert_eq!(&b"keep"[..], &entry.name);
-
-            match entry.node {
-                nar::reader::Node::Directory(mut subdir_reader) => {
-                    // first entry is .keep, an empty regular file.
-                    let entry = subdir_reader
-                        .next()
-                        .expect("next must succeed")
-                        .expect("must be some");
-
-                    // … it contains a single .keep, an empty regular file.
-                    assert_eq!(&b".keep"[..], &entry.name);
-
-                    match entry.node {
-                        nar::reader::Node::File {
-                            executable,
-                            mut reader,
-                        } => {
-                            assert!(!executable);
-                            assert_eq!(reader.read(&mut [0]).unwrap(), 0);
-                        }
-                        _ => panic!("unexpected type for keep/.keep"),
+            };
+
+            // this should panic, because we didn't finish reading the child subdirectory
+            assert!(dir_reader.next().expect("must succeed").is_none());
+        }
+        _ => panic!("unexpected type"),
+    }
+}
+
+#[test]
+#[should_panic]
+fn dir_read_after_none() {
+    let mut f = std::io::Cursor::new(include_bytes!("../tests/complicated.nar"));
+    let node = nar::reader::open(&mut f).unwrap();
+
+    match node {
+        nar::reader::Node::Directory(mut dir_reader) => {
+            // first entry is .keep, an empty regular file.
+            must_read_file(
+                ".keep",
+                dir_reader
+                    .next()
+                    .expect("next must succeed")
+                    .expect("must be some"),
+            );
+
+            // second entry is aa, a symlink to /nix/store/somewhereelse
+            must_be_symlink(
+                "aa",
+                "/nix/store/somewhereelse",
+                dir_reader
+                    .next()
+                    .expect("next must be some")
+                    .expect("must be some"),
+            );
+
+            {
+                // third entry is a directory called "keep"
+                let entry = dir_reader
+                    .next()
+                    .expect("next must be some")
+                    .expect("must be some");
+
+                assert_eq!(b"keep", entry.name);
+
+                match entry.node {
+                    nar::reader::Node::Directory(mut subdir_reader) => {
+                        // first entry is .keep, an empty regular file.
+                        must_read_file(
+                            ".keep",
+                            subdir_reader
+                                .next()
+                                .expect("next must succeed")
+                                .expect("must be some"),
+                        );
+
+                        // we must read the None
+                        assert!(
+                            subdir_reader.next().expect("next must succeed").is_none(),
+                            "keep directory contains only .keep"
+                        );
                     }
+                    _ => panic!("unexpected type for keep"),
                 }
-                _ => panic!("unexpected type for keep/.keep"),
-            }
+            };
 
             // reading more entries yields None (and we must actually read until we get it)
             assert!(dir_reader.next().expect("must succeed").is_none());
+
+            // this should panic, because we already got a None, so we're meant to stop.
+            dir_reader.next().unwrap();
+            unreachable!()
         }
         _ => panic!("unexpected type"),
     }
 }
+
+fn must_read_file(name: &'static str, entry: nar::reader::Entry<'_, '_>) {
+    assert_eq!(name.as_bytes(), entry.name);
+
+    match entry.node {
+        nar::reader::Node::File {
+            executable,
+            mut reader,
+        } => {
+            assert!(!executable);
+            assert_eq!(reader.read(&mut [0]).unwrap(), 0);
+        }
+        _ => panic!("unexpected type for {}", name),
+    }
+}
+
+fn must_be_symlink(
+    name: &'static str,
+    exp_target: &'static str,
+    entry: nar::reader::Entry<'_, '_>,
+) {
+    assert_eq!(name.as_bytes(), entry.name);
+
+    match entry.node {
+        nar::reader::Node::Symlink { target } => {
+            assert_eq!(exp_target.as_bytes(), &target);
+        }
+        _ => panic!("unexpected type for {}", name),
+    }
+}
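
The tests above spell out the consumption contract entry by entry. As a compact illustration, a recursive consumer that honors it looks roughly like this (a sketch assuming the crate layout in this diff; `walk` and `example.nar` are hypothetical):

```rust
use std::io::Read;

use nix_compat::nar::reader::{self, Node};

/// Consume a NAR in the order the contract demands: every file read to
/// EOF, every directory iterated until `next` yields `None`.
fn walk(node: Node<'_, '_>) -> std::io::Result<()> {
    match node {
        Node::Symlink { target } => {
            println!("symlink -> {}", String::from_utf8_lossy(&target));
        }
        Node::File {
            executable,
            mut reader,
        } => {
            let mut contents = Vec::new();
            // reading to EOF also consumes the trailing TOK_PAR
            reader.read_to_end(&mut contents)?;
            println!("file, {} bytes, executable: {}", contents.len(), executable);
        }
        Node::Directory(mut dir) => {
            while let Some(entry) = dir.next()? {
                print!("{}: ", String::from_utf8_lossy(entry.name));
                walk(entry.node)?;
            }
        }
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    let mut f = std::io::BufReader::new(std::fs::File::open("example.nar")?);
    walk(reader::open(&mut f)?)
}
```

On any error, the whole tree of readers must be abandoned, matching the contract documented on `FileReader` and `DirReader::next`.
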
diff --git a/tvix/nix-compat/src/nar/wire/mod.rs b/tvix/nix-compat/src/nar/wire/mod.rs
index b9e0212495..9e99b530ce 100644
--- a/tvix/nix-compat/src/nar/wire/mod.rs
+++ b/tvix/nix-compat/src/nar/wire/mod.rs
@@ -90,6 +90,23 @@ pub const TOK_DIR: [u8; 24] = *b"\x09\0\0\0\0\0\0\0directory\0\0\0\0\0\0\0";
 pub const TOK_ENT: [u8; 48] = *b"\x05\0\0\0\0\0\0\0entry\0\0\0\x01\0\0\0\0\0\0\0(\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\0name\0\0\0\0";
 pub const TOK_NOD: [u8; 48] = *b"\x04\0\0\0\0\0\0\0node\0\0\0\0\x01\0\0\0\0\0\0\0(\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\0type\0\0\0\0";
 pub const TOK_PAR: [u8; 16] = *b"\x01\0\0\0\0\0\0\0)\0\0\0\0\0\0\0";
+#[cfg(feature = "async")]
+const TOK_PAD_PAR: [u8; 24] = *b"\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0)\0\0\0\0\0\0\0";
+
+#[cfg(feature = "async")]
+#[derive(Debug)]
+pub(crate) enum PadPar {}
+
+#[cfg(feature = "async")]
+impl crate::wire::reader::Tag for PadPar {
+    const PATTERN: &'static [u8] = &TOK_PAD_PAR;
+
+    type Buf = [u8; 24];
+
+    fn make_buf() -> Self::Buf {
+        [0; 24]
+    }
+}
 
 #[test]
 fn tokens() {
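
`TOK_PAD_PAR` fuses a maximal run of padding with `TOK_PAR`, letting the async reader consume "padding plus closing parenthesis" as a single tag match. A quick sanity check of that layout (constants duplicated from above so the test is standalone):

```rust
#[test]
fn pad_par_is_padding_plus_par() {
    const TOK_PAR: [u8; 16] = *b"\x01\0\0\0\0\0\0\0)\0\0\0\0\0\0\0";
    const TOK_PAD_PAR: [u8; 24] = *b"\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0)\0\0\0\0\0\0\0";

    // eight zero bytes of padding, then the ordinary closing token
    assert_eq!(&TOK_PAD_PAR[..8], &[0u8; 8][..]);
    assert_eq!(&TOK_PAD_PAR[8..], &TOK_PAR[..]);
}
```
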
diff --git a/tvix/nix-compat/src/nar/wire/tag.rs b/tvix/nix-compat/src/nar/wire/tag.rs
index 55b93f9985..4982a0d707 100644
--- a/tvix/nix-compat/src/nar/wire/tag.rs
+++ b/tvix/nix-compat/src/nar/wire/tag.rs
@@ -10,6 +10,7 @@ pub trait Tag: Sized {
     const MIN: usize;
 
     /// Minimal suitably sized buffer for reading the wire representation
+    ///
     /// HACK: This is a workaround for const generics limitations.
     type Buf: AsMut<[u8]> + Send;
 
diff --git a/tvix/nix-compat/src/narinfo/public_keys.rs b/tvix/nix-compat/src/narinfo/public_keys.rs
index ced05cadb1..27dd90e096 100644
--- a/tvix/nix-compat/src/narinfo/public_keys.rs
+++ b/tvix/nix-compat/src/narinfo/public_keys.rs
@@ -106,40 +106,43 @@ impl Display for PubKey {
 mod test {
     use data_encoding::BASE64;
     use ed25519_dalek::PUBLIC_KEY_LENGTH;
-    use test_case::test_case;
+    use rstest::rstest;
 
     use crate::narinfo::Signature;
 
     use super::PubKey;
     const FINGERPRINT: &str = "1;/nix/store/syd87l2rxw8cbsxmxl853h0r6pdwhwjr-curl-7.82.0-bin;sha256:1b4sb93wp679q4zx9k1ignby1yna3z7c4c2ri3wphylbc2dwsys0;196040;/nix/store/0jqd0rlxzra1rs38rdxl43yh6rxchgc6-curl-7.82.0,/nix/store/6w8g7njm4mck5dmjxws0z1xnrxvl81xa-glibc-2.34-115,/nix/store/j5jxw3iy7bbz4a57fh9g2xm2gxmyal8h-zlib-1.2.12,/nix/store/yxvjs9drzsphm9pcf42a4byzj1kb9m7k-openssl-1.1.1n";
 
-    #[test_case("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=", "cache.nixos.org-1", BASE64.decode(b"6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=").unwrap()[..].try_into().unwrap(); "cache.nixos.org")]
-    #[test_case("cheesecake:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=", "cheesecake", BASE64.decode(b"6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=").unwrap()[..].try_into().unwrap(); "cache.nixos.org different name")]
-    #[test_case("test1:tLAEn+EeaBUJYqEpTd2yeerr7Ic6+0vWe+aXL/vYUpE=", "test1", BASE64.decode(b"tLAEn+EeaBUJYqEpTd2yeerr7Ic6+0vWe+aXL/vYUpE=").unwrap()[..].try_into().unwrap(); "test-1")]
+    #[rstest]
+    #[case::cache_nixos_org("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=", "cache.nixos.org-1", &BASE64.decode(b"6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=").unwrap()[..].try_into().unwrap())]
+    #[case::cache_nixos_org_different_name("cheesecake:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=", "cheesecake", &BASE64.decode(b"6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=").unwrap()[..].try_into().unwrap())]
+    #[case::test_1("test1:tLAEn+EeaBUJYqEpTd2yeerr7Ic6+0vWe+aXL/vYUpE=", "test1", &BASE64.decode(b"tLAEn+EeaBUJYqEpTd2yeerr7Ic6+0vWe+aXL/vYUpE=").unwrap()[..].try_into().unwrap())]
     fn parse(
-        input: &'static str,
-        exp_name: &'static str,
-        exp_verifying_key_bytes: &[u8; PUBLIC_KEY_LENGTH],
+        #[case] input: &'static str,
+        #[case] exp_name: &'static str,
+        #[case] exp_verifying_key_bytes: &[u8; PUBLIC_KEY_LENGTH],
     ) {
         let pubkey = PubKey::parse(input).expect("must parse");
         assert_eq!(exp_name, pubkey.name());
         assert_eq!(exp_verifying_key_bytes, pubkey.verifying_key.as_bytes());
     }
 
-    #[test_case("6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="; "empty name")]
-    #[test_case("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY"; "missing padding")]
-    #[test_case("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDS"; "wrong length")]
-    fn parse_fail(input: &'static str) {
+    #[rstest]
+    #[case::empty_name("6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=")]
+    #[case::missing_padding("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY")]
+    #[case::wrong_length("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDS")]
+    fn parse_fail(#[case] input: &'static str) {
         PubKey::parse(input).expect_err("must fail");
     }
 
-    #[test_case("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=", FINGERPRINT, "cache.nixos.org-1:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", true; "correct cache.nixos.org")]
-    #[test_case("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=", FINGERPRINT, "cache.nixos.org:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", false; "wrong name mismatch")]
+    #[rstest]
+    #[case::correct_cache_nixos_org("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=", FINGERPRINT, "cache.nixos.org-1:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", true)]
+    #[case::wrong_name_mismatch("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=", FINGERPRINT, "cache.nixos.org:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", false)]
     fn verify(
-        pubkey_str: &'static str,
-        fingerprint: &'static str,
-        signature_str: &'static str,
-        expected: bool,
+        #[case] pubkey_str: &'static str,
+        #[case] fingerprint: &'static str,
+        #[case] signature_str: &'static str,
+        #[case] expected: bool,
     ) {
         let pubkey = PubKey::parse(pubkey_str).expect("must parse");
         let signature = Signature::parse(signature_str).expect("must parse");
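
The `test_case` to `rstest` migration here is mechanical: each `#[test_case(...; "name")]` becomes `#[case::name(...)]`, and the function's parameters gain `#[case]` attributes. In isolation, the shape is:

```rust
use rstest::rstest;

#[rstest]
#[case::small(2, 4)]
#[case::large(12, 144)]
fn square(#[case] input: u64, #[case] expected: u64) {
    assert_eq!(expected, input * input);
}
```

Each case still expands to its own test function, so failures name the offending case just as `test_case` did.
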
diff --git a/tvix/nix-compat/src/narinfo/signature.rs b/tvix/nix-compat/src/narinfo/signature.rs
index 6162ec0e38..fd197e771d 100644
--- a/tvix/nix-compat/src/narinfo/signature.rs
+++ b/tvix/nix-compat/src/narinfo/signature.rs
@@ -107,7 +107,7 @@ mod test {
     use lazy_static::lazy_static;
 
     use super::Signature;
-    use test_case::test_case;
+    use rstest::rstest;
 
     const FINGERPRINT: &str = "1;/nix/store/syd87l2rxw8cbsxmxl853h0r6pdwhwjr-curl-7.82.0-bin;sha256:1b4sb93wp679q4zx9k1ignby1yna3z7c4c2ri3wphylbc2dwsys0;196040;/nix/store/0jqd0rlxzra1rs38rdxl43yh6rxchgc6-curl-7.82.0,/nix/store/6w8g7njm4mck5dmjxws0z1xnrxvl81xa-glibc-2.34-115,/nix/store/j5jxw3iy7bbz4a57fh9g2xm2gxmyal8h-zlib-1.2.12,/nix/store/yxvjs9drzsphm9pcf42a4byzj1kb9m7k-openssl-1.1.1n";
 
@@ -131,27 +131,33 @@ mod test {
         .unwrap();
     }
 
-    #[test_case(&PUB_CACHE_NIXOS_ORG_1, &"cache.nixos.org-1:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", FINGERPRINT, true; "valid cache.nixos.org-1")]
-    #[test_case(&PUB_CACHE_NIXOS_ORG_1, &"cache.nixos.org-1:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", FINGERPRINT, true; "valid test1")]
-    #[test_case(&PUB_CACHE_NIXOS_ORG_1, &"cache.nixos.org-2:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", FINGERPRINT, true; "valid cache.nixos.org different name")]
-    #[test_case(&PUB_CACHE_NIXOS_ORG_1, &"cache.nixos.org-1:TsTTb000000000000000000000000ytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", FINGERPRINT, false; "fail invalid cache.nixos.org-1 signature")]
-    #[test_case(&PUB_CACHE_NIXOS_ORG_1, &"cache.nixos.org-1:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", &FINGERPRINT[0..5], false; "fail valid sig but wrong fp cache.nixos.org-1")]
+    #[rstest]
+    #[case::valid_cache_nixos_org_1(&PUB_CACHE_NIXOS_ORG_1, &"cache.nixos.org-1:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", FINGERPRINT, true)]
+    #[case::valid_test1(&PUB_CACHE_NIXOS_ORG_1, &"cache.nixos.org-1:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", FINGERPRINT, true)]
+    #[case::valid_cache_nixos_org_different_name(&PUB_CACHE_NIXOS_ORG_1, &"cache.nixos.org-2:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", FINGERPRINT, true)]
+    #[case::fail_invalid_cache_nixos_org_1_signature(&PUB_CACHE_NIXOS_ORG_1, &"cache.nixos.org-1:TsTTb000000000000000000000000ytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", FINGERPRINT, false)]
+    #[case::fail_valid_sig_but_wrong_fp_cache_nixos_org_1(&PUB_CACHE_NIXOS_ORG_1, &"cache.nixos.org-1:TsTTb3WGTZKphvYdBHXwo6weVILmTytUjLB+vcX89fOjjRicCHmKA4RCPMVLkj6TMJ4GMX3HPVWRdD1hkeKZBQ==", &FINGERPRINT[0..5], false)]
     fn verify_sigs(
-        verifying_key: &VerifyingKey,
-        sig_str: &'static str,
-        fp: &str,
-        expect_valid: bool,
+        #[case] verifying_key: &VerifyingKey,
+        #[case] sig_str: &'static str,
+        #[case] fp: &str,
+        #[case] expect_valid: bool,
     ) {
         let sig = Signature::parse(sig_str).expect("must parse");
         assert_eq!(expect_valid, sig.verify(fp.as_bytes(), verifying_key));
     }
 
-    #[test_case("cache.nixos.org-1:o1DTsjCz0PofLJ216P2RBuSulI8BAb6zHxWE4N+tzlcELk5Uk/GO2SCxWTRN5wJutLZZ+cHTMdWqOHF8"; "wrong length")]
-    #[test_case("test\n:u01BybwQhyI5H1bW1EIWXssMDhDDIvXOG5uh8Qzgdyjz6U1qg6DHhMAvXZOUStIj6X5t4/ufFgR8i3fjf0bMAw=="; "wrong name newline")]
-    #[test_case("test :u01BybwQhyI5H1bW1EIWXssMDhDDIvXOG5uh8Qzgdyjz6U1qg6DHhMAvXZOUStIj6X5t4/ufFgR8i3fjf0bMAw=="; "wrong name space")]
-    #[test_case(":u01BybwQhyI5H1bW1EIWXssMDhDDIvXOG5uh8Qzgdyjz6U1qg6DHhMAvXZOUStIj6X5t4/ufFgR8i3fjf0bMAw=="; "empty name")]
-    #[test_case("u01BybwQhyI5H1bW1EIWXssMDhDDIvXOG5uh8Qzgdyjz6U1qg6DHhMAvXZOUStIj6X5t4/ufFgR8i3fjf0bMAw=="; "b64 only")]
-    fn parse_fail(input: &'static str) {
+    #[rstest]
+    #[case::wrong_length("cache.nixos.org-1:o1DTsjCz0PofLJ216P2RBuSulI8BAb6zHxWE4N+tzlcELk5Uk/GO2SCxWTRN5wJutLZZ+cHTMdWqOHF8")]
+    #[case::wrong_name_newline("test\n:u01BybwQhyI5H1bW1EIWXssMDhDDIvXOG5uh8Qzgdyjz6U1qg6DHhMAvXZOUStIj6X5t4/ufFgR8i3fjf0bMAw==")]
+    #[case::wrong_name_space("test :u01BybwQhyI5H1bW1EIWXssMDhDDIvXOG5uh8Qzgdyjz6U1qg6DHhMAvXZOUStIj6X5t4/ufFgR8i3fjf0bMAw==")]
+    #[case::empty_name(
+        ":u01BybwQhyI5H1bW1EIWXssMDhDDIvXOG5uh8Qzgdyjz6U1qg6DHhMAvXZOUStIj6X5t4/ufFgR8i3fjf0bMAw=="
+    )]
+    #[case::b64_only(
+        "u01BybwQhyI5H1bW1EIWXssMDhDDIvXOG5uh8Qzgdyjz6U1qg6DHhMAvXZOUStIj6X5t4/ufFgR8i3fjf0bMAw=="
+    )]
+    fn parse_fail(#[case] input: &'static str) {
         Signature::parse(input).expect_err("must fail");
     }
 
diff --git a/tvix/nix-compat/src/nix_daemon/worker_protocol.rs b/tvix/nix-compat/src/nix_daemon/worker_protocol.rs
index 58a48d1bdd..7e3adc0db2 100644
--- a/tvix/nix-compat/src/nix_daemon/worker_protocol.rs
+++ b/tvix/nix-compat/src/nix_daemon/worker_protocol.rs
@@ -15,13 +15,34 @@ static WORKER_MAGIC_1: u64 = 0x6e697863; // "nixc"
 static WORKER_MAGIC_2: u64 = 0x6478696f; // "dxio"
 pub static STDERR_LAST: u64 = 0x616c7473; // "alts"
 
+/// | Nix version     | Protocol |
+/// |-----------------|----------|
+/// | 0.11            | 1.02     |
+/// | 0.12            | 1.04     |
+/// | 0.13            | 1.05     |
+/// | 0.14            | 1.05     |
+/// | 0.15            | 1.05     |
+/// | 0.16            | 1.06     |
+/// | 1.0             | 1.10     |
+/// | 1.1             | 1.11     |
+/// | 1.2             | 1.12     |
+/// | 1.3 - 1.5.3     | 1.13     |
+/// | 1.6 - 1.10      | 1.14     |
+/// | 1.11 - 1.11.16  | 1.15     |
+/// | 2.0 - 2.0.4     | 1.20     |
+/// | 2.1 - 2.3.18    | 1.21     |
+/// | 2.4 - 2.6.1     | 1.32     |
+/// | 2.7.0           | 1.33     |
+/// | 2.8.0 - 2.14.1  | 1.34     |
+/// | 2.15.0 - 2.19.4 | 1.35     |
+/// | 2.20.0 - 2.22.0 | 1.37     |
 static PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(1, 37);
 
 /// Max length of a Nix setting name/value. In bytes.
 ///
 /// This value has been arbitrarily chosen after looking at the nix.conf
 /// manpage. Don't hesitate to increase it if it's too limiting.
-pub static MAX_SETTING_SIZE: u64 = 1024;
+pub static MAX_SETTING_SIZE: usize = 1024;
 
 /// Worker Operation
 ///
@@ -131,30 +152,30 @@ pub async fn read_client_settings<R: AsyncReadExt + Unpin>(
     r: &mut R,
     client_version: ProtocolVersion,
 ) -> std::io::Result<ClientSettings> {
-    let keep_failed = wire::read_bool(r).await?;
-    let keep_going = wire::read_bool(r).await?;
-    let try_fallback = wire::read_bool(r).await?;
-    let verbosity_uint = wire::read_u64(r).await?;
+    let keep_failed = r.read_u64_le().await? != 0;
+    let keep_going = r.read_u64_le().await? != 0;
+    let try_fallback = r.read_u64_le().await? != 0;
+    let verbosity_uint = r.read_u64_le().await?;
     let verbosity = Verbosity::from_u64(verbosity_uint).ok_or_else(|| {
         Error::new(
             ErrorKind::InvalidData,
             format!("Can't convert integer {} to verbosity", verbosity_uint),
         )
     })?;
-    let max_build_jobs = wire::read_u64(r).await?;
-    let max_silent_time = wire::read_u64(r).await?;
-    _ = wire::read_u64(r).await?; // obsolete useBuildHook
-    let verbose_build = wire::read_bool(r).await?;
-    _ = wire::read_u64(r).await?; // obsolete logType
-    _ = wire::read_u64(r).await?; // obsolete printBuildTrace
-    let build_cores = wire::read_u64(r).await?;
-    let use_substitutes = wire::read_bool(r).await?;
+    let max_build_jobs = r.read_u64_le().await?;
+    let max_silent_time = r.read_u64_le().await?;
+    _ = r.read_u64_le().await?; // obsolete useBuildHook
+    let verbose_build = r.read_u64_le().await? != 0;
+    _ = r.read_u64_le().await?; // obsolete logType
+    _ = r.read_u64_le().await?; // obsolete printBuildTrace
+    let build_cores = r.read_u64_le().await?;
+    let use_substitutes = r.read_u64_le().await? != 0;
     let mut overrides = HashMap::new();
     if client_version.minor() >= 12 {
-        let num_overrides = wire::read_u64(r).await?;
+        let num_overrides = r.read_u64_le().await?;
         for _ in 0..num_overrides {
-            let name = wire::read_string(r, 0..MAX_SETTING_SIZE).await?;
-            let value = wire::read_string(r, 0..MAX_SETTING_SIZE).await?;
+            let name = wire::read_string(r, 0..=MAX_SETTING_SIZE).await?;
+            let value = wire::read_string(r, 0..=MAX_SETTING_SIZE).await?;
             overrides.insert(name, value);
         }
     }
@@ -197,17 +218,17 @@ pub async fn server_handshake_client<'a, RW: 'a>(
 where
     &'a mut RW: AsyncReadExt + AsyncWriteExt + Unpin,
 {
-    let worker_magic_1 = wire::read_u64(&mut conn).await?;
+    let worker_magic_1 = conn.read_u64_le().await?;
     if worker_magic_1 != WORKER_MAGIC_1 {
         Err(std::io::Error::new(
             ErrorKind::InvalidData,
             format!("Incorrect worker magic number received: {}", worker_magic_1),
         ))
     } else {
-        wire::write_u64(&mut conn, WORKER_MAGIC_2).await?;
-        wire::write_u64(&mut conn, PROTOCOL_VERSION.into()).await?;
+        conn.write_u64_le(WORKER_MAGIC_2).await?;
+        conn.write_u64_le(PROTOCOL_VERSION.into()).await?;
         conn.flush().await?;
-        let client_version = wire::read_u64(&mut conn).await?;
+        let client_version = conn.read_u64_le().await?;
         // Parse into ProtocolVersion.
         let client_version: ProtocolVersion = client_version
             .try_into()
@@ -220,14 +241,14 @@ where
         }
         if client_version.minor() >= 14 {
             // Obsolete CPU affinity.
-            let read_affinity = wire::read_u64(&mut conn).await?;
+            let read_affinity = conn.read_u64_le().await?;
             if read_affinity != 0 {
-                let _cpu_affinity = wire::read_u64(&mut conn).await?;
+                let _cpu_affinity = conn.read_u64_le().await?;
             };
         }
         if client_version.minor() >= 11 {
             // Obsolete reserveSpace
-            let _reserve_space = wire::read_u64(&mut conn).await?;
+            let _reserve_space = conn.read_u64_le().await?;
         }
         if client_version.minor() >= 33 {
             // Nix version. We're plain lying, we're not Nix, but eh…
@@ -245,7 +266,7 @@ where
 
 /// Read a worker [Operation] from the wire.
 pub async fn read_op<R: AsyncReadExt + Unpin>(r: &mut R) -> std::io::Result<Operation> {
-    let op_number = wire::read_u64(r).await?;
+    let op_number = r.read_u64_le().await?;
     Operation::from_u64(op_number).ok_or(Error::new(
         ErrorKind::InvalidData,
         format!("Invalid OP number {}", op_number),
@@ -278,8 +299,8 @@ where
     W: AsyncReadExt + AsyncWriteExt + Unpin,
 {
     match t {
-        Trust::Trusted => wire::write_u64(conn, 1).await,
-        Trust::NotTrusted => wire::write_u64(conn, 2).await,
+        Trust::Trusted => conn.write_u64_le(1).await,
+        Trust::NotTrusted => conn.write_u64_le(2).await,
     }
 }
 
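
For orientation, the client half of this handshake mirrors the exchange above: send magic 1, expect magic 2 plus the server's version, then reply with its own version. A hypothetical sketch, not part of this codebase, with the magic values taken from the constants above (the obsolete CPU-affinity and reserve-space fields that follow are omitted):

```rust
use tokio::io::{AsyncReadExt, AsyncWriteExt};

/// Hypothetical client side of the daemon handshake.
async fn client_handshake<S>(conn: &mut S, our_version: u64) -> std::io::Result<u64>
where
    S: AsyncReadExt + AsyncWriteExt + Unpin,
{
    conn.write_u64_le(0x6e697863).await?; // WORKER_MAGIC_1, "nixc"
    conn.flush().await?;

    let magic2 = conn.read_u64_le().await?;
    if magic2 != 0x6478696f {
        // WORKER_MAGIC_2, "dxio"
        return Err(std::io::Error::new(
            std::io::ErrorKind::InvalidData,
            format!("unexpected magic: {:x}", magic2),
        ));
    }

    let server_version = conn.read_u64_le().await?;
    conn.write_u64_le(our_version).await?;
    conn.flush().await?;
    Ok(server_version)
}
```
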
diff --git a/tvix/nix-compat/src/nixbase32.rs b/tvix/nix-compat/src/nixbase32.rs
index c4c2f2b967..b7ffc1dc2b 100644
--- a/tvix/nix-compat/src/nixbase32.rs
+++ b/tvix/nix-compat/src/nixbase32.rs
@@ -141,27 +141,30 @@ pub const fn encode_len(len: usize) -> usize {
 #[cfg(test)]
 mod tests {
     use hex_literal::hex;
-    use test_case::test_case;
+    use rstest::rstest;
 
-    #[test_case("", &[]; "empty bytes")]
-    #[test_case("0z", &hex!("1f"); "one byte")]
-    #[test_case("00bgd045z0d4icpbc2yyz4gx48ak44la", &hex!("8a12321522fd91efbd60ebb2481af88580f61600"); "store path")]
-    #[test_case("0c5b8vw40dy178xlpddw65q9gf1h2186jcc3p4swinwggbllv8mk", &hex!("b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30"); "sha256")]
-    fn encode(enc: &str, dec: &[u8]) {
+    #[rstest]
+    #[case::empty_bytes("", &[])]
+    #[case::one_byte("0z", &hex!("1f"))]
+    #[case::store_path("00bgd045z0d4icpbc2yyz4gx48ak44la", &hex!("8a12321522fd91efbd60ebb2481af88580f61600"))]
+    #[case::sha256("0c5b8vw40dy178xlpddw65q9gf1h2186jcc3p4swinwggbllv8mk", &hex!("b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30"))]
+    #[test]
+    fn encode(#[case] enc: &str, #[case] dec: &[u8]) {
         assert_eq!(enc, super::encode(dec));
     }
 
-    #[test_case("", Some(&[]) ; "empty bytes")]
-    #[test_case("0z", Some(&hex!("1f")); "one byte")]
-    #[test_case("00bgd045z0d4icpbc2yyz4gx48ak44la", Some(&hex!("8a12321522fd91efbd60ebb2481af88580f61600")); "store path")]
-    #[test_case("0c5b8vw40dy178xlpddw65q9gf1h2186jcc3p4swinwggbllv8mk", Some(&hex!("b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30")); "sha256")]
+    #[rstest]
+    #[case::empty_bytes("", Some(&[][..]) )]
+    #[case::one_byte("0z", Some(&hex!("1f")[..]))]
+    #[case::store_path("00bgd045z0d4icpbc2yyz4gx48ak44la", Some(&hex!("8a12321522fd91efbd60ebb2481af88580f61600")[..]))]
+    #[case::sha256("0c5b8vw40dy178xlpddw65q9gf1h2186jcc3p4swinwggbllv8mk", Some(&hex!("b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30")[..]))]
     // this is invalid encoding, because it encodes 10 1-bits, so the carry
     // would be 2 1-bits
-    #[test_case("zz", None; "invalid encoding-1")]
+    #[case::invalid_encoding_1("zz", None)]
     // this is an even more specific example - it'd decode as 00000000 11
-    #[test_case("c0", None; "invalid encoding-2")]
-
-    fn decode(enc: &str, dec: Option<&[u8]>) {
+    #[case::invalid_encoding_2("c0", None)]
+    #[test]
+    fn decode(#[case] enc: &str, #[case] dec: Option<&[u8]>) {
         match dec {
             Some(dec) => {
                 // The decode needs to match what's passed in dec
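
As a quick orientation to the vectors above: nixbase32 packs 5 bits per character, so a 20-byte store-path digest always encodes to 32 characters. A usage sketch (the `nix_compat::nixbase32` module path is an assumption from this diff's file layout):

```rust
use hex_literal::hex;
use nix_compat::nixbase32;

fn main() {
    let digest = hex!("8a12321522fd91efbd60ebb2481af88580f61600");
    // ceil(20 * 8 / 5) == 32
    assert_eq!(nixbase32::encode_len(digest.len()), 32);
    assert_eq!(
        nixbase32::encode(&digest),
        "00bgd045z0d4icpbc2yyz4gx48ak44la"
    );
}
```
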
diff --git a/tvix/nix-compat/src/nixhash/mod.rs b/tvix/nix-compat/src/nixhash/mod.rs
index 7336831aa0..d86cb8b79f 100644
--- a/tvix/nix-compat/src/nixhash/mod.rs
+++ b/tvix/nix-compat/src/nixhash/mod.rs
@@ -347,7 +347,7 @@ mod tests {
     };
     use data_encoding::{BASE64, BASE64_NOPAD, HEXLOWER};
     use hex_literal::hex;
-    use test_case::test_case;
+    use rstest::rstest;
 
     const DIGEST_SHA1: [u8; 20] = hex!("6016777997c30ab02413cf5095622cd7924283ac");
     const DIGEST_SHA256: [u8; 32] =
@@ -380,11 +380,12 @@ mod tests {
     }
 
     /// Test parsing a hash string in various formats, and also when/how the out-of-band algo is needed.
-    #[test_case(&NixHash::Sha1(DIGEST_SHA1); "sha1")]
-    #[test_case(&NixHash::Sha256(DIGEST_SHA256); "sha256")]
-    #[test_case(&NixHash::Sha512(Box::new(DIGEST_SHA512)); "sha512")]
-    #[test_case(&NixHash::Md5(DIGEST_MD5); "md5")]
-    fn from_str(expected_hash: &NixHash) {
+    #[rstest]
+    #[case::sha1(&NixHash::Sha1(DIGEST_SHA1))]
+    #[case::sha256(&NixHash::Sha256(DIGEST_SHA256))]
+    #[case::sha512(&NixHash::Sha512(Box::new(DIGEST_SHA512)))]
+    #[case::md5(&NixHash::Md5(DIGEST_MD5))]
+    fn from_str(#[case] expected_hash: &NixHash) {
         let algo = &expected_hash.algo();
         let digest = expected_hash.digest_as_bytes();
         // parse SRI
@@ -490,12 +491,13 @@ mod tests {
     }
 
     /// Test parsing sha512 SRI hash with various paddings, Nix accepts all of them.
-    #[test_case("sha512-7g91TBvYoYQorRTqo+rYD/i5YnWvUBLnqDhPHxBJDaBW7smuPMeRp6E6JOFuVN9bzN0QnH1ToUU0u9c2CjALEQ"; "no padding")]
-    #[test_case("sha512-7g91TBvYoYQorRTqo+rYD/i5YnWvUBLnqDhPHxBJDaBW7smuPMeRp6E6JOFuVN9bzN0QnH1ToUU0u9c2CjALEQ="; "too little padding")]
-    #[test_case("sha512-7g91TBvYoYQorRTqo+rYD/i5YnWvUBLnqDhPHxBJDaBW7smuPMeRp6E6JOFuVN9bzN0QnH1ToUU0u9c2CjALEQ=="; "correct padding")]
-    #[test_case("sha512-7g91TBvYoYQorRTqo+rYD/i5YnWvUBLnqDhPHxBJDaBW7smuPMeRp6E6JOFuVN9bzN0QnH1ToUU0u9c2CjALEQ==="; "too much padding")]
-    #[test_case("sha512-7g91TBvYoYQorRTqo+rYD/i5YnWvUBLnqDhPHxBJDaBW7smuPMeRp6E6JOFuVN9bzN0QnH1ToUU0u9c2CjALEQ== cheesecake"; "additional suffix ignored")]
-    fn from_sri_str_sha512_paddings(sri_str: &str) {
+    #[rstest]
+    #[case::no_padding("sha512-7g91TBvYoYQorRTqo+rYD/i5YnWvUBLnqDhPHxBJDaBW7smuPMeRp6E6JOFuVN9bzN0QnH1ToUU0u9c2CjALEQ")]
+    #[case::too_little_padding("sha512-7g91TBvYoYQorRTqo+rYD/i5YnWvUBLnqDhPHxBJDaBW7smuPMeRp6E6JOFuVN9bzN0QnH1ToUU0u9c2CjALEQ=")]
+    #[case::correct_padding("sha512-7g91TBvYoYQorRTqo+rYD/i5YnWvUBLnqDhPHxBJDaBW7smuPMeRp6E6JOFuVN9bzN0QnH1ToUU0u9c2CjALEQ==")]
+    #[case::too_much_padding("sha512-7g91TBvYoYQorRTqo+rYD/i5YnWvUBLnqDhPHxBJDaBW7smuPMeRp6E6JOFuVN9bzN0QnH1ToUU0u9c2CjALEQ===")]
+    #[case::additional_suffix_ignored("sha512-7g91TBvYoYQorRTqo+rYD/i5YnWvUBLnqDhPHxBJDaBW7smuPMeRp6E6JOFuVN9bzN0QnH1ToUU0u9c2CjALEQ== cheesecake")]
+    fn from_sri_str_sha512_paddings(#[case] sri_str: &str) {
         let nix_hash = nixhash::from_sri_str(sri_str).expect("must succeed");
 
         assert_eq!(HashAlgo::Sha512, nix_hash.algo());
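
For instance, parsing the canonical SRI hash of the empty string goes through the same `from_sri_str` entry point (a usage sketch; the exact import paths are assumptions):

```rust
use nix_compat::nixhash::{self, HashAlgo};

fn main() {
    // SHA-256 of the empty string, in SRI notation.
    let h = nixhash::from_sri_str("sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=")
        .expect("must parse");
    assert_eq!(HashAlgo::Sha256, h.algo());
}
```
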
diff --git a/tvix/nix-compat/src/store_path/mod.rs b/tvix/nix-compat/src/store_path/mod.rs
index a6dc74fb90..707c41a92d 100644
--- a/tvix/nix-compat/src/store_path/mod.rs
+++ b/tvix/nix-compat/src/store_path/mod.rs
@@ -56,7 +56,7 @@ pub enum Error {
 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub struct StorePath {
     digest: [u8; DIGEST_SIZE],
-    name: String,
+    name: Box<str>,
 }
 
 impl StorePath {
@@ -65,7 +65,7 @@ impl StorePath {
     }
 
     pub fn name(&self) -> &str {
-        self.name.as_ref()
+        &self.name
     }
 
     pub fn as_ref(&self) -> StorePathRef<'_> {
@@ -176,10 +176,7 @@ pub struct StorePathRef<'a> {
 
 impl<'a> From<&'a StorePath> for StorePathRef<'a> {
     fn from(&StorePath { digest, ref name }: &'a StorePath) -> Self {
-        StorePathRef {
-            digest,
-            name: name.as_ref(),
-        }
+        StorePathRef { digest, name }
     }
 }
 
@@ -209,7 +206,7 @@ impl<'a> StorePathRef<'a> {
     pub fn to_owned(&self) -> StorePath {
         StorePath {
             digest: self.digest,
-            name: self.name.to_owned(),
+            name: self.name.into(),
         }
     }
 
@@ -303,8 +300,7 @@ impl Serialize for StorePathRef<'_> {
     }
 }
 
-/// NAME_CHARS contains `true` for bytes that are valid in store path names,
-/// not accounting for '.' being permitted only past the first character.
+/// NAME_CHARS contains `true` for bytes that are valid in store path names.
 static NAME_CHARS: [bool; 256] = {
     let mut tbl = [false; 256];
     let mut c = 0;
@@ -332,10 +328,6 @@ pub(crate) fn validate_name(s: &(impl AsRef<[u8]> + ?Sized)) -> Result<&str, Err
         return Err(Error::InvalidLength);
     }
 
-    if s[0] == b'.' {
-        return Err(Error::InvalidName(s.to_vec(), 0));
-    }
-
     let mut valid = true;
     for &c in s {
         valid = valid && NAME_CHARS[c as usize];
@@ -379,8 +371,8 @@ mod tests {
     use crate::store_path::{StorePath, StorePathRef, DIGEST_SIZE};
     use hex_literal::hex;
     use pretty_assertions::assert_eq;
+    use rstest::rstest;
     use serde::Deserialize;
-    use test_case::test_case;
 
     #[derive(Deserialize)]
     /// An example struct, holding a StorePathRef.
@@ -399,7 +391,7 @@ mod tests {
 
         let expected_digest: [u8; DIGEST_SIZE] = hex!("8a12321522fd91efbd60ebb2481af88580f61600");
 
-        assert_eq!("net-tools-1.60_p20170221182432", nixpath.name);
+        assert_eq!("net-tools-1.60_p20170221182432", nixpath.name());
         assert_eq!(nixpath.digest, expected_digest);
 
         assert_eq!(example_nix_path_str, nixpath.to_string())
@@ -446,15 +438,18 @@ mod tests {
         }
     }
 
-    /// This is the store path rejected when `nix-store --add`'ing an
+    /// This is the store path *accepted* when `nix-store --add`'ing an
     /// empty `.gitignore` file.
     ///
-    /// Nix 2.4 accidentally dropped this behaviour, but this is considered a bug.
-    /// See https://github.com/NixOS/nix/pull/9095.
+    /// Nix 2.4 accidentally permitted this behaviour, but the revert came
+    /// too late to beat Hyrum's law. It is now considered permissible.
+    ///
+    /// https://github.com/NixOS/nix/pull/9095 (revert)
+    /// https://github.com/NixOS/nix/pull/9867 (revert-of-revert)
     #[test]
     fn starts_with_dot() {
         StorePath::from_bytes(b"fli4bwscgna7lpm7v5xgnjxrxh0yc7ra-.gitignore")
-            .expect_err("must fail");
+            .expect("must succeed");
     }
 
     #[test]
@@ -591,25 +586,29 @@ mod tests {
         );
     }
 
-    #[test_case(
+    #[rstest]
+    #[case::without_prefix(
         "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432",
-        (StorePath::from_bytes(b"00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432").unwrap(), PathBuf::new())
-    ; "without prefix")]
-    #[test_case(
+        StorePath::from_bytes(b"00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432").unwrap(), PathBuf::new())]
+    #[case::without_prefix_but_trailing_slash(
         "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432/",
-        (StorePath::from_bytes(b"00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432").unwrap(), PathBuf::new())
-    ; "without prefix, but trailing slash")]
-    #[test_case(
+        StorePath::from_bytes(b"00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432").unwrap(), PathBuf::new())]
+    #[case::with_prefix(
         "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432/bin/arp",
-        (StorePath::from_bytes(b"00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432").unwrap(), PathBuf::from("bin/arp"))
-    ; "with prefix")]
-    #[test_case(
+        StorePath::from_bytes(b"00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432").unwrap(), PathBuf::from("bin/arp"))]
+    #[case::with_prefix_and_trailing_slash(
         "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432/bin/arp/",
-        (StorePath::from_bytes(b"00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432").unwrap(), PathBuf::from("bin/arp/"))
-    ; "with prefix and trailing slash")]
-    fn from_absolute_path_full(s: &str, expected: (StorePath, PathBuf)) {
-        let actual = StorePath::from_absolute_path_full(s).expect("must succeed");
-        assert_eq!(expected, actual);
+        StorePath::from_bytes(b"00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432").unwrap(), PathBuf::from("bin/arp/"))]
+    fn from_absolute_path_full(
+        #[case] s: &str,
+        #[case] exp_store_path: StorePath,
+        #[case] exp_path: PathBuf,
+    ) {
+        let (actual_store_path, actual_path) =
+            StorePath::from_absolute_path_full(s).expect("must succeed");
+
+        assert_eq!(exp_store_path, actual_store_path);
+        assert_eq!(exp_path, actual_path);
     }
 
     #[test]
diff --git a/tvix/nix-compat/src/wire/bytes/mod.rs b/tvix/nix-compat/src/wire/bytes/mod.rs
index 9487536eb7..2ed071e379 100644
--- a/tvix/nix-compat/src/wire/bytes/mod.rs
+++ b/tvix/nix-compat/src/wire/bytes/mod.rs
@@ -1,23 +1,21 @@
 use std::{
     io::{Error, ErrorKind},
-    ops::RangeBounds,
+    mem::MaybeUninit,
+    ops::RangeInclusive,
 };
-use tokio::io::{AsyncReadExt, AsyncWriteExt};
+use tokio::io::{self, AsyncReadExt, AsyncWriteExt, ReadBuf};
 
-mod reader;
+pub(crate) mod reader;
 pub use reader::BytesReader;
 mod writer;
 pub use writer::BytesWriter;
 
-use super::primitive;
-
 /// 8 null bytes, used to write out padding.
 const EMPTY_BYTES: &[u8; 8] = &[0u8; 8];
 
 /// The length of the size field, in bytes is always 8.
 const LEN_SIZE: usize = 8;
 
-#[allow(dead_code)]
 /// Read a "bytes wire packet" from the AsyncRead.
 /// Rejects reading more than `allowed_size` bytes of payload.
 ///
@@ -33,26 +31,31 @@ const LEN_SIZE: usize = 8;
 /// On failure (for example if a too large byte packet was sent), the reader
 /// becomes unusable.
 ///
-/// This buffers the entire payload into memory, a streaming version will be
-/// added later.
-pub async fn read_bytes<R, S>(r: &mut R, allowed_size: S) -> std::io::Result<Vec<u8>>
+/// This buffers the entire payload into memory,
+/// a streaming version is available at [crate::wire::bytes::BytesReader].
+pub async fn read_bytes<R: ?Sized>(
+    r: &mut R,
+    allowed_size: RangeInclusive<usize>,
+) -> io::Result<Vec<u8>>
 where
     R: AsyncReadExt + Unpin,
-    S: RangeBounds<u64>,
 {
     // read the length field
-    let len = primitive::read_u64(r).await?;
-
-    if !allowed_size.contains(&len) {
-        return Err(std::io::Error::new(
-            std::io::ErrorKind::InvalidData,
-            "signalled package size not in allowed range",
-        ));
-    }
+    let len = r.read_u64_le().await?;
+    let len: usize = len
+        .try_into()
+        .ok()
+        .filter(|len| allowed_size.contains(len))
+        .ok_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::InvalidData,
+                "signalled package size not in allowed range",
+            )
+        })?;
 
     // calculate the total length, including padding.
     // byte packets are padded to 8 byte blocks each.
-    let padded_len = padding_len(len) as u64 + (len as u64);
+    let padded_len = padding_len(len as u64) as u64 + (len as u64);
     let mut limited_reader = r.take(padded_len);
 
     let mut buf = Vec::new();
@@ -61,34 +64,87 @@ where
 
     // make sure we got exactly the number of bytes, and not less.
     if s as u64 != padded_len {
-        return Err(std::io::Error::new(
-            std::io::ErrorKind::InvalidData,
-            "got less bytes than expected",
-        ));
+        return Err(io::ErrorKind::UnexpectedEof.into());
     }
 
-    let (_content, padding) = buf.split_at(len as usize);
+    let (_content, padding) = buf.split_at(len);
 
     // ensure the padding is all zeroes.
-    if !padding.iter().all(|e| *e == b'\0') {
-        return Err(std::io::Error::new(
-            std::io::ErrorKind::InvalidData,
+    if padding.iter().any(|&b| b != 0) {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidData,
             "padding is not all zeroes",
         ));
     }
 
     // return the data without the padding
-    buf.truncate(len as usize);
+    buf.truncate(len);
     Ok(buf)
 }
 
+pub(crate) async fn read_bytes_buf<'a, const N: usize, R: ?Sized>(
+    reader: &mut R,
+    buf: &'a mut [MaybeUninit<u8>; N],
+    allowed_size: RangeInclusive<usize>,
+) -> io::Result<&'a [u8]>
+where
+    R: AsyncReadExt + Unpin,
+{
+    assert_eq!(N % 8, 0);
+    assert!(*allowed_size.end() <= N);
+
+    let len = reader.read_u64_le().await?;
+    let len: usize = len
+        .try_into()
+        .ok()
+        .filter(|len| allowed_size.contains(len))
+        .ok_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::InvalidData,
+                "signalled package size not in allowed range",
+            )
+        })?;
+
+    let buf_len = (len + 7) & !7;
+    let buf = {
+        let mut read_buf = ReadBuf::uninit(&mut buf[..buf_len]);
+
+        while read_buf.filled().len() < buf_len {
+            reader.read_buf(&mut read_buf).await?;
+        }
+
+        // ReadBuf::filled does not pass the underlying buffer's lifetime through,
+        // so we must make a trip to hell.
+        //
+        // SAFETY: `read_buf` is filled up to `buf_len`, and we verify that it is
+        // still pointing at the same underlying buffer.
+        unsafe {
+            assert_eq!(read_buf.filled().as_ptr(), buf.as_ptr() as *const u8);
+            assume_init_bytes(&buf[..buf_len])
+        }
+    };
+
+    if buf[len..buf_len].iter().any(|&b| b != 0) {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidData,
+            "padding is not all zeroes",
+        ));
+    }
+
+    Ok(&buf[..len])
+}
+
+/// SAFETY: The bytes have to actually be initialized.
+unsafe fn assume_init_bytes(slice: &[MaybeUninit<u8>]) -> &[u8] {
+    &*(slice as *const [MaybeUninit<u8>] as *const [u8])
+}
+
 /// Read a "bytes wire packet" from the AsyncRead and try to parse it as a string.
 /// Internally uses [read_bytes].
 /// Rejects reading more than `allowed_size` bytes of payload.
-pub async fn read_string<R, S>(r: &mut R, allowed_size: S) -> std::io::Result<String>
+pub async fn read_string<R>(r: &mut R, allowed_size: RangeInclusive<usize>) -> io::Result<String>
 where
     R: AsyncReadExt + Unpin,
-    S: RangeBounds<u64>,
 {
     let bytes = read_bytes(r, allowed_size).await?;
     String::from_utf8(bytes).map_err(|e| Error::new(ErrorKind::InvalidData, e))
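
To make the framing concrete: `read_bytes` reads an 8-byte little-endian
length, validates it against `allowed_size`, then reads the payload plus
zero padding up to the next 8-byte boundary. A minimal sketch of that
length math (illustrative only, not code from this change):

    // Padded frame length for a given payload length.
    fn framed_len(len: u64) -> u64 {
        let padding = (8 - len % 8) % 8; // pad to the next 8-byte boundary
        len + padding
    }

    assert_eq!(framed_len(9), 16); // 9 payload bytes + 7 padding bytes
    assert_eq!(framed_len(8), 8);  // already aligned, no padding
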
@@ -106,9 +162,9 @@ where
 pub async fn write_bytes<W: AsyncWriteExt + Unpin, B: AsRef<[u8]>>(
     w: &mut W,
     b: B,
-) -> std::io::Result<()> {
+) -> io::Result<()> {
     // write the size packet.
-    primitive::write_u64(w, b.as_ref().len() as u64).await?;
+    w.write_u64_le(b.as_ref().len() as u64).await?;
 
     // write the payload
     w.write_all(b.as_ref()).await?;
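
For reference, the on-wire layout `write_bytes` produces for a 5-byte
payload, sketched byte by byte (illustrative only, not code from this
change):

    // write_bytes(w, b"hello") emits 16 bytes in total:
    let mut expected = Vec::new();
    expected.extend_from_slice(&5u64.to_le_bytes()); // length field, little endian
    expected.extend_from_slice(b"hello");            // payload
    expected.extend_from_slice(&[0u8; 3]);           // zero padding to 8-byte boundary
    assert_eq!(expected.len(), 16);
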
@@ -122,33 +178,10 @@ pub async fn write_bytes<W: AsyncWriteExt + Unpin, B: AsRef<[u8]>>(
 }
 
 /// Computes the number of bytes we should add to len (a length in
-/// bytes) to be alined on 64 bits (8 bytes).
+/// bytes) to be aligned on 64 bits (8 bytes).
 fn padding_len(len: u64) -> u8 {
-    let modulo = len % 8;
-    if modulo == 0 {
-        0
-    } else {
-        8 - modulo as u8
-    }
-}
-
-/// Models the position inside a "bytes wire packet" that the reader or writer
-/// is in.
-/// It can be in three different stages, inside size, payload or padding fields.
-/// The number tracks the number of bytes written inside the specific field.
-/// There shall be no ambiguous states, at the end of a stage we immediately
-/// move to the beginning of the next one:
-/// - Size(LEN_SIZE) must be expressed as Payload(0)
-/// - Payload(self.payload_len) must be expressed as Padding(0)
-/// There's one exception - Size(LEN_SIZE) in the reader represents a failure
-/// state we enter in case the allowed size doesn't match the allowed range.
-///
-/// Padding(padding_len) means we're at the end of the bytes wire packet.
-#[derive(Clone, Debug, PartialEq, Eq)]
-enum BytesPacketPosition {
-    Size(usize),
-    Payload(u64),
-    Padding(usize),
+    let aligned = len.wrapping_add(7) & !7;
+    aligned.wrapping_sub(len) as u8
 }
 
 #[cfg(test)]
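
The rewritten `padding_len` is branchless: it rounds `len` up to the next
multiple of 8 and subtracts, with wrapping arithmetic keeping `u64::MAX`
well-defined (as the new test below asserts). A quick equivalence sketch
against the old modulo formulation (illustrative only):

    fn old(len: u64) -> u8 {
        let m = len % 8;
        if m == 0 { 0 } else { 8 - m as u8 }
    }
    fn branchless(len: u64) -> u8 {
        let aligned = len.wrapping_add(7) & !7;
        aligned.wrapping_sub(len) as u8
    }
    for len in (0u64..64).chain([u64::MAX - 1, u64::MAX]) {
        assert_eq!(old(len), branchless(len));
    }
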
@@ -160,7 +193,7 @@ mod tests {
 
     /// The maximum length of bytes packets we're willing to accept in the test
     /// cases.
-    const MAX_LEN: u64 = 1024;
+    const MAX_LEN: usize = 1024;
 
     #[tokio::test]
     async fn test_read_8_bytes() {
@@ -171,10 +204,7 @@ mod tests {
 
         assert_eq!(
             &12345678u64.to_le_bytes(),
-            read_bytes(&mut mock, 0u64..MAX_LEN)
-                .await
-                .unwrap()
-                .as_slice()
+            read_bytes(&mut mock, 0..=MAX_LEN).await.unwrap().as_slice()
         );
     }
 
@@ -187,10 +217,7 @@ mod tests {
 
         assert_eq!(
             hex!("010203040506070809"),
-            read_bytes(&mut mock, 0u64..MAX_LEN)
-                .await
-                .unwrap()
-                .as_slice()
+            read_bytes(&mut mock, 0..=MAX_LEN).await.unwrap().as_slice()
         );
     }
 
@@ -202,10 +229,7 @@ mod tests {
 
         assert_eq!(
             hex!(""),
-            read_bytes(&mut mock, 0u64..MAX_LEN)
-                .await
-                .unwrap()
-                .as_slice()
+            read_bytes(&mut mock, 0..=MAX_LEN).await.unwrap().as_slice()
         );
     }
 
@@ -215,7 +239,7 @@ mod tests {
     async fn test_read_reject_too_large() {
         let mut mock = Builder::new().read(&100u64.to_le_bytes()).build();
 
-        read_bytes(&mut mock, 10..10)
+        read_bytes(&mut mock, 10..=10)
             .await
             .expect_err("expect this to fail");
     }
@@ -251,4 +275,9 @@ mod tests {
             .build();
         assert_ok!(write_bytes(&mut mock, &input).await)
     }
+
+    #[test]
+    fn padding_len_u64_max() {
+        assert_eq!(padding_len(u64::MAX), 1);
+    }
 }
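
As a usage sketch for this module (assuming a tokio runtime; the in-memory
pipe and error handling are illustrative, not from this change), the two
halves round-trip a payload:

    use tokio::io::duplex;

    let (mut tx, mut rx) = duplex(64);
    write_bytes(&mut tx, b"hello").await?;
    let payload = read_bytes(&mut rx, 0..=1024).await?;
    assert_eq!(payload, b"hello");
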
diff --git a/tvix/nix-compat/src/wire/bytes/reader.rs b/tvix/nix-compat/src/wire/bytes/reader.rs
deleted file mode 100644
index c36729c614..0000000000
--- a/tvix/nix-compat/src/wire/bytes/reader.rs
+++ /dev/null
@@ -1,464 +0,0 @@
-use pin_project_lite::pin_project;
-use std::{
-    ops::RangeBounds,
-    task::{ready, Poll},
-};
-use tokio::io::AsyncRead;
-
-use super::{padding_len, BytesPacketPosition, LEN_SIZE};
-
-pin_project! {
-    /// Reads a "bytes wire packet" from the underlying reader.
-    /// The format is the same as in [crate::wire::bytes::read_bytes],
-    /// however this structure provides a [AsyncRead] interface,
-    /// allowing to not having to pass around the entire payload in memory.
-    ///
-    /// After being constructed with the underlying reader and an allowed size,
-    /// subsequent requests to poll_read will return payload data until the end
-    /// of the packet is reached.
-    ///
-    /// Internally, it will first read over the size packet, filling payload_size,
-    /// ensuring it fits allowed_size, then return payload data.
-    /// It will only signal EOF (returning `Ok(())` without filling the buffer anymore)
-    /// when all padding has been successfully consumed too.
-    ///
-    /// This also means, it's important for a user to always read to the end,
-    /// and not just call read_exact - otherwise it might not skip over the
-    /// padding, and return garbage when reading the next packet.
-    ///
-    /// In case of an error due to size constraints, or in case of not reading
-    /// all the way to the end (and getting a EOF), the underlying reader is no
-    /// longer usable and might return garbage.
-    pub struct BytesReader<R, S>
-    where
-    R: AsyncRead,
-    S: RangeBounds<u64>,
-
-    {
-        #[pin]
-        inner: R,
-
-        allowed_size: S,
-        payload_size: [u8; 8],
-        state: BytesPacketPosition,
-    }
-}
-
-impl<R, S> BytesReader<R, S>
-where
-    R: AsyncRead + Unpin,
-    S: RangeBounds<u64>,
-{
-    /// Constructs a new BytesReader, using the underlying passed reader.
-    pub fn new(r: R, allowed_size: S) -> Self {
-        Self {
-            inner: r,
-            allowed_size,
-            payload_size: [0; 8],
-            state: BytesPacketPosition::Size(0),
-        }
-    }
-}
-/// Returns an error if the passed usize is 0.
-#[inline]
-fn ensure_nonzero_bytes_read(bytes_read: usize) -> Result<usize, std::io::Error> {
-    if bytes_read == 0 {
-        Err(std::io::Error::new(
-            std::io::ErrorKind::UnexpectedEof,
-            "underlying reader returned EOF",
-        ))
-    } else {
-        Ok(bytes_read)
-    }
-}
-
-impl<R, S> AsyncRead for BytesReader<R, S>
-where
-    R: AsyncRead,
-    S: RangeBounds<u64>,
-{
-    fn poll_read(
-        self: std::pin::Pin<&mut Self>,
-        cx: &mut std::task::Context<'_>,
-        buf: &mut tokio::io::ReadBuf<'_>,
-    ) -> Poll<std::io::Result<()>> {
-        let mut this = self.project();
-
-        // Use a loop, so we can deal with (multiple) state transitions.
-        loop {
-            match *this.state {
-                BytesPacketPosition::Size(LEN_SIZE) => {
-                    // used in case an invalid size was signalled.
-                    Err(std::io::Error::new(
-                        std::io::ErrorKind::InvalidData,
-                        "signalled package size not in allowed range",
-                    ))?
-                }
-                BytesPacketPosition::Size(pos) => {
-                    // try to read more of the size field.
-                    // We wrap a BufRead around this.payload_size here, and set_filled.
-                    let mut read_buf = tokio::io::ReadBuf::new(this.payload_size);
-                    read_buf.advance(pos);
-                    ready!(this.inner.as_mut().poll_read(cx, &mut read_buf))?;
-
-                    ensure_nonzero_bytes_read(read_buf.filled().len() - pos)?;
-
-                    let total_size_read = read_buf.filled().len();
-                    if total_size_read == LEN_SIZE {
-                        // If the entire payload size was read, parse it
-                        let payload_size = u64::from_le_bytes(*this.payload_size);
-
-                        if !this.allowed_size.contains(&payload_size) {
-                            // If it's not in the allowed
-                            // range, transition to failure mode
-                            // `BytesPacketPosition::Size(LEN_SIZE)`, where only
-                            // an error is returned.
-                            *this.state = BytesPacketPosition::Size(LEN_SIZE)
-                        } else if payload_size == 0 {
-                            // If the payload size is 0, move on to reading padding directly.
-                            *this.state = BytesPacketPosition::Padding(0)
-                        } else {
-                            // Else, transition to reading the payload.
-                            *this.state = BytesPacketPosition::Payload(0)
-                        }
-                    } else {
-                        // If we still need to read more of payload size, update
-                        // our position in the state.
-                        *this.state = BytesPacketPosition::Size(total_size_read)
-                    }
-                }
-                BytesPacketPosition::Payload(pos) => {
-                    let signalled_size = u64::from_le_bytes(*this.payload_size);
-                    // We don't enter this match arm at all if we're expecting empty payload
-                    debug_assert!(signalled_size > 0, "signalled size must be larger than 0");
-
-                    // Read from the underlying reader into buf
-                    // We cap the ReadBuf to the size of the payload, as we
-                    // don't want to leak padding to the caller.
-                    let bytes_read = ensure_nonzero_bytes_read({
-                        // Reducing these two u64 to usize on 32bits is fine - we
-                        // only care about not reading too much, not too less.
-                        let mut limited_buf = buf.take((signalled_size - pos) as usize);
-                        ready!(this.inner.as_mut().poll_read(cx, &mut limited_buf))?;
-                        limited_buf.filled().len()
-                    })?;
-
-                    // SAFETY: we just did populate this, but through limited_buf.
-                    unsafe { buf.assume_init(bytes_read) }
-                    buf.advance(bytes_read);
-
-                    if pos + bytes_read as u64 == signalled_size {
-                        // If we now read all payload, transition to padding
-                        // state.
-                        *this.state = BytesPacketPosition::Padding(0);
-                    } else {
-                        // if we didn't read everything yet, update our position
-                        // in the state.
-                        *this.state = BytesPacketPosition::Payload(pos + bytes_read as u64);
-                    }
-
-                    // We return from poll_read here.
-                    // This is important, as any error (or even Pending) from
-                    // the underlying reader on the next read (be it padding or
-                    // payload) would require us to roll back buf, as generally
-                    // a AsyncRead::poll_read may not advance the buffer in case
-                    // of a nonsuccessful read.
-                    // It can't be misinterpreted as EOF, as we definitely *did*
-                    // write something into buf if we come to here (we pass
-                    // `ensure_nonzero_bytes_read`).
-                    return Ok(()).into();
-                }
-                BytesPacketPosition::Padding(pos) => {
-                    // Consume whatever padding is left, ensuring it's all null
-                    // bytes. Only return `Ready(Ok(()))` once we're past the
-                    // padding (or in cases where polling the inner reader
-                    // returns `Poll::Pending`).
-                    let signalled_size = u64::from_le_bytes(*this.payload_size);
-                    let total_padding_len = padding_len(signalled_size) as usize;
-
-                    let padding_len_remaining = total_padding_len - pos;
-                    if padding_len_remaining != 0 {
-                        // create a buffer only accepting the number of remaining padding bytes.
-                        let mut buf = [0; 8];
-                        let mut padding_buf = tokio::io::ReadBuf::new(&mut buf);
-                        let mut padding_buf = padding_buf.take(padding_len_remaining);
-
-                        // read into padding_buf.
-                        ready!(this.inner.as_mut().poll_read(cx, &mut padding_buf))?;
-                        let bytes_read = ensure_nonzero_bytes_read(padding_buf.filled().len())?;
-
-                        *this.state = BytesPacketPosition::Padding(pos + bytes_read);
-
-                        // ensure the bytes are not null bytes
-                        if !padding_buf.filled().iter().all(|e| *e == b'\0') {
-                            return Err(std::io::Error::new(
-                                std::io::ErrorKind::InvalidData,
-                                "padding is not all zeroes",
-                            ))
-                            .into();
-                        }
-
-                        // if we still have padding to read, run the loop again.
-                        continue;
-                    }
-                    // return EOF
-                    return Ok(()).into();
-                }
-            }
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::time::Duration;
-
-    use crate::wire::bytes::write_bytes;
-    use hex_literal::hex;
-    use lazy_static::lazy_static;
-    use rstest::rstest;
-    use tokio::io::AsyncReadExt;
-    use tokio_test::{assert_err, io::Builder};
-
-    use super::*;
-
-    /// The maximum length of bytes packets we're willing to accept in the test
-    /// cases.
-    const MAX_LEN: u64 = 1024;
-
-    lazy_static! {
-        pub static ref LARGE_PAYLOAD: Vec<u8> = (0..255).collect::<Vec<u8>>().repeat(4 * 1024);
-    }
-
-    /// Helper function, calling the (simpler) write_bytes with the payload.
-    /// We use this to create data we want to read from the wire.
-    async fn produce_packet_bytes(payload: &[u8]) -> Vec<u8> {
-        let mut exp = vec![];
-        write_bytes(&mut exp, payload).await.unwrap();
-        exp
-    }
-
-    /// Read bytes packets of various length, and ensure read_to_end returns the
-    /// expected payload.
-    #[rstest]
-    #[case::empty(&[])] // empty bytes packet
-    #[case::size_1b(&[0xff])] // 1 bytes payload
-    #[case::size_8b(&hex!("0001020304050607"))] // 8 bytes payload (no padding)
-    #[case::size_9b( &hex!("000102030405060708"))] // 9 bytes payload (7 bytes padding)
-    #[case::size_1m(LARGE_PAYLOAD.as_slice())] // larger bytes packet
-    #[tokio::test]
-    async fn read_payload_correct(#[case] payload: &[u8]) {
-        let mut mock = Builder::new()
-            .read(&produce_packet_bytes(payload).await)
-            .build();
-
-        let mut r = BytesReader::new(&mut mock, ..=LARGE_PAYLOAD.len() as u64);
-        let mut buf = Vec::new();
-        r.read_to_end(&mut buf).await.expect("must succeed");
-
-        assert_eq!(payload, &buf[..]);
-    }
-
-    /// Fail if the bytes packet is larger than allowed
-    #[tokio::test]
-    async fn read_bigger_than_allowed_fail() {
-        let payload = LARGE_PAYLOAD.as_slice();
-        let mut mock = Builder::new()
-            .read(&produce_packet_bytes(payload).await[0..8]) // We stop reading after the size packet
-            .build();
-
-        let mut r = BytesReader::new(&mut mock, ..2048);
-        let mut buf = Vec::new();
-        assert_err!(r.read_to_end(&mut buf).await);
-    }
-
-    /// Fail if the bytes packet is smaller than allowed
-    #[tokio::test]
-    async fn read_smaller_than_allowed_fail() {
-        let payload = &[0x00, 0x01, 0x02];
-        let mut mock = Builder::new()
-            .read(&produce_packet_bytes(payload).await[0..8]) // We stop reading after the size packet
-            .build();
-
-        let mut r = BytesReader::new(&mut mock, 1024..2048);
-        let mut buf = Vec::new();
-        assert_err!(r.read_to_end(&mut buf).await);
-    }
-
-    /// Fail if the padding is not all zeroes
-    #[tokio::test]
-    async fn read_fail_if_nonzero_padding() {
-        let payload = &[0x00, 0x01, 0x02];
-        let mut packet_bytes = produce_packet_bytes(payload).await;
-        // Flip some bits in the padding
-        packet_bytes[12] = 0xff;
-        let mut mock = Builder::new().read(&packet_bytes).build(); // We stop reading after the faulty bit
-
-        let mut r = BytesReader::new(&mut mock, ..MAX_LEN);
-        let mut buf = Vec::new();
-
-        r.read_to_end(&mut buf).await.expect_err("must fail");
-    }
-
-    /// Start a 9 bytes payload packet, but have the underlying reader return
-    /// EOF in the middle of the size packet (after 4 bytes).
-    /// We should get an unexpected EOF error, already when trying to read the
-    /// first byte (of payload)
-    #[tokio::test]
-    async fn read_9b_eof_during_size() {
-        let payload = &hex!("FF0102030405060708");
-        let mut mock = Builder::new()
-            .read(&produce_packet_bytes(payload).await[..4])
-            .build();
-
-        let mut r = BytesReader::new(&mut mock, ..MAX_LEN);
-        let mut buf = [0u8; 1];
-
-        assert_eq!(
-            r.read_exact(&mut buf).await.expect_err("must fail").kind(),
-            std::io::ErrorKind::UnexpectedEof
-        );
-
-        assert_eq!(&[0], &buf, "buffer should stay empty");
-    }
-
-    /// Start a 9 bytes payload packet, but have the underlying reader return
-    /// EOF in the middle of the payload (4 bytes into the payload).
-    /// We should get an unexpected EOF error, after reading the first 4 bytes
-    /// (successfully).
-    #[tokio::test]
-    async fn read_9b_eof_during_payload() {
-        let payload = &hex!("FF0102030405060708");
-        let mut mock = Builder::new()
-            .read(&produce_packet_bytes(payload).await[..8 + 4])
-            .build();
-
-        let mut r = BytesReader::new(&mut mock, ..MAX_LEN);
-        let mut buf = [0; 9];
-
-        r.read_exact(&mut buf[..4]).await.expect("must succeed");
-
-        assert_eq!(
-            r.read_exact(&mut buf[4..=4])
-                .await
-                .expect_err("must fail")
-                .kind(),
-            std::io::ErrorKind::UnexpectedEof
-        );
-    }
-
-    /// Start a 9 bytes payload packet, but return an error at various stages *after* the actual payload.
-    /// read_exact with a 9 bytes buffer is expected to succeed, but any further
-    /// read, as well as read_to_end are expected to fail.
-    #[rstest]
-    #[case::before_padding(8 + 9)]
-    #[case::during_padding(8 + 9 + 2)]
-    #[case::after_padding(8 + 9 + padding_len(9) as usize)]
-    #[tokio::test]
-    async fn read_9b_eof_after_payload(#[case] offset: usize) {
-        let payload = &hex!("FF0102030405060708");
-        let mut mock = Builder::new()
-            .read(&produce_packet_bytes(payload).await[..offset])
-            .build();
-
-        let mut r = BytesReader::new(&mut mock, ..MAX_LEN);
-        let mut buf = [0; 9];
-
-        // read_exact of the payload will succeed, but a subsequent read will
-        // return UnexpectedEof error.
-        r.read_exact(&mut buf).await.expect("should succeed");
-        assert_eq!(
-            r.read_exact(&mut buf[4..=4])
-                .await
-                .expect_err("must fail")
-                .kind(),
-            std::io::ErrorKind::UnexpectedEof
-        );
-
-        // read_to_end will fail.
-        let mut mock = Builder::new()
-            .read(&produce_packet_bytes(payload).await[..8 + payload.len()])
-            .build();
-
-        let mut r = BytesReader::new(&mut mock, ..MAX_LEN);
-        let mut buf = Vec::new();
-        assert_eq!(
-            r.read_to_end(&mut buf).await.expect_err("must fail").kind(),
-            std::io::ErrorKind::UnexpectedEof
-        );
-    }
-
-    /// Start a 9 bytes payload packet, but return an error after a certain position.
-    /// Ensure that error is propagated.
-    #[rstest]
-    #[case::during_size(4)]
-    #[case::before_payload(8)]
-    #[case::during_payload(8 + 4)]
-    #[case::before_padding(8 + 4)]
-    #[case::during_padding(8 + 9 + 2)]
-    #[tokio::test]
-    async fn propagate_error_from_reader(#[case] offset: usize) {
-        let payload = &hex!("FF0102030405060708");
-        let mut mock = Builder::new()
-            .read(&produce_packet_bytes(payload).await[..offset])
-            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
-            .build();
-
-        let mut r = BytesReader::new(&mut mock, ..MAX_LEN);
-        let mut buf = Vec::new();
-
-        let err = r.read_to_end(&mut buf).await.expect_err("must fail");
-        assert_eq!(
-            err.kind(),
-            std::io::ErrorKind::Other,
-            "error kind must match"
-        );
-
-        assert_eq!(
-            err.into_inner().unwrap().to_string(),
-            "foo",
-            "error payload must contain foo"
-        );
-    }
-
-    /// If there's an error right after the padding, we don't propagate it, as
-    /// we're done reading. We just return EOF.
-    #[tokio::test]
-    async fn no_error_after_eof() {
-        let payload = &hex!("FF0102030405060708");
-        let mut mock = Builder::new()
-            .read(&produce_packet_bytes(payload).await)
-            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
-            .build();
-
-        let mut r = BytesReader::new(&mut mock, ..MAX_LEN);
-        let mut buf = Vec::new();
-
-        r.read_to_end(&mut buf).await.expect("must succeed");
-        assert_eq!(buf.as_slice(), payload);
-    }
-
-    /// Introduce various stalls in various places of the packet, to ensure we
-    /// handle these cases properly, too.
-    #[rstest]
-    #[case::beginning(0)]
-    #[case::before_payload(8)]
-    #[case::during_payload(8 + 4)]
-    #[case::before_padding(8 + 4)]
-    #[case::during_padding(8 + 9 + 2)]
-    #[tokio::test]
-    async fn read_payload_correct_pending(#[case] offset: usize) {
-        let payload = &hex!("FF0102030405060708");
-        let mut mock = Builder::new()
-            .read(&produce_packet_bytes(payload).await[..offset])
-            .wait(Duration::from_nanos(0))
-            .read(&produce_packet_bytes(payload).await[offset..])
-            .build();
-
-        let mut r = BytesReader::new(&mut mock, ..=LARGE_PAYLOAD.len() as u64);
-        let mut buf = Vec::new();
-        r.read_to_end(&mut buf).await.expect("must succeed");
-
-        assert_eq!(payload, &buf[..]);
-    }
-}
diff --git a/tvix/nix-compat/src/wire/bytes/reader/mod.rs b/tvix/nix-compat/src/wire/bytes/reader/mod.rs
new file mode 100644
index 0000000000..6bd376c06f
--- /dev/null
+++ b/tvix/nix-compat/src/wire/bytes/reader/mod.rs
@@ -0,0 +1,684 @@
+use std::{
+    future::Future,
+    io,
+    num::NonZeroU64,
+    ops::RangeBounds,
+    pin::Pin,
+    task::{self, ready, Poll},
+};
+use tokio::io::{AsyncBufRead, AsyncRead, AsyncReadExt, ReadBuf};
+
+use trailer::{read_trailer, ReadTrailer, Trailer};
+
+#[doc(hidden)]
+pub use self::trailer::Pad;
+pub(crate) use self::trailer::Tag;
+mod trailer;
+
+/// Reads a "bytes wire packet" from the underlying reader.
+/// The format is the same as in [crate::wire::bytes::read_bytes];
+/// however, this structure provides an [AsyncRead] interface,
+/// avoiding having to hold the entire payload in memory.
+///
+/// It is constructed by reading a size with [BytesReader::new],
+/// and yields payload data until the end of the packet is reached.
+///
+/// It will not return the final bytes until all padding has been successfully
+/// consumed as well, so the reader must be read to its full length.
+///
+/// If the data is not read all the way to the end, or an error is encountered,
+/// the underlying reader is no longer usable and might return garbage.
+#[derive(Debug)]
+#[allow(private_bounds)]
+pub struct BytesReader<R, T: Tag = Pad> {
+    state: State<R, T>,
+}
+
+/// Split the `user_len` into `body_len` and `tail_len`, which are respectively
+/// the non-terminal 8-byte blocks, and the ≤8 bytes of user data contained in
+/// the trailer block.
+#[inline(always)]
+fn split_user_len(user_len: NonZeroU64) -> (u64, u8) {
+    let n = user_len.get() - 1;
+    let body_len = n & !7;
+    let tail_len = (n & 7) as u8 + 1;
+    (body_len, tail_len)
+}
+
+#[derive(Debug)]
+enum State<R, T: Tag> {
+    /// Full 8-byte blocks are being read and released to the caller.
+    /// NOTE: The final 8-byte block is *always* part of the trailer.
+    Body {
+        reader: Option<R>,
+        consumed: u64,
+        /// The total length of all user data contained in both the body and trailer.
+        user_len: NonZeroU64,
+    },
+    /// The trailer is in the process of being read.
+    ReadTrailer(ReadTrailer<R, T>),
+    /// The trailer has been fully read and validated,
+    /// and data can now be released to the caller.
+    ReleaseTrailer { consumed: u8, data: Trailer },
+}
+
+impl<R> BytesReader<R>
+where
+    R: AsyncRead + Unpin,
+{
+    /// Constructs a new BytesReader, using the underlying passed reader.
+    pub async fn new<S: RangeBounds<u64>>(reader: R, allowed_size: S) -> io::Result<Self> {
+        BytesReader::new_internal(reader, allowed_size).await
+    }
+}
+
+#[allow(private_bounds)]
+impl<R, T: Tag> BytesReader<R, T>
+where
+    R: AsyncRead + Unpin,
+{
+    /// Constructs a new BytesReader, using the underlying passed reader.
+    pub(crate) async fn new_internal<S: RangeBounds<u64>>(
+        mut reader: R,
+        allowed_size: S,
+    ) -> io::Result<Self> {
+        let size = reader.read_u64_le().await?;
+
+        if !allowed_size.contains(&size) {
+            return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid size"));
+        }
+
+        Ok(Self {
+            state: match NonZeroU64::new(size) {
+                Some(size) => State::Body {
+                    reader: Some(reader),
+                    consumed: 0,
+                    user_len: size,
+                },
+                None => State::ReleaseTrailer {
+                    consumed: 0,
+                    data: read_trailer::<R, T>(reader, 0).await?,
+                },
+            },
+        })
+    }
+
+    /// Returns true if no data remains to be read.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Remaining data length, i.e. not including data already read.
+    pub fn len(&self) -> u64 {
+        match self.state {
+            State::Body {
+                consumed, user_len, ..
+            } => user_len.get() - consumed,
+            State::ReadTrailer(ref fut) => fut.len() as u64,
+            State::ReleaseTrailer { consumed, ref data } => data.len() as u64 - consumed as u64,
+        }
+    }
+}
+
+#[allow(private_bounds)]
+impl<R: AsyncRead + Unpin, T: Tag> AsyncRead for BytesReader<R, T> {
+    fn poll_read(
+        mut self: Pin<&mut Self>,
+        cx: &mut task::Context,
+        buf: &mut ReadBuf,
+    ) -> Poll<io::Result<()>> {
+        let this = &mut self.state;
+
+        loop {
+            match this {
+                State::Body {
+                    reader,
+                    consumed,
+                    user_len,
+                } => {
+                    let (body_len, tail_len) = split_user_len(*user_len);
+                    let remaining = body_len - *consumed;
+
+                    let reader = if remaining == 0 {
+                        let reader = reader.take().unwrap();
+                        *this = State::ReadTrailer(read_trailer(reader, tail_len));
+                        continue;
+                    } else {
+                        Pin::new(reader.as_mut().unwrap())
+                    };
+
+                    let mut bytes_read = 0;
+                    ready!(with_limited(buf, remaining, |buf| {
+                        let ret = reader.poll_read(cx, buf);
+                        bytes_read = buf.initialized().len();
+                        ret
+                    }))?;
+
+                    *consumed += bytes_read as u64;
+
+                    return if bytes_read != 0 {
+                        Ok(())
+                    } else {
+                        Err(io::ErrorKind::UnexpectedEof.into())
+                    }
+                    .into();
+                }
+                State::ReadTrailer(fut) => {
+                    *this = State::ReleaseTrailer {
+                        consumed: 0,
+                        data: ready!(Pin::new(fut).poll(cx))?,
+                    };
+                }
+                State::ReleaseTrailer { consumed, data } => {
+                    let data = &data[*consumed as usize..];
+                    let data = &data[..usize::min(data.len(), buf.remaining())];
+
+                    buf.put_slice(data);
+                    *consumed += data.len() as u8;
+
+                    return Ok(()).into();
+                }
+            }
+        }
+    }
+}
+
+#[allow(private_bounds)]
+impl<R: AsyncBufRead + Unpin, T: Tag> AsyncBufRead for BytesReader<R, T> {
+    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<io::Result<&[u8]>> {
+        let this = &mut self.get_mut().state;
+
+        loop {
+            match this {
+                // This state comes *after* the following case,
+                // but we can't keep it in logical order because
+                // that would lengthen the borrow lifetime.
+                State::Body {
+                    reader,
+                    consumed,
+                    user_len,
+                } if {
+                    let (body_len, _) = split_user_len(*user_len);
+                    let remaining = body_len - *consumed;
+
+                    remaining == 0
+                } =>
+                {
+                    let reader = reader.take().unwrap();
+                    let (_, tail_len) = split_user_len(*user_len);
+
+                    *this = State::ReadTrailer(read_trailer(reader, tail_len));
+                }
+                State::Body {
+                    reader,
+                    consumed,
+                    user_len,
+                } => {
+                    let (body_len, _) = split_user_len(*user_len);
+                    let remaining = body_len - *consumed;
+
+                    let reader = Pin::new(reader.as_mut().unwrap());
+
+                    match ready!(reader.poll_fill_buf(cx))? {
+                        &[] => {
+                            return Err(io::ErrorKind::UnexpectedEof.into()).into();
+                        }
+                        mut buf => {
+                            if buf.len() as u64 > remaining {
+                                buf = &buf[..remaining as usize];
+                            }
+
+                            return Ok(buf).into();
+                        }
+                    }
+                }
+                State::ReadTrailer(fut) => {
+                    *this = State::ReleaseTrailer {
+                        consumed: 0,
+                        data: ready!(Pin::new(fut).poll(cx))?,
+                    };
+                }
+                State::ReleaseTrailer { consumed, data } => {
+                    return Ok(&data[*consumed as usize..]).into();
+                }
+            }
+        }
+    }
+
+    fn consume(mut self: Pin<&mut Self>, amt: usize) {
+        match &mut self.state {
+            State::Body {
+                reader,
+                consumed,
+                user_len,
+            } => {
+                let reader = Pin::new(reader.as_mut().unwrap());
+                let (body_len, _) = split_user_len(*user_len);
+
+                *consumed = consumed
+                    .checked_add(amt as u64)
+                    .filter(|&consumed| consumed <= body_len)
+                    .expect("consumed out of bounds");
+
+                reader.consume(amt);
+            }
+            State::ReadTrailer(_) => unreachable!(),
+            State::ReleaseTrailer { consumed, data } => {
+                *consumed = amt
+                    .checked_add(*consumed as usize)
+                    .filter(|&consumed| consumed <= data.len())
+                    .expect("consumed out of bounds") as u8;
+            }
+        }
+    }
+}
+
+/// Make a limited version of `buf`, consisting only of up to `n` bytes of the unfilled section, and call `f` with it.
+/// After `f` returns, we propagate the filled cursor advancement back to `buf`.
+fn with_limited<R>(buf: &mut ReadBuf, n: u64, f: impl FnOnce(&mut ReadBuf) -> R) -> R {
+    let mut nbuf = buf.take(n.try_into().unwrap_or(usize::MAX));
+    let ptr = nbuf.initialized().as_ptr();
+    let ret = f(&mut nbuf);
+
+    // SAFETY: `ReadBuf::take` only returns the *unfilled* section of `buf`,
+    // so anything filled is new, initialized data.
+    //
+    // We verify that `nbuf` still points to the same buffer,
+    // so we're sure it hasn't been swapped out.
+    unsafe {
+        // ensure our buffer hasn't been swapped out
+        assert_eq!(nbuf.initialized().as_ptr(), ptr);
+
+        let n = nbuf.filled().len();
+        buf.assume_init(n);
+        buf.advance(n);
+    }
+
+    ret
+}
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use crate::wire::bytes::{padding_len, write_bytes};
+    use hex_literal::hex;
+    use lazy_static::lazy_static;
+    use rstest::rstest;
+    use tokio::io::{AsyncReadExt, BufReader};
+    use tokio_test::io::Builder;
+
+    use super::*;
+
+    /// The maximum length of bytes packets we're willing to accept in the test
+    /// cases.
+    const MAX_LEN: u64 = 1024;
+
+    lazy_static! {
+        pub static ref LARGE_PAYLOAD: Vec<u8> = (0..255).collect::<Vec<u8>>().repeat(4 * 1024);
+    }
+
+    /// Helper function, calling the (simpler) write_bytes with the payload.
+    /// We use this to create data we want to read from the wire.
+    async fn produce_packet_bytes(payload: &[u8]) -> Vec<u8> {
+        let mut exp = vec![];
+        write_bytes(&mut exp, payload).await.unwrap();
+        exp
+    }
+
+    /// Read bytes packets of various length, and ensure read_to_end returns the
+    /// expected payload.
+    #[rstest]
+    #[case::empty(&[])] // empty bytes packet
+    #[case::size_1b(&[0xff])] // 1 bytes payload
+    #[case::size_8b(&hex!("0001020304050607"))] // 8 bytes payload (no padding)
+    #[case::size_9b(&hex!("000102030405060708"))] // 9 bytes payload (7 bytes padding)
+    #[case::size_1m(LARGE_PAYLOAD.as_slice())] // larger bytes packet
+    #[tokio::test]
+    async fn read_payload_correct(#[case] payload: &[u8]) {
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await)
+            .build();
+
+        let mut r = BytesReader::new(&mut mock, ..=LARGE_PAYLOAD.len() as u64)
+            .await
+            .unwrap();
+        let mut buf = Vec::new();
+        r.read_to_end(&mut buf).await.expect("must succeed");
+
+        assert_eq!(payload, &buf[..]);
+    }
+
+    /// Read bytes packets of various length, and ensure copy_buf reads the
+    /// expected payload.
+    #[rstest]
+    #[case::empty(&[])] // empty bytes packet
+    #[case::size_1b(&[0xff])] // 1 bytes payload
+    #[case::size_8b(&hex!("0001020304050607"))] // 8 bytes payload (no padding)
+    #[case::size_9b(&hex!("000102030405060708"))] // 9 bytes payload (7 bytes padding)
+    #[case::size_1m(LARGE_PAYLOAD.as_slice())] // larger bytes packet
+    #[tokio::test]
+    async fn read_payload_correct_readbuf(#[case] payload: &[u8]) {
+        let mut mock = BufReader::new(
+            Builder::new()
+                .read(&produce_packet_bytes(payload).await)
+                .build(),
+        );
+
+        let mut r = BytesReader::new(&mut mock, ..=LARGE_PAYLOAD.len() as u64)
+            .await
+            .unwrap();
+
+        let mut buf = Vec::new();
+        tokio::io::copy_buf(&mut r, &mut buf)
+            .await
+            .expect("copy_buf must succeed");
+
+        assert_eq!(payload, &buf[..]);
+    }
+
+    /// Fail if the bytes packet is larger than allowed
+    #[tokio::test]
+    async fn read_bigger_than_allowed_fail() {
+        let payload = LARGE_PAYLOAD.as_slice();
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[0..8]) // We stop reading after the size packet
+            .build();
+
+        assert_eq!(
+            BytesReader::new(&mut mock, ..2048)
+                .await
+                .unwrap_err()
+                .kind(),
+            io::ErrorKind::InvalidData
+        );
+    }
+
+    /// Fail if the bytes packet is smaller than allowed
+    #[tokio::test]
+    async fn read_smaller_than_allowed_fail() {
+        let payload = &[0x00, 0x01, 0x02];
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[0..8]) // We stop reading after the size packet
+            .build();
+
+        assert_eq!(
+            BytesReader::new(&mut mock, 1024..2048)
+                .await
+                .unwrap_err()
+                .kind(),
+            io::ErrorKind::InvalidData
+        );
+    }
+
+    /// Read the trailer immediately if there is no payload.
+    #[tokio::test]
+    async fn read_trailer_immediately() {
+        use crate::nar::wire::PadPar;
+
+        let mut mock = Builder::new()
+            .read(&[0; 8])
+            .read(&PadPar::PATTERN[8..])
+            .build();
+
+        BytesReader::<_, PadPar>::new_internal(&mut mock, ..)
+            .await
+            .unwrap();
+
+        // The mock reader will panic if dropped without reading all data.
+    }
+
+    /// Read the trailer even if we only read the exact payload size.
+    #[tokio::test]
+    async fn read_exact_trailer() {
+        use crate::nar::wire::PadPar;
+
+        let mut mock = Builder::new()
+            .read(&16u64.to_le_bytes())
+            .read(&[0x55; 16])
+            .read(&PadPar::PATTERN[8..])
+            .build();
+
+        let mut reader = BytesReader::<_, PadPar>::new_internal(&mut mock, ..)
+            .await
+            .unwrap();
+
+        let mut buf = [0; 16];
+        reader.read_exact(&mut buf).await.unwrap();
+        assert_eq!(buf, [0x55; 16]);
+
+        // The mock reader will panic if dropped without reading all data.
+    }
+
+    /// Fail if the padding is not all zeroes
+    #[tokio::test]
+    async fn read_fail_if_nonzero_padding() {
+        let payload = &[0x00, 0x01, 0x02];
+        let mut packet_bytes = produce_packet_bytes(payload).await;
+        // Flip some bits in the padding
+        packet_bytes[12] = 0xff;
+        let mut mock = Builder::new().read(&packet_bytes).build(); // We stop reading after the faulty bit
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+        let mut buf = Vec::new();
+
+        r.read_to_end(&mut buf).await.expect_err("must fail");
+    }
+
+    /// Start a 9 bytes payload packet, but have the underlying reader return
+    /// EOF in the middle of the size packet (after 4 bytes).
+    /// We should get an unexpected EOF error, already when trying to read the
+    /// first byte (of payload)
+    #[tokio::test]
+    async fn read_9b_eof_during_size() {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..4])
+            .build();
+
+        assert_eq!(
+            BytesReader::new(&mut mock, ..MAX_LEN)
+                .await
+                .expect_err("must fail")
+                .kind(),
+            io::ErrorKind::UnexpectedEof
+        );
+    }
+
+    /// Start a 9 bytes payload packet, but have the underlying reader return
+    /// EOF in the middle of the payload (4 bytes into the payload).
+    /// We should get an unexpected EOF error, after reading the first 4 bytes
+    /// (successfully).
+    #[tokio::test]
+    async fn read_9b_eof_during_payload() {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..8 + 4])
+            .build();
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+        let mut buf = [0; 9];
+
+        r.read_exact(&mut buf[..4]).await.expect("must succeed");
+
+        assert_eq!(
+            r.read_exact(&mut buf[4..=4])
+                .await
+                .expect_err("must fail")
+                .kind(),
+            std::io::ErrorKind::UnexpectedEof
+        );
+    }
+
+    /// Start a 9 bytes payload packet, but don't supply the necessary padding.
+    /// This is expected to always fail before returning the final data.
+    #[rstest]
+    #[case::before_padding(8 + 9)]
+    #[case::during_padding(8 + 9 + 2)]
+    #[case::after_padding(8 + 9 + padding_len(9) as usize - 1)]
+    #[tokio::test]
+    async fn read_9b_eof_after_payload(#[case] offset: usize) {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..offset])
+            .build();
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+
+        // read_exact of the payload *body* will succeed, but a subsequent read will
+        // return UnexpectedEof error.
+        assert_eq!(r.read_exact(&mut [0; 8]).await.unwrap(), 8);
+        assert_eq!(
+            r.read_exact(&mut [0]).await.unwrap_err().kind(),
+            std::io::ErrorKind::UnexpectedEof
+        );
+    }
+
+    /// Start a 9 bytes payload packet, but return an error after a certain position.
+    /// Ensure that error is propagated.
+    #[rstest]
+    #[case::during_size(4)]
+    #[case::before_payload(8)]
+    #[case::during_payload(8 + 4)]
+    #[case::before_padding(8 + 4)]
+    #[case::during_padding(8 + 9 + 2)]
+    #[tokio::test]
+    async fn propagate_error_from_reader(#[case] offset: usize) {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..offset])
+            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
+            .build();
+
+        // Either length reading or data reading can fail, depending on which test case we're in.
+        let err: io::Error = async {
+            let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await?;
+            let mut buf = Vec::new();
+
+            r.read_to_end(&mut buf).await?;
+
+            Ok(())
+        }
+        .await
+        .expect_err("must fail");
+
+        assert_eq!(
+            err.kind(),
+            std::io::ErrorKind::Other,
+            "error kind must match"
+        );
+
+        assert_eq!(
+            err.into_inner().unwrap().to_string(),
+            "foo",
+            "error payload must contain foo"
+        );
+    }
+
+    /// Start a 9 bytes payload packet, but return an error after a certain position.
+    /// Ensure that error is propagated (AsyncReadBuf case)
+    #[rstest]
+    #[case::during_size(4)]
+    #[case::before_payload(8)]
+    #[case::during_payload(8 + 4)]
+    #[case::before_padding(8 + 4)]
+    #[case::during_padding(8 + 9 + 2)]
+    #[tokio::test]
+    async fn propagate_error_from_reader_buffered(#[case] offset: usize) {
+        let payload = &hex!("FF0102030405060708");
+        let mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..offset])
+            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
+            .build();
+        let mut mock = BufReader::new(mock);
+
+        // Either length reading or data reading can fail, depending on which test case we're in.
+        let err: io::Error = async {
+            let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await?;
+            let mut buf = Vec::new();
+
+            tokio::io::copy_buf(&mut r, &mut buf).await?;
+
+            Ok(())
+        }
+        .await
+        .expect_err("must fail");
+
+        assert_eq!(
+            err.kind(),
+            std::io::ErrorKind::Other,
+            "error kind must match"
+        );
+
+        assert_eq!(
+            err.into_inner().unwrap().to_string(),
+            "foo",
+            "error payload must contain foo"
+        );
+    }
+
+    /// If there's an error right after the padding, we don't propagate it, as
+    /// we're done reading. We just return EOF.
+    #[tokio::test]
+    async fn no_error_after_eof() {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await)
+            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
+            .build();
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+        let mut buf = Vec::new();
+
+        r.read_to_end(&mut buf).await.expect("must succeed");
+        assert_eq!(buf.as_slice(), payload);
+    }
+
+    /// If there's an error right after the padding, we don't propagate it, as
+    /// we're done reading. We just return EOF.
+    #[tokio::test]
+    async fn no_error_after_eof_buffered() {
+        let payload = &hex!("FF0102030405060708");
+        let mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await)
+            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
+            .build();
+        let mut mock = BufReader::new(mock);
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+        let mut buf = Vec::new();
+
+        tokio::io::copy_buf(&mut r, &mut buf)
+            .await
+            .expect("must succeed");
+        assert_eq!(buf.as_slice(), payload);
+    }
+
+    /// Introduce various stalls in various places of the packet, to ensure we
+    /// handle these cases properly, too.
+    #[rstest]
+    #[case::beginning(0)]
+    #[case::before_payload(8)]
+    #[case::during_payload(8 + 4)]
+    #[case::before_padding(8 + 4)]
+    #[case::during_padding(8 + 9 + 2)]
+    #[tokio::test]
+    async fn read_payload_correct_pending(#[case] offset: usize) {
+        let payload = &hex!("FF0102030405060708");
+        let mut mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..offset])
+            .wait(Duration::from_nanos(0))
+            .read(&produce_packet_bytes(payload).await[offset..])
+            .build();
+
+        let mut r = BytesReader::new(&mut mock, ..=LARGE_PAYLOAD.len() as u64)
+            .await
+            .unwrap();
+        let mut buf = Vec::new();
+        r.read_to_end(&mut buf).await.expect("must succeed");
+
+        assert_eq!(payload, &buf[..]);
+    }
+}
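
A minimal usage sketch for the new reader, mirroring the tests above
(assumes a tokio runtime; `conn` and `MAX_LEN` stand in for a real
connection and size limit): the size is read and range-checked once in
`new`, and the payload then streams out via `AsyncRead`:

    use tokio::io::AsyncReadExt;

    let mut r = BytesReader::new(&mut conn, ..=MAX_LEN).await?; // size validated here
    let mut payload = Vec::new();
    r.read_to_end(&mut payload).await?; // also consumes and verifies the padding
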
diff --git a/tvix/nix-compat/src/wire/bytes/reader/trailer.rs b/tvix/nix-compat/src/wire/bytes/reader/trailer.rs
new file mode 100644
index 0000000000..3a5bb75e71
--- /dev/null
+++ b/tvix/nix-compat/src/wire/bytes/reader/trailer.rs
@@ -0,0 +1,197 @@
+use std::{
+    fmt::Debug,
+    future::Future,
+    marker::PhantomData,
+    ops::Deref,
+    pin::Pin,
+    task::{self, ready, Poll},
+};
+
+use tokio::io::{self, AsyncRead, ReadBuf};
+
+/// Trailer represents up to 8 bytes of data read as part of the trailer block(s)
+#[derive(Debug)]
+pub(crate) struct Trailer {
+    data_len: u8,
+    buf: [u8; 8],
+}
+
+impl Deref for Trailer {
+    type Target = [u8];
+
+    fn deref(&self) -> &Self::Target {
+        &self.buf[..self.data_len as usize]
+    }
+}
+
+/// Tag defines a "trailer tag": specific, fixed bytes that must follow wire data.
+pub(crate) trait Tag {
+    /// The expected suffix
+    ///
+    /// The first 8 bytes may be ignored, and its length must be a multiple of 8 bytes.
+    const PATTERN: &'static [u8];
+
+    /// Suitably sized buffer for reading [Self::PATTERN]
+    ///
+    /// HACK: This is a workaround for const generics limitations.
+    type Buf: AsRef<[u8]> + AsMut<[u8]> + Debug + Unpin;
+
+    /// Make an instance of [Self::Buf]
+    fn make_buf() -> Self::Buf;
+}
+
+#[derive(Debug)]
+pub enum Pad {}
+
+impl Tag for Pad {
+    const PATTERN: &'static [u8] = &[0; 8];
+
+    type Buf = [u8; 8];
+
+    fn make_buf() -> Self::Buf {
+        [0; 8]
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct ReadTrailer<R, T: Tag> {
+    reader: R,
+    data_len: u8,
+    filled: u8,
+    buf: T::Buf,
+    _phantom: PhantomData<fn(T) -> T>,
+}
+
+/// read_trailer returns a [Future] that reads a trailer with a given [Tag] from `reader`
+pub(crate) fn read_trailer<R: AsyncRead + Unpin, T: Tag>(
+    reader: R,
+    data_len: u8,
+) -> ReadTrailer<R, T> {
+    assert!(data_len <= 8, "payload in trailer must be <= 8 bytes");
+
+    let buf = T::make_buf();
+    assert_eq!(buf.as_ref().len(), T::PATTERN.len());
+    assert_eq!(T::PATTERN.len() % 8, 0);
+
+    ReadTrailer {
+        reader,
+        data_len,
+        filled: if data_len != 0 { 0 } else { 8 },
+        buf,
+        _phantom: PhantomData,
+    }
+}
+
+impl<R, T: Tag> ReadTrailer<R, T> {
+    pub fn len(&self) -> u8 {
+        self.data_len
+    }
+}
+
+impl<R: AsyncRead + Unpin, T: Tag> Future for ReadTrailer<R, T> {
+    type Output = io::Result<Trailer>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
+        let this = &mut *self;
+
+        loop {
+            if this.filled >= this.data_len {
+                let check_range = || this.data_len as usize..this.filled as usize;
+
+                if this.buf.as_ref()[check_range()] != T::PATTERN[check_range()] {
+                    return Err(io::Error::new(
+                        io::ErrorKind::InvalidData,
+                        "invalid trailer",
+                    ))
+                    .into();
+                }
+            }
+
+            if this.filled as usize == T::PATTERN.len() {
+                let mut buf = [0; 8];
+                buf.copy_from_slice(&this.buf.as_ref()[..8]);
+
+                return Ok(Trailer {
+                    data_len: this.data_len,
+                    buf,
+                })
+                .into();
+            }
+
+            let mut buf = ReadBuf::new(this.buf.as_mut());
+            buf.advance(this.filled as usize);
+
+            ready!(Pin::new(&mut this.reader).poll_read(cx, &mut buf))?;
+
+            this.filled = {
+                let filled = buf.filled().len() as u8;
+
+                if filled == this.filled {
+                    return Err(io::ErrorKind::UnexpectedEof.into()).into();
+                }
+
+                filled
+            };
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use super::*;
+
+    #[tokio::test]
+    async fn unexpected_eof() {
+        let reader = tokio_test::io::Builder::new()
+            .read(&[0xed])
+            .wait(Duration::ZERO)
+            .read(&[0xef, 0x00])
+            .build();
+
+        assert_eq!(
+            read_trailer::<_, Pad>(reader, 2).await.unwrap_err().kind(),
+            io::ErrorKind::UnexpectedEof
+        );
+    }
+
+    #[tokio::test]
+    async fn invalid_padding() {
+        let reader = tokio_test::io::Builder::new()
+            .read(&[0xed])
+            .wait(Duration::ZERO)
+            .read(&[0xef, 0x01, 0x00])
+            .wait(Duration::ZERO)
+            .build();
+
+        assert_eq!(
+            read_trailer::<_, Pad>(reader, 2).await.unwrap_err().kind(),
+            io::ErrorKind::InvalidData
+        );
+    }
+
+    #[tokio::test]
+    async fn success() {
+        let reader = tokio_test::io::Builder::new()
+            .read(&[0xed])
+            .wait(Duration::ZERO)
+            .read(&[0xef, 0x00])
+            .wait(Duration::ZERO)
+            .read(&[0x00, 0x00, 0x00, 0x00, 0x00])
+            .build();
+
+        assert_eq!(
+            &*read_trailer::<_, Pad>(reader, 2).await.unwrap(),
+            &[0xed, 0xef]
+        );
+    }
+
+    #[tokio::test]
+    async fn no_padding() {
+        assert!(read_trailer::<_, Pad>(io::empty(), 0)
+            .await
+            .unwrap()
+            .is_empty());
+    }
+}
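
To make the tag mechanics concrete: for `Pad`, `PATTERN` is eight zero
bytes, so after `data_len` bytes of user data the future checks that the
remaining `8 - data_len` bytes are zero. A sketch of the crate-internal
call, as exercised by the tests above (a `&[u8]` works as the reader,
since tokio implements `AsyncRead` for byte slices):

    // 2 user bytes followed by 6 zero padding bytes.
    let wire = [0xed, 0xef, 0, 0, 0, 0, 0, 0];
    let trailer = read_trailer::<_, Pad>(&wire[..], 2).await?;
    assert_eq!(&*trailer, &[0xed, 0xef]);
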
diff --git a/tvix/nix-compat/src/wire/bytes/writer.rs b/tvix/nix-compat/src/wire/bytes/writer.rs
index 347934b3dc..f5632771e9 100644
--- a/tvix/nix-compat/src/wire/bytes/writer.rs
+++ b/tvix/nix-compat/src/wire/bytes/writer.rs
@@ -3,7 +3,7 @@ use std::task::{ready, Poll};
 
 use tokio::io::AsyncWrite;
 
-use super::{padding_len, BytesPacketPosition, EMPTY_BYTES, LEN_SIZE};
+use super::{padding_len, EMPTY_BYTES, LEN_SIZE};
 
 pin_project! {
     /// Writes a "bytes wire packet" to the underlying writer.
@@ -41,6 +41,22 @@ pin_project! {
     }
 }
 
+/// Models the position inside a "bytes wire packet" that the writer is in.
+/// It can be in three different stages: inside the size, payload, or padding field.
+/// The number tracks the number of bytes written inside the specific field.
+/// There shall be no ambiguous states: at the end of a stage we immediately
+/// move to the beginning of the next one:
+/// - Size(LEN_SIZE) must be expressed as Payload(0)
+/// - Payload(self.payload_len) must be expressed as Padding(0)
+///
+/// Padding(padding_len) means we're at the end of the bytes wire packet.
+#[derive(Clone, Debug, PartialEq, Eq)]
+enum BytesPacketPosition {
+    Size(usize),
+    Payload(u64),
+    Padding(usize),
+}
+
 impl<W> BytesWriter<W>
 where
     W: AsyncWrite,
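
The state enum now lives with its only consumer. A hedged usage sketch
(assuming, as the surrounding code suggests, that `BytesWriter::new` takes
the inner writer plus the payload length up front; `conn` and `data` are
illustrative stand-ins):

    use tokio::io::AsyncWriteExt;

    let mut w = BytesWriter::new(&mut conn, data.len() as u64); // Size(..)
    w.write_all(&data).await?; // Payload(..)
    w.shutdown().await?;       // Padding(..), then the packet is complete
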
diff --git a/tvix/nix-compat/src/wire/mod.rs b/tvix/nix-compat/src/wire/mod.rs
index 65c053d58e..a197e3a1f4 100644
--- a/tvix/nix-compat/src/wire/mod.rs
+++ b/tvix/nix-compat/src/wire/mod.rs
@@ -3,6 +3,3 @@
 
 mod bytes;
 pub use bytes::*;
-
-mod primitive;
-pub use primitive::*;
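
With the `primitive` module removed, callers use tokio's extension traits
directly, as the hunks above already do. A before/after sketch
(illustrative only):

    use tokio::io::{AsyncReadExt, AsyncWriteExt};

    // before: primitive::read_u64(&mut r).await? / primitive::write_u64(&mut w, v).await?
    let v = r.read_u64_le().await?; // little endian, as on the Nix wire
    w.write_u64_le(v).await?;
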
diff --git a/tvix/nix-compat/src/wire/primitive.rs b/tvix/nix-compat/src/wire/primitive.rs
deleted file mode 100644
index ee0f5fc427..0000000000
--- a/tvix/nix-compat/src/wire/primitive.rs
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-FileCopyrightText: 2023 embr <git@liclac.eu>
-//
-// SPDX-License-Identifier: EUPL-1.2
-
-use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
-
-#[allow(dead_code)]
-/// Read a u64 from the AsyncRead (little endian).
-pub async fn read_u64<R: AsyncReadExt + Unpin>(r: &mut R) -> std::io::Result<u64> {
-    r.read_u64_le().await
-}
-
-/// Write a u64 to the AsyncWrite (little endian).
-pub async fn write_u64<W: AsyncWrite + Unpin>(w: &mut W, v: u64) -> std::io::Result<()> {
-    w.write_u64_le(v).await
-}
-
-#[allow(dead_code)]
-/// Read a boolean from the AsyncRead, encoded as u64 (>0 is true).
-pub async fn read_bool<R: AsyncRead + Unpin>(r: &mut R) -> std::io::Result<bool> {
-    Ok(read_u64(r).await? > 0)
-}
-
-#[allow(dead_code)]
-/// Write a boolean to the AsyncWrite, encoded as u64 (>0 is true).
-pub async fn write_bool<W: AsyncWrite + Unpin>(w: &mut W, v: bool) -> std::io::Result<()> {
-    write_u64(w, if v { 1u64 } else { 0u64 }).await
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use tokio_test::io::Builder;
-
-    // Integers.
-    #[tokio::test]
-    async fn test_read_u64() {
-        let mut mock = Builder::new().read(&1234567890u64.to_le_bytes()).build();
-        assert_eq!(1234567890u64, read_u64(&mut mock).await.unwrap());
-    }
-    #[tokio::test]
-    async fn test_write_u64() {
-        let mut mock = Builder::new().write(&1234567890u64.to_le_bytes()).build();
-        write_u64(&mut mock, 1234567890).await.unwrap();
-    }
-
-    // Booleans.
-    #[tokio::test]
-    async fn test_read_bool_0() {
-        let mut mock = Builder::new().read(&0u64.to_le_bytes()).build();
-        assert!(!read_bool(&mut mock).await.unwrap());
-    }
-    #[tokio::test]
-    async fn test_read_bool_1() {
-        let mut mock = Builder::new().read(&1u64.to_le_bytes()).build();
-        assert!(read_bool(&mut mock).await.unwrap());
-    }
-    #[tokio::test]
-    async fn test_read_bool_2() {
-        let mut mock = Builder::new().read(&2u64.to_le_bytes()).build();
-        assert!(read_bool(&mut mock).await.unwrap());
-    }
-
-    #[tokio::test]
-    async fn test_write_bool_false() {
-        let mut mock = Builder::new().write(&0u64.to_le_bytes()).build();
-        write_bool(&mut mock, false).await.unwrap();
-    }
-    #[tokio::test]
-    async fn test_write_bool_true() {
-        let mut mock = Builder::new().write(&1u64.to_le_bytes()).build();
-        write_bool(&mut mock, true).await.unwrap();
-    }
-}
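
The deleted module consisted of thin wrappers over tokio's `AsyncReadExt`/`AsyncWriteExt`, which is presumably why it could go. A sketch of the equivalent direct calls, keeping the wire convention that booleans are u64s with >0 meaning true:

```rust
use tokio::io::{AsyncReadExt, AsyncWriteExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut buf = Vec::new();
    // was: write_u64(&mut buf, 1) / write_bool(&mut buf, true)
    buf.write_u64_le(1).await?;

    let mut r: &[u8] = &buf;
    // was: read_u64(&mut r)
    let v = r.read_u64_le().await?;
    // was: read_bool(&mut r); booleans are u64s where >0 is true
    assert!(v > 0);
    Ok(())
}
```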
diff --git a/tvix/shell.nix b/tvix/shell.nix
index 422f1c8dd4..f0d8ab1657 100644
--- a/tvix/shell.nix
+++ b/tvix/shell.nix
@@ -29,12 +29,10 @@ pkgs.mkShell {
     pkgs.cargo
     pkgs.cargo-machete
     pkgs.cargo-expand
-    pkgs.cbtemulator
     pkgs.clippy
     pkgs.evans
     pkgs.fuse
     pkgs.go
-    pkgs.google-cloud-bigtable-tool
     pkgs.grpcurl
     pkgs.hyperfine
     pkgs.mdbook
diff --git a/tvix/store/Cargo.toml b/tvix/store/Cargo.toml
index a62d578370..4727f43f78 100644
--- a/tvix/store/Cargo.toml
+++ b/tvix/store/Cargo.toml
@@ -5,6 +5,7 @@ edition = "2021"
 
 [dependencies]
 anyhow = "1.0.68"
+async-compression = { version = "0.4.9", features = ["tokio", "bzip2", "gzip", "xz", "zstd"]}
 async-stream = "0.3.5"
 blake3 = { version = "1.3.1", features = ["rayon", "std"] }
 bstr = "1.6.0"
@@ -17,9 +18,9 @@ lazy_static = "1.4.0"
 nix-compat = { path = "../nix-compat", features = ["async"] }
 pin-project-lite = "0.2.13"
 prost = "0.12.1"
-opentelemetry = { version = "0.21.0", optional = true}
-opentelemetry-otlp = { version = "0.14.0", optional = true }
-opentelemetry_sdk = { version = "0.21.0", features = ["rt-tokio"], optional = true}
+opentelemetry = { version = "0.22.0", optional = true}
+opentelemetry-otlp = { version = "0.15.0", optional = true }
+opentelemetry_sdk = { version = "0.22.1", features = ["rt-tokio"], optional = true}
 serde = { version = "1.0.197", features = [ "derive" ] }
 serde_json = "1.0"
 serde_with = "3.7.0"
@@ -28,20 +29,20 @@ sha2 = "0.10.6"
 sled = { version = "0.34.7" }
 thiserror = "1.0.38"
 tokio = { version = "1.32.0", features = ["fs", "macros", "net", "rt", "rt-multi-thread", "signal"] }
-tokio-listener = { version = "0.3.2", features = [ "tonic011" ] }
+tokio-listener = { version = "0.4.1", features = [ "tonic011" ] }
 tokio-stream = { version = "0.1.14", features = ["fs"] }
 tokio-util = { version = "0.7.9", features = ["io", "io-util", "compat"] }
 tonic = { version = "0.11.0", features = ["tls", "tls-roots"] }
 tower = "0.4.13"
 tracing = "0.1.37"
-tracing-opentelemetry = "0.22.0"
-tracing-subscriber = { version = "0.3.16", features = ["env-filter", "json"] }
+tracing-opentelemetry = "0.23.0"
+tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
 tvix-castore = { path = "../castore" }
 url = "2.4.0"
 walkdir = "2.4.0"
-async-recursion = "1.0.5"
 reqwest = { version = "0.11.22", features = ["rustls-tls-native-roots", "stream"], default-features = false }
-xz2 = "0.1.7"
+lru = "0.12.3"
+parking_lot = "0.12.2"
 
 [dependencies.tonic-reflection]
 optional = true
@@ -61,7 +62,6 @@ tonic-build = "0.11.0"
 async-process = "2.1.0"
 rstest = "0.19.0"
 rstest_reuse = "0.6.0"
-test-case = "3.3.1"
 tempfile = "3.3.0"
 tokio-retry = "0.3.0"
 
@@ -75,3 +75,10 @@ fuse = ["tvix-castore/fuse"]
 otlp = ["dep:opentelemetry", "dep:opentelemetry-otlp", "dep:opentelemetry_sdk"]
 tonic-reflection = ["dep:tonic-reflection", "tvix-castore/tonic-reflection"]
 virtiofs = ["tvix-castore/virtiofs"]
+# Whether to run the integration tests.
+# Requires the following packages in $PATH:
+# cbtemulator, google-cloud-bigtable-tool
+integration = []
+
+[lints]
+workspace = true
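
The new `integration` feature carries no dependencies; it only toggles cfg. A hedged sketch of the usual gating pattern (the module and test names here are made up):

```rust
// Only compiled when built with `--features integration`, so a plain
// `cargo test` doesn't require cbtemulator etc. in $PATH.
#[cfg(all(test, feature = "integration"))]
mod integration_tests {
    #[test]
    fn talks_to_external_services() {
        // connect to the emulator here
    }
}
```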
diff --git a/tvix/store/default.nix b/tvix/store/default.nix
index f30923ac27..ad47994f24 100644
--- a/tvix/store/default.nix
+++ b/tvix/store/default.nix
@@ -26,7 +26,6 @@ in
   runTests = true;
   testPreRun = ''
     export SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt
-    export PATH="$PATH:${pkgs.lib.makeBinPath [pkgs.cbtemulator pkgs.google-cloud-bigtable-tool]}"
   '';
 
   # enable some optional features.
@@ -34,7 +33,20 @@ in
     # virtiofs feature currently fails to build on Darwin.
     ++ pkgs.lib.optional pkgs.stdenv.isLinux "virtiofs";
 }).overrideAttrs (_: {
+  meta.ci.targets = [ "integration-tests" ];
   meta.ci.extraSteps = {
     import-docs = (mkImportCheck "tvix/store/docs" ./docs);
   };
+  passthru.integration-tests = depot.tvix.crates.workspaceMembers.tvix-store.build.override {
+    runTests = true;
+    testPreRun = ''
+      export SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt
+      export PATH="$PATH:${pkgs.lib.makeBinPath [pkgs.cbtemulator pkgs.google-cloud-bigtable-tool]}"
+    '';
+
+    # enable some optional features.
+    features = [ "default" "cloud" "integration" ]
+      # virtiofs feature currently fails to build on Darwin.
+      ++ pkgs.lib.optional pkgs.stdenv.isLinux "virtiofs";
+  };
 })
diff --git a/tvix/store/docs/api.md b/tvix/store/docs/api.md
index c1dacc89a5..01e72671a7 100644
--- a/tvix/store/docs/api.md
+++ b/tvix/store/docs/api.md
@@ -218,7 +218,7 @@ This is useful for people running a Tvix-only system, or running builds on a
 In a system with Nix installed, we can't simply manually "extract" things to
 `/nix/store`, as Nix assumes it owns all writes to this location.
 In these use cases, we're probably better off exposing a tvix-store as a local
-binary cache (that's what `//tvix/nar-bridge` does).
+binary cache (that's what `//tvix/nar-bridge-go` does).
 
 Assuming we are in an environment where we control `/nix/store` exclusively, a
 "realize to disk" would either "extract" things from the `tvix-store` to a
diff --git a/tvix/store/src/bin/tvix-store.rs b/tvix/store/src/bin/tvix-store.rs
index 15f37d301f..906d0ab520 100644
--- a/tvix/store/src/bin/tvix-store.rs
+++ b/tvix/store/src/bin/tvix-store.rs
@@ -2,6 +2,8 @@ use clap::Parser;
 use clap::Subcommand;
 
 use futures::future::try_join_all;
+use futures::StreamExt;
+use futures::TryStreamExt;
 use nix_compat::path_info::ExportedPathInfo;
 use serde::Deserialize;
 use serde::Serialize;
@@ -16,7 +18,8 @@ use tracing::Level;
 use tracing_subscriber::EnvFilter;
 use tracing_subscriber::Layer;
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
-use tvix_castore::import::ingest_path;
+use tvix_castore::import::fs::ingest_path;
+use tvix_store::nar::NarCalculationService;
 use tvix_store::proto::NarInfo;
 use tvix_store::proto::PathInfo;
 
@@ -54,10 +57,6 @@ use tvix_store::proto::FILE_DESCRIPTOR_SET;
 #[derive(Parser)]
 #[command(author, version, about, long_about = None)]
 struct Cli {
-    /// Whether to log in JSON
-    #[arg(long)]
-    json: bool,
-
     /// Whether to configure OTLP. Set --otlp=false to disable.
     #[arg(long, default_missing_value = "true", default_value = "true", num_args(0..=1), require_equals(true), action(clap::ArgAction::Set))]
     otlp: bool,
@@ -80,7 +79,11 @@ enum Commands {
         #[arg(long, short = 'l')]
         listen_address: Option<String>,
 
-        #[arg(long, env, default_value = "sled:///var/lib/tvix-store/blobs.sled")]
+        #[arg(
+            long,
+            env,
+            default_value = "objectstore+file:///var/lib/tvix-store/blobs.object_store"
+        )]
         blob_service_addr: String,
 
         #[arg(
@@ -216,33 +219,17 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     let level = cli.log_level.unwrap_or(Level::INFO);
 
     // Set up the tracing subscriber.
-    let subscriber = tracing_subscriber::registry()
-        .with(
-            cli.json.then_some(
-                tracing_subscriber::fmt::Layer::new()
-                    .with_writer(std::io::stderr)
-                    .json()
-                    .with_filter(
-                        EnvFilter::builder()
-                            .with_default_directive(level.into())
-                            .from_env()
-                            .expect("invalid RUST_LOG"),
-                    ),
-            ),
-        )
-        .with(
-            (!cli.json).then_some(
-                tracing_subscriber::fmt::Layer::new()
-                    .with_writer(std::io::stderr)
-                    .pretty()
-                    .with_filter(
-                        EnvFilter::builder()
-                            .with_default_directive(level.into())
-                            .from_env()
-                            .expect("invalid RUST_LOG"),
-                    ),
+    let subscriber = tracing_subscriber::registry().with(
+        tracing_subscriber::fmt::Layer::new()
+            .with_writer(std::io::stderr)
+            .compact()
+            .with_filter(
+                EnvFilter::builder()
+                    .with_default_directive(level.into())
+                    .from_env()
+                    .expect("invalid RUST_LOG"),
             ),
-        );
+    );
 
     // Add the otlp layer (when otlp is enabled, and it's not disabled in the CLI)
     // then init the registry.
@@ -300,7 +287,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             path_info_service_addr,
         } => {
             // initialize stores
-            let (blob_service, directory_service, path_info_service) =
+            let (blob_service, directory_service, path_info_service, nar_calculation_service) =
                 tvix_store::utils::construct_services(
                     blob_service_addr,
                     directory_service_addr,
@@ -325,6 +312,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                 ))
                 .add_service(PathInfoServiceServer::new(GRPCPathInfoServiceWrapper::new(
                     Arc::from(path_info_service),
+                    nar_calculation_service,
                 )));
 
             #[cfg(feature = "tonic-reflection")]
@@ -354,7 +342,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             path_info_service_addr,
         } => {
             // FUTUREWORK: allow flat for single files?
-            let (blob_service, directory_service, path_info_service) =
+            let (blob_service, directory_service, path_info_service, nar_calculation_service) =
                 tvix_store::utils::construct_services(
                     blob_service_addr,
                     directory_service_addr,
@@ -362,8 +350,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                 )
                 .await?;
 
-            // Arc the PathInfoService, as we clone it .
+            // Arc the PathInfoService and NarCalculationService, as we clone them.
             let path_info_service: Arc<dyn PathInfoService> = path_info_service.into();
+            let nar_calculation_service: Arc<dyn NarCalculationService> =
+                nar_calculation_service.into();
 
             let tasks = paths
                 .into_iter()
@@ -372,6 +362,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                         let blob_service = blob_service.clone();
                         let directory_service = directory_service.clone();
                         let path_info_service = path_info_service.clone();
+                        let nar_calculation_service = nar_calculation_service.clone();
 
                         async move {
                             if let Ok(name) = tvix_store::import::path_to_name(&path) {
@@ -381,6 +372,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                                     blob_service,
                                     directory_service,
                                     path_info_service,
+                                    nar_calculation_service,
                                 )
                                 .await;
                                 if let Ok(output_path) = resp {
@@ -401,7 +393,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             path_info_service_addr,
             reference_graph_path,
         } => {
-            let (blob_service, directory_service, path_info_service) =
+            let (blob_service, directory_service, path_info_service, _nar_calculation_service) =
                 tvix_store::utils::construct_services(
                     blob_service_addr,
                     directory_service_addr,
@@ -421,19 +413,58 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             let reference_graph: ReferenceGraph<'_> =
                 serde_json::from_slice(reference_graph_json.as_slice())?;
 
-            // We currently simply upload all store paths in linear order.
-            // FUTUREWORK: properly walk the reference graph from the leaves, and upload multiple in parallel.
-            for elem in reference_graph.closure {
-                // Skip if that store path already exists
-                if path_info_service.get(*elem.path.digest()).await?.is_some() {
-                    continue;
-                }
+            // Arc the PathInfoService, as we clone it.
+            let path_info_service: Arc<dyn PathInfoService> = path_info_service.into();
+
+            // From our reference graph, lookup all pathinfos that might exist.
+            let elems: Vec<_> = futures::stream::iter(reference_graph.closure)
+                .map(|elem| {
+                    let path_info_service = path_info_service.clone();
+                    async move {
+                        path_info_service
+                            .get(*elem.path.digest())
+                            .await
+                            .map(|resp| (elem, resp))
+                    }
+                })
+                .buffer_unordered(50)
+                // Filter out all that are already uploaded.
+                // TODO: check if there's a better combinator for this
+                .try_filter_map(|(elem, path_info)| {
+                    std::future::ready(if path_info.is_none() {
+                        Ok(Some(elem))
+                    } else {
+                        Ok(None)
+                    })
+                })
+                .try_collect()
+                .await?;
 
-                let path: PathBuf = elem.path.to_absolute_path().into();
-                // Ingest the given path
-                let root_node =
-                    ingest_path(blob_service.clone(), directory_service.clone(), path).await?;
+            // Run ingest_path on all of them.
+            let uploads: Vec<_> = futures::stream::iter(elems)
+                .map(|elem| {
+                    // Map to a future returning the root node, alongside with the closure info.
+                    let blob_service = blob_service.clone();
+                    let directory_service = directory_service.clone();
+                    async move {
+                        // Ingest the given path.
+
+                        ingest_path(
+                            blob_service,
+                            directory_service,
+                            PathBuf::from(elem.path.to_absolute_path()),
+                        )
+                        .await
+                        .map(|root_node| (elem, root_node))
+                    }
+                })
+                .buffer_unordered(10)
+                .try_collect()
+                .await?;
 
+            // Insert them into the PathInfoService.
+            // FUTUREWORK: do this properly respecting the reference graph.
+            for (elem, root_node) in uploads {
                 // Create and upload a PathInfo pointing to the root_node,
                 // annotated with information we have from the reference graph.
                 let path_info = PathInfo {
@@ -469,7 +500,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             allow_other,
             show_xattr,
         } => {
-            let (blob_service, directory_service, path_info_service) =
+            let (blob_service, directory_service, path_info_service, _nar_calculation_service) =
                 tvix_store::utils::construct_services(
                     blob_service_addr,
                     directory_service_addr,
@@ -511,7 +542,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             list_root,
             show_xattr,
         } => {
-            let (blob_service, directory_service, path_info_service) =
+            let (blob_service, directory_service, path_info_service, _nar_calculation_service) =
                 tvix_store::utils::construct_services(
                     blob_service_addr,
                     directory_service_addr,
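
The rewritten copy command above replaces the linear loop with bounded-concurrency streams. A self-contained sketch of that combinator shape, where `fetch` is a hypothetical stand-in for `path_info_service.get`:

```rust
use futures::{stream, StreamExt, TryStreamExt};

// Hypothetical async lookup standing in for path_info_service.get().
async fn fetch(k: u32) -> Result<u32, std::io::Error> {
    Ok(k * 2)
}

async fn lookup_all(keys: Vec<u32>) -> Result<Vec<u32>, std::io::Error> {
    stream::iter(keys)
        .map(fetch)
        // At most 50 lookups in flight; results arrive in completion order.
        .buffer_unordered(50)
        // Fail fast on the first Err, collect the Oks.
        .try_collect()
        .await
}
```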
diff --git a/tvix/store/src/import.rs b/tvix/store/src/import.rs
index 69f68d46a2..888380bca9 100644
--- a/tvix/store/src/import.rs
+++ b/tvix/store/src/import.rs
@@ -1,7 +1,8 @@
 use std::path::Path;
 use tracing::{debug, instrument};
 use tvix_castore::{
-    blobservice::BlobService, directoryservice::DirectoryService, proto::node::Node, B3Digest,
+    blobservice::BlobService, directoryservice::DirectoryService, import::fs::ingest_path,
+    proto::node::Node, B3Digest,
 };
 
 use nix_compat::{
@@ -10,6 +11,7 @@ use nix_compat::{
 };
 
 use crate::{
+    nar::NarCalculationService,
     pathinfoservice::PathInfoService,
     proto::{nar_info, NarInfo, PathInfo},
 };
@@ -103,24 +105,27 @@ pub fn derive_nar_ca_path_info(
 /// Ingest the given path `path` and register the resulting output path in the
 /// [`PathInfoService`] as a recursive fixed output NAR.
 #[instrument(skip_all, fields(store_name=name, path=?path), err)]
-pub async fn import_path_as_nar_ca<BS, DS, PS, P>(
+pub async fn import_path_as_nar_ca<BS, DS, PS, NS, P>(
     path: P,
     name: &str,
     blob_service: BS,
     directory_service: DS,
     path_info_service: PS,
+    nar_calculation_service: NS,
 ) -> Result<StorePath, std::io::Error>
 where
     P: AsRef<Path> + std::fmt::Debug,
-    BS: AsRef<dyn BlobService> + Clone,
-    DS: AsRef<dyn DirectoryService>,
+    BS: BlobService + Clone,
+    DS: DirectoryService,
     PS: AsRef<dyn PathInfoService>,
+    NS: NarCalculationService,
 {
-    let root_node =
-        tvix_castore::import::ingest_path(blob_service, directory_service, &path).await?;
+    let root_node = ingest_path(blob_service, directory_service, path.as_ref())
+        .await
+        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
 
-    // Ask the PathInfoService for the NAR size and sha256
-    let (nar_size, nar_sha256) = path_info_service.as_ref().calculate_nar(&root_node).await?;
+    // Ask for the NAR size and sha256
+    let (nar_size, nar_sha256) = nar_calculation_service.calculate_nar(&root_node).await?;
 
     // Calculate the output path. This might still fail, as some names are illegal.
     // FUTUREWORK: express the `name` at the type level to be valid and move the conversion
@@ -156,21 +161,22 @@ mod tests {
     use std::{ffi::OsStr, path::PathBuf};
 
     use crate::import::path_to_name;
-    use test_case::test_case;
+    use rstest::rstest;
 
-    #[test_case("a/b/c", "c"; "simple path")]
-    #[test_case("a/b/../c", "c"; "simple path containing ..")]
-    #[test_case("a/b/../c/d/../e", "e"; "path containing multiple ..")]
+    #[rstest]
+    #[case::simple_path("a/b/c", "c")]
+    #[case::simple_path_containing_dotdot("a/b/../c", "c")]
+    #[case::path_containing_multiple_dotdot("a/b/../c/d/../e", "e")]
 
-    fn test_path_to_name(path: &str, expected_name: &str) {
+    fn test_path_to_name(#[case] path: &str, #[case] expected_name: &str) {
         let path: PathBuf = path.into();
         assert_eq!(path_to_name(&path).expect("must succeed"), expected_name);
     }
 
-    #[test_case(b"a/b/.."; "path ending in ..")]
-    #[test_case(b"\xf8\xa1\xa1\xa1\xa1"; "non unicode path")]
-
-    fn test_invalid_path_to_name(invalid_path: &[u8]) {
+    #[rstest]
+    #[case::path_ending_in_dotdot(b"a/b/..")]
+    #[case::non_unicode_path(b"\xf8\xa1\xa1\xa1\xa1")]
+    fn test_invalid_path_to_name(#[case] invalid_path: &[u8]) {
         let path: PathBuf = unsafe { OsStr::from_encoded_bytes_unchecked(invalid_path) }.into();
         path_to_name(&path).expect_err("must fail");
     }
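
For reference, the `test_case` to `rstest` conversion above follows this general shape: each `#[case::name(…)]` expands into one generated test, and its arguments bind to `#[case]` parameters in order. A minimal standalone example:

```rust
use rstest::rstest;

#[rstest]
#[case::simple(2, 2, 4)]
#[case::with_zero(5, 0, 5)]
fn addition(#[case] a: u32, #[case] b: u32, #[case] expected: u32) {
    assert_eq!(a + b, expected);
}
```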
diff --git a/tvix/store/src/nar/import.rs b/tvix/store/src/nar/import.rs
index 6f4dcdea5d..3d7c50014a 100644
--- a/tvix/store/src/nar/import.rs
+++ b/tvix/store/src/nar/import.rs
@@ -1,225 +1,122 @@
-use bytes::Bytes;
-use nix_compat::nar;
-use std::io::{self, BufRead};
-use tokio_util::io::SyncIoBridge;
-use tracing::warn;
+use nix_compat::nar::reader::r#async as nar_reader;
+use tokio::{io::AsyncBufRead, sync::mpsc, try_join};
 use tvix_castore::{
     blobservice::BlobService,
-    directoryservice::{DirectoryPutter, DirectoryService},
-    proto::{self as castorepb},
-    B3Digest,
+    directoryservice::DirectoryService,
+    import::{ingest_entries, IngestionEntry, IngestionError},
+    proto::{node::Node, NamedNode},
+    PathBuf,
 };
 
-/// Accepts a reader providing a NAR.
-/// Will traverse it, uploading blobs to the given [BlobService], and
-/// directories to the given [DirectoryService].
-/// On success, the root node is returned.
-/// This function is not async (because the NAR reader is not)
-/// and calls [tokio::task::block_in_place] when interacting with backing
-/// services, so make sure to only call this with spawn_blocking.
-pub fn read_nar<R, BS, DS>(
-    r: &mut R,
+/// Ingests the contents of an [AsyncRead] providing a NAR into the tvix store,
+/// interacting with a [BlobService] and [DirectoryService].
+/// It returns the castore root node or an error.
+pub async fn ingest_nar<R, BS, DS>(
     blob_service: BS,
     directory_service: DS,
-) -> io::Result<castorepb::node::Node>
+    r: &mut R,
+) -> Result<Node, IngestionError<Error>>
 where
-    R: BufRead + Send,
-    BS: AsRef<dyn BlobService>,
-    DS: AsRef<dyn DirectoryService>,
+    R: AsyncBufRead + Unpin + Send,
+    BS: BlobService + Clone,
+    DS: DirectoryService,
 {
-    let handle = tokio::runtime::Handle::current();
+    // open the NAR for reading.
+    // The NAR reader emits nodes in DFS preorder.
+    let root_node = nar_reader::open(r).await.map_err(Error::IO)?;
 
-    let directory_putter = directory_service.as_ref().put_multiple_start();
+    let (tx, rx) = mpsc::channel(1);
+    let rx = tokio_stream::wrappers::ReceiverStream::new(rx);
 
-    let node = nix_compat::nar::reader::open(r)?;
-    let (root_node, mut directory_putter, _) = process_node(
-        handle.clone(),
-        "".into(), // this is the root node, it has an empty name
-        node,
-        &blob_service,
-        directory_putter,
-    )?;
+    let produce = async move {
+        let res = produce_nar_inner(
+            blob_service,
+            root_node,
+            "root".parse().unwrap(), // HACK: the root node sent to ingest_entries may not be ROOT.
+            tx.clone(),
+        )
+        .await;
 
-    // In case the root node points to a directory, we need to close
-    // [directory_putter], and ensure the digest we got back from there matches
-    // what the root node is pointing to.
-    if let castorepb::node::Node::Directory(ref directory_node) = root_node {
-        // Close directory_putter to make sure all directories have been inserted.
-        let directory_putter_digest =
-            handle.block_on(handle.spawn(async move { directory_putter.close().await }))??;
-        let root_directory_node_digest: B3Digest =
-            directory_node.digest.clone().try_into().unwrap();
+        tx.send(res)
+            .await
+            .map_err(|e| Error::IO(std::io::Error::new(std::io::ErrorKind::BrokenPipe, e)))?;
 
-        if directory_putter_digest != root_directory_node_digest {
-            warn!(
-                root_directory_node_digest = %root_directory_node_digest,
-                directory_putter_digest =%directory_putter_digest,
-                "directory digest mismatch",
-            );
-            return Err(io::Error::new(
-                io::ErrorKind::Other,
-                "directory digest mismatch",
-            ));
-        }
-    }
-    // In case it's not a Directory, [directory_putter] doesn't need to be
-    // closed (as we didn't end up uploading anything).
-    // It can just be dropped, as documented in its trait.
+        Ok(())
+    };
+
+    let consume = ingest_entries(directory_service, rx);
 
-    Ok(root_node)
+    let (_, node) = try_join!(produce, consume)?;
+
+    // remove the fake "root" name again
+    debug_assert_eq!(&node.get_name(), b"root");
+    Ok(node.rename("".into()))
 }
 
-/// This is called on a [nar::reader::Node] and returns a [castorepb::node::Node].
-/// It does so by handling all three kinds, and recursing for directories.
-///
-/// [DirectoryPutter] is passed around, so a single instance of it can be used,
-/// which is sufficient, as this reads through the whole NAR linerarly.
-fn process_node<BS>(
-    handle: tokio::runtime::Handle,
-    name: bytes::Bytes,
-    node: nar::reader::Node,
+async fn produce_nar_inner<BS>(
     blob_service: BS,
-    directory_putter: Box<dyn DirectoryPutter>,
-) -> io::Result<(castorepb::node::Node, Box<dyn DirectoryPutter>, BS)>
+    node: nar_reader::Node<'_, '_>,
+    path: PathBuf,
+    tx: mpsc::Sender<Result<IngestionEntry, Error>>,
+) -> Result<IngestionEntry, Error>
 where
-    BS: AsRef<dyn BlobService>,
+    BS: BlobService + Clone,
 {
     Ok(match node {
-        nar::reader::Node::Symlink { target } => (
-            castorepb::node::Node::Symlink(castorepb::SymlinkNode {
-                name,
-                target: target.into(),
-            }),
-            directory_putter,
-            blob_service,
-        ),
-        nar::reader::Node::File { executable, reader } => (
-            castorepb::node::Node::File(process_file_reader(
-                handle,
-                name,
-                reader,
+        nar_reader::Node::Symlink { target } => IngestionEntry::Symlink { path, target },
+        nar_reader::Node::File {
+            executable,
+            mut reader,
+        } => {
+            let (digest, size) = {
+                let mut blob_writer = blob_service.open_write().await;
+                let size = tokio::io::copy_buf(&mut reader, &mut blob_writer).await?;
+
+                (blob_writer.close().await?, size)
+            };
+
+            IngestionEntry::Regular {
+                path,
+                size,
                 executable,
-                &blob_service,
-            )?),
-            directory_putter,
-            blob_service,
-        ),
-        nar::reader::Node::Directory(dir_reader) => {
-            let (directory_node, directory_putter, blob_service_back) =
-                process_dir_reader(handle, name, dir_reader, blob_service, directory_putter)?;
-
-            (
-                castorepb::node::Node::Directory(directory_node),
-                directory_putter,
-                blob_service_back,
-            )
+                digest,
+            }
+        }
+        nar_reader::Node::Directory(mut dir_reader) => {
+            while let Some(entry) = dir_reader.next().await? {
+                let mut path = path.clone();
+
+                // valid NAR names are valid castore names
+                path.try_push(entry.name)
+                    .expect("Tvix bug: failed to join name");
+
+                let entry = Box::pin(produce_nar_inner(
+                    blob_service.clone(),
+                    entry.node,
+                    path,
+                    tx.clone(),
+                ))
+                .await?;
+
+                tx.send(Ok(entry)).await.map_err(|e| {
+                    Error::IO(std::io::Error::new(std::io::ErrorKind::BrokenPipe, e))
+                })?;
+            }
+
+            IngestionEntry::Dir { path }
         }
     })
 }
 
-/// Given a name and [nar::reader::FileReader], this ingests the file into the
-/// passed [BlobService] and returns a [castorepb::FileNode].
-fn process_file_reader<BS>(
-    handle: tokio::runtime::Handle,
-    name: Bytes,
-    mut file_reader: nar::reader::FileReader,
-    executable: bool,
-    blob_service: BS,
-) -> io::Result<castorepb::FileNode>
-where
-    BS: AsRef<dyn BlobService>,
-{
-    // store the length. If we read any other length, reading will fail.
-    let expected_len = file_reader.len();
-
-    // prepare writing a new blob.
-    let blob_writer = handle.block_on(async { blob_service.as_ref().open_write().await });
-
-    // write the blob.
-    let mut blob_writer = {
-        let mut dst = SyncIoBridge::new(blob_writer);
-
-        file_reader.copy(&mut dst)?;
-        dst.shutdown()?;
-
-        // return back the blob_writer
-        dst.into_inner()
-    };
-
-    // close the blob_writer, retrieve the digest.
-    let blob_digest = handle.block_on(async { blob_writer.close().await })?;
-
-    Ok(castorepb::FileNode {
-        name,
-        digest: blob_digest.into(),
-        size: expected_len,
-        executable,
-    })
-}
-
-/// Given a name and [nar::reader::DirReader], this returns a [castorepb::DirectoryNode].
-/// It uses [process_node] to iterate over all children.
-///
-/// [DirectoryPutter] is passed around, so a single instance of it can be used,
-/// which is sufficient, as this reads through the whole NAR linerarly.
-fn process_dir_reader<BS>(
-    handle: tokio::runtime::Handle,
-    name: Bytes,
-    mut dir_reader: nar::reader::DirReader,
-    blob_service: BS,
-    directory_putter: Box<dyn DirectoryPutter>,
-) -> io::Result<(castorepb::DirectoryNode, Box<dyn DirectoryPutter>, BS)>
-where
-    BS: AsRef<dyn BlobService>,
-{
-    let mut directory = castorepb::Directory::default();
-
-    let mut directory_putter = directory_putter;
-    let mut blob_service = blob_service;
-    while let Some(entry) = dir_reader.next()? {
-        let (node, directory_putter_back, blob_service_back) = process_node(
-            handle.clone(),
-            entry.name.into(),
-            entry.node,
-            blob_service,
-            directory_putter,
-        )?;
-
-        blob_service = blob_service_back;
-        directory_putter = directory_putter_back;
-
-        match node {
-            castorepb::node::Node::Directory(node) => directory.directories.push(node),
-            castorepb::node::Node::File(node) => directory.files.push(node),
-            castorepb::node::Node::Symlink(node) => directory.symlinks.push(node),
-        }
-    }
-
-    // calculate digest and size.
-    let directory_digest = directory.digest();
-    let directory_size = directory.size();
-
-    // upload the directory. This is a bit more verbose, as we want to get back
-    // directory_putter for later reuse.
-    let directory_putter = handle.block_on(handle.spawn(async move {
-        directory_putter.put(directory).await?;
-        Ok::<_, io::Error>(directory_putter)
-    }))??;
-
-    Ok((
-        castorepb::DirectoryNode {
-            name,
-            digest: directory_digest.into(),
-            size: directory_size,
-        },
-        directory_putter,
-        blob_service,
-    ))
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+    #[error(transparent)]
+    IO(#[from] std::io::Error),
 }
 
 #[cfg(test)]
 mod test {
-    use crate::nar::read_nar;
+    use crate::nar::ingest_nar;
     use std::io::Cursor;
     use std::sync::Arc;
 
@@ -244,19 +141,13 @@ mod test {
         blob_service: Arc<dyn BlobService>,
         directory_service: Arc<dyn DirectoryService>,
     ) {
-        let handle = tokio::runtime::Handle::current();
-
-        let root_node = handle
-            .spawn_blocking(|| {
-                read_nar(
-                    &mut Cursor::new(&NAR_CONTENTS_SYMLINK.clone()),
-                    blob_service,
-                    directory_service,
-                )
-            })
-            .await
-            .unwrap()
-            .expect("must parse");
+        let root_node = ingest_nar(
+            blob_service,
+            directory_service,
+            &mut Cursor::new(&NAR_CONTENTS_SYMLINK.clone()),
+        )
+        .await
+        .expect("must parse");
 
         assert_eq!(
             castorepb::node::Node::Symlink(castorepb::SymlinkNode {
@@ -273,22 +164,13 @@ mod test {
         blob_service: Arc<dyn BlobService>,
         directory_service: Arc<dyn DirectoryService>,
     ) {
-        let handle = tokio::runtime::Handle::current();
-
-        let root_node = handle
-            .spawn_blocking({
-                let blob_service = blob_service.clone();
-                move || {
-                    read_nar(
-                        &mut Cursor::new(&NAR_CONTENTS_HELLOWORLD.clone()),
-                        blob_service,
-                        directory_service,
-                    )
-                }
-            })
-            .await
-            .unwrap()
-            .expect("must parse");
+        let root_node = ingest_nar(
+            blob_service.clone(),
+            directory_service,
+            &mut Cursor::new(&NAR_CONTENTS_HELLOWORLD.clone()),
+        )
+        .await
+        .expect("must parse");
 
         assert_eq!(
             castorepb::node::Node::File(castorepb::FileNode {
@@ -310,23 +192,13 @@ mod test {
         blob_service: Arc<dyn BlobService>,
         directory_service: Arc<dyn DirectoryService>,
     ) {
-        let handle = tokio::runtime::Handle::current();
-
-        let root_node = handle
-            .spawn_blocking({
-                let blob_service = blob_service.clone();
-                let directory_service = directory_service.clone();
-                || {
-                    read_nar(
-                        &mut Cursor::new(&NAR_CONTENTS_COMPLICATED.clone()),
-                        blob_service,
-                        directory_service,
-                    )
-                }
-            })
-            .await
-            .unwrap()
-            .expect("must parse");
+        let root_node = ingest_nar(
+            blob_service.clone(),
+            directory_service.clone(),
+            &mut Cursor::new(&NAR_CONTENTS_COMPLICATED.clone()),
+        )
+        .await
+        .expect("must parse");
 
         assert_eq!(
             castorepb::node::Node::Directory(castorepb::DirectoryNode {
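
The new `ingest_nar` splits work into a producer walking the NAR and a consumer ingesting entries, joined over an mpsc channel. A stripped-down sketch of that shape (toy types, not the tvix API):

```rust
use tokio::{sync::mpsc, try_join};
use tokio_stream::{wrappers::ReceiverStream, StreamExt};

async fn pipeline() -> Result<u32, &'static str> {
    let (tx, rx) = mpsc::channel::<Result<u32, &'static str>>(1);

    // Producer: walks some input and sends entries down the channel.
    let produce = async move {
        for i in 0..3 {
            tx.send(Ok(i)).await.map_err(|_| "consumer hung up")?;
        }
        Ok::<(), &'static str>(())
    };

    // Consumer: drains the channel as a stream.
    let consume = async move {
        let mut rx = ReceiverStream::new(rx);
        let mut sum = 0;
        while let Some(item) = rx.next().await {
            sum += item?;
        }
        Ok::<u32, &'static str>(sum)
    };

    // Whichever side errors first aborts the join.
    let ((), sum) = try_join!(produce, consume)?;
    Ok(sum)
}
```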
diff --git a/tvix/store/src/nar/mod.rs b/tvix/store/src/nar/mod.rs
index 49bb92fb0f..164748a655 100644
--- a/tvix/store/src/nar/mod.rs
+++ b/tvix/store/src/nar/mod.rs
@@ -1,10 +1,36 @@
+use tonic::async_trait;
 use tvix_castore::B3Digest;
 
 mod import;
 mod renderer;
-pub use import::read_nar;
+pub use import::ingest_nar;
 pub use renderer::calculate_size_and_sha256;
 pub use renderer::write_nar;
+pub use renderer::SimpleRenderer;
+use tvix_castore::proto as castorepb;
+
+#[async_trait]
+pub trait NarCalculationService: Send + Sync {
+    /// Return the nar size and nar sha256 digest for a given root node.
+    /// This can be used to calculate NAR-based output paths.
+    async fn calculate_nar(
+        &self,
+        root_node: &castorepb::node::Node,
+    ) -> Result<(u64, [u8; 32]), tvix_castore::Error>;
+}
+
+#[async_trait]
+impl<A> NarCalculationService for A
+where
+    A: AsRef<dyn NarCalculationService> + Send + Sync,
+{
+    async fn calculate_nar(
+        &self,
+        root_node: &castorepb::node::Node,
+    ) -> Result<(u64, [u8; 32]), tvix_castore::Error> {
+        self.as_ref().calculate_nar(root_node).await
+    }
+}
 
 /// Errors that can be encountered while rendering NARs.
 #[derive(Debug, thiserror::Error)]
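
The blanket impl above means an `Arc<dyn NarCalculationService>` (or anything `AsRef`-ing one) can be passed wherever the trait is expected. The same pattern on a toy trait, as a sketch:

```rust
use std::sync::Arc;

trait Greet {
    fn greet(&self) -> String;
}

struct Simple;

impl Greet for Simple {
    fn greet(&self) -> String {
        "hello".into()
    }
}

// Blanket impl: anything that can AsRef a trait object delegates to it.
impl<A: AsRef<dyn Greet>> Greet for A {
    fn greet(&self) -> String {
        self.as_ref().greet()
    }
}

fn main() {
    let svc: Arc<dyn Greet> = Arc::new(Simple);
    // Arc<dyn Greet> implements AsRef<dyn Greet>, so the blanket impl
    // lets it be used wherever `impl Greet` is expected.
    assert_eq!(svc.greet(), "hello");
}
```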
diff --git a/tvix/store/src/nar/renderer.rs b/tvix/store/src/nar/renderer.rs
index 0816b8e973..efd67671db 100644
--- a/tvix/store/src/nar/renderer.rs
+++ b/tvix/store/src/nar/renderer.rs
@@ -1,17 +1,51 @@
 use crate::utils::AsyncIoBridge;
 
-use super::RenderError;
-use async_recursion::async_recursion;
+use super::{NarCalculationService, RenderError};
 use count_write::CountWrite;
 use nix_compat::nar::writer::r#async as nar_writer;
 use sha2::{Digest, Sha256};
 use tokio::io::{self, AsyncWrite, BufReader};
+use tonic::async_trait;
 use tvix_castore::{
     blobservice::BlobService,
     directoryservice::DirectoryService,
     proto::{self as castorepb, NamedNode},
 };
 
+pub struct SimpleRenderer<BS, DS> {
+    blob_service: BS,
+    directory_service: DS,
+}
+
+impl<BS, DS> SimpleRenderer<BS, DS> {
+    pub fn new(blob_service: BS, directory_service: DS) -> Self {
+        Self {
+            blob_service,
+            directory_service,
+        }
+    }
+}
+
+#[async_trait]
+impl<BS, DS> NarCalculationService for SimpleRenderer<BS, DS>
+where
+    BS: BlobService + Clone,
+    DS: DirectoryService + Clone,
+{
+    async fn calculate_nar(
+        &self,
+        root_node: &castorepb::node::Node,
+    ) -> Result<(u64, [u8; 32]), tvix_castore::Error> {
+        calculate_size_and_sha256(
+            root_node,
+            self.blob_service.clone(),
+            self.directory_service.clone(),
+        )
+        .await
+        .map_err(|e| tvix_castore::Error::StorageError(format!("failed rendering nar: {}", e)))
+    }
+}
+
 /// Invoke [write_nar], and return the size and sha256 digest of the produced
 /// NAR output.
 pub async fn calculate_size_and_sha256<BS, DS>(
@@ -72,9 +106,8 @@ where
 
 /// Process an intermediate node in the structure.
 /// This consumes the node.
-#[async_recursion]
 async fn walk_node<BS, DS>(
-    nar_node: nar_writer::Node<'async_recursion, '_>,
+    nar_node: nar_writer::Node<'_, '_>,
     proto_node: &castorepb::node::Node,
     blob_service: BS,
     directory_service: DS,
@@ -164,9 +197,13 @@ where
                             .await
                             .map_err(RenderError::NARWriterError)?;
 
-                        (blob_service, directory_service) =
-                            walk_node(child_node, &proto_node, blob_service, directory_service)
-                                .await?;
+                        (blob_service, directory_service) = Box::pin(walk_node(
+                            child_node,
+                            &proto_node,
+                            blob_service,
+                            directory_service,
+                        ))
+                        .await?;
                     }
 
                     // close the directory
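
Replacing `#[async_recursion]` with an explicit `Box::pin` works because only the recursive step needs boxing; an async fn that awaits itself would otherwise have an infinitely sized future. A minimal illustration:

```rust
// Boxing the recursive call breaks the size cycle, which is all
// #[async_recursion] did under the hood.
async fn count_down(n: u64) -> u64 {
    if n == 0 {
        0
    } else {
        Box::pin(count_down(n - 1)).await + 1
    }
}
```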
diff --git a/tvix/store/src/pathinfoservice/bigtable.rs b/tvix/store/src/pathinfoservice/bigtable.rs
index f49ef475eb..7df9989fc5 100644
--- a/tvix/store/src/pathinfoservice/bigtable.rs
+++ b/tvix/store/src/pathinfoservice/bigtable.rs
@@ -6,12 +6,12 @@ use bigtable_rs::{bigtable, google::bigtable::v2 as bigtable_v2};
 use bytes::Bytes;
 use data_encoding::HEXLOWER;
 use futures::stream::BoxStream;
+use nix_compat::nixbase32;
 use prost::Message;
 use serde::{Deserialize, Serialize};
 use serde_with::{serde_as, DurationSeconds};
 use tonic::async_trait;
-use tracing::trace;
-use tvix_castore::proto as castorepb;
+use tracing::{instrument, trace};
 use tvix_castore::Error;
 
 /// There should not be more than 10 MiB in a single cell.
@@ -116,7 +116,7 @@ impl BigtablePathInfoService {
             .stdout(Stdio::piped())
             .kill_on_drop(true)
             .spawn()
-            .expect("failed to spwan emulator");
+            .expect("failed to spawn emulator");
 
         Retry::spawn(
             ExponentialBackoff::from_millis(20)
@@ -182,6 +182,7 @@ fn derive_pathinfo_key(digest: &[u8; 20]) -> String {
 
 #[async_trait]
 impl PathInfoService for BigtablePathInfoService {
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
     async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
         let mut client = self.client.clone();
         let path_info_key = derive_pathinfo_key(&digest);
@@ -278,6 +279,7 @@ impl PathInfoService for BigtablePathInfoService {
         Ok(Some(path_info))
     }
 
+    #[instrument(level = "trace", skip_all, fields(path_info.root_node = ?path_info.node))]
     async fn put(&self, path_info: PathInfo) -> Result<PathInfo, Error> {
         let store_path = path_info
             .validate()
@@ -330,13 +332,6 @@ impl PathInfoService for BigtablePathInfoService {
         Ok(path_info)
     }
 
-    async fn calculate_nar(
-        &self,
-        _root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        return Err(Error::StorageError("unimplemented".into()));
-    }
-
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
         let mut client = self.client.clone();
 
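
On the tracing annotations added above: `skip_all` keeps all arguments out of the span, and `fields(…)` re-adds derived values. A sketch with a hypothetical `hex` helper in place of `nixbase32::encode`:

```rust
use tracing::instrument;

// Hypothetical hex helper standing in for nixbase32::encode.
fn hex(d: &[u8]) -> String {
    d.iter().map(|b| format!("{b:02x}")).collect()
}

// skip_all keeps the raw arguments out of the span; fields(...) re-adds
// a derived, human-readable rendering of the digest.
#[instrument(level = "trace", skip_all, fields(path_info.digest = hex(&digest)))]
async fn get(digest: [u8; 20]) -> Option<()> {
    let _ = digest;
    None
}
```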
diff --git a/tvix/store/src/pathinfoservice/combinators.rs b/tvix/store/src/pathinfoservice/combinators.rs
new file mode 100644
index 0000000000..664144ef49
--- /dev/null
+++ b/tvix/store/src/pathinfoservice/combinators.rs
@@ -0,0 +1,111 @@
+use crate::proto::PathInfo;
+use futures::stream::BoxStream;
+use nix_compat::nixbase32;
+use tonic::async_trait;
+use tracing::{debug, instrument};
+use tvix_castore::Error;
+
+use super::PathInfoService;
+
+/// Asks near first; if not found, asks far.
+/// If found there, returns it, and *inserts* it into
+/// near.
+/// There is no negative cache.
+/// Inserts and listings are not implemented for now.
+pub struct Cache<PS1, PS2> {
+    near: PS1,
+    far: PS2,
+}
+
+impl<PS1, PS2> Cache<PS1, PS2> {
+    pub fn new(near: PS1, far: PS2) -> Self {
+        Self { near, far }
+    }
+}
+
+#[async_trait]
+impl<PS1, PS2> PathInfoService for Cache<PS1, PS2>
+where
+    PS1: PathInfoService,
+    PS2: PathInfoService,
+{
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
+    async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
+        match self.near.get(digest).await? {
+            Some(path_info) => {
+                debug!("serving from cache");
+                Ok(Some(path_info))
+            }
+            None => {
+                debug!("not found in near, asking remote…");
+                match self.far.get(digest).await? {
+                    None => Ok(None),
+                    Some(path_info) => {
+                        debug!("found in remote, adding to cache");
+                        self.near.put(path_info.clone()).await?;
+                        Ok(Some(path_info))
+                    }
+                }
+            }
+        }
+    }
+
+    async fn put(&self, _path_info: PathInfo) -> Result<PathInfo, Error> {
+        Err(Error::StorageError("unimplemented".to_string()))
+    }
+
+    fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
+        Box::pin(tokio_stream::once(Err(Error::StorageError(
+            "unimplemented".to_string(),
+        ))))
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::num::NonZeroUsize;
+
+    use crate::{
+        pathinfoservice::{LruPathInfoService, MemoryPathInfoService, PathInfoService},
+        tests::fixtures::PATH_INFO_WITH_NARINFO,
+    };
+
+    const PATH_INFO_DIGEST: [u8; 20] = [0; 20];
+
+    /// Helper function setting up an instance of a "far" and "near"
+    /// PathInfoService.
+    async fn create_pathinfoservice() -> super::Cache<LruPathInfoService, MemoryPathInfoService> {
+        // Create an instance of a "far" PathInfoService.
+        let far = MemoryPathInfoService::default();
+
+        // … and an instance of a "near" PathInfoService.
+        let near = LruPathInfoService::with_capacity(NonZeroUsize::new(1).unwrap());
+
+        // create a PathInfoService combining the two and return it.
+        super::Cache::new(near, far)
+    }
+
+    /// Getting from the far backend will also insert it into the near one.
+    #[tokio::test]
+    async fn test_populate_cache() {
+        let svc = create_pathinfoservice().await;
+
+        // query the PathInfo, things should not be there.
+        assert!(svc.get(PATH_INFO_DIGEST).await.unwrap().is_none());
+
+        // insert it into the far one.
+        svc.far.put(PATH_INFO_WITH_NARINFO.clone()).await.unwrap();
+
+        // now try getting it again, it should succeed.
+        assert_eq!(
+            Some(PATH_INFO_WITH_NARINFO.clone()),
+            svc.get(PATH_INFO_DIGEST).await.unwrap()
+        );
+
+        // peek near, it should now be there.
+        assert_eq!(
+            Some(PATH_INFO_WITH_NARINFO.clone()),
+            svc.near.get(PATH_INFO_DIGEST).await.unwrap()
+        );
+    }
+}
diff --git a/tvix/store/src/pathinfoservice/from_addr.rs b/tvix/store/src/pathinfoservice/from_addr.rs
index c14696f225..455909e7f2 100644
--- a/tvix/store/src/pathinfoservice/from_addr.rs
+++ b/tvix/store/src/pathinfoservice/from_addr.rs
@@ -47,7 +47,7 @@ pub async fn from_addr(
             if url.has_host() || !url.path().is_empty() {
                 return Err(Error::StorageError("invalid url".to_string()));
             }
-            Box::new(MemoryPathInfoService::new(blob_service, directory_service))
+            Box::<MemoryPathInfoService>::default()
         }
         "sled" => {
             // sled doesn't support host, and a path can be provided (otherwise
@@ -65,10 +65,10 @@ pub async fn from_addr(
             // TODO: expose other parameters as URL parameters?
 
             Box::new(if url.path().is_empty() {
-                SledPathInfoService::new_temporary(blob_service, directory_service)
+                SledPathInfoService::new_temporary()
                     .map_err(|e| Error::StorageError(e.to_string()))?
             } else {
-                SledPathInfoService::new(url.path(), blob_service, directory_service)
+                SledPathInfoService::new(url.path())
                     .map_err(|e| Error::StorageError(e.to_string()))?
             })
         }
@@ -146,9 +146,9 @@ pub async fn from_addr(
 mod tests {
     use super::from_addr;
     use lazy_static::lazy_static;
+    use rstest::rstest;
     use std::sync::Arc;
     use tempfile::TempDir;
-    use test_case::test_case;
     use tvix_castore::{
         blobservice::{BlobService, MemoryBlobService},
         directoryservice::{DirectoryService, MemoryDirectoryService},
@@ -161,72 +161,66 @@ mod tests {
 
     // the gRPC tests below don't fail, because we connect lazily.
 
+    #[rstest]
     /// This uses an unsupported scheme.
-    #[test_case("http://foo.example/test", false; "unsupported scheme")]
+    #[case::unsupported_scheme("http://foo.example/test", false)]
     /// This configures sled in temporary mode.
-    #[test_case("sled://", true; "sled valid temporary")]
+    #[case::sled_temporary("sled://", true)]
     /// This configures sled with /, which should fail.
-    #[test_case("sled:///", false; "sled invalid root")]
+    #[case::sled_invalid_root("sled:///", false)]
     /// This configures sled with a host, not path, which should fail.
-    #[test_case("sled://foo.example", false; "sled invalid host")]
+    #[case::sled_invalid_host("sled://foo.example", false)]
     /// This configures sled with a valid path, which should succeed.
-    #[test_case(&format!("sled://{}", &TMPDIR_SLED_1.path().to_str().unwrap()), true; "sled valid path")]
+    #[case::sled_valid_path(&format!("sled://{}", &TMPDIR_SLED_1.path().to_str().unwrap()), true)]
     /// This configures sled with a host and a valid path, which should fail.
-    #[test_case(&format!("sled://foo.example{}", &TMPDIR_SLED_2.path().to_str().unwrap()), false; "sled invalid host with valid path")]
+    #[case::sled_invalid_host_with_valid_path(&format!("sled://foo.example{}", &TMPDIR_SLED_2.path().to_str().unwrap()), false)]
     /// This correctly sets the scheme, and doesn't set a path.
-    #[test_case("memory://", true; "memory valid")]
+    #[case::memory_valid("memory://", true)]
     /// This sets a memory url host to `foo`
-    #[test_case("memory://foo", false; "memory invalid host")]
+    #[case::memory_invalid_host("memory://foo", false)]
     /// This sets a memory url path to "/", which is invalid.
-    #[test_case("memory:///", false; "memory invalid root path")]
+    #[case::memory_invalid_root_path("memory:///", false)]
     /// This sets a memory url path to "/foo", which is invalid.
-    #[test_case("memory:///foo", false; "memory invalid root path foo")]
+    #[case::memory_invalid_root_path_foo("memory:///foo", false)]
     /// Correct Scheme for the cache.nixos.org binary cache.
-    #[test_case("nix+https://cache.nixos.org", true; "correct nix+https")]
+    #[case::correct_nix_https("nix+https://cache.nixos.org", true)]
     /// Correct Scheme for the cache.nixos.org binary cache (HTTP URL).
-    #[test_case("nix+http://cache.nixos.org", true; "correct nix+http")]
+    #[case::correct_nix_http("nix+http://cache.nixos.org", true)]
     /// Correct Scheme for Nix HTTP Binary cache, with a subpath.
-    #[test_case("nix+http://192.0.2.1/foo", true; "correct nix http with subpath")]
+    #[case::correct_nix_http_with_subpath("nix+http://192.0.2.1/foo", true)]
     /// Correct Scheme for Nix HTTP Binary cache, with a subpath and port.
-    #[test_case("nix+http://[::1]:8080/foo", true; "correct nix http with subpath and port")]
+    #[case::correct_nix_http_with_subpath_and_port("nix+http://[::1]:8080/foo", true)]
     /// Correct Scheme for the cache.nixos.org binary cache, and correct trusted public key set
-    #[test_case("nix+https://cache.nixos.org?trusted-public-keys=cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=", true; "correct nix+https with trusted-public-key")]
+    #[case::correct_nix_https_with_trusted_public_key("nix+https://cache.nixos.org?trusted-public-keys=cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=", true)]
     /// Correct Scheme for the cache.nixos.org binary cache, and two correct trusted public keys set
-    #[test_case("nix+https://cache.nixos.org?trusted-public-keys=cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=%20foo:jp4fCEx9tBEId/L0ZsVJ26k0wC0fu7vJqLjjIGFkup8=", true; "correct nix+https with two trusted-public-key")]
+    #[case::correct_nix_https_with_two_trusted_public_keys("nix+https://cache.nixos.org?trusted-public-keys=cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=%20foo:jp4fCEx9tBEId/L0ZsVJ26k0wC0fu7vJqLjjIGFkup8=", true)]
     /// Correct scheme to connect to a unix socket.
-    #[test_case("grpc+unix:///path/to/somewhere", true; "grpc valid unix socket")]
+    #[case::grpc_valid_unix_socket("grpc+unix:///path/to/somewhere", true)]
     /// Correct scheme for unix socket, but setting a host too, which is invalid.
-    #[test_case("grpc+unix://host.example/path/to/somewhere", false; "grpc invalid unix socket and host")]
+    #[case::grpc_invalid_unix_socket_and_host("grpc+unix://host.example/path/to/somewhere", false)]
     /// Correct scheme to connect to localhost, with port 12345
-    #[test_case("grpc+http://[::1]:12345", true; "grpc valid IPv6 localhost port 12345")]
+    #[case::grpc_valid_ipv6_localhost_port_12345("grpc+http://[::1]:12345", true)]
     /// Correct scheme to connect to localhost over http, without specifying a port.
-    #[test_case("grpc+http://localhost", true; "grpc valid http host without port")]
+    #[case::grpc_valid_http_host_without_port("grpc+http://localhost", true)]
     /// Correct scheme to connect to localhost over https, without specifying a port.
-    #[test_case("grpc+https://localhost", true; "grpc valid https host without port")]
+    #[case::grpc_valid_https_host_without_port("grpc+https://localhost", true)]
     /// Correct scheme to connect to localhost over http, but with additional path, which is invalid.
-    #[test_case("grpc+http://localhost/some-path", false; "grpc valid invalid host and path")]
-    #[tokio::test]
-    async fn test_from_addr_tokio(uri_str: &str, exp_succeed: bool) {
-        let blob_service: Arc<dyn BlobService> = Arc::from(MemoryBlobService::default());
-        let directory_service: Arc<dyn DirectoryService> =
-            Arc::from(MemoryDirectoryService::default());
-
-        let resp = from_addr(uri_str, blob_service, directory_service).await;
-
-        if exp_succeed {
-            resp.expect("should succeed");
-        } else {
-            assert!(resp.is_err(), "should fail");
-        }
-    }
-
-    #[cfg(feature = "cloud")]
+    #[case::grpc_invalid_host_and_path("grpc+http://localhost/some-path", false)]
     /// A valid example for Bigtable.
-    #[test_case("bigtable://instance-1?project_id=project-1&table_name=table-1&family_name=cf1", true; "objectstore valid bigtable url")]
-    /// An invalid examplee for Bigtable, missing fields
-    #[test_case("bigtable://instance-1", false; "objectstore invalid bigtable url, missing fields")]
+    #[cfg_attr(
+        all(feature = "cloud", feature = "integration"),
+        case::bigtable_valid(
+            "bigtable://instance-1?project_id=project-1&table_name=table-1&family_name=cf1",
+            true
+        )
+    )]
+    /// An invalid example for Bigtable, missing fields
+    #[cfg_attr(
+        all(feature = "cloud", feature = "integration"),
+        case::bigtable_invalid_missing_fields("bigtable://instance-1", false)
+    )]
     #[tokio::test]
-    async fn test_from_addr_tokio_cloud(uri_str: &str, exp_succeed: bool) {
+    async fn test_from_addr_tokio(#[case] uri_str: &str, #[case] exp_succeed: bool) {
         let blob_service: Arc<dyn BlobService> = Arc::from(MemoryBlobService::default());
         let directory_service: Arc<dyn DirectoryService> =
             Arc::from(MemoryDirectoryService::default());
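
The `cfg_attr` + rstest combination above makes feature-gated cases disappear from the default test build. A minimal standalone sketch, assuming an `integration` feature exists in the crate:

```rust
use rstest::rstest;

#[rstest]
#[case::always_present(1)]
// This case only exists when the crate is built with the feature enabled.
#[cfg_attr(feature = "integration", case::integration_only(2))]
fn feature_gated(#[case] n: u32) {
    assert!(n > 0);
}
```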
diff --git a/tvix/store/src/pathinfoservice/grpc.rs b/tvix/store/src/pathinfoservice/grpc.rs
index 02e0cb590b..93d2d67c31 100644
--- a/tvix/store/src/pathinfoservice/grpc.rs
+++ b/tvix/store/src/pathinfoservice/grpc.rs
@@ -1,8 +1,11 @@
 use super::PathInfoService;
-use crate::proto::{self, ListPathInfoRequest, PathInfo};
+use crate::{
+    nar::NarCalculationService,
+    proto::{self, ListPathInfoRequest, PathInfo},
+};
 use async_stream::try_stream;
-use data_encoding::BASE64;
 use futures::stream::BoxStream;
+use nix_compat::nixbase32;
 use tonic::{async_trait, transport::Channel, Code};
 use tracing::instrument;
 use tvix_castore::{proto as castorepb, Error};
@@ -27,7 +30,7 @@ impl GRPCPathInfoService {
 
 #[async_trait]
 impl PathInfoService for GRPCPathInfoService {
-    #[instrument(level = "trace", skip_all, fields(path_info.digest = BASE64.encode(&digest)))]
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
     async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
         let path_info = self
             .grpc_client
@@ -67,30 +70,6 @@ impl PathInfoService for GRPCPathInfoService {
         Ok(path_info)
     }
 
-    #[instrument(level = "trace", skip_all, fields(root_node = ?root_node))]
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        let path_info = self
-            .grpc_client
-            .clone()
-            .calculate_nar(castorepb::Node {
-                node: Some(root_node.clone()),
-            })
-            .await
-            .map_err(|e| Error::StorageError(e.to_string()))?
-            .into_inner();
-
-        let nar_sha256: [u8; 32] = path_info
-            .nar_sha256
-            .to_vec()
-            .try_into()
-            .map_err(|_e| Error::StorageError("invalid digest length".to_string()))?;
-
-        Ok((path_info.nar_size, nar_sha256))
-    }
-
     #[instrument(level = "trace", skip_all)]
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
         let mut grpc_client = self.grpc_client.clone();
@@ -126,88 +105,46 @@ impl PathInfoService for GRPCPathInfoService {
     }
 }
 
+#[async_trait]
+impl NarCalculationService for GRPCPathInfoService {
+    #[instrument(level = "trace", skip_all, fields(root_node = ?root_node))]
+    async fn calculate_nar(
+        &self,
+        root_node: &castorepb::node::Node,
+    ) -> Result<(u64, [u8; 32]), Error> {
+        let path_info = self
+            .grpc_client
+            .clone()
+            .calculate_nar(castorepb::Node {
+                node: Some(root_node.clone()),
+            })
+            .await
+            .map_err(|e| Error::StorageError(e.to_string()))?
+            .into_inner();
+
+        let nar_sha256: [u8; 32] = path_info
+            .nar_sha256
+            .to_vec()
+            .try_into()
+            .map_err(|_e| Error::StorageError("invalid digest length".to_string()))?;
+
+        Ok((path_info.nar_size, nar_sha256))
+    }
+}
+
 #[cfg(test)]
 mod tests {
-    use std::sync::Arc;
-    use std::time::Duration;
-
-    use rstest::*;
-    use tempfile::TempDir;
-    use tokio::net::UnixListener;
-    use tokio_retry::strategy::ExponentialBackoff;
-    use tokio_retry::Retry;
-    use tokio_stream::wrappers::UnixListenerStream;
-    use tvix_castore::blobservice::BlobService;
-    use tvix_castore::directoryservice::DirectoryService;
-
-    use crate::pathinfoservice::MemoryPathInfoService;
-    use crate::proto::path_info_service_client::PathInfoServiceClient;
-    use crate::proto::GRPCPathInfoServiceWrapper;
-    use crate::tests::fixtures::{self, blob_service, directory_service};
-
-    use super::GRPCPathInfoService;
-    use super::PathInfoService;
+    use crate::pathinfoservice::tests::make_grpc_path_info_service_client;
+    use crate::tests::fixtures;
 
     /// This ensures connecting via gRPC works as expected.
-    #[rstest]
     #[tokio::test]
-    async fn test_valid_unix_path_ping_pong(
-        blob_service: Arc<dyn BlobService>,
-        directory_service: Arc<dyn DirectoryService>,
-    ) {
-        let tmpdir = TempDir::new().unwrap();
-        let socket_path = tmpdir.path().join("daemon");
-
-        let path_clone = socket_path.clone();
-
-        // Spin up a server
-        tokio::spawn(async {
-            let uds = UnixListener::bind(path_clone).unwrap();
-            let uds_stream = UnixListenerStream::new(uds);
-
-            // spin up a new server
-            let mut server = tonic::transport::Server::builder();
-            let router = server.add_service(
-                crate::proto::path_info_service_server::PathInfoServiceServer::new(
-                    GRPCPathInfoServiceWrapper::new(Box::new(MemoryPathInfoService::new(
-                        blob_service,
-                        directory_service,
-                    ))
-                        as Box<dyn PathInfoService>),
-                ),
-            );
-            router.serve_with_incoming(uds_stream).await
-        });
-
-        // wait for the socket to be created
-        Retry::spawn(
-            ExponentialBackoff::from_millis(20).max_delay(Duration::from_secs(10)),
-            || async {
-                if socket_path.exists() {
-                    Ok(())
-                } else {
-                    Err(())
-                }
-            },
-        )
-        .await
-        .expect("failed to wait for socket");
-
-        // prepare a client
-        let grpc_client = {
-            let url = url::Url::parse(&format!("grpc+unix://{}", socket_path.display()))
-                .expect("must parse");
-            let client = PathInfoServiceClient::new(
-                tvix_castore::tonic::channel_from_url(&url)
-                    .await
-                    .expect("must succeed"),
-            );
-
-            GRPCPathInfoService::from_client(client)
-        };
+    async fn test_valid_unix_path_ping_pong() {
+        let (_blob_service, _directory_service, path_info_service) =
+            make_grpc_path_info_service_client().await;
 
-        let path_info = grpc_client
-            .get(fixtures::DUMMY_OUTPUT_HASH)
+        let path_info = path_info_service
+            .get(fixtures::DUMMY_PATH_DIGEST)
             .await
             .expect("must not be error");
 
diff --git a/tvix/store/src/pathinfoservice/lru.rs b/tvix/store/src/pathinfoservice/lru.rs
new file mode 100644
index 0000000000..da674f497a
--- /dev/null
+++ b/tvix/store/src/pathinfoservice/lru.rs
@@ -0,0 +1,128 @@
+use async_stream::try_stream;
+use futures::stream::BoxStream;
+use lru::LruCache;
+use nix_compat::nixbase32;
+use std::num::NonZeroUsize;
+use std::sync::Arc;
+use tokio::sync::RwLock;
+use tonic::async_trait;
+use tracing::instrument;
+
+use crate::proto::PathInfo;
+use tvix_castore::Error;
+
+use super::PathInfoService;
+
+pub struct LruPathInfoService {
+    lru: Arc<RwLock<LruCache<[u8; 20], PathInfo>>>,
+}
+
+impl LruPathInfoService {
+    pub fn with_capacity(capacity: NonZeroUsize) -> Self {
+        Self {
+            lru: Arc::new(RwLock::new(LruCache::new(capacity))),
+        }
+    }
+}
+
+#[async_trait]
+impl PathInfoService for LruPathInfoService {
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
+    async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
+        Ok(self.lru.write().await.get(&digest).cloned())
+    }
+
+    #[instrument(level = "trace", skip_all, fields(path_info.root_node = ?path_info.node))]
+    async fn put(&self, path_info: PathInfo) -> Result<PathInfo, Error> {
+        // call validate
+        let store_path = path_info
+            .validate()
+            .map_err(|e| Error::InvalidRequest(format!("invalid PathInfo: {}", e)))?;
+
+        self.lru
+            .write()
+            .await
+            .put(*store_path.digest(), path_info.clone());
+
+        Ok(path_info)
+    }
+
+    fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
+        let lru = self.lru.clone();
+        Box::pin(try_stream! {
+            let lru = lru.read().await;
+            let it = lru.iter();
+
+            for (_k,v) in it {
+                yield v.clone()
+            }
+        })
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::num::NonZeroUsize;
+
+    use crate::{
+        pathinfoservice::{LruPathInfoService, PathInfoService},
+        proto::PathInfo,
+        tests::fixtures::PATH_INFO_WITH_NARINFO,
+    };
+    use lazy_static::lazy_static;
+    use tvix_castore::proto as castorepb;
+
+    lazy_static! {
+        static ref PATHINFO_1: PathInfo = PATH_INFO_WITH_NARINFO.clone();
+        static ref PATHINFO_1_DIGEST: [u8; 20] = [0; 20];
+        static ref PATHINFO_2: PathInfo = {
+            let mut p = PATHINFO_1.clone();
+            let root_node = p.node.as_mut().unwrap();
+            if let castorepb::Node { node: Some(node) } = root_node {
+                let n = node.to_owned();
+                *node = n.rename("11111111111111111111111111111111-dummy2".into());
+            } else {
+                unreachable!()
+            }
+            p
+        };
+        static ref PATHINFO_2_DIGEST: [u8; 20] = *(PATHINFO_2.validate().unwrap()).digest();
+    }
+
+    #[tokio::test]
+    async fn evict() {
+        let svc = LruPathInfoService::with_capacity(NonZeroUsize::new(1).unwrap());
+
+        // pathinfo_1 should not be there
+        assert!(svc
+            .get(*PATHINFO_1_DIGEST)
+            .await
+            .expect("no error")
+            .is_none());
+
+        // insert it
+        svc.put(PATHINFO_1.clone()).await.expect("no error");
+
+        // now it should be there.
+        assert_eq!(
+            Some(PATHINFO_1.clone()),
+            svc.get(*PATHINFO_1_DIGEST).await.expect("no error")
+        );
+
+        // insert pathinfo_2. This will evict pathinfo 1
+        svc.put(PATHINFO_2.clone()).await.expect("no error");
+
+        // now pathinfo 2 should be there.
+        assert_eq!(
+            Some(PATHINFO_2.clone()),
+            svc.get(*PATHINFO_2_DIGEST).await.expect("no error")
+        );
+
+        // … but pathinfo 1 not anymore.
+        assert!(svc
+            .get(*PATHINFO_1_DIGEST)
+            .await
+            .expect("no error")
+            .is_none());
+    }
+}
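
LruPathInfoService is a thin wrapper over the `lru` crate's LruCache behind a tokio RwLock. A standalone sketch of the eviction semantics the `evict` test above relies on:

    use lru::LruCache;
    use std::num::NonZeroUsize;

    fn main() {
        // Capacity 1: the second put evicts the least-recently-used entry.
        let mut cache: LruCache<[u8; 20], &str> = LruCache::new(NonZeroUsize::new(1).unwrap());
        cache.put([0; 20], "pathinfo-1");
        cache.put([1; 20], "pathinfo-2"); // evicts [0; 20]
        assert!(cache.get(&[0; 20]).is_none());
        assert_eq!(cache.get(&[1; 20]), Some(&"pathinfo-2"));
    }
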
diff --git a/tvix/store/src/pathinfoservice/memory.rs b/tvix/store/src/pathinfoservice/memory.rs
index f8435dbbf8..3de3221df2 100644
--- a/tvix/store/src/pathinfoservice/memory.rs
+++ b/tvix/store/src/pathinfoservice/memory.rs
@@ -1,40 +1,24 @@
 use super::PathInfoService;
-use crate::{nar::calculate_size_and_sha256, proto::PathInfo};
-use futures::stream::{iter, BoxStream};
-use std::{
-    collections::HashMap,
-    sync::{Arc, RwLock},
-};
+use crate::proto::PathInfo;
+use async_stream::try_stream;
+use futures::stream::BoxStream;
+use nix_compat::nixbase32;
+use std::{collections::HashMap, sync::Arc};
+use tokio::sync::RwLock;
 use tonic::async_trait;
-use tvix_castore::proto as castorepb;
+use tracing::instrument;
 use tvix_castore::Error;
-use tvix_castore::{blobservice::BlobService, directoryservice::DirectoryService};
 
-pub struct MemoryPathInfoService<BS, DS> {
+#[derive(Default)]
+pub struct MemoryPathInfoService {
     db: Arc<RwLock<HashMap<[u8; 20], PathInfo>>>,
-
-    blob_service: BS,
-    directory_service: DS,
-}
-
-impl<BS, DS> MemoryPathInfoService<BS, DS> {
-    pub fn new(blob_service: BS, directory_service: DS) -> Self {
-        Self {
-            db: Default::default(),
-            blob_service,
-            directory_service,
-        }
-    }
 }
 
 #[async_trait]
-impl<BS, DS> PathInfoService for MemoryPathInfoService<BS, DS>
-where
-    BS: AsRef<dyn BlobService> + Send + Sync,
-    DS: AsRef<dyn DirectoryService> + Send + Sync,
-{
+impl PathInfoService for MemoryPathInfoService {
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
     async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
-        let db = self.db.read().unwrap();
+        let db = self.db.read().await;
 
         match db.get(&digest) {
             None => Ok(None),
@@ -42,6 +26,7 @@ where
         }
     }
 
+    #[instrument(level = "trace", skip_all, fields(path_info.root_node = ?path_info.node))]
     async fn put(&self, path_info: PathInfo) -> Result<PathInfo, Error> {
         // Call validate on the received PathInfo message.
         match path_info.validate() {
@@ -53,7 +38,7 @@ where
             // In case the PathInfo is valid, and we were able to extract a NixPath, store it in the database.
             // This overwrites existing PathInfo objects.
             Ok(nix_path) => {
-                let mut db = self.db.write().unwrap();
+                let mut db = self.db.write().await;
                 db.insert(*nix_path.digest(), path_info.clone());
 
                 Ok(path_info)
@@ -61,24 +46,16 @@ where
         }
     }
 
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        calculate_size_and_sha256(root_node, &self.blob_service, &self.directory_service)
-            .await
-            .map_err(|e| Error::StorageError(e.to_string()))
-    }
-
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
-        let db = self.db.read().unwrap();
+        let db = self.db.clone();
 
-        // Copy all elements into a list.
-        // This is a bit ugly, because we can't have db escape the lifetime
-        // of this function, but elements need to be returned owned anyways, and this in-
-        // memory impl is only for testing purposes anyways.
-        let items: Vec<_> = db.iter().map(|(_k, v)| Ok(v.clone())).collect();
+        Box::pin(try_stream! {
+            let db = db.read().await;
+            let it = db.iter();
 
-        Box::pin(iter(items))
+            for (_k, v) in it {
+                yield v.clone()
+            }
+        })
     }
 }
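
The rewritten `list` no longer snapshots the map eagerly; it clones the `Arc` and takes the tokio `RwLock` read guard only once the stream is first polled. A hedged, self-contained sketch of that `try_stream!` pattern:

    use async_stream::try_stream;
    use futures::stream::BoxStream;
    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::RwLock;

    fn list_values(
        db: Arc<RwLock<HashMap<[u8; 20], String>>>,
    ) -> BoxStream<'static, Result<String, std::io::Error>> {
        Box::pin(try_stream! {
            // The lock is acquired lazily, when the stream is polled.
            let db = db.read().await;
            for (_k, v) in db.iter() {
                yield v.clone();
            }
        })
    }
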
diff --git a/tvix/store/src/pathinfoservice/mod.rs b/tvix/store/src/pathinfoservice/mod.rs
index c1a482bbb5..574bcc0b8b 100644
--- a/tvix/store/src/pathinfoservice/mod.rs
+++ b/tvix/store/src/pathinfoservice/mod.rs
@@ -1,5 +1,7 @@
+mod combinators;
 mod from_addr;
 mod grpc;
+mod lru;
 mod memory;
 mod nix_http;
 mod sled;
@@ -12,13 +14,14 @@ mod tests;
 
 use futures::stream::BoxStream;
 use tonic::async_trait;
-use tvix_castore::proto as castorepb;
 use tvix_castore::Error;
 
 use crate::proto::PathInfo;
 
+pub use self::combinators::Cache as CachePathInfoService;
 pub use self::from_addr::from_addr;
 pub use self::grpc::GRPCPathInfoService;
+pub use self::lru::LruPathInfoService;
 pub use self::memory::MemoryPathInfoService;
 pub use self::nix_http::NixHTTPPathInfoService;
 pub use self::sled::SledPathInfoService;
@@ -41,14 +44,6 @@ pub trait PathInfoService: Send + Sync {
     /// invalid messages.
     async fn put(&self, path_info: PathInfo) -> Result<PathInfo, Error>;
 
-    /// Return the nar size and nar sha256 digest for a given root node.
-    /// This can be used to calculate NAR-based output paths,
-    /// and implementations are encouraged to cache it.
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error>;
-
     /// Iterate over all PathInfo objects in the store.
     /// Implementations can decide to disallow listing.
     ///
@@ -72,13 +67,6 @@ where
         self.as_ref().put(path_info).await
     }
 
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        self.as_ref().calculate_nar(root_node).await
-    }
-
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
         self.as_ref().list()
     }
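
With `calculate_nar` factored out of the trait, `PathInfoService` implementations compose freely. Assuming `CachePathInfoService::new(near, far)` wires a fast store in front of a slower one (the constructor is not shown in this diff, so treat the signature as hypothetical), a cached remote store could look like:

    // Hypothetical composition; constructor signature assumed, not shown here.
    // let svc = CachePathInfoService::new(
    //     LruPathInfoService::with_capacity(NonZeroUsize::new(1024).unwrap()),
    //     GRPCPathInfoService::from_client(client),
    // );
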
diff --git a/tvix/store/src/pathinfoservice/nix_http.rs b/tvix/store/src/pathinfoservice/nix_http.rs
index bdb0e2c3cb..cccd4805c6 100644
--- a/tvix/store/src/pathinfoservice/nix_http.rs
+++ b/tvix/store/src/pathinfoservice/nix_http.rs
@@ -1,6 +1,3 @@
-use std::io::{self, BufRead, Read, Write};
-
-use data_encoding::BASE64;
 use futures::{stream::BoxStream, TryStreamExt};
 use nix_compat::{
     narinfo::{self, NarInfo},
@@ -8,7 +5,10 @@ use nix_compat::{
     nixhash::NixHash,
 };
 use reqwest::StatusCode;
-use sha2::{digest::FixedOutput, Digest, Sha256};
+use sha2::Digest;
+use std::io::{self, Write};
+use tokio::io::{AsyncRead, BufReader};
+use tokio_util::io::InspectReader;
 use tonic::async_trait;
 use tracing::{debug, instrument, warn};
 use tvix_castore::{
@@ -32,8 +32,7 @@ use super::PathInfoService;
 ///
 /// The client is expected to be (indirectly) using the same [BlobService] and
 /// [DirectoryService], so able to fetch referred Directories and Blobs.
-/// [PathInfoService::put] and [PathInfoService::calculate_nar] are not
-/// implemented and return an error if called.
+/// [PathInfoService::put] is not implemented and returns an error if called.
 /// TODO: what about reading from nix-cache-info?
 pub struct NixHTTPPathInfoService<BS, DS> {
     base_url: url::Url,
@@ -71,7 +70,7 @@ where
     BS: AsRef<dyn BlobService> + Send + Sync + Clone + 'static,
     DS: AsRef<dyn DirectoryService> + Send + Sync + Clone + 'static,
 {
-    #[instrument(skip_all, err, fields(path.digest=BASE64.encode(&digest)))]
+    #[instrument(skip_all, err, fields(path.digest=nixbase32::encode(&digest)))]
     async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
         let narinfo_url = self
             .base_url
@@ -171,85 +170,83 @@ where
             )));
         }
 
-        // get an AsyncRead of the response body.
-        let async_r = tokio_util::io::StreamReader::new(resp.bytes_stream().map_err(|e| {
+        // get a reader of the response body.
+        let r = tokio_util::io::StreamReader::new(resp.bytes_stream().map_err(|e| {
             let e = e.without_url();
             warn!(e=%e, "failed to get response body");
             io::Error::new(io::ErrorKind::BrokenPipe, e.to_string())
         }));
-        let sync_r = tokio_util::io::SyncIoBridge::new(async_r);
 
-        // handle decompression, by wrapping the reader.
-        let sync_r: Box<dyn BufRead + Send> = match narinfo.compression {
-            Some("none") => Box::new(sync_r),
-            Some("xz") => Box::new(io::BufReader::new(xz2::read::XzDecoder::new(sync_r))),
-            Some(comp) => {
-                return Err(Error::InvalidRequest(
-                    format!("unsupported compression: {}", comp).to_string(),
-                ))
-            }
-            None => {
-                return Err(Error::InvalidRequest(
-                    "unsupported compression: bzip2".to_string(),
-                ))
+        // handle decompression, depending on the compression field.
+        let r: Box<dyn AsyncRead + Send + Unpin> = match narinfo.compression {
+            Some("none") => Box::new(r) as Box<dyn AsyncRead + Send + Unpin>,
+            Some("bzip2") | None => Box::new(async_compression::tokio::bufread::BzDecoder::new(r))
+                as Box<dyn AsyncRead + Send + Unpin>,
+            Some("gzip") => Box::new(async_compression::tokio::bufread::GzipDecoder::new(r))
+                as Box<dyn AsyncRead + Send + Unpin>,
+            Some("xz") => Box::new(async_compression::tokio::bufread::XzDecoder::new(r))
+                as Box<dyn AsyncRead + Send + Unpin>,
+            Some("zstd") => Box::new(async_compression::tokio::bufread::ZstdDecoder::new(r))
+                as Box<dyn AsyncRead + Send + Unpin>,
+            Some(comp_str) => {
+                return Err(Error::StorageError(format!(
+                    "unsupported compression: {comp_str}"
+                )));
             }
         };
-
-        let res = tokio::task::spawn_blocking({
-            let blob_service = self.blob_service.clone();
-            let directory_service = self.directory_service.clone();
-            move || -> io::Result<_> {
-                // Wrap the reader once more, so we can calculate NarSize and NarHash
-                let mut sync_r = io::BufReader::new(NarReader::from(sync_r));
-                let root_node = crate::nar::read_nar(&mut sync_r, blob_service, directory_service)?;
-
-                let (_, nar_hash, nar_size) = sync_r.into_inner().into_inner();
-
-                Ok((root_node, nar_hash, nar_size))
-            }
-        })
+        let mut nar_hash = sha2::Sha256::new();
+        let mut nar_size = 0;
+
+        // Assemble NarHash and NarSize as we read bytes.
+        let r = InspectReader::new(r, |b| {
+            nar_size += b.len() as u64;
+            nar_hash.write_all(b).unwrap();
+        });
+
+        // HACK: InspectReader doesn't implement AsyncBufRead, and neither do
+        // the decompressors, so wrap the reader in a BufReader before handing
+        // it to ingest_nar.
+        let mut r = BufReader::new(r);
+
+        let root_node = crate::nar::ingest_nar(
+            self.blob_service.clone(),
+            self.directory_service.clone(),
+            &mut r,
+        )
         .await
-        .unwrap();
-
-        match res {
-            Ok((root_node, nar_hash, nar_size)) => {
-                // ensure the ingested narhash and narsize do actually match.
-                if narinfo.nar_size != nar_size {
-                    warn!(
-                        narinfo.nar_size = narinfo.nar_size,
-                        http.nar_size = nar_size,
-                        "NARSize mismatch"
-                    );
-                    Err(io::Error::new(
-                        io::ErrorKind::InvalidData,
-                        "NarSize mismatch".to_string(),
-                    ))?;
-                }
-                if narinfo.nar_hash != nar_hash {
-                    warn!(
-                        narinfo.nar_hash = %NixHash::Sha256(narinfo.nar_hash),
-                        http.nar_hash = %NixHash::Sha256(nar_hash),
-                        "NarHash mismatch"
-                    );
-                    Err(io::Error::new(
-                        io::ErrorKind::InvalidData,
-                        "NarHash mismatch".to_string(),
-                    ))?;
-                }
-
-                Ok(Some(PathInfo {
-                    node: Some(castorepb::Node {
-                        // set the name of the root node to the digest-name of the store path.
-                        node: Some(
-                            root_node.rename(narinfo.store_path.to_string().to_owned().into()),
-                        ),
-                    }),
-                    references: pathinfo.references,
-                    narinfo: pathinfo.narinfo,
-                }))
-            }
-            Err(e) => Err(e.into()),
+        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+
+        // ensure the ingested narhash and narsize do actually match.
+        if narinfo.nar_size != nar_size {
+            warn!(
+                narinfo.nar_size = narinfo.nar_size,
+                http.nar_size = nar_size,
+                "NarSize mismatch"
+            );
+            Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "NarSize mismatch".to_string(),
+            ))?;
+        }
+        let nar_hash: [u8; 32] = nar_hash.finalize().into();
+        if narinfo.nar_hash != nar_hash {
+            warn!(
+                narinfo.nar_hash = %NixHash::Sha256(narinfo.nar_hash),
+                http.nar_hash = %NixHash::Sha256(nar_hash),
+                "NarHash mismatch"
+            );
+            Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "NarHash mismatch".to_string(),
+            ))?;
         }
+
+        Ok(Some(PathInfo {
+            node: Some(castorepb::Node {
+                // set the name of the root node to the digest-name of the store path.
+                node: Some(root_node.rename(narinfo.store_path.to_string().into())),
+            }),
+            references: pathinfo.references,
+            narinfo: pathinfo.narinfo,
+        }))
     }
 
     #[instrument(skip_all, fields(path_info=?_path_info))]
@@ -259,16 +256,6 @@ where
         ))
     }
 
-    #[instrument(skip_all, fields(root_node=?root_node))]
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        Err(Error::InvalidRequest(
-            "calculate_nar not supported for this backend".to_string(),
-        ))
-    }
-
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
         Box::pin(futures::stream::once(async {
             Err(Error::InvalidRequest(
@@ -277,38 +264,3 @@ where
         }))
     }
 }
-
-/// Small helper reader implementing [std::io::Read].
-/// It can be used to wrap another reader, counts the number of bytes read
-/// and the sha256 digest of the contents.
-struct NarReader<R: Read> {
-    r: R,
-
-    sha256: sha2::Sha256,
-    bytes_read: u64,
-}
-
-impl<R: Read> NarReader<R> {
-    pub fn from(inner: R) -> Self {
-        Self {
-            r: inner,
-            sha256: Sha256::new(),
-            bytes_read: 0,
-        }
-    }
-
-    /// Returns the (remaining) inner reader, the sha256 digest and the number of bytes read.
-    pub fn into_inner(self) -> (R, [u8; 32], u64) {
-        (self.r, self.sha256.finalize_fixed().into(), self.bytes_read)
-    }
-}
-
-impl<R: Read> Read for NarReader<R> {
-    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        self.r.read(buf).map(|n| {
-            self.bytes_read += n as u64;
-            self.sha256.write_all(&buf[..n]).unwrap();
-            n
-        })
-    }
-}
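
The blocking `NarReader` is replaced by `tokio_util::io::InspectReader`, which taps every chunk flowing through an `AsyncRead`. A minimal sketch of computing a SHA-256 digest and a byte count this way (the payload is dummy data):

    use sha2::{Digest, Sha256};
    use std::io::Write;
    use tokio_util::io::InspectReader;

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let mut nar_hash = Sha256::new();
        let mut nar_size = 0u64;
        {
            let data: &[u8] = b"dummy nar bytes";
            let mut r = InspectReader::new(data, |chunk| {
                nar_size += chunk.len() as u64;
                nar_hash.write_all(chunk).unwrap();
            });
            // Drain the reader; the closure observes every chunk in transit.
            tokio::io::copy(&mut r, &mut tokio::io::sink()).await?;
        }
        let nar_sha256: [u8; 32] = nar_hash.finalize().into();
        println!("nar_size={nar_size} nar_sha256={nar_sha256:x?}");
        Ok(())
    }
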
diff --git a/tvix/store/src/pathinfoservice/sled.rs b/tvix/store/src/pathinfoservice/sled.rs
index 7b6d7fd7ab..eb3cf2ff1b 100644
--- a/tvix/store/src/pathinfoservice/sled.rs
+++ b/tvix/store/src/pathinfoservice/sled.rs
@@ -1,140 +1,117 @@
 use super::PathInfoService;
-use crate::nar::calculate_size_and_sha256;
 use crate::proto::PathInfo;
-use futures::stream::iter;
+use async_stream::try_stream;
 use futures::stream::BoxStream;
+use nix_compat::nixbase32;
 use prost::Message;
 use std::path::Path;
 use tonic::async_trait;
+use tracing::instrument;
 use tracing::warn;
-use tvix_castore::proto as castorepb;
-use tvix_castore::{blobservice::BlobService, directoryservice::DirectoryService, Error};
+use tvix_castore::Error;
 
 /// SledPathInfoService stores PathInfo in a [sled](https://github.com/spacejam/sled).
 ///
 /// The PathInfo messages are stored as encoded protos, and keyed by their output hash,
 /// as that's currently the only request type available.
-pub struct SledPathInfoService<BS, DS> {
+pub struct SledPathInfoService {
     db: sled::Db,
-
-    blob_service: BS,
-    directory_service: DS,
 }
 
-impl<BS, DS> SledPathInfoService<BS, DS> {
-    pub fn new<P: AsRef<Path>>(
-        p: P,
-        blob_service: BS,
-        directory_service: DS,
-    ) -> Result<Self, sled::Error> {
+impl SledPathInfoService {
+    pub fn new<P: AsRef<Path>>(p: P) -> Result<Self, sled::Error> {
         let config = sled::Config::default()
             .use_compression(false) // is a required parameter
             .path(p);
         let db = config.open()?;
 
-        Ok(Self {
-            db,
-            blob_service,
-            directory_service,
-        })
+        Ok(Self { db })
     }
 
-    pub fn new_temporary(blob_service: BS, directory_service: DS) -> Result<Self, sled::Error> {
+    pub fn new_temporary() -> Result<Self, sled::Error> {
         let config = sled::Config::default().temporary(true);
         let db = config.open()?;
 
-        Ok(Self {
-            db,
-            blob_service,
-            directory_service,
-        })
+        Ok(Self { db })
     }
 }
 
 #[async_trait]
-impl<BS, DS> PathInfoService for SledPathInfoService<BS, DS>
-where
-    BS: AsRef<dyn BlobService> + Send + Sync,
-    DS: AsRef<dyn DirectoryService> + Send + Sync,
-{
+impl PathInfoService for SledPathInfoService {
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
     async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
-        match self.db.get(digest) {
-            Ok(None) => Ok(None),
-            Ok(Some(data)) => match PathInfo::decode(&*data) {
-                Ok(path_info) => Ok(Some(path_info)),
-                Err(e) => {
+        let resp = tokio::task::spawn_blocking({
+            let db = self.db.clone();
+            move || db.get(digest.as_slice())
+        })
+        .await?
+        .map_err(|e| {
+            warn!("failed to retrieve PathInfo: {}", e);
+            Error::StorageError(format!("failed to retrieve PathInfo: {}", e))
+        })?;
+        match resp {
+            None => Ok(None),
+            Some(data) => {
+                let path_info = PathInfo::decode(&*data).map_err(|e| {
                     warn!("failed to decode stored PathInfo: {}", e);
-                    Err(Error::StorageError(format!(
-                        "failed to decode stored PathInfo: {}",
-                        e
-                    )))
-                }
-            },
-            Err(e) => {
-                warn!("failed to retrieve PathInfo: {}", e);
-                Err(Error::StorageError(format!(
-                    "failed to retrieve PathInfo: {}",
-                    e
-                )))
+                    Error::StorageError(format!("failed to decode stored PathInfo: {}", e))
+                })?;
+                Ok(Some(path_info))
             }
         }
     }
 
+    #[instrument(level = "trace", skip_all, fields(path_info.root_node = ?path_info.node))]
     async fn put(&self, path_info: PathInfo) -> Result<PathInfo, Error> {
         // Call validate on the received PathInfo message.
-        match path_info.validate() {
-            Err(e) => Err(Error::InvalidRequest(format!(
-                "failed to validate PathInfo: {}",
-                e
-            ))),
-            // In case the PathInfo is valid, and we were able to extract a NixPath, store it in the database.
-            // This overwrites existing PathInfo objects.
-            Ok(nix_path) => match self
-                .db
-                .insert(*nix_path.digest(), path_info.encode_to_vec())
-            {
-                Ok(_) => Ok(path_info),
-                Err(e) => {
-                    warn!("failed to insert PathInfo: {}", e);
-                    Err(Error::StorageError(format! {
-                        "failed to insert PathInfo: {}", e
-                    }))
-                }
-            },
-        }
-    }
+        let store_path = path_info
+            .validate()
+            .map_err(|e| Error::InvalidRequest(format!("failed to validate PathInfo: {}", e)))?;
+
+        // In case the PathInfo is valid, we were able to parse a StorePath.
+        // Store it in the database, keyed by its digest.
+        // This overwrites existing PathInfo objects.
+        tokio::task::spawn_blocking({
+            let db = self.db.clone();
+            let k = *store_path.digest();
+            let data = path_info.encode_to_vec();
+            move || db.insert(k, data)
+        })
+        .await?
+        .map_err(|e| {
+            warn!("failed to insert PathInfo: {}", e);
+            Error::StorageError(format! {
+                "failed to insert PathInfo: {}", e
+            })
+        })?;
 
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        calculate_size_and_sha256(root_node, &self.blob_service, &self.directory_service)
-            .await
-            .map_err(|e| Error::StorageError(e.to_string()))
+        Ok(path_info)
     }
 
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
-        Box::pin(iter(self.db.iter().values().map(|v| match v {
-            Ok(data) => {
-                // we retrieved some bytes
-                match PathInfo::decode(&*data) {
-                    Ok(path_info) => Ok(path_info),
-                    Err(e) => {
-                        warn!("failed to decode stored PathInfo: {}", e);
-                        Err(Error::StorageError(format!(
-                            "failed to decode stored PathInfo: {}",
-                            e
-                        )))
-                    }
-                }
-            }
-            Err(e) => {
-                warn!("failed to retrieve PathInfo: {}", e);
-                Err(Error::StorageError(format!(
-                    "failed to retrieve PathInfo: {}",
-                    e
-                )))
+        let db = self.db.clone();
+        let mut it = db.iter().values();
+
+        Box::pin(try_stream! {
+            // Don't block the executor while waiting for .next(): wrap each
+            // call in spawn_blocking. The iterator `it` is moved into the
+            // closure and handed back so it can be reused on the next round.
+            while let (Some(elem), new_it) = tokio::task::spawn_blocking(move || {
+                (it.next(), it)
+            }).await? {
+                it = new_it;
+                let data = elem.map_err(|e| {
+                    warn!("failed to retrieve PathInfo: {}", e);
+                    Error::StorageError(format!("failed to retrieve PathInfo: {}", e))
+                })?;
+
+                let path_info = PathInfo::decode(&*data).map_err(|e| {
+                    warn!("failed to decode stored PathInfo: {}", e);
+                    Error::StorageError(format!("failed to decode stored PathInfo: {}", e))
+                })?;
+
+                yield path_info
             }
-        })))
+        })
     }
 }
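
sled iterators are blocking, so the new `list` threads the iterator through repeated `spawn_blocking` calls: each `.next()` runs off the executor, and the iterator is returned alongside the item for the next round. A standalone sketch of that trick:

    async fn drain_blocking_iter(mut it: std::vec::IntoIter<u32>) -> Vec<u32> {
        let mut out = Vec::new();
        loop {
            // Move the iterator into the blocking task, get it back with the item.
            let (elem, new_it) = tokio::task::spawn_blocking(move || (it.next(), it))
                .await
                .expect("blocking task panicked");
            it = new_it;
            match elem {
                Some(v) => out.push(v),
                None => break,
            }
        }
        out
    }
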
diff --git a/tvix/store/src/pathinfoservice/tests/mod.rs b/tvix/store/src/pathinfoservice/tests/mod.rs
index da9ad11cab..26166d1b75 100644
--- a/tvix/store/src/pathinfoservice/tests/mod.rs
+++ b/tvix/store/src/pathinfoservice/tests/mod.rs
@@ -10,10 +10,10 @@ use tvix_castore::{blobservice::BlobService, directoryservice::DirectoryService}
 
 use super::PathInfoService;
 use crate::proto::PathInfo;
-use crate::tests::fixtures::DUMMY_OUTPUT_HASH;
+use crate::tests::fixtures::DUMMY_PATH_DIGEST;
 
 mod utils;
-use self::utils::make_grpc_path_info_service_client;
+pub use self::utils::make_grpc_path_info_service_client;
 
 /// Convenience type alias batching all three services together.
 #[allow(clippy::upper_case_acronyms)]
@@ -51,7 +51,7 @@ pub async fn make_path_info_service(uri: &str) -> BSDSPS {
 #[case::memory(make_path_info_service("memory://").await)]
 #[case::grpc(make_grpc_path_info_service_client().await)]
 #[case::sled(make_path_info_service("sled://").await)]
-#[cfg_attr(feature = "cloud", case::bigtable(make_path_info_service("bigtable://instance-1?project_id=project-1&table_name=table-1&family_name=cf1").await))]
+#[cfg_attr(all(feature = "cloud",feature="integration"), case::bigtable(make_path_info_service("bigtable://instance-1?project_id=project-1&table_name=table-1&family_name=cf1").await))]
 pub fn path_info_services(
     #[case] services: (
         impl BlobService,
@@ -71,7 +71,7 @@ pub fn path_info_services(
 async fn not_found(services: BSDSPS) {
     let (_, _, path_info_service) = services;
     assert!(path_info_service
-        .get(DUMMY_OUTPUT_HASH)
+        .get(DUMMY_PATH_DIGEST)
         .await
         .expect("must succeed")
         .is_none());
@@ -105,7 +105,7 @@ async fn put_get(services: BSDSPS) {
 
     // get it back
     let resp = path_info_service
-        .get(DUMMY_OUTPUT_HASH)
+        .get(DUMMY_PATH_DIGEST)
         .await
         .expect("must succeed");
 
diff --git a/tvix/store/src/pathinfoservice/tests/utils.rs b/tvix/store/src/pathinfoservice/tests/utils.rs
index 31ec57aade..30c5902b61 100644
--- a/tvix/store/src/pathinfoservice/tests/utils.rs
+++ b/tvix/store/src/pathinfoservice/tests/utils.rs
@@ -3,6 +3,7 @@ use std::sync::Arc;
 use tonic::transport::{Endpoint, Server, Uri};
 
 use crate::{
+    nar::{NarCalculationService, SimpleRenderer},
     pathinfoservice::{GRPCPathInfoService, MemoryPathInfoService, PathInfoService},
     proto::{
         path_info_service_client::PathInfoServiceClient,
@@ -26,12 +27,15 @@ pub async fn make_grpc_path_info_service_client() -> super::BSDSPS {
         let directory_service = directory_service.clone();
         async move {
             let path_info_service: Arc<dyn PathInfoService> =
-                Arc::from(MemoryPathInfoService::new(blob_service, directory_service));
+                Arc::from(MemoryPathInfoService::default());
+            let nar_calculation_service =
+                Box::new(SimpleRenderer::new(blob_service, directory_service))
+                    as Box<dyn NarCalculationService>;
 
-            // spin up a new DirectoryService
+            // spin up a new PathInfoService
             let mut server = Server::builder();
             let router = server.add_service(PathInfoServiceServer::new(
-                GRPCPathInfoServiceWrapper::new(path_info_service),
+                GRPCPathInfoServiceWrapper::new(path_info_service, nar_calculation_service),
             ));
 
             router
diff --git a/tvix/store/src/proto/grpc_pathinfoservice_wrapper.rs b/tvix/store/src/proto/grpc_pathinfoservice_wrapper.rs
index 9f45818227..68f5575676 100644
--- a/tvix/store/src/proto/grpc_pathinfoservice_wrapper.rs
+++ b/tvix/store/src/proto/grpc_pathinfoservice_wrapper.rs
@@ -1,4 +1,4 @@
-use crate::nar::RenderError;
+use crate::nar::{NarCalculationService, RenderError};
 use crate::pathinfoservice::PathInfoService;
 use crate::proto;
 use futures::{stream::BoxStream, TryStreamExt};
@@ -7,23 +7,26 @@ use tonic::{async_trait, Request, Response, Result, Status};
 use tracing::{instrument, warn};
 use tvix_castore::proto as castorepb;
 
-pub struct GRPCPathInfoServiceWrapper<PS> {
-    inner: PS,
+pub struct GRPCPathInfoServiceWrapper<PS, NS> {
+    path_info_service: PS,
     // FUTUREWORK: allow exposing without allowing listing
+    nar_calculation_service: NS,
 }
 
-impl<PS> GRPCPathInfoServiceWrapper<PS> {
-    pub fn new(path_info_service: PS) -> Self {
+impl<PS, NS> GRPCPathInfoServiceWrapper<PS, NS> {
+    pub fn new(path_info_service: PS, nar_calculation_service: NS) -> Self {
         Self {
-            inner: path_info_service,
+            path_info_service,
+            nar_calculation_service,
         }
     }
 }
 
 #[async_trait]
-impl<PS> proto::path_info_service_server::PathInfoService for GRPCPathInfoServiceWrapper<PS>
+impl<PS, NS> proto::path_info_service_server::PathInfoService for GRPCPathInfoServiceWrapper<PS, NS>
 where
     PS: Deref<Target = dyn PathInfoService> + Send + Sync + 'static,
+    NS: NarCalculationService + Send + Sync + 'static,
 {
     type ListStream = BoxStream<'static, tonic::Result<proto::PathInfo, Status>>;
 
@@ -39,7 +42,7 @@ where
                     .to_vec()
                     .try_into()
                     .map_err(|_e| Status::invalid_argument("invalid output digest length"))?;
-                match self.inner.get(digest).await {
+                match self.path_info_service.get(digest).await {
                     Ok(None) => Err(Status::not_found("PathInfo not found")),
                     Ok(Some(path_info)) => Ok(Response::new(path_info)),
                     Err(e) => {
@@ -57,7 +60,7 @@ where
 
         // Store the PathInfo in the client. Clients MUST validate the data
         // they receive, so we don't validate additionally here.
-        match self.inner.put(path_info).await {
+        match self.path_info_service.put(path_info).await {
             Ok(path_info_new) => Ok(Response::new(path_info_new)),
             Err(e) => {
                 warn!(err = %e, "failed to put PathInfo");
@@ -79,7 +82,7 @@ where
                     Err(Status::invalid_argument("invalid root node"))?
                 }
 
-                match self.inner.calculate_nar(&root_node).await {
+                match self.nar_calculation_service.calculate_nar(&root_node).await {
                     Ok((nar_size, nar_sha256)) => Ok(Response::new(proto::CalculateNarResponse {
                         nar_size,
                         nar_sha256: nar_sha256.to_vec().into(),
@@ -99,7 +102,7 @@ where
         _request: Request<proto::ListPathInfoRequest>,
     ) -> Result<Response<Self::ListStream>, Status> {
         let stream = Box::pin(
-            self.inner
+            self.path_info_service
                 .list()
                 .map_err(|e| Status::internal(e.to_string())),
         );
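
After this change, the wrapper routes `CalculateNar` to a dedicated `NarCalculationService` while all other RPCs hit the `PathInfoService`. A hedged wiring sketch, mirroring the test utility earlier in this diff:

    // let wrapper = GRPCPathInfoServiceWrapper::new(
    //     path_info_service,       // PS: Deref<Target = dyn PathInfoService>
    //     nar_calculation_service, // NS: NarCalculationService
    // );
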
diff --git a/tvix/store/src/proto/tests/pathinfo.rs b/tvix/store/src/proto/tests/pathinfo.rs
index 36c4f33933..4d0834878d 100644
--- a/tvix/store/src/proto/tests/pathinfo.rs
+++ b/tvix/store/src/proto/tests/pathinfo.rs
@@ -4,95 +4,81 @@ use bytes::Bytes;
 use data_encoding::BASE64;
 use nix_compat::nixbase32;
 use nix_compat::store_path::{self, StorePathRef};
-use test_case::test_case;
+use rstest::rstest;
 use tvix_castore::proto as castorepb;
 
-#[test_case(
-    None,
-    Err(ValidatePathInfoError::NoNodePresent) ;
-    "No node"
-)]
-#[test_case(
-    Some(castorepb::Node { node: None }),
-    Err(ValidatePathInfoError::NoNodePresent);
-    "No node 2"
-)]
-fn validate_no_node(
-    t_node: Option<castorepb::Node>,
-    t_result: Result<StorePathRef, ValidatePathInfoError>,
+#[rstest]
+#[case::no_node(None, Err(ValidatePathInfoError::NoNodePresent))]
+#[case::no_node_2(Some(castorepb::Node { node: None}), Err(ValidatePathInfoError::NoNodePresent))]
+fn validate_pathinfo(
+    #[case] node: Option<castorepb::Node>,
+    #[case] exp_result: Result<StorePathRef, ValidatePathInfoError>,
 ) {
     // construct the PathInfo object
     let p = PathInfo {
-        node: t_node,
+        node,
         ..Default::default()
     };
-    assert_eq!(t_result, p.validate());
+
+    assert_eq!(exp_result, p.validate());
+
+    let err = p.validate().expect_err("validation should fail");
+    assert!(matches!(err, ValidatePathInfoError::NoNodePresent));
 }
 
-#[test_case(
-    castorepb::DirectoryNode {
-        name: DUMMY_NAME.into(),
+#[rstest]
+#[case::ok(castorepb::DirectoryNode {
+        name: DUMMY_PATH.into(),
         digest: DUMMY_DIGEST.clone().into(),
         size: 0,
-    },
-    Ok(StorePathRef::from_bytes(DUMMY_NAME.as_bytes()).expect("must succeed"));
-    "ok"
-)]
-#[test_case(
-    castorepb::DirectoryNode {
-        name: DUMMY_NAME.into(),
+}, Ok(StorePathRef::from_bytes(DUMMY_PATH.as_bytes()).unwrap()))]
+#[case::invalid_digest_length(castorepb::DirectoryNode {
+        name: DUMMY_PATH.into(),
         digest: Bytes::new(),
         size: 0,
-    },
-    Err(ValidatePathInfoError::InvalidRootNode(castorepb::ValidateNodeError::InvalidDigestLen(0)));
-    "invalid digest length"
-)]
-#[test_case(
-    castorepb::DirectoryNode {
+}, Err(ValidatePathInfoError::InvalidRootNode(castorepb::ValidateNodeError::InvalidDigestLen(0))))]
+#[case::invalid_node_name_no_storepath(castorepb::DirectoryNode {
         name: "invalid".into(),
         digest: DUMMY_DIGEST.clone().into(),
         size: 0,
-    },
-    Err(ValidatePathInfoError::InvalidNodeName(
+}, Err(ValidatePathInfoError::InvalidNodeName(
         "invalid".into(),
         store_path::Error::InvalidLength
-    ));
-    "invalid node name"
-)]
+)))]
 fn validate_directory(
-    t_directory_node: castorepb::DirectoryNode,
-    t_result: Result<StorePathRef, ValidatePathInfoError>,
+    #[case] directory_node: castorepb::DirectoryNode,
+    #[case] exp_result: Result<StorePathRef, ValidatePathInfoError>,
 ) {
     // construct the PathInfo object
     let p = PathInfo {
         node: Some(castorepb::Node {
-            node: Some(castorepb::node::Node::Directory(t_directory_node)),
+            node: Some(castorepb::node::Node::Directory(directory_node)),
         }),
         ..Default::default()
     };
-    assert_eq!(t_result, p.validate());
+    assert_eq!(exp_result, p.validate());
 }
 
-#[test_case(
+#[rstest]
+#[case::ok(
     castorepb::FileNode {
-        name: DUMMY_NAME.into(),
+        name: DUMMY_PATH.into(),
         digest: DUMMY_DIGEST.clone().into(),
         size: 0,
         executable: false,
     },
-    Ok(StorePathRef::from_bytes(DUMMY_NAME.as_bytes()).expect("must succeed"));
-    "ok"
+    Ok(StorePathRef::from_bytes(DUMMY_PATH.as_bytes()).unwrap())
 )]
-#[test_case(
+#[case::invalid_digest_len(
     castorepb::FileNode {
-        name: DUMMY_NAME.into(),
+        name: DUMMY_PATH.into(),
         digest: Bytes::new(),
         ..Default::default()
     },
-    Err(ValidatePathInfoError::InvalidRootNode(castorepb::ValidateNodeError::InvalidDigestLen(0)));
-    "invalid digest length"
+    Err(ValidatePathInfoError::InvalidRootNode(castorepb::ValidateNodeError::InvalidDigestLen(0)))
 )]
-#[test_case(
+#[case::invalid_node_name(
     castorepb::FileNode {
         name: "invalid".into(),
         digest: DUMMY_DIGEST.clone().into(),
@@ -101,32 +87,31 @@ fn validate_directory(
     Err(ValidatePathInfoError::InvalidNodeName(
         "invalid".into(),
         store_path::Error::InvalidLength
-    ));
-    "invalid node name"
+    ))
 )]
 fn validate_file(
-    t_file_node: castorepb::FileNode,
-    t_result: Result<StorePathRef, ValidatePathInfoError>,
+    #[case] file_node: castorepb::FileNode,
+    #[case] exp_result: Result<StorePathRef, ValidatePathInfoError>,
 ) {
     // construct the PathInfo object
     let p = PathInfo {
         node: Some(castorepb::Node {
-            node: Some(castorepb::node::Node::File(t_file_node)),
+            node: Some(castorepb::node::Node::File(file_node)),
         }),
         ..Default::default()
     };
-    assert_eq!(t_result, p.validate());
+    assert_eq!(exp_result, p.validate());
 }
 
-#[test_case(
+#[rstest]
+#[case::ok(
     castorepb::SymlinkNode {
-        name: DUMMY_NAME.into(),
+        name: DUMMY_PATH.into(),
         target: "foo".into(),
     },
-    Ok(StorePathRef::from_bytes(DUMMY_NAME.as_bytes()).expect("must succeed"));
-    "ok"
+    Ok(StorePathRef::from_bytes(DUMMY_PATH.as_bytes()).unwrap())
 )]
-#[test_case(
+#[case::invalid_node_name(
     castorepb::SymlinkNode {
         name: "invalid".into(),
         target: "foo".into(),
@@ -134,21 +119,20 @@ fn validate_file(
     Err(ValidatePathInfoError::InvalidNodeName(
         "invalid".into(),
         store_path::Error::InvalidLength
-    ));
-    "invalid node name"
+    ))
 )]
 fn validate_symlink(
-    t_symlink_node: castorepb::SymlinkNode,
-    t_result: Result<StorePathRef, ValidatePathInfoError>,
+    #[case] symlink_node: castorepb::SymlinkNode,
+    #[case] exp_result: Result<StorePathRef, ValidatePathInfoError>,
 ) {
     // construct the PathInfo object
     let p = PathInfo {
         node: Some(castorepb::Node {
-            node: Some(castorepb::node::Node::Symlink(t_symlink_node)),
+            node: Some(castorepb::node::Node::Symlink(symlink_node)),
         }),
         ..Default::default()
     };
-    assert_eq!(t_result, p.validate());
+    assert_eq!(exp_result, p.validate());
 }
 
 /// Ensure parsing a correct PathInfo without narinfo populated succeeds.
@@ -235,7 +219,7 @@ fn validate_inconsistent_narinfo_reference_name_digest() {
     match path_info.validate().expect_err("must fail") {
         ValidatePathInfoError::InconsistentNarinfoReferenceNameDigest(0, e_expected, e_actual) => {
             assert_eq!(path_info.references[0][..], e_expected[..]);
-            assert_eq!(DUMMY_OUTPUT_HASH, e_actual);
+            assert_eq!(DUMMY_PATH_DIGEST, e_actual);
         }
         e => panic!("unexpected error: {:?}", e),
     }
@@ -273,7 +257,7 @@ fn validate_valid_deriver() {
     let narinfo = path_info.narinfo.as_mut().unwrap();
     narinfo.deriver = Some(crate::proto::StorePath {
         name: "foo".to_string(),
-        digest: Bytes::from(DUMMY_OUTPUT_HASH.as_slice()),
+        digest: Bytes::from(DUMMY_PATH_DIGEST.as_slice()),
     });
 
     path_info.validate().expect("must validate");
diff --git a/tvix/store/src/tests/fixtures.rs b/tvix/store/src/tests/fixtures.rs
index 500ac0aa5b..1c8359a2c0 100644
--- a/tvix/store/src/tests/fixtures.rs
+++ b/tvix/store/src/tests/fixtures.rs
@@ -13,8 +13,8 @@ use crate::proto::{
     NarInfo, PathInfo,
 };
 
-pub const DUMMY_NAME: &str = "00000000000000000000000000000000-dummy";
-pub const DUMMY_OUTPUT_HASH: [u8; 20] = [0; 20];
+pub const DUMMY_PATH: &str = "00000000000000000000000000000000-dummy";
+pub const DUMMY_PATH_DIGEST: [u8; 20] = [0; 20];
 
 lazy_static! {
     /// The NAR representation of a symlink pointing to `/nix/store/somewhereelse`
@@ -106,12 +106,12 @@ lazy_static! {
     pub static ref PATH_INFO_WITHOUT_NARINFO : PathInfo = PathInfo {
         node: Some(castorepb::Node {
             node: Some(castorepb::node::Node::Directory(castorepb::DirectoryNode {
-                name: DUMMY_NAME.into(),
+                name: DUMMY_PATH.into(),
                 digest: DUMMY_DIGEST.clone().into(),
                 size: 0,
             })),
         }),
-        references: vec![DUMMY_OUTPUT_HASH.as_slice().into()],
+        references: vec![DUMMY_PATH_DIGEST.as_slice().into()],
         narinfo: None,
     };
 
@@ -123,7 +123,7 @@ lazy_static! {
             nar_size: 0,
             nar_sha256: DUMMY_DIGEST.clone().into(),
             signatures: vec![],
-            reference_names: vec![DUMMY_NAME.to_string()],
+            reference_names: vec![DUMMY_PATH.to_string()],
             deriver: None,
             ca: Some(Ca { r#type: ca::Hash::NarSha256.into(), digest:  DUMMY_DIGEST.clone().into() })
         }),
diff --git a/tvix/store/src/utils.rs b/tvix/store/src/utils.rs
index 0b171377bd..e6e42f6ec4 100644
--- a/tvix/store/src/utils.rs
+++ b/tvix/store/src/utils.rs
@@ -10,9 +10,10 @@ use tvix_castore::{
     directoryservice::{self, DirectoryService},
 };
 
+use crate::nar::{NarCalculationService, SimpleRenderer};
 use crate::pathinfoservice::{self, PathInfoService};
 
-/// Construct the three store handles from their addrs.
+/// Construct the store handles from their addrs.
 pub async fn construct_services(
     blob_service_addr: impl AsRef<str>,
     directory_service_addr: impl AsRef<str>,
@@ -21,6 +22,7 @@ pub async fn construct_services(
     Arc<dyn BlobService>,
     Arc<dyn DirectoryService>,
     Box<dyn PathInfoService>,
+    Box<dyn NarCalculationService>,
 )> {
     let blob_service: Arc<dyn BlobService> = blobservice::from_addr(blob_service_addr.as_ref())
         .await?
@@ -36,7 +38,18 @@ pub async fn construct_services(
     )
     .await?;
 
-    Ok((blob_service, directory_service, path_info_service))
+    // TODO: grpc client also implements NarCalculationService
+    let nar_calculation_service = Box::new(SimpleRenderer::new(
+        blob_service.clone(),
+        directory_service.clone(),
+    )) as Box<dyn NarCalculationService>;
+
+    Ok((
+        blob_service,
+        directory_service,
+        path_info_service,
+        nar_calculation_service,
+    ))
 }
 
 /// The inverse of [tokio_util::io::SyncIoBridge].
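
Since `construct_services` now returns a fourth handle, existing call sites destructure one more element. A hedged usage sketch (the addresses are placeholders):

    // let (blob_service, directory_service, path_info_service, nar_calculation_service) =
    //     construct_services("memory://", "memory://", "memory://").await?;
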
diff --git a/tvix/tools/crunch-v2/Cargo.lock b/tvix/tools/crunch-v2/Cargo.lock
index cff5509d0b..3748d7e4e9 100644
--- a/tvix/tools/crunch-v2/Cargo.lock
+++ b/tvix/tools/crunch-v2/Cargo.lock
@@ -752,6 +752,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
 
 [[package]]
+name = "enum-primitive-derive"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c"
+dependencies = [
+ "num-traits",
+ "quote",
+ "syn 2.0.39",
+]
+
+[[package]]
 name = "enum_dispatch"
 version = "0.3.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1394,12 +1405,16 @@ dependencies = [
  "data-encoding",
  "ed25519",
  "ed25519-dalek",
+ "enum-primitive-derive",
  "glob",
  "nom",
+ "num-traits",
+ "pin-project-lite",
  "serde",
  "serde_json",
  "sha2 0.10.8",
  "thiserror",
+ "tokio",
 ]
 
 [[package]]
@@ -1432,9 +1447,9 @@ dependencies = [
 
 [[package]]
 name = "num-traits"
-version = "0.2.17"
+version = "0.2.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
 dependencies = [
  "autocfg",
  "libm",
@@ -2682,9 +2697,9 @@ dependencies = [
 
 [[package]]
 name = "tokio"
-version = "1.34.0"
+version = "1.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9"
+checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
 dependencies = [
  "backtrace",
  "bytes",
diff --git a/tvix/tools/crunch-v2/Cargo.toml b/tvix/tools/crunch-v2/Cargo.toml
index 1e3f025250..d2b7126bd2 100644
--- a/tvix/tools/crunch-v2/Cargo.toml
+++ b/tvix/tools/crunch-v2/Cargo.toml
@@ -14,7 +14,7 @@ bstr = "1.8.0"
 bytes = "1.5.0"
 
 futures = "0.3.29"
-tokio = { version = "1.34.0", features = ["full"] }
+tokio = { version = "1.37.0", features = ["full"] }
 
 rusoto_core = { version = "0.48.0", default-features = false, features = ["hyper-rustls"] }
 rusoto_s3 = { version = "0.48.0", default-features = false, features = ["rustls"] }
diff --git a/tvix/tools/crunch-v2/src/main.rs b/tvix/tools/crunch-v2/src/main.rs
index a5d538f6be..5be8c28e29 100644
--- a/tvix/tools/crunch-v2/src/main.rs
+++ b/tvix/tools/crunch-v2/src/main.rs
@@ -147,7 +147,7 @@ fn ingest(node: nar::Node, name: Vec<u8>, avg_chunk_size: u32) -> Result<proto::
             let mut symlinks = vec![];
 
             while let Some(node) = reader.next()? {
-                match ingest(node.node, node.name, avg_chunk_size)? {
+                match ingest(node.node, node.name.to_owned(), avg_chunk_size)? {
                     proto::path::Node::Directory(node) => {
                         directories.push(node);
                     }
diff --git a/tvix/tools/narinfo2parquet/Cargo.lock b/tvix/tools/narinfo2parquet/Cargo.lock
index e59f70732d..070a468510 100644
--- a/tvix/tools/narinfo2parquet/Cargo.lock
+++ b/tvix/tools/narinfo2parquet/Cargo.lock
@@ -487,6 +487,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
 
 [[package]]
+name = "enum-primitive-derive"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c"
+dependencies = [
+ "num-traits",
+ "quote",
+ "syn 2.0.39",
+]
+
+[[package]]
 name = "enum_dispatch"
 version = "0.3.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -878,9 +889,9 @@ dependencies = [
 
 [[package]]
 name = "mio"
-version = "0.8.9"
+version = "0.8.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0"
+checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
 dependencies = [
  "libc",
  "wasi",
@@ -930,12 +941,16 @@ dependencies = [
  "data-encoding",
  "ed25519",
  "ed25519-dalek",
+ "enum-primitive-derive",
  "glob",
  "nom",
+ "num-traits",
+ "pin-project-lite",
  "serde",
  "serde_json",
  "sha2",
  "thiserror",
+ "tokio",
 ]
 
 [[package]]
@@ -968,9 +983,9 @@ dependencies = [
 
 [[package]]
 name = "num-traits"
-version = "0.2.17"
+version = "0.2.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
 dependencies = [
  "autocfg",
  "libm",
@@ -1805,9 +1820,9 @@ dependencies = [
 
 [[package]]
 name = "tokio"
-version = "1.33.0"
+version = "1.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653"
+checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
 dependencies = [
  "backtrace",
  "bytes",
@@ -1816,10 +1831,22 @@ dependencies = [
  "num_cpus",
  "pin-project-lite",
  "socket2",
+ "tokio-macros",
  "windows-sys",
 ]
 
 [[package]]
+name = "tokio-macros"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.39",
+]
+
+[[package]]
 name = "tokio-util"
 version = "0.7.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2091,18 +2118,18 @@ checksum = "9828b178da53440fa9c766a3d2f73f7cf5d0ac1fe3980c1e5018d899fd19e07b"
 
 [[package]]
 name = "zerocopy"
-version = "0.7.25"
+version = "0.7.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557"
+checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087"
 dependencies = [
  "zerocopy-derive",
 ]
 
 [[package]]
 name = "zerocopy-derive"
-version = "0.7.25"
+version = "0.7.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b"
+checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b"
 dependencies = [
  "proc-macro2",
  "quote",
diff --git a/tvix/tools/narinfo2parquet/Cargo.nix b/tvix/tools/narinfo2parquet/Cargo.nix
index 27a5d684b6..0ae873c844 100644
--- a/tvix/tools/narinfo2parquet/Cargo.nix
+++ b/tvix/tools/narinfo2parquet/Cargo.nix
@@ -1,4 +1,4 @@
-# This file was @generated by crate2nix 0.13.0 with the command:
+# This file was @generated by crate2nix 0.14.0 with the command:
 #   "generate" "--all-features"
 # See https://github.com/kolloch/crate2nix for more info.
 
@@ -1429,6 +1429,32 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" "use_std" ];
       };
+      "enum-primitive-derive" = rec {
+        crateName = "enum-primitive-derive";
+        version = "0.3.0";
+        edition = "2018";
+        sha256 = "0k6wcf58h5kh64yq5nfq71va53kaya0kzxwsjwbgwm2n2zd9axxs";
+        procMacro = true;
+        authors = [
+          "Doug Goldstein <cardoe@cardoe.com>"
+        ];
+        dependencies = [
+          {
+            name = "num-traits";
+            packageId = "num-traits";
+            usesDefaultFeatures = false;
+          }
+          {
+            name = "quote";
+            packageId = "quote";
+          }
+          {
+            name = "syn";
+            packageId = "syn 2.0.39";
+          }
+        ];
+
+      };
       "enum_dispatch" = rec {
         crateName = "enum_dispatch";
         version = "0.3.12";
@@ -2511,9 +2537,9 @@ rec {
       };
       "mio" = rec {
         crateName = "mio";
-        version = "0.8.9";
+        version = "0.8.11";
         edition = "2018";
-        sha256 = "1l23hg513c23nhcdzvk25caaj28mic6qgqadbn8axgj6bqf2ikix";
+        sha256 = "034byyl0ardml5yliy1hmvx8arkmn9rv479pid794sm07ia519m4";
         authors = [
           "Carl Lerche <me@carllerche.com>"
           "Thomas de Zeeuw <thomasdezeeuw@gmail.com>"
@@ -2689,6 +2715,10 @@ rec {
             packageId = "ed25519-dalek";
           }
           {
+            name = "enum-primitive-derive";
+            packageId = "enum-primitive-derive";
+          }
+          {
             name = "glob";
             packageId = "glob";
           }
@@ -2697,6 +2727,15 @@ rec {
             packageId = "nom";
           }
           {
+            name = "num-traits";
+            packageId = "num-traits";
+          }
+          {
+            name = "pin-project-lite";
+            packageId = "pin-project-lite";
+            optional = true;
+          }
+          {
             name = "serde";
             packageId = "serde";
             features = [ "derive" ];
@@ -2713,6 +2752,12 @@ rec {
             name = "thiserror";
             packageId = "thiserror";
           }
+          {
+            name = "tokio";
+            packageId = "tokio";
+            optional = true;
+            features = [ "io-util" "macros" ];
+          }
         ];
         devDependencies = [
           {
@@ -2721,9 +2766,13 @@ rec {
           }
         ];
         features = {
-          "async" = [ "futures-util" ];
-          "futures-util" = [ "dep:futures-util" ];
+          "async" = [ "tokio" ];
+          "default" = [ "async" "wire" ];
+          "pin-project-lite" = [ "dep:pin-project-lite" ];
+          "tokio" = [ "dep:tokio" ];
+          "wire" = [ "tokio" "pin-project-lite" ];
         };
+        resolvedDefaultFeatures = [ "async" "default" "pin-project-lite" "tokio" "wire" ];
       };
       "nom" = rec {
         crateName = "nom";
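For readers who don't stare at crate2nix output often: in the `features` attrset above, each attribute names a feature and maps to the features it implies, while `resolvedDefaultFeatures` records the fixed point that feature resolution reached. Walking the new nix-compat table by hand (a sketch using only names from the hunk above):

```nix
# default -> async, wire
# async   -> tokio              (enables the optional tokio dependency)
# wire    -> tokio, pin-project-lite
# Taking the transitive closure of "default" therefore yields exactly
# the resolvedDefaultFeatures line recorded above:
[ "async" "default" "pin-project-lite" "tokio" "wire" ]
```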
@@ -2792,9 +2841,9 @@ rec {
       };
       "num-traits" = rec {
         crateName = "num-traits";
-        version = "0.2.17";
-        edition = "2018";
-        sha256 = "0z16bi5zwgfysz6765v3rd6whfbjpihx3mhsn4dg8dzj2c221qrr";
+        version = "0.2.19";
+        edition = "2021";
+        sha256 = "0h984rhdkkqd4ny9cif7y2azl3xdfb7768hb9irhpsch4q3gq787";
         authors = [
           "The Rust Project Developers"
         ];
@@ -6026,9 +6075,9 @@ rec {
       };
       "tokio" = rec {
         crateName = "tokio";
-        version = "1.33.0";
+        version = "1.37.0";
         edition = "2021";
-        sha256 = "0lynj8nfqziviw72qns9mjlhmnm66bsc5bivy5g5x6gp7q720f2g";
+        sha256 = "11v7qhvpwsf976frqgrjl1jy308bdkxq195gb38cypx7xkzypnqs";
         authors = [
           "Tokio Contributors <team@tokio.rs>"
         ];
@@ -6072,6 +6121,11 @@ rec {
             features = [ "all" ];
           }
           {
+            name = "tokio-macros";
+            packageId = "tokio-macros";
+            optional = true;
+          }
+          {
             name = "windows-sys";
             packageId = "windows-sys";
             optional = true;
@@ -6116,7 +6170,33 @@ rec {
           "tracing" = [ "dep:tracing" ];
           "windows-sys" = [ "dep:windows-sys" ];
         };
-        resolvedDefaultFeatures = [ "bytes" "default" "io-util" "libc" "mio" "net" "num_cpus" "rt" "rt-multi-thread" "socket2" "sync" "time" "windows-sys" ];
+        resolvedDefaultFeatures = [ "bytes" "default" "io-util" "libc" "macros" "mio" "net" "num_cpus" "rt" "rt-multi-thread" "socket2" "sync" "time" "tokio-macros" "windows-sys" ];
+      };
+      "tokio-macros" = rec {
+        crateName = "tokio-macros";
+        version = "2.2.0";
+        edition = "2021";
+        sha256 = "0fwjy4vdx1h9pi4g2nml72wi0fr27b5m954p13ji9anyy8l1x2jv";
+        procMacro = true;
+        authors = [
+          "Tokio Contributors <team@tokio.rs>"
+        ];
+        dependencies = [
+          {
+            name = "proc-macro2";
+            packageId = "proc-macro2";
+          }
+          {
+            name = "quote";
+            packageId = "quote";
+          }
+          {
+            name = "syn";
+            packageId = "syn 2.0.39";
+            features = [ "full" ];
+          }
+        ];
+
       };
       "tokio-util" = rec {
         crateName = "tokio-util";
@@ -7673,9 +7753,9 @@ rec {
       };
       "zerocopy" = rec {
         crateName = "zerocopy";
-        version = "0.7.25";
+        version = "0.7.34";
         edition = "2018";
-        sha256 = "0mv5w4fq1kcpw1ydcb5cvr8zdms5pqy0r60g04ayzpqfgjk6klwc";
+        sha256 = "11xhrwixm78m6ca1jdxf584wdwvpgg7q00vg21fhwl0psvyf71xf";
         authors = [
           "Joshua Liebow-Feeser <joshlf@google.com>"
         ];
@@ -7709,9 +7789,9 @@ rec {
       };
       "zerocopy-derive" = rec {
         crateName = "zerocopy-derive";
-        version = "0.7.25";
+        version = "0.7.34";
         edition = "2018";
-        sha256 = "0svxr32pp4lav1vjar127g2r09gpiajxn0yv1k66r8hrlayl1wf2";
+        sha256 = "0fqvglw01w3hp7xj9gdk1800x9j7v58s9w8ijiyiz2a7krb39s8m";
         procMacro = true;
         authors = [
           "Joshua Liebow-Feeser <joshlf@google.com>"
@@ -7969,8 +8049,9 @@ rec {
             # because we compiled those test binaries in the former and not the latter.
            # So all paths will expect the source tree to be there and not in the build top directly.
            # For example, $NIX_BUILD_TOP is typically /build.
-            # TODO(raitobezarius): I believe there could be more edge cases if `crate.sourceRoot`
-            # do exist but it's very hard to reason about them, so let's wait until the first bug report.
+            # NOTE: There could be edge cases when `crate.sourceRoot` is set, but
+            # they are hard to reason about, so none are handled here.
+            # Open a bug if you run into one!
             mkdir -p source/
             cd source/
 
diff --git a/tvix/website/landing-en.md b/tvix/website/landing-en.md
index 61a011dee9..f677f20f2f 100644
--- a/tvix/website/landing-en.md
+++ b/tvix/website/landing-en.md
@@ -15,7 +15,7 @@ There are several projects within Tvix, such as:
 * `//tvix/castore` - subtree storage/transfer in a content-addressed fashion
 * `//tvix/cli` - preliminary REPL & CLI implementation for Tvix
 * `//tvix/eval` - an implementation of the Nix programming language
-* `//tvix/nar-bridge` - a HTTP webserver providing a Nix HTTP Binary Cache interface in front of a tvix-store
+* `//tvix/nar-bridge[-go]` - an HTTP webserver providing a Nix HTTP Binary Cache interface in front of a tvix-store
 * `//tvix/nix-compat` - a Rust library for compatibility with C++ Nix, providing features like encodings, hashing schemes and formats
 * `//tvix/serde` - a Rust library for using the Nix language for app configuration
 * `//tvix/store` - a "filesystem" linking Nix store paths and metadata with the content-addressed layer