author     Florian Klink <flokli@flokli.de>  2022-11-19T20·34+0000
committer  flokli <flokli@flokli.de>  2023-09-17T13·24+0000
commit     0ecd10bf307049b9833e69f331ec049ae8840d85 (patch)
tree       1718b6e0cd7cb3177b951c88dff1dba11faecabf /tvix
parent     683d3e0d2d1de30eb7895861627203e62702a770 (diff)
feat(tvix/nar-bridge): init r/6600
This provides a Nix HTTP Binary Cache interface in front of a tvix-store
that's reachable via gRPC.
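
For illustration, a minimal sketch of how a client could talk to that HTTP
interface, assuming the default listen address of [::]:9000 from the serve
command below. The /nar/<narhash>.nar route is defined in nar_get.go in this
change; the <outputhash>.narinfo path and the concrete hash are assumptions
based on the standard Nix HTTP binary cache layout:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// hypothetical output hash of a store path known to the tvix-store behind the bridge
	resp, err := http.Get("http://localhost:9000/0cq75gn1aahbwz66sab01vxm3rl7jkgb.narinfo")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// the NARInfo response names the NAR to fetch next (a /nar/<narhash>.nar URL)
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(body))
}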

TODOs:

 - Remove the import command and move serve up to the toplevel; we
   already have the nix-copy-closure and tvix-store commands.
 - Wire this into CI. We should be able to fetch the protos as a
   third-party dependency.
 - Check if we can test nar-bridge more easily in an integration test.
 - Ensure we support connecting to unix sockets and grpc+http at least,
   using the same syntax as tvix-store.
 - Don't buffer the entire blob when rendering a NAR.

Co-Authored-By: Connor Brewster <cbrewster@hey.com>
Co-Authored-By: Márton Boros <martonboros@gmail.com>
Co-Authored-By: Vo Minh Thu <noteed@gmail.com>

Change-Id: I6064474e49dfe78cea67676957462d9f28658d4a
Reviewed-on: https://cl.tvl.fyi/c/depot/+/9339
Tested-by: BuildkiteCI
Reviewed-by: tazjin <tazjin@tvl.su>
Diffstat (limited to 'tvix')
-rw-r--r--  tvix/nar-bridge/.gitignore  1
-rw-r--r--  tvix/nar-bridge/README.md  7
-rw-r--r--  tvix/nar-bridge/cmd/nar_bridge/import.go  57
-rw-r--r--  tvix/nar-bridge/cmd/nar_bridge/main.go  30
-rw-r--r--  tvix/nar-bridge/cmd/nar_bridge/serve.go  60
-rw-r--r--  tvix/nar-bridge/default.nix  10
-rw-r--r--  tvix/nar-bridge/go.mod  35
-rw-r--r--  tvix/nar-bridge/go.sum  129
-rw-r--r--  tvix/nar-bridge/pkg/reader/hashers.go  66
-rw-r--r--  tvix/nar-bridge/pkg/reader/reader.go  264
-rw-r--r--  tvix/nar-bridge/pkg/reader/reader_test.go  568
-rw-r--r--  tvix/nar-bridge/pkg/server/blob_upload.go  48
-rw-r--r--  tvix/nar-bridge/pkg/server/directory_upload.go  66
-rw-r--r--  tvix/nar-bridge/pkg/server/nar_get.go  212
-rw-r--r--  tvix/nar-bridge/pkg/server/nar_put.go  140
-rw-r--r--  tvix/nar-bridge/pkg/server/narinfo_get.go  146
-rw-r--r--  tvix/nar-bridge/pkg/server/narinfo_put.go  174
-rw-r--r--  tvix/nar-bridge/pkg/server/server.go  86
-rw-r--r--  tvix/nar-bridge/pkg/server/util.go  24
-rw-r--r--  tvix/nar-bridge/pkg/writer/writer.go  278
-rw-r--r--  tvix/nar-bridge/pkg/writer/writer_pick_next_node_test.go  51
-rw-r--r--  tvix/nar-bridge/pkg/writer/writer_test.go  211
-rw-r--r--  tvix/nar-bridge/testdata/emptydirectory.nar  bin 0 -> 96 bytes
-rw-r--r--  tvix/nar-bridge/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar  bin 0 -> 464152 bytes
-rw-r--r--  tvix/nar-bridge/testdata/onebyteexecutable.nar  bin 0 -> 152 bytes
-rw-r--r--  tvix/nar-bridge/testdata/onebyteregular.nar  bin 0 -> 120 bytes
-rw-r--r--  tvix/nar-bridge/testdata/symlink.nar  bin 0 -> 136 bytes
27 files changed, 2663 insertions, 0 deletions
diff --git a/tvix/nar-bridge/.gitignore b/tvix/nar-bridge/.gitignore
new file mode 100644
index 000000000000..04260dbc58e5
--- /dev/null
+++ b/tvix/nar-bridge/.gitignore
@@ -0,0 +1 @@
+/nar_bridge
diff --git a/tvix/nar-bridge/README.md b/tvix/nar-bridge/README.md
new file mode 100644
index 000000000000..b14ee7af7b10
--- /dev/null
+++ b/tvix/nar-bridge/README.md
@@ -0,0 +1,7 @@
+# //tvix/nar-bridge
+
+This exposes an HTTP Binary Cache interface (GET/HEAD/PUT requests) for a `tvix-
+store`.
+
+It can be used to configure a tvix-store as a substituter for Nix, or to upload
+store paths from Nix via `nix copy` into a `tvix-store`.
diff --git a/tvix/nar-bridge/cmd/nar_bridge/import.go b/tvix/nar-bridge/cmd/nar_bridge/import.go
new file mode 100644
index 000000000000..51b99c93a04b
--- /dev/null
+++ b/tvix/nar-bridge/cmd/nar_bridge/import.go
@@ -0,0 +1,57 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"os/signal"
+
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+
+	"code.tvl.fyi/tvix/nar-bridge/pkg/reader"
+	log "github.com/sirupsen/logrus"
+)
+
+type ImportCmd struct {
+	NarPath string `name:"nar-path" help:"A path to a NAR file"`
+}
+
+// `help:"Read a NAR file and display some information"`
+
+func (cmd *ImportCmd) Run() error {
+	retcode := 0
+
+	defer func() { os.Exit(retcode) }()
+
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt)
+
+	go func() {
+		for range c {
+			log.Info("Received Signal, shutting down…")
+			os.Exit(1)
+		}
+	}()
+
+	log.Infof("Reading %v...", cmd.NarPath)
+
+	f, err := os.Open(cmd.NarPath)
+	if err != nil {
+		log.Fatalf("unable to open %v: %v", cmd.NarPath, err)
+	}
+
+	r := reader.New(f)
+
+	actualPathInfo, err := r.Import(
+		context.Background(),
+		func(fileReader io.Reader) error {
+			return nil
+		},
+		func(directory *storev1pb.Directory) error {
+			return nil
+		},
+	)
+	if err != nil {
+		log.Fatalf("failed to import: %v", err)
+	}
+
+	fmt.Printf("Node: %+v\n", actualPathInfo.Node)
+	fmt.Printf("References: %+v\n", actualPathInfo.References)
+	fmt.Printf("Narinfo: %+v\n", actualPathInfo.Narinfo)
+	return nil
+}
diff --git a/tvix/nar-bridge/cmd/nar_bridge/main.go b/tvix/nar-bridge/cmd/nar_bridge/main.go
new file mode 100644
index 000000000000..78ed3e272d69
--- /dev/null
+++ b/tvix/nar-bridge/cmd/nar_bridge/main.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+	"os"
+
+	"github.com/alecthomas/kong"
+)
+
+//nolint:gochecknoglobals
+var cli struct {
+	// TODO: make log level configurable
+	Import ImportCmd `kong:"cmd,name='import',help='Import a local NAR file into a tvix-store'"`
+	Serve  ServeCmd  `kong:"cmd,name='serve',help='Expose a tvix-store RPC interface as NAR/NARInfo'"`
+}
+
+func main() {
+	parser, err := kong.New(&cli)
+	if err != nil {
+		panic(err)
+	}
+
+	ctx, err := parser.Parse(os.Args[1:])
+	if err != nil {
+		panic(err)
+	}
+	// Call the Run() method of the selected parsed command.
+	err = ctx.Run()
+
+	ctx.FatalIfErrorf(err)
+}
diff --git a/tvix/nar-bridge/cmd/nar_bridge/serve.go b/tvix/nar-bridge/cmd/nar_bridge/serve.go
new file mode 100644
index 000000000000..6566257176d8
--- /dev/null
+++ b/tvix/nar-bridge/cmd/nar_bridge/serve.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+	"os"
+	"os/signal"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+
+	"code.tvl.fyi/tvix/nar-bridge/pkg/server"
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	log "github.com/sirupsen/logrus"
+)
+
+type ServeCmd struct {
+	ListenAddr      string `name:"listen-addr" help:"The address this service listens on" type:"string" default:"[::]:9000"` //nolint:lll
+	EnableAccessLog bool   `name:"access-log" help:"Enable access logging" type:"bool" default:"true" negatable:""`          //nolint:lll
+	StoreAddr       string `name:"store-addr" help:"The address to the tvix-store RPC interface this will connect to"`
+}
+
+// `help:"Expose a tvix-store RPC interface as NAR/NARInfo"`
+func (cmd *ServeCmd) Run() error {
+	retcode := 0
+
+	defer func() { os.Exit(retcode) }()
+
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt)
+
+	go func() {
+		for range c {
+			log.Info("Received Signal, shutting down…")
+			//s.Close()
+			os.Exit(1)
+		}
+	}()
+
+	// connect to tvix-store
+	log.Debugf("Dialing to %v", cmd.StoreAddr)
+	conn, err := grpc.Dial(cmd.StoreAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		log.Fatalf("did not connect: %v", err)
+	}
+	defer conn.Close()
+
+	log.Printf("Starting nar-bridge at %v", cmd.ListenAddr)
+	s := server.New(
+		storev1pb.NewDirectoryServiceClient(conn),
+		storev1pb.NewBlobServiceClient(conn),
+		storev1pb.NewPathInfoServiceClient(conn),
+		cmd.EnableAccessLog,
+		30,
+	)
+
+	err = s.ListenAndServe(cmd.ListenAddr)
+	if err != nil {
+		log.Errorf("Server failed: %v", err)
+	}
+	return nil
+}
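
One of the TODOs in the commit message asks for unix socket support using the
same syntax as tvix-store. For illustration only, a hedged sketch of how the
dial above could cover that using grpc-go's "unix://" target scheme; the socket
path is hypothetical and this is not something this change implements:

package main

import (
	log "github.com/sirupsen/logrus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// grpc-go resolves targets like "unix:///run/tvix-store.sock" as well as
	// plain "host:port" pairs, so the same Dial call can serve both cases.
	conn, err := grpc.Dial("unix:///run/tvix-store.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
}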
diff --git a/tvix/nar-bridge/default.nix b/tvix/nar-bridge/default.nix
new file mode 100644
index 000000000000..4f26c894966d
--- /dev/null
+++ b/tvix/nar-bridge/default.nix
@@ -0,0 +1,10 @@
+# Builds the nar-bridge Go binary.
+
+{ depot, pkgs, lib, ... }:
+
+pkgs.buildGoModule {
+  name = "nar-bridge";
+  src = depot.third_party.gitignoreSource ./.;
+
+  vendorHash = "sha256-ankJbu6mHziF04NTA8opnWH765Jv1wQALYI8SeEst1Q=";
+}
diff --git a/tvix/nar-bridge/go.mod b/tvix/nar-bridge/go.mod
new file mode 100644
index 000000000000..dc22e32147f7
--- /dev/null
+++ b/tvix/nar-bridge/go.mod
@@ -0,0 +1,35 @@
+module code.tvl.fyi/tvix/nar-bridge
+
+require (
+	code.tvl.fyi/tvix/store/protos v0.0.0-20230909121302-7b7aa6704a9d
+	github.com/alecthomas/kong v0.7.1
+	github.com/go-chi/chi v1.5.4
+	github.com/go-chi/chi/v5 v5.0.7
+	github.com/google/go-cmp v0.5.9
+	github.com/nix-community/go-nix v0.0.0-20230825195510-c72199eca18e
+	github.com/sirupsen/logrus v1.9.0
+	github.com/stretchr/testify v1.8.1
+	google.golang.org/grpc v1.51.0
+	google.golang.org/protobuf v1.28.1
+	lukechampine.com/blake3 v1.1.7
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+	github.com/minio/sha256-simd v1.0.0 // indirect
+	github.com/mr-tron/base58 v1.2.0 // indirect
+	github.com/multiformats/go-multihash v0.2.1 // indirect
+	github.com/multiformats/go-varint v0.0.6 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/spaolacci/murmur3 v1.1.0 // indirect
+	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
+	golang.org/x/net v0.7.0 // indirect
+	golang.org/x/sys v0.5.0 // indirect
+	golang.org/x/text v0.7.0 // indirect
+	google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+
+go 1.19
diff --git a/tvix/nar-bridge/go.sum b/tvix/nar-bridge/go.sum
new file mode 100644
index 000000000000..e2c182c9b25d
--- /dev/null
+++ b/tvix/nar-bridge/go.sum
@@ -0,0 +1,129 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+code.tvl.fyi/tvix/store/protos v0.0.0-20230909121302-7b7aa6704a9d h1:Vnmh6ve4xVj7GPValMR+/bZC6IadLwa/phL6HUZ6DdA=
+code.tvl.fyi/tvix/store/protos v0.0.0-20230909121302-7b7aa6704a9d/go.mod h1:uOGlmwyEraDd9kzAWHdDeZdNgKrpOAmtWCdMeY+DVKs=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2ojoH/0=
+github.com/alecthomas/kong v0.7.1 h1:azoTh0IOfwlAX3qN9sHWTxACE2oV8Bg2gAwBsMwDQY4=
+github.com/alecthomas/kong v0.7.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U=
+github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs=
+github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg=
+github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8=
+github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
+github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
+github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY=
+github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/nix-community/go-nix v0.0.0-20230825195510-c72199eca18e h1:15CPg2PQMyBl+TTEKuonrQqS9uOJyi7JcuU0FpvV088=
+github.com/nix-community/go-nix v0.0.0-20230825195510-c72199eca18e/go.mod h1:y3eASc0gMh26jjoP9Xz+qqMKjTnqJgG1RG8xvKvFR8s=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
+lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
diff --git a/tvix/nar-bridge/pkg/reader/hashers.go b/tvix/nar-bridge/pkg/reader/hashers.go
new file mode 100644
index 000000000000..7e98d2062d21
--- /dev/null
+++ b/tvix/nar-bridge/pkg/reader/hashers.go
@@ -0,0 +1,66 @@
+package reader
+
+import (
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+)
+
+var _ io.Reader = &Hasher{}
+
+// Hasher wraps an io.Reader and feeds all bytes read through it into a hash
+// function. You can ask it for the digest of the hash function used internally,
+// and for the number of bytes that were read.
+type Hasher struct {
+	r         io.Reader
+	h         hash.Hash
+	bytesRead uint32
+}
+
+func NewHasher(r io.Reader, h hash.Hash) *Hasher {
+	return &Hasher{
+		r:         r,
+		h:         h,
+		bytesRead: 0,
+	}
+}
+
+func (h *Hasher) Read(p []byte) (int, error) {
+	nRead, rdErr := h.r.Read(p)
+
+	// Write the bytes read from the reader into the hash.
+	// We need to do this regardless of whether there was a read error;
+	// nRead always describes the number of bytes successfully read.
+	nHash, hashErr := h.h.Write(p[0:nRead])
+	if hashErr != nil {
+		return nRead, fmt.Errorf("unable to write to hash: %w", hashErr)
+	}
+
+	// We assume here the hash function accepts the whole p in one go,
+	// and doesn't early-return on the Write.
+	// We compare it with nRead and bail out if that was not the case.
+	if nHash != nRead {
+		return nRead, fmt.Errorf("hash didn't accept the full write")
+	}
+
+	// update the bytesRead counter
+	h.bytesRead += uint32(nRead)
+
+	if rdErr != nil {
+		if errors.Is(rdErr, io.EOF) {
+			return nRead, rdErr
+		}
+		return nRead, fmt.Errorf("error from underlying reader: %w", rdErr)
+	}
+
+	return nRead, hashErr
+}
+
+func (h *Hasher) BytesWritten() uint32 {
+	return h.bytesRead
+}
+
+func (h *Hasher) Sum(b []byte) []byte {
+	return h.h.Sum(b)
+}
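
A brief usage sketch of the Hasher above (names are taken from this file; the
input string is made up): wrap any io.Reader, consume it, then ask for the
digest and the byte count.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"

	"code.tvl.fyi/tvix/nar-bridge/pkg/reader"
)

func main() {
	h := reader.NewHasher(strings.NewReader("hello nar-bridge"), sha256.New())

	// Reading through the wrapper feeds every byte into the hash as a side effect.
	if _, err := io.Copy(io.Discard, h); err != nil {
		panic(err)
	}

	fmt.Println("bytes:", h.BytesWritten())
	fmt.Println("sha256:", hex.EncodeToString(h.Sum(nil)))
}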
diff --git a/tvix/nar-bridge/pkg/reader/reader.go b/tvix/nar-bridge/pkg/reader/reader.go
new file mode 100644
index 000000000000..6d34cd168db4
--- /dev/null
+++ b/tvix/nar-bridge/pkg/reader/reader.go
@@ -0,0 +1,264 @@
+package reader
+
+import (
+	"context"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"strings"
+
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"github.com/nix-community/go-nix/pkg/nar"
+	"lukechampine.com/blake3"
+)
+
+type Reader struct {
+	hrSha256 *Hasher
+}
+
+// An item on the directories stack
+type item struct {
+	path      string
+	directory *storev1pb.Directory
+}
+
+func New(r io.Reader) *Reader {
+	// Instead of using the underlying reader directly, wrap it with a
+	// hasher calculating sha256, and feed that into the NAR reader.
+	hrSha256 := NewHasher(r, sha256.New())
+
+	return &Reader{
+		hrSha256: hrSha256,
+	}
+}
+
+// Import reads from the internally-wrapped reader,
+// and calls the callback functions whenever regular file contents are
+// encountered, or a Directory node is about to be finished.
+func (r *Reader) Import(
+	ctx context.Context,
+	// callback function called with each regular file content
+	fileCb func(fileReader io.Reader) error,
+	// callback function called with each finalized directory node
+	directoryCb func(directory *storev1pb.Directory) error,
+) (*storev1pb.PathInfo, error) {
+
+	// construct a NAR reader, by reading through hrSha256
+	narReader, err := nar.NewReader(r.hrSha256)
+	if err != nil {
+		return nil, fmt.Errorf("failed to instantiate nar reader: %w", err)
+	}
+	defer narReader.Close()
+
+	// If we store a symlink or regular file at the root, these are not nil.
+	// If they are nil, we instead have a stackDirectory.
+	var rootSymlink *storev1pb.SymlinkNode
+	var rootFile *storev1pb.FileNode
+	var stackDirectory *storev1pb.Directory
+
+	var stack = []item{}
+
+	// popFromStack is used when we transition to a different directory or
+	// drain the stack when we reach the end of the NAR.
+	// It adds the popped element to the element underneath if any,
+	// and passes it to the directoryCb callback.
+	// This function may only be called if the stack is not already empty.
+	popFromStack := func() error {
+		// Keep the top item, and "resize" the stack slice.
+		// This only makes the last element inaccessible, and chances are high
+		// we're re-using that space anyway.
+		toPop := stack[len(stack)-1]
+		stack = stack[:len(stack)-1]
+
+		// if there's still a parent left on the stack, refer to it from there.
+		if len(stack) > 0 {
+			dgst, err := toPop.directory.Digest()
+			if err != nil {
+				return fmt.Errorf("unable to calculate directory digest: %w", err)
+			}
+
+			topOfStack := stack[len(stack)-1].directory
+			topOfStack.Directories = append(topOfStack.Directories, &storev1pb.DirectoryNode{
+				Name:   []byte(path.Base(toPop.path)),
+				Digest: dgst,
+				Size:   toPop.directory.Size(),
+			})
+		}
+		// call the directoryCb
+		if err := directoryCb(toPop.directory); err != nil {
+			return fmt.Errorf("failed calling directoryCb: %w", err)
+		}
+		// Keep track that we have encountered at least one directory
+		stackDirectory = toPop.directory
+		return nil
+	}
+
+	// Assemble a PathInfo struct, the Node is populated later.
+	assemblePathInfo := func() *storev1pb.PathInfo {
+		return &storev1pb.PathInfo{
+			Node:       nil,
+			References: [][]byte{},
+			Narinfo: &storev1pb.NARInfo{
+				NarSize:        uint64(r.hrSha256.BytesWritten()),
+				NarSha256:      r.hrSha256.Sum(nil),
+				Signatures:     []*storev1pb.NARInfo_Signature{},
+				ReferenceNames: []string{},
+			},
+		}
+	}
+
+	getBasename := func(p string) string {
+		// extract the basename. In case of "/", replace with empty string.
+		basename := path.Base(p)
+		if basename == "/" {
+			basename = ""
+		}
+		return basename
+	}
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		default:
+			// call narReader.Next() to get the next element
+			hdr, err := narReader.Next()
+
+			// If this returns an error, it's either EOF (when we're done reading from the NAR),
+			// or another error
+			if err != nil {
+				// if this is not EOF, bail out
+				if !errors.Is(err, io.EOF) {
+					return nil, fmt.Errorf("failed getting next nar element: %w", err)
+				}
+
+				// The NAR has been read all the way to the end…
+				// Make sure we close the nar reader, which might read some final trailers.
+				if err := narReader.Close(); err != nil {
+					return nil, fmt.Errorf("unable to close nar reader: %w", err)
+				}
+
+				// Check the stack. While it's not empty, we need to pop things off the stack.
+				for len(stack) > 0 {
+					err := popFromStack()
+					if err != nil {
+						return nil, fmt.Errorf("unable to pop from stack: %w", err)
+					}
+
+				}
+
+				// Stack is empty. We now either have a regular or symlink root node, or we encountered at least one directory.
+				// assemble pathInfo with these and return.
+				pi := assemblePathInfo()
+				if rootFile != nil {
+					pi.Node = &storev1pb.Node{
+						Node: &storev1pb.Node_File{
+							File: rootFile,
+						},
+					}
+				}
+				if rootSymlink != nil {
+					pi.Node = &storev1pb.Node{
+						Node: &storev1pb.Node_Symlink{
+							Symlink: rootSymlink,
+						},
+					}
+				}
+				if stackDirectory != nil {
+					// calculate directory digest (i.e. after we received all its contents)
+					dgst, err := stackDirectory.Digest()
+					if err != nil {
+						return nil, fmt.Errorf("unable to calculate root directory digest: %w", err)
+					}
+
+					pi.Node = &storev1pb.Node{
+						Node: &storev1pb.Node_Directory{
+							Directory: &storev1pb.DirectoryNode{
+								Name:   []byte{},
+								Digest: dgst,
+								Size:   stackDirectory.Size(),
+							},
+						},
+					}
+				}
+				return pi, nil
+			}
+
+			// Check for valid path transitions, and pop from the stack if needed.
+			// The nar reader already gives us some guarantees about ordering and illegal transitions,
+			// so we really only need to check if the top-of-stack path is a prefix of the path,
+			// and if it's not, pop from the stack.
+
+			// We don't need to worry about the root node case, because we can only finish the root "/"
+			// if we're at the end of the NAR reader (covered by the EOF check above).
+			if len(stack) > 0 && !strings.HasPrefix(hdr.Path, stack[len(stack)-1].path) {
+				err := popFromStack()
+				if err != nil {
+					return nil, fmt.Errorf("unable to pop from stack: %w", err)
+				}
+			}
+
+			if hdr.Type == nar.TypeSymlink {
+				symlinkNode := &storev1pb.SymlinkNode{
+					Name:   []byte(getBasename(hdr.Path)),
+					Target: []byte(hdr.LinkTarget),
+				}
+				if len(stack) > 0 {
+					topOfStack := stack[len(stack)-1].directory
+					topOfStack.Symlinks = append(topOfStack.Symlinks, symlinkNode)
+				} else {
+					rootSymlink = symlinkNode
+				}
+
+			}
+			if hdr.Type == nar.TypeRegular {
+				// wrap reader with a reader calculating the blake3 hash
+				fileReader := NewHasher(narReader, blake3.New(32, nil))
+
+				err := fileCb(fileReader)
+				if err != nil {
+					return nil, fmt.Errorf("failure from fileCb: %w", err)
+				}
+
+				// drive the file reader to the end, in case the CB function doesn't read
+				// all the way to the end on its own
+				if fileReader.BytesWritten() != uint32(hdr.Size) {
+					_, err := io.ReadAll(fileReader)
+					if err != nil {
+						return nil, fmt.Errorf("unable to read until the end of the file content: %w", err)
+					}
+				}
+
+				// read the blake3 hash
+				dgst := fileReader.Sum(nil)
+
+				fileNode := &storev1pb.FileNode{
+					Name:       []byte(getBasename(hdr.Path)),
+					Digest:     dgst,
+					Size:       uint32(hdr.Size),
+					Executable: hdr.Executable,
+				}
+				if len(stack) > 0 {
+					topOfStack := stack[len(stack)-1].directory
+					topOfStack.Files = append(topOfStack.Files, fileNode)
+				} else {
+					rootFile = fileNode
+				}
+			}
+			if hdr.Type == nar.TypeDirectory {
+				directory := &storev1pb.Directory{
+					Directories: []*storev1pb.DirectoryNode{},
+					Files:       []*storev1pb.FileNode{},
+					Symlinks:    []*storev1pb.SymlinkNode{},
+				}
+				stack = append(stack, item{
+					directory: directory,
+					path:      hdr.Path,
+				})
+			}
+		}
+	}
+}
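
To make the control flow above concrete: Import drives the NAR reader, keeps a
stack of open directories, and emits each finalized Directory leaf-first via
directoryCb, with the root directory last. A small usage sketch, only using
APIs shown in this change; the NAR filename is hypothetical and file contents
are simply discarded:

package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"io"
	"os"

	"code.tvl.fyi/tvix/nar-bridge/pkg/reader"
	storev1pb "code.tvl.fyi/tvix/store/protos"
)

func main() {
	f, err := os.Open("some.nar") // hypothetical NAR file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	pathInfo, err := reader.New(f).Import(
		context.Background(),
		func(fileReader io.Reader) error {
			// Discard file contents; Import reads to the end if we don't.
			return nil
		},
		func(directory *storev1pb.Directory) error {
			dgst, err := directory.Digest()
			if err != nil {
				return err
			}
			// Directories arrive leaf-first, the root directory last.
			fmt.Println("directory:", base64.StdEncoding.EncodeToString(dgst))
			return nil
		},
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("root node: %+v\n", pathInfo.Node)
}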
diff --git a/tvix/nar-bridge/pkg/reader/reader_test.go b/tvix/nar-bridge/pkg/reader/reader_test.go
new file mode 100644
index 000000000000..6ca1ca2d7444
--- /dev/null
+++ b/tvix/nar-bridge/pkg/reader/reader_test.go
@@ -0,0 +1,568 @@
+package reader_test
+
+import (
+	"context"
+	"errors"
+	"io"
+	"os"
+	"testing"
+
+	"code.tvl.fyi/tvix/nar-bridge/pkg/reader"
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"github.com/google/go-cmp/cmp"
+	"github.com/stretchr/testify/require"
+	"google.golang.org/protobuf/testing/protocmp"
+)
+
+func requireProtoEq(t *testing.T, expected interface{}, actual interface{}) {
+	if diff := cmp.Diff(expected, actual, protocmp.Transform()); diff != "" {
+		t.Errorf("unexpected difference:\n%v", diff)
+	}
+}
+
+func mustDigest(d *storev1pb.Directory) []byte {
+	dgst, err := d.Digest()
+	if err != nil {
+		panic(err)
+	}
+	return dgst
+}
+
+func TestSymlink(t *testing.T) {
+	f, err := os.Open("../../testdata/symlink.nar")
+	require.NoError(t, err)
+
+	r := reader.New(f)
+
+	actualPathInfo, err := r.Import(
+		context.Background(),
+		func(fileReader io.Reader) error {
+			panic("no file contents expected!")
+		}, func(directory *storev1pb.Directory) error {
+			panic("no directories expected!")
+		},
+	)
+	require.NoError(t, err)
+
+	expectedPathInfo := &storev1pb.PathInfo{
+		Node: &storev1pb.Node{
+			Node: &storev1pb.Node_Symlink{
+				Symlink: &storev1pb.SymlinkNode{
+					Name:   []byte(""),
+					Target: []byte("/nix/store/somewhereelse"),
+				},
+			},
+		},
+		References: [][]byte{},
+		Narinfo: &storev1pb.NARInfo{
+			NarSize: 136,
+			NarSha256: []byte{
+				0x09, 0x7d, 0x39, 0x7e, 0x9b, 0x58, 0x26, 0x38, 0x4e, 0xaa, 0x16, 0xc4, 0x57, 0x71, 0x5d, 0x1c, 0x1a, 0x51, 0x67, 0x03, 0x13, 0xea, 0xd0, 0xf5, 0x85, 0x66, 0xe0, 0xb2, 0x32, 0x53, 0x9c, 0xf1,
+			},
+			Signatures:     []*storev1pb.NARInfo_Signature{},
+			ReferenceNames: []string{},
+		},
+	}
+
+	requireProtoEq(t, expectedPathInfo, actualPathInfo)
+}
+
+func TestRegular(t *testing.T) {
+	f, err := os.Open("../../testdata/onebyteregular.nar")
+	require.NoError(t, err)
+
+	r := reader.New(f)
+
+	actualPathInfo, err := r.Import(
+		context.Background(),
+		func(fileReader io.Reader) error {
+			contents, err := io.ReadAll(fileReader)
+			require.NoError(t, err, "reading fileReader should not error")
+			require.Equal(t, []byte{0x01}, contents, "contents read from fileReader should match expectations")
+			return nil
+		}, func(directory *storev1pb.Directory) error {
+			panic("no directories expected!")
+		},
+	)
+	require.NoError(t, err)
+
+	// The blake3 digest of the 0x01 byte.
+	BLAKE3_DIGEST_0X01 := []byte{
+		0x48, 0xfc, 0x72, 0x1f, 0xbb, 0xc1, 0x72, 0xe0, 0x92, 0x5f, 0xa2, 0x7a, 0xf1, 0x67, 0x1d,
+		0xe2, 0x25, 0xba, 0x92, 0x71, 0x34, 0x80, 0x29, 0x98, 0xb1, 0x0a, 0x15, 0x68, 0xa1, 0x88,
+		0x65, 0x2b,
+	}
+
+	expectedPathInfo := &storev1pb.PathInfo{
+		Node: &storev1pb.Node{
+			Node: &storev1pb.Node_File{
+				File: &storev1pb.FileNode{
+					Name:       []byte(""),
+					Digest:     BLAKE3_DIGEST_0X01,
+					Size:       1,
+					Executable: false,
+				},
+			},
+		},
+		References: [][]byte{},
+		Narinfo: &storev1pb.NARInfo{
+			NarSize: 120,
+			NarSha256: []byte{
+				0x73, 0x08, 0x50, 0xa8, 0x11, 0x25, 0x9d, 0xbf, 0x3a, 0x68, 0xdc, 0x2e, 0xe8, 0x7a, 0x79, 0xaa, 0x6c, 0xae, 0x9f, 0x71, 0x37, 0x5e, 0xdf, 0x39, 0x6f, 0x9d, 0x7a, 0x91, 0xfb, 0xe9, 0x13, 0x4d,
+			},
+			Signatures:     []*storev1pb.NARInfo_Signature{},
+			ReferenceNames: []string{},
+		},
+	}
+
+	requireProtoEq(t, expectedPathInfo, actualPathInfo)
+}
+
+func TestEmptyDirectory(t *testing.T) {
+	f, err := os.Open("../../testdata/emptydirectory.nar")
+	require.NoError(t, err)
+
+	r := reader.New(f)
+
+	expectedDirectory := &storev1pb.Directory{
+		Directories: []*storev1pb.DirectoryNode{},
+		Files:       []*storev1pb.FileNode{},
+		Symlinks:    []*storev1pb.SymlinkNode{},
+	}
+	actualPathInfo, err := r.Import(
+		context.Background(),
+		func(fileReader io.Reader) error {
+			panic("no file contents expected!")
+		}, func(directory *storev1pb.Directory) error {
+			requireProtoEq(t, expectedDirectory, directory)
+			return nil
+		},
+	)
+	require.NoError(t, err)
+
+	expectedPathInfo := &storev1pb.PathInfo{
+		Node: &storev1pb.Node{
+			Node: &storev1pb.Node_Directory{
+				Directory: &storev1pb.DirectoryNode{
+					Name:   []byte(""),
+					Digest: mustDigest(expectedDirectory),
+					Size:   expectedDirectory.Size(),
+				},
+			},
+		},
+		References: [][]byte{},
+		Narinfo: &storev1pb.NARInfo{
+			NarSize: 96,
+			NarSha256: []byte{
+				0xa5, 0x0a, 0x5a, 0xb6, 0xd9, 0x92, 0xf5, 0x59, 0x8e, 0xdd, 0x92, 0x10, 0x50, 0x59, 0xfa, 0xe9, 0xac, 0xfc, 0x19, 0x29, 0x81, 0xe0, 0x8b, 0xd8, 0x85, 0x34, 0xc2, 0x16, 0x7e, 0x92, 0x52, 0x6a,
+			},
+			Signatures:     []*storev1pb.NARInfo_Signature{},
+			ReferenceNames: []string{},
+		},
+	}
+	requireProtoEq(t, expectedPathInfo, actualPathInfo)
+}
+
+func TestFull(t *testing.T) {
+	f, err := os.Open("../../testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar")
+	require.NoError(t, err)
+
+	r := reader.New(f)
+
+	expectedDirectoryPaths := []string{
+		"/bin",
+		"/share/man/man1",
+		"/share/man/man5",
+		"/share/man/man8",
+		"/share/man",
+		"/share",
+		"/",
+	}
+	expectedDirectories := make(map[string]*storev1pb.Directory, len(expectedDirectoryPaths))
+
+	// /bin is a leaf directory
+	expectedDirectories["/bin"] = &storev1pb.Directory{
+		Directories: []*storev1pb.DirectoryNode{},
+		Files: []*storev1pb.FileNode{
+			{
+				Name: []byte("arp"),
+				Digest: []byte{
+					0xfb, 0xc4, 0x61, 0x4a, 0x29, 0x27, 0x11, 0xcb, 0xcc, 0xe4, 0x99, 0x81, 0x9c, 0xf0, 0xa9, 0x17, 0xf7, 0xd0, 0x91, 0xbe, 0xea, 0x08, 0xcb, 0x5b, 0xaa, 0x76, 0x76, 0xf5, 0xee, 0x4f, 0x82, 0xbb,
+				},
+				Size:       55288,
+				Executable: true,
+			},
+			{
+				Name: []byte("hostname"),
+				Digest: []byte{
+					0x9c, 0x6a, 0xe4, 0xb5, 0xe4, 0x6c, 0xb5, 0x67, 0x45, 0x0e, 0xaa, 0x2a, 0xd8, 0xdd, 0x9b, 0x38, 0xd7, 0xed, 0x01, 0x02, 0x84, 0xf7, 0x26, 0xe1, 0xc7, 0xf3, 0x1c, 0xeb, 0xaa, 0x8a, 0x01, 0x30,
+				},
+				Size:       17704,
+				Executable: true,
+			},
+			{
+				Name: []byte("ifconfig"),
+				Digest: []byte{
+					0x25, 0xbe, 0x3b, 0x1d, 0xf4, 0x1a, 0x45, 0x42, 0x79, 0x09, 0x2c, 0x2a, 0x83, 0xf0, 0x0b, 0xff, 0xe8, 0xc0, 0x9c, 0x26, 0x98, 0x70, 0x15, 0x4d, 0xa8, 0xca, 0x05, 0xfe, 0x92, 0x68, 0x35, 0x2e,
+				},
+				Size:       72576,
+				Executable: true,
+			},
+			{
+				Name: []byte("nameif"),
+				Digest: []byte{
+					0x8e, 0xaa, 0xc5, 0xdb, 0x71, 0x08, 0x8e, 0xe5, 0xe6, 0x30, 0x1f, 0x2c, 0x3a, 0xf2, 0x42, 0x39, 0x0c, 0x57, 0x15, 0xaf, 0x50, 0xaa, 0x1c, 0xdf, 0x84, 0x22, 0x08, 0x77, 0x03, 0x54, 0x62, 0xb1,
+				},
+				Size:       18776,
+				Executable: true,
+			},
+			{
+				Name: []byte("netstat"),
+				Digest: []byte{
+					0x13, 0x34, 0x7e, 0xdd, 0x2a, 0x9a, 0x17, 0x0b, 0x3f, 0xc7, 0x0a, 0xe4, 0x92, 0x89, 0x25, 0x9f, 0xaa, 0xb5, 0x05, 0x6b, 0x24, 0xa7, 0x91, 0xeb, 0xaf, 0xf9, 0xe9, 0x35, 0x56, 0xaa, 0x2f, 0xb2,
+				},
+				Size:       131784,
+				Executable: true,
+			},
+			{
+				Name: []byte("plipconfig"),
+				Digest: []byte{
+					0x19, 0x7c, 0x80, 0xdc, 0x81, 0xdc, 0xb4, 0xc0, 0x45, 0xe1, 0xf9, 0x76, 0x51, 0x4f, 0x50, 0xbf, 0xa4, 0x69, 0x51, 0x9a, 0xd4, 0xa9, 0xe7, 0xaa, 0xe7, 0x0d, 0x53, 0x32, 0xff, 0x28, 0x40, 0x60,
+				},
+				Size:       13160,
+				Executable: true,
+			},
+			{
+				Name: []byte("rarp"),
+				Digest: []byte{
+					0x08, 0x85, 0xb4, 0x85, 0x03, 0x2b, 0x3c, 0x7a, 0x3e, 0x24, 0x4c, 0xf8, 0xcc, 0x45, 0x01, 0x9e, 0x79, 0x43, 0x8c, 0x6f, 0x5e, 0x32, 0x46, 0x54, 0xb6, 0x68, 0x91, 0x8e, 0xa0, 0xcb, 0x6e, 0x0d,
+				},
+				Size:       30384,
+				Executable: true,
+			},
+			{
+				Name: []byte("route"),
+				Digest: []byte{
+					0x4d, 0x14, 0x20, 0x89, 0x9e, 0x76, 0xf4, 0xe2, 0x92, 0x53, 0xee, 0x9b, 0x78, 0x7d, 0x23, 0x80, 0x6c, 0xff, 0xe6, 0x33, 0xdc, 0x4a, 0x10, 0x29, 0x39, 0x02, 0xa0, 0x60, 0xff, 0xe2, 0xbb, 0xd7,
+				},
+				Size:       61928,
+				Executable: true,
+			},
+			{
+				Name: []byte("slattach"),
+				Digest: []byte{
+					0xfb, 0x25, 0xc3, 0x73, 0xb7, 0xb1, 0x0b, 0x25, 0xcd, 0x7b, 0x62, 0xf6, 0x71, 0x83, 0xfe, 0x36, 0x80, 0xf6, 0x48, 0xc3, 0xdb, 0xd8, 0x0c, 0xfe, 0xb8, 0xd3, 0xda, 0x32, 0x9b, 0x47, 0x4b, 0x05,
+				},
+				Size:       35672,
+				Executable: true,
+			},
+		},
+		Symlinks: []*storev1pb.SymlinkNode{
+			{
+				Name:   []byte("dnsdomainname"),
+				Target: []byte("hostname"),
+			},
+			{
+				Name:   []byte("domainname"),
+				Target: []byte("hostname"),
+			},
+			{
+				Name:   []byte("nisdomainname"),
+				Target: []byte("hostname"),
+			},
+			{
+				Name:   []byte("ypdomainname"),
+				Target: []byte("hostname"),
+			},
+		},
+	}
+
+	// /share/man/man1 is a leaf directory.
+	// The parser traversed over /sbin, but only added it to / which is still on the stack.
+	expectedDirectories["/share/man/man1"] = &storev1pb.Directory{
+		Directories: []*storev1pb.DirectoryNode{},
+		Files: []*storev1pb.FileNode{
+			{
+				Name: []byte("dnsdomainname.1.gz"),
+				Digest: []byte{
+					0x98, 0x8a, 0xbd, 0xfa, 0x64, 0xd5, 0xb9, 0x27, 0xfe, 0x37, 0x43, 0x56, 0xb3, 0x18, 0xc7, 0x2b, 0xcb, 0xe3, 0x17, 0x1c, 0x17, 0xf4, 0x17, 0xeb, 0x4a, 0xa4, 0x99, 0x64, 0x39, 0xca, 0x2d, 0xee,
+				},
+				Size:       40,
+				Executable: false,
+			},
+			{
+				Name: []byte("domainname.1.gz"),
+				Digest: []byte{
+					0x98, 0x8a, 0xbd, 0xfa, 0x64, 0xd5, 0xb9, 0x27, 0xfe, 0x37, 0x43, 0x56, 0xb3, 0x18, 0xc7, 0x2b, 0xcb, 0xe3, 0x17, 0x1c, 0x17, 0xf4, 0x17, 0xeb, 0x4a, 0xa4, 0x99, 0x64, 0x39, 0xca, 0x2d, 0xee,
+				},
+				Size:       40,
+				Executable: false,
+			},
+			{
+				Name: []byte("hostname.1.gz"),
+				Digest: []byte{
+					0xbf, 0x89, 0xe6, 0x28, 0x00, 0x24, 0x66, 0x79, 0x70, 0x04, 0x38, 0xd6, 0xdd, 0x9d, 0xf6, 0x0e, 0x0d, 0xee, 0x00, 0xf7, 0x64, 0x4f, 0x05, 0x08, 0x9d, 0xf0, 0x36, 0xde, 0x85, 0xf4, 0x75, 0xdb,
+				},
+				Size:       1660,
+				Executable: false,
+			},
+			{
+				Name: []byte("nisdomainname.1.gz"),
+				Digest: []byte{
+					0x98, 0x8a, 0xbd, 0xfa, 0x64, 0xd5, 0xb9, 0x27, 0xfe, 0x37, 0x43, 0x56, 0xb3, 0x18, 0xc7, 0x2b, 0xcb, 0xe3, 0x17, 0x1c, 0x17, 0xf4, 0x17, 0xeb, 0x4a, 0xa4, 0x99, 0x64, 0x39, 0xca, 0x2d, 0xee,
+				},
+				Size:       40,
+				Executable: false,
+			},
+			{
+				Name: []byte("ypdomainname.1.gz"),
+				Digest: []byte{
+					0x98, 0x8a, 0xbd, 0xfa, 0x64, 0xd5, 0xb9, 0x27, 0xfe, 0x37, 0x43, 0x56, 0xb3, 0x18, 0xc7, 0x2b, 0xcb, 0xe3, 0x17, 0x1c, 0x17, 0xf4, 0x17, 0xeb, 0x4a, 0xa4, 0x99, 0x64, 0x39, 0xca, 0x2d, 0xee,
+				},
+				Size:       40,
+				Executable: false,
+			},
+		},
+		Symlinks: []*storev1pb.SymlinkNode{},
+	}
+
+	// /share/man/man5 is a leaf directory
+	expectedDirectories["/share/man/man5"] = &storev1pb.Directory{
+		Directories: []*storev1pb.DirectoryNode{},
+		Files: []*storev1pb.FileNode{
+			{
+				Name: []byte("ethers.5.gz"),
+				Digest: []byte{
+					0x42, 0x63, 0x8c, 0xc4, 0x18, 0x93, 0xcf, 0x60, 0xd6, 0xff, 0x43, 0xbc, 0x16, 0xb4, 0xfd, 0x22, 0xd2, 0xf2, 0x05, 0x0b, 0x52, 0xdc, 0x6a, 0x6b, 0xff, 0x34, 0xe2, 0x6a, 0x38, 0x3a, 0x07, 0xe3,
+				},
+				Size:       563,
+				Executable: false,
+			},
+		},
+		Symlinks: []*storev1pb.SymlinkNode{},
+	}
+
+	// /share/man/man8 is a leaf directory
+	expectedDirectories["/share/man/man8"] = &storev1pb.Directory{
+		Directories: []*storev1pb.DirectoryNode{},
+		Files: []*storev1pb.FileNode{
+			{
+				Name: []byte("arp.8.gz"),
+				Digest: []byte{
+					0xf5, 0x35, 0x4e, 0xf5, 0xf6, 0x44, 0xf7, 0x52, 0x0f, 0x42, 0xa0, 0x26, 0x51, 0xd9, 0x89, 0xf9, 0x68, 0xf2, 0xef, 0xeb, 0xba, 0xe1, 0xf4, 0x55, 0x01, 0x57, 0x77, 0xb7, 0x68, 0x55, 0x92, 0xef,
+				},
+				Size:       2464,
+				Executable: false,
+			},
+			{
+				Name: []byte("ifconfig.8.gz"),
+				Digest: []byte{
+					0x18, 0x65, 0x25, 0x11, 0x32, 0xee, 0x77, 0x91, 0x35, 0x4c, 0x3c, 0x24, 0xdb, 0xaf, 0x66, 0xdb, 0xfc, 0x17, 0x7b, 0xba, 0xe1, 0x3d, 0x05, 0xd2, 0xca, 0x6e, 0x2c, 0xe4, 0xef, 0xb8, 0xa8, 0xbe,
+				},
+				Size:       3382,
+				Executable: false,
+			},
+			{
+				Name: []byte("nameif.8.gz"),
+				Digest: []byte{
+					0x73, 0xc1, 0x27, 0xe8, 0x3b, 0xa8, 0x49, 0xdc, 0x0e, 0xdf, 0x70, 0x5f, 0xaf, 0x06, 0x01, 0x2c, 0x62, 0xe9, 0x18, 0x67, 0x01, 0x94, 0x64, 0x26, 0xca, 0x95, 0x22, 0xc0, 0xdc, 0xe4, 0x42, 0xb6,
+				},
+				Size:       523,
+				Executable: false,
+			},
+			{
+				Name: []byte("netstat.8.gz"),
+				Digest: []byte{
+					0xc0, 0x86, 0x43, 0x4a, 0x43, 0x57, 0xaa, 0x84, 0xa7, 0x24, 0xa0, 0x7c, 0x65, 0x38, 0x46, 0x1c, 0xf2, 0x45, 0xa2, 0xef, 0x12, 0x44, 0x18, 0xba, 0x52, 0x56, 0xe9, 0x8e, 0x6a, 0x0f, 0x70, 0x63,
+				},
+				Size:       4284,
+				Executable: false,
+			},
+			{
+				Name: []byte("plipconfig.8.gz"),
+				Digest: []byte{
+					0x2a, 0xd9, 0x1d, 0xa8, 0x9e, 0x0d, 0x05, 0xd0, 0xb0, 0x49, 0xaa, 0x64, 0xba, 0x29, 0x28, 0xc6, 0x45, 0xe1, 0xbb, 0x5e, 0x72, 0x8d, 0x48, 0x7b, 0x09, 0x4f, 0x0a, 0x82, 0x1e, 0x26, 0x83, 0xab,
+				},
+				Size:       889,
+				Executable: false,
+			},
+			{
+				Name: []byte("rarp.8.gz"),
+				Digest: []byte{
+					0x3d, 0x51, 0xc1, 0xd0, 0x6a, 0x59, 0x1e, 0x6d, 0x9a, 0xf5, 0x06, 0xd2, 0xe7, 0x7d, 0x7d, 0xd0, 0x70, 0x3d, 0x84, 0x64, 0xc3, 0x7d, 0xfb, 0x10, 0x84, 0x3b, 0xe1, 0xa9, 0xdf, 0x46, 0xee, 0x9f,
+				},
+				Size:       1198,
+				Executable: false,
+			},
+			{
+				Name: []byte("route.8.gz"),
+				Digest: []byte{
+					0x2a, 0x5a, 0x4b, 0x4f, 0x91, 0xf2, 0x78, 0xe4, 0xa9, 0x25, 0xb2, 0x7f, 0xa7, 0x2a, 0xc0, 0x8a, 0x4a, 0x65, 0xc9, 0x5f, 0x07, 0xa0, 0x48, 0x44, 0xeb, 0x46, 0xf9, 0xc9, 0xe1, 0x17, 0x96, 0x21,
+				},
+				Size:       3525,
+				Executable: false,
+			},
+			{
+				Name: []byte("slattach.8.gz"),
+				Digest: []byte{
+					0x3f, 0x05, 0x6b, 0x20, 0xe1, 0xe4, 0xf0, 0xba, 0x16, 0x15, 0x66, 0x6b, 0x57, 0x96, 0xe9, 0x9d, 0x83, 0xa8, 0x20, 0xaf, 0x8a, 0xca, 0x16, 0x4d, 0xa2, 0x6d, 0x94, 0x8e, 0xca, 0x91, 0x8f, 0xd4,
+				},
+				Size:       1441,
+				Executable: false,
+			},
+		},
+		Symlinks: []*storev1pb.SymlinkNode{},
+	}
+
+	// /share/man holds /share/man/man{1,5,8}.
+	expectedDirectories["/share/man"] = &storev1pb.Directory{
+		Directories: []*storev1pb.DirectoryNode{
+			{
+				Name:   []byte("man1"),
+				Digest: mustDigest(expectedDirectories["/share/man/man1"]),
+				Size:   expectedDirectories["/share/man/man1"].Size(),
+			},
+			{
+				Name:   []byte("man5"),
+				Digest: mustDigest(expectedDirectories["/share/man/man5"]),
+				Size:   expectedDirectories["/share/man/man5"].Size(),
+			},
+			{
+				Name:   []byte("man8"),
+				Digest: mustDigest(expectedDirectories["/share/man/man8"]),
+				Size:   expectedDirectories["/share/man/man8"].Size(),
+			},
+		},
+		Files:    []*storev1pb.FileNode{},
+		Symlinks: []*storev1pb.SymlinkNode{},
+	}
+
+	// /share holds /share/man.
+	expectedDirectories["/share"] = &storev1pb.Directory{
+		Directories: []*storev1pb.DirectoryNode{
+			{
+				Name:   []byte("man"),
+				Digest: mustDigest(expectedDirectories["/share/man"]),
+				Size:   expectedDirectories["/share/man"].Size(),
+			},
+		},
+		Files:    []*storev1pb.FileNode{},
+		Symlinks: []*storev1pb.SymlinkNode{},
+	}
+
+	// / holds /bin, /share, and a /sbin symlink.
+	expectedDirectories["/"] = &storev1pb.Directory{
+		Directories: []*storev1pb.DirectoryNode{
+			{
+				Name:   []byte("bin"),
+				Digest: mustDigest(expectedDirectories["/bin"]),
+				Size:   expectedDirectories["/bin"].Size(),
+			},
+			{
+				Name:   []byte("share"),
+				Digest: mustDigest(expectedDirectories["/share"]),
+				Size:   expectedDirectories["/share"].Size(),
+			},
+		},
+		Files: []*storev1pb.FileNode{},
+		Symlinks: []*storev1pb.SymlinkNode{
+			{
+				Name:   []byte("sbin"),
+				Target: []byte("bin"),
+			},
+		},
+	}
+	// assert we populated the two fixtures properly
+	require.Equal(t, len(expectedDirectoryPaths), len(expectedDirectories))
+
+	numDirectoriesReceived := 0
+
+	actualPathInfo, err := r.Import(
+		context.Background(),
+		func(fileReader io.Reader) error {
+			// Don't really bother reading and comparing the contents here,
+			// We already verify the right digests are produced by comparing the
+			// directoryCb calls, and TestRegular ensures the reader works.
+			// This also covers the case when the client doesn't read from the reader, and that the
+			// importer will take care of reading all the way to the end no matter what.
+			return nil
+		}, func(directory *storev1pb.Directory) error {
+			// use numDirectoriesReceived to look up the path of the Directory object we expect at this specific invocation.
+			currentDirectoryPath := expectedDirectoryPaths[numDirectoriesReceived]
+
+			expectedDirectory, found := expectedDirectories[currentDirectoryPath]
+			require.True(t, found, "must find the current directory")
+
+			requireProtoEq(t, expectedDirectory, directory)
+
+			numDirectoriesReceived += 1
+			return nil
+		},
+	)
+	require.NoError(t, err)
+
+	expectedPathInfo := &storev1pb.PathInfo{
+		Node: &storev1pb.Node{
+			Node: &storev1pb.Node_Directory{
+				Directory: &storev1pb.DirectoryNode{
+					Name:   []byte(""),
+					Digest: mustDigest(expectedDirectories["/"]),
+					Size:   expectedDirectories["/"].Size(),
+				},
+			},
+		},
+		References: [][]byte{},
+		Narinfo: &storev1pb.NARInfo{
+			NarSize: 464152,
+			NarSha256: []byte{
+				0xc6, 0xe1, 0x55, 0xb3, 0x45, 0x6e, 0x30, 0xb7, 0x61, 0x22, 0x63, 0xec, 0x09, 0x50, 0x70, 0x81, 0x1c, 0xaf, 0x8a, 0xbf, 0xd5, 0x9f, 0xaa, 0x72, 0xab, 0x82, 0xa5, 0x92, 0xef, 0xde, 0xb2, 0x53,
+			},
+			Signatures:     []*storev1pb.NARInfo_Signature{},
+			ReferenceNames: []string{},
+		},
+	}
+	requireProtoEq(t, expectedPathInfo, actualPathInfo)
+}
+
+// TestCallbackErrors ensures that errors returned from the callback function
+// bubble up to the importer process, and are not ignored.
+func TestCallbackErrors(t *testing.T) {
+	t.Run("callback file", func(t *testing.T) {
+		// Pick an example NAR with a regular file.
+		f, err := os.Open("../../testdata/onebyteregular.nar")
+		require.NoError(t, err)
+
+		r := reader.New(f)
+
+		targetErr := errors.New("expected error")
+
+		_, err = r.Import(
+			context.Background(),
+			func(fileReader io.Reader) error {
+				return targetErr
+			}, func(directory *storev1pb.Directory) error {
+				panic("no directories expected!")
+			},
+		)
+		require.ErrorIs(t, err, targetErr)
+	})
+	t.Run("callback directory", func(t *testing.T) {
+		// Pick an example NAR with a directory node
+		f, err := os.Open("../../testdata/emptydirectory.nar")
+		require.NoError(t, err)
+
+		r := reader.New(f)
+
+		targetErr := errors.New("expected error")
+
+		_, err = r.Import(
+			context.Background(),
+			func(fileReader io.Reader) error {
+				panic("no file contents expected!")
+			}, func(directory *storev1pb.Directory) error {
+				return targetErr
+			},
+		)
+		require.ErrorIs(t, err, targetErr)
+	})
+}
diff --git a/tvix/nar-bridge/pkg/server/blob_upload.go b/tvix/nar-bridge/pkg/server/blob_upload.go
new file mode 100644
index 000000000000..fe554f5a5a2b
--- /dev/null
+++ b/tvix/nar-bridge/pkg/server/blob_upload.go
@@ -0,0 +1,48 @@
+package server
+
+import (
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"context"
+	"encoding/base64"
+	"fmt"
+	log "github.com/sirupsen/logrus"
+	"io"
+)
+
+// genBlobServiceWriteCb returns a callback function that can be used as the
+// fileCb argument to reader.Import.
+func genBlobServiceWriteCb(ctx context.Context, blobServiceClient storev1pb.BlobServiceClient) func(io.Reader) error {
+	return func(fileReader io.Reader) error {
+		// Read from fileReader into a buffer.
+		// We currently buffer all contents and send them to blobServiceClient at once,
+		// but that's about to change.
+		contents, err := io.ReadAll(fileReader)
+		if err != nil {
+			return fmt.Errorf("unable to read all contents from file reader: %w", err)
+		}
+
+		log := log.WithField("blob_size", len(contents))
+
+		log.Infof("about to upload blob")
+
+		putter, err := blobServiceClient.Put(ctx)
+		if err != nil {
+			// return error to the importer
+			return fmt.Errorf("error from blob service: %w", err)
+		}
+		err = putter.Send(&storev1pb.BlobChunk{
+			Data: contents,
+		})
+		if err != nil {
+			return fmt.Errorf("putting blob chunk: %w", err)
+		}
+		resp, err := putter.CloseAndRecv()
+		if err != nil {
+			return fmt.Errorf("close blob putter: %w", err)
+		}
+
+		log.WithField("digest", base64.StdEncoding.EncodeToString(resp.GetDigest())).Info("uploaded blob")
+
+		return nil
+	}
+}
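
The comment above notes that buffering the whole blob is temporary. A hedged
sketch of a chunked variant, using only the BlobService calls already used
above; the function name and the 1 MiB chunk size are arbitrary choices for
illustration, not part of this change:

package server

import (
	"context"
	"errors"
	"fmt"
	"io"

	storev1pb "code.tvl.fyi/tvix/store/protos"
)

// genChunkedBlobServiceWriteCb is a sketch of a fileCb that streams the blob
// to the blob service in fixed-size chunks instead of buffering it entirely.
func genChunkedBlobServiceWriteCb(ctx context.Context, blobServiceClient storev1pb.BlobServiceClient) func(io.Reader) error {
	return func(fileReader io.Reader) error {
		putter, err := blobServiceClient.Put(ctx)
		if err != nil {
			return fmt.Errorf("error from blob service: %w", err)
		}

		buf := make([]byte, 1024*1024) // arbitrary chunk size
		for {
			n, err := fileReader.Read(buf)
			if n > 0 {
				if err := putter.Send(&storev1pb.BlobChunk{Data: buf[:n]}); err != nil {
					return fmt.Errorf("putting blob chunk: %w", err)
				}
			}
			if errors.Is(err, io.EOF) {
				break
			}
			if err != nil {
				return fmt.Errorf("reading blob: %w", err)
			}
		}

		if _, err := putter.CloseAndRecv(); err != nil {
			return fmt.Errorf("close blob putter: %w", err)
		}
		return nil
	}
}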
diff --git a/tvix/nar-bridge/pkg/server/directory_upload.go b/tvix/nar-bridge/pkg/server/directory_upload.go
new file mode 100644
index 000000000000..02b173698042
--- /dev/null
+++ b/tvix/nar-bridge/pkg/server/directory_upload.go
@@ -0,0 +1,66 @@
+package server
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	log "github.com/sirupsen/logrus"
+)
+
+type DirectoriesUploader struct {
+	ctx                       context.Context
+	directoryServiceClient    storev1pb.DirectoryServiceClient
+	directoryServicePutStream storev1pb.DirectoryService_PutClient
+}
+
+func NewDirectoriesUploader(ctx context.Context, directoryServiceClient storev1pb.DirectoryServiceClient) *DirectoriesUploader {
+	return &DirectoriesUploader{
+		ctx:                       ctx,
+		directoryServiceClient:    directoryServiceClient,
+		directoryServicePutStream: nil,
+	}
+}
+
+func (du *DirectoriesUploader) Put(directory *storev1pb.Directory) error {
+	directoryDgst, err := directory.Digest()
+	if err != nil {
+		return fmt.Errorf("failed calculating directory digest: %w", err)
+	}
+
+	// Send the directory to the directory service
+	// If the stream hasn't been initialized yet, do it first
+	if du.directoryServicePutStream == nil {
+		directoryServicePutStream, err := du.directoryServiceClient.Put(du.ctx)
+		if err != nil {
+			return fmt.Errorf("unable to initialize directory service put stream: %v", err)
+		}
+		du.directoryServicePutStream = directoryServicePutStream
+	}
+
+	// send the directory out
+	err = du.directoryServicePutStream.Send(directory)
+	if err != nil {
+		return fmt.Errorf("error sending directory: %w", err)
+	}
+	log.WithField("digest", base64.StdEncoding.EncodeToString(directoryDgst)).Info("uploaded directory")
+
+	return nil
+}
+
+// Done is called once we're done uploading directories. It closes the
+// directory service put stream (if one was opened) and returns its response.
+func (du *DirectoriesUploader) Done() (*storev1pb.PutDirectoryResponse, error) {
+	// only close once, and only if we opened.
+	if du.directoryServicePutStream == nil {
+		return nil, nil
+	}
+	putDirectoryResponse, err := du.directoryServicePutStream.CloseAndRecv()
+	if err != nil {
+		return nil, fmt.Errorf("unable to close directory service put stream: %v", err)
+	}
+
+	du.directoryServicePutStream = nil
+
+	return putDirectoryResponse, nil
+}
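
To show how this uploader is meant to be wired up with the reader package, a
brief sketch; the importNar function name is hypothetical, it assumes an
already-connected DirectoryServiceClient, and blob upload is left out (see
blob_upload.go above):

package main

import (
	"context"
	"io"
	"os"

	"code.tvl.fyi/tvix/nar-bridge/pkg/reader"
	"code.tvl.fyi/tvix/nar-bridge/pkg/server"
	storev1pb "code.tvl.fyi/tvix/store/protos"
)

func importNar(ctx context.Context, narPath string, directoryServiceClient storev1pb.DirectoryServiceClient) (*storev1pb.PathInfo, error) {
	f, err := os.Open(narPath)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	du := server.NewDirectoriesUploader(ctx, directoryServiceClient)

	pathInfo, err := reader.New(f).Import(
		ctx,
		func(fileReader io.Reader) error {
			// a real caller would upload blob contents here (see blob_upload.go)
			return nil
		},
		// every finalized Directory is sent to the directory service
		du.Put,
	)
	if err != nil {
		return nil, err
	}

	// flush the put stream and receive the response
	if _, err := du.Done(); err != nil {
		return nil, err
	}
	return pathInfo, nil
}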
diff --git a/tvix/nar-bridge/pkg/server/nar_get.go b/tvix/nar-bridge/pkg/server/nar_get.go
new file mode 100644
index 000000000000..c8c35e69ff16
--- /dev/null
+++ b/tvix/nar-bridge/pkg/server/nar_get.go
@@ -0,0 +1,212 @@
+package server
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"net/http"
+	"sync"
+
+	"code.tvl.fyi/tvix/nar-bridge/pkg/writer"
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"github.com/go-chi/chi/v5"
+	nixhash "github.com/nix-community/go-nix/pkg/hash"
+	"github.com/nix-community/go-nix/pkg/nixbase32"
+	log "github.com/sirupsen/logrus"
+)
+
+const (
+	narUrl = "/nar/{narhash:^([" + nixbase32.Alphabet + "]{52})$}.nar"
+)
+
+func renderNar(
+	ctx context.Context,
+	log *log.Entry,
+	directoryServiceClient storev1pb.DirectoryServiceClient,
+	blobServiceClient storev1pb.BlobServiceClient,
+	narHashToPathInfoMu *sync.Mutex,
+	narHashToPathInfo map[string]*storev1pb.PathInfo,
+	w io.Writer,
+	narHash *nixhash.Hash,
+	headOnly bool,
+) error {
+	// look in the lookup table
+	narHashToPathInfoMu.Lock()
+	pathInfo, found := narHashToPathInfo[narHash.SRIString()]
+	narHashToPathInfoMu.Unlock()
+
+	// if we didn't find anything, return 404.
+	if !found {
+		return fmt.Errorf("narHash not found: %w", fs.ErrNotExist)
+	}
+
+	// if this was only a head request, we're done.
+	if headOnly {
+		return nil
+	}
+
+	directories := make(map[string]*storev1pb.Directory)
+
+	// If the root node is a directory, ask the directory service for all directories
+	if pathInfoDirectory := pathInfo.GetNode().GetDirectory(); pathInfoDirectory != nil {
+		rootDirectoryDigest := pathInfoDirectory.GetDigest()
+		log = log.WithField("root_directory", base64.StdEncoding.EncodeToString(rootDirectoryDigest))
+
+		directoryStream, err := directoryServiceClient.Get(ctx, &storev1pb.GetDirectoryRequest{
+			ByWhat: &storev1pb.GetDirectoryRequest_Digest{
+				Digest: rootDirectoryDigest,
+			},
+			Recursive: true,
+		})
+		if err != nil {
+			return fmt.Errorf("unable to query directory stream: %w", err)
+		}
+
+		// For now, we just stream all of these locally and put them into a hashmap,
+		// which is used in the lookup function below.
+		for {
+			directory, err := directoryStream.Recv()
+			if err != nil {
+				if err == io.EOF {
+					break
+				}
+				return fmt.Errorf("unable to receive from directory stream: %w", err)
+			}
+
+			// calculate directory digest
+			// TODO: do we need to do any more validation?
+			directoryDgst, err := directory.Digest()
+			if err != nil {
+				return fmt.Errorf("unable to calculate directory digest: %w", err)
+			}
+
+			// TODO: debug level
+			log.WithField("directory", base64.StdEncoding.EncodeToString(directoryDgst)).Info("received directory node")
+
+			directories[hex.EncodeToString(directoryDgst)] = directory
+		}
+
+	}
+
+	// render the NAR file
+	err := writer.Export(
+		w,
+		pathInfo,
+		func(directoryDigest []byte) (*storev1pb.Directory, error) {
+			// TODO: debug level
+			log.WithField("directory", base64.StdEncoding.EncodeToString(directoryDigest)).Info("Get directory")
+			directoryRefStr := hex.EncodeToString(directoryDigest)
+			directory, found := directories[directoryRefStr]
+			if !found {
+				return nil, fmt.Errorf(
+					"directory with hash %v does not exist: %w",
+					directoryDigest,
+					fs.ErrNotExist,
+				)
+			}
+
+			return directory, nil
+		},
+		func(blobDigest []byte) (io.ReadCloser, error) {
+			// TODO: debug level
+			log.WithField("blob", base64.StdEncoding.EncodeToString(blobDigest)).Info("Get blob")
+			resp, err := blobServiceClient.Read(ctx, &storev1pb.ReadBlobRequest{
+				Digest: blobDigest,
+			})
+			if err != nil {
+				return nil, fmt.Errorf("unable to get blob: %w", err)
+			}
+
+			// TODO: spin up a goroutine producing this.
+			data := &bytes.Buffer{}
+			for {
+				chunk, err := resp.Recv()
+				if errors.Is(err, io.EOF) {
+					break
+				}
+				if err != nil {
+					return nil, fmt.Errorf("read chunk: %w", err)
+				}
+				_, err = data.Write(chunk.GetData())
+				if err != nil {
+					return nil, fmt.Errorf("buffer chunk: %w", err)
+				}
+			}
+			return io.NopCloser(data), nil
+		},
+	)
+	if err != nil {
+		return fmt.Errorf("unable to export nar: %w", err)
+	}
+	return nil
+}
+
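+// registerNarGet registers the HEAD and GET handlers for NAR files,
+// both backed by renderNar.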
+func registerNarGet(s *Server) {
+	// TODO: properly compose this
+	s.handler.Head(narUrl, func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+
+		ctx := r.Context()
+
+		// parse the narhash sent in the request URL
+		narHash, err := parseNarHashFromUrl(chi.URLParamFromCtx(ctx, "narhash"))
+		if err != nil {
+			log.WithError(err).WithField("url", r.URL).Error("unable to decode nar hash from url")
+			w.WriteHeader(http.StatusBadRequest)
+			_, err := w.Write([]byte("unable to decode nar hash from url"))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+
+		log := log.WithField("narhash_url", narHash.SRIString())
+
+		err = renderNar(ctx, log, s.directoryServiceClient, s.blobServiceClient, &s.narHashToPathInfoMu, s.narHashToPathInfo, w, narHash, true)
+		if err != nil {
+			log.WithError(err).Info("unable to render nar")
+			if errors.Is(err, fs.ErrNotExist) {
+				w.WriteHeader(http.StatusNotFound)
+			} else {
+				w.WriteHeader(http.StatusInternalServerError)
+			}
+		}
+
+	})
+	s.handler.Get(narUrl, func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+
+		ctx := r.Context()
+
+		// parse the narhash sent in the request URL
+		narHash, err := parseNarHashFromUrl(chi.URLParamFromCtx(ctx, "narhash"))
+		if err != nil {
+			log.WithError(err).WithField("url", r.URL).Error("unable to decode nar hash from url")
+			w.WriteHeader(http.StatusBadRequest)
+			_, err := w.Write([]byte("unable to decode nar hash from url"))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+
+		log := log.WithField("narhash_url", narHash.SRIString())
+
+		err = renderNar(ctx, log, s.directoryServiceClient, s.blobServiceClient, &s.narHashToPathInfoMu, s.narHashToPathInfo, w, narHash, false)
+		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				w.WriteHeader(http.StatusNotFound)
+			} else {
+				w.WriteHeader(http.StatusInternalServerError)
+			}
+		}
+	})
+}
diff --git a/tvix/nar-bridge/pkg/server/nar_put.go b/tvix/nar-bridge/pkg/server/nar_put.go
new file mode 100644
index 000000000000..9d6752e85bf1
--- /dev/null
+++ b/tvix/nar-bridge/pkg/server/nar_put.go
@@ -0,0 +1,140 @@
+package server
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"net/http"
+
+	"code.tvl.fyi/tvix/nar-bridge/pkg/reader"
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"github.com/go-chi/chi/v5"
+	nixhash "github.com/nix-community/go-nix/pkg/hash"
+	"github.com/nix-community/go-nix/pkg/nixbase32"
+	"github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
+)
+
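+// registerNarPut registers the PUT handler for NAR files.
+// It imports the uploaded NAR into the blob and directory services and records
+// a sparse PathInfo in the narHashToPathInfo lookup map, keyed by NAR hash.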
+func registerNarPut(s *Server) {
+	s.handler.Put(narUrl, func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+
+		ctx := r.Context()
+
+		// parse the narhash sent in the request URL
+		narHashFromUrl, err := parseNarHashFromUrl(chi.URLParamFromCtx(ctx, "narhash"))
+		if err != nil {
+			log.WithError(err).WithField("url", r.URL).Error("unable to decode nar hash from url")
+			w.WriteHeader(http.StatusBadRequest)
+			_, err := w.Write([]byte("unable to decode nar hash from url"))
+			if err != nil {
+				log.WithError(err).Error("unable to write error message to client")
+			}
+
+			return
+		}
+
+		log := log.WithField("narhash_url", narHashFromUrl.SRIString())
+
+		directoriesUploader := NewDirectoriesUploader(ctx, s.directoryServiceClient)
+		defer directoriesUploader.Done() //nolint:errcheck
+
+		rd := reader.New(bufio.NewReader(r.Body))
+		pathInfo, err := rd.Import(
+			ctx,
+			genBlobServiceWriteCb(ctx, s.blobServiceClient),
+			func(directory *storev1pb.Directory) error {
+				return directoriesUploader.Put(directory)
+			},
+		)
+
+		if err != nil {
+			log.Errorf("error during NAR import: %v", err)
+			w.WriteHeader(http.StatusInternalServerError)
+			_, err := w.Write([]byte(fmt.Sprintf("error during NAR import: %v", err)))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+
+		log.Infof("closing the stream")
+
+		// Close the directories uploader
+		directoriesPutResponse, err := directoriesUploader.Done()
+		if err != nil {
+			log.WithError(err).Error("error during directory upload")
+			w.WriteHeader(http.StatusBadRequest)
+			_, err := w.Write([]byte("error during directory upload"))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+		// If we uploaded directories (so directoriesPutResponse isn't nil),
+		// the RootDigest field in directoriesPutResponse should match the digest
+		// of the root node in the PathInfo struct returned by the `Import` call.
+		// This check ensures the server side came up with the same root hash.
+
+		if directoriesPutResponse != nil {
+			rootDigestPathInfo := pathInfo.GetNode().GetDirectory().GetDigest()
+			rootDigestDirectoriesPutResponse := directoriesPutResponse.GetRootDigest()
+
+			log := log.WithFields(logrus.Fields{
+				"root_digest_pathinfo":             rootDigestPathInfo,
+				"root_digest_directories_put_resp": rootDigestDirectoriesPutResponse,
+			})
+			if !bytes.Equal(rootDigestPathInfo, rootDigestDirectoriesPutResponse) {
+				log.Errorf("returned root digest doesn't match what's calculated")
+
+				w.WriteHeader(http.StatusBadRequest)
+				_, err := w.Write([]byte("error in root digest calculation"))
+				if err != nil {
+					log.WithError(err).Error("unable to write error message to client")
+				}
+
+				return
+			}
+		}
+
+		// Compare the nar hash specified in the URL with the one that has been
+		// calculated while processing the NAR file
+		piNarHash, err := nixhash.ParseNixBase32(
+			"sha256:" + nixbase32.EncodeToString(pathInfo.GetNarinfo().NarSha256),
+		)
+		if err != nil {
+			panic("must parse nixbase32")
+		}
+
+		if !bytes.Equal(narHashFromUrl.Digest(), piNarHash.Digest()) {
+			log := log.WithFields(logrus.Fields{
+				"narhash_received_sha256": piNarHash.SRIString(),
+				"narsize":                 pathInfo.GetNarinfo().GetNarSize(),
+			})
+			log.Error("received bytes don't match narhash from URL")
+
+			w.WriteHeader(http.StatusBadRequest)
+			_, err := w.Write([]byte("received bytes don't match narHash specified in URL"))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+
+		// Insert the partial pathinfo structs into our lookup map,
+		// so requesting the NAR file will be possible.
+		// The same PathInfo might exist already, but it'll have the same contents (so
+		// replacing will be a no-op), except maybe the root node Name field value, which
+		// is safe to ignore (as it's not part of the NAR).
+		s.narHashToPathInfoMu.Lock()
+		s.narHashToPathInfo[piNarHash.SRIString()] = pathInfo
+		s.narHashToPathInfoMu.Unlock()
+
+		// Done!
+	})
+
+}
diff --git a/tvix/nar-bridge/pkg/server/narinfo_get.go b/tvix/nar-bridge/pkg/server/narinfo_get.go
new file mode 100644
index 000000000000..977e1136130f
--- /dev/null
+++ b/tvix/nar-bridge/pkg/server/narinfo_get.go
@@ -0,0 +1,146 @@
+package server
+
+import (
+	"context"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"net/http"
+	"path"
+	"strings"
+	"sync"
+
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"github.com/go-chi/chi/v5"
+	nixhash "github.com/nix-community/go-nix/pkg/hash"
+	"github.com/nix-community/go-nix/pkg/narinfo"
+	"github.com/nix-community/go-nix/pkg/narinfo/signature"
+	"github.com/nix-community/go-nix/pkg/nixbase32"
+	"github.com/nix-community/go-nix/pkg/nixpath"
+	log "github.com/sirupsen/logrus"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// renderNarinfo writes narinfo contents to the passed io.Writer, or returns a
+// (wrapped) fs.ErrNotExist error if something doesn't exist.
+// If headOnly is set to true, only the existence is checked, but no content is
+// actually written.
+func renderNarinfo(
+	ctx context.Context,
+	log *log.Entry,
+	pathInfoServiceClient storev1pb.PathInfoServiceClient,
+	narHashToPathInfoMu *sync.Mutex,
+	narHashToPathInfo map[string]*storev1pb.PathInfo,
+	outputHash []byte,
+	w io.Writer,
+	headOnly bool,
+) error {
+	pathInfo, err := pathInfoServiceClient.Get(ctx, &storev1pb.GetPathInfoRequest{
+		ByWhat: &storev1pb.GetPathInfoRequest_ByOutputHash{
+			ByOutputHash: outputHash,
+		},
+	})
+	if err != nil {
+		st, ok := status.FromError(err)
+		if ok {
+			if st.Code() == codes.NotFound {
+				return fmt.Errorf("output hash %v not found: %w", base64.StdEncoding.EncodeToString(outputHash), fs.ErrNotExist)
+			}
+			return fmt.Errorf("unable to get pathinfo, code %v: %w", st.Code(), err)
+		}
+
+		return fmt.Errorf("unable to get pathinfo: %w", err)
+	}
+
+	narHash, err := nixhash.ParseNixBase32("sha256:" + nixbase32.EncodeToString(pathInfo.GetNarinfo().GetNarSha256()))
+	if err != nil {
+		// TODO: return proper error
+		return fmt.Errorf("no usable NarHash found in PathInfo")
+	}
+
+	// add things to the lookup table, in case the same process didn't handle the NAR hash yet.
+	narHashToPathInfoMu.Lock()
+	narHashToPathInfo[narHash.SRIString()] = pathInfo
+	narHashToPathInfoMu.Unlock()
+
+	if headOnly {
+		return nil
+	}
+
+	// convert the signatures from storev1pb signatures to narinfo signatures
+	narinfoSignatures := make([]signature.Signature, 0)
+	for _, pathInfoSignature := range pathInfo.Narinfo.Signatures {
+		narinfoSignatures = append(narinfoSignatures, signature.Signature{
+			Name: pathInfoSignature.GetName(),
+			Data: pathInfoSignature.GetData(),
+		})
+	}
+
+	// extract the name of the node in the pathInfo structure, which will become the output path
+	var nodeName []byte
+	switch v := (pathInfo.GetNode().GetNode()).(type) {
+	case *storev1pb.Node_File:
+		nodeName = v.File.GetName()
+	case *storev1pb.Node_Symlink:
+		nodeName = v.Symlink.GetName()
+	case *storev1pb.Node_Directory:
+		nodeName = v.Directory.GetName()
+	}
+
+	narInfo := narinfo.NarInfo{
+		StorePath:   path.Join(nixpath.StoreDir, string(nodeName)),
+		URL:         "nar/" + nixbase32.EncodeToString(narHash.Digest()) + ".nar",
+		Compression: "none", // TODO: implement zstd compression
+		NarHash:     narHash,
+		NarSize:     uint64(pathInfo.Narinfo.NarSize),
+		References:  pathInfo.Narinfo.GetReferenceNames(),
+		Signatures:  narinfoSignatures,
+	}
+
+	// render .narinfo from pathInfo
+	_, err = io.Copy(w, strings.NewReader(narInfo.String()))
+	if err != nil {
+		return fmt.Errorf("unable to write narinfo to client: %w", err)
+	}
+
+	return nil
+}
+
+func registerNarinfoGet(s *Server) {
+	// GET $outHash.narinfo looks up the PathInfo from the tvix-store,
+	// and then renders a .narinfo file to the client.
+	// It also keeps the PathInfo in the lookup map,
+	// so a subsequent GET /nar/$narhash.nar request can find it.
+	s.handler.Get("/{outputhash:^["+nixbase32.Alphabet+"]{32}}.narinfo", func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+
+		ctx := r.Context()
+		log := log.WithField("outputhash", chi.URLParamFromCtx(ctx, "outputhash"))
+
+		// parse the output hash sent in the request URL
+		outputHash, err := nixbase32.DecodeString(chi.URLParamFromCtx(ctx, "outputhash"))
+		if err != nil {
+			log.WithError(err).Error("unable to decode output hash from url")
+			w.WriteHeader(http.StatusBadRequest)
+			_, err := w.Write([]byte("unable to decode output hash from url"))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+
+		err = renderNarinfo(ctx, log, s.pathInfoServiceClient, &s.narHashToPathInfoMu, s.narHashToPathInfo, outputHash, w, false)
+		if err != nil {
+			log.WithError(err).Info("unable to render narinfo")
+			if errors.Is(err, fs.ErrNotExist) {
+				w.WriteHeader(http.StatusNotFound)
+			} else {
+				w.WriteHeader(http.StatusInternalServerError)
+			}
+		}
+	})
+}
diff --git a/tvix/nar-bridge/pkg/server/narinfo_put.go b/tvix/nar-bridge/pkg/server/narinfo_put.go
new file mode 100644
index 000000000000..c5b4094f8582
--- /dev/null
+++ b/tvix/nar-bridge/pkg/server/narinfo_put.go
@@ -0,0 +1,174 @@
+package server
+
+import (
+	"net/http"
+	"path"
+
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"github.com/go-chi/chi/v5"
+	"github.com/nix-community/go-nix/pkg/narinfo"
+	"github.com/nix-community/go-nix/pkg/nixbase32"
+	"github.com/nix-community/go-nix/pkg/nixpath"
+	"github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
+)
+
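+// registerNarinfoPut registers the PUT handler for .narinfo files.
+// It combines the parsed .narinfo with the sparse PathInfo recorded during the
+// preceding NAR upload and uploads the completed PathInfo to the PathInfoService.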
+func registerNarinfoPut(s *Server) {
+	s.handler.Put("/{outputhash:^["+nixbase32.Alphabet+"]{32}}.narinfo", func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+
+		ctx := r.Context()
+		log := log.WithField("outputhash", chi.URLParamFromCtx(ctx, "outputhash"))
+
+		// TODO: decide on merging behaviour.
+		// Maybe it's fine to add if contents are the same, but more sigs can be added?
+		// Right now, just replace a .narinfo for a path that already exists.
+
+		// read and parse the .narinfo file
+		narInfo, err := narinfo.Parse(r.Body)
+		if err != nil {
+			log.WithError(err).Error("unable to parse narinfo")
+			w.WriteHeader(http.StatusBadRequest)
+			_, err := w.Write([]byte("unable to parse narinfo"))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+
+		log = log.WithFields(logrus.Fields{
+			"narhash":     narInfo.NarHash.SRIString(),
+			"output_path": narInfo.StorePath,
+		})
+
+		var pathInfo *storev1pb.PathInfo
+
+		// look up the narHash in our temporary map
+		s.narHashToPathInfoMu.Lock()
+		pathInfo, found := s.narHashToPathInfo[narInfo.NarHash.SRIString()]
+		s.narHashToPathInfoMu.Unlock()
+		if !found {
+			log.Error("unable to find referred NAR")
+			w.WriteHeader(http.StatusBadRequest)
+			_, err := w.Write([]byte("unable to find referred NAR"))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+
+		// compare fields with what we computed while receiving the NAR file
+
+		// NarSize needs to match
+		if pathInfo.Narinfo.NarSize != narInfo.NarSize {
+			log.Error("narsize mismatch")
+			w.WriteHeader(http.StatusBadRequest)
+			_, err := w.Write([]byte("narsize mismatch"))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+		// We know the narhash in the .narinfo matches one of the two narhashes in the partial pathInfo,
+		// because that's how we found it.
+
+		// FUTUREWORK: We can't compare References yet, but it'd be a good idea to
+		// do reference checking on .nar files server-side during upload.
+		// We however still need to parse them, because we store
+		// the bytes in pathInfo.References, and the full strings in pathInfo.Narinfo.ReferenceNames.
+		referencesBytes := make([][]byte, 0)
+		for _, reference := range narInfo.References {
+			np, err := nixpath.FromString(path.Join(nixpath.StoreDir, reference))
+			if err != nil {
+				log.WithField("reference", reference).WithError(err).Error("unable to parse reference")
+				w.WriteHeader(http.StatusBadRequest)
+				_, err := w.Write([]byte("unable to parse reference"))
+				if err != nil {
+					log.WithError(err).Errorf("unable to write error message to client")
+				}
+
+				return
+			}
+			referencesBytes = append(referencesBytes, np.Digest)
+		}
+
+		// assemble the []*storev1pb.NARInfo_Signature{} from narinfo.Signatures.
+		pbNarinfoSignatures := make([]*storev1pb.NARInfo_Signature, 0)
+		for _, narinfoSig := range narInfo.Signatures {
+			pbNarinfoSignatures = append(pbNarinfoSignatures, &storev1pb.NARInfo_Signature{
+				Name: narinfoSig.Name,
+				Data: narinfoSig.Data,
+			})
+		}
+
+		// If everything matches, we will add References, NAR signatures and the
+		// output path name, and then upload to the pathinfo service.
+		// We want a copy here, because we don't want to mutate the contents in the lookup table
+		// until we get things back from the remote store.
+		pathInfoToUpload := &storev1pb.PathInfo{
+			Node:       nil, // set below
+			References: referencesBytes,
+			Narinfo: &storev1pb.NARInfo{
+				NarSize:        pathInfo.Narinfo.NarSize,
+				NarSha256:      pathInfo.Narinfo.NarSha256,
+				Signatures:     pbNarinfoSignatures,
+				ReferenceNames: narInfo.References,
+			},
+		}
+
+		// We need to add the basename of the storepath from the .narinfo
+		// to the pathInfo to be sent.
+		switch v := (pathInfo.GetNode().GetNode()).(type) {
+		case *storev1pb.Node_File:
+			pathInfoToUpload.Node = &storev1pb.Node{
+				Node: &storev1pb.Node_File{
+					File: &storev1pb.FileNode{
+						Name:       []byte(path.Base(narInfo.StorePath)),
+						Digest:     v.File.Digest,
+						Size:       v.File.Size,
+						Executable: v.File.Executable,
+					},
+				},
+			}
+		case *storev1pb.Node_Symlink:
+			pathInfoToUpload.Node = &storev1pb.Node{
+				Node: &storev1pb.Node_Symlink{
+					Symlink: &storev1pb.SymlinkNode{
+						Name:   []byte(path.Base(narInfo.StorePath)),
+						Target: v.Symlink.Target,
+					},
+				},
+			}
+		case *storev1pb.Node_Directory:
+			pathInfoToUpload.Node = &storev1pb.Node{
+				Node: &storev1pb.Node_Directory{
+					Directory: &storev1pb.DirectoryNode{
+						Name:   []byte(path.Base(narInfo.StorePath)),
+						Digest: v.Directory.Digest,
+						Size:   v.Directory.Size,
+					},
+				},
+			}
+		}
+
+		receivedPathInfo, err := s.pathInfoServiceClient.Put(ctx, pathInfoToUpload)
+		if err != nil {
+			log.WithError(err).Error("unable to upload pathinfo to service")
+			w.WriteHeader(http.StatusInternalServerError)
+			_, err := w.Write([]byte("unable to upload pathinfo to server"))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+
+		log.Infof("received new pathInfo: %+v", receivedPathInfo)
+
+		// TODO: update the local temporary pathinfo with this?
+	})
+}
diff --git a/tvix/nar-bridge/pkg/server/server.go b/tvix/nar-bridge/pkg/server/server.go
new file mode 100644
index 000000000000..083b7f295a12
--- /dev/null
+++ b/tvix/nar-bridge/pkg/server/server.go
@@ -0,0 +1,86 @@
+package server
+
+import (
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"github.com/go-chi/chi/middleware"
+	"github.com/go-chi/chi/v5"
+	log "github.com/sirupsen/logrus"
+)
+
+type Server struct {
+	handler chi.Router
+
+	directoryServiceClient storev1pb.DirectoryServiceClient
+	blobServiceClient      storev1pb.BlobServiceClient
+	pathInfoServiceClient  storev1pb.PathInfoServiceClient
+
+	// When uploading NAR files to an HTTP binary cache, the .nar
+	// files are uploaded before the .narinfo files.
+	// We need *both* to be able to fully construct a PathInfo object.
+	// Keep an in-memory map of narhash(es) (in SRI) to sparse PathInfo.
+	// This is necessary until we can ask a PathInfoService for a node with a given
+	// narSha256.
+	narHashToPathInfoMu sync.Mutex
+	narHashToPathInfo   map[string]*storev1pb.PathInfo
+}
+
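+// New sets up a Server with the chi router and all handlers
+// (/, /nix-cache-info, and the .narinfo and NAR GET/HEAD/PUT handlers)
+// wired up against the passed store service clients.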
+func New(
+	directoryServiceClient storev1pb.DirectoryServiceClient,
+	blobServiceClient storev1pb.BlobServiceClient,
+	pathInfoServiceClient storev1pb.PathInfoServiceClient,
+	enableAccessLog bool,
+	priority int,
+) *Server {
+	r := chi.NewRouter()
+
+	if enableAccessLog {
+		r.Use(middleware.Logger)
+	}
+
+	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+		_, err := w.Write([]byte("nar-bridge"))
+		if err != nil {
+			log.Errorf("Unable to write response: %v", err)
+		}
+	})
+
+	r.Get("/nix-cache-info", func(w http.ResponseWriter, r *http.Request) {
+		_, err := w.Write([]byte(fmt.Sprintf("StoreDir: /nix/store\nWantMassQuery: 1\nPriority: %d\n", priority)))
+		if err != nil {
+			log.Errorf("Unable to write response: %v", err)
+		}
+	})
+
+	s := &Server{
+		handler:                r,
+		directoryServiceClient: directoryServiceClient,
+		blobServiceClient:      blobServiceClient,
+		pathInfoServiceClient:  pathInfoServiceClient,
+		narHashToPathInfo:      make(map[string]*storev1pb.PathInfo),
+	}
+
+	registerNarPut(s)
+	registerNarinfoPut(s)
+
+	registerNarinfoGet(s)
+	registerNarGet(s)
+
+	return s
+}
+
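+// ListenAndServe starts the HTTP server on the given address,
+// with generous read, write and idle timeouts.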
+func (s *Server) ListenAndServe(addr string) error {
+	srv := &http.Server{
+		Addr:         addr,
+		Handler:      s.handler,
+		ReadTimeout:  50 * time.Second,
+		WriteTimeout: 100 * time.Second,
+		IdleTimeout:  150 * time.Second,
+	}
+
+	return srv.ListenAndServe()
+}
diff --git a/tvix/nar-bridge/pkg/server/util.go b/tvix/nar-bridge/pkg/server/util.go
new file mode 100644
index 000000000000..47e368adde80
--- /dev/null
+++ b/tvix/nar-bridge/pkg/server/util.go
@@ -0,0 +1,24 @@
+package server
+
+import (
+	"fmt"
+	nixhash "github.com/nix-community/go-nix/pkg/hash"
+)
+
+// parseNarHashFromUrl parses a nixbase32 string representing a sha256 NarHash
+// and returns a nixhash.Hash if parsing succeeded, or an error otherwise.
+func parseNarHashFromUrl(narHashFromUrl string) (*nixhash.Hash, error) {
+	// peek at the length. If it's 52 characters, assume sha256,
+	// if it's something else, this is an error.
+	l := len(narHashFromUrl)
+	if l != 52 {
+		return nil, fmt.Errorf("invalid length of narHash: %v", l)
+	}
+
+	nixHash, err := nixhash.ParseNixBase32("sha256:" + narHashFromUrl)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse nixbase32 hash: %w", err)
+	}
+
+	return nixHash, nil
+}
diff --git a/tvix/nar-bridge/pkg/writer/writer.go b/tvix/nar-bridge/pkg/writer/writer.go
new file mode 100644
index 000000000000..25510f933e3a
--- /dev/null
+++ b/tvix/nar-bridge/pkg/writer/writer.go
@@ -0,0 +1,278 @@
+package writer
+
+import (
+	"fmt"
+	"io"
+	"path"
+
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"github.com/nix-community/go-nix/pkg/nar"
+)
+
+type DirectoryLookupFn func([]byte) (*storev1pb.Directory, error)
+type BlobLookupFn func([]byte) (io.ReadCloser, error)
+
+// Export will traverse a given pathInfo structure, and write the contents
+// in NAR format to the passed Writer.
+// It uses directoryLookupFn and blobLookupFn to resolve references.
+func Export(
+	w io.Writer,
+	pathInfo *storev1pb.PathInfo,
+	directoryLookupFn DirectoryLookupFn,
+	blobLookupFn BlobLookupFn,
+) error {
+	// initialize a NAR writer
+	narWriter, err := nar.NewWriter(w)
+	if err != nil {
+		return fmt.Errorf("unable to initialize nar writer: %w", err)
+	}
+	defer narWriter.Close()
+
+	// populate rootHeader
+	rootHeader := &nar.Header{
+		Path: "/",
+	}
+
+	// populate a stack
+	// we will push paths and directories to it when entering a directory,
+	// and emit individual elements to the NAR writer, draining the Directory object.
+	// once it's empty, we can pop it off the stack.
+	var stackPaths = []string{}
+	var stackDirectories = []*storev1pb.Directory{}
+
+	// Peek at the pathInfo root, assemble the root node and write it to the writer.
+	// In the case of a regular file, we retrieve and write the contents, then close and return.
+	// In the case of a symlink, we write the symlink, then close and return.
+	switch v := (pathInfo.GetNode().GetNode()).(type) {
+	case *storev1pb.Node_File:
+		rootHeader.Type = nar.TypeRegular
+		rootHeader.Size = int64(v.File.GetSize())
+		rootHeader.Executable = v.File.GetExecutable()
+		err := narWriter.WriteHeader(rootHeader)
+		if err != nil {
+			return fmt.Errorf("unable to write root header: %w", err)
+		}
+
+		// if it's a regular file, retrieve and write the contents
+		contentReader, err := blobLookupFn(v.File.GetDigest())
+		if err != nil {
+			return fmt.Errorf("unable to lookup blob: %w", err)
+		}
+		defer contentReader.Close()
+
+		_, err = io.Copy(narWriter, contentReader)
+		if err != nil {
+			return fmt.Errorf("unable to copy contents from contentReader: %w", err)
+		}
+
+		err = contentReader.Close()
+		if err != nil {
+			return fmt.Errorf("unable to close content reader: %w", err)
+		}
+
+		err = narWriter.Close()
+		if err != nil {
+			return fmt.Errorf("unable to close nar writer: %w", err)
+		}
+
+		return nil
+
+	case *storev1pb.Node_Symlink:
+		rootHeader.Type = nar.TypeSymlink
+		rootHeader.LinkTarget = string(v.Symlink.GetTarget())
+		err := narWriter.WriteHeader(rootHeader)
+		if err != nil {
+			return fmt.Errorf("unable to write root header: %w", err)
+		}
+
+		err = narWriter.Close()
+		if err != nil {
+			return fmt.Errorf("unable to close nar writer: %w", err)
+		}
+
+		return nil
+	case *storev1pb.Node_Directory:
+		// We have a directory at the root, look it up and put it on the stack.
+		directory, err := directoryLookupFn(v.Directory.Digest)
+		if err != nil {
+			return fmt.Errorf("unable to lookup directory: %w", err)
+		}
+		stackDirectories = append(stackDirectories, directory)
+		stackPaths = append(stackPaths, "/")
+
+		err = narWriter.WriteHeader(&nar.Header{
+			Path: "/",
+			Type: nar.TypeDirectory,
+		})
+
+		if err != nil {
+			return fmt.Errorf("error writing header: %w", err)
+		}
+	}
+
+	// as long as the stack is not empty, we keep running.
+	for {
+		if len(stackDirectories) == 0 {
+			return nil
+		}
+
+		// Peek at the current top of the stack.
+		topOfStack := stackDirectories[len(stackDirectories)-1]
+		topOfStackPath := stackPaths[len(stackPaths)-1]
+
+		// get the next element that's lexicographically smallest, and drain it from
+		// the current directory on top of the stack.
+		nextNode := drainNextNode(topOfStack)
+
+		// If drainNextNode returned nil, there's nothing left in the directory node, so we
+		// can pop it off the stack.
+		// Contrary to the import case, we don't emit the node when popping from the stack, but when pushing.
+		if nextNode == nil {
+			// pop off stack
+			stackDirectories = stackDirectories[:len(stackDirectories)-1]
+			stackPaths = stackPaths[:len(stackPaths)-1]
+
+			continue
+		}
+
+		switch n := (nextNode).(type) {
+		case *storev1pb.DirectoryNode:
+			err := narWriter.WriteHeader(&nar.Header{
+				Path: path.Join(topOfStackPath, string(n.GetName())),
+				Type: nar.TypeDirectory,
+			})
+			if err != nil {
+				return fmt.Errorf("unable to write nar header: %w", err)
+			}
+
+			d, err := directoryLookupFn(n.GetDigest())
+			if err != nil {
+				return fmt.Errorf("unable to lookup directory: %w", err)
+			}
+
+			// add to stack
+			stackDirectories = append(stackDirectories, d)
+			stackPaths = append(stackPaths, path.Join(topOfStackPath, string(n.GetName())))
+		case *storev1pb.FileNode:
+			err := narWriter.WriteHeader(&nar.Header{
+				Path:       path.Join(topOfStackPath, string(n.GetName())),
+				Type:       nar.TypeRegular,
+				Size:       int64(n.GetSize()),
+				Executable: n.GetExecutable(),
+			})
+			if err != nil {
+				return fmt.Errorf("unable to write nar header: %w", err)
+			}
+
+			// copy file contents
+			contentReader, err := blobLookupFn(n.GetDigest())
+			if err != nil {
+				return fmt.Errorf("unable to get blob: %w", err)
+			}
+			defer contentReader.Close()
+
+			_, err = io.Copy(narWriter, contentReader)
+			if err != nil {
+				return fmt.Errorf("unable to copy contents from contentReader: %w", err)
+			}
+
+			err = contentReader.Close()
+			if err != nil {
+				return fmt.Errorf("unable to close content reader: %w", err)
+			}
+		case *storev1pb.SymlinkNode:
+			err := narWriter.WriteHeader(&nar.Header{
+				Path:       path.Join(topOfStackPath, string(n.GetName())),
+				Type:       nar.TypeSymlink,
+				LinkTarget: string(n.GetTarget()),
+			})
+			if err != nil {
+				return fmt.Errorf("unable to write nar header: %w", err)
+			}
+		}
+	}
+}
+
+// TODO: add validation functions to Directory in both rust and golang, to
+// validate the keys in directories, files and symlinks are sorted.
+
+// drainNextNode removes the alphabetically smallest child node from a
+// directory message and returns it, or nil if the directory has no children.
+func drainNextNode(d *storev1pb.Directory) interface{} {
+	switch v := (smallestNode(d)).(type) {
+	case *storev1pb.DirectoryNode:
+		d.Directories = d.Directories[1:]
+		return v
+	case *storev1pb.FileNode:
+		d.Files = d.Files[1:]
+		return v
+	case *storev1pb.SymlinkNode:
+		d.Symlinks = d.Symlinks[1:]
+		return v
+	case nil:
+		return nil
+	default:
+		panic("invalid type encountered")
+	}
+}
+
+// smallestNode returns the child node of a directory message that comes first
+// alphabetically, or nil if the directory has no children.
+func smallestNode(d *storev1pb.Directory) interface{} {
+	childDirectories := d.GetDirectories()
+	childFiles := d.GetFiles()
+	childSymlinks := d.GetSymlinks()
+
+	if len(childDirectories) > 0 {
+		if len(childFiles) > 0 {
+			if len(childSymlinks) > 0 {
+				// directories,files,symlinks
+				return smallerNode(smallerNode(childDirectories[0], childFiles[0]), childSymlinks[0])
+			} else {
+				// directories,files,!symlinks
+				return smallerNode(childDirectories[0], childFiles[0])
+			}
+		} else {
+			// directories,!files
+			if len(childSymlinks) > 0 {
+				// directories,!files,symlinks
+				return smallerNode(childDirectories[0], childSymlinks[0])
+			} else {
+				// directories,!files,!symlinks
+				return childDirectories[0]
+			}
+		}
+	} else {
+		// !directories
+		if len(childFiles) > 0 {
+			// !directories,files
+			if len(childSymlinks) > 0 {
+				// !directories,files,symlinks
+				return smallerNode(childFiles[0], childSymlinks[0])
+			} else {
+				// !directories,files,!symlinks
+				return childFiles[0]
+			}
+		} else {
+			//!directories,!files
+			if len(childSymlinks) > 0 {
+				//!directories,!files,symlinks
+				return childSymlinks[0]
+			} else {
+				//!directories,!files,!symlinks
+				return nil
+			}
+		}
+	}
+}
+
+// smallerNode compares two nodes by their name,
+// and returns the one with the smaller name.
+// Neither node may be nil; these cases are checked in smallestNode.
+func smallerNode(a interface{ GetName() []byte }, b interface{ GetName() []byte }) interface{ GetName() []byte } {
+	if string(a.GetName()) < string(b.GetName()) {
+		return a
+	} else {
+		return b
+	}
+}
diff --git a/tvix/nar-bridge/pkg/writer/writer_pick_next_node_test.go b/tvix/nar-bridge/pkg/writer/writer_pick_next_node_test.go
new file mode 100644
index 000000000000..3601e9fb5296
--- /dev/null
+++ b/tvix/nar-bridge/pkg/writer/writer_pick_next_node_test.go
@@ -0,0 +1,51 @@
+package writer
+
+import (
+	"testing"
+
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"github.com/google/go-cmp/cmp"
+	"github.com/stretchr/testify/require"
+	"google.golang.org/protobuf/testing/protocmp"
+)
+
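+// requireProtoEq fails the test if the two given protobuf messages are not equal.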
+func requireProtoEq(t *testing.T, expected interface{}, actual interface{}) {
+	if diff := cmp.Diff(expected, actual, protocmp.Transform()); diff != "" {
+		t.Errorf("unexpected difference:\n%v", diff)
+	}
+}
+
+func TestPopNextNode(t *testing.T) {
+	t.Run("empty directory", func(t *testing.T) {
+		d := &storev1pb.Directory{
+			Directories: []*storev1pb.DirectoryNode{},
+			Files:       []*storev1pb.FileNode{},
+			Symlinks:    []*storev1pb.SymlinkNode{},
+		}
+
+		n := drainNextNode(d)
+		require.Equal(t, nil, n)
+	})
+	t.Run("only directories", func(t *testing.T) {
+		ds := &storev1pb.Directory{
+			Directories: []*storev1pb.DirectoryNode{{
+				Name:   []byte("a"),
+				Digest: []byte{},
+				Size:   0,
+			}, {
+				Name:   []byte("b"),
+				Digest: []byte{},
+				Size:   0,
+			}},
+			Files:    []*storev1pb.FileNode{},
+			Symlinks: []*storev1pb.SymlinkNode{},
+		}
+
+		n := drainNextNode(ds)
+		requireProtoEq(t, &storev1pb.DirectoryNode{
+			Name:   []byte("a"),
+			Digest: []byte{},
+			Size:   0,
+		}, n)
+	})
+}
diff --git a/tvix/nar-bridge/pkg/writer/writer_test.go b/tvix/nar-bridge/pkg/writer/writer_test.go
new file mode 100644
index 000000000000..5914e1792bcb
--- /dev/null
+++ b/tvix/nar-bridge/pkg/writer/writer_test.go
@@ -0,0 +1,211 @@
+package writer_test
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"io"
+	"os"
+	"testing"
+
+	"code.tvl.fyi/tvix/nar-bridge/pkg/reader"
+	"code.tvl.fyi/tvix/nar-bridge/pkg/writer"
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"github.com/stretchr/testify/require"
+	"lukechampine.com/blake3"
+)
+
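+// mustDigest returns the digest of the given Directory, panicking on error.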
+func mustDigest(d *storev1pb.Directory) []byte {
+	dgst, err := d.Digest()
+	if err != nil {
+		panic(err)
+	}
+	return dgst
+}
+
+func TestSymlink(t *testing.T) {
+	pathInfo := &storev1pb.PathInfo{
+
+		Node: &storev1pb.Node{
+			Node: &storev1pb.Node_Symlink{
+				Symlink: &storev1pb.SymlinkNode{
+					Name:   []byte("doesntmatter"),
+					Target: []byte("/nix/store/somewhereelse"),
+				},
+			},
+		},
+	}
+
+	var buf bytes.Buffer
+
+	err := writer.Export(&buf, pathInfo, func([]byte) (*storev1pb.Directory, error) {
+		panic("no directories expected")
+	}, func([]byte) (io.ReadCloser, error) {
+		panic("no files expected")
+	})
+	require.NoError(t, err, "exporter shouldn't fail")
+
+	f, err := os.Open("../../testdata/symlink.nar")
+	require.NoError(t, err)
+
+	bytesExpected, err := io.ReadAll(f)
+	if err != nil {
+		panic(err)
+	}
+
+	require.Equal(t, bytesExpected, buf.Bytes(), "expected nar contents to match")
+}
+
+func TestRegular(t *testing.T) {
+	// The blake3 digest of the 0x01 byte.
+	BLAKE3_DIGEST_0X01 := []byte{
+		0x48, 0xfc, 0x72, 0x1f, 0xbb, 0xc1, 0x72, 0xe0, 0x92, 0x5f, 0xa2, 0x7a, 0xf1, 0x67, 0x1d,
+		0xe2, 0x25, 0xba, 0x92, 0x71, 0x34, 0x80, 0x29, 0x98, 0xb1, 0x0a, 0x15, 0x68, 0xa1, 0x88,
+		0x65, 0x2b,
+	}
+
+	pathInfo := &storev1pb.PathInfo{
+		Node: &storev1pb.Node{
+			Node: &storev1pb.Node_File{
+				File: &storev1pb.FileNode{
+					Name:       []byte("doesntmatter"),
+					Digest:     BLAKE3_DIGEST_0X01,
+					Size:       1,
+					Executable: false,
+				},
+			},
+		},
+	}
+
+	var buf bytes.Buffer
+
+	err := writer.Export(&buf, pathInfo, func([]byte) (*storev1pb.Directory, error) {
+		panic("no directories expected")
+	}, func(blobRef []byte) (io.ReadCloser, error) {
+		if !bytes.Equal(blobRef, BLAKE3_DIGEST_0X01) {
+			panic("unexpected blobref")
+		}
+		return io.NopCloser(bytes.NewBuffer([]byte{0x01})), nil
+	})
+	require.NoError(t, err, "exporter shouldn't fail")
+
+	f, err := os.Open("../../testdata/onebyteregular.nar")
+	require.NoError(t, err)
+
+	bytesExpected, err := io.ReadAll(f)
+	if err != nil {
+		panic(err)
+	}
+
+	require.Equal(t, bytesExpected, buf.Bytes(), "expected nar contents to match")
+}
+
+func TestEmptyDirectory(t *testing.T) {
+	// construct the empty Directory object that the root DirectoryNode refers to
+	emptyDirectory := &storev1pb.Directory{
+		Directories: []*storev1pb.DirectoryNode{},
+		Files:       []*storev1pb.FileNode{},
+		Symlinks:    []*storev1pb.SymlinkNode{},
+	}
+	emptyDirectoryDigest := mustDigest(emptyDirectory)
+
+	pathInfo := &storev1pb.PathInfo{
+		Node: &storev1pb.Node{
+			Node: &storev1pb.Node_Directory{
+				Directory: &storev1pb.DirectoryNode{
+					Name:   []byte("doesntmatter"),
+					Digest: emptyDirectoryDigest,
+					Size:   0,
+				},
+			},
+		},
+	}
+
+	var buf bytes.Buffer
+
+	err := writer.Export(&buf, pathInfo, func(directoryRef []byte) (*storev1pb.Directory, error) {
+		if !bytes.Equal(directoryRef, emptyDirectoryDigest) {
+			panic("unexpected directoryRef")
+		}
+		return emptyDirectory, nil
+	}, func([]byte) (io.ReadCloser, error) {
+		panic("no files expected")
+	})
+	require.NoError(t, err, "exporter shouldn't fail")
+
+	f, err := os.Open("../../testdata/emptydirectory.nar")
+	require.NoError(t, err)
+
+	bytesExpected, err := io.ReadAll(f)
+	if err != nil {
+		panic(err)
+	}
+
+	require.Equal(t, bytesExpected, buf.Bytes(), "expected nar contents to match")
+}
+
+func TestFull(t *testing.T) {
+	// We pipe nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar through the reader,
+	// and store all the file contents and directory objects received in two maps.
+	// We then feed these to the writer, and check we come up with the same NAR file.
+
+	f, err := os.Open("../../testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar")
+	require.NoError(t, err)
+
+	narContents, err := io.ReadAll(f)
+	require.NoError(t, err)
+
+	filesMap := make(map[string][]byte, 0)
+	directoriesMap := make(map[string]*storev1pb.Directory)
+
+	r := reader.New(bytes.NewBuffer(narContents))
+	pathInfo, err := r.Import(
+		context.Background(),
+		func(fileReader io.Reader) error {
+			fileContents, err := io.ReadAll(fileReader)
+			require.NoError(t, err)
+
+			b3Writer := blake3.New(32, nil)
+			_, err = io.Copy(b3Writer, bytes.NewReader(fileContents))
+			require.NoError(t, err)
+
+			// put it in filesMap
+			filesMap[hex.EncodeToString(b3Writer.Sum(nil))] = fileContents
+
+			return nil
+		},
+		func(directory *storev1pb.Directory) error {
+			dgst := mustDigest(directory)
+
+			directoriesMap[hex.EncodeToString(dgst)] = directory
+			return nil
+		},
+	)
+
+	require.NoError(t, err)
+
+	// done populating everything, now actually test the export :-)
+
+	var buf bytes.Buffer
+	err = writer.Export(
+		&buf,
+		pathInfo,
+		func(directoryRef []byte) (*storev1pb.Directory, error) {
+			d, found := directoriesMap[hex.EncodeToString(directoryRef)]
+			if !found {
+				panic("directory not found")
+			}
+			return d, nil
+		},
+		func(fileRef []byte) (io.ReadCloser, error) {
+			fileContents, found := filesMap[hex.EncodeToString(fileRef)]
+			if !found {
+				panic("file not found")
+			}
+			return io.NopCloser(bytes.NewReader(fileContents)), nil
+		},
+	)
+
+	require.NoError(t, err, "exporter shouldn't fail")
+	require.Equal(t, narContents, buf.Bytes())
+}
diff --git a/tvix/nar-bridge/testdata/emptydirectory.nar b/tvix/nar-bridge/testdata/emptydirectory.nar
new file mode 100644
index 000000000000..baba55862255
--- /dev/null
+++ b/tvix/nar-bridge/testdata/emptydirectory.nar
Binary files differ
diff --git a/tvix/nar-bridge/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar b/tvix/nar-bridge/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar
new file mode 100644
index 000000000000..6cb0b16e5d5d
--- /dev/null
+++ b/tvix/nar-bridge/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar
Binary files differ
diff --git a/tvix/nar-bridge/testdata/onebyteexecutable.nar b/tvix/nar-bridge/testdata/onebyteexecutable.nar
new file mode 100644
index 000000000000..68682196665c
--- /dev/null
+++ b/tvix/nar-bridge/testdata/onebyteexecutable.nar
Binary files differ
diff --git a/tvix/nar-bridge/testdata/onebyteregular.nar b/tvix/nar-bridge/testdata/onebyteregular.nar
new file mode 100644
index 000000000000..b8c94932bf0c
--- /dev/null
+++ b/tvix/nar-bridge/testdata/onebyteregular.nar
Binary files differ
diff --git a/tvix/nar-bridge/testdata/symlink.nar b/tvix/nar-bridge/testdata/symlink.nar
new file mode 100644
index 000000000000..7990e4ad5bc2
--- /dev/null
+++ b/tvix/nar-bridge/testdata/symlink.nar
Binary files differ