author     Florian Klink <flokli@flokli.de>          2022-11-19T20·34+0000
committer  flokli <flokli@flokli.de>                 2023-09-17T13·24+0000
commit     0ecd10bf307049b9833e69f331ec049ae8840d85
tree       1718b6e0cd7cb3177b951c88dff1dba11faecabf  /tvix/nar-bridge/pkg/writer
parent     683d3e0d2d1de30eb7895861627203e62702a770
feat(tvix/nar-bridge): init r/6600
This provides a Nix HTTP Binary Cache interface in front of a tvix-store
that's reachable via gRPC.

TODOs:

- remove import command, move serve up to toplevel. We have
  nix-copy-closure and tvix-store commands.
- loop into CI. We should be able to fetch the protos as a third-party
  dependency.
- Check if we can test nar-bridge slightly easier in an integration test.
- Ensure we support connecting to unix sockets and grpc+http at least,
  using the same syntax as tvix-store.
- Don't buffer the entire blob when rendering NAR

Co-Authored-By: Connor Brewster <cbrewster@hey.com>
Co-Authored-By: Márton Boros <martonboros@gmail.com>
Co-Authored-By: Vo Minh Thu <noteed@gmail.com>
Change-Id: I6064474e49dfe78cea67676957462d9f28658d4a
Reviewed-on: https://cl.tvl.fyi/c/depot/+/9339
Tested-by: BuildkiteCI
Reviewed-by: tazjin <tazjin@tvl.su>
Diffstat (limited to 'tvix/nar-bridge/pkg/writer')
-rw-r--r--  tvix/nar-bridge/pkg/writer/writer.go                       278
-rw-r--r--  tvix/nar-bridge/pkg/writer/writer_pick_next_node_test.go    51
-rw-r--r--  tvix/nar-bridge/pkg/writer/writer_test.go                  211
3 files changed, 540 insertions, 0 deletions
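
Before the diff itself, a minimal sketch of how the new writer.Export API is driven. It mirrors the TestSymlink case from writer_test.go below; the package main wrapper and the log output are illustrative only and not part of this change.

package main

import (
    "bytes"
    "io"
    "log"

    "code.tvl.fyi/tvix/nar-bridge/pkg/writer"
    storev1pb "code.tvl.fyi/tvix/store/protos"
)

func main() {
    // A symlink-only PathInfo, mirroring the TestSymlink case in the diff below.
    pathInfo := &storev1pb.PathInfo{
        Node: &storev1pb.Node{
            Node: &storev1pb.Node_Symlink{
                Symlink: &storev1pb.SymlinkNode{
                    Name:   []byte("doesntmatter"),
                    Target: []byte("/nix/store/somewhereelse"),
                },
            },
        },
    }

    var buf bytes.Buffer

    // For a symlink root, Export never needs to resolve directories or blobs,
    // so both lookup callbacks can simply refuse to be called.
    err := writer.Export(&buf, pathInfo,
        func([]byte) (*storev1pb.Directory, error) { panic("no directories expected") },
        func([]byte) (io.ReadCloser, error) { panic("no blobs expected") },
    )
    if err != nil {
        log.Fatalf("export failed: %v", err)
    }

    log.Printf("wrote %d bytes of NAR", buf.Len())
}

For directory roots, those two callbacks are where a real caller plugs in its store backend (in nar-bridge, gRPC lookups against tvix-store).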
diff --git a/tvix/nar-bridge/pkg/writer/writer.go b/tvix/nar-bridge/pkg/writer/writer.go
new file mode 100644
index 000000000000..25510f933e3a
--- /dev/null
+++ b/tvix/nar-bridge/pkg/writer/writer.go
@@ -0,0 +1,278 @@
+package writer
+
+import (
+    "fmt"
+    "io"
+    "path"
+
+    storev1pb "code.tvl.fyi/tvix/store/protos"
+    "github.com/nix-community/go-nix/pkg/nar"
+)
+
+type DirectoryLookupFn func([]byte) (*storev1pb.Directory, error)
+type BlobLookupFn func([]byte) (io.ReadCloser, error)
+
+// Export will traverse a given pathInfo structure, and write the contents
+// in NAR format to the passed Writer.
+// It uses directoryLookupFn and blobLookupFn to resolve references.
+func Export(
+    w io.Writer,
+    pathInfo *storev1pb.PathInfo,
+    directoryLookupFn DirectoryLookupFn,
+    blobLookupFn BlobLookupFn,
+) error {
+    // initialize a NAR writer
+    narWriter, err := nar.NewWriter(w)
+    if err != nil {
+        return fmt.Errorf("unable to initialize nar writer: %w", err)
+    }
+    defer narWriter.Close()
+
+    // populate rootHeader
+    rootHeader := &nar.Header{
+        Path: "/",
+    }
+
+    // populate a stack
+    // we will push paths and directories to it when entering a directory,
+    // and emit individual elements to the NAR writer, draining the Directory object.
+    // once it's empty, we can pop it off the stack.
+    var stackPaths = []string{}
+    var stackDirectories = []*storev1pb.Directory{}
+
+    // peek at the pathInfo root and assemble the root node and write to writer
+    // in the case of a regular file, we retrieve and write the contents, close and exit
+    // in the case of a symlink, we write the symlink, close and exit
+    switch v := (pathInfo.GetNode().GetNode()).(type) {
+    case *storev1pb.Node_File:
+        rootHeader.Type = nar.TypeRegular
+        rootHeader.Size = int64(v.File.GetSize())
+        rootHeader.Executable = v.File.GetExecutable()
+        err := narWriter.WriteHeader(rootHeader)
+        if err != nil {
+            return fmt.Errorf("unable to write root header: %w", err)
+        }
+
+        // if it's a regular file, retrieve and write the contents
+        contentReader, err := blobLookupFn(v.File.GetDigest())
+        if err != nil {
+            return fmt.Errorf("unable to lookup blob: %w", err)
+        }
+        defer contentReader.Close()
+
+        _, err = io.Copy(narWriter, contentReader)
+        if err != nil {
+            return fmt.Errorf("unable to copy contents from contentReader: %w", err)
+        }
+
+        err = contentReader.Close()
+        if err != nil {
+            return fmt.Errorf("unable to close content reader: %w", err)
+        }
+
+        err = narWriter.Close()
+        if err != nil {
+            return fmt.Errorf("unable to close nar writer: %w", err)
+        }
+
+        return nil
+
+    case *storev1pb.Node_Symlink:
+        rootHeader.Type = nar.TypeSymlink
+        rootHeader.LinkTarget = string(v.Symlink.GetTarget())
+        err := narWriter.WriteHeader(rootHeader)
+        if err != nil {
+            return fmt.Errorf("unable to write root header: %w", err)
+        }
+
+        err = narWriter.Close()
+        if err != nil {
+            return fmt.Errorf("unable to close nar writer: %w", err)
+        }
+
+        return nil
+    case *storev1pb.Node_Directory:
+        // We have a directory at the root, look it up and put it on the stack.
+        directory, err := directoryLookupFn(v.Directory.Digest)
+        if err != nil {
+            return fmt.Errorf("unable to lookup directory: %w", err)
+        }
+        stackDirectories = append(stackDirectories, directory)
+        stackPaths = append(stackPaths, "/")
+
+        err = narWriter.WriteHeader(&nar.Header{
+            Path: "/",
+            Type: nar.TypeDirectory,
+        })
+
+        if err != nil {
+            return fmt.Errorf("error writing header: %w", err)
+        }
+    }
+
+    // as long as the stack is not empty, we keep running.
+    for {
+        if len(stackDirectories) == 0 {
+            return nil
+        }
+
+        // Peek at the current top of the stack.
+        topOfStack := stackDirectories[len(stackDirectories)-1]
+        topOfStackPath := stackPaths[len(stackPaths)-1]
+
+        // get the next element that's lexicographically smallest, and drain it from
+        // the current directory on top of the stack.
+        nextNode := drainNextNode(topOfStack)
+
+        // If nextNode returns nil, there's nothing left in the directory node, so we
+        // can pop it off the stack.
+        // Contrary to the import case, we don't emit the node when popping it from the stack, but when pushing it.
+        if nextNode == nil {
+            // pop off stack
+            stackDirectories = stackDirectories[:len(stackDirectories)-1]
+            stackPaths = stackPaths[:len(stackPaths)-1]
+
+            continue
+        }
+
+        switch n := (nextNode).(type) {
+        case *storev1pb.DirectoryNode:
+            err := narWriter.WriteHeader(&nar.Header{
+                Path: path.Join(topOfStackPath, string(n.GetName())),
+                Type: nar.TypeDirectory,
+            })
+            if err != nil {
+                return fmt.Errorf("unable to write nar header: %w", err)
+            }
+
+            d, err := directoryLookupFn(n.GetDigest())
+            if err != nil {
+                return fmt.Errorf("unable to lookup directory: %w", err)
+            }
+
+            // add to stack
+            stackDirectories = append(stackDirectories, d)
+            stackPaths = append(stackPaths, path.Join(topOfStackPath, string(n.GetName())))
+        case *storev1pb.FileNode:
+            err := narWriter.WriteHeader(&nar.Header{
+                Path:       path.Join(topOfStackPath, string(n.GetName())),
+                Type:       nar.TypeRegular,
+                Size:       int64(n.GetSize()),
+                Executable: n.GetExecutable(),
+            })
+            if err != nil {
+                return fmt.Errorf("unable to write nar header: %w", err)
+            }
+
+            // copy file contents
+            contentReader, err := blobLookupFn(n.GetDigest())
+            if err != nil {
+                return fmt.Errorf("unable to get blob: %w", err)
+            }
+            defer contentReader.Close()
+
+            _, err = io.Copy(narWriter, contentReader)
+            if err != nil {
+                return fmt.Errorf("unable to copy contents from contentReader: %w", err)
+            }
+
+            err = contentReader.Close()
+            if err != nil {
+                return fmt.Errorf("unable to close content reader: %w", err)
+            }
+        case *storev1pb.SymlinkNode:
+            err := narWriter.WriteHeader(&nar.Header{
+                Path:       path.Join(topOfStackPath, string(n.GetName())),
+                Type:       nar.TypeSymlink,
+                LinkTarget: string(n.GetTarget()),
+            })
+            if err != nil {
+                return fmt.Errorf("unable to write nar header: %w", err)
+            }
+        }
+    }
+}
+
+// TODO: add validation functions to Directory in both rust and golang, to
+// validate the keys in directories, files and symlinks are sorted.
+
+// drainNextNode will drain a directory message of one of its child nodes,
+// whichever comes first alphabetically.
+func drainNextNode(d *storev1pb.Directory) interface{} {
+    switch v := (smallestNode(d)).(type) {
+    case *storev1pb.DirectoryNode:
+        d.Directories = d.Directories[1:]
+        return v
+    case *storev1pb.FileNode:
+        d.Files = d.Files[1:]
+        return v
+    case *storev1pb.SymlinkNode:
+        d.Symlinks = d.Symlinks[1:]
+        return v
+    case nil:
+        return nil
+    default:
+        panic("invalid type encountered")
+    }
+}
+
+// smallestNode will return the node from a directory message,
+// whichever comes first alphabetically.
+func smallestNode(d *storev1pb.Directory) interface{} {
+    childDirectories := d.GetDirectories()
+    childFiles := d.GetFiles()
+    childSymlinks := d.GetSymlinks()
+
+    if len(childDirectories) > 0 {
+        if len(childFiles) > 0 {
+            if len(childSymlinks) > 0 {
+                // directories,files,symlinks
+                return smallerNode(smallerNode(childDirectories[0], childFiles[0]), childSymlinks[0])
+            } else {
+                // directories,files,!symlinks
+                return smallerNode(childDirectories[0], childFiles[0])
+            }
+        } else {
+            // directories,!files
+            if len(childSymlinks) > 0 {
+                // directories,!files,symlinks
+                return smallerNode(childDirectories[0], childSymlinks[0])
+            } else {
+                // directories,!files,!symlinks
+                return childDirectories[0]
+            }
+        }
+    } else {
+        // !directories
+        if len(childFiles) > 0 {
+            // !directories,files
+            if len(childSymlinks) > 0 {
+                // !directories,files,symlinks
+                return smallerNode(childFiles[0], childSymlinks[0])
+            } else {
+                // !directories,files,!symlinks
+                return childFiles[0]
+            }
+        } else {
+            // !directories,!files
+            if len(childSymlinks) > 0 {
+                // !directories,!files,symlinks
+                return childSymlinks[0]
+            } else {
+                // !directories,!files,!symlinks
+                return nil
+            }
+        }
+    }
+}
+
+// smallerNode compares two nodes by their name,
+// and returns the one with the smaller name.
+// Neither node may be nil; we check for these cases in smallestNode.
+func smallerNode(a interface{ GetName() []byte }, b interface{ GetName() []byte }) interface{ GetName() []byte } {
+    if string(a.GetName()) < string(b.GetName()) {
+        return a
+    } else {
+        return b
+    }
+}
diff --git a/tvix/nar-bridge/pkg/writer/writer_pick_next_node_test.go b/tvix/nar-bridge/pkg/writer/writer_pick_next_node_test.go
new file mode 100644
index 000000000000..3601e9fb5296
--- /dev/null
+++ b/tvix/nar-bridge/pkg/writer/writer_pick_next_node_test.go
@@ -0,0 +1,51 @@
+package writer
+
+import (
+    "testing"
+
+    storev1pb "code.tvl.fyi/tvix/store/protos"
+    "github.com/google/go-cmp/cmp"
+    "github.com/stretchr/testify/require"
+    "google.golang.org/protobuf/testing/protocmp"
+)
+
+func requireProtoEq(t *testing.T, expected interface{}, actual interface{}) {
+    if diff := cmp.Diff(expected, actual, protocmp.Transform()); diff != "" {
+        t.Errorf("unexpected difference:\n%v", diff)
+    }
+}
+
+func TestPopNextNode(t *testing.T) {
+    t.Run("empty directory", func(t *testing.T) {
+        d := &storev1pb.Directory{
+            Directories: []*storev1pb.DirectoryNode{},
+            Files:       []*storev1pb.FileNode{},
+            Symlinks:    []*storev1pb.SymlinkNode{},
+        }
+
+        n := drainNextNode(d)
+        require.Equal(t, nil, n)
+    })
+    t.Run("only directories", func(t *testing.T) {
+        ds := &storev1pb.Directory{
+            Directories: []*storev1pb.DirectoryNode{{
+                Name:   []byte("a"),
+                Digest: []byte{},
+                Size:   0,
+            }, {
+                Name:   []byte("b"),
+                Digest: []byte{},
+                Size:   0,
+            }},
+            Files:    []*storev1pb.FileNode{},
+            Symlinks: []*storev1pb.SymlinkNode{},
+        }
+
+        n := drainNextNode(ds)
+        requireProtoEq(t, &storev1pb.DirectoryNode{
+            Name:   []byte("a"),
+            Digest: []byte{},
+            Size:   0,
+        }, n)
+    })
+}
diff --git a/tvix/nar-bridge/pkg/writer/writer_test.go b/tvix/nar-bridge/pkg/writer/writer_test.go
new file mode 100644
index 000000000000..5914e1792bcb
--- /dev/null
+++ b/tvix/nar-bridge/pkg/writer/writer_test.go
@@ -0,0 +1,211 @@
+package writer_test
+
+import (
+    "bytes"
+    "context"
+    "encoding/hex"
+    "io"
+    "os"
+    "testing"
+
+    "code.tvl.fyi/tvix/nar-bridge/pkg/reader"
+    "code.tvl.fyi/tvix/nar-bridge/pkg/writer"
+    storev1pb "code.tvl.fyi/tvix/store/protos"
+    "github.com/stretchr/testify/require"
+    "lukechampine.com/blake3"
+)
+
+func mustDigest(d *storev1pb.Directory) []byte {
+    dgst, err := d.Digest()
+    if err != nil {
+        panic(err)
+    }
+    return dgst
+}
+
+func TestSymlink(t *testing.T) {
+    pathInfo := &storev1pb.PathInfo{
+
+        Node: &storev1pb.Node{
+            Node: &storev1pb.Node_Symlink{
+                Symlink: &storev1pb.SymlinkNode{
+                    Name:   []byte("doesntmatter"),
+                    Target: []byte("/nix/store/somewhereelse"),
+                },
+            },
+        },
+    }
+
+    var buf bytes.Buffer
+
+    err := writer.Export(&buf, pathInfo, func([]byte) (*storev1pb.Directory, error) {
+        panic("no directories expected")
+    }, func([]byte) (io.ReadCloser, error) {
+        panic("no files expected")
+    })
+    require.NoError(t, err, "exporter shouldn't fail")
+
+    f, err := os.Open("../../testdata/symlink.nar")
+    require.NoError(t, err)
+
+    bytesExpected, err := io.ReadAll(f)
+    if err != nil {
+        panic(err)
+    }
+
+    require.Equal(t, bytesExpected, buf.Bytes(), "expected nar contents to match")
+}
+
+func TestRegular(t *testing.T) {
+    // The blake3 digest of the 0x01 byte.
+    BLAKE3_DIGEST_0X01 := []byte{
+        0x48, 0xfc, 0x72, 0x1f, 0xbb, 0xc1, 0x72, 0xe0, 0x92, 0x5f, 0xa2, 0x7a, 0xf1, 0x67, 0x1d,
+        0xe2, 0x25, 0xba, 0x92, 0x71, 0x34, 0x80, 0x29, 0x98, 0xb1, 0x0a, 0x15, 0x68, 0xa1, 0x88,
+        0x65, 0x2b,
+    }
+
+    pathInfo := &storev1pb.PathInfo{
+        Node: &storev1pb.Node{
+            Node: &storev1pb.Node_File{
+                File: &storev1pb.FileNode{
+                    Name:       []byte("doesntmatter"),
+                    Digest:     BLAKE3_DIGEST_0X01,
+                    Size:       1,
+                    Executable: false,
+                },
+            },
+        },
+    }
+
+    var buf bytes.Buffer
+
+    err := writer.Export(&buf, pathInfo, func([]byte) (*storev1pb.Directory, error) {
+        panic("no directories expected")
+    }, func(blobRef []byte) (io.ReadCloser, error) {
+        if !bytes.Equal(blobRef, BLAKE3_DIGEST_0X01) {
+            panic("unexpected blobref")
+        }
+        return io.NopCloser(bytes.NewBuffer([]byte{0x01})), nil
+    })
+    require.NoError(t, err, "exporter shouldn't fail")
+
+    f, err := os.Open("../../testdata/onebyteregular.nar")
+    require.NoError(t, err)
+
+    bytesExpected, err := io.ReadAll(f)
+    if err != nil {
+        panic(err)
+    }
+
+    require.Equal(t, bytesExpected, buf.Bytes(), "expected nar contents to match")
+}
+
+func TestEmptyDirectory(t *testing.T) {
+    // construct the empty directory node this refers to
+    emptyDirectory := &storev1pb.Directory{
+        Directories: []*storev1pb.DirectoryNode{},
+        Files:       []*storev1pb.FileNode{},
+        Symlinks:    []*storev1pb.SymlinkNode{},
+    }
+    emptyDirectoryDigest := mustDigest(emptyDirectory)
+
+    pathInfo := &storev1pb.PathInfo{
+        Node: &storev1pb.Node{
+            Node: &storev1pb.Node_Directory{
+                Directory: &storev1pb.DirectoryNode{
+                    Name:   []byte("doesntmatter"),
+                    Digest: emptyDirectoryDigest,
+                    Size:   0,
+                },
+            },
+        },
+    }
+
+    var buf bytes.Buffer
+
+    err := writer.Export(&buf, pathInfo, func(directoryRef []byte) (*storev1pb.Directory, error) {
+        if !bytes.Equal(directoryRef, emptyDirectoryDigest) {
+            panic("unexpected directoryRef")
+        }
+        return emptyDirectory, nil
+    }, func([]byte) (io.ReadCloser, error) {
+        panic("no files expected")
+    })
+    require.NoError(t, err, "exporter shouldn't fail")
+
+    f, err := os.Open("../../testdata/emptydirectory.nar")
+    require.NoError(t, err)
+
+    bytesExpected, err := io.ReadAll(f)
+    if err != nil {
+        panic(err)
+    }
+
+    require.Equal(t, bytesExpected, buf.Bytes(), "expected nar contents to match")
+}
+
+func TestFull(t *testing.T) {
+    // We pipe nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar to the importer,
+    // and store all the file contents and directory objects received in two hashmaps.
+    // We then feed it to the writer, and test we come up with the same NAR file.
+
+    f, err := os.Open("../../testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar")
+    require.NoError(t, err)
+
+    narContents, err := io.ReadAll(f)
+    require.NoError(t, err)
+
+    filesMap := make(map[string][]byte, 0)
+    directoriesMap := make(map[string]*storev1pb.Directory)
+
+    r := reader.New(bytes.NewBuffer(narContents))
+    pathInfo, err := r.Import(
+        context.Background(),
+        func(fileReader io.Reader) error {
+            fileContents, err := io.ReadAll(fileReader)
+            require.NoError(t, err)
+
+            b3Writer := blake3.New(32, nil)
+            _, err = io.Copy(b3Writer, bytes.NewReader(fileContents))
+            require.NoError(t, err)
+
+            // put it in filesMap
+            filesMap[hex.EncodeToString(b3Writer.Sum(nil))] = fileContents
+
+            return nil
+        },
+        func(directory *storev1pb.Directory) error {
+            dgst := mustDigest(directory)
+
+            directoriesMap[hex.EncodeToString(dgst)] = directory
+            return nil
+        },
+    )
+
+    require.NoError(t, err)
+
+    // done populating everything, now actually test the export :-)
+
+    var buf bytes.Buffer
+    err = writer.Export(
+        &buf,
+        pathInfo,
+        func(directoryRef []byte) (*storev1pb.Directory, error) {
+            d, found := directoriesMap[hex.EncodeToString(directoryRef)]
+            if !found {
+                panic("directories not found")
+            }
+            return d, nil
+        },
+        func(fileRef []byte) (io.ReadCloser, error) {
+            fileContents, found := filesMap[hex.EncodeToString(fileRef)]
+            if !found {
+                panic("file not found")
+            }
+            return io.NopCloser(bytes.NewReader(fileContents)), nil
+        },
+    )
+
+    require.NoError(t, err, "exporter shouldn't fail")
+    require.Equal(t, narContents, buf.Bytes())
+}
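
The commit message frames nar-bridge as a Nix HTTP Binary Cache interface in front of a tvix-store reachable via gRPC. As a rough, hypothetical sketch (not the actual nar-bridge server code), Export can be wired straight into an HTTP handler so the NAR is streamed into the response body; lookupPathInfo, lookupDirectory and lookupBlob are placeholder callbacks standing in for whatever backs the store:

package example

import (
    "log"
    "net/http"

    "code.tvl.fyi/tvix/nar-bridge/pkg/writer"
    storev1pb "code.tvl.fyi/tvix/store/protos"
)

// narHandler streams a NAR for the store path identified by the request.
// lookupPathInfo, lookupDirectory and lookupBlob are hypothetical stand-ins
// for whatever backs the store (in nar-bridge, gRPC calls to tvix-store).
func narHandler(
    lookupPathInfo func(r *http.Request) (*storev1pb.PathInfo, error),
    lookupDirectory writer.DirectoryLookupFn,
    lookupBlob writer.BlobLookupFn,
) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        pathInfo, err := lookupPathInfo(r)
        if err != nil {
            http.Error(w, err.Error(), http.StatusNotFound)
            return
        }

        w.Header().Set("Content-Type", "application/x-nix-nar")

        // Export streams the NAR straight into the response body, using the
        // two callbacks to resolve directory and blob digests on the fly.
        if err := writer.Export(w, pathInfo, lookupDirectory, lookupBlob); err != nil {
            log.Printf("rendering NAR failed: %v", err)
        }
    })
}

Streaming into the ResponseWriter means the handler never has to hold the whole NAR in memory; only blob contents pass through io.Copy from whatever reader the blob lookup returns.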