about summary refs log tree commit diff
path: root/tvix/store/src
diff options
context:
space:
mode:
author    Florian Klink <flokli@flokli.de>  2024-05-04T19:23+0300
committer flokli <flokli@flokli.de>         2024-05-04T21:27+0000
commitba00f0c6955fcd505cfa7ef06dc35b53ac14868a (patch)
treec695c21ee57bc51926fc24daf1ddba5dc8ca0796 /tvix/store/src
parentf2f12d15568b068b7b38473c03b74275a7f43cee (diff)
refactor(tvix/*store): use DS: DirectoryService r/8073
We implement DirectoryService for Arc<DirectoryService> and
Box<DirectoryService>, so this is sufficient.

Change-Id: I0a5a81cbc4782764406b5bca57f908ace6090737
Reviewed-on: https://cl.tvl.fyi/c/depot/+/11586
Tested-by: BuildkiteCI
Reviewed-by: Connor Brewster <cbrewster@hey.com>
Diffstat (limited to 'tvix/store/src')
-rw-r--r--  tvix/store/src/import.rs     |  2
-rw-r--r--  tvix/store/src/nar/import.rs | 36
2 files changed, 16 insertions, 22 deletions
diff --git a/tvix/store/src/import.rs b/tvix/store/src/import.rs
index 88cebc8be613..2331fd77ea5e 100644
--- a/tvix/store/src/import.rs
+++ b/tvix/store/src/import.rs
@@ -114,7 +114,7 @@ pub async fn import_path_as_nar_ca<BS, DS, PS, P>(
 where
     P: AsRef<Path> + std::fmt::Debug,
     BS: BlobService + Clone,
-    DS: AsRef<dyn DirectoryService>,
+    DS: DirectoryService,
     PS: AsRef<dyn PathInfoService>,
 {
     let root_node = ingest_path(blob_service, directory_service, path.as_ref())
diff --git a/tvix/store/src/nar/import.rs b/tvix/store/src/nar/import.rs
index 6f4dcdea5d96..70f8137e8951 100644
--- a/tvix/store/src/nar/import.rs
+++ b/tvix/store/src/nar/import.rs
@@ -24,19 +24,19 @@ pub fn read_nar<R, BS, DS>(
 ) -> io::Result<castorepb::node::Node>
 where
     R: BufRead + Send,
-    BS: AsRef<dyn BlobService>,
-    DS: AsRef<dyn DirectoryService>,
+    BS: BlobService + Clone,
+    DS: DirectoryService,
 {
     let handle = tokio::runtime::Handle::current();
 
-    let directory_putter = directory_service.as_ref().put_multiple_start();
+    let directory_putter = directory_service.put_multiple_start();
 
     let node = nix_compat::nar::reader::open(r)?;
-    let (root_node, mut directory_putter, _) = process_node(
+    let (root_node, mut directory_putter) = process_node(
         handle.clone(),
         "".into(), // this is the root node, it has an empty name
         node,
-        &blob_service,
+        blob_service,
         directory_putter,
     )?;
 
@@ -80,9 +80,9 @@ fn process_node<BS>(
     node: nar::reader::Node,
     blob_service: BS,
     directory_putter: Box<dyn DirectoryPutter>,
-) -> io::Result<(castorepb::node::Node, Box<dyn DirectoryPutter>, BS)>
+) -> io::Result<(castorepb::node::Node, Box<dyn DirectoryPutter>)>
 where
-    BS: AsRef<dyn BlobService>,
+    BS: BlobService + Clone,
 {
     Ok(match node {
         nar::reader::Node::Symlink { target } => (
@@ -91,7 +91,6 @@ where
                 target: target.into(),
             }),
             directory_putter,
-            blob_service,
         ),
         nar::reader::Node::File { executable, reader } => (
             castorepb::node::Node::File(process_file_reader(
@@ -99,19 +98,17 @@ where
                 name,
                 reader,
                 executable,
-                &blob_service,
+                blob_service,
             )?),
             directory_putter,
-            blob_service,
         ),
         nar::reader::Node::Directory(dir_reader) => {
-            let (directory_node, directory_putter, blob_service_back) =
+            let (directory_node, directory_putter) =
                 process_dir_reader(handle, name, dir_reader, blob_service, directory_putter)?;
 
             (
                 castorepb::node::Node::Directory(directory_node),
                 directory_putter,
-                blob_service_back,
             )
         }
     })
@@ -127,13 +124,13 @@ fn process_file_reader<BS>(
     blob_service: BS,
 ) -> io::Result<castorepb::FileNode>
 where
-    BS: AsRef<dyn BlobService>,
+    BS: BlobService,
 {
     // store the length. If we read any other length, reading will fail.
     let expected_len = file_reader.len();
 
     // prepare writing a new blob.
-    let blob_writer = handle.block_on(async { blob_service.as_ref().open_write().await });
+    let blob_writer = handle.block_on(async { blob_service.open_write().await });
 
     // write the blob.
     let mut blob_writer = {
@@ -168,24 +165,22 @@ fn process_dir_reader<BS>(
     mut dir_reader: nar::reader::DirReader,
     blob_service: BS,
     directory_putter: Box<dyn DirectoryPutter>,
-) -> io::Result<(castorepb::DirectoryNode, Box<dyn DirectoryPutter>, BS)>
+) -> io::Result<(castorepb::DirectoryNode, Box<dyn DirectoryPutter>)>
 where
-    BS: AsRef<dyn BlobService>,
+    BS: BlobService + Clone,
 {
     let mut directory = castorepb::Directory::default();
 
     let mut directory_putter = directory_putter;
-    let mut blob_service = blob_service;
     while let Some(entry) = dir_reader.next()? {
-        let (node, directory_putter_back, blob_service_back) = process_node(
+        let (node, directory_putter_back) = process_node(
             handle.clone(),
             entry.name.into(),
             entry.node,
-            blob_service,
+            blob_service.clone(),
             directory_putter,
         )?;
 
-        blob_service = blob_service_back;
         directory_putter = directory_putter_back;
 
         match node {
@@ -213,7 +208,6 @@ where
             size: directory_size,
         },
         directory_putter,
-        blob_service,
     ))
 }