 tvix/castore/src/blobservice/sled.rs                  | 8 ++++----
 tvix/castore/src/digests.rs                           | 5 ++---
 tvix/castore/src/directoryservice/sled.rs             | 4 ++--
 tvix/castore/src/proto/grpc_blobservice_wrapper.rs    | 3 +--
 tvix/castore/src/proto/tests/grpc_blobservice.rs      | 2 +-
 tvix/castore/src/proto/tests/grpc_directoryservice.rs | 6 +++---
 tvix/store/src/fs/tests.rs                            | 8 ++++----
 7 files changed, 17 insertions(+), 19 deletions(-)
diff --git a/tvix/castore/src/blobservice/sled.rs b/tvix/castore/src/blobservice/sled.rs
index 209f0b76fc7a..101ae78698fd 100644
--- a/tvix/castore/src/blobservice/sled.rs
+++ b/tvix/castore/src/blobservice/sled.rs
@@ -61,7 +61,7 @@ impl BlobService for SledBlobService {
 
     #[instrument(skip(self), fields(blob.digest=%digest))]
     async fn has(&self, digest: &B3Digest) -> Result<bool, Error> {
-        match self.db.contains_key(digest.to_vec()) {
+        match self.db.contains_key(digest.as_slice()) {
             Ok(has) => Ok(has),
             Err(e) => Err(Error::StorageError(e.to_string())),
         }
@@ -69,7 +69,7 @@ impl BlobService for SledBlobService {
 
     #[instrument(skip(self), fields(blob.digest=%digest))]
     async fn open_read(&self, digest: &B3Digest) -> Result<Option<Box<dyn BlobReader>>, Error> {
-        match self.db.get(digest.to_vec()) {
+        match self.db.get(digest.as_slice()) {
             Ok(None) => Ok(None),
             Ok(Some(data)) => Ok(Some(Box::new(Cursor::new(data[..].to_vec())))),
             Err(e) => Err(Error::StorageError(e.to_string())),
@@ -158,12 +158,12 @@ impl BlobWriter for SledBlobWriter {
             let digest: B3Digest = hasher.finalize().as_bytes().into();
 
             // Only insert if the blob doesn't already exist.
-            if !self.db.contains_key(digest.to_vec()).map_err(|e| {
+            if !self.db.contains_key(digest.as_slice()).map_err(|e| {
                 Error::StorageError(format!("Unable to check if we have blob {}: {}", digest, e))
             })? {
                 // put buf in there. This will move buf out.
                 self.db
-                    .insert(digest.to_vec(), buf)
+                    .insert(digest.as_slice(), buf)
                     .map_err(|e| Error::StorageError(format!("unable to insert blob: {}", e)))?;
             }
 
diff --git a/tvix/castore/src/digests.rs b/tvix/castore/src/digests.rs
index a9e810aac5dd..137ed2669a8f 100644
--- a/tvix/castore/src/digests.rs
+++ b/tvix/castore/src/digests.rs
@@ -15,9 +15,8 @@ pub enum Error {
 pub const B3_LEN: usize = 32;
 
 impl B3Digest {
-    // returns a copy of the inner [Vec<u8>].
-    pub fn to_vec(&self) -> Vec<u8> {
-        self.0.to_vec()
+    pub fn as_slice(&self) -> &[u8] {
+        &self.0[..]
     }
 }
 
diff --git a/tvix/castore/src/directoryservice/sled.rs b/tvix/castore/src/directoryservice/sled.rs
index 0dc5496803cb..041908f9f956 100644
--- a/tvix/castore/src/directoryservice/sled.rs
+++ b/tvix/castore/src/directoryservice/sled.rs
@@ -64,7 +64,7 @@ impl DirectoryService for SledDirectoryService {
 
     #[instrument(skip(self, digest), fields(directory.digest = %digest))]
     async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
-        match self.db.get(digest.to_vec()) {
+        match self.db.get(digest.as_slice()) {
             // The directory was not found, return
             Ok(None) => Ok(None),
 
@@ -114,7 +114,7 @@ impl DirectoryService for SledDirectoryService {
             )));
         }
         // store it
-        let result = self.db.insert(digest.to_vec(), directory.encode_to_vec());
+        let result = self.db.insert(digest.as_slice(), directory.encode_to_vec());
         if let Err(e) = result {
             return Err(Error::StorageError(e.to_string()));
         }
diff --git a/tvix/castore/src/proto/grpc_blobservice_wrapper.rs b/tvix/castore/src/proto/grpc_blobservice_wrapper.rs
index e7092bec1481..a37cc299b8a3 100644
--- a/tvix/castore/src/proto/grpc_blobservice_wrapper.rs
+++ b/tvix/castore/src/proto/grpc_blobservice_wrapper.rs
@@ -167,8 +167,7 @@ impl super::blob_service_server::BlobService for GRPCBlobServiceWrapper {
                 warn!("error closing stream: {}", e);
                 Status::internal("error closing stream")
             })
-            .await?
-            .to_vec();
+            .await?;
 
         Ok(Response::new(super::PutBlobResponse {
             digest: digest.into(),
diff --git a/tvix/castore/src/proto/tests/grpc_blobservice.rs b/tvix/castore/src/proto/tests/grpc_blobservice.rs
index a7816bd1e961..fb202b7d8a51 100644
--- a/tvix/castore/src/proto/tests/grpc_blobservice.rs
+++ b/tvix/castore/src/proto/tests/grpc_blobservice.rs
@@ -54,7 +54,7 @@ async fn put_read_stat() {
         .expect("must succeed")
         .into_inner();
 
-    assert_eq!(BLOB_A_DIGEST.to_vec(), put_resp.digest);
+    assert_eq!(BLOB_A_DIGEST.as_slice(), put_resp.digest);
 
     // Stat for the digest of A.
     // We currently don't ask for more granular chunking data, as we don't
diff --git a/tvix/castore/src/proto/tests/grpc_directoryservice.rs b/tvix/castore/src/proto/tests/grpc_directoryservice.rs
index e443b4b1916a..1b522472be5b 100644
--- a/tvix/castore/src/proto/tests/grpc_directoryservice.rs
+++ b/tvix/castore/src/proto/tests/grpc_directoryservice.rs
@@ -74,7 +74,7 @@ async fn put_get() {
     };
 
     // the sent root_digest should match the calculated digest
-    assert_eq!(put_resp.root_digest, DIRECTORY_A.digest().to_vec());
+    assert_eq!(put_resp.root_digest, DIRECTORY_A.digest().as_slice());
 
     // get it back
     let items = get_directories(
@@ -117,7 +117,7 @@ async fn put_get_multiple() {
             .into_inner()
     };
 
-    assert_eq!(DIRECTORY_B.digest().to_vec(), put_resp.root_digest);
+    assert_eq!(DIRECTORY_B.digest().as_slice(), put_resp.root_digest);
 
     // now, request b, first in non-recursive mode.
     let items = get_directories(
@@ -167,7 +167,7 @@ async fn put_get_dedup() {
     };
 
     assert_eq!(
-        DIRECTORY_C.digest().to_vec(),
+        DIRECTORY_C.digest().as_slice(),
         put_resp.into_inner().root_digest
     );
 
diff --git a/tvix/store/src/fs/tests.rs b/tvix/store/src/fs/tests.rs
index 25a3df1df145..7b69d2fc0c14 100644
--- a/tvix/store/src/fs/tests.rs
+++ b/tvix/store/src/fs/tests.rs
@@ -199,8 +199,8 @@ async fn populate_directory_with_keep(
     // upload empty blob
     let mut bw = blob_service.open_write().await;
     assert_eq!(
-        fixtures::EMPTY_BLOB_DIGEST.to_vec(),
-        bw.close().await.expect("must succeed closing").to_vec(),
+        fixtures::EMPTY_BLOB_DIGEST.as_slice(),
+        bw.close().await.expect("must succeed closing").as_slice(),
     );
 
     // upload directory
@@ -282,8 +282,8 @@ async fn populate_directory_complicated(
     // upload empty blob
     let mut bw = blob_service.open_write().await;
    assert_eq!(
-        fixtures::EMPTY_BLOB_DIGEST.to_vec(),
-        bw.close().await.expect("must succeed closing").to_vec(),
+        fixtures::EMPTY_BLOB_DIGEST.as_slice(),
+        bw.close().await.expect("must succeed closing").as_slice(),
    );
 
     // upload inner directory
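The pattern the whole change converges on: B3Digest previously exposed only to_vec(), which copies the 32 hash bytes into a fresh Vec<u8> even when the caller only needs to read them (for example as a sled key, since sled's get/contains_key/insert accept any AsRef<[u8]>). The new as_slice() lets such call sites borrow the bytes instead. Below is a minimal, self-contained sketch of that pattern, not the actual tvix or sled code: Digest and contains_key here are simplified stand-ins.

// Simplified stand-in for castore's B3Digest: a newtype over the raw
// 32-byte BLAKE3 hash. The real inner container differs, but the
// borrowing pattern is the same.
pub struct Digest([u8; 32]);

impl Digest {
    // Borrow the digest bytes instead of copying them into a new Vec<u8>.
    pub fn as_slice(&self) -> &[u8] {
        &self.0[..]
    }
}

// Stand-in for a sled-style lookup: the key parameter is generic over
// AsRef<[u8]>, so a borrowed &[u8] satisfies it just as well as an owned Vec<u8>.
fn contains_key(key: impl AsRef<[u8]>) -> bool {
    !key.as_ref().is_empty()
}

fn main() {
    let digest = Digest([0u8; 32]);

    // Before: allocate a copy of the digest just to use it as a key.
    //     contains_key(digest.to_vec())
    // After: pass a borrowed slice; no allocation.
    let found = contains_key(digest.as_slice());
    println!("found: {found}");
}

Where an owned copy really is needed, a caller can still write an explicit digest.as_slice().to_vec(), which keeps the allocation visible at the call site.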