Diffstat (limited to 'tvix/castore/src/blobservice/object_store.rs')
 -rw-r--r--  tvix/castore/src/blobservice/object_store.rs | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/tvix/castore/src/blobservice/object_store.rs b/tvix/castore/src/blobservice/object_store.rs
index b688ebafc7f4..10874af64011 100644
--- a/tvix/castore/src/blobservice/object_store.rs
+++ b/tvix/castore/src/blobservice/object_store.rs
@@ -64,6 +64,7 @@ use super::{BlobReader, BlobService, BlobWriter, ChunkedReader};
 /// all keys stored so far, but no promises ;-)
 #[derive(Clone)]
 pub struct ObjectStoreBlobService {
+    instance_name: String,
     object_store: Arc<dyn ObjectStore>,
     base_path: Path,
 
@@ -92,7 +93,7 @@ fn derive_chunk_path(base_path: &Path, digest: &B3Digest) -> Path {
 
 #[async_trait]
 impl BlobService for ObjectStoreBlobService {
-    #[instrument(skip_all, ret(level = Level::TRACE), err, fields(blob.digest=%digest))]
+    #[instrument(skip_all, ret(level = Level::TRACE), err, fields(blob.digest=%digest, instance_name=%self.instance_name))]
     async fn has(&self, digest: &B3Digest) -> io::Result<bool> {
         // TODO: clarify if this should work for chunks or not, and explicitly
         // document in the proto docs.
@@ -112,7 +113,7 @@ impl BlobService for ObjectStoreBlobService {
         }
     }
 
-    #[instrument(skip_all, err, fields(blob.digest=%digest))]
+    #[instrument(skip_all, err, fields(blob.digest=%digest, instance_name=%self.instance_name))]
     async fn open_read(&self, digest: &B3Digest) -> io::Result<Option<Box<dyn BlobReader>>> {
         // handle reading the empty blob.
         if digest.as_slice() == blake3::hash(b"").as_bytes() {
@@ -169,7 +170,7 @@ impl BlobService for ObjectStoreBlobService {
         }
     }
 
-    #[instrument(skip_all)]
+    #[instrument(skip_all, fields(instance_name=%self.instance_name))]
     async fn open_write(&self) -> Box<dyn BlobWriter> {
         // ObjectStoreBlobWriter implements AsyncWrite, but all the chunking
         // needs an AsyncRead, so we create a pipe here.
@@ -192,7 +193,7 @@ impl BlobService for ObjectStoreBlobService {
         })
     }
 
-    #[instrument(skip_all, err, fields(blob.digest=%digest))]
+    #[instrument(skip_all, err, fields(blob.digest=%digest, instance_name=%self.instance_name))]
     async fn chunks(&self, digest: &B3Digest) -> io::Result<Option<Vec<ChunkMeta>>> {
         match self
             .object_store
@@ -294,7 +295,7 @@ impl ServiceBuilder for ObjectStoreBlobServiceConfig {
     type Output = dyn BlobService;
     async fn build<'a>(
         &'a self,
-        _instance_name: &str,
+        instance_name: &str,
         _context: &CompositionContext,
     ) -> Result<Arc<dyn BlobService>, Box<dyn std::error::Error + Send + Sync + 'static>> {
         let (object_store, path) = object_store::parse_url_opts(
@@ -302,6 +303,7 @@ impl ServiceBuilder for ObjectStoreBlobServiceConfig {
             &self.object_store_options,
         )?;
         Ok(Arc::new(ObjectStoreBlobService {
+            instance_name: instance_name.to_string(),
             object_store: Arc::new(object_store),
             base_path: path,
             avg_chunk_size: self.avg_chunk_size,
@@ -582,6 +584,7 @@ mod test {
             object_store::parse_url(&Url::parse("memory:///").unwrap()).unwrap();
         let object_store: Arc<dyn object_store::ObjectStore> = Arc::from(object_store);
         let blobsvc = Arc::new(ObjectStoreBlobService {
+            instance_name: "test".into(),
             object_store: object_store.clone(),
             avg_chunk_size: default_avg_chunk_size(),
             base_path,
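
Not part of the change above, but a minimal sketch of the tracing mechanism this commit relies on: a field recorded through #[instrument(fields(...))] with the % sigil is attached to the span via its Display impl, so every event emitted inside the instrumented method carries it. The names below (Service, handle) are hypothetical stand-ins; only the tracing and tracing-subscriber crates are assumed as dependencies.

use tracing::{instrument, Level};

// Hypothetical stand-in for ObjectStoreBlobService.
struct Service {
    instance_name: String,
}

impl Service {
    // Mirrors the pattern added in the diff: skip all arguments,
    // but record which configured instance handled the call.
    #[instrument(skip_all, fields(instance_name = %self.instance_name))]
    fn handle(&self) {
        tracing::trace!("handling request");
    }
}

fn main() {
    // Print spans/events up to TRACE so the recorded field is visible.
    tracing_subscriber::fmt()
        .with_max_level(Level::TRACE)
        .init();

    let svc = Service {
        instance_name: "default".to_string(),
    };
    // The emitted event is scoped under handle{instance_name="default"},
    // which is what lets logs distinguish multiple configured blob services.
    svc.handle();
}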