| author | Yureka <tvl@yuka.dev> | 2024-07-19T10·29+0200 |
|---|---|---|
| committer | clbot <clbot@tvl.fyi> | 2024-07-19T12·57+0000 |
| commit | 7ccdf6dad561a5be37bab3d199af87f0d71bafca (patch) | |
| tree | d94eed1c96135de887fffebdd3d1c8e1c35ac487 | |
| parent | 3c160731ed82484d2aaec29ba2364153a7cbe9c8 (diff) | |
test(tvix/castore/ObjectStoreBlobService): test correct chunking r/8371
Change-Id: I4a8f5cda2705560a6d52a2eb2be0dde35b784c8f
Reviewed-on: https://cl.tvl.fyi/c/depot/+/11982
Tested-by: BuildkiteCI
Autosubmit: yuka <yuka@yuka.dev>
Reviewed-by: flokli <flokli@flokli.de>
| -rw-r--r-- | tvix/castore/src/blobservice/object_store.rs | 29 |
|---|---|---|

1 file changed, 23 insertions, 6 deletions
```diff
diff --git a/tvix/castore/src/blobservice/object_store.rs b/tvix/castore/src/blobservice/object_store.rs
index 846f3643761d..d898ce19e56c 100644
--- a/tvix/castore/src/blobservice/object_store.rs
+++ b/tvix/castore/src/blobservice/object_store.rs
@@ -564,14 +564,20 @@ mod test {
     use super::{chunk_and_upload, default_avg_chunk_size};
     use crate::{
         blobservice::{BlobService, ObjectStoreBlobService},
-        fixtures::{BLOB_A, BLOB_A_DIGEST},
+        fixtures::{BLOB_A, BLOB_A_DIGEST, BLOB_B, BLOB_B_DIGEST},
     };
     use std::{io::Cursor, sync::Arc};
     use url::Url;
 
     /// Tests chunk_and_upload directly, bypassing the BlobWriter at open_write().
+    #[rstest::rstest]
+    #[case::a(&BLOB_A, &BLOB_A_DIGEST)]
+    #[case::b(&BLOB_B, &BLOB_B_DIGEST)]
     #[tokio::test]
-    async fn test_chunk_and_upload() {
+    async fn test_chunk_and_upload(
+        #[case] blob: &bytes::Bytes,
+        #[case] blob_digest: &crate::B3Digest,
+    ) {
         let (object_store, base_path) =
             object_store::parse_url(&Url::parse("memory:///").unwrap()).unwrap();
         let object_store: Arc<dyn object_store::ObjectStore> = Arc::from(object_store);
@@ -581,8 +587,8 @@ mod test {
             base_path,
         });
 
-        let blob_digest = chunk_and_upload(
-            &mut Cursor::new(BLOB_A.to_vec()),
+        let inserted_blob_digest = chunk_and_upload(
+            &mut Cursor::new(blob.to_vec()),
             object_store,
             object_store::path::Path::from("/"),
             1024 / 2,
@@ -592,9 +598,20 @@ mod test {
         .await
         .expect("chunk_and_upload succeeds");
 
-        assert_eq!(BLOB_A_DIGEST.clone(), blob_digest);
+        assert_eq!(blob_digest.clone(), inserted_blob_digest);
 
         // Now we should have the blob
-        assert!(blobsvc.has(&BLOB_A_DIGEST).await.unwrap());
+        assert!(blobsvc.has(blob_digest).await.unwrap());
+
+        // Check if it was chunked correctly
+        let chunks = blobsvc.chunks(blob_digest).await.unwrap().unwrap();
+        if blob.len() < 1024 / 2 {
+            // The blob is smaller than the min chunk size, it should have been inserted as a whole
+            assert!(chunks.is_empty());
+        } else if blob.len() > 1024 * 2 {
+            // The blob is larger than the max chunk size, make sure it was split up into at least
+            // two chunks
+            assert!(chunks.len() >= 2);
+        }
     }
 }
```
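For context, the `#[rstest::rstest]` / `#[case]` attributes generate one test per case, so the same body now runs against both fixtures, and the size-dependent branch at the end covers both chunking outcomes. Below is a minimal sketch, not part of the tvix codebase, of the expectation that branch encodes: the names `Chunking` and `expected_chunking` and the example blob sizes are illustrative assumptions, while the min/max chunk sizes (`1024 / 2` and `1024 * 2`) come from the `chunk_and_upload` call in the diff.

```rust
/// Hypothetical helper (not part of tvix) describing the chunking outcome the
/// updated test asserts for a blob of `blob_len` bytes, given the min/max
/// chunk sizes passed to chunk_and_upload.
#[derive(Debug, PartialEq)]
enum Chunking {
    Whole,       // blob below the min chunk size: stored as-is, empty chunk list
    Split,       // blob above the max chunk size: must yield at least two chunks
    Unspecified, // in-between sizes: the test makes no assertion
}

fn expected_chunking(blob_len: usize, min_chunk_size: usize, max_chunk_size: usize) -> Chunking {
    if blob_len < min_chunk_size {
        Chunking::Whole
    } else if blob_len > max_chunk_size {
        Chunking::Split
    } else {
        Chunking::Unspecified
    }
}

fn main() {
    // The sizes used by the test: min = 1024 / 2, max = 1024 * 2.
    // Example blob lengths here are made up for illustration.
    assert_eq!(expected_chunking(3, 512, 2048), Chunking::Whole);
    assert_eq!(expected_chunking(65536, 512, 2048), Chunking::Split);
    assert_eq!(expected_chunking(1024, 512, 2048), Chunking::Unspecified);
}
```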