author     Florian Klink <flokli@flokli.de>    2024-04-20T16·32+0300
committer  clbot <clbot@tvl.fyi>               2024-04-20T18·54+0000
commit     5fc403587ffb38635e767697f1c18e7e3559aea6 (patch)
tree       8779c1714af0145ca580de482a41e63821cd59f2 /tvix/castore
parent     01239a4f6f871733231c01d6126c3ffedcc504b7 (diff)
refactor(tvix/castore): ingest filesystem entries in parallel r/7987
Rather than carrying around a Future in IngestionEntry::Regular, simply carry the plain B3Digest.

Code reading through a non-seekable data stream has no choice but to read and upload blobs immediately, while code seeking through something seekable (like a filesystem) probably knows better than the consuming side what concurrency to pick when ingesting.

Our only seekable source implementation (the filesystem one) now does exactly that: it produces a stream of futures and uses [StreamExt::buffered] to process more than one of them concurrently. We still yield results in the same order as the input, to avoid shuffling things and violating the stream order.

This also cleans up walk_path_for_ingestion in castore/import, as well as ingest_dir_entries in glue/tvix_store_io.

Change-Id: I5eb70f3e1e372c74bcbfcf6b6e2653eba36e151d
Reviewed-on: https://cl.tvl.fyi/c/depot/+/11491
Autosubmit: flokli <flokli@flokli.de>
Reviewed-by: Connor Brewster <cbrewster@hey.com>
Tested-by: BuildkiteCI
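To illustrate the pattern described above (this is not code from the change itself), the sketch below shows a stream of futures driven through [StreamExt::buffered]: several futures run concurrently, yet results come out in the original input order, unlike buffer_unordered. The simulated delays, the buffer size of 3, and the use of the tokio runtime and futures crate are assumptions made purely for this example.

// Standalone sketch; assumes the `futures` and `tokio` (with macros + time) crates.
use std::time::Duration;

use futures::{stream, StreamExt};

#[tokio::main]
async fn main() {
    let results: Vec<u64> = stream::iter(1..=5u64)
        .map(|i| async move {
            // Later items sleep for less time, so they would complete first
            // if completion order were what determined the output order.
            tokio::time::sleep(Duration::from_millis(100 / i)).await;
            i
        })
        // Keep up to 3 futures ("uploads") in flight at once.
        .buffered(3)
        .collect()
        .await;

    // buffered() yields in input order despite out-of-order completion.
    assert_eq!(results, vec![1, 2, 3, 4, 5]);
}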
Diffstat (limited to 'tvix/castore')
-rw-r--r--  tvix/castore/src/import/fs.rs   79
-rw-r--r--  tvix/castore/src/import/mod.rs  17
2 files changed, 41 insertions, 55 deletions
diff --git a/tvix/castore/src/import/fs.rs b/tvix/castore/src/import/fs.rs
index a87a0278da4d..6709d4a127e9 100644
--- a/tvix/castore/src/import/fs.rs
+++ b/tvix/castore/src/import/fs.rs
@@ -1,8 +1,8 @@
+use futures::stream::BoxStream;
+use futures::StreamExt;
 use std::os::unix::fs::MetadataExt;
 use std::os::unix::fs::PermissionsExt;
 use std::path::Path;
-
-use futures::stream::BoxStream;
 use tracing::instrument;
 use walkdir::DirEntry;
 use walkdir::WalkDir;
@@ -22,6 +22,9 @@ use super::IngestionEntry;
 /// [DirectoryService]. It returns the root node or an error.
 ///
 /// It does not follow symlinks at the root, they will be ingested as actual symlinks.
+///
+/// This function will walk the filesystem using `walkdir` and will consume
+/// `O(#number of entries)` space.
 #[instrument(skip(blob_service, directory_service), fields(path), err)]
 pub async fn ingest_path<BS, DS, P>(
     blob_service: BS,
@@ -33,61 +36,51 @@ where
     BS: BlobService + Clone,
     DS: AsRef<dyn DirectoryService>,
 {
-    let entry_stream = walk_path_for_ingestion(blob_service, path.as_ref());
-    ingest_entries(directory_service, entry_stream).await
-}
-
-/// Walk the filesystem at a given path and returns a stream of ingestion entries.
-///
-/// This is how [`ingest_path`] assembles the set of entries to pass on [`ingest_entries`].
-/// This low-level function can be used if additional filtering or processing is required on the
-/// entries.
-///
-/// It does not follow symlinks at the root, they will be ingested as actual symlinks.
-///
-/// This function will walk the filesystem using `walkdir` and will consume
-/// `O(#number of entries)` space.
-#[instrument(fields(path), skip(blob_service))]
-fn walk_path_for_ingestion<'a, BS>(
-    blob_service: BS,
-    path: &'a Path,
-) -> BoxStream<'a, Result<IngestionEntry<'a>, Error>>
-where
-    BS: BlobService + Clone + 'a,
-{
-    let iter = WalkDir::new(path)
+    let iter = WalkDir::new(path.as_ref())
         .follow_links(false)
         .follow_root_links(false)
         .contents_first(true)
         .into_iter();
 
-    dir_entry_iter_to_ingestion_stream(blob_service, iter, path)
+    let entries = dir_entries_to_ingestion_stream(blob_service, iter, path.as_ref());
+    ingest_entries(directory_service, entries).await
 }
 
 /// Converts an iterator of [walkdir::DirEntry]s into a stream of ingestion entries.
 /// This can then be fed into [ingest_entries] to ingest all the entries into the castore.
 ///
+/// The produced stream is buffered, so uploads can happen concurrently.
+///
 /// The root is the [Path] in the filesystem that is being ingested into the castore.
-pub fn dir_entry_iter_to_ingestion_stream<'a, BS, I>(
+pub fn dir_entries_to_ingestion_stream<'a, BS, I>(
     blob_service: BS,
     iter: I,
     root: &'a Path,
-) -> BoxStream<'a, Result<IngestionEntry<'a>, Error>>
+) -> BoxStream<'a, Result<IngestionEntry, Error>>
 where
     BS: BlobService + Clone + 'a,
     I: Iterator<Item = Result<DirEntry, walkdir::Error>> + Send + 'a,
 {
     let prefix = root.parent().unwrap_or_else(|| Path::new(""));
 
-    let iter = iter.map(move |entry| match entry {
-        Ok(entry) => dir_entry_to_ingestion_entry(blob_service.clone(), &entry, prefix),
-        Err(error) => Err(Error::UnableToStat(
-            root.to_path_buf(),
-            error.into_io_error().expect("walkdir err must be some"),
-        )),
-    });
-
-    Box::pin(futures::stream::iter(iter))
+    Box::pin(
+        futures::stream::iter(iter)
+            .map(move |x| {
+                let blob_service = blob_service.clone();
+                async move {
+                    match x {
+                        Ok(dir_entry) => {
+                            dir_entry_to_ingestion_entry(blob_service, &dir_entry, prefix).await
+                        }
+                        Err(e) => Err(Error::UnableToStat(
+                            prefix.to_path_buf(),
+                            e.into_io_error().expect("walkdir err must be some"),
+                        )),
+                    }
+                }
+            })
+            .buffered(50),
+    )
 }
 
 /// Converts a [walkdir::DirEntry] into an [IngestionEntry], uploading blobs to the
@@ -95,13 +88,13 @@ where
 ///
 /// The prefix path is stripped from the path of each entry. This is usually the parent path
 /// of the path being ingested so that the last element of the stream only has one component.
-fn dir_entry_to_ingestion_entry<'a, BS>(
+pub async fn dir_entry_to_ingestion_entry<BS>(
     blob_service: BS,
     entry: &DirEntry,
     prefix: &Path,
-) -> Result<IngestionEntry<'a>, Error>
+) -> Result<IngestionEntry, Error>
 where
-    BS: BlobService + 'a,
+    BS: BlobService,
 {
     let file_type = entry.file_type();
 
@@ -123,11 +116,7 @@ where
             .metadata()
             .map_err(|e| Error::UnableToStat(entry.path().to_path_buf(), e.into()))?;
 
-        // TODO: In the future, for small files, hash right away and upload async.
-        let digest = Box::pin(upload_blob_at_path(
-            blob_service,
-            entry.path().to_path_buf(),
-        ));
+        let digest = upload_blob_at_path(blob_service, entry.path().to_path_buf()).await?;
 
         Ok(IngestionEntry::Regular {
             path,
diff --git a/tvix/castore/src/import/mod.rs b/tvix/castore/src/import/mod.rs
index 7e60cf44d903..09d5b8d06ea1 100644
--- a/tvix/castore/src/import/mod.rs
+++ b/tvix/castore/src/import/mod.rs
@@ -13,10 +13,9 @@ use crate::proto::DirectoryNode;
 use crate::proto::FileNode;
 use crate::proto::SymlinkNode;
 use crate::B3Digest;
-use futures::Future;
 use futures::{Stream, StreamExt};
 use std::fs::FileType;
-use std::pin::Pin;
+
 use tracing::Level;
 
 #[cfg(target_family = "unix")]
@@ -52,10 +51,10 @@ pub mod fs;
 ///
 /// On success, returns the root node.
 #[instrument(skip_all, ret(level = Level::TRACE), err)]
-pub async fn ingest_entries<'a, DS, S>(directory_service: DS, mut entries: S) -> Result<Node, Error>
+pub async fn ingest_entries<DS, S>(directory_service: DS, mut entries: S) -> Result<Node, Error>
 where
     DS: AsRef<dyn DirectoryService>,
-    S: Stream<Item = Result<IngestionEntry<'a>, Error>> + Send + std::marker::Unpin,
+    S: Stream<Item = Result<IngestionEntry, Error>> + Send + std::marker::Unpin,
 {
     // For a given path, this holds the [Directory] structs as they are populated.
     let mut directories: HashMap<PathBuf, Directory> = HashMap::default();
@@ -124,7 +123,7 @@ where
                 ..
             } => Node::File(FileNode {
                 name,
-                digest: digest.await?.into(),
+                digest: digest.to_owned().into(),
                 size: *size,
                 executable: *executable,
             }),
@@ -200,14 +199,12 @@ where
     Ok(digest)
 }
 
-type BlobFut<'a> = Pin<Box<dyn Future<Output = Result<B3Digest, Error>> + Send + 'a>>;
-
-pub enum IngestionEntry<'a> {
+pub enum IngestionEntry {
     Regular {
         path: PathBuf,
         size: u64,
         executable: bool,
-        digest: BlobFut<'a>,
+        digest: B3Digest,
     },
     Symlink {
         path: PathBuf,
@@ -222,7 +219,7 @@ pub enum IngestionEntry {
     },
 }
 
-impl<'a> IngestionEntry<'a> {
+impl IngestionEntry {
     fn path(&self) -> &Path {
         match self {
             IngestionEntry::Regular { path, .. } => path,