From e9db0449e700154baee1470f914c3f09089442d0 Mon Sep 17 00:00:00 2001 From: Florian Klink Date: Sat, 20 Apr 2024 15:01:49 +0300 Subject: refactor(tvix/castore/import): make module, split off fs and error Move error types and filesystem-specific functions to a separate file, and keep the fs:: namespace in public exports. Change-Id: I5e9e83ad78d9aea38553fafc293d3e4f8c31a8c1 Reviewed-on: https://cl.tvl.fyi/c/depot/+/11486 Tested-by: BuildkiteCI Reviewed-by: Connor Brewster Autosubmit: flokli --- tvix/castore/src/import.rs | 370 --------------------------------------- tvix/castore/src/import/error.rs | 39 +++++ tvix/castore/src/import/fs.rs | 143 +++++++++++++++ tvix/castore/src/import/mod.rs | 216 +++++++++++++++++++++++ tvix/castore/src/tests/import.rs | 2 +- tvix/glue/src/tvix_store_io.rs | 2 +- tvix/store/src/bin/tvix-store.rs | 2 +- tvix/store/src/import.rs | 6 +- 8 files changed, 404 insertions(+), 376 deletions(-) delete mode 100644 tvix/castore/src/import.rs create mode 100644 tvix/castore/src/import/error.rs create mode 100644 tvix/castore/src/import/fs.rs create mode 100644 tvix/castore/src/import/mod.rs (limited to 'tvix') diff --git a/tvix/castore/src/import.rs b/tvix/castore/src/import.rs deleted file mode 100644 index a07cf71f6c41..000000000000 --- a/tvix/castore/src/import.rs +++ /dev/null @@ -1,370 +0,0 @@ -use crate::blobservice::BlobService; -use crate::directoryservice::DirectoryPutter; -use crate::directoryservice::DirectoryService; -use crate::proto::node::Node; -use crate::proto::Directory; -use crate::proto::DirectoryNode; -use crate::proto::FileNode; -use crate::proto::SymlinkNode; -use crate::B3Digest; -use crate::Error as CastoreError; -use futures::stream::BoxStream; -use futures::Future; -use futures::{Stream, StreamExt}; -use std::fs::FileType; -use std::os::unix::fs::MetadataExt; -use std::pin::Pin; -use tracing::Level; - -#[cfg(target_family = "unix")] -use std::os::unix::ffi::OsStrExt; - -use std::{ - collections::HashMap, - fmt::Debug, - os::unix::prelude::PermissionsExt, - path::{Path, PathBuf}, -}; -use tracing::instrument; -use walkdir::DirEntry; -use walkdir::WalkDir; - -#[derive(Debug, thiserror::Error)] -pub enum Error { - #[error("failed to upload directory at {0}: {1}")] - UploadDirectoryError(PathBuf, CastoreError), - - #[error("invalid encoding encountered for entry {0:?}")] - InvalidEncoding(PathBuf), - - #[error("unable to stat {0}: {1}")] - UnableToStat(PathBuf, std::io::Error), - - #[error("unable to open {0}: {1}")] - UnableToOpen(PathBuf, std::io::Error), - - #[error("unable to read {0}: {1}")] - UnableToRead(PathBuf, std::io::Error), - - #[error("unsupported file {0} type: {1:?}")] - UnsupportedFileType(PathBuf, FileType), -} - -impl From for Error { - fn from(value: CastoreError) -> Self { - match value { - CastoreError::InvalidRequest(_) => panic!("tvix bug"), - CastoreError::StorageError(_) => panic!("error"), - } - } -} - -impl From for std::io::Error { - fn from(value: Error) -> Self { - std::io::Error::new(std::io::ErrorKind::Other, value) - } -} - -/// Walk the filesystem at a given path and returns a stream of ingestion entries. -/// -/// This is how [`ingest_path`] assembles the set of entries to pass on [`ingest_entries`]. -/// This low-level function can be used if additional filtering or processing is required on the -/// entries. -/// -/// It does not follow symlinks at the root, they will be ingested as actual symlinks. 
-/// -/// This function will walk the filesystem using `walkdir` and will consume -/// `O(#number of entries)` space. -#[instrument(fields(path), skip(blob_service))] -fn walk_path_for_ingestion<'a, BS>( - blob_service: BS, - path: &'a Path, -) -> BoxStream<'a, Result, Error>> -where - BS: BlobService + Clone + 'a, -{ - let iter = WalkDir::new(path) - .follow_links(false) - .follow_root_links(false) - .contents_first(true) - .into_iter(); - - dir_entry_iter_to_ingestion_stream(blob_service, iter, path) -} - -/// Converts an iterator of [walkdir::DirEntry]s into a stream of ingestion entries. -/// This can then be fed into [ingest_entries] to ingest all the entries into the castore. -/// -/// The root is the [Path] in the filesystem that is being ingested into the castore. -pub fn dir_entry_iter_to_ingestion_stream<'a, BS, I>( - blob_service: BS, - iter: I, - root: &'a Path, -) -> BoxStream<'a, Result, Error>> -where - BS: BlobService + Clone + 'a, - I: Iterator> + Send + 'a, -{ - let prefix = root.parent().unwrap_or_else(|| Path::new("")); - - let iter = iter.map(move |entry| match entry { - Ok(entry) => dir_entry_to_ingestion_entry(blob_service.clone(), &entry, prefix), - Err(error) => Err(Error::UnableToStat( - root.to_path_buf(), - error.into_io_error().expect("walkdir err must be some"), - )), - }); - - Box::pin(futures::stream::iter(iter)) -} - -/// Converts a [walkdir::DirEntry] into an [IngestionEntry], uploading blobs to the -/// provided [BlobService]. -/// -/// The prefix path is stripped from the path of each entry. This is usually the parent path -/// of the path being ingested so that the last element of the stream only has one component. -fn dir_entry_to_ingestion_entry<'a, BS>( - blob_service: BS, - entry: &DirEntry, - prefix: &Path, -) -> Result, Error> -where - BS: BlobService + 'a, -{ - let file_type = entry.file_type(); - - let path = entry - .path() - .strip_prefix(prefix) - .expect("Tvix bug: failed to strip root path prefix") - .to_path_buf(); - - if file_type.is_dir() { - Ok(IngestionEntry::Dir { path }) - } else if file_type.is_symlink() { - let target = std::fs::read_link(entry.path()) - .map_err(|e| Error::UnableToStat(entry.path().to_path_buf(), e))?; - - Ok(IngestionEntry::Symlink { path, target }) - } else if file_type.is_file() { - let metadata = entry - .metadata() - .map_err(|e| Error::UnableToStat(entry.path().to_path_buf(), e.into()))?; - - // TODO: In the future, for small files, hash right away and upload async. - let digest = Box::pin(upload_blob_at_path( - blob_service, - entry.path().to_path_buf(), - )); - - Ok(IngestionEntry::Regular { - path, - size: metadata.size(), - // If it's executable by the user, it'll become executable. - // This matches nix's dump() function behaviour. - executable: metadata.permissions().mode() & 64 != 0, - digest, - }) - } else { - Ok(IngestionEntry::Unknown { path, file_type }) - } -} - -/// Uploads the file at the provided [Path] the the [BlobService]. 
-#[instrument(skip(blob_service), fields(path), err)] -async fn upload_blob_at_path(blob_service: BS, path: PathBuf) -> Result -where - BS: BlobService, -{ - let mut file = match tokio::fs::File::open(&path).await { - Ok(file) => file, - Err(e) => return Err(Error::UnableToRead(path, e)), - }; - - let mut writer = blob_service.open_write().await; - - if let Err(e) = tokio::io::copy(&mut file, &mut writer).await { - return Err(Error::UnableToRead(path, e)); - }; - - let digest = writer - .close() - .await - .map_err(|e| Error::UnableToRead(path, e))?; - - Ok(digest) -} - -/// Ingests the contents at a given path into the tvix store, interacting with a [BlobService] and -/// [DirectoryService]. It returns the root node or an error. -/// -/// It does not follow symlinks at the root, they will be ingested as actual symlinks. -#[instrument(skip(blob_service, directory_service), fields(path), err)] -pub async fn ingest_path( - blob_service: BS, - directory_service: DS, - path: P, -) -> Result -where - P: AsRef + std::fmt::Debug, - BS: BlobService + Clone, - DS: AsRef, -{ - let entry_stream = walk_path_for_ingestion(blob_service, path.as_ref()); - ingest_entries(directory_service, entry_stream).await -} - -/// Ingests elements from the given stream of [IngestionEntry] into a the passed [DirectoryService]. -/// -/// The stream must have the following invariants: -/// - All children entries must come before their parents. -/// - The last entry must be the root node which must have a single path component. -/// - Every entry should have a unique path. -/// -/// Internally we maintain a [HashMap] of [PathBuf] to partially populated [Directory] at that -/// path. Once we receive an [IngestionEntry] for the directory itself, we remove it from the -/// map and upload it to the [DirectoryService] through a lazily created [DirectoryPutter]. -/// -/// On success, returns the root node. -#[instrument(skip_all, ret(level = Level::TRACE), err)] -pub async fn ingest_entries<'a, DS, S>(directory_service: DS, mut entries: S) -> Result -where - DS: AsRef, - S: Stream, Error>> + Send + std::marker::Unpin, -{ - // For a given path, this holds the [Directory] structs as they are populated. - let mut directories: HashMap = HashMap::default(); - let mut maybe_directory_putter: Option> = None; - - let root_node = loop { - let mut entry = entries - .next() - .await - // The last entry of the stream must have 1 path component, after which - // we break the loop manually. - .expect("Tvix bug: unexpected end of stream")?; - - let name = entry - .path() - .file_name() - // If this is the root node, it will have an empty name. - .unwrap_or_default() - .as_bytes() - .to_owned() - .into(); - - let node = match &mut entry { - IngestionEntry::Dir { .. } => { - // If the entry is a directory, we traversed all its children (and - // populated it in `directories`). - // If we don't have it in there, it's an empty directory. - let directory = directories - .remove(entry.path()) - // In that case, it contained no children - .unwrap_or_default(); - - let directory_size = directory.size(); - let directory_digest = directory.digest(); - - // Use the directory_putter to upload the directory. - // If we don't have one yet (as that's the first one to upload), - // initialize the putter. 
- maybe_directory_putter - .get_or_insert_with(|| directory_service.as_ref().put_multiple_start()) - .put(directory) - .await?; - - Node::Directory(DirectoryNode { - name, - digest: directory_digest.into(), - size: directory_size, - }) - } - IngestionEntry::Symlink { ref target, .. } => Node::Symlink(SymlinkNode { - name, - target: target.as_os_str().as_bytes().to_owned().into(), - }), - IngestionEntry::Regular { - size, - executable, - digest, - .. - } => Node::File(FileNode { - name, - digest: digest.await?.into(), - size: *size, - executable: *executable, - }), - IngestionEntry::Unknown { path, file_type } => { - return Err(Error::UnsupportedFileType(path.clone(), *file_type)); - } - }; - - if entry.path().components().count() == 1 { - break node; - } - - // record node in parent directory, creating a new [Directory] if not there yet. - directories - .entry(entry.path().parent().unwrap().to_path_buf()) - .or_default() - .add(node); - }; - - // if there were directories uploaded, make sure we flush the putter, so - // they're all persisted to the backend. - if let Some(mut directory_putter) = maybe_directory_putter { - let root_directory_digest = directory_putter.close().await?; - - #[cfg(debug_assertions)] - { - if let Node::Directory(directory_node) = &root_node { - debug_assert_eq!( - root_directory_digest, - directory_node - .digest - .to_vec() - .try_into() - .expect("invalid digest len") - ) - } else { - unreachable!("Tvix bug: directory putter initialized but no root directory node"); - } - } - }; - - Ok(root_node) -} - -type BlobFut<'a> = Pin> + Send + 'a>>; - -pub enum IngestionEntry<'a> { - Regular { - path: PathBuf, - size: u64, - executable: bool, - digest: BlobFut<'a>, - }, - Symlink { - path: PathBuf, - target: PathBuf, - }, - Dir { - path: PathBuf, - }, - Unknown { - path: PathBuf, - file_type: FileType, - }, -} - -impl<'a> IngestionEntry<'a> { - fn path(&self) -> &Path { - match self { - IngestionEntry::Regular { path, .. } => path, - IngestionEntry::Symlink { path, .. } => path, - IngestionEntry::Dir { path } => path, - IngestionEntry::Unknown { path, .. 
} => path, - } - } -} diff --git a/tvix/castore/src/import/error.rs b/tvix/castore/src/import/error.rs new file mode 100644 index 000000000000..15dd0664deaa --- /dev/null +++ b/tvix/castore/src/import/error.rs @@ -0,0 +1,39 @@ +use std::{fs::FileType, path::PathBuf}; + +use crate::Error as CastoreError; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("failed to upload directory at {0}: {1}")] + UploadDirectoryError(PathBuf, CastoreError), + + #[error("invalid encoding encountered for entry {0:?}")] + InvalidEncoding(PathBuf), + + #[error("unable to stat {0}: {1}")] + UnableToStat(PathBuf, std::io::Error), + + #[error("unable to open {0}: {1}")] + UnableToOpen(PathBuf, std::io::Error), + + #[error("unable to read {0}: {1}")] + UnableToRead(PathBuf, std::io::Error), + + #[error("unsupported file {0} type: {1:?}")] + UnsupportedFileType(PathBuf, FileType), +} + +impl From for Error { + fn from(value: CastoreError) -> Self { + match value { + CastoreError::InvalidRequest(_) => panic!("tvix bug"), + CastoreError::StorageError(_) => panic!("error"), + } + } +} + +impl From for std::io::Error { + fn from(value: Error) -> Self { + std::io::Error::new(std::io::ErrorKind::Other, value) + } +} diff --git a/tvix/castore/src/import/fs.rs b/tvix/castore/src/import/fs.rs new file mode 100644 index 000000000000..a87a0278da4d --- /dev/null +++ b/tvix/castore/src/import/fs.rs @@ -0,0 +1,143 @@ +use std::os::unix::fs::MetadataExt; +use std::os::unix::fs::PermissionsExt; +use std::path::Path; + +use futures::stream::BoxStream; +use tracing::instrument; +use walkdir::DirEntry; +use walkdir::WalkDir; + +use crate::blobservice::BlobService; +use crate::directoryservice::DirectoryService; +use crate::proto::node::Node; + +use super::ingest_entries; +use super::upload_blob_at_path; +use super::Error; +use super::IngestionEntry; + +///! Imports that deal with a real filesystem. + +/// Ingests the contents at a given path into the tvix store, interacting with a [BlobService] and +/// [DirectoryService]. It returns the root node or an error. +/// +/// It does not follow symlinks at the root, they will be ingested as actual symlinks. +#[instrument(skip(blob_service, directory_service), fields(path), err)] +pub async fn ingest_path( + blob_service: BS, + directory_service: DS, + path: P, +) -> Result +where + P: AsRef + std::fmt::Debug, + BS: BlobService + Clone, + DS: AsRef, +{ + let entry_stream = walk_path_for_ingestion(blob_service, path.as_ref()); + ingest_entries(directory_service, entry_stream).await +} + +/// Walk the filesystem at a given path and returns a stream of ingestion entries. +/// +/// This is how [`ingest_path`] assembles the set of entries to pass on [`ingest_entries`]. +/// This low-level function can be used if additional filtering or processing is required on the +/// entries. +/// +/// It does not follow symlinks at the root, they will be ingested as actual symlinks. +/// +/// This function will walk the filesystem using `walkdir` and will consume +/// `O(#number of entries)` space. +#[instrument(fields(path), skip(blob_service))] +fn walk_path_for_ingestion<'a, BS>( + blob_service: BS, + path: &'a Path, +) -> BoxStream<'a, Result, Error>> +where + BS: BlobService + Clone + 'a, +{ + let iter = WalkDir::new(path) + .follow_links(false) + .follow_root_links(false) + .contents_first(true) + .into_iter(); + + dir_entry_iter_to_ingestion_stream(blob_service, iter, path) +} + +/// Converts an iterator of [walkdir::DirEntry]s into a stream of ingestion entries. 
+/// This can then be fed into [ingest_entries] to ingest all the entries into the castore. +/// +/// The root is the [Path] in the filesystem that is being ingested into the castore. +pub fn dir_entry_iter_to_ingestion_stream<'a, BS, I>( + blob_service: BS, + iter: I, + root: &'a Path, +) -> BoxStream<'a, Result, Error>> +where + BS: BlobService + Clone + 'a, + I: Iterator> + Send + 'a, +{ + let prefix = root.parent().unwrap_or_else(|| Path::new("")); + + let iter = iter.map(move |entry| match entry { + Ok(entry) => dir_entry_to_ingestion_entry(blob_service.clone(), &entry, prefix), + Err(error) => Err(Error::UnableToStat( + root.to_path_buf(), + error.into_io_error().expect("walkdir err must be some"), + )), + }); + + Box::pin(futures::stream::iter(iter)) +} + +/// Converts a [walkdir::DirEntry] into an [IngestionEntry], uploading blobs to the +/// provided [BlobService]. +/// +/// The prefix path is stripped from the path of each entry. This is usually the parent path +/// of the path being ingested so that the last element of the stream only has one component. +fn dir_entry_to_ingestion_entry<'a, BS>( + blob_service: BS, + entry: &DirEntry, + prefix: &Path, +) -> Result, Error> +where + BS: BlobService + 'a, +{ + let file_type = entry.file_type(); + + let path = entry + .path() + .strip_prefix(prefix) + .expect("Tvix bug: failed to strip root path prefix") + .to_path_buf(); + + if file_type.is_dir() { + Ok(IngestionEntry::Dir { path }) + } else if file_type.is_symlink() { + let target = std::fs::read_link(entry.path()) + .map_err(|e| Error::UnableToStat(entry.path().to_path_buf(), e))?; + + Ok(IngestionEntry::Symlink { path, target }) + } else if file_type.is_file() { + let metadata = entry + .metadata() + .map_err(|e| Error::UnableToStat(entry.path().to_path_buf(), e.into()))?; + + // TODO: In the future, for small files, hash right away and upload async. + let digest = Box::pin(upload_blob_at_path( + blob_service, + entry.path().to_path_buf(), + )); + + Ok(IngestionEntry::Regular { + path, + size: metadata.size(), + // If it's executable by the user, it'll become executable. + // This matches nix's dump() function behaviour. + executable: metadata.permissions().mode() & 64 != 0, + digest, + }) + } else { + Ok(IngestionEntry::Unknown { path, file_type }) + } +} diff --git a/tvix/castore/src/import/mod.rs b/tvix/castore/src/import/mod.rs new file mode 100644 index 000000000000..d2b1ee9ff70f --- /dev/null +++ b/tvix/castore/src/import/mod.rs @@ -0,0 +1,216 @@ +//! Deals with ingesting contents into castore. +//! The main library function here is [ingest_entries], receiving a stream of +//! [IngestionEntry]. +//! +//! Specific implementations, such as ingesting from the filesystem, live in +//! child modules. + +use crate::blobservice::BlobService; +use crate::directoryservice::DirectoryPutter; +use crate::directoryservice::DirectoryService; +use crate::proto::node::Node; +use crate::proto::Directory; +use crate::proto::DirectoryNode; +use crate::proto::FileNode; +use crate::proto::SymlinkNode; +use crate::B3Digest; +use futures::Future; +use futures::{Stream, StreamExt}; +use std::fs::FileType; +use std::pin::Pin; +use tracing::Level; + +#[cfg(target_family = "unix")] +use std::os::unix::ffi::OsStrExt; + +use std::{ + collections::HashMap, + path::{Path, PathBuf}, +}; +use tracing::instrument; + +mod error; +pub use error::Error; + +pub mod fs; + +/// Ingests [IngestionEntry] from the given stream into a the passed [DirectoryService]. +/// On success, returns the root [Node]. 
+/// +/// The stream must have the following invariants: +/// - All children entries must come before their parents. +/// - The last entry must be the root node which must have a single path component. +/// - Every entry should have a unique path. +/// +/// Internally we maintain a [HashMap] of [PathBuf] to partially populated [Directory] at that +/// path. Once we receive an [IngestionEntry] for the directory itself, we remove it from the +/// map and upload it to the [DirectoryService] through a lazily created [DirectoryPutter]. +/// +/// On success, returns the root node. +#[instrument(skip_all, ret(level = Level::TRACE), err)] +pub async fn ingest_entries<'a, DS, S>(directory_service: DS, mut entries: S) -> Result +where + DS: AsRef, + S: Stream, Error>> + Send + std::marker::Unpin, +{ + // For a given path, this holds the [Directory] structs as they are populated. + let mut directories: HashMap = HashMap::default(); + let mut maybe_directory_putter: Option> = None; + + let root_node = loop { + let mut entry = entries + .next() + .await + // The last entry of the stream must have 1 path component, after which + // we break the loop manually. + .expect("Tvix bug: unexpected end of stream")?; + + let name = entry + .path() + .file_name() + // If this is the root node, it will have an empty name. + .unwrap_or_default() + .as_bytes() + .to_owned() + .into(); + + let node = match &mut entry { + IngestionEntry::Dir { .. } => { + // If the entry is a directory, we traversed all its children (and + // populated it in `directories`). + // If we don't have it in there, it's an empty directory. + let directory = directories + .remove(entry.path()) + // In that case, it contained no children + .unwrap_or_default(); + + let directory_size = directory.size(); + let directory_digest = directory.digest(); + + // Use the directory_putter to upload the directory. + // If we don't have one yet (as that's the first one to upload), + // initialize the putter. + maybe_directory_putter + .get_or_insert_with(|| directory_service.as_ref().put_multiple_start()) + .put(directory) + .await?; + + Node::Directory(DirectoryNode { + name, + digest: directory_digest.into(), + size: directory_size, + }) + } + IngestionEntry::Symlink { ref target, .. } => Node::Symlink(SymlinkNode { + name, + target: target.as_os_str().as_bytes().to_owned().into(), + }), + IngestionEntry::Regular { + size, + executable, + digest, + .. + } => Node::File(FileNode { + name, + digest: digest.await?.into(), + size: *size, + executable: *executable, + }), + IngestionEntry::Unknown { path, file_type } => { + return Err(Error::UnsupportedFileType(path.clone(), *file_type)); + } + }; + + if entry.path().components().count() == 1 { + break node; + } + + // record node in parent directory, creating a new [Directory] if not there yet. + directories + .entry(entry.path().parent().unwrap().to_path_buf()) + .or_default() + .add(node); + }; + + // if there were directories uploaded, make sure we flush the putter, so + // they're all persisted to the backend. 
+ if let Some(mut directory_putter) = maybe_directory_putter { + let root_directory_digest = directory_putter.close().await?; + + #[cfg(debug_assertions)] + { + if let Node::Directory(directory_node) = &root_node { + debug_assert_eq!( + root_directory_digest, + directory_node + .digest + .to_vec() + .try_into() + .expect("invalid digest len") + ) + } else { + unreachable!("Tvix bug: directory putter initialized but no root directory node"); + } + } + }; + + Ok(root_node) +} + +/// Uploads the file at the provided [Path] the the [BlobService]. +#[instrument(skip(blob_service), fields(path), err)] +async fn upload_blob_at_path(blob_service: BS, path: PathBuf) -> Result +where + BS: BlobService, +{ + let mut file = match tokio::fs::File::open(&path).await { + Ok(file) => file, + Err(e) => return Err(Error::UnableToRead(path, e)), + }; + + let mut writer = blob_service.open_write().await; + + if let Err(e) = tokio::io::copy(&mut file, &mut writer).await { + return Err(Error::UnableToRead(path, e)); + }; + + let digest = writer + .close() + .await + .map_err(|e| Error::UnableToRead(path, e))?; + + Ok(digest) +} + +type BlobFut<'a> = Pin> + Send + 'a>>; + +pub enum IngestionEntry<'a> { + Regular { + path: PathBuf, + size: u64, + executable: bool, + digest: BlobFut<'a>, + }, + Symlink { + path: PathBuf, + target: PathBuf, + }, + Dir { + path: PathBuf, + }, + Unknown { + path: PathBuf, + file_type: FileType, + }, +} + +impl<'a> IngestionEntry<'a> { + fn path(&self) -> &Path { + match self { + IngestionEntry::Regular { path, .. } => path, + IngestionEntry::Symlink { path, .. } => path, + IngestionEntry::Dir { path } => path, + IngestionEntry::Unknown { path, .. } => path, + } + } +} diff --git a/tvix/castore/src/tests/import.rs b/tvix/castore/src/tests/import.rs index b44b71cd784d..8b3bd5ce0ffc 100644 --- a/tvix/castore/src/tests/import.rs +++ b/tvix/castore/src/tests/import.rs @@ -1,7 +1,7 @@ use crate::blobservice::{self, BlobService}; use crate::directoryservice; use crate::fixtures::*; -use crate::import::ingest_path; +use crate::import::fs::ingest_path; use crate::proto; use std::sync::Arc; diff --git a/tvix/glue/src/tvix_store_io.rs b/tvix/glue/src/tvix_store_io.rs index 46575743c462..f0f2f5cf918b 100644 --- a/tvix/glue/src/tvix_store_io.rs +++ b/tvix/glue/src/tvix_store_io.rs @@ -18,7 +18,7 @@ use std::{ use tokio_util::io::SyncIoBridge; use tracing::{error, instrument, warn, Level}; use tvix_build::buildservice::BuildService; -use tvix_castore::import::dir_entry_iter_to_ingestion_stream; +use tvix_castore::import::fs::dir_entry_iter_to_ingestion_stream; use tvix_eval::{ErrorKind, EvalIO, FileType, StdIO}; use tvix_store::utils::AsyncIoBridge; use walkdir::DirEntry; diff --git a/tvix/store/src/bin/tvix-store.rs b/tvix/store/src/bin/tvix-store.rs index 1f172d65c62f..7e362576a1dc 100644 --- a/tvix/store/src/bin/tvix-store.rs +++ b/tvix/store/src/bin/tvix-store.rs @@ -17,7 +17,7 @@ use tracing::Level; use tracing_subscriber::EnvFilter; use tracing_subscriber::Layer; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; -use tvix_castore::import::ingest_path; +use tvix_castore::import::fs::ingest_path; use tvix_store::proto::NarInfo; use tvix_store::proto::PathInfo; diff --git a/tvix/store/src/import.rs b/tvix/store/src/import.rs index 5cff29a9e5b0..7b6aeb824ea6 100644 --- a/tvix/store/src/import.rs +++ b/tvix/store/src/import.rs @@ -1,7 +1,8 @@ use std::path::Path; use tracing::{debug, instrument}; use tvix_castore::{ - blobservice::BlobService, 
directoryservice::DirectoryService, proto::node::Node, B3Digest, + blobservice::BlobService, directoryservice::DirectoryService, import::fs::ingest_path, + proto::node::Node, B3Digest, }; use nix_compat::{ @@ -116,8 +117,7 @@ where DS: AsRef, PS: AsRef, { - let root_node = - tvix_castore::import::ingest_path(blob_service, directory_service, path.as_ref()).await?; + let root_node = ingest_path(blob_service, directory_service, path.as_ref()).await?; // Ask the PathInfoService for the NAR size and sha256 let (nar_size, nar_sha256) = path_info_service.as_ref().calculate_nar(&root_node).await?;
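
The call-site change in tvix-store and in the castore tests is mechanical: `ingest_path` keeps its signature and only moves from `tvix_castore::import` to `tvix_castore::import::fs`. A minimal sketch of a caller after this change; the wrapper function and its name are illustrative, not part of the patch, and the `AsRef<dyn DirectoryService>` bound is assumed from the `directory_service.as_ref().put_multiple_start()` call in `ingest_entries`:

    use std::path::Path;

    use tvix_castore::blobservice::BlobService;
    use tvix_castore::directoryservice::DirectoryService;
    use tvix_castore::import::fs::ingest_path; // moved from tvix_castore::import
    use tvix_castore::proto::node::Node;

    /// Hypothetical helper: ingest a local path and return its root node.
    async fn import_local_path<BS, DS>(
        blob_service: BS,
        directory_service: DS,
        path: &Path,
    ) -> std::io::Result<Node>
    where
        BS: BlobService + Clone,
        // Assumed bound, matching how ingest_entries uses the service.
        DS: AsRef<dyn DirectoryService>,
    {
        // ingest_path walks the filesystem, uploads file contents and
        // directories, and returns the root node. Its error type converts
        // into std::io::Error via the From impl in import/error.rs, so `?`
        // works in io contexts.
        let root_node = ingest_path(blob_service, directory_service, path).await?;
        Ok(root_node)
    }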
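
`dir_entry_iter_to_ingestion_stream` is the extension point the fs module exposes for callers that need to filter or post-process the walk, which is how tvix_store_io uses it. A sketch under the same assumptions as above; the `.git` filter and the helper name are made up for illustration. Note that `contents_first(true)` is what upholds the children-before-parents ordering that `ingest_entries` requires:

    use std::path::Path;

    use tvix_castore::blobservice::BlobService;
    use tvix_castore::directoryservice::DirectoryService;
    use tvix_castore::import::fs::dir_entry_iter_to_ingestion_stream;
    use tvix_castore::import::ingest_entries;
    use tvix_castore::proto::node::Node;
    use walkdir::WalkDir;

    /// Hypothetical helper: ingest `path`, skipping any `.git` directories.
    async fn ingest_path_filtered<BS, DS>(
        blob_service: BS,
        directory_service: DS,
        path: &Path,
    ) -> std::io::Result<Node>
    where
        BS: BlobService + Clone,
        DS: AsRef<dyn DirectoryService>, // assumed, as above
    {
        // Mirror the WalkDir configuration of walk_path_for_ingestion:
        // contents_first(true) yields children before their parent directory.
        let iter = WalkDir::new(path)
            .follow_links(false)
            .follow_root_links(false)
            .contents_first(true)
            .into_iter()
            .filter_entry(|entry| entry.file_name() != ".git");

        let entries = dir_entry_iter_to_ingestion_stream(blob_service, iter, path);
        let root_node = ingest_entries(directory_service, entries).await?;
        Ok(root_node)
    }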
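
Because `ingest_entries` is now the module's main entry point and only consumes a stream of `IngestionEntry`, ingestion sources other than a filesystem walk can be plugged in by constructing that stream directly. A sketch of ingesting a single in-memory file under a root directory, respecting the documented invariants (children before parents, root last with a single path component); the helper, the `dir/hello` layout and the eagerly resolved digest future are illustrative assumptions, not part of the patch:

    use std::path::PathBuf;

    use futures::stream;
    use tokio::io::AsyncWriteExt;
    use tvix_castore::blobservice::BlobService;
    use tvix_castore::directoryservice::DirectoryService;
    use tvix_castore::import::{ingest_entries, Error, IngestionEntry};
    use tvix_castore::proto::node::Node;

    /// Hypothetical helper: ingest `contents` as `dir/hello` inside a root
    /// directory `dir`, without touching the filesystem.
    async fn ingest_in_memory<BS, DS>(
        blob_service: BS,
        directory_service: DS,
        contents: &[u8],
    ) -> std::io::Result<Node>
    where
        BS: BlobService,
        DS: AsRef<dyn DirectoryService>, // assumed, as above
    {
        // Upload the file contents first, as upload_blob_at_path does, so the
        // entry can carry an already resolved digest future.
        let mut writer = blob_service.open_write().await;
        writer.write_all(contents).await?;
        let digest = writer.close().await?;

        // Children must come before their parents; the last entry is the root
        // and has a single path component.
        let entries = vec![
            Ok(IngestionEntry::Regular {
                path: PathBuf::from("dir/hello"),
                size: contents.len() as u64,
                executable: false,
                // The digest is a pinned, boxed future, so real callers can
                // still have uploads in flight when the entry is yielded.
                digest: Box::pin(async move { Ok::<_, Error>(digest) }),
            }),
            Ok(IngestionEntry::Dir {
                path: PathBuf::from("dir"),
            }),
        ];

        let root_node = ingest_entries(directory_service, stream::iter(entries)).await?;
        Ok(root_node)
    }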
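
The executable check in `dir_entry_to_ingestion_entry`, `metadata.permissions().mode() & 64 != 0`, tests the owner-execute bit: 64 is 0o100 (S_IXUSR), so a file becomes executable in the castore exactly when its owner may execute it, matching nix's dump() behaviour. The same predicate spelled with an octal literal (a standalone restatement, not code from the patch):

    use std::fs::Metadata;
    use std::os::unix::fs::PermissionsExt;

    /// Same check as in dir_entry_to_ingestion_entry, written in octal.
    fn is_executable(metadata: &Metadata) -> bool {
        // 0o100 == 64 is the owner-execute bit (S_IXUSR).
        metadata.permissions().mode() & 0o100 != 0
    }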