Diffstat (limited to 'src/libstore')
-rw-r--r-- | src/libstore/binary-cache-store.hh   |  21
-rw-r--r-- | src/libstore/build.cc                | 500
-rw-r--r-- | src/libstore/download.cc             |  17
-rw-r--r-- | src/libstore/legacy-ssh-store.cc     | 247
-rw-r--r-- | src/libstore/nar-info-disk-cache.cc  |  52
-rw-r--r-- | src/libstore/pathlocks.cc            |   5
-rw-r--r-- | src/libstore/pathlocks.hh            |   6
-rw-r--r-- | src/libstore/remote-store.cc         |   1
-rw-r--r-- | src/libstore/serve-protocol.hh       |  23
-rw-r--r-- | src/libstore/sqlite.hh               |   1
-rw-r--r-- | src/libstore/ssh-store.cc            |  24
-rw-r--r-- | src/libstore/store-api.cc            |  81
-rw-r--r-- | src/libstore/store-api.hh            |  32
13 files changed, 727 insertions, 283 deletions
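
A recurring change in the build.cc hunks below is that the static `PathSet UserLock::lockedPaths` becomes a `Sync<PathSet>`, so concurrent derivation goals can no longer race on the per-user lock file names. The following minimal, self-contained sketch illustrates that mutex-guarded-proxy pattern; the `Sync` template and `tryClaim` helper here are simplified stand-ins written for this note, not the classes defined in Nix's sync.hh.

#include <mutex>
#include <set>
#include <string>

/* Simplified stand-in for the Sync<T> wrapper used in build.cc: lock()
   returns a RAII proxy that holds the mutex for as long as the proxy is
   alive, so the guarded value is only reachable while locked. */
template<typename T>
class Sync
{
    std::mutex mutex;
    T data;
public:
    class Lock
    {
        std::unique_lock<std::mutex> guard;
        T * data;
    public:
        Lock(std::mutex & m, T & d) : guard(m), data(&d) { }
        T * operator ->() { return data; }
        T & operator *() { return *data; }
    };
    Lock lock() { return Lock(mutex, data); }
};

/* Hypothetical helper mirroring how the UserLock constructor claims a
   lock file name: the check and the insert happen under one lock() proxy,
   so no other thread can claim the same file in between. */
static Sync<std::set<std::string>> lockedPaths_;

bool tryClaim(const std::string & fnUserLock)
{
    auto lockedPaths(lockedPaths_.lock());
    if (lockedPaths->count(fnUserLock)) return false;
    lockedPaths->insert(fnUserLock);
    return true;
}
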
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh index 31878bbb2476..a70d50d4949c 100644 --- a/src/libstore/binary-cache-store.hh +++ b/src/libstore/binary-cache-store.hh @@ -71,9 +71,6 @@ public: PathSet & referrers) override { notImpl(); } - PathSet queryValidDerivers(const Path & path) override - { return {}; } - PathSet queryDerivationOutputs(const Path & path) override { notImpl(); } @@ -83,13 +80,6 @@ public: Path queryPathFromHashPart(const string & hashPart) override { notImpl(); } - PathSet querySubstitutablePaths(const PathSet & paths) override - { return {}; } - - void querySubstitutablePathInfos(const PathSet & paths, - SubstitutablePathInfos & infos) override - { } - bool wantMassQuery() override { return wantMassQuery_; } void addToStore(const ValidPathInfo & info, const ref<std::string> & nar, @@ -121,25 +111,14 @@ public: void addIndirectRoot(const Path & path) override { notImpl(); } - void syncWithGC() override - { } - Roots findRoots() override { notImpl(); } void collectGarbage(const GCOptions & options, GCResults & results) override { notImpl(); } - void optimiseStore() override - { } - - bool verifyStore(bool checkContents, bool repair) override - { return true; } - ref<FSAccessor> getFSAccessor() override; -public: - void addSignatures(const Path & storePath, const StringSet & sigs) override { notImpl(); } diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 7fc6ff0df0f8..5d6fff4e349f 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -10,6 +10,7 @@ #include "builtins.hh" #include "finally.hh" #include "compression.hh" +#include "json.hh" #include <algorithm> #include <iostream> @@ -399,6 +400,8 @@ void Goal::trace(const format & f) /* Common initialisation performed in child processes. */ static void commonChildInit(Pipe & logPipe) { + restoreSignals(); + /* Put the child in a separate session (and thus a separate process group) so that it has no controlling terminal (meaning that e.g. ssh cannot open /dev/tty) and it doesn't receive @@ -434,22 +437,20 @@ private: close that file again (without closing the original file descriptor), we lose the lock. So we have to be *very* careful not to open a lock file on which we are holding a lock. */ - static PathSet lockedPaths; /* !!! not thread-safe */ + static Sync<PathSet> lockedPaths_; Path fnUserLock; AutoCloseFD fdUserLock; string user; - uid_t uid = 0; - gid_t gid = 0; + uid_t uid; + gid_t gid; std::vector<gid_t> supplementaryGIDs; public: + UserLock(); ~UserLock(); - void acquire(); - void release(); - void kill(); string getUser() { return user; } @@ -462,19 +463,11 @@ public: }; -PathSet UserLock::lockedPaths; +Sync<PathSet> UserLock::lockedPaths_; -UserLock::~UserLock() +UserLock::UserLock() { - release(); -} - - -void UserLock::acquire() -{ - assert(uid == 0); - assert(settings.buildUsersGroup != ""); /* Get the members of the build-users-group. */ @@ -509,39 +502,48 @@ void UserLock::acquire() fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str(); - if (lockedPaths.find(fnUserLock) != lockedPaths.end()) - /* We already have a lock on this one. */ - continue; + { + auto lockedPaths(lockedPaths_.lock()); + if (lockedPaths->count(fnUserLock)) + /* We already have a lock on this one. 
*/ + continue; + lockedPaths->insert(fnUserLock); + } - AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600); - if (!fd) - throw SysError(format("opening user lock ‘%1%’") % fnUserLock); + try { - if (lockFile(fd.get(), ltWrite, false)) { - fdUserLock = std::move(fd); - lockedPaths.insert(fnUserLock); - user = i; - uid = pw->pw_uid; + AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600); + if (!fd) + throw SysError(format("opening user lock ‘%1%’") % fnUserLock); - /* Sanity check... */ - if (uid == getuid() || uid == geteuid()) - throw Error(format("the Nix user should not be a member of ‘%1%’") - % settings.buildUsersGroup); + if (lockFile(fd.get(), ltWrite, false)) { + fdUserLock = std::move(fd); + user = i; + uid = pw->pw_uid; + + /* Sanity check... */ + if (uid == getuid() || uid == geteuid()) + throw Error(format("the Nix user should not be a member of ‘%1%’") + % settings.buildUsersGroup); #if __linux__ - /* Get the list of supplementary groups of this build user. This - is usually either empty or contains a group such as "kvm". */ - supplementaryGIDs.resize(10); - int ngroups = supplementaryGIDs.size(); - int err = getgrouplist(pw->pw_name, pw->pw_gid, - supplementaryGIDs.data(), &ngroups); - if (err == -1) - throw Error(format("failed to get list of supplementary groups for ‘%1%’") % pw->pw_name); - - supplementaryGIDs.resize(ngroups); + /* Get the list of supplementary groups of this build user. This + is usually either empty or contains a group such as "kvm". */ + supplementaryGIDs.resize(10); + int ngroups = supplementaryGIDs.size(); + int err = getgrouplist(pw->pw_name, pw->pw_gid, + supplementaryGIDs.data(), &ngroups); + if (err == -1) + throw Error(format("failed to get list of supplementary groups for ‘%1%’") % pw->pw_name); + + supplementaryGIDs.resize(ngroups); #endif - return; + return; + } + + } catch (...) { + lockedPaths_.lock()->erase(fnUserLock); } } @@ -551,20 +553,16 @@ void UserLock::acquire() } -void UserLock::release() +UserLock::~UserLock() { - if (uid == 0) return; - fdUserLock = -1; /* releases lock */ - assert(lockedPaths.find(fnUserLock) != lockedPaths.end()); - lockedPaths.erase(fnUserLock); - fnUserLock = ""; - uid = 0; + auto lockedPaths(lockedPaths_.lock()); + assert(lockedPaths->count(fnUserLock)); + lockedPaths->erase(fnUserLock); } void UserLock::kill() { - assert(enabled()); killUser(uid); } @@ -720,7 +718,7 @@ private: PathSet missingPaths; /* User selected for running the builder. */ - UserLock buildUser; + std::unique_ptr<UserLock> buildUser; /* The process ID of the builder. */ Pid pid; @@ -780,6 +778,7 @@ private: }; typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path DirsInChroot dirsInChroot; + typedef map<string, string> Environment; Environment env; @@ -817,6 +816,8 @@ private: const uid_t sandboxUid = 1000; const gid_t sandboxGid = 100; + const static Path homeDir; + public: DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode = bmNormal); @@ -864,6 +865,18 @@ private: /* Start building a derivation. */ void startBuilder(); + /* Fill in the environment for the builder. */ + void initEnv(); + + /* Write a JSON file containing the derivation attributes. */ + void writeStructuredAttrs(); + + /* Make a file owned by the builder. */ + void chownToBuilder(const Path & path); + + /* Handle the exportReferencesGraph attribute. */ + void doExportReferencesGraph(); + /* Run the builder's process. 
*/ void runChild(); @@ -904,6 +917,9 @@ private: }; +const Path DerivationGoal::homeDir = "/homeless-shelter"; + + DerivationGoal::DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode) : Goal(worker) @@ -951,7 +967,7 @@ void DerivationGoal::killChild() if (pid != -1) { worker.childTerminated(this); - if (buildUser.enabled()) { + if (buildUser) { /* If we're using a build user, then there is a tricky race condition: if we kill the build user before the child has done its setuid() to the build user uid, then @@ -959,7 +975,7 @@ void DerivationGoal::killChild() pid.wait(). So also send a conventional kill to the child. */ ::kill(-pid, SIGKILL); /* ignore the result */ - buildUser.kill(); + buildUser->kill(); pid.wait(); } else pid.kill(); @@ -1380,7 +1396,7 @@ void DerivationGoal::tryToBuild() } catch (BuildError & e) { printError(e.msg()); outputLocks.unlock(); - buildUser.release(); + buildUser.reset(); worker.permanentFailure = true; done(BuildResult::InputRejected, e.msg()); return; @@ -1414,6 +1430,11 @@ void DerivationGoal::buildDone() { trace("build done"); + /* Release the build user at the end of this function. We don't do + it right away because we don't want another build grabbing this + uid and then messing around with our output. */ + Finally releaseBuildUser([&]() { buildUser.reset(); }); + /* Since we got an EOF on the logger pipe, the builder is presumed to have terminated. In fact, the builder could also have simply have closed its end of the pipe, so just to be sure, @@ -1443,7 +1464,7 @@ void DerivationGoal::buildDone() malicious user from leaving behind a process that keeps files open and modifies them after they have been chown'ed to root. */ - if (buildUser.enabled()) buildUser.kill(); + if (buildUser) buildUser->kill(); bool diskFull = false; @@ -1513,7 +1534,6 @@ void DerivationGoal::buildDone() /* Repeat the build if necessary. */ if (curRound++ < nrRounds) { outputLocks.unlock(); - buildUser.release(); state = &DerivationGoal::tryToBuild; worker.wakeUp(shared_from_this()); return; @@ -1530,7 +1550,6 @@ void DerivationGoal::buildDone() if (!hook) printError(e.msg()); outputLocks.unlock(); - buildUser.release(); BuildResult::Status st = BuildResult::MiscFailure; @@ -1552,9 +1571,6 @@ void DerivationGoal::buildDone() return; } - /* Release the build user, if applicable. */ - buildUser.release(); - done(BuildResult::Built); } @@ -1669,11 +1685,7 @@ void DerivationGoal::startBuilder() additionalSandboxProfile = get(drv->env, "__sandboxProfile"); #endif - /* Are we doing a chroot build? Note that fixed-output - derivations are never done in a chroot, mainly so that - functions like fetchurl (which needs a proper /etc/resolv.conf) - work properly. Purity checking for fixed-output derivations - is somewhat pointless anyway. */ + /* Are we doing a chroot build? */ { string x = settings.get("build-use-sandbox", /* deprecated alias */ @@ -1700,31 +1712,15 @@ void DerivationGoal::startBuilder() if (worker.store.storeDir != worker.store.realStoreDir) useChroot = true; - /* Construct the environment passed to the builder. */ - env.clear(); - - /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when - PATH is not set. We don't want this, so we fill it in with some dummy - value. */ - env["PATH"] = "/path-not-set"; - - /* Set HOME to a non-existing path to prevent certain programs from using - /etc/passwd (or NIS, or whatever) to locate the home directory (for - example, wget looks for ~/.wgetrc). 
I.e., these tools use /etc/passwd - if HOME is not set, but they will just assume that the settings file - they are looking for does not exist if HOME is set but points to some - non-existing path. */ - Path homeDir = "/homeless-shelter"; - env["HOME"] = homeDir; - - /* Tell the builder where the Nix store is. Usually they - shouldn't care, but this is useful for purity checking (e.g., - the compiler or linker might only want to accept paths to files - in the store or in the build directory). */ - env["NIX_STORE"] = worker.store.storeDir; + /* If `build-users-group' is not empty, then we have to build as + one of the members of that group. */ + if (settings.buildUsersGroup != "" && getuid() == 0) { + buildUser = std::make_unique<UserLock>(); - /* The maximum number of cores to utilize for parallel building. */ - env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str(); + /* Make sure that no other processes are executing under this + uid. */ + buildUser->kill(); + } /* Create a temporary directory where the build will take place. */ @@ -1734,127 +1730,19 @@ void DerivationGoal::startBuilder() /* In a sandbox, for determinism, always use the same temporary directory. */ tmpDirInSandbox = useChroot ? canonPath("/tmp", true) + "/nix-build-" + drvName + "-0" : tmpDir; - - /* Add all bindings specified in the derivation via the - environments, except those listed in the passAsFile - attribute. Those are passed as file names pointing to - temporary files containing the contents. */ - PathSet filesToChown; - StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile")); - int fileNr = 0; - for (auto & i : drv->env) { - if (passAsFile.find(i.first) == passAsFile.end()) { - env[i.first] = i.second; - } else { - string fn = ".attr-" + std::to_string(fileNr++); - Path p = tmpDir + "/" + fn; - writeFile(p, i.second); - filesToChown.insert(p); - env[i.first + "Path"] = tmpDirInSandbox + "/" + fn; - } - } - - /* For convenience, set an environment pointing to the top build - directory. */ - env["NIX_BUILD_TOP"] = tmpDirInSandbox; - - /* Also set TMPDIR and variants to point to this directory. */ - env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDirInSandbox; - - /* Explicitly set PWD to prevent problems with chroot builds. In - particular, dietlibc cannot figure out the cwd because the - inode of the current directory doesn't appear in .. (because - getdents returns the inode of the mount point). */ - env["PWD"] = tmpDirInSandbox; - - /* Compatibility hack with Nix <= 0.7: if this is a fixed-output - derivation, tell the builder, so that for instance `fetchurl' - can skip checking the output. On older Nixes, this environment - variable won't be set, so `fetchurl' will do the check. */ - if (fixedOutput) env["NIX_OUTPUT_CHECKED"] = "1"; - - /* *Only* if this is a fixed-output derivation, propagate the - values of the environment variables specified in the - `impureEnvVars' attribute to the builder. This allows for - instance environment variables for proxy configuration such as - `http_proxy' to be easily passed to downloaders like - `fetchurl'. Passing such environment variables from the caller - to the builder is generally impure, but the output of - fixed-output derivations is by definition pure (since we - already know the cryptographic hash of the output). 
*/ - if (fixedOutput) { - Strings varNames = tokenizeString<Strings>(get(drv->env, "impureEnvVars")); - for (auto & i : varNames) env[i] = getEnv(i); - } + chownToBuilder(tmpDir); /* Substitute output placeholders with the actual output paths. */ for (auto & output : drv->outputs) inputRewrites[hashPlaceholder(output.first)] = output.second.path; - /* The `exportReferencesGraph' feature allows the references graph - to be passed to a builder. This attribute should be a list of - pairs [name1 path1 name2 path2 ...]. The references graph of - each `pathN' will be stored in a text file `nameN' in the - temporary build directory. The text files have the format used - by `nix-store --register-validity'. However, the deriver - fields are left empty. */ - string s = get(drv->env, "exportReferencesGraph"); - Strings ss = tokenizeString<Strings>(s); - if (ss.size() % 2 != 0) - throw BuildError(format("odd number of tokens in ‘exportReferencesGraph’: ‘%1%’") % s); - for (Strings::iterator i = ss.begin(); i != ss.end(); ) { - string fileName = *i++; - checkStoreName(fileName); /* !!! abuse of this function */ - - /* Check that the store path is valid. */ - Path storePath = *i++; - if (!worker.store.isInStore(storePath)) - throw BuildError(format("‘exportReferencesGraph’ contains a non-store path ‘%1%’") - % storePath); - storePath = worker.store.toStorePath(storePath); - if (!worker.store.isValidPath(storePath)) - throw BuildError(format("‘exportReferencesGraph’ contains an invalid path ‘%1%’") - % storePath); - - /* If there are derivations in the graph, then include their - outputs as well. This is useful if you want to do things - like passing all build-time dependencies of some path to a - derivation that builds a NixOS DVD image. */ - PathSet paths, paths2; - worker.store.computeFSClosure(storePath, paths); - paths2 = paths; - - for (auto & j : paths2) { - if (isDerivation(j)) { - Derivation drv = worker.store.derivationFromPath(j); - for (auto & k : drv.outputs) - worker.store.computeFSClosure(k.second.path, paths); - } - } - - /* Write closure info to `fileName'. */ - writeFile(tmpDir + "/" + fileName, - worker.store.makeValidityRegistration(paths, false, false)); - } - - - /* If `build-users-group' is not empty, then we have to build as - one of the members of that group. */ - if (settings.buildUsersGroup != "" && getuid() == 0) { - buildUser.acquire(); - - /* Make sure that no other processes are executing under this - uid. */ - buildUser.kill(); - - /* Change ownership of the temporary build directory. */ - filesToChown.insert(tmpDir); + /* Construct the environment passed to the builder. */ + initEnv(); - for (auto & p : filesToChown) - if (chown(p.c_str(), buildUser.getUID(), buildUser.getGID()) == -1) - throw SysError(format("cannot change ownership of ‘%1%’") % p); - } + writeStructuredAttrs(); + /* Handle exportReferencesGraph(), if set. */ + doExportReferencesGraph(); if (useChroot) { @@ -1946,7 +1834,7 @@ void DerivationGoal::startBuilder() if (mkdir(chrootRootDir.c_str(), 0750) == -1) throw SysError(format("cannot create ‘%1%’") % chrootRootDir); - if (buildUser.enabled() && chown(chrootRootDir.c_str(), 0, buildUser.getGID()) == -1) + if (buildUser && chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1) throw SysError(format("cannot change ownership of ‘%1%’") % chrootRootDir); /* Create a writable /tmp in the chroot. 
Many builders need @@ -1990,7 +1878,7 @@ void DerivationGoal::startBuilder() createDirs(chrootStoreDir); chmod_(chrootStoreDir, 01775); - if (buildUser.enabled() && chown(chrootStoreDir.c_str(), 0, buildUser.getGID()) == -1) + if (buildUser && chown(chrootStoreDir.c_str(), 0, buildUser->getGID()) == -1) throw SysError(format("cannot change ownership of ‘%1%’") % chrootStoreDir); for (auto & i : inputPaths) { @@ -2200,8 +2088,8 @@ void DerivationGoal::startBuilder() /* Set the UID/GID mapping of the builder's user namespace such that the sandbox user maps to the build user, or to the calling user (if build users are disabled). */ - uid_t hostUid = buildUser.enabled() ? buildUser.getUID() : getuid(); - uid_t hostGid = buildUser.enabled() ? buildUser.getGID() : getgid(); + uid_t hostUid = buildUser ? buildUser->getUID() : getuid(); + uid_t hostGid = buildUser ? buildUser->getGID() : getgid(); writeFile("/proc/" + std::to_string(pid) + "/uid_map", (format("%d %d 1") % sandboxUid % hostUid).str()); @@ -2219,7 +2107,7 @@ void DerivationGoal::startBuilder() } else #endif { - options.allowVfork = !buildUser.enabled() && !drv->isBuiltin(); + options.allowVfork = !buildUser && !drv->isBuiltin(); pid = startProcess([&]() { runChild(); }, options); @@ -2242,6 +2130,174 @@ void DerivationGoal::startBuilder() } +void DerivationGoal::initEnv() +{ + env.clear(); + + /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when + PATH is not set. We don't want this, so we fill it in with some dummy + value. */ + env["PATH"] = "/path-not-set"; + + /* Set HOME to a non-existing path to prevent certain programs from using + /etc/passwd (or NIS, or whatever) to locate the home directory (for + example, wget looks for ~/.wgetrc). I.e., these tools use /etc/passwd + if HOME is not set, but they will just assume that the settings file + they are looking for does not exist if HOME is set but points to some + non-existing path. */ + env["HOME"] = homeDir; + + /* Tell the builder where the Nix store is. Usually they + shouldn't care, but this is useful for purity checking (e.g., + the compiler or linker might only want to accept paths to files + in the store or in the build directory). */ + env["NIX_STORE"] = worker.store.storeDir; + + /* The maximum number of cores to utilize for parallel building. */ + env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str(); + + /* In non-structured mode, add all bindings specified in the + derivation via the environments, except those listed in the + passAsFile attribute. Those are passed as file names pointing + to temporary files containing the contents. Note that + passAsFile is ignored in structure mode because it's not + needed (attributes are not passed through the environment, so + there is no size constraint). */ + if (!drv->env.count("__json")) { + + StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile")); + int fileNr = 0; + for (auto & i : drv->env) { + if (passAsFile.find(i.first) == passAsFile.end()) { + env[i.first] = i.second; + } else { + string fn = ".attr-" + std::to_string(fileNr++); + Path p = tmpDir + "/" + fn; + writeFile(p, i.second); + chownToBuilder(p); + env[i.first + "Path"] = tmpDirInSandbox + "/" + fn; + } + } + + } + + /* For convenience, set an environment pointing to the top build + directory. */ + env["NIX_BUILD_TOP"] = tmpDirInSandbox; + + /* Also set TMPDIR and variants to point to this directory. 
*/ + env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDirInSandbox; + + /* Explicitly set PWD to prevent problems with chroot builds. In + particular, dietlibc cannot figure out the cwd because the + inode of the current directory doesn't appear in .. (because + getdents returns the inode of the mount point). */ + env["PWD"] = tmpDirInSandbox; + + /* Compatibility hack with Nix <= 0.7: if this is a fixed-output + derivation, tell the builder, so that for instance `fetchurl' + can skip checking the output. On older Nixes, this environment + variable won't be set, so `fetchurl' will do the check. */ + if (fixedOutput) env["NIX_OUTPUT_CHECKED"] = "1"; + + /* *Only* if this is a fixed-output derivation, propagate the + values of the environment variables specified in the + `impureEnvVars' attribute to the builder. This allows for + instance environment variables for proxy configuration such as + `http_proxy' to be easily passed to downloaders like + `fetchurl'. Passing such environment variables from the caller + to the builder is generally impure, but the output of + fixed-output derivations is by definition pure (since we + already know the cryptographic hash of the output). */ + if (fixedOutput) { + Strings varNames = tokenizeString<Strings>(get(drv->env, "impureEnvVars")); + for (auto & i : varNames) env[i] = getEnv(i); + } +} + + +void DerivationGoal::writeStructuredAttrs() +{ + auto json = drv->env.find("__json"); + if (json == drv->env.end()) return; + + writeFile(tmpDir + "/.attrs.json", rewriteStrings(json->second, inputRewrites)); +} + + +void DerivationGoal::chownToBuilder(const Path & path) +{ + if (!buildUser) return; + if (chown(path.c_str(), buildUser->getUID(), buildUser->getGID()) == -1) + throw SysError(format("cannot change ownership of ‘%1%’") % path); +} + + +void DerivationGoal::doExportReferencesGraph() +{ + /* The `exportReferencesGraph' feature allows the references graph + to be passed to a builder. This attribute should be a list of + pairs [name1 path1 name2 path2 ...]. The references graph of + each `pathN' will be stored in a text file `nameN' in the + temporary build directory. The text files have the format used + by `nix-store --register-validity'. However, the deriver + fields are left empty. */ + string s = get(drv->env, "exportReferencesGraph"); + Strings ss = tokenizeString<Strings>(s); + if (ss.size() % 2 != 0) + throw BuildError(format("odd number of tokens in ‘exportReferencesGraph’: ‘%1%’") % s); + for (Strings::iterator i = ss.begin(); i != ss.end(); ) { + string fileName = *i++; + checkStoreName(fileName); /* !!! abuse of this function */ + + /* Check that the store path is valid. */ + Path storePath = *i++; + if (!worker.store.isInStore(storePath)) + throw BuildError(format("‘exportReferencesGraph’ contains a non-store path ‘%1%’") + % storePath); + storePath = worker.store.toStorePath(storePath); + if (!worker.store.isValidPath(storePath)) + throw BuildError(format("‘exportReferencesGraph’ contains an invalid path ‘%1%’") + % storePath); + + /* If there are derivations in the graph, then include their + outputs as well. This is useful if you want to do things + like passing all build-time dependencies of some path to a + derivation that builds a NixOS DVD image. 
*/ + PathSet paths, paths2; + worker.store.computeFSClosure(storePath, paths); + paths2 = paths; + + for (auto & j : paths2) { + if (isDerivation(j)) { + Derivation drv = worker.store.derivationFromPath(j); + for (auto & k : drv.outputs) + worker.store.computeFSClosure(k.second.path, paths); + } + } + + if (!drv->env.count("__json")) { + + /* Write closure info to <fileName>. */ + writeFile(tmpDir + "/" + fileName, + worker.store.makeValidityRegistration(paths, false, false)); + + } else { + + /* Write a more comprehensive JSON serialisation to + <fileName>. */ + std::ostringstream str; + { + JSONPlaceholder jsonRoot(str, true); + worker.store.pathInfoToJSON(jsonRoot, paths, false, true); + } + writeFile(tmpDir + "/" + fileName, str.str()); + + } + } +} + + void DerivationGoal::runChild() { /* Warning: in the child we should absolutely not make any SQLite @@ -2475,22 +2531,22 @@ void DerivationGoal::runChild() descriptors except std*, so that's safe. Also note that setuid() when run as root sets the real, effective and saved UIDs. */ - if (setUser && buildUser.enabled()) { + if (setUser && buildUser) { /* Preserve supplementary groups of the build user, to allow admins to specify groups such as "kvm". */ - if (!buildUser.getSupplementaryGIDs().empty() && - setgroups(buildUser.getSupplementaryGIDs().size(), - buildUser.getSupplementaryGIDs().data()) == -1) + if (!buildUser->getSupplementaryGIDs().empty() && + setgroups(buildUser->getSupplementaryGIDs().size(), + buildUser->getSupplementaryGIDs().data()) == -1) throw SysError("cannot set supplementary groups of build user"); - if (setgid(buildUser.getGID()) == -1 || - getgid() != buildUser.getGID() || - getegid() != buildUser.getGID()) + if (setgid(buildUser->getGID()) == -1 || + getgid() != buildUser->getGID() || + getegid() != buildUser->getGID()) throw SysError("setgid failed"); - if (setuid(buildUser.getUID()) == -1 || - getuid() != buildUser.getUID() || - geteuid() != buildUser.getUID()) + if (setuid(buildUser->getUID()) == -1 || + getuid() != buildUser->getUID() || + geteuid() != buildUser->getUID()) throw SysError("setuid failed"); } @@ -2614,8 +2670,6 @@ void DerivationGoal::runChild() for (auto & i : drv->args) args.push_back(rewriteStrings(i, inputRewrites)); - restoreSIGPIPE(); - /* Indicate that we managed to set up the build environment. */ writeFull(STDERR_FILENO, string("\1\n")); @@ -2730,7 +2784,7 @@ void DerivationGoal::registerOutputs() build. Also, the output should be owned by the build user. */ if ((!S_ISLNK(st.st_mode) && (st.st_mode & (S_IWGRP | S_IWOTH))) || - (buildUser.enabled() && st.st_uid != buildUser.getUID())) + (buildUser && st.st_uid != buildUser->getUID())) throw BuildError(format("suspicious ownership or permission on ‘%1%’; rejecting this build output") % path); #endif @@ -2742,7 +2796,7 @@ void DerivationGoal::registerOutputs() /* Canonicalise first. This ensures that the path we're rewriting doesn't contain a hard link to /etc/shadow or something like that. */ - canonicalisePathMetaData(actualPath, buildUser.enabled() ? buildUser.getUID() : -1, inodesSeen); + canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, inodesSeen); /* FIXME: this is in-memory. */ StringSink sink; @@ -2800,7 +2854,7 @@ void DerivationGoal::registerOutputs() /* Get rid of all weird permissions. This also checks that all files are owned by the build user, if applicable. */ canonicalisePathMetaData(actualPath, - buildUser.enabled() && !rewritten ? 
buildUser.getUID() : -1, inodesSeen); + buildUser && !rewritten ? buildUser->getUID() : -1, inodesSeen); /* For this output path, find the references to other paths contained in it. Compute the SHA-256 NAR hash at the same diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 42873d9e8a10..074e0ca6642a 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -172,6 +172,13 @@ struct CurlDownloader : public Downloader return ((DownloadItem *) userp)->progressCallback(dltotal, dlnow); } + static int debugCallback(CURL * handle, curl_infotype type, char * data, size_t size, void * userptr) + { + if (type == CURLINFO_TEXT) + vomit("curl: %s", chomp(std::string(data, size))); + return 0; + } + void init() { // FIXME: handle parallel downloads. @@ -184,6 +191,12 @@ struct CurlDownloader : public Downloader if (!req) req = curl_easy_init(); curl_easy_reset(req); + + if (verbosity >= lvlVomit) { + curl_easy_setopt(req, CURLOPT_VERBOSE, 1); + curl_easy_setopt(req, CURLOPT_DEBUGFUNCTION, DownloadItem::debugCallback); + } + curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str()); curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L); curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1); @@ -263,7 +276,7 @@ struct CurlDownloader : public Downloader code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted ? DownloadError(Interrupted, format("download of ‘%s’ was interrupted") % request.uri) : httpStatus != 0 - ? DownloadError(err, format("unable to download ‘%s’: HTTP error %d") % request.uri % httpStatus) + ? DownloadError(err, format("unable to download ‘%s’: HTTP error %d (curl error: %s)") % request.uri % httpStatus % curl_easy_strerror(code)) : DownloadError(err, format("unable to download ‘%s’: %s (%d)") % request.uri % curl_easy_strerror(code) % code); /* If this is a transient error, then maybe retry the @@ -387,7 +400,7 @@ struct CurlDownloader : public Downloader nextWakeup != std::chrono::steady_clock::time_point() ? 
std::max(0, (int) std::chrono::duration_cast<std::chrono::milliseconds>(nextWakeup - std::chrono::steady_clock::now()).count()) : 1000000000; - //printMsg(lvlVomit, format("download thread waiting for %d ms") % sleepTimeMs); + vomit("download thread waiting for %d ms", sleepTimeMs); mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds); if (mc != CURLM_OK) throw nix::Error(format("unexpected error from curl_multi_wait(): %s") % curl_multi_strerror(mc)); diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc new file mode 100644 index 000000000000..5d9e5aad6e0a --- /dev/null +++ b/src/libstore/legacy-ssh-store.cc @@ -0,0 +1,247 @@ +#include "archive.hh" +#include "pool.hh" +#include "remote-store.hh" +#include "serve-protocol.hh" +#include "store-api.hh" +#include "worker-protocol.hh" + +namespace nix { + +static std::string uriScheme = "legacy-ssh://"; + +struct LegacySSHStore : public Store +{ + string host; + + struct Connection + { + Pid sshPid; + AutoCloseFD out; + AutoCloseFD in; + FdSink to; + FdSource from; + }; + + AutoDelete tmpDir; + + Path socketPath; + + Pid sshMaster; + + ref<Pool<Connection>> connections; + + Path key; + + LegacySSHStore(const string & host, const Params & params, + size_t maxConnections = std::numeric_limits<size_t>::max()) + : Store(params) + , host(host) + , tmpDir(createTempDir("", "nix", true, true, 0700)) + , socketPath((Path) tmpDir + "/ssh.sock") + , connections(make_ref<Pool<Connection>>( + maxConnections, + [this]() { return openConnection(); }, + [](const ref<Connection> & r) { return true; } + )) + , key(get(params, "ssh-key", "")) + { + } + + ref<Connection> openConnection() + { + if ((pid_t) sshMaster == -1) { + sshMaster = startProcess([&]() { + restoreSignals(); + Strings args{ "ssh", "-M", "-S", socketPath, "-N", "-x", "-a", host }; + if (!key.empty()) + args.insert(args.end(), {"-i", key}); + execvp("ssh", stringsToCharPtrs(args).data()); + throw SysError("starting SSH master connection to host ‘%s’", host); + }); + } + + auto conn = make_ref<Connection>(); + Pipe in, out; + in.create(); + out.create(); + conn->sshPid = startProcess([&]() { + if (dup2(in.readSide.get(), STDIN_FILENO) == -1) + throw SysError("duping over STDIN"); + if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) + throw SysError("duping over STDOUT"); + execlp("ssh", "ssh", "-S", socketPath.c_str(), host.c_str(), "nix-store", "--serve", "--write", nullptr); + throw SysError("executing ‘nix-store --serve’ on remote host ‘%s’", host); + }); + in.readSide = -1; + out.writeSide = -1; + conn->out = std::move(out.readSide); + conn->in = std::move(in.writeSide); + conn->to = FdSink(conn->in.get()); + conn->from = FdSource(conn->out.get()); + + int remoteVersion; + + try { + conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION; + conn->to.flush(); + + unsigned int magic = readInt(conn->from); + if (magic != SERVE_MAGIC_2) + throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%s’", host); + remoteVersion = readInt(conn->from); + if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200) + throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%s’", host); + + } catch (EndOfFile & e) { + throw Error("cannot connect to ‘%1%’", host); + } + + return conn; + }; + + string getUri() override + { + return uriScheme + host; + } + + void queryPathInfoUncached(const Path & path, + std::function<void(std::shared_ptr<ValidPathInfo>)> success, + std::function<void(std::exception_ptr exc)> failure) override + { + 
sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() -> std::shared_ptr<ValidPathInfo> { + auto conn(connections->get()); + + debug("querying remote host ‘%s’ for info on ‘%s’", host, path); + + conn->to << cmdQueryPathInfos << PathSet{path}; + conn->to.flush(); + + auto info = std::make_shared<ValidPathInfo>(); + conn->from >> info->path; + if (info->path.empty()) return nullptr; + assert(path == info->path); + + PathSet references; + conn->from >> info->deriver; + info->references = readStorePaths<PathSet>(*this, conn->from); + readLongLong(conn->from); // download size + info->narSize = readLongLong(conn->from); + + auto s = readString(conn->from); + assert(s == ""); + + return info; + }); + } + + void addToStore(const ValidPathInfo & info, const ref<std::string> & nar, + bool repair, bool dontCheckSigs, + std::shared_ptr<FSAccessor> accessor) override + { + debug("adding path ‘%s’ to remote host ‘%s’", info.path, host); + + auto conn(connections->get()); + + conn->to + << cmdImportPaths + << 1; + conn->to(*nar); + conn->to + << exportMagic + << info.path + << info.references + << info.deriver + << 0 + << 0; + conn->to.flush(); + + if (readInt(conn->from) != 1) + throw Error("failed to add path ‘%s’ to remote host ‘%s’, info.path, host"); + + } + + void narFromPath(const Path & path, Sink & sink) override + { + auto conn(connections->get()); + + conn->to << cmdDumpStorePath << path; + conn->to.flush(); + + /* FIXME: inefficient. */ + ParseSink parseSink; /* null sink; just parse the NAR */ + SavingSourceAdapter savedNAR(conn->from); + parseDump(parseSink, savedNAR); + sink(savedNAR.s); + } + + /* Unsupported methods. */ + [[noreturn]] void unsupported() + { + throw Error("operation not supported on SSH stores"); + } + + PathSet queryAllValidPaths() override { unsupported(); } + + void queryReferrers(const Path & path, PathSet & referrers) override + { unsupported(); } + + PathSet queryDerivationOutputs(const Path & path) override + { unsupported(); } + + StringSet queryDerivationOutputNames(const Path & path) override + { unsupported(); } + + Path queryPathFromHashPart(const string & hashPart) override + { unsupported(); } + + Path addToStore(const string & name, const Path & srcPath, + bool recursive, HashType hashAlgo, + PathFilter & filter, bool repair) override + { unsupported(); } + + Path addTextToStore(const string & name, const string & s, + const PathSet & references, bool repair) override + { unsupported(); } + + void buildPaths(const PathSet & paths, BuildMode buildMode) override + { unsupported(); } + + BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv, + BuildMode buildMode) override + { unsupported(); } + + void ensurePath(const Path & path) override + { unsupported(); } + + void addTempRoot(const Path & path) override + { unsupported(); } + + void addIndirectRoot(const Path & path) override + { unsupported(); } + + Roots findRoots() override + { unsupported(); } + + void collectGarbage(const GCOptions & options, GCResults & results) override + { unsupported(); } + + ref<FSAccessor> getFSAccessor() + { unsupported(); } + + void addSignatures(const Path & storePath, const StringSet & sigs) override + { unsupported(); } + + bool isTrusted() override + { return true; } + +}; + +static RegisterStoreImplementation regStore([]( + const std::string & uri, const Store::Params & params) + -> std::shared_ptr<Store> +{ + if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0; + return 
std::make_shared<LegacySSHStore>(std::string(uri, uriScheme.size()), params); +}); + +} diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index d28ff42c7f23..13b67b81f35e 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -36,13 +36,9 @@ create table if not exists NARs ( foreign key (cache) references BinaryCaches(id) on delete cascade ); -create table if not exists NARExistence ( - cache integer not null, - storePath text not null, - exist integer not null, - timestamp integer not null, - primary key (cache, storePath), - foreign key (cache) references BinaryCaches(id) on delete cascade +create table if not exists LastPurge ( + dummy text primary key, + value integer ); )sql"; @@ -51,8 +47,12 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache { public: - /* How long negative lookups are valid. */ + /* How long negative and positive lookups are valid. */ const int ttlNegative = 3600; + const int ttlPositive = 30 * 24 * 3600; + + /* How often to purge expired entries from the cache. */ + const int purgeInterval = 24 * 3600; struct Cache { @@ -65,7 +65,7 @@ public: struct State { SQLite db; - SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR; + SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR, purgeCache; std::map<std::string, Cache> caches; }; @@ -103,7 +103,28 @@ public: "insert or replace into NARs(cache, hashPart, timestamp, present) values (?, ?, ?, 0)"); state->queryNAR.create(state->db, - "select * from NARs where cache = ? and hashPart = ?"); + "select * from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))"); + + /* Periodically purge expired entries from the database. */ + auto now = time(0); + + SQLiteStmt queryLastPurge(state->db, "select value from LastPurge"); + auto queryLastPurge_(queryLastPurge.use()); + + if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) { + SQLiteStmt(state->db, + "delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))") + .use() + (now - ttlNegative) + (now - ttlPositive) + .exec(); + + debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db)); + + SQLiteStmt(state->db, + "insert or replace into LastPurge(dummy, value) values ('', ?)") + .use()(now).exec(); + } } Cache & getCache(State & state, const std::string & uri) @@ -152,10 +173,15 @@ public: auto & cache(getCache(*state, uri)); - auto queryNAR(state->queryNAR.use()(cache.id)(hashPart)); + auto now = time(0); + + auto queryNAR(state->queryNAR.use() + (cache.id) + (hashPart) + (now - ttlNegative) + (now - ttlPositive)); if (!queryNAR.next()) - // FIXME: check NARExistence return {oUnknown, 0}; if (!queryNAR.getInt(13)) @@ -163,8 +189,6 @@ public: auto narInfo = make_ref<NarInfo>(); - // FIXME: implement TTL. - auto namePart = queryNAR.getStr(2); narInfo->path = cache.storeDir + "/" + hashPart + (namePart.empty() ? 
"" : "-" + namePart); diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc index 620c9a6b752d..bf7ad3d21851 100644 --- a/src/libstore/pathlocks.cc +++ b/src/libstore/pathlocks.cc @@ -13,7 +13,7 @@ namespace nix { -int openLockFile(const Path & path, bool create) +AutoCloseFD openLockFile(const Path & path, bool create) { AutoCloseFD fd; @@ -21,7 +21,7 @@ int openLockFile(const Path & path, bool create) if (!fd && (create || errno != ENOENT)) throw SysError(format("opening lock file ‘%1%’") % path); - return fd.release(); + return fd; } @@ -136,6 +136,7 @@ bool PathLocks::lockPaths(const PathSet & _paths, /* Failed to lock this path; release all other locks. */ unlock(); + lockedPaths_.lock()->erase(lockPath); return false; } } diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh index 40103c393f64..2a7de611446e 100644 --- a/src/libstore/pathlocks.hh +++ b/src/libstore/pathlocks.hh @@ -1,6 +1,6 @@ #pragma once -#include "types.hh" +#include "util.hh" namespace nix { @@ -9,7 +9,7 @@ namespace nix { /* Open (possibly create) a lock file and return the file descriptor. -1 is returned if create is false and the lock could not be opened because it doesn't exist. Any other error throws an exception. */ -int openLockFile(const Path & path, bool create); +AutoCloseFD openLockFile(const Path & path, bool create); /* Delete an open lock file. */ void deleteLockFile(const Path & path, int fd); @@ -19,7 +19,7 @@ enum LockType { ltRead, ltWrite, ltNone }; bool lockFile(int fd, LockType lockType, bool wait); -class PathLocks +class PathLocks { private: typedef std::pair<int, Path> FDPair; diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 816d95ba6075..42c09ec7e0b6 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -37,6 +37,7 @@ template<class T> T readStorePaths(Store & store, Source & from) } template PathSet readStorePaths(Store & store, Source & from); +template Paths readStorePaths(Store & store, Source & from); /* TODO: Separate these store impls into different files, give them better names */ RemoteStore::RemoteStore(const Params & params, size_t maxConnections) diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh new file mode 100644 index 000000000000..f8cc9a4b6ebe --- /dev/null +++ b/src/libstore/serve-protocol.hh @@ -0,0 +1,23 @@ +#pragma once + +namespace nix { + +#define SERVE_MAGIC_1 0x390c9deb +#define SERVE_MAGIC_2 0x5452eecb + +#define SERVE_PROTOCOL_VERSION 0x203 +#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) +#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) + +typedef enum { + cmdQueryValidPaths = 1, + cmdQueryPathInfos = 2, + cmdDumpStorePath = 3, + cmdImportPaths = 4, + cmdExportPaths = 5, + cmdBuildPaths = 6, + cmdQueryClosure = 7, + cmdBuildDerivation = 8, +} ServeCommand; + +} diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh index 7c1ed538215c..4d347a2e56ab 100644 --- a/src/libstore/sqlite.hh +++ b/src/libstore/sqlite.hh @@ -31,6 +31,7 @@ struct SQLiteStmt sqlite3 * db = 0; sqlite3_stmt * stmt = 0; SQLiteStmt() { } + SQLiteStmt(sqlite3 * db, const std::string & s) { create(db, s); } void create(sqlite3 * db, const std::string & s); ~SQLiteStmt(); operator sqlite3_stmt * () { return stmt; } diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index cce0458c69f2..6f1862afa899 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -7,11 +7,13 @@ namespace nix { +static std::string uriScheme = "ssh://"; + class SSHStore : public 
RemoteStore { public: - SSHStore(string uri, const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max()); + SSHStore(string host, const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max()); std::string getUri() override; @@ -36,19 +38,19 @@ private: Pid sshMaster; - string uri; + string host; Path key; bool compress; }; -SSHStore::SSHStore(string uri, const Params & params, size_t maxConnections) +SSHStore::SSHStore(string host, const Params & params, size_t maxConnections) : Store(params) , RemoteStore(params, maxConnections) , tmpDir(createTempDir("", "nix", true, true, 0700)) , socketPath((Path) tmpDir + "/ssh.sock") - , uri(std::move(uri)) + , host(std::move(host)) , key(get(params, "ssh-key", "")) , compress(get(params, "compress", "") == "true") { @@ -58,7 +60,7 @@ SSHStore::SSHStore(string uri, const Params & params, size_t maxConnections) string SSHStore::getUri() { - return "ssh://" + uri; + return uriScheme + host; } class ForwardSource : public Source @@ -93,12 +95,12 @@ ref<FSAccessor> SSHStore::getFSAccessor() ref<RemoteStore::Connection> SSHStore::openConnection() { if ((pid_t) sshMaster == -1) { - auto flags = compress ? "-NMCS" : "-NMS"; sshMaster = startProcess([&]() { + restoreSignals(); if (key.empty()) - execlp("ssh", "ssh", flags, socketPath.c_str(), uri.c_str(), NULL); + execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), host.c_str(), NULL); else - execlp("ssh", "ssh", flags, socketPath.c_str(), "-i", key.c_str(), uri.c_str(), NULL); + execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), "-i", key.c_str(), host.c_str(), NULL); throw SysError("starting ssh master"); }); } @@ -112,7 +114,7 @@ ref<RemoteStore::Connection> SSHStore::openConnection() throw SysError("duping over STDIN"); if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) throw SysError("duping over STDOUT"); - execlp("ssh", "ssh", "-S", socketPath.c_str(), uri.c_str(), "nix-daemon", "--stdio", NULL); + execlp("ssh", "ssh", "-S", socketPath.c_str(), host.c_str(), "nix-daemon", "--stdio", NULL); throw SysError("executing nix-daemon --stdio over ssh"); }); in.readSide = -1; @@ -129,8 +131,8 @@ static RegisterStoreImplementation regStore([]( const std::string & uri, const Store::Params & params) -> std::shared_ptr<Store> { - if (std::string(uri, 0, 6) != "ssh://") return 0; - return std::make_shared<SSHStore>(uri.substr(6), params); + if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0; + return std::make_shared<SSHStore>(std::string(uri, uriScheme.size()), params); }); } diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index c8ca00f00694..b5934a0d1232 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -4,6 +4,7 @@ #include "util.hh" #include "nar-info-disk-cache.hh" #include "thread-pool.hh" +#include "json.hh" #include "derivations.hh" #include <future> @@ -285,6 +286,19 @@ bool Store::isValidPath(const Path & storePath) } +/* Default implementation for stores that only implement + queryPathInfoUncached(). 
*/ +bool Store::isValidPathUncached(const Path & path) +{ + try { + queryPathInfo(path); + return true; + } catch (InvalidPath &) { + return false; + } +} + + ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath) { std::promise<ref<ValidPathInfo>> promise; @@ -440,6 +454,64 @@ string Store::makeValidityRegistration(const PathSet & paths, } +void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const PathSet & storePaths, + bool includeImpureInfo, bool showClosureSize) +{ + auto jsonList = jsonOut.list(); + + for (auto storePath : storePaths) { + auto info = queryPathInfo(storePath); + storePath = info->path; + + auto jsonPath = jsonList.object(); + jsonPath + .attr("path", storePath) + .attr("narHash", info->narHash.to_string()) + .attr("narSize", info->narSize); + + { + auto jsonRefs = jsonPath.list("references"); + for (auto & ref : info->references) + jsonRefs.elem(ref); + } + + if (info->ca != "") + jsonPath.attr("ca", info->ca); + + if (showClosureSize) + jsonPath.attr("closureSize", getClosureSize(storePath)); + + if (!includeImpureInfo) continue; + + if (info->deriver != "") + jsonPath.attr("deriver", info->deriver); + + if (info->registrationTime) + jsonPath.attr("registrationTime", info->registrationTime); + + if (info->ultimate) + jsonPath.attr("ultimate", info->ultimate); + + if (!info->sigs.empty()) { + auto jsonSigs = jsonPath.list("signatures"); + for (auto & sig : info->sigs) + jsonSigs.elem(sig); + } + } +} + + +unsigned long long Store::getClosureSize(const Path & storePath) +{ + unsigned long long totalSize = 0; + PathSet closure; + computeFSClosure(storePath, closure, false, false); + for (auto & p : closure) + totalSize += queryPathInfo(p)->narSize; + return totalSize; +} + + const Store::Stats & Store::getStats() { { @@ -458,6 +530,15 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, StringSink sink; srcStore->narFromPath({storePath}, sink); + if (srcStore->isTrusted()) + dontCheckSigs = true; + + if (!info->narHash && dontCheckSigs) { + auto info2 = make_ref<ValidPathInfo>(*info); + info2->narHash = hashString(htSHA256, *sink.s); + info = info2; + } + dstStore->addToStore(*info, sink.s, repair, dontCheckSigs); } diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 30ee433bf074..d03e70849f93 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -22,6 +22,7 @@ struct Derivation; class FSAccessor; class NarInfoDiskCache; class Store; +class JSONPlaceholder; /* Size of the hash part of store paths, in base-32 characters. */ @@ -319,7 +320,7 @@ public: protected: - virtual bool isValidPathUncached(const Path & path) = 0; + virtual bool isValidPathUncached(const Path & path); public: @@ -359,7 +360,7 @@ public: output. (Note that the result of `queryDeriver()' is the derivation that was actually used to produce `path', which may not exist anymore.) */ - virtual PathSet queryValidDerivers(const Path & path) = 0; + virtual PathSet queryValidDerivers(const Path & path) { return {}; }; /* Query the outputs of the derivation denoted by `path'. */ virtual PathSet queryDerivationOutputs(const Path & path) = 0; @@ -372,13 +373,13 @@ public: virtual Path queryPathFromHashPart(const string & hashPart) = 0; /* Query which of the given paths have substitutes. */ - virtual PathSet querySubstitutablePaths(const PathSet & paths) = 0; + virtual PathSet querySubstitutablePaths(const PathSet & paths) { return {}; }; /* Query substitute info (i.e. references, derivers and download sizes) of a set of paths. 
If a path does not have substitute info, it's omitted from the resulting ‘infos’ map. */ virtual void querySubstitutablePathInfos(const PathSet & paths, - SubstitutablePathInfos & infos) = 0; + SubstitutablePathInfos & infos) { return; }; virtual bool wantMassQuery() { return false; } @@ -453,7 +454,7 @@ public: permanent root and sees our's. In either case the permanent root is seen by the collector. */ - virtual void syncWithGC() = 0; + virtual void syncWithGC() { }; /* Find the roots of the garbage collector. Each root is a pair (link, storepath) where `link' is the path of the symlink @@ -469,13 +470,26 @@ public: string makeValidityRegistration(const PathSet & paths, bool showDerivers, bool showHash); + /* Write a JSON representation of store path metadata, such as the + hash and the references. If ‘includeImpureInfo’ is true, + variable elements such as the registration time are + included. If ‘showClosureSize’ is true, the closure size of + each path is included. */ + void pathInfoToJSON(JSONPlaceholder & jsonOut, const PathSet & storePaths, + bool includeImpureInfo, bool showClosureSize); + + /* Return the size of the closure of the specified path, that is, + the sum of the size of the NAR serialisation of each path in + the closure. */ + unsigned long long getClosureSize(const Path & storePath); + /* Optimise the disk space usage of the Nix store by hard-linking files with the same contents. */ - virtual void optimiseStore() = 0; + virtual void optimiseStore() { }; /* Check the integrity of the Nix store. Returns true if errors remain. */ - virtual bool verifyStore(bool checkContents, bool repair) = 0; + virtual bool verifyStore(bool checkContents, bool repair) { return false; }; /* Return an object to access files in the Nix store. */ virtual ref<FSAccessor> getFSAccessor() = 0; @@ -548,6 +562,10 @@ public: const Stats & getStats(); + /* Whether this store paths from this store can be imported even + if they lack a signature. */ + virtual bool isTrusted() { return false; } + protected: Stats stats; |
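
A side note on the new serve-protocol.hh: the version word packs the major version in the high byte and the minor version in the low byte, which is why LegacySSHStore::openConnection() accepts any remote whose major version equals 0x200 regardless of its minor version. The tiny standalone check below (written for this note, not part of the commit) makes the packing concrete.

#include <cassert>
#include <cstdio>

// Copied from the serve-protocol.hh hunk above.
#define SERVE_PROTOCOL_VERSION 0x203
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

int main()
{
    // 0x203 -> major 0x200 (i.e. "2"), minor 3.
    assert(GET_PROTOCOL_MAJOR(SERVE_PROTOCOL_VERSION) == 0x200);
    assert(GET_PROTOCOL_MINOR(SERVE_PROTOCOL_VERSION) == 3);
    std::printf("serve protocol major=%d minor=%d\n",
        GET_PROTOCOL_MAJOR(SERVE_PROTOCOL_VERSION) >> 8,
        GET_PROTOCOL_MINOR(SERVE_PROTOCOL_VERSION));
    return 0;
}
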