diff options
Diffstat (limited to 'src/libstore')
46 files changed, 5286 insertions, 2926 deletions
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc new file mode 100644 index 000000000000..58cb87a516b9 --- /dev/null +++ b/src/libstore/binary-cache-store.cc @@ -0,0 +1,299 @@ +#include "archive.hh" +#include "binary-cache-store.hh" +#include "compression.hh" +#include "derivations.hh" +#include "fs-accessor.hh" +#include "globals.hh" +#include "nar-info.hh" +#include "sync.hh" +#include "worker-protocol.hh" +#include "nar-accessor.hh" +#include "nar-info-disk-cache.hh" + +#include <chrono> + +namespace nix { + +BinaryCacheStore::BinaryCacheStore(const StoreParams & params) + : compression(get(params, "compression", "xz")) +{ + auto secretKeyFile = get(params, "secret-key", ""); + if (secretKeyFile != "") + secretKey = std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile))); + + StringSink sink; + sink << narVersionMagic1; + narMagic = *sink.s; +} + +void BinaryCacheStore::init() +{ + std::string cacheInfoFile = "nix-cache-info"; + + auto cacheInfo = getFile(cacheInfoFile); + if (!cacheInfo) { + upsertFile(cacheInfoFile, "StoreDir: " + settings.nixStore + "\n"); + } else { + for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) { + size_t colon = line.find(':'); + if (colon == std::string::npos) continue; + auto name = line.substr(0, colon); + auto value = trim(line.substr(colon + 1, std::string::npos)); + if (name == "StoreDir") { + if (value != settings.nixStore) + throw Error(format("binary cache ‘%s’ is for Nix stores with prefix ‘%s’, not ‘%s’") + % getUri() % value % settings.nixStore); + } else if (name == "WantMassQuery") { + wantMassQuery_ = value == "1"; + } else if (name == "Priority") { + string2Int(value, priority); + } + } + } +} + +void BinaryCacheStore::notImpl() +{ + throw Error("operation not implemented for binary cache stores"); +} + +Path BinaryCacheStore::narInfoFileFor(const Path & storePath) +{ + assertStorePath(storePath); + return storePathToHash(storePath) + ".narinfo"; +} + +void 
BinaryCacheStore::addToStore(const ValidPathInfo & info, const std::string & nar, bool repair) +{ + if (!repair && isValidPath(info.path)) return; + + /* Verify that all references are valid. This may do some .narinfo + reads, but typically they'll already be cached. */ + for (auto & ref : info.references) + try { + if (ref != info.path) + queryPathInfo(ref); + } catch (InvalidPath &) { + throw Error(format("cannot add ‘%s’ to the binary cache because the reference ‘%s’ is not valid") + % info.path % ref); + } + + auto narInfoFile = narInfoFileFor(info.path); + + assert(nar.compare(0, narMagic.size(), narMagic) == 0); + + auto narInfo = make_ref<NarInfo>(info); + + narInfo->narSize = nar.size(); + narInfo->narHash = hashString(htSHA256, nar); + + if (info.narHash && info.narHash != narInfo->narHash) + throw Error(format("refusing to copy corrupted path ‘%1%’ to binary cache") % info.path); + + /* Compress the NAR. */ + narInfo->compression = compression; + auto now1 = std::chrono::steady_clock::now(); + auto narCompressed = compress(compression, nar); + auto now2 = std::chrono::steady_clock::now(); + narInfo->fileHash = hashString(htSHA256, *narCompressed); + narInfo->fileSize = narCompressed->size(); + + auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count(); + printMsg(lvlTalkative, format("copying path ‘%1%’ (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache") + % narInfo->path % narInfo->narSize + % ((1.0 - (double) narCompressed->size() / nar.size()) * 100.0) + % duration); + + /* Atomically write the NAR file. */ + narInfo->url = "nar/" + printHash32(narInfo->fileHash) + ".nar" + + (compression == "xz" ? ".xz" : + compression == "bzip2" ? 
".bz2" : + ""); + if (repair || !fileExists(narInfo->url)) { + stats.narWrite++; + upsertFile(narInfo->url, *narCompressed); + } else + stats.narWriteAverted++; + + stats.narWriteBytes += nar.size(); + stats.narWriteCompressedBytes += narCompressed->size(); + stats.narWriteCompressionTimeMs += duration; + + /* Atomically write the NAR info file.*/ + if (secretKey) narInfo->sign(*secretKey); + + upsertFile(narInfoFile, narInfo->to_string()); + + auto hashPart = storePathToHash(narInfo->path); + + { + auto state_(state.lock()); + state_->pathInfoCache.upsert(hashPart, std::shared_ptr<NarInfo>(narInfo)); + } + + if (diskCache) + diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo)); + + stats.narInfoWrite++; +} + +bool BinaryCacheStore::isValidPathUncached(const Path & storePath) +{ + // FIXME: this only checks whether a .narinfo with a matching hash + // part exists. So ‘f4kb...-foo’ matches ‘f4kb...-bar’, even + // though they shouldn't. Not easily fixed. + return fileExists(narInfoFileFor(storePath)); +} + +void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink) +{ + auto info = queryPathInfo(storePath).cast<const NarInfo>(); + + auto nar = getFile(info->url); + + if (!nar) throw Error(format("file ‘%s’ missing from binary cache") % info->url); + + stats.narRead++; + stats.narReadCompressedBytes += nar->size(); + + /* Decompress the NAR. FIXME: would be nice to have the remote + side do this. 
*/ + try { + nar = decompress(info->compression, *nar); + } catch (UnknownCompressionMethod &) { + throw Error(format("binary cache path ‘%s’ uses unknown compression method ‘%s’") + % storePath % info->compression); + } + + stats.narReadBytes += nar->size(); + + printMsg(lvlTalkative, format("exporting path ‘%1%’ (%2% bytes)") % storePath % nar->size()); + + assert(nar->size() % 8 == 0); + + sink((unsigned char *) nar->c_str(), nar->size()); +} + +std::shared_ptr<ValidPathInfo> BinaryCacheStore::queryPathInfoUncached(const Path & storePath) +{ + auto narInfoFile = narInfoFileFor(storePath); + auto data = getFile(narInfoFile); + if (!data) return 0; + + auto narInfo = make_ref<NarInfo>(*data, narInfoFile); + + stats.narInfoRead++; + + return std::shared_ptr<NarInfo>(narInfo); +} + +Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath, + bool recursive, HashType hashAlgo, PathFilter & filter, bool repair) +{ + // FIXME: some cut&paste from LocalStore::addToStore(). + + /* Read the whole path into memory. This is not a very scalable + method for very large paths, but `copyPath' is mainly used for + small files. 
*/ + StringSink sink; + Hash h; + if (recursive) { + dumpPath(srcPath, sink, filter); + h = hashString(hashAlgo, *sink.s); + } else { + auto s = readFile(srcPath); + dumpString(s, sink); + h = hashString(hashAlgo, s); + } + + ValidPathInfo info; + info.path = makeFixedOutputPath(recursive, hashAlgo, h, name); + + addToStore(info, *sink.s, repair); + + return info.path; +} + +Path BinaryCacheStore::addTextToStore(const string & name, const string & s, + const PathSet & references, bool repair) +{ + ValidPathInfo info; + info.path = computeStorePathForText(name, s, references); + info.references = references; + + if (repair || !isValidPath(info.path)) { + StringSink sink; + dumpString(s, sink); + addToStore(info, *sink.s, repair); + } + + return info.path; +} + +/* Given requests for a path /nix/store/<x>/<y>, this accessor will + first download the NAR for /nix/store/<x> from the binary cache, + build a NAR accessor for that NAR, and use that to access <y>. */ +struct BinaryCacheStoreAccessor : public FSAccessor +{ + ref<BinaryCacheStore> store; + + std::map<Path, ref<FSAccessor>> nars; + + BinaryCacheStoreAccessor(ref<BinaryCacheStore> store) + : store(store) + { + } + + std::pair<ref<FSAccessor>, Path> fetch(const Path & path_) + { + auto path = canonPath(path_); + + auto storePath = toStorePath(path); + std::string restPath = std::string(path, storePath.size()); + + if (!store->isValidPath(storePath)) + throw Error(format("path ‘%1%’ is not a valid store path") % storePath); + + auto i = nars.find(storePath); + if (i != nars.end()) return {i->second, restPath}; + + StringSink sink; + store->narFromPath(storePath, sink); + + auto accessor = makeNarAccessor(sink.s); + nars.emplace(storePath, accessor); + return {accessor, restPath}; + } + + Stat stat(const Path & path) override + { + auto res = fetch(path); + return res.first->stat(res.second); + } + + StringSet readDirectory(const Path & path) override + { + auto res = fetch(path); + return 
res.first->readDirectory(res.second); + } + + std::string readFile(const Path & path) override + { + auto res = fetch(path); + return res.first->readFile(res.second); + } + + std::string readLink(const Path & path) override + { + auto res = fetch(path); + return res.first->readLink(res.second); + } +}; + +ref<FSAccessor> BinaryCacheStore::getFSAccessor() +{ + return make_ref<BinaryCacheStoreAccessor>(ref<BinaryCacheStore>( + std::dynamic_pointer_cast<BinaryCacheStore>(shared_from_this()))); +} + +} diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh new file mode 100644 index 000000000000..c14ab8676a9c --- /dev/null +++ b/src/libstore/binary-cache-store.hh @@ -0,0 +1,136 @@ +#pragma once + +#include "crypto.hh" +#include "store-api.hh" + +#include "pool.hh" + +#include <atomic> + +namespace nix { + +struct NarInfo; + +class BinaryCacheStore : public Store +{ +private: + + std::unique_ptr<SecretKey> secretKey; + + std::string compression; + +protected: + + BinaryCacheStore(const StoreParams & params); + + [[noreturn]] void notImpl(); + + virtual bool fileExists(const std::string & path) = 0; + + virtual void upsertFile(const std::string & path, const std::string & data) = 0; + + /* Return the contents of the specified file, or null if it + doesn't exist. 
*/ + virtual std::shared_ptr<std::string> getFile(const std::string & path) = 0; + + bool wantMassQuery_ = false; + int priority = 50; + +public: + + virtual void init(); + +private: + + std::string narMagic; + + std::string narInfoFileFor(const Path & storePath); + +public: + + bool isValidPathUncached(const Path & path) override; + + PathSet queryValidPaths(const PathSet & paths) override + { notImpl(); } + + PathSet queryAllValidPaths() override + { notImpl(); } + + std::shared_ptr<ValidPathInfo> queryPathInfoUncached(const Path & path) override; + + void queryReferrers(const Path & path, + PathSet & referrers) override + { notImpl(); } + + PathSet queryValidDerivers(const Path & path) override + { return {}; } + + PathSet queryDerivationOutputs(const Path & path) override + { notImpl(); } + + StringSet queryDerivationOutputNames(const Path & path) override + { notImpl(); } + + Path queryPathFromHashPart(const string & hashPart) override + { notImpl(); } + + PathSet querySubstitutablePaths(const PathSet & paths) override + { return {}; } + + void querySubstitutablePathInfos(const PathSet & paths, + SubstitutablePathInfos & infos) + { } + + bool wantMassQuery() { return wantMassQuery_; } + + void addToStore(const ValidPathInfo & info, const std::string & nar, + bool repair = false) override; + + Path addToStore(const string & name, const Path & srcPath, + bool recursive = true, HashType hashAlgo = htSHA256, + PathFilter & filter = defaultPathFilter, bool repair = false) override; + + Path addTextToStore(const string & name, const string & s, + const PathSet & references, bool repair = false) override; + + void narFromPath(const Path & path, Sink & sink) override; + + void buildPaths(const PathSet & paths, BuildMode buildMode = bmNormal) override + { notImpl(); } + + BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv, + BuildMode buildMode = bmNormal) override + { notImpl(); } + + void ensurePath(const Path & path) override + { 
notImpl(); } + + void addTempRoot(const Path & path) override + { notImpl(); } + + void addIndirectRoot(const Path & path) override + { notImpl(); } + + void syncWithGC() override + { } + + Roots findRoots() override + { notImpl(); } + + void collectGarbage(const GCOptions & options, GCResults & results) override + { notImpl(); } + + void optimiseStore() override + { } + + bool verifyStore(bool checkContents, bool repair) override + { return true; } + + ref<FSAccessor> getFSAccessor() override; + + void addSignatures(const Path & storePath, const StringSet & sigs) + { notImpl(); } + +}; + +} diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 8c4412f11a8b..cca357dfb31b 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -2,16 +2,21 @@ #include "references.hh" #include "pathlocks.hh" -#include "misc.hh" #include "globals.hh" #include "local-store.hh" #include "util.hh" #include "archive.hh" #include "affinity.hh" +#include "builtins.hh" +#include "finally.hh" +#include "compression.hh" +#include <algorithm> +#include <iostream> #include <map> #include <sstream> -#include <algorithm> +#include <thread> +#include <future> #include <limits.h> #include <time.h> @@ -21,57 +26,35 @@ #include <sys/stat.h> #include <sys/utsname.h> #include <sys/select.h> +#include <sys/resource.h> #include <fcntl.h> #include <unistd.h> #include <errno.h> -#include <stdio.h> #include <cstring> #include <pwd.h> #include <grp.h> -#include <bzlib.h> - -/* Includes required for chroot support. */ -#if HAVE_SYS_PARAM_H -#include <sys/param.h> -#endif -#if HAVE_SYS_MOUNT_H -#include <sys/mount.h> -#endif -#if HAVE_SYS_SYSCALL_H -#include <sys/syscall.h> -#endif -#if HAVE_SCHED_H -#include <sched.h> -#endif - -/* In GNU libc 2.11, <sys/mount.h> does not define `MS_PRIVATE', but - <linux/fs.h> does. 
*/ -#if !defined MS_PRIVATE && defined HAVE_LINUX_FS_H -#include <linux/fs.h> -#endif - -#define CHROOT_ENABLED HAVE_CHROOT && HAVE_UNSHARE && HAVE_SYS_MOUNT_H && defined(MS_BIND) && defined(MS_PRIVATE) && defined(CLONE_NEWNS) && defined(SYS_pivot_root) - /* chroot-like behavior from Apple's sandbox */ #if __APPLE__ - #define SANDBOX_ENABLED 1 #define DEFAULT_ALLOWED_IMPURE_PREFIXES "/System/Library /usr/lib /dev /bin/sh" #else - #define SANDBOX_ENABLED 0 - #define DEFAULT_ALLOWED_IMPURE_PREFIXES "/bin" "/usr/bin" + #define DEFAULT_ALLOWED_IMPURE_PREFIXES "" #endif -#if CHROOT_ENABLED +/* Includes required for chroot support. */ +#if __linux__ #include <sys/socket.h> #include <sys/ioctl.h> #include <net/if.h> #include <netinet/ip.h> -#endif - -#if __linux__ #include <sys/personality.h> +#include <sys/mman.h> +#include <sched.h> +#include <sys/param.h> +#include <sys/mount.h> +#include <sys/syscall.h> +#define pivot_root(new_root, put_old) (syscall(SYS_pivot_root, new_root, put_old)) #endif #if HAVE_STATVFS @@ -94,6 +77,7 @@ struct HookInstance; /* A pointer to a goal. */ class Goal; +class DerivationGoal; typedef std::shared_ptr<Goal> GoalPtr; typedef std::weak_ptr<Goal> WeakGoalPtr; @@ -184,10 +168,10 @@ public: return exitCode; } - /* Cancel the goal. It should wake up its waiters, get rid of any - running child processes that are being monitored by the worker - (important!), etc. */ - virtual void cancel(bool timeout) = 0; + /* Callback in case of a timeout. It should wake up its waiters, + get rid of any running child processes that are being monitored + by the worker (important!), etc. */ + virtual void timedOut() = 0; virtual string key() = 0; @@ -216,8 +200,6 @@ struct Child time_t timeStarted; }; -typedef map<pid_t, Child> Children; - /* The worker class. */ class Worker @@ -237,7 +219,7 @@ private: WeakGoals wantingToBuild; /* Child processes currently running. */ - Children children; + std::list<Child> children; /* Number of build slots occupied. 
This includes local builds and substitutions but not remote builds via the build hook. */ @@ -257,6 +239,9 @@ private: /* Last time the goals in `waitingForAWhile' where woken up. */ time_t lastWokenUp; + /* Cache for pathContentsGood(). */ + std::map<Path, bool> pathContentsGoodCache; + public: /* Set if at least one derivation had a BuildError (i.e. permanent @@ -275,6 +260,8 @@ public: /* Make a goal (with caching). */ GoalPtr makeDerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, BuildMode buildMode = bmNormal); + std::shared_ptr<DerivationGoal> makeBasicDerivationGoal(const Path & drvPath, + const BasicDerivation & drv, BuildMode buildMode = bmNormal); GoalPtr makeSubstitutionGoal(const Path & storePath, bool repair = false); /* Remove a dead goal. */ @@ -290,14 +277,14 @@ public: /* Registers a running child process. `inBuildSlot' means that the process counts towards the jobs limit. */ - void childStarted(GoalPtr goal, pid_t pid, - const set<int> & fds, bool inBuildSlot, bool respectTimeouts); + void childStarted(GoalPtr goal, const set<int> & fds, + bool inBuildSlot, bool respectTimeouts); /* Unregisters a running child process. `wakeSleepers' should be false if there is no sense in waking up goals that are sleeping because they can't run yet (e.g., there is no free build slot, or the hook would still say `postpone'). */ - void childTerminated(pid_t pid, bool wakeSleepers = true); + void childTerminated(GoalPtr goal, bool wakeSleepers = true); /* Put `goal' to sleep until a build slot becomes available (which might be right away). */ @@ -320,6 +307,12 @@ public: void waitForInput(); unsigned int exitStatus(); + + /* Check whether the given valid path exists and has the right + contents. */ + bool pathContentsGood(const Path & path); + + void markContentsGood(const Path & path); }; @@ -330,8 +323,8 @@ void addToWeakGoals(WeakGoals & goals, GoalPtr p) { // FIXME: necessary? 
// FIXME: O(n) - foreach (WeakGoals::iterator, i, goals) - if (i->lock() == p) return; + for (auto & i : goals) + if (i.lock() == p) return; goals.push_back(p); } @@ -361,11 +354,10 @@ void Goal::waiteeDone(GoalPtr waitee, ExitCode result) /* If we failed and keepGoing is not set, we remove all remaining waitees. */ - foreach (Goals::iterator, i, waitees) { - GoalPtr goal = *i; + for (auto & goal : waitees) { WeakGoals waiters2; - foreach (WeakGoals::iterator, j, goal->waiters) - if (j->lock() != shared_from_this()) waiters2.push_back(*j); + for (auto & j : goal->waiters) + if (j.lock() != shared_from_this()) waiters2.push_back(j); goal->waiters = waiters2; } waitees.clear(); @@ -381,8 +373,8 @@ void Goal::amDone(ExitCode result) assert(exitCode == ecBusy); assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure); exitCode = result; - foreach (WeakGoals::iterator, i, waiters) { - GoalPtr goal = i->lock(); + for (auto & i : waiters) { + GoalPtr goal = i.lock(); if (goal) goal->waiteeDone(shared_from_this(), result); } waiters.clear(); @@ -508,13 +500,13 @@ void UserLock::acquire() /* Find a user account that isn't currently in use for another build. */ - foreach (Strings::iterator, i, users) { - debug(format("trying user ‘%1%’") % *i); + for (auto & i : users) { + debug(format("trying user ‘%1%’") % i); - struct passwd * pw = getpwnam(i->c_str()); + struct passwd * pw = getpwnam(i.c_str()); if (!pw) throw Error(format("the user ‘%1%’ in the group ‘%2%’ does not exist") - % *i % settings.buildUsersGroup); + % i % settings.buildUsersGroup); createDirs(settings.nixStateDir + "/userpool"); @@ -532,7 +524,7 @@ void UserLock::acquire() if (lockFile(fd, ltWrite, false)) { fdUserLock = fd.borrow(); lockedPaths.insert(fnUserLock); - user = *i; + user = i; uid = pw->pw_uid; /* Sanity check... 
*/ @@ -540,6 +532,7 @@ void UserLock::acquire() throw Error(format("the Nix user should not be a member of ‘%1%’") % settings.buildUsersGroup); +#if __linux__ /* Get the list of supplementary groups of this build user. This is usually either empty or contains a group such as "kvm". */ supplementaryGIDs.resize(10); @@ -550,6 +543,7 @@ void UserLock::acquire() throw Error(format("failed to get list of supplementary groups for ‘%1%’") % pw->pw_name); supplementaryGIDs.resize(ngroups); +#endif return; } @@ -634,11 +628,14 @@ HookInstance::HookInstance() if (dup2(builderOut.writeSide, 4) == -1) throw SysError("dupping builder's stdout/stderr"); - execl(buildHook.c_str(), buildHook.c_str(), settings.thisSystem.c_str(), - (format("%1%") % settings.maxSilentTime).str().c_str(), - (format("%1%") % settings.printBuildTrace).str().c_str(), - (format("%1%") % settings.buildTimeout).str().c_str(), - NULL); + Strings args = { + baseNameOf(buildHook), + settings.thisSystem, + (format("%1%") % settings.maxSilentTime).str(), + (format("%1%") % settings.buildTimeout).str() + }; + + execv(buildHook.c_str(), stringsToCharPtrs(args).data()); throw SysError(format("executing ‘%1%’") % buildHook); }); @@ -668,12 +665,12 @@ typedef map<string, string> HashRewrites; string rewriteHashes(string s, const HashRewrites & rewrites) { - foreach (HashRewrites::const_iterator, i, rewrites) { - assert(i->first.size() == i->second.size()); + for (auto & i : rewrites) { + assert(i.first.size() == i.second.size()); size_t j = 0; - while ((j = s.find(i->first, j)) != string::npos) { + while ((j = s.find(i.first, j)) != string::npos) { debug(format("rewriting @ %1%") % j); - s.replace(j, i->second.size(), i->second); + s.replace(j, i.second.size(), i.second); } } return s; @@ -690,6 +687,9 @@ class SubstitutionGoal; class DerivationGoal : public Goal { private: + /* Whether to use an on-disk .drv file. */ + bool useDerivation; + /* The path of the derivation. 
*/ Path drvPath; @@ -698,14 +698,14 @@ private: StringSet wantedOutputs; /* Whether additional wanted outputs have been added. */ - bool needRestart; + bool needRestart = false; /* Whether to retry substituting the outputs after building the inputs. */ - bool retrySubstitution; + bool retrySubstitution = false; /* The derivation stored at drvPath. */ - Derivation drv; + std::unique_ptr<BasicDerivation> drv; /* The remainder is state held during the build. */ @@ -735,14 +735,22 @@ private: /* The temporary directory. */ Path tmpDir; + /* The path of the temporary directory in the sandbox. */ + Path tmpDirInSandbox; + /* File descriptor for the log file. */ - FILE * fLogFile; - BZFILE * bzLogFile; AutoCloseFD fdLogFile; + std::shared_ptr<BufferedSink> logFileSink, logSink; /* Number of bytes received from the builder's stdout/stderr. */ unsigned long logSize; + /* The most recent log lines. */ + std::list<std::string> logTail; + + std::string currentLogLine; + size_t currentLogLinePos = 0; // to handle carriage return + /* Pipe for the builder's standard output/error. */ Pipe builderOut; @@ -750,16 +758,13 @@ private: std::shared_ptr<HookInstance> hook; /* Whether we're currently doing a chroot build. */ - bool useChroot; + bool useChroot = false; Path chrootRootDir; /* RAII object to delete the chroot directory. */ std::shared_ptr<AutoDelete> autoDelChroot; - /* All inputs that are regular files. */ - PathSet regularInputPaths; - /* Whether this is a fixed-output derivation. */ bool fixedOutput; @@ -772,6 +777,12 @@ private: typedef map<string, string> Environment; Environment env; +#if __APPLE__ + typedef string SandboxProfile; + SandboxProfile additionalSandboxProfile; + AutoDelete autoDelSandbox; +#endif + /* Hash rewriting. */ HashRewrites rewritesToTmp, rewritesFromTmp; typedef map<Path, Path> RedirectedOutputs; @@ -784,18 +795,29 @@ private: temporary paths. 
*/ PathSet redirectedBadOutputs; - /* Set of inodes seen during calls to canonicalisePathMetaData() - for this build's outputs. This needs to be shared between - outputs to allow hard links between outputs. */ - InodesSeen inodesSeen; + BuildResult result; + + /* The current round, if we're building multiple times. */ + unsigned int curRound = 1; + + unsigned int nrRounds; + + /* Path registration info from the previous round, if we're + building multiple times. Since this contains the hash, it + allows us to compare whether two rounds produced the same + result. */ + ValidPathInfos prevInfos; public: - DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode = bmNormal); + DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, + Worker & worker, BuildMode buildMode = bmNormal); + DerivationGoal(const Path & drvPath, const BasicDerivation & drv, + Worker & worker, BuildMode buildMode = bmNormal); ~DerivationGoal(); - void cancel(bool timeout); + void timedOut() override; - string key() + string key() override { /* Ensure that derivations get built in order of their name, i.e. a derivation named "aardvark" always comes before @@ -804,7 +826,7 @@ public: return "b$" + storePathToName(drvPath) + "$" + drvPath; } - void work(); + void work() override; Path getDrvPath() { @@ -814,9 +836,12 @@ public: /* Add wanted outputs to an already existing derivation goal. */ void addWantedOutputs(const StringSet & outputs); + BuildResult getResult() { return result; } + private: /* The states. */ - void init(); + void getDerivation(); + void loadDerivation(); void haveDerivation(); void outputsSubstituted(); void closureRepaired(); @@ -849,8 +874,9 @@ private: void deleteTmpDir(bool force); /* Callback used by the worker to write to the log. 
*/ - void handleChildOutput(int fd, const string & data); - void handleEOF(int fd); + void handleChildOutput(int fd, const string & data) override; + void handleEOF(int fd) override; + void flushLine(); /* Return the set of (in)valid paths. */ PathSet checkPathValidity(bool returnValid, bool checkHash); @@ -864,26 +890,43 @@ private: Path addHashRewrite(const Path & path); void repairClosure(); + + void done(BuildResult::Status status, const string & msg = ""); }; -DerivationGoal::DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode) +DerivationGoal::DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, + Worker & worker, BuildMode buildMode) : Goal(worker) + , useDerivation(true) + , drvPath(drvPath) , wantedOutputs(wantedOutputs) - , needRestart(false) - , retrySubstitution(false) - , fLogFile(0) - , bzLogFile(0) - , useChroot(false) , buildMode(buildMode) { - this->drvPath = drvPath; - state = &DerivationGoal::init; + state = &DerivationGoal::getDerivation; name = (format("building of ‘%1%’") % drvPath).str(); trace("created"); } +DerivationGoal::DerivationGoal(const Path & drvPath, const BasicDerivation & drv, + Worker & worker, BuildMode buildMode) + : Goal(worker) + , useDerivation(false) + , drvPath(drvPath) + , buildMode(buildMode) +{ + this->drv = std::unique_ptr<BasicDerivation>(new BasicDerivation(drv)); + state = &DerivationGoal::haveDerivation; + name = (format("building of %1%") % showPaths(drv.outputPaths())).str(); + trace("created"); + + /* Prevent the .chroot directory from being + garbage-collected. (See isActiveTempFile() in gc.cc.) 
*/ + worker.store.addTempRoot(drvPath); +} + + DerivationGoal::~DerivationGoal() { /* Careful: we should never ever throw an exception from a @@ -897,7 +940,7 @@ DerivationGoal::~DerivationGoal() void DerivationGoal::killChild() { if (pid != -1) { - worker.childTerminated(pid); + worker.childTerminated(shared_from_this()); if (buildUser.enabled()) { /* If we're using a build user, then there is a tricky @@ -919,12 +962,10 @@ void DerivationGoal::killChild() } -void DerivationGoal::cancel(bool timeout) +void DerivationGoal::timedOut() { - if (settings.printBuildTrace && timeout) - printMsg(lvlError, format("@ build-failed %1% - timeout") % drvPath); killChild(); - amDone(ecFailed); + done(BuildResult::TimedOut); } @@ -943,42 +984,39 @@ void DerivationGoal::addWantedOutputs(const StringSet & outputs) wantedOutputs.clear(); needRestart = true; } else - foreach (StringSet::const_iterator, i, outputs) - if (wantedOutputs.find(*i) == wantedOutputs.end()) { - wantedOutputs.insert(*i); + for (auto & i : outputs) + if (wantedOutputs.find(i) == wantedOutputs.end()) { + wantedOutputs.insert(i); needRestart = true; } } -void DerivationGoal::init() +void DerivationGoal::getDerivation() { trace("init"); - if (settings.readOnlyMode) - throw Error(format("cannot build derivation ‘%1%’ - no write access to the Nix store") % drvPath); - /* The first thing to do is to make sure that the derivation exists. If it doesn't, it may be created through a substitute. 
*/ if (buildMode == bmNormal && worker.store.isValidPath(drvPath)) { - haveDerivation(); + loadDerivation(); return; } addWaitee(worker.makeSubstitutionGoal(drvPath)); - state = &DerivationGoal::haveDerivation; + state = &DerivationGoal::loadDerivation; } -void DerivationGoal::haveDerivation() +void DerivationGoal::loadDerivation() { trace("loading derivation"); if (nrFailed != 0) { printMsg(lvlError, format("cannot build missing derivation ‘%1%’") % drvPath); - amDone(ecFailed); + done(BuildResult::MiscFailure); return; } @@ -990,31 +1028,43 @@ void DerivationGoal::haveDerivation() assert(worker.store.isValidPath(drvPath)); /* Get the derivation. */ - drv = derivationFromPath(worker.store, drvPath); + drv = std::unique_ptr<BasicDerivation>(new Derivation(worker.store.derivationFromPath(drvPath))); + + haveDerivation(); +} + + +void DerivationGoal::haveDerivation() +{ + trace("have derivation"); - foreach (DerivationOutputs::iterator, i, drv.outputs) - worker.store.addTempRoot(i->second.path); + for (auto & i : drv->outputs) + worker.store.addTempRoot(i.second.path); /* Check what outputs paths are not already valid. */ PathSet invalidOutputs = checkPathValidity(false, buildMode == bmRepair); /* If they are all valid, then we're done. */ if (invalidOutputs.size() == 0 && buildMode == bmNormal) { - amDone(ecSuccess); + done(BuildResult::AlreadyValid); return; } - /* Check whether any output previously failed to build. If so, - don't bother. */ - foreach (PathSet::iterator, i, invalidOutputs) - if (pathFailed(*i)) return; + /* Reject doing a hash build of anything other than a fixed-output + derivation. */ + if (buildMode == bmHash) { + if (drv->outputs.size() != 1 || + drv->outputs.find("out") == drv->outputs.end() || + drv->outputs["out"].hashAlgo == "") + throw Error(format("cannot do a hash build of non-fixed-output derivation ‘%1%’") % drvPath); + } /* We are first going to try to create the invalid output paths through substitutes. 
If that doesn't work, we'll build them. */ - if (settings.useSubstitutes && substitutesAllowed(drv)) - foreach (PathSet::iterator, i, invalidOutputs) - addWaitee(worker.makeSubstitutionGoal(*i, buildMode == bmRepair)); + if (settings.useSubstitutes && drv->substitutesAllowed()) + for (auto & i : invalidOutputs) + addWaitee(worker.makeSubstitutionGoal(i, buildMode == bmRepair)); if (waitees.empty()) /* to prevent hang (no wake-up event) */ outputsSubstituted(); @@ -1045,7 +1095,7 @@ void DerivationGoal::outputsSubstituted() unsigned int nrInvalid = checkPathValidity(false, buildMode == bmRepair).size(); if (buildMode == bmNormal && nrInvalid == 0) { - amDone(ecSuccess); + done(BuildResult::Substituted); return; } if (buildMode == bmRepair && nrInvalid == 0) { @@ -1063,11 +1113,17 @@ void DerivationGoal::outputsSubstituted() wantedOutputs = PathSet(); /* The inputs must be built before we can build this goal. */ - foreach (DerivationInputs::iterator, i, drv.inputDrvs) - addWaitee(worker.makeDerivationGoal(i->first, i->second, buildMode == bmRepair ? bmRepair : bmNormal)); + if (useDerivation) + for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs) + addWaitee(worker.makeDerivationGoal(i.first, i.second, buildMode == bmRepair ? bmRepair : bmNormal)); - foreach (PathSet::iterator, i, drv.inputSrcs) - addWaitee(worker.makeSubstitutionGoal(*i)); + for (auto & i : drv->inputSrcs) { + if (worker.store.isValidPath(i)) continue; + if (!settings.useSubstitutes) + throw Error(format("dependency of ‘%1%’ of ‘%2%’ does not exist, and substitution is disabled") + % i % drvPath); + addWaitee(worker.makeSubstitutionGoal(i)); + } if (waitees.empty()) /* to prevent hang (no wake-up event) */ inputsRealised(); @@ -1085,40 +1141,42 @@ void DerivationGoal::repairClosure() /* Get the output closure. 
*/ PathSet outputClosure; - foreach (DerivationOutputs::iterator, i, drv.outputs) - computeFSClosure(worker.store, i->second.path, outputClosure); + for (auto & i : drv->outputs) { + if (!wantOutput(i.first, wantedOutputs)) continue; + worker.store.computeFSClosure(i.second.path, outputClosure); + } /* Filter out our own outputs (which we have already checked). */ - foreach (DerivationOutputs::iterator, i, drv.outputs) - outputClosure.erase(i->second.path); + for (auto & i : drv->outputs) + outputClosure.erase(i.second.path); /* Get all dependencies of this derivation so that we know which derivation is responsible for which path in the output closure. */ PathSet inputClosure; - computeFSClosure(worker.store, drvPath, inputClosure); + if (useDerivation) worker.store.computeFSClosure(drvPath, inputClosure); std::map<Path, Path> outputsToDrv; - foreach (PathSet::iterator, i, inputClosure) - if (isDerivation(*i)) { - Derivation drv = derivationFromPath(worker.store, *i); - foreach (DerivationOutputs::iterator, j, drv.outputs) - outputsToDrv[j->second.path] = *i; + for (auto & i : inputClosure) + if (isDerivation(i)) { + Derivation drv = worker.store.derivationFromPath(i); + for (auto & j : drv.outputs) + outputsToDrv[j.second.path] = i; } /* Check each path (slow!). 
*/ PathSet broken; - foreach (PathSet::iterator, i, outputClosure) { - if (worker.store.pathContentsGood(*i)) continue; - printMsg(lvlError, format("found corrupted or missing path ‘%1%’ in the output closure of ‘%2%’") % *i % drvPath); - Path drvPath2 = outputsToDrv[*i]; + for (auto & i : outputClosure) { + if (worker.pathContentsGood(i)) continue; + printMsg(lvlError, format("found corrupted or missing path ‘%1%’ in the output closure of ‘%2%’") % i % drvPath); + Path drvPath2 = outputsToDrv[i]; if (drvPath2 == "") - addWaitee(worker.makeSubstitutionGoal(*i, true)); + addWaitee(worker.makeSubstitutionGoal(i, true)); else addWaitee(worker.makeDerivationGoal(drvPath2, PathSet(), bmRepair)); } if (waitees.empty()) { - amDone(ecSuccess); + done(BuildResult::AlreadyValid); return; } @@ -1131,7 +1189,7 @@ void DerivationGoal::closureRepaired() trace("closure repaired"); if (nrFailed > 0) throw Error(format("some paths in the output closure of derivation ‘%1%’ could not be repaired") % drvPath); - amDone(ecSuccess); + done(BuildResult::AlreadyValid); } @@ -1140,10 +1198,12 @@ void DerivationGoal::inputsRealised() trace("all inputs realised"); if (nrFailed != 0) { + if (!useDerivation) + throw Error(format("some dependencies of ‘%1%’ are missing") % drvPath); printMsg(lvlError, format("cannot build derivation ‘%1%’: %2% dependencies couldn't be built") % drvPath % nrFailed); - amDone(ecFailed); + done(BuildResult::DependencyFailed); return; } @@ -1156,32 +1216,33 @@ void DerivationGoal::inputsRealised() running the build hook. */ /* The outputs are referenceable paths. */ - foreach (DerivationOutputs::iterator, i, drv.outputs) { - debug(format("building path ‘%1%’") % i->second.path); - allPaths.insert(i->second.path); + for (auto & i : drv->outputs) { + debug(format("building path ‘%1%’") % i.second.path); + allPaths.insert(i.second.path); } /* Determine the full set of input paths. */ /* First, the input derivations. 
*/ - foreach (DerivationInputs::iterator, i, drv.inputDrvs) { - /* Add the relevant output closures of the input derivation - `*i' as input paths. Only add the closures of output paths - that are specified as inputs. */ - assert(worker.store.isValidPath(i->first)); - Derivation inDrv = derivationFromPath(worker.store, i->first); - foreach (StringSet::iterator, j, i->second) - if (inDrv.outputs.find(*j) != inDrv.outputs.end()) - computeFSClosure(worker.store, inDrv.outputs[*j].path, inputPaths); - else - throw Error( - format("derivation ‘%1%’ requires non-existent output ‘%2%’ from input derivation ‘%3%’") - % drvPath % *j % i->first); - } + if (useDerivation) + for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs) { + /* Add the relevant output closures of the input derivation + `i' as input paths. Only add the closures of output paths + that are specified as inputs. */ + assert(worker.store.isValidPath(i.first)); + Derivation inDrv = worker.store.derivationFromPath(i.first); + for (auto & j : i.second) + if (inDrv.outputs.find(j) != inDrv.outputs.end()) + worker.store.computeFSClosure(inDrv.outputs[j].path, inputPaths); + else + throw Error( + format("derivation ‘%1%’ requires non-existent output ‘%2%’ from input derivation ‘%3%’") + % drvPath % j % i.first); + } /* Second, the input sources. */ - foreach (PathSet::iterator, i, drv.inputSrcs) - computeFSClosure(worker.store, *i, inputPaths); + for (auto & i : drv->inputSrcs) + worker.store.computeFSClosure(i, inputPaths); debug(format("added input paths %1%") % showPaths(inputPaths)); @@ -1189,8 +1250,12 @@ void DerivationGoal::inputsRealised() /* Is this a fixed-output derivation? 
*/ fixedOutput = true; - foreach (DerivationOutputs::iterator, i, drv.outputs) - if (i->second.hash == "") fixedOutput = false; + for (auto & i : drv->outputs) + if (i.second.hash == "") fixedOutput = false; + + /* Don't repeat fixed-output derivations since they're already + verified by their output hash.*/ + nrRounds = fixedOutput ? 1 : settings.get("build-repeat", 0) + 1; /* Okay, try to build. Note that here we don't wait for a build slot to become available, since we don't need one if there is a @@ -1200,35 +1265,6 @@ void DerivationGoal::inputsRealised() } -static bool canBuildLocally(const string & platform) -{ - return platform == settings.thisSystem -#if __linux__ - || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux") -#endif - ; -} - - -static string get(const StringPairs & map, const string & key, const string & def = "") -{ - StringPairs::const_iterator i = map.find(key); - return i == map.end() ? def : i->second; -} - - -bool willBuildLocally(const Derivation & drv) -{ - return get(drv.env, "preferLocalBuild") == "1" && canBuildLocally(drv.platform); -} - - -bool substitutesAllowed(const Derivation & drv) -{ - return get(drv.env, "allowSubstitutes", "1") == "1"; -} - - void DerivationGoal::tryToBuild() { trace("trying to build"); @@ -1238,10 +1274,10 @@ void DerivationGoal::tryToBuild() (It can't happen between here and the lockPaths() call below because we're not allowing multi-threading.) If so, put this goal to sleep until another goal finishes, then try again. 
*/ - foreach (DerivationOutputs::iterator, i, drv.outputs) - if (pathIsLockedByMe(i->second.path)) { + for (auto & i : drv->outputs) + if (pathIsLockedByMe(i.second.path)) { debug(format("putting derivation ‘%1%’ to sleep because ‘%2%’ is locked by another goal") - % drvPath % i->second.path); + % drvPath % i.second.path); worker.waitForAnyGoal(shared_from_this()); return; } @@ -1251,7 +1287,7 @@ void DerivationGoal::tryToBuild() can't acquire the lock, then continue; hopefully some other goal can start a build, and if not, the main loop will sleep a few seconds and then retry this goal. */ - if (!outputLocks.lockPaths(outputPaths(drv), "", false)) { + if (!outputLocks.lockPaths(drv->outputPaths(), "", false)) { worker.waitForAWhile(shared_from_this()); return; } @@ -1264,38 +1300,30 @@ void DerivationGoal::tryToBuild() now hold the locks on the output paths, no other process can build this derivation, so no further checks are necessary. */ validPaths = checkPathValidity(true, buildMode == bmRepair); - assert(buildMode != bmCheck || validPaths.size() == drv.outputs.size()); - if (buildMode != bmCheck && validPaths.size() == drv.outputs.size()) { + if (buildMode != bmCheck && validPaths.size() == drv->outputs.size()) { debug(format("skipping build of derivation ‘%1%’, someone beat us to it") % drvPath); outputLocks.setDeletion(true); - amDone(ecSuccess); + done(BuildResult::AlreadyValid); return; } - missingPaths = outputPaths(drv); + missingPaths = drv->outputPaths(); if (buildMode != bmCheck) - foreach (PathSet::iterator, i, validPaths) missingPaths.erase(*i); + for (auto & i : validPaths) missingPaths.erase(i); /* If any of the outputs already exist but are not valid, delete them. 
*/ - foreach (DerivationOutputs::iterator, i, drv.outputs) { - Path path = i->second.path; + for (auto & i : drv->outputs) { + Path path = i.second.path; if (worker.store.isValidPath(path)) continue; - if (!pathExists(path)) continue; debug(format("removing invalid path ‘%1%’") % path); deletePath(path); } - /* Check again whether any output previously failed to build, - because some other process may have tried and failed before we - acquired the lock. */ - foreach (DerivationOutputs::iterator, i, drv.outputs) - if (pathFailed(i->second.path)) return; - /* Don't do a remote build if the derivation has the attribute `preferLocalBuild' set. Also, check and repair modes are only supported for local builds. */ - bool buildLocally = buildMode != bmNormal || willBuildLocally(drv); + bool buildLocally = buildMode != bmNormal || drv->willBuildLocally(); /* Is the build hook willing to accept this job? */ if (!buildLocally) { @@ -1336,11 +1364,8 @@ void DerivationGoal::tryToBuild() printMsg(lvlError, e.msg()); outputLocks.unlock(); buildUser.release(); - if (settings.printBuildTrace) - printMsg(lvlError, format("@ build-failed %1% - %2% %3%") - % drvPath % 0 % e.msg()); worker.permanentFailure = true; - amDone(ecFailed); + done(BuildResult::InputRejected, e.msg()); return; } @@ -1361,11 +1386,13 @@ void replaceValidPath(const Path & storePath, const Path tmpPath) rename(storePath.c_str(), oldPath.c_str()); if (rename(tmpPath.c_str(), storePath.c_str()) == -1) throw SysError(format("moving ‘%1%’ to ‘%2%’") % tmpPath % storePath); - if (pathExists(oldPath)) - deletePath(oldPath); + deletePath(oldPath); } +MakeError(NotDeterministic, BuildError) + + void DerivationGoal::buildDone() { trace("build done"); @@ -1374,22 +1401,14 @@ void DerivationGoal::buildDone() to have terminated. 
In fact, the builder could also have simply have closed its end of the pipe --- just don't do that :-) */ - int status; - pid_t savedPid; - if (hook) { - savedPid = hook->pid; - status = hook->pid.wait(true); - } else { - /* !!! this could block! security problem! solution: kill the - child */ - savedPid = pid; - status = pid.wait(true); - } + /* !!! this could block! security problem! solution: kill the + child */ + int status = hook ? hook->pid.wait(true) : pid.wait(true); debug(format("builder process for ‘%1%’ finished") % drvPath); /* So the child is gone now. */ - worker.childTerminated(savedPid); + worker.childTerminated(shared_from_this()); /* Close the read side of the logger pipe. */ if (hook) { @@ -1436,15 +1455,23 @@ void DerivationGoal::buildDone() /* Move paths out of the chroot for easier debugging of build failures. */ if (useChroot && buildMode == bmNormal) - foreach (PathSet::iterator, i, missingPaths) - if (pathExists(chrootRootDir + *i)) - rename((chrootRootDir + *i).c_str(), i->c_str()); + for (auto & i : missingPaths) + if (pathExists(chrootRootDir + i)) + rename((chrootRootDir + i).c_str(), i.c_str()); + + std::string msg = (format("builder for ‘%1%’ %2%") + % drvPath % statusToString(status)).str(); + + if (!settings.verboseBuild && !logTail.empty()) { + msg += (format("; last %d log lines:") % logTail.size()).str(); + for (auto & line : logTail) + msg += "\n " + line; + } if (diskFull) - printMsg(lvlError, "note: build failure may have been caused by lack of free disk space"); + msg += "\nnote: build failure may have been caused by lack of free disk space"; - throw BuildError(format("builder for ‘%1%’ %2%") - % drvPath % statusToString(status)); + throw BuildError(msg); } /* Compute the FS closure of the outputs and register them as @@ -1452,19 +1479,28 @@ void DerivationGoal::buildDone() registerOutputs(); if (buildMode == bmCheck) { - amDone(ecSuccess); + done(BuildResult::Built); return; } /* Delete unused redirected outputs (when doing 
hash rewriting). */ - foreach (RedirectedOutputs::iterator, i, redirectedOutputs) - if (pathExists(i->second)) deletePath(i->second); + for (auto & i : redirectedOutputs) + deletePath(i.second); /* Delete the chroot (if we were using one). */ autoDelChroot.reset(); /* this runs the destructor */ deleteTmpDir(true); + /* Repeat the build if necessary. */ + if (curRound++ < nrRounds) { + outputLocks.unlock(); + buildUser.release(); + state = &DerivationGoal::tryToBuild; + worker.wakeUp(shared_from_this()); + return; + } + /* It is now safe to delete the lock files, since all future lockers will see that the output paths are valid; they will not create new lock files with the same names as the old @@ -1478,67 +1514,50 @@ void DerivationGoal::buildDone() outputLocks.unlock(); buildUser.release(); - if (hook && WIFEXITED(status) && WEXITSTATUS(status) == 101) { - if (settings.printBuildTrace) - printMsg(lvlError, format("@ build-failed %1% - timeout") % drvPath); - worker.timedOut = true; - } + BuildResult::Status st = BuildResult::MiscFailure; + + if (hook && WIFEXITED(status) && WEXITSTATUS(status) == 101) + st = BuildResult::TimedOut; else if (hook && (!WIFEXITED(status) || WEXITSTATUS(status) != 100)) { - if (settings.printBuildTrace) - printMsg(lvlError, format("@ hook-failed %1% - %2% %3%") - % drvPath % status % e.msg()); } else { - if (settings.printBuildTrace) - printMsg(lvlError, format("@ build-failed %1% - %2% %3%") - % drvPath % 1 % e.msg()); - worker.permanentFailure = !fixedOutput && !diskFull; - - /* Register the outputs of this build as "failed" so we - won't try to build them again (negative caching). - However, don't do this for fixed-output derivations, - since they're likely to fail for transient reasons - (e.g., fetchurl not being able to access the network). - Hook errors (like communication problems with the - remote machine) shouldn't be cached either. 
*/ - if (settings.cacheFailure && !fixedOutput && !diskFull) - foreach (DerivationOutputs::iterator, i, drv.outputs) - worker.store.registerFailedPath(i->second.path); + st = + dynamic_cast<NotDeterministic*>(&e) ? BuildResult::NotDeterministic : + statusOk(status) ? BuildResult::OutputRejected : + fixedOutput || diskFull ? BuildResult::TransientFailure : + BuildResult::PermanentFailure; } - amDone(ecFailed); + done(st, e.msg()); return; } /* Release the build user, if applicable. */ buildUser.release(); - if (settings.printBuildTrace) - printMsg(lvlError, format("@ build-succeeded %1% -") % drvPath); - - amDone(ecSuccess); + done(BuildResult::Built); } HookReply DerivationGoal::tryBuildHook() { - if (!settings.useBuildHook || getEnv("NIX_BUILD_HOOK") == "") return rpDecline; + if (!settings.useBuildHook || getEnv("NIX_BUILD_HOOK") == "" || !useDerivation) return rpDecline; if (!worker.hook) - worker.hook = std::shared_ptr<HookInstance>(new HookInstance); + worker.hook = std::make_shared<HookInstance>(); /* Tell the hook about system features (beyond the system type) required from the build machine. (The hook could parse the drv file itself, but this is easier.) */ - Strings features = tokenizeString<Strings>(get(drv.env, "requiredSystemFeatures")); - foreach (Strings::iterator, i, features) checkStoreName(*i); /* !!! abuse */ + Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures")); + for (auto & i : features) checkStoreName(i); /* !!! abuse */ /* Send the request to the hook. */ writeLine(worker.hook->toHook.writeSide, (format("%1% %2% %3% %4%") % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0") - % drv.platform % drvPath % concatStringsSep(",", features)).str()); + % drv->platform % drvPath % concatStringsSep(",", features)).str()); /* Read the first line of input, which should be a word indicating whether the hook wishes to perform the build. 
*/ @@ -1572,16 +1591,16 @@ HookReply DerivationGoal::tryBuildHook() list it since the remote system *probably* already has it.) */ PathSet allInputs; allInputs.insert(inputPaths.begin(), inputPaths.end()); - computeFSClosure(worker.store, drvPath, allInputs); + worker.store.computeFSClosure(drvPath, allInputs); string s; - foreach (PathSet::iterator, i, allInputs) { s += *i; s += ' '; } + for (auto & i : allInputs) { s += i; s += ' '; } writeLine(hook->toHook.writeSide, s); /* Tell the hooks the missing outputs that have to be copied back from the remote system. */ s = ""; - foreach (PathSet::iterator, i, missingPaths) { s += *i; s += ' '; } + for (auto & i : missingPaths) { s += i; s += ' '; } writeLine(hook->toHook.writeSide, s); hook->toHook.writeSide.close(); @@ -1592,11 +1611,7 @@ HookReply DerivationGoal::tryBuildHook() set<int> fds; fds.insert(hook->fromHook.readSide); fds.insert(hook->builderOut.readSide); - worker.childStarted(shared_from_this(), hook->pid, fds, false, false); - - if (settings.printBuildTrace) - printMsg(lvlError, format("@ build-started %1% - %2% %3%") - % drvPath % drv.platform % logFile); + worker.childStarted(shared_from_this(), fds, false, false); return rpAccept; } @@ -1618,21 +1633,55 @@ int childEntry(void * arg) void DerivationGoal::startBuilder() { - startNest(nest, lvlInfo, format( - buildMode == bmRepair ? "repairing path(s) %1%" : - buildMode == bmCheck ? "checking path(s) %1%" : - "building path(s) %1%") % showPaths(missingPaths)); + auto f = format( + buildMode == bmRepair ? "repairing path(s) %1%" : + buildMode == bmCheck ? "checking path(s) %1%" : + nrRounds > 1 ? "building path(s) %1% (round %2%/%3%)" : + "building path(s) %1%"); + f.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit); + printMsg(lvlInfo, f % showPaths(missingPaths) % curRound % nrRounds); /* Right platform? 
*/ - if (!canBuildLocally(drv.platform)) { - if (settings.printBuildTrace) - printMsg(lvlError, format("@ unsupported-platform %1% %2%") % drvPath % drv.platform); + if (!drv->canBuildLocally()) { throw Error( format("a ‘%1%’ is required to build ‘%3%’, but I am a ‘%2%’") - % drv.platform % settings.thisSystem % drvPath); + % drv->platform % settings.thisSystem % drvPath); + } + +#if __APPLE__ + additionalSandboxProfile = get(drv->env, "__sandboxProfile"); +#endif + + /* Are we doing a chroot build? Note that fixed-output + derivations are never done in a chroot, mainly so that + functions like fetchurl (which needs a proper /etc/resolv.conf) + work properly. Purity checking for fixed-output derivations + is somewhat pointless anyway. */ + { + string x = settings.get("build-use-sandbox", + /* deprecated alias */ + settings.get("build-use-chroot", string("false"))); + if (x != "true" && x != "false" && x != "relaxed") + throw Error("option ‘build-use-sandbox’ must be set to one of ‘true’, ‘false’ or ‘relaxed’"); + if (x == "true") { + if (get(drv->env, "__noChroot") == "1") + throw Error(format("derivation ‘%1%’ has ‘__noChroot’ set, " + "but that's not allowed when ‘build-use-sandbox’ is ‘true’") % drvPath); +#if __APPLE__ + if (additionalSandboxProfile != "") + throw Error(format("derivation ‘%1%’ specifies a sandbox profile, " + "but this is only allowed when ‘build-use-sandbox’ is ‘relaxed’") % drvPath); +#endif + useChroot = true; + } + else if (x == "false") + useChroot = false; + else if (x == "relaxed") + useChroot = !fixedOutput && get(drv->env, "__noChroot") != "1"; } /* Construct the environment passed to the builder. */ + env.clear(); /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when PATH is not set. We don't want this, so we fill it in with some dummy @@ -1659,38 +1708,44 @@ void DerivationGoal::startBuilder() /* Create a temporary directory where the build will take place. 
*/ - tmpDir = createTempDir("", "nix-build-" + storePathToName(drvPath), false, false, 0700); + auto drvName = storePathToName(drvPath); + tmpDir = createTempDir("", "nix-build-" + drvName, false, false, 0700); + + /* In a sandbox, for determinism, always use the same temporary + directory. */ + tmpDirInSandbox = useChroot ? canonPath("/tmp", true) + "/nix-build-" + drvName + "-0" : tmpDir; /* Add all bindings specified in the derivation via the environments, except those listed in the passAsFile attribute. Those are passed as file names pointing to temporary files containing the contents. */ PathSet filesToChown; - StringSet passAsFile = tokenizeString<StringSet>(get(drv.env, "passAsFile")); + StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile")); int fileNr = 0; - for (auto & i : drv.env) { + for (auto & i : drv->env) { if (passAsFile.find(i.first) == passAsFile.end()) { env[i.first] = i.second; } else { - Path p = tmpDir + "/.attr-" + int2String(fileNr++); + string fn = ".attr-" + std::to_string(fileNr++); + Path p = tmpDir + "/" + fn; writeFile(p, i.second); filesToChown.insert(p); - env[i.first + "Path"] = p; + env[i.first + "Path"] = tmpDirInSandbox + "/" + fn; } } /* For convenience, set an environment pointing to the top build directory. */ - env["NIX_BUILD_TOP"] = tmpDir; + env["NIX_BUILD_TOP"] = tmpDirInSandbox; /* Also set TMPDIR and variants to point to this directory. */ - env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDir; + env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDirInSandbox; /* Explicitly set PWD to prevent problems with chroot builds. In particular, dietlibc cannot figure out the cwd because the inode of the current directory doesn't appear in .. (because getdents returns the inode of the mount point). 
*/ - env["PWD"] = tmpDir; + env["PWD"] = tmpDirInSandbox; /* Compatibility hack with Nix <= 0.7: if this is a fixed-output derivation, tell the builder, so that for instance `fetchurl' @@ -1708,8 +1763,8 @@ void DerivationGoal::startBuilder() fixed-output derivations is by definition pure (since we already know the cryptographic hash of the output). */ if (fixedOutput) { - Strings varNames = tokenizeString<Strings>(get(drv.env, "impureEnvVars")); - foreach (Strings::iterator, i, varNames) env[*i] = getEnv(*i); + Strings varNames = tokenizeString<Strings>(get(drv->env, "impureEnvVars")); + for (auto & i : varNames) env[i] = getEnv(i); } /* The `exportReferencesGraph' feature allows the references graph @@ -1719,7 +1774,7 @@ void DerivationGoal::startBuilder() temporary build directory. The text files have the format used by `nix-store --register-validity'. However, the deriver fields are left empty. */ - string s = get(drv.env, "exportReferencesGraph"); + string s = get(drv->env, "exportReferencesGraph"); Strings ss = tokenizeString<Strings>(s); if (ss.size() % 2 != 0) throw BuildError(format("odd number of tokens in ‘exportReferencesGraph’: ‘%1%’") % s); @@ -1742,14 +1797,14 @@ void DerivationGoal::startBuilder() like passing all build-time dependencies of some path to a derivation that builds a NixOS DVD image. 
*/ PathSet paths, paths2; - computeFSClosure(worker.store, storePath, paths); + worker.store.computeFSClosure(storePath, paths); paths2 = paths; - foreach (PathSet::iterator, j, paths2) { - if (isDerivation(*j)) { - Derivation drv = derivationFromPath(worker.store, *j); - foreach (DerivationOutputs::iterator, k, drv.outputs) - computeFSClosure(worker.store, k->second.path, paths); + for (auto & j : paths2) { + if (isDerivation(j)) { + Derivation drv = worker.store.derivationFromPath(j); + for (auto & k : drv.outputs) + worker.store.computeFSClosure(k.second.path, paths); } } @@ -1779,40 +1834,27 @@ void DerivationGoal::startBuilder() } - /* Are we doing a chroot build? Note that fixed-output - derivations are never done in a chroot, mainly so that - functions like fetchurl (which needs a proper /etc/resolv.conf) - work properly. Purity checking for fixed-output derivations - is somewhat pointless anyway. */ - { - string x = settings.get("build-use-chroot", string("false")); - if (x != "true" && x != "false" && x != "relaxed") - throw Error("option ‘build-use-chroot’ must be set to one of ‘true’, ‘false’ or ‘relaxed’"); - if (x == "true") { - if (get(drv.env, "__noChroot") == "1") - throw Error(format("derivation ‘%1%’ has ‘__noChroot’ set, but that's not allowed when ‘build-use-chroot’ is ‘true’") % drvPath); - useChroot = true; - } - else if (x == "false") - useChroot = false; - else if (x == "relaxed") - useChroot = !fixedOutput && get(drv.env, "__noChroot") != "1"; - } - if (useChroot) { string defaultChrootDirs; -#if CHROOT_ENABLED +#if __linux__ if (isInStore(BASH_PATH)) defaultChrootDirs = "/bin/sh=" BASH_PATH; #endif /* Allow a user-configurable set of directories from the host file system. 
*/ - PathSet dirs = tokenizeString<StringSet>(settings.get("build-chroot-dirs", defaultChrootDirs)); - PathSet dirs2 = tokenizeString<StringSet>(settings.get("build-extra-chroot-dirs", string(""))); + PathSet dirs = tokenizeString<StringSet>( + settings.get("build-sandbox-paths", + /* deprecated alias with lower priority */ + settings.get("build-chroot-dirs", defaultChrootDirs))); + PathSet dirs2 = tokenizeString<StringSet>( + settings.get("build-extra-chroot-dirs", + settings.get("build-extra-sandbox-paths", string("")))); dirs.insert(dirs2.begin(), dirs2.end()); + dirsInChroot.clear(); + for (auto & i : dirs) { size_t p = i.find('='); if (p == string::npos) @@ -1820,13 +1862,13 @@ void DerivationGoal::startBuilder() else dirsInChroot[string(i, 0, p)] = string(i, p + 1); } - dirsInChroot[tmpDir] = tmpDir; + dirsInChroot[tmpDirInSandbox] = tmpDir; /* Add the closure of store paths to the chroot. */ PathSet closure; for (auto & i : dirsInChroot) if (isInStore(i.second)) - computeFSClosure(worker.store, toStorePath(i.second), closure); + worker.store.computeFSClosure(toStorePath(i.second), closure); for (auto & i : closure) dirsInChroot[i] = i; @@ -1834,7 +1876,7 @@ void DerivationGoal::startBuilder() PathSet allowedPaths = tokenizeString<StringSet>(allowed); /* This works like the above, except on a per-derivation level */ - Strings impurePaths = tokenizeString<Strings>(get(drv.env, "__impureHostDeps")); + Strings impurePaths = tokenizeString<Strings>(get(drv->env, "__impureHostDeps")); for (auto & i : impurePaths) { bool found = false; @@ -1851,21 +1893,21 @@ void DerivationGoal::startBuilder() } } if (!found) - throw Error(format("derivation '%1%' requested impure path ‘%2%’, but it was not in allowed-impure-host-deps (‘%3%’)") % drvPath % i % allowed); + throw Error(format("derivation ‘%1%’ requested impure path ‘%2%’, but it was not in allowed-impure-host-deps (‘%3%’)") % drvPath % i % allowed); dirsInChroot[i] = i; } -#if CHROOT_ENABLED +#if __linux__ /* Create 
a temporary directory in which we set up the chroot environment using bind-mounts. We put it in the Nix store to ensure that we can create hard-links to non-directory inputs in the fake Nix store in the chroot (see below). */ chrootRootDir = drvPath + ".chroot"; - if (pathExists(chrootRootDir)) deletePath(chrootRootDir); + deletePath(chrootRootDir); /* Clean up the chroot directory automatically. */ - autoDelChroot = std::shared_ptr<AutoDelete>(new AutoDelete(chrootRootDir)); + autoDelChroot = std::make_shared<AutoDelete>(chrootRootDir); printMsg(lvlChatty, format("setting up chroot environment in ‘%1%’") % chrootRootDir); @@ -1918,44 +1960,42 @@ void DerivationGoal::startBuilder() if (chown(chrootStoreDir.c_str(), 0, buildUser.getGID()) == -1) throw SysError(format("cannot change ownership of ‘%1%’") % chrootStoreDir); - foreach (PathSet::iterator, i, inputPaths) { + for (auto & i : inputPaths) { struct stat st; - if (lstat(i->c_str(), &st)) - throw SysError(format("getting attributes of path ‘%1%’") % *i); + if (lstat(i.c_str(), &st)) + throw SysError(format("getting attributes of path ‘%1%’") % i); if (S_ISDIR(st.st_mode)) - dirsInChroot[*i] = *i; + dirsInChroot[i] = i; else { - Path p = chrootRootDir + *i; - if (link(i->c_str(), p.c_str()) == -1) { + Path p = chrootRootDir + i; + if (link(i.c_str(), p.c_str()) == -1) { /* Hard-linking fails if we exceed the maximum link count on a file (e.g. 32000 of ext3), which is quite possible after a `nix-store --optimise'. 
*/ if (errno != EMLINK) - throw SysError(format("linking ‘%1%’ to ‘%2%’") % p % *i); + throw SysError(format("linking ‘%1%’ to ‘%2%’") % p % i); StringSink sink; - dumpPath(*i, sink); - StringSource source(sink.s); + dumpPath(i, sink); + StringSource source(*sink.s); restorePath(p, source); } - - regularInputPaths.insert(*i); } } - /* If we're repairing or checking, it's possible that we're + /* If we're repairing, checking or rebuilding part of a + multiple-outputs derivation, it's possible that we're rebuilding a path that is in settings.dirsInChroot (typically the dependencies of /bin/sh). Throw them out. */ - if (buildMode != bmNormal) - foreach (DerivationOutputs::iterator, i, drv.outputs) - dirsInChroot.erase(i->second.path); + for (auto & i : drv->outputs) + dirsInChroot.erase(i.second.path); -#elif SANDBOX_ENABLED +#elif __APPLE__ /* We don't really have any parent prep work to do (yet?) All work happens in the child, instead. */ #else - throw Error("chroot builds are not supported on this platform"); + throw Error("sandboxing builds is not supported on this platform"); #endif } @@ -1974,16 +2014,16 @@ void DerivationGoal::startBuilder() contents of the new outputs to replace the dummy strings with the actual hashes. */ if (validPaths.size() > 0) - foreach (PathSet::iterator, i, validPaths) - addHashRewrite(*i); + for (auto & i : validPaths) + addHashRewrite(i); /* If we're repairing, then we don't want to delete the corrupt outputs in advance. So rewrite them as well. 
*/ if (buildMode == bmRepair) - foreach (PathSet::iterator, i, missingPaths) - if (worker.store.isValidPath(*i) && pathExists(*i)) { - addHashRewrite(*i); - redirectedBadOutputs.insert(*i); + for (auto & i : missingPaths) + if (worker.store.isValidPath(i) && pathExists(i)) { + addHashRewrite(i); + redirectedBadOutputs.insert(i); } } @@ -2001,10 +2041,10 @@ void DerivationGoal::startBuilder() auto lastPos = std::string::size_type{0}; for (auto nlPos = lines.find('\n'); nlPos != string::npos; nlPos = lines.find('\n', lastPos)) { - auto line = std::string{lines, lastPos, nlPos}; + auto line = std::string{lines, lastPos, nlPos - lastPos}; lastPos = nlPos + 1; if (state == stBegin) { - if (line == "extra-chroot-dirs") { + if (line == "extra-sandbox-paths" || line == "extra-chroot-dirs") { state = stExtraChrootDirs; } else { throw Error(format("unknown pre-build hook command ‘%1%’") @@ -2025,7 +2065,7 @@ void DerivationGoal::startBuilder() } /* Run the builder. */ - printMsg(lvlChatty, format("executing builder ‘%1%’") % drv.builder); + printMsg(lvlChatty, format("executing builder ‘%1%’") % drv->builder); /* Create the log file. */ Path logFile = openLogFile(); @@ -2034,7 +2074,7 @@ void DerivationGoal::startBuilder() builderOut.create(); /* Fork a child to build the package. 
*/ -#if CHROOT_ENABLED +#if __linux__ if (useChroot) { /* Set up private namespaces for the build: @@ -2072,16 +2112,19 @@ void DerivationGoal::startBuilder() ProcessOptions options; options.allowVfork = false; Pid helper = startProcess([&]() { - char stack[32 * 1024]; + size_t stackSize = 1 * 1024 * 1024; + char * stack = (char *) mmap(0, stackSize, + PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) throw SysError("allocating stack"); int flags = CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD; if (!fixedOutput) flags |= CLONE_NEWNET; - pid_t child = clone(childEntry, stack + sizeof(stack) - 8, flags, this); + pid_t child = clone(childEntry, stack + stackSize, flags, this); if (child == -1 && errno == EINVAL) /* Fallback for Linux < 2.13 where CLONE_NEWPID and CLONE_PARENT are not allowed together. */ - child = clone(childEntry, stack + sizeof(stack) - 8, flags & ~CLONE_NEWPID, this); + child = clone(childEntry, stack + stackSize, flags & ~CLONE_NEWPID, this); if (child == -1) throw SysError("cloning builder process"); - writeFull(builderOut.writeSide, int2String(child) + "\n"); + writeFull(builderOut.writeSide, std::to_string(child) + "\n"); _exit(0); }, options); if (helper.wait(true) != 0) @@ -2093,7 +2136,7 @@ void DerivationGoal::startBuilder() #endif { ProcessOptions options; - options.allowVfork = !buildUser.enabled(); + options.allowVfork = !buildUser.enabled() && !drv->isBuiltin(); pid = startProcess([&]() { runChild(); }, options); @@ -2102,16 +2145,16 @@ void DerivationGoal::startBuilder() /* parent */ pid.setSeparatePG(true); builderOut.writeSide.close(); - worker.childStarted(shared_from_this(), pid, - singleton<set<int> >(builderOut.readSide), true, true); + worker.childStarted(shared_from_this(), {builderOut.readSide}, true, true); /* Check if setting up the build environment failed. 
*/ - string msg = readLine(builderOut.readSide); - if (!msg.empty()) throw Error(msg); - - if (settings.printBuildTrace) { - printMsg(lvlError, format("@ build-started %1% - %2% %3%") - % drvPath % drv.platform % logFile); + while (true) { + string msg = readLine(builderOut.readSide); + if (string(msg, 0, 1) == "\1") { + if (msg.size() == 1) break; + throw Error(string(msg, 1)); + } + printMsg(lvlDebug, msg); } } @@ -2125,7 +2168,7 @@ void DerivationGoal::runChild() commonChildInit(builderOut); -#if CHROOT_ENABLED +#if __linux__ if (useChroot) { /* Initialise the loopback interface. */ @@ -2157,8 +2200,8 @@ void DerivationGoal::runChild() local to the namespace, though, so setting MS_PRIVATE does not affect the outside world. */ Strings mounts = tokenizeString<Strings>(readFile("/proc/self/mountinfo", true), "\n"); - foreach (Strings::iterator, i, mounts) { - vector<string> fields = tokenizeString<vector<string> >(*i, " "); + for (auto & i : mounts) { + vector<string> fields = tokenizeString<vector<string> >(i, " "); string fs = decodeOctalEscaped(fields.at(4)); if (mount(0, fs.c_str(), 0, MS_PRIVATE, 0) == -1) throw SysError(format("unable to make filesystem ‘%1%’ private") % fs); @@ -2206,10 +2249,10 @@ void DerivationGoal::runChild() /* Bind-mount all the directories from the "host" filesystem that we want in the chroot environment. 
*/ - foreach (DirsInChroot::iterator, i, dirsInChroot) { + for (auto & i : dirsInChroot) { struct stat st; - Path source = i->second; - Path target = chrootRootDir + i->first; + Path source = i.second; + Path target = chrootRootDir + i.first; if (source == "/proc") continue; // backwards compatibility debug(format("bind mounting ‘%1%’ to ‘%2%’") % source % target); if (stat(source.c_str(), &st) == -1) @@ -2258,10 +2301,8 @@ void DerivationGoal::runChild() if (mkdir("real-root", 0) == -1) throw SysError("cannot create real-root directory"); -#define pivot_root(new_root, put_old) (syscall(SYS_pivot_root, new_root, put_old)) if (pivot_root(".", "real-root") == -1) throw SysError(format("cannot pivot old root directory onto ‘%1%’") % (chrootRootDir + "/real-root")); -#undef pivot_root if (chroot(".") == -1) throw SysError(format("cannot change root directory to ‘%1%’") % chrootRootDir); @@ -2274,7 +2315,7 @@ void DerivationGoal::runChild() } #endif - if (chdir(tmpDir.c_str()) == -1) + if (chdir(tmpDirInSandbox.c_str()) == -1) throw SysError(format("changing into ‘%1%’") % tmpDir); /* Close all other file descriptors. */ @@ -2285,7 +2326,7 @@ void DerivationGoal::runChild() i686-linux build on an x86_64-linux machine. */ struct utsname utsbuf; uname(&utsbuf); - if (drv.platform == "i686-linux" && + if (drv->platform == "i686-linux" && (settings.thisSystem == "x86_64-linux" || (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64")))) { if (personality(PER_LINUX32) == -1) @@ -2294,7 +2335,7 @@ void DerivationGoal::runChild() /* Impersonate a Linux 2.6 machine to get some determinism in builds that depend on the kernel version. 
*/ - if ((drv.platform == "i686-linux" || drv.platform == "x86_64-linux") && settings.impersonateLinux26) { + if ((drv->platform == "i686-linux" || drv->platform == "x86_64-linux") && settings.impersonateLinux26) { int cur = personality(0xffffffff); if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */); } @@ -2305,10 +2346,16 @@ void DerivationGoal::runChild() if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE); #endif + /* Disable core dumps by default. */ + struct rlimit limit = { 0, RLIM_INFINITY }; + setrlimit(RLIMIT_CORE, &limit); + + // FIXME: set other limits to deterministic values? + /* Fill in the environment. */ Strings envStrs; - foreach (Environment::const_iterator, i, env) - envStrs.push_back(rewriteHashes(i->first + "=" + i->second, rewritesToTmp)); + for (auto & i : env) + envStrs.push_back(rewriteHashes(i.first + "=" + i.second, rewritesToTmp)); /* If we are running in `build-users' mode, then switch to the user we allocated above. Make sure that we drop all root @@ -2319,7 +2366,8 @@ void DerivationGoal::runChild() if (buildUser.enabled()) { /* Preserve supplementary groups of the build user, to allow admins to specify groups such as "kvm". 
*/ - if (setgroups(buildUser.getSupplementaryGIDs().size(), + if (!buildUser.getSupplementaryGIDs().empty() && + setgroups(buildUser.getSupplementaryGIDs().size(), buildUser.getSupplementaryGIDs().data()) == -1) throw SysError("cannot set supplementary groups of build user"); @@ -2340,7 +2388,10 @@ void DerivationGoal::runChild() const char *builder = "invalid"; string sandboxProfile; - if (useChroot && SANDBOX_ENABLED) { + if (drv->isBuiltin()) { + ; +#if __APPLE__ + } else if (useChroot) { /* Lots and lots and lots of file functions freak out if they can't stat their full ancestry */ PathSet ancestry; @@ -2367,8 +2418,7 @@ void DerivationGoal::runChild() for (auto & i : inputPaths) dirsInChroot[i] = i; - - /* TODO: we should factor out the policy cleanly, so we don't have to repeat the constants every time... */ + /* This has to appear before import statements */ sandboxProfile += "(version 1)\n"; /* Violations will go to the syslog if you set this. Unfortunately the destination does not appear to be configurable */ @@ -2378,15 +2428,6 @@ void DerivationGoal::runChild() sandboxProfile += "(deny default (with no-log))\n"; } - sandboxProfile += "(allow file-read* file-write-data (literal \"/dev/null\"))\n"; - - sandboxProfile += "(allow file-read-metadata\n" - "\t(literal \"/var\")\n" - "\t(literal \"/tmp\")\n" - "\t(literal \"/etc\")\n" - "\t(literal \"/etc/nix\")\n" - "\t(literal \"/etc/nix/nix.conf\"))\n"; - /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. 
*/ Path globalTmpDir = canonPath(getEnv("TMPDIR", "/tmp"), true); @@ -2394,20 +2435,6 @@ void DerivationGoal::runChild() /* They don't like trailing slashes on subpath directives */ if (globalTmpDir.back() == '/') globalTmpDir.pop_back(); - /* This is where our temp folders are and where most of the building will happen, so we want rwx on it. */ - sandboxProfile += (format("(allow file-read* file-write* process-exec (subpath \"%1%\") (subpath \"/private/tmp\"))\n") % globalTmpDir).str(); - - sandboxProfile += "(allow process-fork)\n"; - sandboxProfile += "(allow sysctl-read)\n"; - sandboxProfile += "(allow signal (target same-sandbox))\n"; - - /* Enables getpwuid (used by git and others) */ - sandboxProfile += "(allow mach-lookup (global-name \"com.apple.system.notification_center\") (global-name \"com.apple.system.opendirectoryd.libinfo\"))\n"; - - /* Allow local networking operations, mostly because lots of test suites use it and it seems mostly harmless */ - sandboxProfile += "(allow network* (local ip) (remote unix-socket))"; - - /* Our rwx outputs */ sandboxProfile += "(allow file-read* file-write* process-exec\n"; for (auto & i : missingPaths) { @@ -2416,15 +2443,15 @@ void DerivationGoal::runChild() sandboxProfile += ")\n"; /* Our inputs (transitive dependencies and any impurities computed above) - Note that the sandbox profile allows file-write* even though it isn't seemingly necessary. First of all, nix's standard user permissioning - mechanism still prevents builders from writing to input directories, so no security/purity is lost. The reason we allow file-write* is that - denying it means the `access` syscall will return EPERM instead of EACCESS, which confuses a few programs that assume (understandably, since - it appears to be a violation of the POSIX spec) that `access` won't do that, and don't deal with it nicely if it does. The most notable of - these is the entire GHC Haskell ecosystem. 
*/ + + without file-write* allowed, access() incorrectly returns EPERM + */ sandboxProfile += "(allow file-read* file-write* process-exec\n"; for (auto & i : dirsInChroot) { if (i.first != i.second) - throw SysError(format("can't map '%1%' to '%2%': mismatched impure paths not supported on darwin")); + throw Error(format( + "can't map '%1%' to '%2%': mismatched impure paths not supported on Darwin") + % i.first % i.second); string path = i.first; struct stat st; @@ -2437,46 +2464,66 @@ void DerivationGoal::runChild() } sandboxProfile += ")\n"; - /* Our ancestry. N.B: this uses literal on folders, instead of subpath. Without that, - you open up the entire filesystem because you end up with (subpath "/") */ - sandboxProfile += "(allow file-read-metadata\n"; + /* Allow file-read* on full directory hierarchy to self. Allows realpath() */ + sandboxProfile += "(allow file-read*\n"; for (auto & i : ancestry) { sandboxProfile += (format("\t(literal \"%1%\")\n") % i.c_str()).str(); } sandboxProfile += ")\n"; + sandboxProfile += additionalSandboxProfile; + + debug("Generated sandbox profile:"); + debug(sandboxProfile); + + Path sandboxFile = drvPath + ".sb"; + deletePath(sandboxFile); + autoDelSandbox.reset(sandboxFile, false); + + writeFile(sandboxFile, sandboxProfile); + builder = "/usr/bin/sandbox-exec"; args.push_back("sandbox-exec"); - args.push_back("-p"); - args.push_back(sandboxProfile); - args.push_back(drv.builder); + args.push_back("-f"); + args.push_back(sandboxFile); + args.push_back("-D"); + args.push_back("_GLOBAL_TMP_DIR=" + globalTmpDir); + args.push_back(drv->builder); +#endif } else { - builder = drv.builder.c_str(); - string builderBasename = baseNameOf(drv.builder); + builder = drv->builder.c_str(); + string builderBasename = baseNameOf(drv->builder); args.push_back(builderBasename); } - foreach (Strings::iterator, i, drv.args) - args.push_back(rewriteHashes(*i, rewritesToTmp)); + for (auto & i : drv->args) + args.push_back(rewriteHashes(i, 
rewritesToTmp)); restoreSIGPIPE(); /* Indicate that we managed to set up the build environment. */ - writeFull(STDERR_FILENO, "\n"); + writeFull(STDERR_FILENO, string("\1\n")); - /* This needs to be after that fateful '\n', and I didn't want to duplicate code */ - if (useChroot && SANDBOX_ENABLED) { - printMsg(lvlDebug, "Generated sandbox profile:"); - printMsg(lvlDebug, sandboxProfile); + /* Execute the program. This should not return. */ + if (drv->isBuiltin()) { + try { + if (drv->builder == "builtin:fetchurl") + builtinFetchurl(*drv); + else + throw Error(format("unsupported builtin function ‘%1%’") % string(drv->builder, 8)); + _exit(0); + } catch (std::exception & e) { + writeFull(STDERR_FILENO, "error: " + string(e.what()) + "\n"); + _exit(1); + } } - /* Execute the program. This should not return. */ execve(builder, stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data()); - throw SysError(format("executing ‘%1%’") % drv.builder); + throw SysError(format("executing ‘%1%’") % drv->builder); } catch (std::exception & e) { - writeFull(STDERR_FILENO, "while setting up the build environment: " + string(e.what()) + "\n"); + writeFull(STDERR_FILENO, "\1while setting up the build environment: " + string(e.what()) + "\n"); _exit(1); } } @@ -2485,18 +2532,17 @@ void DerivationGoal::runChild() /* Parse a list of reference specifiers. Each element must either be a store path, or the symbolic name of the output of the derivation (such as `out'). 
*/ -PathSet parseReferenceSpecifiers(const Derivation & drv, string attr) +PathSet parseReferenceSpecifiers(const BasicDerivation & drv, string attr) { PathSet result; Paths paths = tokenizeString<Paths>(attr); - foreach (Strings::iterator, i, paths) { - if (isStorePath(*i)) - result.insert(*i); - else if (drv.outputs.find(*i) != drv.outputs.end()) - result.insert(drv.outputs.find(*i)->second.path); + for (auto & i : paths) { + if (isStorePath(i)) + result.insert(i); + else if (drv.outputs.find(i) != drv.outputs.end()) + result.insert(drv.outputs.find(i)->second.path); else throw BuildError( - format("derivation contains an illegal reference specifier ‘%1%’") - % *i); + format("derivation contains an illegal reference specifier ‘%1%’") % i); } return result; } @@ -2509,18 +2555,25 @@ void DerivationGoal::registerOutputs() to do anything here. */ if (hook) { bool allValid = true; - foreach (DerivationOutputs::iterator, i, drv.outputs) - if (!worker.store.isValidPath(i->second.path)) allValid = false; + for (auto & i : drv->outputs) + if (!worker.store.isValidPath(i.second.path)) allValid = false; if (allValid) return; } ValidPathInfos infos; + /* Set of inodes seen during calls to canonicalisePathMetaData() + for this build's outputs. This needs to be shared between + outputs to allow hard links between outputs. */ + InodesSeen inodesSeen; + + Path checkSuffix = "-check"; + /* Check whether the output paths were created, and grep each output path to determine what other paths it references. Also make all output paths read-only. 
*/ - foreach (DerivationOutputs::iterator, i, drv.outputs) { - Path path = i->second.path; + for (auto & i : drv->outputs) { + Path path = i.second.path; if (missingPaths.find(path) == missingPaths.end()) continue; Path actualPath = path; @@ -2532,7 +2585,7 @@ void DerivationGoal::registerOutputs() replaceValidPath(path, actualPath); else if (buildMode != bmCheck && rename(actualPath.c_str(), path.c_str()) == -1) - throw SysError(format("moving build output ‘%1%’ from the chroot to the Nix store") % path); + throw SysError(format("moving build output ‘%1%’ from the sandbox to the Nix store") % path); } if (buildMode != bmCheck) actualPath = path; } else { @@ -2541,7 +2594,7 @@ void DerivationGoal::registerOutputs() && redirectedBadOutputs.find(path) != redirectedBadOutputs.end() && pathExists(redirected)) replaceValidPath(path, redirected); - if (buildMode == bmCheck) + if (buildMode == bmCheck && redirected != "") actualPath = redirected; } @@ -2578,23 +2631,22 @@ void DerivationGoal::registerOutputs() StringSink sink; dumpPath(actualPath, sink); deletePath(actualPath); - sink.s = rewriteHashes(sink.s, rewritesFromTmp); - StringSource source(sink.s); + sink.s = make_ref<std::string>(rewriteHashes(*sink.s, rewritesFromTmp)); + StringSource source(*sink.s); restorePath(actualPath, source); rewritten = true; } - startNest(nest, lvlTalkative, - format("scanning for references inside ‘%1%’") % path); + Activity act(*logger, lvlTalkative, format("scanning for references inside ‘%1%’") % path); /* Check that fixed-output derivations produced the right outputs (i.e., the content hash should match the specified hash). 
*/ - if (i->second.hash != "") { + if (i.second.hash != "") { bool recursive; HashType ht; Hash h; - i->second.parseHashInfo(recursive, ht, h); + i.second.parseHashInfo(recursive, ht, h); if (!recursive) { /* The output path should be a regular file without @@ -2604,12 +2656,28 @@ void DerivationGoal::registerOutputs() format("output path ‘%1%’ should be a non-executable regular file") % path); } - /* Check the hash. */ + /* Check the hash. In hash mode, move the path produced by + the derivation to its content-addressed location. */ Hash h2 = recursive ? hashPath(ht, actualPath).first : hashFile(ht, actualPath); - if (h != h2) - throw BuildError( - format("output path ‘%1%’ should have %2% hash ‘%3%’, instead has ‘%4%’") - % path % i->second.hashAlgo % printHash16or32(h) % printHash16or32(h2)); + if (buildMode == bmHash) { + Path dest = makeFixedOutputPath(recursive, ht, h2, drv->env["name"]); + printMsg(lvlError, format("build produced path ‘%1%’ with %2% hash ‘%3%’") + % dest % printHashType(ht) % printHash16or32(h2)); + if (worker.store.isValidPath(dest)) + return; + if (actualPath != dest) { + PathLocks outputLocks({dest}); + deletePath(dest); + if (rename(actualPath.c_str(), dest.c_str()) == -1) + throw SysError(format("moving ‘%1%’ to ‘%2%’") % actualPath % dest); + } + path = actualPath = dest; + } else { + if (h != h2) + throw BuildError( + format("output path ‘%1%’ has %2% hash ‘%3%’ when ‘%4%’ was expected") + % path % i.second.hashAlgo % printHash16or32(h2) % printHash16or32(h)); + } } /* Get rid of all weird permissions. 
This also checks that @@ -2625,27 +2693,47 @@ void DerivationGoal::registerOutputs() PathSet references = scanForReferences(actualPath, allPaths, hash); if (buildMode == bmCheck) { - ValidPathInfo info = worker.store.queryPathInfo(path); - if (hash.first != info.hash) - throw Error(format("derivation ‘%1%’ may not be deterministic: hash mismatch in output ‘%2%’") % drvPath % path); + if (!worker.store.isValidPath(path)) continue; + auto info = *worker.store.queryPathInfo(path); + if (hash.first != info.narHash) { + if (settings.keepFailed) { + Path dst = path + checkSuffix; + deletePath(dst); + if (rename(actualPath.c_str(), dst.c_str())) + throw SysError(format("renaming ‘%1%’ to ‘%2%’") % actualPath % dst); + throw Error(format("derivation ‘%1%’ may not be deterministic: output ‘%2%’ differs from ‘%3%’") + % drvPath % path % dst); + } else + throw Error(format("derivation ‘%1%’ may not be deterministic: output ‘%2%’ differs") + % drvPath % path); + } + + /* Since we verified the build, it's now ultimately + trusted. */ + if (!info.ultimate) { + info.ultimate = true; + worker.store.signPathInfo(info); + worker.store.registerValidPaths({info}); + } + continue; } /* For debugging, print out the referenced and unreferenced paths. */ - foreach (PathSet::iterator, i, inputPaths) { - PathSet::iterator j = references.find(*i); + for (auto & i : inputPaths) { + PathSet::iterator j = references.find(i); if (j == references.end()) - debug(format("unreferenced input: ‘%1%’") % *i); + debug(format("unreferenced input: ‘%1%’") % i); else - debug(format("referenced input: ‘%1%’") % *i); + debug(format("referenced input: ‘%1%’") % i); } /* Enforce `allowedReferences' and friends. 
*/ auto checkRefs = [&](const string & attrName, bool allowed, bool recursive) { - if (drv.env.find(attrName) == drv.env.end()) return; + if (drv->env.find(attrName) == drv->env.end()) return; - PathSet spec = parseReferenceSpecifiers(drv, get(drv.env, attrName)); + PathSet spec = parseReferenceSpecifiers(*drv, get(drv->env, attrName)); PathSet used; if (recursive) { @@ -2653,18 +2741,29 @@ void DerivationGoal::registerOutputs() for (auto & i : references) /* Don't call computeFSClosure on ourselves. */ if (actualPath != i) - computeFSClosure(worker.store, i, used); + worker.store.computeFSClosure(i, used); } else used = references; + PathSet badPaths; + for (auto & i : used) if (allowed) { if (spec.find(i) == spec.end()) - throw BuildError(format("output (‘%1%’) is not allowed to refer to path ‘%2%’") % actualPath % i); + badPaths.insert(i); } else { if (spec.find(i) != spec.end()) - throw BuildError(format("output (‘%1%’) is not allowed to refer to path ‘%2%’") % actualPath % i); + badPaths.insert(i); + } + + if (!badPaths.empty()) { + string badPathsStr; + for (auto & i : badPaths) { + badPathsStr += "\n\t"; + badPathsStr += i; } + throw BuildError(format("output ‘%1%’ is not allowed to refer to the following paths:%2%") % actualPath % badPathsStr); + } }; checkRefs("allowedReferences", true, false); @@ -2672,21 +2771,63 @@ void DerivationGoal::registerOutputs() checkRefs("disallowedReferences", false, false); checkRefs("disallowedRequisites", false, true); - worker.store.optimisePath(path); // FIXME: combine with scanForReferences() + if (curRound == nrRounds) { + worker.store.optimisePath(path); // FIXME: combine with scanForReferences() - worker.store.markContentsGood(path); + worker.markContentsGood(path); + } ValidPathInfo info; info.path = path; - info.hash = hash.first; + info.narHash = hash.first; info.narSize = hash.second; info.references = references; info.deriver = drvPath; + info.ultimate = true; + worker.store.signPathInfo(info); + 
infos.push_back(info); } if (buildMode == bmCheck) return; + /* Compare the result with the previous round, and report which + path is different, if any.*/ + if (curRound > 1 && prevInfos != infos) { + assert(prevInfos.size() == infos.size()); + for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j) + if (!(*i == *j)) { + Path prev = i->path + checkSuffix; + if (pathExists(prev)) + throw NotDeterministic( + format("output ‘%1%’ of ‘%2%’ differs from ‘%3%’ from previous round") + % i->path % drvPath % prev); + else + throw NotDeterministic( + format("output ‘%1%’ of ‘%2%’ differs from previous round") + % i->path % drvPath); + } + assert(false); // shouldn't happen + } + + if (settings.keepFailed) { + for (auto & i : drv->outputs) { + Path prev = i.second.path + checkSuffix; + deletePath(prev); + if (curRound < nrRounds) { + Path dst = i.second.path + checkSuffix; + if (rename(i.second.path.c_str(), dst.c_str())) + throw SysError(format("renaming ‘%1%’ to ‘%2%’") % i.second.path % dst); + } + } + + } + + if (curRound < nrRounds) { + prevInfos = infos; + return; + } + /* Register each output path as valid, and register the sets of paths referenced by each of them. If there are cycles in the outputs, this will fail. */ @@ -2709,46 +2850,31 @@ Path DerivationGoal::openLogFile() Path dir = (format("%1%/%2%/%3%/") % settings.nixLogDir % drvsLogDir % string(baseName, 0, 2)).str(); createDirs(dir); - if (settings.compressLog) { + Path logFileName = (format("%1%/%2%%3%") + % dir + % string(baseName, 2) + % (settings.compressLog ? 
".bz2" : "")).str(); - Path logFileName = (format("%1%/%2%.bz2") % dir % string(baseName, 2)).str(); - AutoCloseFD fd = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC, 0666); - if (fd == -1) throw SysError(format("creating log file ‘%1%’") % logFileName); - closeOnExec(fd); + fdLogFile = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0666); + if (fdLogFile == -1) throw SysError(format("creating log file ‘%1%’") % logFileName); - if (!(fLogFile = fdopen(fd.borrow(), "w"))) - throw SysError(format("opening file ‘%1%’") % logFileName); + logFileSink = std::make_shared<FdSink>(fdLogFile); - int err; - if (!(bzLogFile = BZ2_bzWriteOpen(&err, fLogFile, 9, 0, 0))) - throw Error(format("cannot open compressed log file ‘%1%’") % logFileName); - - return logFileName; + if (settings.compressLog) + logSink = std::shared_ptr<CompressionSink>(makeCompressionSink("bzip2", *logFileSink)); + else + logSink = logFileSink; - } else { - Path logFileName = (format("%1%/%2%") % dir % string(baseName, 2)).str(); - fdLogFile = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC, 0666); - if (fdLogFile == -1) throw SysError(format("creating log file ‘%1%’") % logFileName); - closeOnExec(fdLogFile); - return logFileName; - } + return logFileName; } void DerivationGoal::closeLogFile() { - if (bzLogFile) { - int err; - BZ2_bzWriteClose(&err, bzLogFile, 0, 0, 0); - bzLogFile = 0; - if (err != BZ_OK) throw Error(format("cannot close compressed log file (BZip2 error = %1%)") % err); - } - - if (fLogFile) { - fclose(fLogFile); - fLogFile = 0; - } - + auto logSink2 = std::dynamic_pointer_cast<CompressionSink>(logSink); + if (logSink2) logSink2->finish(); + if (logFileSink) logFileSink->flush(); + logSink = logFileSink = 0; fdLogFile.close(); } @@ -2779,59 +2905,61 @@ void DerivationGoal::handleChildOutput(int fd, const string & data) printMsg(lvlError, format("%1% killed after writing more than %2% bytes of log output") % getName() % settings.maxLogSize); - 
cancel(true); // not really a timeout, but close enough + killChild(); + done(BuildResult::LogLimitExceeded); return; } - if (verbosity >= settings.buildVerbosity) - writeToStderr(filterANSIEscapes(data, true)); - if (bzLogFile) { - int err; - BZ2_bzWrite(&err, bzLogFile, (unsigned char *) data.data(), data.size()); - if (err != BZ_OK) throw Error(format("cannot write to compressed log file (BZip2 error = %1%)") % err); - } else if (fdLogFile != -1) - writeFull(fdLogFile, data); + + for (auto c : data) + if (c == '\r') + currentLogLinePos = 0; + else if (c == '\n') + flushLine(); + else { + if (currentLogLinePos >= currentLogLine.size()) + currentLogLine.resize(currentLogLinePos + 1); + currentLogLine[currentLogLinePos++] = c; + } + + if (logSink) (*logSink)(data); } if (hook && fd == hook->fromHook.readSide) - writeToStderr(data); + printMsg(lvlError, data); // FIXME? } void DerivationGoal::handleEOF(int fd) { + if (!currentLogLine.empty()) flushLine(); worker.wakeUp(shared_from_this()); } -PathSet DerivationGoal::checkPathValidity(bool returnValid, bool checkHash) +void DerivationGoal::flushLine() { - PathSet result; - foreach (DerivationOutputs::iterator, i, drv.outputs) { - if (!wantOutput(i->first, wantedOutputs)) continue; - bool good = - worker.store.isValidPath(i->second.path) && - (!checkHash || worker.store.pathContentsGood(i->second.path)); - if (good == returnValid) result.insert(i->second.path); + if (settings.verboseBuild) + printMsg(lvlInfo, filterANSIEscapes(currentLogLine, true)); + else { + logTail.push_back(currentLogLine); + if (logTail.size() > settings.logLines) logTail.pop_front(); } - return result; + currentLogLine = ""; + currentLogLinePos = 0; } -bool DerivationGoal::pathFailed(const Path & path) +PathSet DerivationGoal::checkPathValidity(bool returnValid, bool checkHash) { - if (!settings.cacheFailure) return false; - - if (!worker.store.hasPathFailed(path)) return false; - - printMsg(lvlError, format("builder for ‘%1%’ failed previously 
(cached)") % path); - - if (settings.printBuildTrace) - printMsg(lvlError, format("@ build-failed %1% - cached") % drvPath); - - worker.permanentFailure = true; - amDone(ecFailed); - - return true; + PathSet result; + for (auto & i : drv->outputs) { + if (!wantOutput(i.first, wantedOutputs)) continue; + bool good = + worker.store.isValidPath(i.second.path) && + (!checkHash || worker.pathContentsGood(i.second.path)); + if (good == returnValid) result.insert(i.second.path); + } + return result; } @@ -2840,7 +2968,7 @@ Path DerivationGoal::addHashRewrite(const Path & path) string h1 = string(path, settings.nixStore.size() + 1, 32); string h2 = string(printHash32(hashString(htSHA256, "rewrite:" + drvPath + ":" + path)), 0, 32); Path p = settings.nixStore + "/" + h2 + string(path, settings.nixStore.size() + 33); - if (pathExists(p)) deletePath(p); + deletePath(p); assert(path.size() == p.size()); rewritesToTmp[h1] = h2; rewritesFromTmp[h2] = h1; @@ -2849,6 +2977,18 @@ Path DerivationGoal::addHashRewrite(const Path & path) } +void DerivationGoal::done(BuildResult::Status status, const string & msg) +{ + result.status = status; + result.errorMsg = msg; + amDone(result.success() ? ecSuccess : ecFailed); + if (result.status == BuildResult::TimedOut) + worker.timedOut = true; + if (result.status == BuildResult::PermanentFailure) + worker.permanentFailure = true; +} + + ////////////////////////////////////////////////////////////////////// @@ -2861,28 +3001,24 @@ private: Path storePath; /* The remaining substituters. */ - Paths subs; + std::list<ref<Store>> subs; /* The current substituter. */ - Path sub; + std::shared_ptr<Store> sub; - /* Whether any substituter can realise this path */ + /* Whether any substituter can realise this path. */ bool hasSubstitute; /* Path info returned by the substituter's query info operation. */ - SubstitutablePathInfo info; + std::shared_ptr<const ValidPathInfo> info; /* Pipe for the substituter's standard output. 
*/ Pipe outPipe; - /* Pipe for the substituter's standard error. */ - Pipe logPipe; + /* The substituter thread. */ + std::thread thr; - /* The process ID of the builder. */ - Pid pid; - - /* Lock on the store path. */ - std::shared_ptr<PathLocks> outputLock; + std::promise<void> promise; /* Whether to try to repair a valid path. */ bool repair; @@ -2898,7 +3034,7 @@ public: SubstitutionGoal(const Path & storePath, Worker & worker, bool repair = false); ~SubstitutionGoal(); - void cancel(bool timeout); + void timedOut() { abort(); }; string key() { @@ -2939,20 +3075,14 @@ SubstitutionGoal::SubstitutionGoal(const Path & storePath, Worker & worker, bool SubstitutionGoal::~SubstitutionGoal() { - if (pid != -1) worker.childTerminated(pid); -} - - -void SubstitutionGoal::cancel(bool timeout) -{ - if (settings.printBuildTrace && timeout) - printMsg(lvlError, format("@ substituter-failed %1% timeout") % storePath); - if (pid != -1) { - pid_t savedPid = pid; - pid.kill(); - worker.childTerminated(savedPid); + try { + if (thr.joinable()) { + thr.join(); + worker.childTerminated(shared_from_this()); + } + } catch (...) { + ignoreException(); } - amDone(ecFailed); } @@ -2977,7 +3107,7 @@ void SubstitutionGoal::init() if (settings.readOnlyMode) throw Error(format("cannot substitute path ‘%1%’ - no write access to the Nix store") % storePath); - subs = settings.substituters; + subs = getDefaultSubstituters(); tryNext(); } @@ -2991,6 +3121,7 @@ void SubstitutionGoal::tryNext() /* None left. Terminate this goal and let someone else deal with it. */ debug(format("path ‘%1%’ is required, but there is no substituter that can build it") % storePath); + /* Hack: don't indicate failure if there were no substituters. In that case the calling derivation should just do a build. 
*/ @@ -3001,19 +3132,31 @@ void SubstitutionGoal::tryNext() sub = subs.front(); subs.pop_front(); - SubstitutablePathInfos infos; - PathSet dummy(singleton<PathSet>(storePath)); - worker.store.querySubstitutablePathInfos(sub, dummy, infos); - SubstitutablePathInfos::iterator k = infos.find(storePath); - if (k == infos.end()) { tryNext(); return; } - info = k->second; + try { + // FIXME: make async + info = sub->queryPathInfo(storePath); + } catch (InvalidPath &) { + tryNext(); + return; + } + hasSubstitute = true; + /* Bail out early if this substituter lacks a valid + signature. LocalStore::addToStore() also checks for this, but + only after we've downloaded the path. */ + if (worker.store.requireSigs && !info->checkSignatures(worker.store.publicKeys)) { + printMsg(lvlInfo, format("warning: substituter ‘%s’ does not have a valid signature for path ‘%s’") + % sub->getUri() % storePath); + tryNext(); + return; + } + /* To maintain the closure invariant, we first have to realise the paths referenced by this one. 
*/ - foreach (PathSet::iterator, i, info.references) - if (*i != storePath) /* ignore self-references */ - addWaitee(worker.makeSubstitutionGoal(*i)); + for (auto & i : info->references) + if (i != storePath) /* ignore self-references */ + addWaitee(worker.makeSubstitutionGoal(i)); if (waitees.empty()) /* to prevent hang (no wake-up event) */ referencesValid(); @@ -3032,9 +3175,9 @@ void SubstitutionGoal::referencesValid() return; } - foreach (PathSet::iterator, i, info.references) - if (*i != storePath) /* ignore self-references */ - assert(worker.store.isValidPath(*i)); + for (auto & i : info->references) + if (i != storePath) /* ignore self-references */ + assert(worker.store.isValidPath(i)); state = &SubstitutionGoal::tryToRun; worker.wakeUp(shared_from_this()); @@ -3054,76 +3197,29 @@ void SubstitutionGoal::tryToRun() return; } - /* Maybe a derivation goal has already locked this path - (exceedingly unlikely, since it should have used a substitute - first, but let's be defensive). */ - outputLock.reset(); // make sure this goal's lock is gone - if (pathIsLockedByMe(storePath)) { - debug(format("restarting substitution of ‘%1%’ because it's locked by another goal") - % storePath); - worker.waitForAnyGoal(shared_from_this()); - return; /* restart in the tryToRun() state when another goal finishes */ - } - - /* Acquire a lock on the output path. */ - outputLock = std::shared_ptr<PathLocks>(new PathLocks); - if (!outputLock->lockPaths(singleton<PathSet>(storePath), "", false)) { - worker.waitForAWhile(shared_from_this()); - return; - } - - /* Check again whether the path is invalid. */ - if (!repair && worker.store.isValidPath(storePath)) { - debug(format("store path ‘%1%’ has become valid") % storePath); - outputLock->setDeletion(true); - amDone(ecSuccess); - return; - } - printMsg(lvlInfo, format("fetching path ‘%1%’...") % storePath); outPipe.create(); - logPipe.create(); - - destPath = repair ? 
storePath + ".tmp" : storePath; - - /* Remove the (stale) output path if it exists. */ - if (pathExists(destPath)) - deletePath(destPath); - - worker.store.setSubstituterEnv(); - - /* Fill in the arguments. */ - Strings args; - args.push_back(baseNameOf(sub)); - args.push_back("--substitute"); - args.push_back(storePath); - args.push_back(destPath); - - /* Fork the substitute program. */ - pid = startProcess([&]() { - commonChildInit(logPipe); + promise = std::promise<void>(); - if (dup2(outPipe.writeSide, STDOUT_FILENO) == -1) - throw SysError("cannot dup output pipe into stdout"); + thr = std::thread([this]() { + try { + /* Wake up the worker loop when we're done. */ + Finally updateStats([this]() { outPipe.writeSide.close(); }); - execv(sub.c_str(), stringsToCharPtrs(args).data()); + copyStorePath(ref<Store>(sub), ref<Store>(worker.store.shared_from_this()), + storePath, repair); - throw SysError(format("executing ‘%1%’") % sub); + promise.set_value(); + } catch (...) { + promise.set_exception(std::current_exception()); + } }); - pid.setSeparatePG(true); - pid.setKillSignal(SIGTERM); - outPipe.writeSide.close(); - logPipe.writeSide.close(); - worker.childStarted(shared_from_this(), - pid, singleton<set<int> >(logPipe.readSide), true, true); + worker.childStarted(shared_from_this(), {outPipe.readSide}, true, false); state = &SubstitutionGoal::finished; - - if (settings.printBuildTrace) - printMsg(lvlError, format("@ substituter-started %1% %2%") % storePath % sub); } @@ -3131,110 +3227,40 @@ void SubstitutionGoal::finished() { trace("substitute finished"); - /* Since we got an EOF on the logger pipe, the substitute is - presumed to have terminated. */ - pid_t savedPid = pid; - int status = pid.wait(true); + thr.join(); + worker.childTerminated(shared_from_this()); - /* So the child is gone now. */ - worker.childTerminated(savedPid); - - /* Close the read side of the logger pipe. */ - logPipe.readSide.close(); - - /* Get the hash info from stdout. 
*/ - string dummy = readLine(outPipe.readSide); - string expectedHashStr = statusOk(status) ? readLine(outPipe.readSide) : ""; - outPipe.readSide.close(); - - /* Check the exit status and the build result. */ - HashResult hash; try { - - if (!statusOk(status)) - throw SubstError(format("fetching path ‘%1%’ %2%") - % storePath % statusToString(status)); - - if (!pathExists(destPath)) - throw SubstError(format("substitute did not produce path ‘%1%’") % destPath); - - hash = hashPath(htSHA256, destPath); - - /* Verify the expected hash we got from the substituer. */ - if (expectedHashStr != "") { - size_t n = expectedHashStr.find(':'); - if (n == string::npos) - throw Error(format("bad hash from substituter: %1%") % expectedHashStr); - HashType hashType = parseHashType(string(expectedHashStr, 0, n)); - if (hashType == htUnknown) - throw Error(format("unknown hash algorithm in ‘%1%’") % expectedHashStr); - Hash expectedHash = parseHash16or32(hashType, string(expectedHashStr, n + 1)); - Hash actualHash = hashType == htSHA256 ? hash.first : hashPath(hashType, destPath).first; - if (expectedHash != actualHash) - throw SubstError(format("hash mismatch in downloaded path ‘%1%’: expected %2%, got %3%") - % storePath % printHash(expectedHash) % printHash(actualHash)); - } - - } catch (SubstError & e) { - + promise.get_future().get(); + } catch (Error & e) { printMsg(lvlInfo, e.msg()); - if (settings.printBuildTrace) { - printMsg(lvlError, format("@ substituter-failed %1% %2% %3%") - % storePath % status % e.msg()); - } - /* Try the next substitute. 
*/ state = &SubstitutionGoal::tryNext; worker.wakeUp(shared_from_this()); return; } - if (repair) replaceValidPath(storePath, destPath); - - canonicalisePathMetaData(storePath, -1); - - worker.store.optimisePath(storePath); // FIXME: combine with hashPath() - - ValidPathInfo info2; - info2.path = storePath; - info2.hash = hash.first; - info2.narSize = hash.second; - info2.references = info.references; - info2.deriver = info.deriver; - worker.store.registerValidPath(info2); - - outputLock->setDeletion(true); - outputLock.reset(); - - worker.store.markContentsGood(storePath); + worker.markContentsGood(storePath); printMsg(lvlChatty, format("substitution of path ‘%1%’ succeeded") % storePath); - if (settings.printBuildTrace) - printMsg(lvlError, format("@ substituter-succeeded %1%") % storePath); - amDone(ecSuccess); } void SubstitutionGoal::handleChildOutput(int fd, const string & data) { - assert(fd == logPipe.readSide); - if (verbosity >= settings.buildVerbosity) writeToStderr(data); - /* Don't write substitution output to a log file for now. We - probably should, though. 
*/ } void SubstitutionGoal::handleEOF(int fd) { - if (fd == logPipe.readSide) worker.wakeUp(shared_from_this()); + if (fd == outPipe.readSide) worker.wakeUp(shared_from_this()); } - ////////////////////////////////////////////////////////////////////// @@ -3266,11 +3292,12 @@ Worker::~Worker() } -GoalPtr Worker::makeDerivationGoal(const Path & path, const StringSet & wantedOutputs, BuildMode buildMode) +GoalPtr Worker::makeDerivationGoal(const Path & path, + const StringSet & wantedOutputs, BuildMode buildMode) { GoalPtr goal = derivationGoals[path].lock(); if (!goal) { - goal = GoalPtr(new DerivationGoal(path, wantedOutputs, *this, buildMode)); + goal = std::make_shared<DerivationGoal>(path, wantedOutputs, *this, buildMode); derivationGoals[path] = goal; wakeUp(goal); } else @@ -3279,11 +3306,20 @@ GoalPtr Worker::makeDerivationGoal(const Path & path, const StringSet & wantedOu } +std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const Path & drvPath, + const BasicDerivation & drv, BuildMode buildMode) +{ + auto goal = std::make_shared<DerivationGoal>(drvPath, drv, *this, buildMode); + wakeUp(goal); + return goal; +} + + GoalPtr Worker::makeSubstitutionGoal(const Path & path, bool repair) { GoalPtr goal = substitutionGoals[path].lock(); if (!goal) { - goal = GoalPtr(new SubstitutionGoal(path, *this, repair)); + goal = std::make_shared<SubstitutionGoal>(path, *this, repair); substitutionGoals[path] = goal; wakeUp(goal); } @@ -3318,8 +3354,8 @@ void Worker::removeGoal(GoalPtr goal) } /* Wake up goals waiting for any goal to finish. 
*/ - foreach (WeakGoals::iterator, i, waitingForAnyGoal) { - GoalPtr goal = i->lock(); + for (auto & i : waitingForAnyGoal) { + GoalPtr goal = i.lock(); if (goal) wakeUp(goal); } @@ -3340,9 +3376,8 @@ unsigned Worker::getNrLocalBuilds() } -void Worker::childStarted(GoalPtr goal, - pid_t pid, const set<int> & fds, bool inBuildSlot, - bool respectTimeouts) +void Worker::childStarted(GoalPtr goal, const set<int> & fds, + bool inBuildSlot, bool respectTimeouts) { Child child; child.goal = goal; @@ -3350,30 +3385,29 @@ void Worker::childStarted(GoalPtr goal, child.timeStarted = child.lastOutput = time(0); child.inBuildSlot = inBuildSlot; child.respectTimeouts = respectTimeouts; - children[pid] = child; + children.emplace_back(child); if (inBuildSlot) nrLocalBuilds++; } -void Worker::childTerminated(pid_t pid, bool wakeSleepers) +void Worker::childTerminated(GoalPtr goal, bool wakeSleepers) { - assert(pid != -1); /* common mistake */ - - Children::iterator i = children.find(pid); + auto i = std::find_if(children.begin(), children.end(), + [&](const Child & child) { return child.goal.lock() == goal; }); assert(i != children.end()); - if (i->second.inBuildSlot) { + if (i->inBuildSlot) { assert(nrLocalBuilds > 0); nrLocalBuilds--; } - children.erase(pid); + children.erase(i); if (wakeSleepers) { /* Wake up goals waiting for a build slot. */ - foreach (WeakGoals::iterator, i, wantingToBuild) { - GoalPtr goal = i->lock(); + for (auto & j : wantingToBuild) { + GoalPtr goal = j.lock(); if (goal) wakeUp(goal); } @@ -3408,9 +3442,9 @@ void Worker::waitForAWhile(GoalPtr goal) void Worker::run(const Goals & _topGoals) { - foreach (Goals::iterator, i, _topGoals) topGoals.insert(*i); + for (auto & i : _topGoals) topGoals.insert(i); - startNest(nest, lvlDebug, format("entered goal loop")); + Activity act(*logger, lvlDebug, "entered goal loop"); while (1) { @@ -3474,12 +3508,12 @@ void Worker::waitForInput() deadline for any child. 
*/ assert(sizeof(time_t) >= sizeof(long)); time_t nearest = LONG_MAX; // nearest deadline - foreach (Children::iterator, i, children) { - if (!i->second.respectTimeouts) continue; + for (auto & i : children) { + if (!i.respectTimeouts) continue; if (settings.maxSilentTime != 0) - nearest = std::min(nearest, i->second.lastOutput + settings.maxSilentTime); + nearest = std::min(nearest, i.lastOutput + settings.maxSilentTime); if (settings.buildTimeout != 0) - nearest = std::min(nearest, i->second.timeStarted + settings.buildTimeout); + nearest = std::min(nearest, i.timeStarted + settings.buildTimeout); } if (nearest != LONG_MAX) { timeout.tv_sec = std::max((time_t) 1, nearest - before); @@ -3497,17 +3531,16 @@ void Worker::waitForInput() timeout.tv_sec = std::max((time_t) 1, (time_t) (lastWokenUp + settings.pollInterval - before)); } else lastWokenUp = 0; - using namespace std; /* Use select() to wait for the input side of any logger pipe to become `available'. Note that `available' (i.e., non-blocking) includes EOF. */ fd_set fds; FD_ZERO(&fds); int fdMax = 0; - foreach (Children::iterator, i, children) { - foreach (set<int>::iterator, j, i->second.fds) { - FD_SET(*j, &fds); - if (*j >= fdMax) fdMax = *j + 1; + for (auto & i : children) { + for (auto & j : i.fds) { + FD_SET(j, &fds); + if (j >= fdMax) fdMax = j + 1; } } @@ -3519,73 +3552,65 @@ void Worker::waitForInput() time_t after = time(0); /* Process all available file descriptors. */ + decltype(children)::iterator i; + for (auto j = children.begin(); j != children.end(); j = i) { + i = std::next(j); - /* Since goals may be canceled from inside the loop below (causing - them go be erased from the `children' map), we have to be - careful that we don't keep iterators alive across calls to - cancel(). 
*/ - set<pid_t> pids; - foreach (Children::iterator, i, children) pids.insert(i->first); - - foreach (set<pid_t>::iterator, i, pids) { checkInterrupt(); - Children::iterator j = children.find(*i); - if (j == children.end()) continue; // child destroyed - GoalPtr goal = j->second.goal.lock(); + + GoalPtr goal = j->goal.lock(); assert(goal); - set<int> fds2(j->second.fds); - foreach (set<int>::iterator, k, fds2) { - if (FD_ISSET(*k, &fds)) { + set<int> fds2(j->fds); + for (auto & k : fds2) { + if (FD_ISSET(k, &fds)) { unsigned char buffer[4096]; - ssize_t rd = read(*k, buffer, sizeof(buffer)); + ssize_t rd = read(k, buffer, sizeof(buffer)); if (rd == -1) { if (errno != EINTR) throw SysError(format("reading from %1%") % goal->getName()); } else if (rd == 0) { debug(format("%1%: got EOF") % goal->getName()); - goal->handleEOF(*k); - j->second.fds.erase(*k); + goal->handleEOF(k); + j->fds.erase(k); } else { printMsg(lvlVomit, format("%1%: read %2% bytes") % goal->getName() % rd); string data((char *) buffer, rd); - j->second.lastOutput = after; - goal->handleChildOutput(*k, data); + j->lastOutput = after; + goal->handleChildOutput(k, data); } } } if (goal->getExitCode() == Goal::ecBusy && settings.maxSilentTime != 0 && - j->second.respectTimeouts && - after - j->second.lastOutput >= (time_t) settings.maxSilentTime) + j->respectTimeouts && + after - j->lastOutput >= (time_t) settings.maxSilentTime) { printMsg(lvlError, format("%1% timed out after %2% seconds of silence") % goal->getName() % settings.maxSilentTime); - goal->cancel(true); - timedOut = true; + goal->timedOut(); } else if (goal->getExitCode() == Goal::ecBusy && settings.buildTimeout != 0 && - j->second.respectTimeouts && - after - j->second.timeStarted >= (time_t) settings.buildTimeout) + j->respectTimeouts && + after - j->timeStarted >= (time_t) settings.buildTimeout) { printMsg(lvlError, format("%1% timed out after %2% seconds") % goal->getName() % settings.buildTimeout); - goal->cancel(true); - timedOut = 
true; + goal->timedOut(); } } - if (!waitingForAWhile.empty() && lastWokenUp + settings.pollInterval <= after) { + if (!waitingForAWhile.empty() && lastWokenUp + (time_t) settings.pollInterval <= after) { lastWokenUp = after; - foreach (WeakGoals::iterator, i, waitingForAWhile) { - GoalPtr goal = i->lock(); + for (auto & i : waitingForAWhile) { + GoalPtr goal = i.lock(); if (goal) wakeUp(goal); } waitingForAWhile.clear(); @@ -3599,33 +3624,56 @@ unsigned int Worker::exitStatus() } +bool Worker::pathContentsGood(const Path & path) +{ + std::map<Path, bool>::iterator i = pathContentsGoodCache.find(path); + if (i != pathContentsGoodCache.end()) return i->second; + printMsg(lvlInfo, format("checking path ‘%1%’...") % path); + auto info = store.queryPathInfo(path); + bool res; + if (!pathExists(path)) + res = false; + else { + HashResult current = hashPath(info->narHash.type, path); + Hash nullHash(htSHA256); + res = info->narHash == nullHash || info->narHash == current.first; + } + pathContentsGoodCache[path] = res; + if (!res) printMsg(lvlError, format("path ‘%1%’ is corrupted or missing!") % path); + return res; +} + + +void Worker::markContentsGood(const Path & path) +{ + pathContentsGoodCache[path] = true; +} + + ////////////////////////////////////////////////////////////////////// void LocalStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) { - startNest(nest, lvlDebug, - format("building %1%") % showPaths(drvPaths)); - Worker worker(*this); Goals goals; - foreach (PathSet::const_iterator, i, drvPaths) { - DrvPathWithOutputs i2 = parseDrvPathWithOutputs(*i); + for (auto & i : drvPaths) { + DrvPathWithOutputs i2 = parseDrvPathWithOutputs(i); if (isDerivation(i2.first)) goals.insert(worker.makeDerivationGoal(i2.first, i2.second, buildMode)); else - goals.insert(worker.makeSubstitutionGoal(*i, buildMode)); + goals.insert(worker.makeSubstitutionGoal(i, buildMode)); } worker.run(goals); PathSet failed; - foreach (Goals::iterator, i, goals) - if 
((*i)->getExitCode() == Goal::ecFailed) { - DerivationGoal * i2 = dynamic_cast<DerivationGoal *>(i->get()); + for (auto & i : goals) + if (i->getExitCode() == Goal::ecFailed) { + DerivationGoal * i2 = dynamic_cast<DerivationGoal *>(i.get()); if (i2) failed.insert(i2->getDrvPath()); - else failed.insert(dynamic_cast<SubstitutionGoal *>(i->get())->getStorePath()); + else failed.insert(dynamic_cast<SubstitutionGoal *>(i.get())->getStorePath()); } if (!failed.empty()) @@ -3633,6 +3681,26 @@ void LocalStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) } +BuildResult LocalStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv, + BuildMode buildMode) +{ + Worker worker(*this); + auto goal = worker.makeBasicDerivationGoal(drvPath, drv, buildMode); + + BuildResult result; + + try { + worker.run(Goals{goal}); + result = goal->getResult(); + } catch (Error & e) { + result.status = BuildResult::MiscFailure; + result.errorMsg = e.msg(); + } + + return result; +} + + void LocalStore::ensurePath(const Path & path) { /* If the path is already valid, we're done. */ @@ -3640,7 +3708,7 @@ void LocalStore::ensurePath(const Path & path) Worker worker(*this); GoalPtr goal = worker.makeSubstitutionGoal(path); - Goals goals = singleton<Goals>(goal); + Goals goals = {goal}; worker.run(goals); @@ -3653,12 +3721,21 @@ void LocalStore::repairPath(const Path & path) { Worker worker(*this); GoalPtr goal = worker.makeSubstitutionGoal(path, true); - Goals goals = singleton<Goals>(goal); + Goals goals = {goal}; worker.run(goals); - if (goal->getExitCode() != Goal::ecSuccess) - throw Error(format("cannot repair path ‘%1%’") % path, worker.exitStatus()); + if (goal->getExitCode() != Goal::ecSuccess) { + /* Since substituting the path didn't work, if we have a valid + deriver, then rebuild the deriver. 
*/ + auto deriver = queryPathInfo(path)->deriver; + if (deriver != "" && isValidPath(deriver)) { + goals.clear(); + goals.insert(worker.makeDerivationGoal(deriver, StringSet(), bmRepair)); + worker.run(goals); + } else + throw Error(format("cannot repair path ‘%1%’") % path, worker.exitStatus()); + } } diff --git a/src/libstore/builtins.cc b/src/libstore/builtins.cc new file mode 100644 index 000000000000..e1ce36e922e1 --- /dev/null +++ b/src/libstore/builtins.cc @@ -0,0 +1,47 @@ +#include "builtins.hh" +#include "download.hh" +#include "store-api.hh" +#include "archive.hh" +#include "compression.hh" + +namespace nix { + +void builtinFetchurl(const BasicDerivation & drv) +{ + auto url = drv.env.find("url"); + if (url == drv.env.end()) throw Error("attribute ‘url’ missing"); + + /* No need to do TLS verification, because we check the hash of + the result anyway. */ + DownloadOptions options; + options.verifyTLS = false; + + /* Show a progress indicator, even though stderr is not a tty. */ + options.showProgress = DownloadOptions::yes; + + auto data = makeDownloader()->download(url->second, options); + assert(data.data); + + auto out = drv.env.find("out"); + if (out == drv.env.end()) throw Error("attribute ‘url’ missing"); + + Path storePath = out->second; + assertStorePath(storePath); + + auto unpack = drv.env.find("unpack"); + if (unpack != drv.env.end() && unpack->second == "1") { + if (string(*data.data, 0, 6) == string("\xfd" "7zXZ\0", 6)) + data.data = decompress("xz", *data.data); + StringSource source(*data.data); + restorePath(storePath, source); + } else + writeFile(storePath, *data.data); + + auto executable = drv.env.find("executable"); + if (executable != drv.env.end() && executable->second == "1") { + if (chmod(storePath.c_str(), 0755) == -1) + throw SysError(format("making ‘%1%’ executable") % storePath); + } +} + +} diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh new file mode 100644 index 000000000000..4b2431aa08cf --- /dev/null +++ 
b/src/libstore/builtins.hh @@ -0,0 +1,9 @@ +#pragma once + +#include "derivations.hh" + +namespace nix { + +void builtinFetchurl(const BasicDerivation & drv); + +} diff --git a/src/libstore/crypto.cc b/src/libstore/crypto.cc new file mode 100644 index 000000000000..747483afb30b --- /dev/null +++ b/src/libstore/crypto.cc @@ -0,0 +1,126 @@ +#include "crypto.hh" +#include "util.hh" +#include "globals.hh" + +#if HAVE_SODIUM +#include <sodium.h> +#endif + +namespace nix { + +static std::pair<std::string, std::string> split(const string & s) +{ + size_t colon = s.find(':'); + if (colon == std::string::npos || colon == 0) + return {"", ""}; + return {std::string(s, 0, colon), std::string(s, colon + 1)}; +} + +Key::Key(const string & s) +{ + auto ss = split(s); + + name = ss.first; + key = ss.second; + + if (name == "" || key == "") + throw Error("secret key is corrupt"); + + key = base64Decode(key); +} + +SecretKey::SecretKey(const string & s) + : Key(s) +{ +#if HAVE_SODIUM + if (key.size() != crypto_sign_SECRETKEYBYTES) + throw Error("secret key is not valid"); +#endif +} + +#if !HAVE_SODIUM +[[noreturn]] static void noSodium() +{ + throw Error("Nix was not compiled with libsodium, required for signed binary cache support"); +} +#endif + +std::string SecretKey::signDetached(const std::string & data) const +{ +#if HAVE_SODIUM + unsigned char sig[crypto_sign_BYTES]; + unsigned long long sigLen; + crypto_sign_detached(sig, &sigLen, (unsigned char *) data.data(), data.size(), + (unsigned char *) key.data()); + return name + ":" + base64Encode(std::string((char *) sig, sigLen)); +#else + noSodium(); +#endif +} + +PublicKey SecretKey::toPublicKey() const +{ +#if HAVE_SODIUM + unsigned char pk[crypto_sign_PUBLICKEYBYTES]; + crypto_sign_ed25519_sk_to_pk(pk, (unsigned char *) key.data()); + return PublicKey(name, std::string((char *) pk, crypto_sign_PUBLICKEYBYTES)); +#else + noSodium(); +#endif +} + +PublicKey::PublicKey(const string & s) + : Key(s) +{ +#if HAVE_SODIUM + if 
(key.size() != crypto_sign_PUBLICKEYBYTES) + throw Error("public key is not valid"); +#endif +} + +bool verifyDetached(const std::string & data, const std::string & sig, + const PublicKeys & publicKeys) +{ +#if HAVE_SODIUM + auto ss = split(sig); + + auto key = publicKeys.find(ss.first); + if (key == publicKeys.end()) return false; + + auto sig2 = base64Decode(ss.second); + if (sig2.size() != crypto_sign_BYTES) + throw Error("signature is not valid"); + + return crypto_sign_verify_detached((unsigned char *) sig2.data(), + (unsigned char *) data.data(), data.size(), + (unsigned char *) key->second.key.data()) == 0; +#else + noSodium(); +#endif +} + +PublicKeys getDefaultPublicKeys() +{ + PublicKeys publicKeys; + + // FIXME: filter duplicates + + for (auto s : settings.get("binary-cache-public-keys", Strings())) { + PublicKey key(s); + publicKeys.emplace(key.name, key); + } + + for (auto secretKeyFile : settings.get("secret-key-files", Strings())) { + try { + SecretKey secretKey(readFile(secretKeyFile)); + publicKeys.emplace(secretKey.name, secretKey.toPublicKey()); + } catch (SysError & e) { + /* Ignore unreadable key files. That's normal in a + multi-user installation. */ + } + } + + return publicKeys; +} + +} diff --git a/src/libstore/crypto.hh b/src/libstore/crypto.hh new file mode 100644 index 000000000000..9110af3aa9e5 --- /dev/null +++ b/src/libstore/crypto.hh @@ -0,0 +1,54 @@ +#pragma once + +#include "types.hh" + +#include <map> + +namespace nix { + +struct Key +{ + std::string name; + std::string key; + + /* Construct Key from a string in the format + ‘<name>:<key-in-base64>’. */ + Key(const std::string & s); + +protected: + Key(const std::string & name, const std::string & key) + : name(name), key(key) { } +}; + +struct PublicKey; + +struct SecretKey : Key +{ + SecretKey(const std::string & s); + + /* Return a detached signature of the given string. 
*/ + std::string signDetached(const std::string & s) const; + + PublicKey toPublicKey() const; +}; + +struct PublicKey : Key +{ + PublicKey(const std::string & data); + +private: + PublicKey(const std::string & name, const std::string & key) + : Key(name, key) { } + friend struct SecretKey; +}; + +typedef std::map<std::string, PublicKey> PublicKeys; + +/* Return true iff ‘sig’ is a correct signature over ‘data’ using one + of the given public keys. */ +bool verifyDetached(const std::string & data, const std::string & sig, + const PublicKeys & publicKeys); + +PublicKeys getDefaultPublicKeys(); + +} diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index fbc1d99f3d6f..becf8524546c 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -2,7 +2,7 @@ #include "store-api.hh" #include "globals.hh" #include "util.hh" -#include "misc.hh" +#include "worker-protocol.hh" namespace nix { @@ -26,21 +26,63 @@ void DerivationOutput::parseHashInfo(bool & recursive, HashType & hashType, Hash } -Path writeDerivation(StoreAPI & store, +Path BasicDerivation::findOutput(const string & id) const +{ + auto i = outputs.find(id); + if (i == outputs.end()) + throw Error(format("derivation has no output ‘%1%’") % id); + return i->second.path; +} + + +bool BasicDerivation::willBuildLocally() const +{ + return get(env, "preferLocalBuild") == "1" && canBuildLocally(); +} + + +bool BasicDerivation::substitutesAllowed() const +{ + return get(env, "allowSubstitutes", "1") == "1"; +} + + +bool BasicDerivation::isBuiltin() const +{ + return string(builder, 0, 8) == "builtin:"; +} + + +bool BasicDerivation::canBuildLocally() const +{ + return platform == settings.thisSystem + || isBuiltin() +#if __linux__ + || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux") + || (platform == "armv6l-linux" && settings.thisSystem == "armv7l-linux") +#elif __FreeBSD__ + || (platform == "i686-linux" && settings.thisSystem == "x86_64-freebsd") + || (platform 
== "i686-linux" && settings.thisSystem == "i686-freebsd") +#endif + ; +} + + +Path writeDerivation(ref<Store> store, const Derivation & drv, const string & name, bool repair) { PathSet references; references.insert(drv.inputSrcs.begin(), drv.inputSrcs.end()); - foreach (DerivationInputs::const_iterator, i, drv.inputDrvs) - references.insert(i->first); + for (auto & i : drv.inputDrvs) + references.insert(i.first); /* Note that the outputs of a derivation are *not* references (that can be missing (of course) and should not necessarily be held during a garbage collection). */ string suffix = name + drvExtension; - string contents = unparseDerivation(drv); + string contents = drv.unparse(); return settings.readOnlyMode ? computeStorePathForText(suffix, contents, references) - : store.addTextToStore(suffix, contents, references, repair); + : store->addTextToStore(suffix, contents, references, repair); } @@ -148,44 +190,44 @@ static void printStrings(string & res, ForwardIterator i, ForwardIterator j) } -string unparseDerivation(const Derivation & drv) +string Derivation::unparse() const { string s; s.reserve(65536); s += "Derive(["; bool first = true; - foreach (DerivationOutputs::const_iterator, i, drv.outputs) { + for (auto & i : outputs) { if (first) first = false; else s += ','; - s += '('; printString(s, i->first); - s += ','; printString(s, i->second.path); - s += ','; printString(s, i->second.hashAlgo); - s += ','; printString(s, i->second.hash); + s += '('; printString(s, i.first); + s += ','; printString(s, i.second.path); + s += ','; printString(s, i.second.hashAlgo); + s += ','; printString(s, i.second.hash); s += ')'; } s += "],["; first = true; - foreach (DerivationInputs::const_iterator, i, drv.inputDrvs) { + for (auto & i : inputDrvs) { if (first) first = false; else s += ','; - s += '('; printString(s, i->first); - s += ','; printStrings(s, i->second.begin(), i->second.end()); + s += '('; printString(s, i.first); + s += ','; printStrings(s, 
i.second.begin(), i.second.end()); s += ')'; } s += "],"; - printStrings(s, drv.inputSrcs.begin(), drv.inputSrcs.end()); + printStrings(s, inputSrcs.begin(), inputSrcs.end()); - s += ','; printString(s, drv.platform); - s += ','; printString(s, drv.builder); - s += ','; printStrings(s, drv.args.begin(), drv.args.end()); + s += ','; printString(s, platform); + s += ','; printString(s, builder); + s += ','; printStrings(s, args.begin(), args.end()); s += ",["; first = true; - foreach (StringPairs::const_iterator, i, drv.env) { + for (auto & i : env) { if (first) first = false; else s += ','; - s += '('; printString(s, i->first); - s += ','; printString(s, i->second); + s += '('; printString(s, i.first); + s += ','; printString(s, i.second); s += ')'; } @@ -201,11 +243,11 @@ bool isDerivation(const string & fileName) } -bool isFixedOutputDrv(const Derivation & drv) +bool BasicDerivation::isFixedOutput() const { - return drv.outputs.size() == 1 && - drv.outputs.begin()->first == "out" && - drv.outputs.begin()->second.hash != ""; + return outputs.size() == 1 && + outputs.begin()->first == "out" && + outputs.begin()->second.hash != ""; } @@ -232,10 +274,10 @@ DrvHashes drvHashes; paths have been replaced by the result of a recursive call to this function, and that for fixed-output derivations we return a hash of its output path. */ -Hash hashDerivationModulo(StoreAPI & store, Derivation drv) +Hash hashDerivationModulo(Store & store, Derivation drv) { /* Return a fixed hash for fixed-output derivations. 
*/ - if (isFixedOutputDrv(drv)) { + if (drv.isFixedOutput()) { DerivationOutputs::const_iterator i = drv.outputs.begin(); return hashString(htSHA256, "fixed:out:" + i->second.hashAlgo + ":" @@ -246,19 +288,19 @@ Hash hashDerivationModulo(StoreAPI & store, Derivation drv) /* For other derivations, replace the inputs paths with recursive calls to this function.*/ DerivationInputs inputs2; - foreach (DerivationInputs::const_iterator, i, drv.inputDrvs) { - Hash h = drvHashes[i->first]; - if (h.type == htUnknown) { - assert(store.isValidPath(i->first)); - Derivation drv2 = readDerivation(i->first); + for (auto & i : drv.inputDrvs) { + Hash h = drvHashes[i.first]; + if (!h) { + assert(store.isValidPath(i.first)); + Derivation drv2 = readDerivation(i.first); h = hashDerivationModulo(store, drv2); - drvHashes[i->first] = h; + drvHashes[i.first] = h; } - inputs2[printHash(h)] = i->second; + inputs2[printHash(h)] = i.second; } drv.inputDrvs = inputs2; - return hashString(htSHA256, unparseDerivation(drv)); + return hashString(htSHA256, drv.unparse()); } @@ -285,13 +327,53 @@ bool wantOutput(const string & output, const std::set<string> & wanted) } -PathSet outputPaths(const Derivation & drv) +PathSet BasicDerivation::outputPaths() const { PathSet paths; - for (auto & i : drv.outputs) + for (auto & i : outputs) paths.insert(i.second.path); return paths; } +Source & operator >> (Source & in, BasicDerivation & drv) +{ + drv.outputs.clear(); + auto nr = readInt(in); + for (unsigned int n = 0; n < nr; n++) { + auto name = readString(in); + DerivationOutput o; + in >> o.path >> o.hashAlgo >> o.hash; + assertStorePath(o.path); + drv.outputs[name] = o; + } + + drv.inputSrcs = readStorePaths<PathSet>(in); + in >> drv.platform >> drv.builder; + drv.args = readStrings<Strings>(in); + + nr = readInt(in); + for (unsigned int n = 0; n < nr; n++) { + auto key = readString(in); + auto value = readString(in); + drv.env[key] = value; + } + + return in; +} + + +Sink & operator << (Sink & out, 
const BasicDerivation & drv) +{ + out << drv.outputs.size(); + for (auto & i : drv.outputs) + out << i.first << i.second.path << i.second.hashAlgo << i.second.hash; + out << drv.inputSrcs << drv.platform << drv.builder << drv.args; + out << drv.env.size(); + for (auto & i : drv.env) + out << i.first << i.second; + return out; +} + + } diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh index 8d5e4d05d469..6f98869b0fe0 100644 --- a/src/libstore/derivations.hh +++ b/src/libstore/derivations.hh @@ -40,44 +40,66 @@ typedef std::map<Path, StringSet> DerivationInputs; typedef std::map<string, string> StringPairs; -struct Derivation +struct BasicDerivation { DerivationOutputs outputs; /* keyed on symbolic IDs */ - DerivationInputs inputDrvs; /* inputs that are sub-derivations */ PathSet inputSrcs; /* inputs that are sources */ string platform; Path builder; Strings args; StringPairs env; + + virtual ~BasicDerivation() { }; + + /* Return the path corresponding to the output identifier `id' in + the given derivation. */ + Path findOutput(const string & id) const; + + bool willBuildLocally() const; + + bool substitutesAllowed() const; + + bool isBuiltin() const; + + bool canBuildLocally() const; + + /* Return true iff this is a fixed-output derivation. */ + bool isFixedOutput() const; + + /* Return the output paths of a derivation. */ + PathSet outputPaths() const; + }; +struct Derivation : BasicDerivation +{ + DerivationInputs inputDrvs; /* inputs that are sub-derivations */ + + /* Print a derivation. */ + std::string unparse() const; +}; -class StoreAPI; + +class Store; /* Write a derivation to the Nix store, and return its path. */ -Path writeDerivation(StoreAPI & store, +Path writeDerivation(ref<Store> store, const Derivation & drv, const string & name, bool repair = false); /* Read a derivation from a file. */ Derivation readDerivation(const Path & drvPath); -/* Print a derivation. 
*/ -string unparseDerivation(const Derivation & drv); - -/* Check whether a file name ends with the extensions for +/* Check whether a file name ends with the extension for derivations. */ bool isDerivation(const string & fileName); -/* Return true iff this is a fixed-output derivation. */ -bool isFixedOutputDrv(const Derivation & drv); - -Hash hashDerivationModulo(StoreAPI & store, Derivation drv); +Hash hashDerivationModulo(Store & store, Derivation drv); /* Memoisation of hashDerivationModulo(). */ typedef std::map<Path, Hash> DrvHashes; -extern DrvHashes drvHashes; +extern DrvHashes drvHashes; // FIXME: global, not thread-safe /* Split a string specifying a derivation and a set of outputs (/nix/store/hash-foo!out1,out2,...) into the derivation path and @@ -89,6 +111,10 @@ Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outpu bool wantOutput(const string & output, const std::set<string> & wanted); -PathSet outputPaths(const Derivation & drv); +struct Source; +struct Sink; + +Source & operator >> (Source & in, BasicDerivation & drv); +Sink & operator << (Sink & out, const BasicDerivation & drv); } diff --git a/src/libstore/download.cc b/src/libstore/download.cc new file mode 100644 index 000000000000..6e39330e40d9 --- /dev/null +++ b/src/libstore/download.cc @@ -0,0 +1,320 @@ +#include "download.hh" +#include "util.hh" +#include "globals.hh" +#include "hash.hh" +#include "store-api.hh" + +#include <curl/curl.h> + +#include <iostream> + + +namespace nix { + +double getTime() +{ + struct timeval tv; + gettimeofday(&tv, 0); + return tv.tv_sec + (tv.tv_usec / 1000000.0); +} + +std::string resolveUri(const std::string & uri) +{ + if (uri.compare(0, 8, "channel:") == 0) + return "https://nixos.org/channels/" + std::string(uri, 8) + "/nixexprs.tar.xz"; + else + return uri; +} + +struct CurlDownloader : public Downloader +{ + CURL * curl; + ref<std::string> data; + string etag, status, expectedETag; + + struct curl_slist * requestHeaders; + + 
bool showProgress; + double prevProgressTime{0}, startTime{0}; + unsigned int moveBack{1}; + + size_t writeCallback(void * contents, size_t size, size_t nmemb) + { + size_t realSize = size * nmemb; + data->append((char *) contents, realSize); + return realSize; + } + + static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp) + { + return ((CurlDownloader *) userp)->writeCallback(contents, size, nmemb); + } + + size_t headerCallback(void * contents, size_t size, size_t nmemb) + { + size_t realSize = size * nmemb; + string line = string((char *) contents, realSize); + printMsg(lvlVomit, format("got header: %1%") % trim(line)); + if (line.compare(0, 5, "HTTP/") == 0) { // new response starts + etag = ""; + auto ss = tokenizeString<vector<string>>(line, " "); + status = ss.size() >= 2 ? ss[1] : ""; + } else { + auto i = line.find(':'); + if (i != string::npos) { + string name = trim(string(line, 0, i)); + if (name == "ETag") { // FIXME: case + etag = trim(string(line, i + 1)); + /* Hack to work around a GitHub bug: it sends + ETags, but ignores If-None-Match. So if we get + the expected ETag on a 200 response, then shut + down the connection because we already have the + data. */ + printMsg(lvlDebug, format("got ETag: %1%") % etag); + if (etag == expectedETag && status == "200") { + printMsg(lvlDebug, format("shutting down on 200 HTTP response with expected ETag")); + return 0; + } + } + } + } + return realSize; + } + + static size_t headerCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp) + { + return ((CurlDownloader *) userp)->headerCallback(contents, size, nmemb); + } + + int progressCallback(double dltotal, double dlnow) + { + if (showProgress) { + double now = getTime(); + if (prevProgressTime <= now - 1) { + string s = (format(" [%1$.0f/%2$.0f KiB, %3$.1f KiB/s]") + % (dlnow / 1024.0) + % (dltotal / 1024.0) + % (now == startTime ? 
0 : dlnow / 1024.0 / (now - startTime))).str(); + std::cerr << "\e[" << moveBack << "D" << s; + moveBack = s.size(); + std::cerr.flush(); + prevProgressTime = now; + } + } + return _isInterrupted; + } + + static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow) + { + return ((CurlDownloader *) userp)->progressCallback(dltotal, dlnow); + } + + CurlDownloader() + : data(make_ref<std::string>()) + { + requestHeaders = 0; + + curl = curl_easy_init(); + if (!curl) throw nix::Error("unable to initialize curl"); + } + + ~CurlDownloader() + { + if (curl) curl_easy_cleanup(curl); + if (requestHeaders) curl_slist_free_all(requestHeaders); + } + + bool fetch(const string & url, const DownloadOptions & options) + { + showProgress = + options.showProgress == DownloadOptions::yes || + (options.showProgress == DownloadOptions::automatic && isatty(STDERR_FILENO)); + + curl_easy_reset(curl); + + curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); + curl_easy_setopt(curl, CURLOPT_USERAGENT, ("Nix/" + nixVersion).c_str()); + curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); + + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeCallbackWrapper); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *) this); + + curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, headerCallbackWrapper); + curl_easy_setopt(curl, CURLOPT_HEADERDATA, (void *) this); + + curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper); + curl_easy_setopt(curl, CURLOPT_PROGRESSDATA, (void *) this); + curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0); + + curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); + + curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); + + if (options.verifyTLS) + curl_easy_setopt(curl, CURLOPT_CAINFO, getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt").c_str()); + else { + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0); + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0); + } + + data->clear(); + + if (requestHeaders) { + 
curl_slist_free_all(requestHeaders); + requestHeaders = 0; + } + + if (!options.expectedETag.empty()) { + this->expectedETag = options.expectedETag; + requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + options.expectedETag).c_str()); + } + + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, requestHeaders); + + if (options.head) + curl_easy_setopt(curl, CURLOPT_NOBODY, 1); + + if (showProgress) { + std::cerr << (format("downloading ‘%1%’... ") % url); + std::cerr.flush(); + startTime = getTime(); + } + + CURLcode res = curl_easy_perform(curl); + if (showProgress) + //std::cerr << "\e[" << moveBack << "D\e[K\n"; + std::cerr << "\n"; + checkInterrupt(); + if (res == CURLE_WRITE_ERROR && etag == options.expectedETag) return false; + + long httpStatus = -1; + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &httpStatus); + + if (res != CURLE_OK) { + Error err = + httpStatus == 404 ? NotFound : + httpStatus == 403 ? Forbidden : Misc; + throw DownloadError(err, format("unable to download ‘%1%’: %2% (%3%)") + % url % curl_easy_strerror(res) % res); + } + + if (httpStatus == 304) return false; + + return true; + } + + DownloadResult download(string url, const DownloadOptions & options) override + { + DownloadResult res; + if (fetch(resolveUri(url), options)) { + res.cached = false; + res.data = data; + } else + res.cached = true; + res.etag = etag; + return res; + } +}; + +ref<Downloader> makeDownloader() +{ + return make_ref<CurlDownloader>(); +} + +Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack) +{ + auto url = resolveUri(url_); + + Path cacheDir = getCacheDir() + "/nix/tarballs"; + createDirs(cacheDir); + + string urlHash = printHash32(hashString(htSHA256, url)); + + Path dataFile = cacheDir + "/" + urlHash + ".info"; + Path fileLink = cacheDir + "/" + urlHash + "-file"; + + Path storePath; + + string expectedETag; + + int ttl = settings.get("tarball-ttl", 60 * 60); + bool skip = false; + + if (pathExists(fileLink) && 
pathExists(dataFile)) { + storePath = readLink(fileLink); + store->addTempRoot(storePath); + if (store->isValidPath(storePath)) { + auto ss = tokenizeString<vector<string>>(readFile(dataFile), "\n"); + if (ss.size() >= 3 && ss[0] == url) { + time_t lastChecked; + if (string2Int(ss[2], lastChecked) && lastChecked + ttl >= time(0)) + skip = true; + else if (!ss[1].empty()) { + printMsg(lvlDebug, format("verifying previous ETag ‘%1%’") % ss[1]); + expectedETag = ss[1]; + } + } + } else + storePath = ""; + } + + string name; + auto p = url.rfind('/'); + if (p != string::npos) name = string(url, p + 1); + + if (!skip) { + + try { + DownloadOptions options; + options.expectedETag = expectedETag; + auto res = download(url, options); + + if (!res.cached) + storePath = store->addTextToStore(name, *res.data, PathSet(), false); + + assert(!storePath.empty()); + replaceSymlink(storePath, fileLink); + + writeFile(dataFile, url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n"); + } catch (DownloadError & e) { + if (storePath.empty()) throw; + printMsg(lvlError, format("warning: %1%; using cached result") % e.msg()); + } + } + + if (unpack) { + Path unpackedLink = cacheDir + "/" + baseNameOf(storePath) + "-unpacked"; + Path unpackedStorePath; + if (pathExists(unpackedLink)) { + unpackedStorePath = readLink(unpackedLink); + store->addTempRoot(unpackedStorePath); + if (!store->isValidPath(unpackedStorePath)) + unpackedStorePath = ""; + } + if (unpackedStorePath.empty()) { + printMsg(lvlInfo, format("unpacking ‘%1%’...") % url); + Path tmpDir = createTempDir(); + AutoDelete autoDelete(tmpDir, true); + // FIXME: this requires GNU tar for decompression. 
+ runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"}, ""); + unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, false); + } + replaceSymlink(unpackedStorePath, unpackedLink); + return unpackedStorePath; + } + + return storePath; +} + + +bool isUri(const string & s) +{ + if (s.compare(0, 8, "channel:") == 0) return true; + size_t pos = s.find("://"); + if (pos == string::npos) return false; + string scheme(s, 0, pos); + return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git"; +} + + +} diff --git a/src/libstore/download.hh b/src/libstore/download.hh new file mode 100644 index 000000000000..eb2b76678ac7 --- /dev/null +++ b/src/libstore/download.hh @@ -0,0 +1,48 @@ +#pragma once + +#include "types.hh" + +#include <string> + +namespace nix { + +struct DownloadOptions +{ + string expectedETag; + bool verifyTLS{true}; + enum { yes, no, automatic } showProgress{yes}; + bool head{false}; +}; + +struct DownloadResult +{ + bool cached; + string etag; + std::shared_ptr<std::string> data; +}; + +class Store; + +struct Downloader +{ + virtual DownloadResult download(string url, const DownloadOptions & options) = 0; + + Path downloadCached(ref<Store> store, const string & url, bool unpack); + + enum Error { NotFound, Forbidden, Misc }; +}; + +ref<Downloader> makeDownloader(); + +class DownloadError : public Error +{ +public: + Downloader::Error error; + DownloadError(Downloader::Error error, const FormatOrString & fs) + : Error(fs), error(error) + { } +}; + +bool isUri(const string & s); + +} diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc new file mode 100644 index 000000000000..4ec01add3026 --- /dev/null +++ b/src/libstore/export-import.cc @@ -0,0 +1,136 @@ +#include "store-api.hh" +#include "archive.hh" +#include "worker-protocol.hh" + +#include <algorithm> + +namespace nix { + +struct HashAndWriteSink : Sink +{ + Sink & writeSink; 
+ HashSink hashSink; + HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256) + { + } + virtual void operator () (const unsigned char * data, size_t len) + { + writeSink(data, len); + hashSink(data, len); + } + Hash currentHash() + { + return hashSink.currentHash().first; + } +}; + +void Store::exportPaths(const Paths & paths, Sink & sink) +{ + Paths sorted = topoSortPaths(PathSet(paths.begin(), paths.end())); + std::reverse(sorted.begin(), sorted.end()); + + std::string doneLabel("paths exported"); + logger->incExpected(doneLabel, sorted.size()); + + for (auto & path : sorted) { + Activity act(*logger, lvlInfo, format("exporting path ‘%s’") % path); + sink << 1; + exportPath(path, sink); + logger->incProgress(doneLabel); + } + + sink << 0; +} + +void Store::exportPath(const Path & path, Sink & sink) +{ + auto info = queryPathInfo(path); + + HashAndWriteSink hashAndWriteSink(sink); + + narFromPath(path, hashAndWriteSink); + + /* Refuse to export paths that have changed. This prevents + filesystem corruption from spreading to other machines. + Don't complain if the stored hash is zero (unknown). 
*/ + Hash hash = hashAndWriteSink.currentHash(); + if (hash != info->narHash && info->narHash != Hash(info->narHash.type)) + throw Error(format("hash of path ‘%1%’ has changed from ‘%2%’ to ‘%3%’!") % path + % printHash(info->narHash) % printHash(hash)); + + hashAndWriteSink << exportMagic << path << info->references << info->deriver << 0; +} + +struct TeeSource : Source +{ + Source & readSource; + ref<std::string> data; + TeeSource(Source & readSource) + : readSource(readSource) + , data(make_ref<std::string>()) + { + } + size_t read(unsigned char * data, size_t len) + { + size_t n = readSource.read(data, len); + this->data->append((char *) data, n); + return n; + } +}; + +struct NopSink : ParseSink +{ +}; + +Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor) +{ + Paths res; + while (true) { + unsigned long long n = readLongLong(source); + if (n == 0) break; + if (n != 1) throw Error("input doesn't look like something created by ‘nix-store --export’"); + + /* Extract the NAR from the source. */ + TeeSource tee(source); + NopSink sink; + parseDump(sink, tee); + + uint32_t magic = readInt(source); + if (magic != exportMagic) + throw Error("Nix archive cannot be imported; wrong format"); + + ValidPathInfo info; + + info.path = readStorePath(source); + + Activity act(*logger, lvlInfo, format("importing path ‘%s’") % info.path); + + info.references = readStorePaths<PathSet>(source); + + info.deriver = readString(source); + if (info.deriver != "") assertStorePath(info.deriver); + + info.narHash = hashString(htSHA256, *tee.data); + info.narSize = tee.data->size(); + + // Ignore optional legacy signature. + if (readInt(source) == 1) + readString(source); + + addToStore(info, *tee.data); + + // FIXME: implement accessors? 
+ assert(!accessor); +#if 0 + auto accessor_ = std::dynamic_pointer_cast<BinaryCacheStoreAccessor>(accessor); + if (accessor_) + accessor_->nars.emplace(info.path, makeNarAccessor(tee.data)); +#endif + + res.push_back(info.path); + } + + return res; +} + +} diff --git a/src/libstore/fs-accessor.hh b/src/libstore/fs-accessor.hh new file mode 100644 index 000000000000..a67e0775b978 --- /dev/null +++ b/src/libstore/fs-accessor.hh @@ -0,0 +1,30 @@ +#pragma once + +#include "types.hh" + +namespace nix { + +/* An abstract class for accessing a filesystem-like structure, such + as a (possibly remote) Nix store or the contents of a NAR file. */ +class FSAccessor +{ +public: + enum Type { tMissing, tRegular, tSymlink, tDirectory }; + + struct Stat + { + Type type; + uint64_t fileSize; // regular files only + bool isExecutable; // regular files only + }; + + virtual Stat stat(const Path & path) = 0; + + virtual StringSet readDirectory(const Path & path) = 0; + + virtual std::string readFile(const Path & path) = 0; + + virtual std::string readLink(const Path & path) = 0; +}; + +} diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 8d7da67f5204..f4cb672cdeb9 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -1,5 +1,5 @@ +#include "derivations.hh" #include "globals.hh" -#include "misc.hh" #include "local-store.hh" #include <functional> @@ -83,7 +83,7 @@ void LocalStore::addIndirectRoot(const Path & path) } -Path addPermRoot(StoreAPI & store, const Path & _storePath, +Path Store::addPermRoot(const Path & _storePath, const Path & _gcRoot, bool indirect, bool allowOutsideRootsDir) { Path storePath(canonPath(_storePath)); @@ -101,7 +101,7 @@ Path addPermRoot(StoreAPI & store, const Path & _storePath, if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot)))) throw Error(format("cannot create symlink ‘%1%’; already exists") % gcRoot); makeSymlink(gcRoot, storePath); - store.addIndirectRoot(gcRoot); + addIndirectRoot(gcRoot); } else { @@ -127,7 
+127,7 @@ Path addPermRoot(StoreAPI & store, const Path & _storePath, check if the root is in a directory in or linked from the gcroots directory. */ if (settings.checkRootReachability) { - Roots roots = store.findRoots(); + Roots roots = findRoots(); if (roots.find(gcRoot) == roots.end()) printMsg(lvlError, format( @@ -139,7 +139,7 @@ Path addPermRoot(StoreAPI & store, const Path & _storePath, /* Grab the global GC root, causing us to block while a GC is in progress. This prevents the set of permanent roots from increasing while a GC is in progress. */ - store.syncWithGC(); + syncWithGC(); return gcRoot; } @@ -147,35 +147,36 @@ Path addPermRoot(StoreAPI & store, const Path & _storePath, void LocalStore::addTempRoot(const Path & path) { + auto state(_state.lock()); + /* Create the temporary roots file for this process. */ - if (fdTempRoots == -1) { + if (state->fdTempRoots == -1) { while (1) { Path dir = (format("%1%/%2%") % settings.nixStateDir % tempRootsDir).str(); createDirs(dir); - fnTempRoots = (format("%1%/%2%") - % dir % getpid()).str(); + state->fnTempRoots = (format("%1%/%2%") % dir % getpid()).str(); AutoCloseFD fdGCLock = openGCLock(ltRead); - if (pathExists(fnTempRoots)) + if (pathExists(state->fnTempRoots)) /* It *must* be stale, since there can be no two processes with the same pid. */ - unlink(fnTempRoots.c_str()); + unlink(state->fnTempRoots.c_str()); - fdTempRoots = openLockFile(fnTempRoots, true); + state->fdTempRoots = openLockFile(state->fnTempRoots, true); fdGCLock.close(); - debug(format("acquiring read lock on ‘%1%’") % fnTempRoots); - lockFile(fdTempRoots, ltRead, true); + debug(format("acquiring read lock on ‘%1%’") % state->fnTempRoots); + lockFile(state->fdTempRoots, ltRead, true); /* Check whether the garbage collector didn't get in our way. 
*/ struct stat st; - if (fstat(fdTempRoots, &st) == -1) - throw SysError(format("statting ‘%1%’") % fnTempRoots); + if (fstat(state->fdTempRoots, &st) == -1) + throw SysError(format("statting ‘%1%’") % state->fnTempRoots); if (st.st_size == 0) break; /* The garbage collector deleted this file before we could @@ -187,15 +188,15 @@ void LocalStore::addTempRoot(const Path & path) /* Upgrade the lock to a write lock. This will cause us to block if the garbage collector is holding our lock. */ - debug(format("acquiring write lock on ‘%1%’") % fnTempRoots); - lockFile(fdTempRoots, ltWrite, true); + debug(format("acquiring write lock on ‘%1%’") % state->fnTempRoots); + lockFile(state->fdTempRoots, ltWrite, true); string s = path + '\0'; - writeFull(fdTempRoots, s); + writeFull(state->fdTempRoots, s); /* Downgrade to a read lock. */ - debug(format("downgrading to read lock on ‘%1%’") % fnTempRoots); - lockFile(fdTempRoots, ltRead, true); + debug(format("downgrading to read lock on ‘%1%’") % state->fnTempRoots); + lockFile(state->fdTempRoots, ltRead, true); } @@ -260,19 +261,16 @@ static void readTempRoots(PathSet & tempRoots, FDs & fds) } -static void foundRoot(StoreAPI & store, - const Path & path, const Path & target, Roots & roots) +void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots) { - Path storePath = toStorePath(target); - if (store.isValidPath(storePath)) - roots[path] = storePath; - else - printMsg(lvlInfo, format("skipping invalid root from ‘%1%’ to ‘%2%’") % path % storePath); -} - + auto foundRoot = [&](const Path & path, const Path & target) { + Path storePath = toStorePath(target); + if (isStorePath(storePath) && isValidPath(storePath)) + roots[path] = storePath; + else + printMsg(lvlInfo, format("skipping invalid root from ‘%1%’ to ‘%2%’") % path % storePath); + }; -static void findRoots(StoreAPI & store, const Path & path, unsigned char type, Roots & roots) -{ try { if (type == DT_UNKNOWN) @@ -280,13 +278,13 @@ static void 
findRoots(StoreAPI & store, const Path & path, unsigned char type, R if (type == DT_DIR) { for (auto & i : readDirectory(path)) - findRoots(store, path + "/" + i.name, i.type, roots); + findRoots(path + "/" + i.name, i.type, roots); } else if (type == DT_LNK) { Path target = readLink(path); if (isInStore(target)) - foundRoot(store, path, target, roots); + foundRoot(path, target); /* Handle indirect roots. */ else { @@ -300,14 +298,14 @@ static void findRoots(StoreAPI & store, const Path & path, unsigned char type, R struct stat st2 = lstat(target); if (!S_ISLNK(st2.st_mode)) return; Path target2 = readLink(target); - if (isInStore(target2)) foundRoot(store, target, target2, roots); + if (isInStore(target2)) foundRoot(target, target2); } } } else if (type == DT_REG) { Path storePath = settings.nixStore + "/" + baseNameOf(path); - if (store.isValidPath(storePath)) + if (isStorePath(storePath) && isValidPath(storePath)) roots[path] = storePath; } @@ -328,16 +326,16 @@ Roots LocalStore::findRoots() Roots roots; /* Process direct roots in {gcroots,manifests,profiles}. 
*/ - nix::findRoots(*this, settings.nixStateDir + "/" + gcRootsDir, DT_UNKNOWN, roots); + findRoots(settings.nixStateDir + "/" + gcRootsDir, DT_UNKNOWN, roots); if (pathExists(settings.nixStateDir + "/manifests")) - nix::findRoots(*this, settings.nixStateDir + "/manifests", DT_UNKNOWN, roots); - nix::findRoots(*this, settings.nixStateDir + "/profiles", DT_UNKNOWN, roots); + findRoots(settings.nixStateDir + "/manifests", DT_UNKNOWN, roots); + findRoots(settings.nixStateDir + "/profiles", DT_UNKNOWN, roots); return roots; } -static void addAdditionalRoots(StoreAPI & store, PathSet & roots) +void LocalStore::findRuntimeRoots(PathSet & roots) { Path rootFinder = getEnv("NIX_ROOT_FINDER", settings.nixLibexecDir + "/nix/find-runtime-roots.pl"); @@ -350,15 +348,14 @@ static void addAdditionalRoots(StoreAPI & store, PathSet & roots) StringSet paths = tokenizeString<StringSet>(result, "\n"); - foreach (StringSet::iterator, i, paths) { - if (isInStore(*i)) { - Path path = toStorePath(*i); - if (roots.find(path) == roots.end() && store.isValidPath(path)) { + for (auto & i : paths) + if (isInStore(i)) { + Path path = toStorePath(i); + if (roots.find(path) == roots.end() && isStorePath(path) && isValidPath(path)) { debug(format("got additional root ‘%1%’") % path); roots.insert(path); } } - } } @@ -405,12 +402,12 @@ void LocalStore::deletePathRecursive(GCState & state, const Path & path) unsigned long long size = 0; - if (isValidPath(path)) { + if (isStorePath(path) && isValidPath(path)) { PathSet referrers; queryReferrers(path, referrers); - foreach (PathSet::iterator, i, referrers) - if (*i != path) deletePathRecursive(state, *i); - size = queryPathInfo(path).narSize; + for (auto & i : referrers) + if (i != path) deletePathRecursive(state, i); + size = queryPathInfo(path)->narSize; invalidatePathChecked(path); } @@ -476,7 +473,7 @@ bool LocalStore::canReachRoot(GCState & state, PathSet & visited, const Path & p visited.insert(path); - if (!isValidPath(path)) return false; + 
if (!isStorePath(path) || !isValidPath(path)) return false; PathSet incoming; @@ -487,22 +484,22 @@ bool LocalStore::canReachRoot(GCState & state, PathSet & visited, const Path & p don't delete the derivation if any of the outputs are alive. */ if (state.gcKeepDerivations && isDerivation(path)) { PathSet outputs = queryDerivationOutputs(path); - foreach (PathSet::iterator, i, outputs) - if (isValidPath(*i) && queryDeriver(*i) == path) - incoming.insert(*i); + for (auto & i : outputs) + if (isValidPath(i) && queryPathInfo(i)->deriver == path) + incoming.insert(i); } /* If gc-keep-outputs is set, then don't delete this path if there are derivers of this path that are not garbage. */ if (state.gcKeepOutputs) { PathSet derivers = queryValidDerivers(path); - foreach (PathSet::iterator, i, derivers) - incoming.insert(*i); + for (auto & i : derivers) + incoming.insert(i); } - foreach (PathSet::iterator, i, incoming) - if (*i != path) - if (canReachRoot(state, visited, *i)) { + for (auto & i : incoming) + if (i != path) + if (canReachRoot(state, visited, i)) { state.alive.insert(path); return true; } @@ -517,9 +514,9 @@ void LocalStore::tryToDelete(GCState & state, const Path & path) if (path == linksDir || path == state.trashDir) return; - startNest(nest, lvlDebug, format("considering whether to delete ‘%1%’") % path); + Activity act(*logger, lvlDebug, format("considering whether to delete ‘%1%’") % path); - if (!isValidPath(path)) { + if (!isStorePath(path) || !isValidPath(path)) { /* A lock file belonging to a path that we're building right now isn't garbage. */ if (isActiveTempFile(state, path, ".lock")) return; @@ -612,6 +609,9 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) state.shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific; + if (state.shouldDelete) + deletePath(reservedPath); + /* Acquire the global GC root. This prevents a) New roots from being added. 
b) Processes from creating new temporary root files. */ @@ -622,14 +622,14 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) printMsg(lvlError, format("finding garbage collector roots...")); Roots rootMap = options.ignoreLiveness ? Roots() : findRoots(); - foreach (Roots::iterator, i, rootMap) state.roots.insert(i->second); + for (auto & i : rootMap) state.roots.insert(i.second); /* Add additional roots returned by the program specified by the NIX_ROOT_FINDER environment variable. This is typically used to add running programs to the set of roots (to prevent them from being garbage collected). */ if (!options.ignoreLiveness) - addAdditionalRoots(*this, state.roots); + findRuntimeRoots(state.roots); /* Read the temporary roots. This acquires read locks on all per-process temporary root files. So after this point no paths @@ -659,11 +659,11 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) if (options.action == GCOptions::gcDeleteSpecific) { - foreach (PathSet::iterator, i, options.pathsToDelete) { - assertStorePath(*i); - tryToDelete(state, *i); - if (state.dead.find(*i) == state.dead.end()) - throw Error(format("cannot delete path ‘%1%’ since it is still alive") % *i); + for (auto & i : options.pathsToDelete) { + assertStorePath(i); + tryToDelete(state, i); + if (state.dead.find(i) == state.dead.end()) + throw Error(format("cannot delete path ‘%1%’ since it is still alive") % i); } } else if (options.maxFreed > 0) { @@ -691,7 +691,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) string name = dirent->d_name; if (name == "." 
|| name == "..") continue; Path path = settings.nixStore + "/" + name; - if (isValidPath(path)) + if (isStorePath(path) && isValidPath(path)) entries.push_back(path); else tryToDelete(state, path); @@ -707,8 +707,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) vector<Path> entries_(entries.begin(), entries.end()); random_shuffle(entries_.begin(), entries_.end()); - foreach (vector<Path>::iterator, i, entries_) - tryToDelete(state, *i); + for (auto & i : entries_) + tryToDelete(state, i); } catch (GCLimitReached & e) { } diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 50374f782dee..c12178e4028a 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -28,7 +28,6 @@ Settings::Settings() keepFailed = false; keepGoing = false; tryFallback = false; - buildVerbosity = lvlError; maxBuildJobs = 1; buildCores = 1; #ifdef _SC_NPROCESSORS_ONLN @@ -40,7 +39,6 @@ Settings::Settings() maxSilentTime = 0; buildTimeout = 0; useBuildHook = true; - printBuildTrace = false; reservedSize = 8 * 1024 * 1024; fsyncMetadata = true; useSQLiteWAL = true; @@ -52,7 +50,6 @@ Settings::Settings() keepLog = true; compressLog = true; maxLogSize = 0; - cacheFailure = false; pollInterval = 5; checkRootReachability = false; gcKeepOutputs = false; @@ -77,6 +74,11 @@ void Settings::processEnvironment() nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)); nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)); nixDaemonSocketFile = canonPath(nixStateDir + DEFAULT_SOCKET_PATH); + + // should be set with the other config options, but depends on nixLibexecDir +#ifdef __APPLE__ + preBuildHook = nixLibexecDir + "/nix/resolve-system-dependencies.pl"; +#endif } @@ -170,7 +172,6 @@ void Settings::update() _get(keepLog, "build-keep-log"); _get(compressLog, "build-compress-log"); _get(maxLogSize, "build-max-log-size"); - _get(cacheFailure, "build-cache-failure"); _get(pollInterval, "build-poll-interval"); 
_get(checkRootReachability, "gc-check-reachability"); _get(gcKeepOutputs, "gc-keep-outputs"); @@ -183,20 +184,6 @@ void Settings::update() _get(enableImportNative, "allow-unsafe-native-code-during-evaluation"); _get(useCaseHack, "use-case-hack"); _get(preBuildHook, "pre-build-hook"); - - string subs = getEnv("NIX_SUBSTITUTERS", "default"); - if (subs == "default") { - substituters.clear(); -#if 0 - if (getEnv("NIX_OTHER_STORES") != "") - substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl"); -#endif - substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl"); - substituters.push_back(nixLibexecDir + "/nix/substituters/download-from-binary-cache.pl"); - if (useSshSubstituter && !sshSubstituterHosts.empty()) - substituters.push_back(nixLibexecDir + "/nix/substituters/download-via-ssh"); - } else - substituters = tokenizeString<Strings>(subs, ":"); } @@ -248,12 +235,12 @@ template<class N> void Settings::_get(N & res, const string & name) string Settings::pack() { string s; - foreach (SettingsMap::iterator, i, settings) { - if (i->first.find('\n') != string::npos || - i->first.find('=') != string::npos || - i->second.find('\n') != string::npos) + for (auto & i : settings) { + if (i.first.find('\n') != string::npos || + i.first.find('=') != string::npos || + i.second.find('\n') != string::npos) throw Error("illegal option name/value"); - s += i->first; s += '='; s += i->second; s += '\n'; + s += i.first; s += '='; s += i.second; s += '\n'; } return s; } @@ -261,11 +248,11 @@ string Settings::pack() void Settings::unpack(const string & pack) { Strings lines = tokenizeString<Strings>(pack, "\n"); - foreach (Strings::iterator, i, lines) { - string::size_type eq = i->find('='); + for (auto & i : lines) { + string::size_type eq = i.find('='); if (eq == string::npos) throw Error("illegal option name/value"); - set(i->substr(0, eq), i->substr(eq + 1)); + set(i.substr(0, eq), i.substr(eq + 1)); } } diff --git 
a/src/libstore/globals.hh b/src/libstore/globals.hh index 60b11afe6088..65f763ace3c7 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -1,6 +1,7 @@ #pragma once #include "types.hh" +#include "logging.hh" #include <map> #include <sys/types.h> @@ -77,8 +78,12 @@ struct Settings { instead. */ bool tryFallback; - /* Verbosity level for build output. */ - Verbosity buildVerbosity; + /* Whether to show build log output in real time. */ + bool verboseBuild = true; + + /* If verboseBuild is false, the number of lines of the tail of + the log to show if a build fails. */ + size_t logLines = 10; /* Maximum number of parallel build jobs. 0 means unlimited. */ unsigned int maxBuildJobs; @@ -105,31 +110,10 @@ struct Settings { means infinity. */ time_t buildTimeout; - /* The substituters. There are programs that can somehow realise - a store path without building, e.g., by downloading it or - copying it from a CD. */ - Paths substituters; - /* Whether to use build hooks (for distributed builds). Sometimes users want to disable this from the command-line. */ bool useBuildHook; - /* Whether buildDerivations() should print out lines on stderr in - a fixed format to allow its progress to be monitored. Each - line starts with a "@". The following are defined: - - @ build-started <drvpath> <outpath> <system> <logfile> - @ build-failed <drvpath> <outpath> <exitcode> <error text> - @ build-succeeded <drvpath> <outpath> - @ substituter-started <outpath> <substituter> - @ substituter-failed <outpath> <exitcode> <error text> - @ substituter-succeeded <outpath> - - Best combined with --no-build-output, otherwise stderr might - conceivably contain lines in this format printed by the - builders. */ - bool printBuildTrace; - /* Amount of reserved space for the garbage collector (/nix/var/nix/db/reserved). */ off_t reservedSize; @@ -168,9 +152,6 @@ struct Settings { before being killed (0 means no limit). */ unsigned long maxLogSize; - /* Whether to cache build failures. 
*/ - bool cacheFailure; - /* How often (in seconds) to poll for locks. */ unsigned int pollInterval; diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc new file mode 100644 index 000000000000..8c8d545c6d85 --- /dev/null +++ b/src/libstore/http-binary-cache-store.cc @@ -0,0 +1,105 @@ +#include "binary-cache-store.hh" +#include "download.hh" +#include "globals.hh" +#include "nar-info-disk-cache.hh" + +namespace nix { + +MakeError(UploadToHTTP, Error); + +class HttpBinaryCacheStore : public BinaryCacheStore +{ +private: + + Path cacheUri; + + Pool<Downloader> downloaders; + +public: + + HttpBinaryCacheStore( + const StoreParams & params, const Path & _cacheUri) + : BinaryCacheStore(params) + , cacheUri(_cacheUri) + , downloaders( + std::numeric_limits<size_t>::max(), + []() { return makeDownloader(); }) + { + if (cacheUri.back() == '/') + cacheUri.pop_back(); + + diskCache = getNarInfoDiskCache(); + } + + std::string getUri() override + { + return cacheUri; + } + + void init() override + { + // FIXME: do this lazily? + if (!diskCache->cacheExists(cacheUri)) { + try { + BinaryCacheStore::init(); + } catch (UploadToHTTP &) { + throw Error(format("‘%s’ does not appear to be a binary cache") % cacheUri); + } + diskCache->createCache(cacheUri, wantMassQuery_, priority); + } + } + +protected: + + bool fileExists(const std::string & path) override + { + try { + auto downloader(downloaders.get()); + DownloadOptions options; + options.showProgress = DownloadOptions::no; + options.head = true; + downloader->download(cacheUri + "/" + path, options); + return true; + } catch (DownloadError & e) { + /* S3 buckets return 403 if a file doesn't exist and the + bucket is unlistable, so treat 403 as 404. 
*/ + if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) + return false; + throw; + } + } + + void upsertFile(const std::string & path, const std::string & data) override + { + throw UploadToHTTP("uploading to an HTTP binary cache is not supported"); + } + + std::shared_ptr<std::string> getFile(const std::string & path) override + { + auto downloader(downloaders.get()); + DownloadOptions options; + options.showProgress = DownloadOptions::no; + try { + return downloader->download(cacheUri + "/" + path, options).data; + } catch (DownloadError & e) { + if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) + return 0; + throw; + } + } + +}; + +static RegisterStoreImplementation regStore([]( + const std::string & uri, const StoreParams & params) + -> std::shared_ptr<Store> +{ + if (std::string(uri, 0, 7) != "http://" && + std::string(uri, 0, 8) != "https://") return 0; + auto store = std::make_shared<HttpBinaryCacheStore>(params, uri); + store->init(); + return store; +}); + +} + diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc new file mode 100644 index 000000000000..b418c9c04148 --- /dev/null +++ b/src/libstore/local-binary-cache-store.cc @@ -0,0 +1,105 @@ +#include "binary-cache-store.hh" +#include "globals.hh" +#include "nar-info-disk-cache.hh" + +namespace nix { + +class LocalBinaryCacheStore : public BinaryCacheStore +{ +private: + + Path binaryCacheDir; + +public: + + LocalBinaryCacheStore( + const StoreParams & params, const Path & binaryCacheDir) + : BinaryCacheStore(params) + , binaryCacheDir(binaryCacheDir) + { + /* For testing the NAR info cache. 
*/ + if (getEnv("_NIX_CACHE_FILE_URLS") == "1") + diskCache = getNarInfoDiskCache(); + } + + void init() override; + + std::string getUri() override + { + return "file://" + binaryCacheDir; + } + +protected: + + bool fileExists(const std::string & path) override; + + void upsertFile(const std::string & path, const std::string & data) override; + + std::shared_ptr<std::string> getFile(const std::string & path) override; + + PathSet queryAllValidPaths() override + { + PathSet paths; + + for (auto & entry : readDirectory(binaryCacheDir)) { + if (entry.name.size() != 40 || + !hasSuffix(entry.name, ".narinfo")) + continue; + paths.insert(settings.nixStore + "/" + entry.name.substr(0, entry.name.size() - 8)); + } + + return paths; + } + +}; + +void LocalBinaryCacheStore::init() +{ + createDirs(binaryCacheDir + "/nar"); + BinaryCacheStore::init(); + + if (diskCache && !diskCache->cacheExists(getUri())) + diskCache->createCache(getUri(), wantMassQuery_, priority); +} + +static void atomicWrite(const Path & path, const std::string & s) +{ + Path tmp = path + ".tmp." 
+ std::to_string(getpid()); + AutoDelete del(tmp, false); + writeFile(tmp, s); + if (rename(tmp.c_str(), path.c_str())) + throw SysError(format("renaming ‘%1%’ to ‘%2%’") % tmp % path); + del.cancel(); +} + +bool LocalBinaryCacheStore::fileExists(const std::string & path) +{ + return pathExists(binaryCacheDir + "/" + path); +} + +void LocalBinaryCacheStore::upsertFile(const std::string & path, const std::string & data) +{ + atomicWrite(binaryCacheDir + "/" + path, data); +} + +std::shared_ptr<std::string> LocalBinaryCacheStore::getFile(const std::string & path) +{ + try { + return std::make_shared<std::string>(readFile(binaryCacheDir + "/" + path)); + } catch (SysError & e) { + if (e.errNo == ENOENT) return 0; + throw; + } +} + +static RegisterStoreImplementation regStore([]( + const std::string & uri, const StoreParams & params) + -> std::shared_ptr<Store> +{ + if (std::string(uri, 0, 7) != "file://") return 0; + auto store = std::make_shared<LocalBinaryCacheStore>(params, std::string(uri, 7)); + store->init(); + return store; +}); + +} diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc new file mode 100644 index 000000000000..303c3af27b8d --- /dev/null +++ b/src/libstore/local-fs-store.cc @@ -0,0 +1,79 @@ +#include "archive.hh" +#include "fs-accessor.hh" +#include "store-api.hh" + +namespace nix { + +struct LocalStoreAccessor : public FSAccessor +{ + ref<Store> store; + + LocalStoreAccessor(ref<Store> store) : store(store) { } + + void assertStore(const Path & path) + { + Path storePath = toStorePath(path); + if (!store->isValidPath(storePath)) + throw Error(format("path ‘%1%’ is not a valid store path") % storePath); + } + + FSAccessor::Stat stat(const Path & path) override + { + assertStore(path); + + struct stat st; + if (lstat(path.c_str(), &st)) { + if (errno == ENOENT || errno == ENOTDIR) return {Type::tMissing, 0, false}; + throw SysError(format("getting status of ‘%1%’") % path); + } + + if (!S_ISREG(st.st_mode) && 
!S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode)) + throw Error(format("file ‘%1%’ has unsupported type") % path); + + return { + S_ISREG(st.st_mode) ? Type::tRegular : + S_ISLNK(st.st_mode) ? Type::tSymlink : + Type::tDirectory, + S_ISREG(st.st_mode) ? (uint64_t) st.st_size : 0, + S_ISREG(st.st_mode) && st.st_mode & S_IXUSR}; + } + + StringSet readDirectory(const Path & path) override + { + assertStore(path); + + auto entries = nix::readDirectory(path); + + StringSet res; + for (auto & entry : entries) + res.insert(entry.name); + + return res; + } + + std::string readFile(const Path & path) override + { + assertStore(path); + return nix::readFile(path); + } + + std::string readLink(const Path & path) override + { + assertStore(path); + return nix::readLink(path); + } +}; + +ref<FSAccessor> LocalFSStore::getFSAccessor() +{ + return make_ref<LocalStoreAccessor>(ref<Store>(shared_from_this())); +} + +void LocalFSStore::narFromPath(const Path & path, Sink & sink) +{ + if (!isValidPath(path)) + throw Error(format("path ‘%s’ is not valid") % path); + dumpPath(path, sink); +} + +} diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 074c3394ffb1..b44384957ca6 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -5,7 +5,7 @@ #include "pathlocks.hh" #include "worker-protocol.hh" #include "derivations.hh" -#include "affinity.hh" +#include "nar-info.hh" #include <iostream> #include <algorithm> @@ -23,16 +23,11 @@ #include <time.h> #include <grp.h> -#if HAVE_UNSHARE && HAVE_STATVFS && HAVE_SYS_MOUNT_H +#if __linux__ #include <sched.h> #include <sys/statvfs.h> #include <sys/mount.h> -#endif - -#if HAVE_LINUX_FS_H -#include <linux/fs.h> #include <sys/ioctl.h> -#include <errno.h> #endif #include <sqlite3.h> @@ -41,171 +36,6 @@ namespace nix { -MakeError(SQLiteError, Error); -MakeError(SQLiteBusy, SQLiteError); - - -static void throwSQLiteError(sqlite3 * db, const format & f) - __attribute__ ((noreturn)); - -static void 
throwSQLiteError(sqlite3 * db, const format & f) -{ - int err = sqlite3_errcode(db); - if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) { - if (err == SQLITE_PROTOCOL) - printMsg(lvlError, "warning: SQLite database is busy (SQLITE_PROTOCOL)"); - else { - static bool warned = false; - if (!warned) { - printMsg(lvlError, "warning: SQLite database is busy"); - warned = true; - } - } - /* Sleep for a while since retrying the transaction right away - is likely to fail again. */ -#if HAVE_NANOSLEEP - struct timespec t; - t.tv_sec = 0; - t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */ - nanosleep(&t, 0); -#else - sleep(1); -#endif - throw SQLiteBusy(format("%1%: %2%") % f.str() % sqlite3_errmsg(db)); - } - else - throw SQLiteError(format("%1%: %2%") % f.str() % sqlite3_errmsg(db)); -} - - -/* Convenience macros for retrying a SQLite transaction. */ -#define retry_sqlite while (1) { try { -#define end_retry_sqlite break; } catch (SQLiteBusy & e) { } } - - -SQLite::~SQLite() -{ - try { - if (db && sqlite3_close(db) != SQLITE_OK) - throwSQLiteError(db, "closing database"); - } catch (...) { - ignoreException(); - } -} - - -void SQLiteStmt::create(sqlite3 * db, const string & s) -{ - checkInterrupt(); - assert(!stmt); - if (sqlite3_prepare_v2(db, s.c_str(), -1, &stmt, 0) != SQLITE_OK) - throwSQLiteError(db, "creating statement"); - this->db = db; -} - - -void SQLiteStmt::reset() -{ - assert(stmt); - /* Note: sqlite3_reset() returns the error code for the most - recent call to sqlite3_step(). So ignore it. */ - sqlite3_reset(stmt); - curArg = 1; -} - - -SQLiteStmt::~SQLiteStmt() -{ - try { - if (stmt && sqlite3_finalize(stmt) != SQLITE_OK) - throwSQLiteError(db, "finalizing statement"); - } catch (...) 
{ - ignoreException(); - } -} - - -void SQLiteStmt::bind(const string & value) -{ - if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT) != SQLITE_OK) - throwSQLiteError(db, "binding argument"); -} - - -void SQLiteStmt::bind(int value) -{ - if (sqlite3_bind_int(stmt, curArg++, value) != SQLITE_OK) - throwSQLiteError(db, "binding argument"); -} - - -void SQLiteStmt::bind64(long long value) -{ - if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK) - throwSQLiteError(db, "binding argument"); -} - - -void SQLiteStmt::bind() -{ - if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK) - throwSQLiteError(db, "binding argument"); -} - - -/* Helper class to ensure that prepared statements are reset when - leaving the scope that uses them. Unfinished prepared statements - prevent transactions from being aborted, and can cause locks to be - kept when they should be released. */ -struct SQLiteStmtUse -{ - SQLiteStmt & stmt; - SQLiteStmtUse(SQLiteStmt & stmt) : stmt(stmt) - { - stmt.reset(); - } - ~SQLiteStmtUse() - { - try { - stmt.reset(); - } catch (...) { - ignoreException(); - } - } -}; - - -struct SQLiteTxn -{ - bool active; - sqlite3 * db; - - SQLiteTxn(sqlite3 * db) : active(false) { - this->db = db; - if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK) - throwSQLiteError(db, "starting transaction"); - active = true; - } - - void commit() - { - if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK) - throwSQLiteError(db, "committing transaction"); - active = false; - } - - ~SQLiteTxn() - { - try { - if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK) - throwSQLiteError(db, "aborting transaction"); - } catch (...) 
{ - ignoreException(); - } - } -}; - - void checkStoreNotSymlink() { if (getEnv("NIX_IGNORE_SYMLINK_STORE") == "1") return; @@ -224,20 +54,24 @@ void checkStoreNotSymlink() } -LocalStore::LocalStore(bool reserveSpace) - : didSetSubstituterEnv(false) +LocalStore::LocalStore() + : linksDir(settings.nixStore + "/.links") + , reservedPath(settings.nixDBPath + "/reserved") + , schemaPath(settings.nixDBPath + "/schema") + , requireSigs(settings.get("signed-binary-caches", std::string("")) != "") // FIXME: rename option + , publicKeys(getDefaultPublicKeys()) { - schemaPath = settings.nixDBPath + "/schema"; + auto state(_state.lock()); if (settings.readOnlyMode) { - openDB(false); + openDB(*state, false); return; } /* Create missing state directories if they don't already exist. */ createDirs(settings.nixStore); makeStoreWritable(); - createDirs(linksDir = settings.nixStore + "/.links"); + createDirs(linksDir); Path profilesDir = settings.nixStateDir + "/profiles"; createDirs(profilesDir); createDirs(settings.nixStateDir + "/temproots"); @@ -284,25 +118,20 @@ LocalStore::LocalStore(bool reserveSpace) needed, we reserve some dummy space that we can free just before doing a garbage collection. 
*/ try { - Path reservedPath = settings.nixDBPath + "/reserved"; - if (reserveSpace) { - struct stat st; - if (stat(reservedPath.c_str(), &st) == -1 || - st.st_size != settings.reservedSize) - { - AutoCloseFD fd = open(reservedPath.c_str(), O_WRONLY | O_CREAT, 0600); - int res = -1; + struct stat st; + if (stat(reservedPath.c_str(), &st) == -1 || + st.st_size != settings.reservedSize) + { + AutoCloseFD fd = open(reservedPath.c_str(), O_WRONLY | O_CREAT, 0600); + int res = -1; #if HAVE_POSIX_FALLOCATE - res = posix_fallocate(fd, 0, settings.reservedSize); + res = posix_fallocate(fd, 0, settings.reservedSize); #endif - if (res == -1) { - writeFull(fd, string(settings.reservedSize, 'X')); - ftruncate(fd, settings.reservedSize); - } + if (res == -1) { + writeFull(fd, string(settings.reservedSize, 'X')); + ftruncate(fd, settings.reservedSize); } } - else - deletePath(reservedPath); } catch (SysError & e) { /* don't care about errors */ } @@ -314,7 +143,7 @@ LocalStore::LocalStore(bool reserveSpace) } catch (SysError & e) { if (e.errNo != EACCES) throw; settings.readOnlyMode = true; - openDB(false); + openDB(*state, false); return; } @@ -332,7 +161,7 @@ LocalStore::LocalStore(bool reserveSpace) else if (curSchema == 0) { /* new store */ curSchema = nixSchemaVersion; - openDB(true); + openDB(*state, true); writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str()); } @@ -343,6 +172,12 @@ LocalStore::LocalStore(bool reserveSpace) "which is no longer supported. To convert to the new format,\n" "please upgrade Nix to version 0.12 first."); + if (curSchema < 6) + throw Error( + "Your Nix store has a database in flat file format,\n" + "which is no longer supported. 
To convert to the new format,\n" + "please upgrade Nix to version 1.11 first."); + if (!lockFile(globalLock, ltWrite, false)) { printMsg(lvlError, "waiting for exclusive access to the Nix store..."); lockFile(globalLock, ltWrite, true); @@ -352,37 +187,43 @@ LocalStore::LocalStore(bool reserveSpace) have performed the upgrade already. */ curSchema = getSchema(); - if (curSchema < 6) upgradeStore6(); - else if (curSchema < 7) { upgradeStore7(); openDB(true); } + if (curSchema < 7) { upgradeStore7(); } + + openDB(*state, false); + + if (curSchema < 8) { + SQLiteTxn txn(state->db); + if (sqlite3_exec(state->db, "alter table ValidPaths add column ultimate integer", 0, 0, 0) != SQLITE_OK) + throwSQLiteError(state->db, "upgrading database schema"); + if (sqlite3_exec(state->db, "alter table ValidPaths add column sigs text", 0, 0, 0) != SQLITE_OK) + throwSQLiteError(state->db, "upgrading database schema"); + txn.commit(); + } + + if (curSchema < 9) { + SQLiteTxn txn(state->db); + if (sqlite3_exec(state->db, "drop table FailedPaths", 0, 0, 0) != SQLITE_OK) + throwSQLiteError(state->db, "upgrading database schema"); + txn.commit(); + } writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str()); lockFile(globalLock, ltRead, true); } - else openDB(false); + else openDB(*state, false); } LocalStore::~LocalStore() { - try { - foreach (RunningSubstituters::iterator, i, runningSubstituters) { - if (i->second.disabled) continue; - i->second.to.close(); - i->second.from.close(); - i->second.error.close(); - if (i->second.pid != -1) - i->second.pid.wait(true); - } - } catch (...) { - ignoreException(); - } + auto state(_state.lock()); try { - if (fdTempRoots != -1) { - fdTempRoots.close(); - unlink(fnTempRoots.c_str()); + if (state->fdTempRoots != -1) { + state->fdTempRoots.close(); + unlink(state->fnTempRoots.c_str()); } } catch (...) 
{ ignoreException(); @@ -390,6 +231,12 @@ LocalStore::~LocalStore() } +std::string LocalStore::getUri() +{ + return "local"; +} + + int LocalStore::getSchema() { int curSchema = 0; @@ -402,13 +249,20 @@ int LocalStore::getSchema() } -void LocalStore::openDB(bool create) +bool LocalStore::haveWriteAccess() +{ + return access(settings.nixDBPath.c_str(), R_OK | W_OK) == 0; +} + + +void LocalStore::openDB(State & state, bool create) { - if (access(settings.nixDBPath.c_str(), R_OK | W_OK)) + if (!haveWriteAccess()) throw SysError(format("Nix database directory ‘%1%’ is not writable") % settings.nixDBPath); /* Open the Nix database. */ string dbPath = settings.nixDBPath + "/db.sqlite"; + auto & db(state.db); if (sqlite3_open_v2(dbPath.c_str(), &db.db, SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK) throw Error(format("cannot open Nix database ‘%1%’") % dbPath); @@ -461,40 +315,31 @@ void LocalStore::openDB(bool create) } /* Prepare SQL statements. */ - stmtRegisterValidPath.create(db, - "insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);"); - stmtUpdatePathInfo.create(db, - "update ValidPaths set narSize = ?, hash = ? where path = ?;"); - stmtAddReference.create(db, + state.stmtRegisterValidPath.create(db, + "insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs) values (?, ?, ?, ?, ?, ?, ?);"); + state.stmtUpdatePathInfo.create(db, + "update ValidPaths set narSize = ?, hash = ?, ultimate = ?, sigs = ? 
where path = ?;"); + state.stmtAddReference.create(db, "insert or replace into Refs (referrer, reference) values (?, ?);"); - stmtQueryPathInfo.create(db, - "select id, hash, registrationTime, deriver, narSize from ValidPaths where path = ?;"); - stmtQueryReferences.create(db, + state.stmtQueryPathInfo.create(db, + "select id, hash, registrationTime, deriver, narSize, ultimate, sigs from ValidPaths where path = ?;"); + state.stmtQueryReferences.create(db, "select path from Refs join ValidPaths on reference = id where referrer = ?;"); - stmtQueryReferrers.create(db, + state.stmtQueryReferrers.create(db, "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);"); - stmtInvalidatePath.create(db, + state.stmtInvalidatePath.create(db, "delete from ValidPaths where path = ?;"); - stmtRegisterFailedPath.create(db, - "insert or ignore into FailedPaths (path, time) values (?, ?);"); - stmtHasPathFailed.create(db, - "select time from FailedPaths where path = ?;"); - stmtQueryFailedPaths.create(db, - "select path from FailedPaths;"); - // If the path is a derivation, then clear its outputs. - stmtClearFailedPath.create(db, - "delete from FailedPaths where ?1 = '*' or path = ?1 " - "or path in (select d.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where v.path = ?1);"); - stmtAddDerivationOutput.create(db, + state.stmtAddDerivationOutput.create(db, "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);"); - stmtQueryValidDerivers.create(db, + state.stmtQueryValidDerivers.create(db, "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where d.path = ?;"); - stmtQueryDerivationOutputs.create(db, + state.stmtQueryDerivationOutputs.create(db, "select id, path from DerivationOutputs where drv = ?;"); // Use "path >= ?" with limit 1 rather than "path like '?%'" to // ensure efficient lookup. 
- stmtQueryPathFromHashPart.create(db, + state.stmtQueryPathFromHashPart.create(db, "select path from ValidPaths where path >= ? limit 1;"); + state.stmtQueryValidPaths.create(db, "select path from ValidPaths"); } @@ -502,7 +347,7 @@ void LocalStore::openDB(bool create) bind mount. So make the Nix store writable for this process. */ void LocalStore::makeStoreWritable() { -#if HAVE_UNSHARE && HAVE_STATVFS && HAVE_SYS_MOUNT_H && defined(MS_BIND) && defined(MS_REMOUNT) +#if __linux__ if (getuid() != 0) return; /* Check if /nix/store is on a read-only mount. */ struct statvfs stat; @@ -607,10 +452,10 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe users group); we check for this case below. */ if (st.st_uid != geteuid()) { #if HAVE_LCHOWN - if (lchown(path.c_str(), geteuid(), (gid_t) -1) == -1) + if (lchown(path.c_str(), geteuid(), getegid()) == -1) #else if (!S_ISLNK(st.st_mode) && - chown(path.c_str(), geteuid(), (gid_t) -1) == -1) + chown(path.c_str(), geteuid(), getegid()) == -1) #endif throw SysError(format("changing owner of ‘%1%’ to %2%") % path % geteuid()); @@ -654,7 +499,7 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & assert(isDerivation(drvName)); drvName = string(drvName, 0, drvName.size() - drvExtension.size()); - if (isFixedOutputDrv(drv)) { + if (drv.isFixedOutput()) { DerivationOutputs::const_iterator out = drv.outputs.find("out"); if (out == drv.outputs.end()) throw Error(format("derivation ‘%1%’ does not have an output named ‘out’") % drvPath); @@ -671,41 +516,37 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & else { Derivation drvCopy(drv); - foreach (DerivationOutputs::iterator, i, drvCopy.outputs) { - i->second.path = ""; - drvCopy.env[i->first] = ""; + for (auto & i : drvCopy.outputs) { + i.second.path = ""; + drvCopy.env[i.first] = ""; } Hash h = hashDerivationModulo(*this, drvCopy); - foreach (DerivationOutputs::const_iterator, i, 
drv.outputs) { - Path outPath = makeOutputPath(i->first, h, drvName); - StringPairs::const_iterator j = drv.env.find(i->first); - if (i->second.path != outPath || j == drv.env.end() || j->second != outPath) + for (auto & i : drv.outputs) { + Path outPath = makeOutputPath(i.first, h, drvName); + StringPairs::const_iterator j = drv.env.find(i.first); + if (i.second.path != outPath || j == drv.env.end() || j->second != outPath) throw Error(format("derivation ‘%1%’ has incorrect output ‘%2%’, should be ‘%3%’") - % drvPath % i->second.path % outPath); + % drvPath % i.second.path % outPath); } } } -unsigned long long LocalStore::addValidPath(const ValidPathInfo & info, bool checkOutputs) +uint64_t LocalStore::addValidPath(State & state, + const ValidPathInfo & info, bool checkOutputs) { - SQLiteStmtUse use(stmtRegisterValidPath); - stmtRegisterValidPath.bind(info.path); - stmtRegisterValidPath.bind("sha256:" + printHash(info.hash)); - stmtRegisterValidPath.bind(info.registrationTime == 0 ? time(0) : info.registrationTime); - if (info.deriver != "") - stmtRegisterValidPath.bind(info.deriver); - else - stmtRegisterValidPath.bind(); // null - if (info.narSize != 0) - stmtRegisterValidPath.bind64(info.narSize); - else - stmtRegisterValidPath.bind(); // null - if (sqlite3_step(stmtRegisterValidPath) != SQLITE_DONE) - throwSQLiteError(db, format("registering valid path ‘%1%’ in database") % info.path); - unsigned long long id = sqlite3_last_insert_rowid(db); + state.stmtRegisterValidPath.use() + (info.path) + ("sha256:" + printHash(info.narHash)) + (info.registrationTime == 0 ? time(0) : info.registrationTime) + (info.deriver, info.deriver != "") + (info.narSize, info.narSize != 0) + (info.ultimate ? 1 : 0, info.ultimate) + (concatStringsSep(" ", info.sigs), !info.sigs.empty()) + .exec(); + uint64_t id = sqlite3_last_insert_rowid(state.db); /* If this is a derivation, then store the derivation outputs in the database. 
This is useful for the garbage collector: it can @@ -721,90 +562,21 @@ unsigned long long LocalStore::addValidPath(const ValidPathInfo & info, bool che registration above is undone. */ if (checkOutputs) checkDerivationOutputs(info.path, drv); - foreach (DerivationOutputs::iterator, i, drv.outputs) { - SQLiteStmtUse use(stmtAddDerivationOutput); - stmtAddDerivationOutput.bind(id); - stmtAddDerivationOutput.bind(i->first); - stmtAddDerivationOutput.bind(i->second.path); - if (sqlite3_step(stmtAddDerivationOutput) != SQLITE_DONE) - throwSQLiteError(db, format("adding derivation output for ‘%1%’ in database") % info.path); + for (auto & i : drv.outputs) { + state.stmtAddDerivationOutput.use() + (id) + (i.first) + (i.second.path) + .exec(); } } - return id; -} - - -void LocalStore::addReference(unsigned long long referrer, unsigned long long reference) -{ - SQLiteStmtUse use(stmtAddReference); - stmtAddReference.bind(referrer); - stmtAddReference.bind(reference); - if (sqlite3_step(stmtAddReference) != SQLITE_DONE) - throwSQLiteError(db, "adding reference to database"); -} - - -void LocalStore::registerFailedPath(const Path & path) -{ - retry_sqlite { - SQLiteStmtUse use(stmtRegisterFailedPath); - stmtRegisterFailedPath.bind(path); - stmtRegisterFailedPath.bind(time(0)); - if (sqlite3_step(stmtRegisterFailedPath) != SQLITE_DONE) - throwSQLiteError(db, format("registering failed path ‘%1%’") % path); - } end_retry_sqlite; -} - - -bool LocalStore::hasPathFailed(const Path & path) -{ - retry_sqlite { - SQLiteStmtUse use(stmtHasPathFailed); - stmtHasPathFailed.bind(path); - int res = sqlite3_step(stmtHasPathFailed); - if (res != SQLITE_DONE && res != SQLITE_ROW) - throwSQLiteError(db, "querying whether path failed"); - return res == SQLITE_ROW; - } end_retry_sqlite; -} - - -PathSet LocalStore::queryFailedPaths() -{ - retry_sqlite { - SQLiteStmtUse use(stmtQueryFailedPaths); - - PathSet res; - int r; - while ((r = sqlite3_step(stmtQueryFailedPaths)) == SQLITE_ROW) { - const 
char * s = (const char *) sqlite3_column_text(stmtQueryFailedPaths, 0); - assert(s); - res.insert(s); - } - - if (r != SQLITE_DONE) - throwSQLiteError(db, "error querying failed paths"); - - return res; - } end_retry_sqlite; -} - - -void LocalStore::clearFailedPaths(const PathSet & paths) -{ - retry_sqlite { - SQLiteTxn txn(db); - - foreach (PathSet::const_iterator, i, paths) { - SQLiteStmtUse use(stmtClearFailedPath); - stmtClearFailedPath.bind(*i); - if (sqlite3_step(stmtClearFailedPath) != SQLITE_DONE) - throwSQLiteError(db, format("clearing failed path ‘%1%’ in database") % *i); - } + { + auto state_(Store::state.lock()); + state_->pathInfoCache.upsert(storePathToHash(info.path), std::make_shared<ValidPathInfo>(info)); + } - txn.commit(); - } end_retry_sqlite; + return id; } @@ -822,174 +594,124 @@ Hash parseHashField(const Path & path, const string & s) } -ValidPathInfo LocalStore::queryPathInfo(const Path & path) +std::shared_ptr<ValidPathInfo> LocalStore::queryPathInfoUncached(const Path & path) { - ValidPathInfo info; - info.path = path; + auto info = std::make_shared<ValidPathInfo>(); + info->path = path; assertStorePath(path); - retry_sqlite { + return retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() { + auto state(_state.lock()); /* Get the path info. 
*/ - SQLiteStmtUse use1(stmtQueryPathInfo); - - stmtQueryPathInfo.bind(path); + auto useQueryPathInfo(state->stmtQueryPathInfo.use()(path)); - int r = sqlite3_step(stmtQueryPathInfo); - if (r == SQLITE_DONE) throw Error(format("path ‘%1%’ is not valid") % path); - if (r != SQLITE_ROW) throwSQLiteError(db, "querying path in database"); + if (!useQueryPathInfo.next()) + return std::shared_ptr<ValidPathInfo>(); - info.id = sqlite3_column_int(stmtQueryPathInfo, 0); + info->id = useQueryPathInfo.getInt(0); - const char * s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 1); - assert(s); - info.hash = parseHashField(path, s); + info->narHash = parseHashField(path, useQueryPathInfo.getStr(1)); - info.registrationTime = sqlite3_column_int(stmtQueryPathInfo, 2); + info->registrationTime = useQueryPathInfo.getInt(2); - s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 3); - if (s) info.deriver = s; + auto s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 3); + if (s) info->deriver = s; /* Note that narSize = NULL yields 0. */ - info.narSize = sqlite3_column_int64(stmtQueryPathInfo, 4); + info->narSize = useQueryPathInfo.getInt(4); - /* Get the references. */ - SQLiteStmtUse use2(stmtQueryReferences); + info->ultimate = useQueryPathInfo.getInt(5) == 1; - stmtQueryReferences.bind(info.id); + s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 6); + if (s) info->sigs = tokenizeString<StringSet>(s, " "); - while ((r = sqlite3_step(stmtQueryReferences)) == SQLITE_ROW) { - s = (const char *) sqlite3_column_text(stmtQueryReferences, 0); - assert(s); - info.references.insert(s); - } + /* Get the references. 
*/ + auto useQueryReferences(state->stmtQueryReferences.use()(info->id)); - if (r != SQLITE_DONE) - throwSQLiteError(db, format("error getting references of ‘%1%’") % path); + while (useQueryReferences.next()) + info->references.insert(useQueryReferences.getStr(0)); return info; - } end_retry_sqlite; + }); } -/* Update path info in the database. Currently only updates the - narSize field. */ -void LocalStore::updatePathInfo(const ValidPathInfo & info) +/* Update path info in the database. */ +void LocalStore::updatePathInfo(State & state, const ValidPathInfo & info) { - SQLiteStmtUse use(stmtUpdatePathInfo); - if (info.narSize != 0) - stmtUpdatePathInfo.bind64(info.narSize); - else - stmtUpdatePathInfo.bind(); // null - stmtUpdatePathInfo.bind("sha256:" + printHash(info.hash)); - stmtUpdatePathInfo.bind(info.path); - if (sqlite3_step(stmtUpdatePathInfo) != SQLITE_DONE) - throwSQLiteError(db, format("updating info of path ‘%1%’ in database") % info.path); + state.stmtUpdatePathInfo.use() + (info.narSize, info.narSize != 0) + ("sha256:" + printHash(info.narHash)) + (info.ultimate ? 
1 : 0, info.ultimate) + (concatStringsSep(" ", info.sigs), !info.sigs.empty()) + (info.path) + .exec(); } -unsigned long long LocalStore::queryValidPathId(const Path & path) +uint64_t LocalStore::queryValidPathId(State & state, const Path & path) { - SQLiteStmtUse use(stmtQueryPathInfo); - stmtQueryPathInfo.bind(path); - int res = sqlite3_step(stmtQueryPathInfo); - if (res == SQLITE_ROW) return sqlite3_column_int(stmtQueryPathInfo, 0); - if (res == SQLITE_DONE) throw Error(format("path ‘%1%’ is not valid") % path); - throwSQLiteError(db, "querying path in database"); + auto use(state.stmtQueryPathInfo.use()(path)); + if (!use.next()) + throw Error(format("path ‘%1%’ is not valid") % path); + return use.getInt(0); } -bool LocalStore::isValidPath_(const Path & path) +bool LocalStore::isValidPath_(State & state, const Path & path) { - SQLiteStmtUse use(stmtQueryPathInfo); - stmtQueryPathInfo.bind(path); - int res = sqlite3_step(stmtQueryPathInfo); - if (res != SQLITE_DONE && res != SQLITE_ROW) - throwSQLiteError(db, "querying path in database"); - return res == SQLITE_ROW; + return state.stmtQueryPathInfo.use()(path).next(); } -bool LocalStore::isValidPath(const Path & path) +bool LocalStore::isValidPathUncached(const Path & path) { - retry_sqlite { - return isValidPath_(path); - } end_retry_sqlite; + return retrySQLite<bool>([&]() { + auto state(_state.lock()); + return isValidPath_(*state, path); + }); } PathSet LocalStore::queryValidPaths(const PathSet & paths) { - retry_sqlite { - PathSet res; - foreach (PathSet::const_iterator, i, paths) - if (isValidPath_(*i)) res.insert(*i); - return res; - } end_retry_sqlite; + PathSet res; + for (auto & i : paths) + if (isValidPath(i)) res.insert(i); + return res; } PathSet LocalStore::queryAllValidPaths() { - retry_sqlite { - SQLiteStmt stmt; - stmt.create(db, "select path from ValidPaths"); - + return retrySQLite<PathSet>([&]() { + auto state(_state.lock()); + auto use(state->stmtQueryValidPaths.use()); PathSet res; - int 
r; - while ((r = sqlite3_step(stmt)) == SQLITE_ROW) { - const char * s = (const char *) sqlite3_column_text(stmt, 0); - assert(s); - res.insert(s); - } - - if (r != SQLITE_DONE) - throwSQLiteError(db, "error getting valid paths"); - + while (use.next()) res.insert(use.getStr(0)); return res; - } end_retry_sqlite; -} - - -void LocalStore::queryReferences(const Path & path, - PathSet & references) -{ - ValidPathInfo info = queryPathInfo(path); - references.insert(info.references.begin(), info.references.end()); + }); } -void LocalStore::queryReferrers_(const Path & path, PathSet & referrers) +void LocalStore::queryReferrers(State & state, const Path & path, PathSet & referrers) { - SQLiteStmtUse use(stmtQueryReferrers); - - stmtQueryReferrers.bind(path); + auto useQueryReferrers(state.stmtQueryReferrers.use()(path)); - int r; - while ((r = sqlite3_step(stmtQueryReferrers)) == SQLITE_ROW) { - const char * s = (const char *) sqlite3_column_text(stmtQueryReferrers, 0); - assert(s); - referrers.insert(s); - } - - if (r != SQLITE_DONE) - throwSQLiteError(db, format("error getting references of ‘%1%’") % path); + while (useQueryReferrers.next()) + referrers.insert(useQueryReferrers.getStr(0)); } void LocalStore::queryReferrers(const Path & path, PathSet & referrers) { assertStorePath(path); - retry_sqlite { - queryReferrers_(path, referrers); - } end_retry_sqlite; -} - - -Path LocalStore::queryDeriver(const Path & path) -{ - return queryPathInfo(path).deriver; + return retrySQLite<void>([&]() { + auto state(_state.lock()); + queryReferrers(*state, path, referrers); + }); } @@ -997,294 +719,114 @@ PathSet LocalStore::queryValidDerivers(const Path & path) { assertStorePath(path); - retry_sqlite { - SQLiteStmtUse use(stmtQueryValidDerivers); - stmtQueryValidDerivers.bind(path); + return retrySQLite<PathSet>([&]() { + auto state(_state.lock()); - PathSet derivers; - int r; - while ((r = sqlite3_step(stmtQueryValidDerivers)) == SQLITE_ROW) { - const char * s = (const char *) 
sqlite3_column_text(stmtQueryValidDerivers, 1); - assert(s); - derivers.insert(s); - } + auto useQueryValidDerivers(state->stmtQueryValidDerivers.use()(path)); - if (r != SQLITE_DONE) - throwSQLiteError(db, format("error getting valid derivers of ‘%1%’") % path); + PathSet derivers; + while (useQueryValidDerivers.next()) + derivers.insert(useQueryValidDerivers.getStr(1)); return derivers; - } end_retry_sqlite; + }); } PathSet LocalStore::queryDerivationOutputs(const Path & path) { - retry_sqlite { - SQLiteStmtUse use(stmtQueryDerivationOutputs); - stmtQueryDerivationOutputs.bind(queryValidPathId(path)); + return retrySQLite<PathSet>([&]() { + auto state(_state.lock()); - PathSet outputs; - int r; - while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) { - const char * s = (const char *) sqlite3_column_text(stmtQueryDerivationOutputs, 1); - assert(s); - outputs.insert(s); - } + auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use() + (queryValidPathId(*state, path))); - if (r != SQLITE_DONE) - throwSQLiteError(db, format("error getting outputs of ‘%1%’") % path); + PathSet outputs; + while (useQueryDerivationOutputs.next()) + outputs.insert(useQueryDerivationOutputs.getStr(1)); return outputs; - } end_retry_sqlite; + }); } StringSet LocalStore::queryDerivationOutputNames(const Path & path) { - retry_sqlite { - SQLiteStmtUse use(stmtQueryDerivationOutputs); - stmtQueryDerivationOutputs.bind(queryValidPathId(path)); + return retrySQLite<StringSet>([&]() { + auto state(_state.lock()); - StringSet outputNames; - int r; - while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) { - const char * s = (const char *) sqlite3_column_text(stmtQueryDerivationOutputs, 0); - assert(s); - outputNames.insert(s); - } + auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use() + (queryValidPathId(*state, path))); - if (r != SQLITE_DONE) - throwSQLiteError(db, format("error getting output names of ‘%1%’") % path); + StringSet 
outputNames; + while (useQueryDerivationOutputs.next()) + outputNames.insert(useQueryDerivationOutputs.getStr(0)); return outputNames; - } end_retry_sqlite; + }); } Path LocalStore::queryPathFromHashPart(const string & hashPart) { - if (hashPart.size() != 32) throw Error("invalid hash part"); + if (hashPart.size() != storePathHashLen) throw Error("invalid hash part"); Path prefix = settings.nixStore + "/" + hashPart; - retry_sqlite { - SQLiteStmtUse use(stmtQueryPathFromHashPart); - stmtQueryPathFromHashPart.bind(prefix); - - int res = sqlite3_step(stmtQueryPathFromHashPart); - if (res == SQLITE_DONE) return ""; - if (res != SQLITE_ROW) throwSQLiteError(db, "finding path in database"); - - const char * s = (const char *) sqlite3_column_text(stmtQueryPathFromHashPart, 0); - return s && prefix.compare(0, prefix.size(), s, prefix.size()) == 0 ? s : ""; - } end_retry_sqlite; -} - - -void LocalStore::setSubstituterEnv() -{ - if (didSetSubstituterEnv) return; - - /* Pass configuration options (including those overridden with - --option) to substituters. 
*/ - setenv("_NIX_OPTIONS", settings.pack().c_str(), 1); - - didSetSubstituterEnv = true; -} - + return retrySQLite<Path>([&]() { + auto state(_state.lock()); -void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & run) -{ - if (run.disabled || run.pid != -1) return; - - debug(format("starting substituter program ‘%1%’") % substituter); - - Pipe toPipe, fromPipe, errorPipe; - - toPipe.create(); - fromPipe.create(); - errorPipe.create(); + auto useQueryPathFromHashPart(state->stmtQueryPathFromHashPart.use()(prefix)); - setSubstituterEnv(); + if (!useQueryPathFromHashPart.next()) return ""; - run.pid = startProcess([&]() { - if (dup2(toPipe.readSide, STDIN_FILENO) == -1) - throw SysError("dupping stdin"); - if (dup2(fromPipe.writeSide, STDOUT_FILENO) == -1) - throw SysError("dupping stdout"); - if (dup2(errorPipe.writeSide, STDERR_FILENO) == -1) - throw SysError("dupping stderr"); - execl(substituter.c_str(), substituter.c_str(), "--query", NULL); - throw SysError(format("executing ‘%1%’") % substituter); + const char * s = (const char *) sqlite3_column_text(state->stmtQueryPathFromHashPart, 0); + return s && prefix.compare(0, prefix.size(), s, prefix.size()) == 0 ? s : ""; }); - - run.program = baseNameOf(substituter); - run.to = toPipe.writeSide.borrow(); - run.from = run.fromBuf.fd = fromPipe.readSide.borrow(); - run.error = errorPipe.readSide.borrow(); - - toPipe.readSide.close(); - fromPipe.writeSide.close(); - errorPipe.writeSide.close(); - - /* The substituter may exit right away if it's disabled in any way - (e.g. copy-from-other-stores.pl will exit if no other stores - are configured). */ - try { - getLineFromSubstituter(run); - } catch (EndOfFile & e) { - run.to.close(); - run.from.close(); - run.error.close(); - run.disabled = true; - if (run.pid.wait(true) != 0) throw; - } -} - - -/* Read a line from the substituter's stdout, while also processing - its stderr. 
*/ -string LocalStore::getLineFromSubstituter(RunningSubstituter & run) -{ - string res, err; - - /* We might have stdout data left over from the last time. */ - if (run.fromBuf.hasData()) goto haveData; - - while (1) { - checkInterrupt(); - - fd_set fds; - FD_ZERO(&fds); - FD_SET(run.from, &fds); - FD_SET(run.error, &fds); - - /* Wait for data to appear on the substituter's stdout or - stderr. */ - if (select(run.from > run.error ? run.from + 1 : run.error + 1, &fds, 0, 0, 0) == -1) { - if (errno == EINTR) continue; - throw SysError("waiting for input from the substituter"); - } - - /* Completely drain stderr before dealing with stdout. */ - if (FD_ISSET(run.error, &fds)) { - char buf[4096]; - ssize_t n = read(run.error, (unsigned char *) buf, sizeof(buf)); - if (n == -1) { - if (errno == EINTR) continue; - throw SysError("reading from substituter's stderr"); - } - if (n == 0) throw EndOfFile(format("substituter ‘%1%’ died unexpectedly") % run.program); - err.append(buf, n); - string::size_type p; - while ((p = err.find('\n')) != string::npos) { - printMsg(lvlError, run.program + ": " + string(err, 0, p)); - err = string(err, p + 1); - } - } - - /* Read from stdout until we get a newline or the buffer is empty. 
*/ - else if (run.fromBuf.hasData() || FD_ISSET(run.from, &fds)) { - haveData: - do { - unsigned char c; - run.fromBuf(&c, 1); - if (c == '\n') { - if (!err.empty()) printMsg(lvlError, run.program + ": " + err); - return res; - } - res += c; - } while (run.fromBuf.hasData()); - } - } -} - - -template<class T> T LocalStore::getIntLineFromSubstituter(RunningSubstituter & run) -{ - string s = getLineFromSubstituter(run); - T res; - if (!string2Int(s, res)) throw Error("integer expected from stream"); - return res; } PathSet LocalStore::querySubstitutablePaths(const PathSet & paths) { PathSet res; - foreach (Paths::iterator, i, settings.substituters) { - if (res.size() == paths.size()) break; - RunningSubstituter & run(runningSubstituters[*i]); - startSubstituter(*i, run); - if (run.disabled) continue; - string s = "have "; - foreach (PathSet::const_iterator, j, paths) - if (res.find(*j) == res.end()) { s += *j; s += " "; } - writeLine(run.to, s); - while (true) { - /* FIXME: we only read stderr when an error occurs, so - substituters should only write (short) messages to - stderr when they fail. I.e. they shouldn't write debug - output. 
*/ - Path path = getLineFromSubstituter(run); - if (path == "") break; - res.insert(path); + for (auto & sub : getDefaultSubstituters()) { + if (!sub->wantMassQuery()) continue; + for (auto & path : paths) { + if (res.count(path)) continue; + debug(format("checking substituter ‘%s’ for path ‘%s’") + % sub->getUri() % path); + if (sub->isValidPath(path)) + res.insert(path); } } return res; } -void LocalStore::querySubstitutablePathInfos(const Path & substituter, - PathSet & paths, SubstitutablePathInfos & infos) -{ - RunningSubstituter & run(runningSubstituters[substituter]); - startSubstituter(substituter, run); - if (run.disabled) return; - - string s = "info "; - foreach (PathSet::const_iterator, i, paths) - if (infos.find(*i) == infos.end()) { s += *i; s += " "; } - writeLine(run.to, s); - - while (true) { - Path path = getLineFromSubstituter(run); - if (path == "") break; - if (paths.find(path) == paths.end()) - throw Error(format("got unexpected path ‘%1%’ from substituter") % path); - paths.erase(path); - SubstitutablePathInfo & info(infos[path]); - info.deriver = getLineFromSubstituter(run); - if (info.deriver != "") assertStorePath(info.deriver); - int nrRefs = getIntLineFromSubstituter<int>(run); - while (nrRefs--) { - Path p = getLineFromSubstituter(run); - assertStorePath(p); - info.references.insert(p); - } - info.downloadSize = getIntLineFromSubstituter<long long>(run); - info.narSize = getIntLineFromSubstituter<long long>(run); - } -} - - void LocalStore::querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos) { - PathSet todo = paths; - foreach (Paths::iterator, i, settings.substituters) { - if (todo.empty()) break; - querySubstitutablePathInfos(*i, todo, infos); + for (auto & sub : getDefaultSubstituters()) { + for (auto & path : paths) { + if (infos.count(path)) continue; + debug(format("checking substituter ‘%s’ for path ‘%s’") + % sub->getUri() % path); + try { + auto info = sub->queryPathInfo(path); + auto narInfo = 
std::dynamic_pointer_cast<const NarInfo>( + std::shared_ptr<const ValidPathInfo>(info)); + infos[path] = SubstitutablePathInfo{ + info->deriver, + info->references, + narInfo ? narInfo->fileSize : 0, + info->narSize}; + } catch (InvalidPath) { + } + } } } -Hash LocalStore::queryPathHash(const Path & path) -{ - return queryPathInfo(path).hash; -} - - void LocalStore::registerValidPath(const ValidPathInfo & info) { ValidPathInfos infos; @@ -1295,69 +837,112 @@ void LocalStore::registerValidPath(const ValidPathInfo & info) void LocalStore::registerValidPaths(const ValidPathInfos & infos) { - /* SQLite will fsync by default, but the new valid paths may not be fsync-ed. - * So some may want to fsync them before registering the validity, at the - * expense of some speed of the path registering operation. */ + /* SQLite will fsync by default, but the new valid paths may not + be fsync-ed. So some may want to fsync them before registering + the validity, at the expense of some speed of the path + registering operation. 
*/ if (settings.syncBeforeRegistering) sync(); - retry_sqlite { - SQLiteTxn txn(db); + return retrySQLite<void>([&]() { + auto state(_state.lock()); + + SQLiteTxn txn(state->db); PathSet paths; - foreach (ValidPathInfos::const_iterator, i, infos) { - assert(i->hash.type == htSHA256); - if (isValidPath_(i->path)) - updatePathInfo(*i); + for (auto & i : infos) { + assert(i.narHash.type == htSHA256); + if (isValidPath_(*state, i.path)) + updatePathInfo(*state, i); else - addValidPath(*i, false); - paths.insert(i->path); + addValidPath(*state, i, false); + paths.insert(i.path); } - foreach (ValidPathInfos::const_iterator, i, infos) { - unsigned long long referrer = queryValidPathId(i->path); - foreach (PathSet::iterator, j, i->references) - addReference(referrer, queryValidPathId(*j)); + for (auto & i : infos) { + auto referrer = queryValidPathId(*state, i.path); + for (auto & j : i.references) + state->stmtAddReference.use()(referrer)(queryValidPathId(*state, j)).exec(); } /* Check that the derivation outputs are correct. We can't do this in addValidPath() above, because the references might not be valid yet. */ - foreach (ValidPathInfos::const_iterator, i, infos) - if (isDerivation(i->path)) { + for (auto & i : infos) + if (isDerivation(i.path)) { // FIXME: inefficient; we already loaded the // derivation in addValidPath(). - Derivation drv = readDerivation(i->path); - checkDerivationOutputs(i->path, drv); + Derivation drv = readDerivation(i.path); + checkDerivationOutputs(i.path, drv); } /* Do a topological sort of the paths. This will throw an error if a cycle is detected and roll back the transaction. Cycles can only occur when a derivation has multiple outputs. */ - topoSortPaths(*this, paths); + topoSortPaths(paths); txn.commit(); - } end_retry_sqlite; + }); } /* Invalidate a path. The caller is responsible for checking that there are no referrers. 
*/ -void LocalStore::invalidatePath(const Path & path) +void LocalStore::invalidatePath(State & state, const Path & path) { debug(format("invalidating path ‘%1%’") % path); - drvHashes.erase(path); + state.stmtInvalidatePath.use()(path).exec(); - SQLiteStmtUse use(stmtInvalidatePath); + /* Note that the foreign key constraints on the Refs table take + care of deleting the references entries for `path'. */ - stmtInvalidatePath.bind(path); + { + auto state_(Store::state.lock()); + state_->pathInfoCache.erase(storePathToHash(path)); + } +} - if (sqlite3_step(stmtInvalidatePath) != SQLITE_DONE) - throwSQLiteError(db, format("invalidating path ‘%1%’ in database") % path); - /* Note that the foreign key constraints on the Refs table take - care of deleting the references entries for `path'. */ +void LocalStore::addToStore(const ValidPathInfo & info, const std::string & nar, bool repair) +{ + Hash h = hashString(htSHA256, nar); + if (h != info.narHash) + throw Error(format("hash mismatch importing path ‘%s’; expected hash ‘%s’, got ‘%s’") % + info.path % info.narHash.to_string() % h.to_string()); + + if (requireSigs && !info.checkSignatures(publicKeys)) + throw Error(format("cannot import path ‘%s’ because it lacks a valid signature") % info.path); + + addTempRoot(info.path); + + if (repair || !isValidPath(info.path)) { + + PathLocks outputLock; + + /* Lock the output path. But don't lock if we're being called + from a build hook (whose parent process already acquired a + lock on this path). 
*/ + Strings locksHeld = tokenizeString<Strings>(getEnv("NIX_HELD_LOCKS")); + if (find(locksHeld.begin(), locksHeld.end(), info.path) == locksHeld.end()) + outputLock.lockPaths({info.path}); + + if (repair || !isValidPath(info.path)) { + + deletePath(info.path); + + StringSource source(nar); + restorePath(info.path, source); + + canonicalisePathMetaData(info.path, -1); + + optimisePath(info.path); // FIXME: combine with hashPath() + + registerValidPath(info); + } + + outputLock.setDeletion(true); + } } @@ -1375,11 +960,11 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name, /* The first check above is an optimisation to prevent unnecessary lock acquisition. */ - PathLocks outputLock(singleton<PathSet, Path>(dstPath)); + PathLocks outputLock({dstPath}); if (repair || !isValidPath(dstPath)) { - if (pathExists(dstPath)) deletePath(dstPath); + deletePath(dstPath); if (recursive) { StringSource source(dump); @@ -1404,8 +989,9 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name, ValidPathInfo info; info.path = dstPath; - info.hash = hash.first; + info.narHash = hash.first; info.narSize = hash.second; + info.ultimate = true; registerValidPath(info); } @@ -1420,7 +1006,6 @@ Path LocalStore::addToStore(const string & name, const Path & _srcPath, bool recursive, HashType hashAlgo, PathFilter & filter, bool repair) { Path srcPath(absPath(_srcPath)); - debug(format("adding ‘%1%’ to the store") % srcPath); /* Read the whole path into memory. 
This is not a very scalable method for very large paths, but `copyPath' is mainly used for @@ -1429,9 +1014,9 @@ Path LocalStore::addToStore(const string & name, const Path & _srcPath, if (recursive) dumpPath(srcPath, sink, filter); else - sink.s = readFile(srcPath); + sink.s = make_ref<std::string>(readFile(srcPath)); - return addToStoreFromDump(sink.s, name, recursive, hashAlgo, repair); + return addToStoreFromDump(*sink.s, name, recursive, hashAlgo, repair); } @@ -1444,25 +1029,28 @@ Path LocalStore::addTextToStore(const string & name, const string & s, if (repair || !isValidPath(dstPath)) { - PathLocks outputLock(singleton<PathSet, Path>(dstPath)); + PathLocks outputLock({dstPath}); if (repair || !isValidPath(dstPath)) { - if (pathExists(dstPath)) deletePath(dstPath); + deletePath(dstPath); writeFile(dstPath, s); canonicalisePathMetaData(dstPath, -1); - HashResult hash = hashPath(htSHA256, dstPath); + StringSink sink; + dumpString(s, sink); + auto hash = hashString(htSHA256, *sink.s); optimisePath(dstPath); ValidPathInfo info; info.path = dstPath; - info.hash = hash.first; - info.narSize = hash.second; + info.narHash = hash; + info.narSize = sink.s->size(); info.references = references; + info.ultimate = true; registerValidPath(info); } @@ -1473,119 +1061,6 @@ Path LocalStore::addTextToStore(const string & name, const string & s, } -struct HashAndWriteSink : Sink -{ - Sink & writeSink; - HashSink hashSink; - HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256) - { - } - virtual void operator () (const unsigned char * data, size_t len) - { - writeSink(data, len); - hashSink(data, len); - } - Hash currentHash() - { - return hashSink.currentHash().first; - } -}; - - -#define EXPORT_MAGIC 0x4558494e - - -static void checkSecrecy(const Path & path) -{ - struct stat st; - if (stat(path.c_str(), &st)) - throw SysError(format("getting status of ‘%1%’") % path); - if ((st.st_mode & (S_IRWXG | S_IRWXO)) != 0) - throw Error(format("file ‘%1%’ 
should be secret (inaccessible to everybody else)!") % path); -} - - -void LocalStore::exportPath(const Path & path, bool sign, - Sink & sink) -{ - assertStorePath(path); - - printMsg(lvlInfo, format("exporting path ‘%1%’") % path); - - if (!isValidPath(path)) - throw Error(format("path ‘%1%’ is not valid") % path); - - HashAndWriteSink hashAndWriteSink(sink); - - dumpPath(path, hashAndWriteSink); - - /* Refuse to export paths that have changed. This prevents - filesystem corruption from spreading to other machines. - Don't complain if the stored hash is zero (unknown). */ - Hash hash = hashAndWriteSink.currentHash(); - Hash storedHash = queryPathHash(path); - if (hash != storedHash && storedHash != Hash(storedHash.type)) - throw Error(format("hash of path ‘%1%’ has changed from ‘%2%’ to ‘%3%’!") % path - % printHash(storedHash) % printHash(hash)); - - writeInt(EXPORT_MAGIC, hashAndWriteSink); - - writeString(path, hashAndWriteSink); - - PathSet references; - queryReferences(path, references); - writeStrings(references, hashAndWriteSink); - - Path deriver = queryDeriver(path); - writeString(deriver, hashAndWriteSink); - - if (sign) { - Hash hash = hashAndWriteSink.currentHash(); - - writeInt(1, hashAndWriteSink); - - Path tmpDir = createTempDir(); - AutoDelete delTmp(tmpDir); - Path hashFile = tmpDir + "/hash"; - writeFile(hashFile, printHash(hash)); - - Path secretKey = settings.nixConfDir + "/signing-key.sec"; - checkSecrecy(secretKey); - - Strings args; - args.push_back("rsautl"); - args.push_back("-sign"); - args.push_back("-inkey"); - args.push_back(secretKey); - args.push_back("-in"); - args.push_back(hashFile); - string signature = runProgram(OPENSSL_PATH, true, args); - - writeString(signature, hashAndWriteSink); - - } else - writeInt(0, hashAndWriteSink); -} - - -struct HashAndReadSource : Source -{ - Source & readSource; - HashSink hashSink; - bool hashing; - HashAndReadSource(Source & readSource) : readSource(readSource), hashSink(htSHA256) - { - hashing 
= true; - } - size_t read(unsigned char * data, size_t len) - { - size_t n = readSource.read(data, len); - if (hashing) hashSink(data, n); - return n; - } -}; - - /* Create a temporary directory in the store that won't be garbage-collected. */ Path LocalStore::createTempDirInStore() @@ -1602,145 +1077,26 @@ Path LocalStore::createTempDirInStore() } -Path LocalStore::importPath(bool requireSignature, Source & source) -{ - HashAndReadSource hashAndReadSource(source); - - /* We don't yet know what store path this archive contains (the - store path follows the archive data proper), and besides, we - don't know yet whether the signature is valid. */ - Path tmpDir = createTempDirInStore(); - AutoDelete delTmp(tmpDir); - Path unpacked = tmpDir + "/unpacked"; - - restorePath(unpacked, hashAndReadSource); - - unsigned int magic = readInt(hashAndReadSource); - if (magic != EXPORT_MAGIC) - throw Error("Nix archive cannot be imported; wrong format"); - - Path dstPath = readStorePath(hashAndReadSource); - - PathSet references = readStorePaths<PathSet>(hashAndReadSource); - - Path deriver = readString(hashAndReadSource); - if (deriver != "") assertStorePath(deriver); - - Hash hash = hashAndReadSource.hashSink.finish().first; - hashAndReadSource.hashing = false; - - bool haveSignature = readInt(hashAndReadSource) == 1; - - if (requireSignature && !haveSignature) - throw Error(format("imported archive of ‘%1%’ lacks a signature") % dstPath); - - if (haveSignature) { - string signature = readString(hashAndReadSource); - - if (requireSignature) { - Path sigFile = tmpDir + "/sig"; - writeFile(sigFile, signature); - - Strings args; - args.push_back("rsautl"); - args.push_back("-verify"); - args.push_back("-inkey"); - args.push_back(settings.nixConfDir + "/signing-key.pub"); - args.push_back("-pubin"); - args.push_back("-in"); - args.push_back(sigFile); - string hash2 = runProgram(OPENSSL_PATH, true, args); - - /* Note: runProgram() throws an exception if the signature - is invalid. 
*/ - - if (printHash(hash) != hash2) - throw Error( - "signed hash doesn't match actual contents of imported " - "archive; archive could be corrupt, or someone is trying " - "to import a Trojan horse"); - } - } - - /* Do the actual import. */ - - /* !!! way too much code duplication with addTextToStore() etc. */ - addTempRoot(dstPath); - - if (!isValidPath(dstPath)) { - - PathLocks outputLock; - - /* Lock the output path. But don't lock if we're being called - from a build hook (whose parent process already acquired a - lock on this path). */ - Strings locksHeld = tokenizeString<Strings>(getEnv("NIX_HELD_LOCKS")); - if (find(locksHeld.begin(), locksHeld.end(), dstPath) == locksHeld.end()) - outputLock.lockPaths(singleton<PathSet, Path>(dstPath)); - - if (!isValidPath(dstPath)) { - - if (pathExists(dstPath)) deletePath(dstPath); - - if (rename(unpacked.c_str(), dstPath.c_str()) == -1) - throw SysError(format("cannot move ‘%1%’ to ‘%2%’") - % unpacked % dstPath); - - canonicalisePathMetaData(dstPath, -1); - - /* !!! if we were clever, we could prevent the hashPath() - here. */ - HashResult hash = hashPath(htSHA256, dstPath); - - optimisePath(dstPath); // FIXME: combine with hashPath() - - ValidPathInfo info; - info.path = dstPath; - info.hash = hash.first; - info.narSize = hash.second; - info.references = references; - info.deriver = deriver != "" && isValidPath(deriver) ? 
deriver : ""; - registerValidPath(info); - } - - outputLock.setDeletion(true); - } - - return dstPath; -} - - -Paths LocalStore::importPaths(bool requireSignature, Source & source) -{ - Paths res; - while (true) { - unsigned long long n = readLongLong(source); - if (n == 0) break; - if (n != 1) throw Error("input doesn't look like something created by ‘nix-store --export’"); - res.push_back(importPath(requireSignature, source)); - } - return res; -} - - void LocalStore::invalidatePathChecked(const Path & path) { assertStorePath(path); - retry_sqlite { - SQLiteTxn txn(db); + retrySQLite<void>([&]() { + auto state(_state.lock()); + + SQLiteTxn txn(state->db); - if (isValidPath_(path)) { - PathSet referrers; queryReferrers_(path, referrers); + if (isValidPath_(*state, path)) { + PathSet referrers; queryReferrers(*state, path, referrers); referrers.erase(path); /* ignore self-references */ if (!referrers.empty()) throw PathInUse(format("cannot delete path ‘%1%’ because it is in use by %2%") % path % showPaths(referrers)); - invalidatePath(path); + invalidatePath(*state, path); } txn.commit(); - } end_retry_sqlite; + }); } @@ -1761,8 +1117,8 @@ bool LocalStore::verifyStore(bool checkContents, bool repair) PathSet validPaths2 = queryAllValidPaths(), validPaths, done; - foreach (PathSet::iterator, i, validPaths2) - verifyPath(*i, store, done, validPaths, repair, errors); + for (auto & i : validPaths2) + verifyPath(i, store, done, validPaths, repair, errors); /* Release the GC lock so that checking content hashes (which can take ages) doesn't block the GC or builds. */ @@ -1774,45 +1130,48 @@ bool LocalStore::verifyStore(bool checkContents, bool repair) Hash nullHash(htSHA256); - foreach (PathSet::iterator, i, validPaths) { + for (auto & i : validPaths) { try { - ValidPathInfo info = queryPathInfo(*i); + auto info = std::const_pointer_cast<ValidPathInfo>(std::shared_ptr<const ValidPathInfo>(queryPathInfo(i))); /* Check the content hash (optionally - slow). 
*/ - printMsg(lvlTalkative, format("checking contents of ‘%1%’") % *i); - HashResult current = hashPath(info.hash.type, *i); + printMsg(lvlTalkative, format("checking contents of ‘%1%’") % i); + HashResult current = hashPath(info->narHash.type, i); - if (info.hash != nullHash && info.hash != current.first) { + if (info->narHash != nullHash && info->narHash != current.first) { printMsg(lvlError, format("path ‘%1%’ was modified! " "expected hash ‘%2%’, got ‘%3%’") - % *i % printHash(info.hash) % printHash(current.first)); - if (repair) repairPath(*i); else errors = true; + % i % printHash(info->narHash) % printHash(current.first)); + if (repair) repairPath(i); else errors = true; } else { bool update = false; /* Fill in missing hashes. */ - if (info.hash == nullHash) { - printMsg(lvlError, format("fixing missing hash on ‘%1%’") % *i); - info.hash = current.first; + if (info->narHash == nullHash) { + printMsg(lvlError, format("fixing missing hash on ‘%1%’") % i); + info->narHash = current.first; update = true; } /* Fill in missing narSize fields (from old stores). */ - if (info.narSize == 0) { - printMsg(lvlError, format("updating size field on ‘%1%’ to %2%") % *i % current.second); - info.narSize = current.second; + if (info->narSize == 0) { + printMsg(lvlError, format("updating size field on ‘%1%’ to %2%") % i % current.second); + info->narSize = current.second; update = true; } - if (update) updatePathInfo(info); + if (update) { + auto state(_state.lock()); + updatePathInfo(*state, *info); + } } } catch (Error & e) { /* It's possible that the path got GC'ed, so ignore errors on invalid paths. 
*/ - if (isValidPath(*i)) + if (isValidPath(i)) printMsg(lvlError, format("error: %1%") % e.msg()); else printMsg(lvlError, format("warning: %1%") % e.msg()); @@ -1835,7 +1194,8 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store, if (!isStorePath(path)) { printMsg(lvlError, format("path ‘%1%’ is not in the Nix store") % path); - invalidatePath(path); + auto state(_state.lock()); + invalidatePath(*state, path); return; } @@ -1844,16 +1204,17 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store, first, then we can invalidate this path as well. */ bool canInvalidate = true; PathSet referrers; queryReferrers(path, referrers); - foreach (PathSet::iterator, i, referrers) - if (*i != path) { - verifyPath(*i, store, done, validPaths, repair, errors); - if (validPaths.find(*i) != validPaths.end()) + for (auto & i : referrers) + if (i != path) { + verifyPath(i, store, done, validPaths, repair, errors); + if (validPaths.find(i) != validPaths.end()) canInvalidate = false; } if (canInvalidate) { printMsg(lvlError, format("path ‘%1%’ disappeared, removing from database...") % path); - invalidatePath(path); + auto state(_state.lock()); + invalidatePath(*state, path); } else { printMsg(lvlError, format("path ‘%1%’ disappeared, but it still has valid referrers!") % path); if (repair) @@ -1873,114 +1234,6 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store, } -bool LocalStore::pathContentsGood(const Path & path) -{ - std::map<Path, bool>::iterator i = pathContentsGoodCache.find(path); - if (i != pathContentsGoodCache.end()) return i->second; - printMsg(lvlInfo, format("checking path ‘%1%’...") % path); - ValidPathInfo info = queryPathInfo(path); - bool res; - if (!pathExists(path)) - res = false; - else { - HashResult current = hashPath(info.hash.type, path); - Hash nullHash(htSHA256); - res = info.hash == nullHash || info.hash == current.first; - } - pathContentsGoodCache[path] = res; - if (!res) printMsg(lvlError, 
format("path ‘%1%’ is corrupted or missing!") % path); - return res; -} - - -void LocalStore::markContentsGood(const Path & path) -{ - pathContentsGoodCache[path] = true; -} - - -/* Functions for upgrading from the pre-SQLite database. */ - -PathSet LocalStore::queryValidPathsOld() -{ - PathSet paths; - for (auto & i : readDirectory(settings.nixDBPath + "/info")) - if (i.name.at(0) != '.') paths.insert(settings.nixStore + "/" + i.name); - return paths; -} - - -ValidPathInfo LocalStore::queryPathInfoOld(const Path & path) -{ - ValidPathInfo res; - res.path = path; - - /* Read the info file. */ - string baseName = baseNameOf(path); - Path infoFile = (format("%1%/info/%2%") % settings.nixDBPath % baseName).str(); - if (!pathExists(infoFile)) - throw Error(format("path ‘%1%’ is not valid") % path); - string info = readFile(infoFile); - - /* Parse it. */ - Strings lines = tokenizeString<Strings>(info, "\n"); - - foreach (Strings::iterator, i, lines) { - string::size_type p = i->find(':'); - if (p == string::npos) - throw Error(format("corrupt line in ‘%1%’: %2%") % infoFile % *i); - string name(*i, 0, p); - string value(*i, p + 2); - if (name == "References") { - Strings refs = tokenizeString<Strings>(value, " "); - res.references = PathSet(refs.begin(), refs.end()); - } else if (name == "Deriver") { - res.deriver = value; - } else if (name == "Hash") { - res.hash = parseHashField(path, value); - } else if (name == "Registered-At") { - int n = 0; - string2Int(value, n); - res.registrationTime = n; - } - } - - return res; -} - - -/* Upgrade from schema 5 (Nix 0.12) to schema 6 (Nix >= 0.15). 
*/ -void LocalStore::upgradeStore6() -{ - printMsg(lvlError, "upgrading Nix store to new schema (this may take a while)..."); - - openDB(true); - - PathSet validPaths = queryValidPathsOld(); - - SQLiteTxn txn(db); - - foreach (PathSet::iterator, i, validPaths) { - addValidPath(queryPathInfoOld(*i), false); - std::cerr << "."; - } - - std::cerr << "|"; - - foreach (PathSet::iterator, i, validPaths) { - ValidPathInfo info = queryPathInfoOld(*i); - unsigned long long referrer = queryValidPathId(*i); - foreach (PathSet::iterator, j, info.references) - addReference(referrer, queryValidPathId(*j)); - std::cerr << "."; - } - - std::cerr << "\n"; - - txn.commit(); -} - - #if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL) static void makeMutable(const Path & path) @@ -2035,8 +1288,41 @@ void LocalStore::upgradeStore7() void LocalStore::vacuumDB() { - if (sqlite3_exec(db, "vacuum;", 0, 0, 0) != SQLITE_OK) - throwSQLiteError(db, "vacuuming SQLite database"); + auto state(_state.lock()); + + if (sqlite3_exec(state->db, "vacuum;", 0, 0, 0) != SQLITE_OK) + throwSQLiteError(state->db, "vacuuming SQLite database"); +} + + +void LocalStore::addSignatures(const Path & storePath, const StringSet & sigs) +{ + retrySQLite<void>([&]() { + auto state(_state.lock()); + + SQLiteTxn txn(state->db); + + auto info = std::const_pointer_cast<ValidPathInfo>(std::shared_ptr<const ValidPathInfo>(queryPathInfo(storePath))); + + info->sigs.insert(sigs.begin(), sigs.end()); + + updatePathInfo(*state, *info); + + txn.commit(); + }); +} + + +void LocalStore::signPathInfo(ValidPathInfo & info) +{ + // FIXME: keep secret keys in memory. 
+ + auto secretKeyFiles = settings.get("secret-key-files", Strings()); + + for (auto & secretKeyFile : secretKeyFiles) { + SecretKey secretKey(readFile(secretKeyFile)); + info.sign(secretKey); + } } diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 819f59327a23..2a3f452bc5c7 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -1,15 +1,14 @@ #pragma once -#include <string> -#include <unordered_set> +#include "sqlite.hh" +#include "pathlocks.hh" #include "store-api.hh" +#include "sync.hh" #include "util.hh" -#include "pathlocks.hh" - -class sqlite3; -class sqlite3_stmt; +#include <string> +#include <unordered_set> namespace nix { @@ -18,8 +17,8 @@ namespace nix { /* Nix store and database schema version. Version 1 (or 0) was Nix <= 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10. Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is - Nix 1.0. Version 7 is Nix 1.3. */ -const int nixSchemaVersion = 7; + Nix 1.0. Version 7 is Nix 1.3. Version 9 is 1.12. */ +const int nixSchemaVersion = 9; extern string drvsLogDir; @@ -41,98 +40,88 @@ struct OptimiseStats }; -struct RunningSubstituter -{ - Path program; - Pid pid; - AutoCloseFD to, from, error; - FdSource fromBuf; - bool disabled; - RunningSubstituter() : disabled(false) { }; -}; - - -/* Wrapper object to close the SQLite database automatically. */ -struct SQLite -{ - sqlite3 * db; - SQLite() { db = 0; } - ~SQLite(); - operator sqlite3 * () { return db; } -}; - - -/* Wrapper object to create and destroy SQLite prepared statements. 
*/ -struct SQLiteStmt -{ - sqlite3 * db; - sqlite3_stmt * stmt; - unsigned int curArg; - SQLiteStmt() { stmt = 0; } - void create(sqlite3 * db, const string & s); - void reset(); - ~SQLiteStmt(); - operator sqlite3_stmt * () { return stmt; } - void bind(const string & value); - void bind(int value); - void bind64(long long value); - void bind(); -}; - - -class LocalStore : public StoreAPI +class LocalStore : public LocalFSStore { private: - typedef std::map<Path, RunningSubstituter> RunningSubstituters; - RunningSubstituters runningSubstituters; - Path linksDir; + /* Lock file used for upgrading. */ + AutoCloseFD globalLock; + + struct State + { + /* The SQLite database object. */ + SQLite db; + + /* Some precompiled SQLite statements. */ + SQLiteStmt stmtRegisterValidPath; + SQLiteStmt stmtUpdatePathInfo; + SQLiteStmt stmtAddReference; + SQLiteStmt stmtQueryPathInfo; + SQLiteStmt stmtQueryReferences; + SQLiteStmt stmtQueryReferrers; + SQLiteStmt stmtInvalidatePath; + SQLiteStmt stmtAddDerivationOutput; + SQLiteStmt stmtQueryValidDerivers; + SQLiteStmt stmtQueryDerivationOutputs; + SQLiteStmt stmtQueryPathFromHashPart; + SQLiteStmt stmtQueryValidPaths; + + /* The file to which we write our temporary roots. */ + Path fnTempRoots; + AutoCloseFD fdTempRoots; + }; + + Sync<State, std::recursive_mutex> _state; + + const Path linksDir; + const Path reservedPath; + const Path schemaPath; + + bool requireSigs; + + PublicKeys publicKeys; public: /* Initialise the local store, upgrading the schema if necessary. */ - LocalStore(bool reserveSpace = true); + LocalStore(); ~LocalStore(); /* Implementations of abstract store API methods. 
*/ - bool isValidPath(const Path & path); - - PathSet queryValidPaths(const PathSet & paths); + std::string getUri() override; - PathSet queryAllValidPaths(); + bool isValidPathUncached(const Path & path) override; - ValidPathInfo queryPathInfo(const Path & path); + PathSet queryValidPaths(const PathSet & paths) override; - Hash queryPathHash(const Path & path); + PathSet queryAllValidPaths() override; - void queryReferences(const Path & path, PathSet & references); + std::shared_ptr<ValidPathInfo> queryPathInfoUncached(const Path & path) override; - void queryReferrers(const Path & path, PathSet & referrers); + void queryReferrers(const Path & path, PathSet & referrers) override; - Path queryDeriver(const Path & path); + PathSet queryValidDerivers(const Path & path) override; - PathSet queryValidDerivers(const Path & path); + PathSet queryDerivationOutputs(const Path & path) override; - PathSet queryDerivationOutputs(const Path & path); + StringSet queryDerivationOutputNames(const Path & path) override; - StringSet queryDerivationOutputNames(const Path & path); + Path queryPathFromHashPart(const string & hashPart) override; - Path queryPathFromHashPart(const string & hashPart); - - PathSet querySubstitutablePaths(const PathSet & paths); - - void querySubstitutablePathInfos(const Path & substituter, - PathSet & paths, SubstitutablePathInfos & infos); + PathSet querySubstitutablePaths(const PathSet & paths) override; void querySubstitutablePathInfos(const PathSet & paths, - SubstitutablePathInfos & infos); + SubstitutablePathInfos & infos) override; + + void addToStore(const ValidPathInfo & info, const std::string & nar, + bool repair) override; Path addToStore(const string & name, const Path & srcPath, bool recursive = true, HashType hashAlgo = htSHA256, - PathFilter & filter = defaultPathFilter, bool repair = false); + PathFilter & filter = defaultPathFilter, bool repair = false) override; /* Like addToStore(), but the contents of the path are contained in `dump', 
which is either a NAR serialisation (if recursive == @@ -142,40 +131,35 @@ public: bool recursive = true, HashType hashAlgo = htSHA256, bool repair = false); Path addTextToStore(const string & name, const string & s, - const PathSet & references, bool repair = false); + const PathSet & references, bool repair = false) override; - void exportPath(const Path & path, bool sign, - Sink & sink); + void buildPaths(const PathSet & paths, BuildMode buildMode) override; - Paths importPaths(bool requireSignature, Source & source); + BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv, + BuildMode buildMode) override; - void buildPaths(const PathSet & paths, BuildMode buildMode); + void ensurePath(const Path & path) override; - void ensurePath(const Path & path); + void addTempRoot(const Path & path) override; - void addTempRoot(const Path & path); + void addIndirectRoot(const Path & path) override; - void addIndirectRoot(const Path & path); + void syncWithGC() override; - void syncWithGC(); + Roots findRoots() override; - Roots findRoots(); - - void collectGarbage(const GCOptions & options, GCResults & results); + void collectGarbage(const GCOptions & options, GCResults & results) override; /* Optimise the disk space usage of the Nix store by hard-linking files with the same contents. */ void optimiseStore(OptimiseStats & stats); - /* Generic variant of the above method. */ - void optimiseStore(); + void optimiseStore() override; /* Optimise a single store path. */ void optimisePath(const Path & path); - /* Check the integrity of the Nix store. Returns true if errors - remain. 
*/ - bool verifyStore(bool checkContents, bool repair); + bool verifyStore(bool checkContents, bool repair) override; /* Register the validity of a path, i.e., that `path' exists, that the paths referenced by it exists, and in the case of an output @@ -187,84 +171,29 @@ public: void registerValidPaths(const ValidPathInfos & infos); - /* Register that the build of a derivation with output `path' has - failed. */ - void registerFailedPath(const Path & path); - - /* Query whether `path' previously failed to build. */ - bool hasPathFailed(const Path & path); - - PathSet queryFailedPaths(); - - void clearFailedPaths(const PathSet & paths); - void vacuumDB(); /* Repair the contents of the given path by redownloading it using a substituter (if available). */ void repairPath(const Path & path); - /* Check whether the given valid path exists and has the right - contents. */ - bool pathContentsGood(const Path & path); - - void markContentsGood(const Path & path); + void addSignatures(const Path & storePath, const StringSet & sigs) override; - void setSubstituterEnv(); + static bool haveWriteAccess(); private: - Path schemaPath; - - /* Lock file used for upgrading. */ - AutoCloseFD globalLock; - - /* The SQLite database object. */ - SQLite db; - - /* Some precompiled SQLite statements. */ - SQLiteStmt stmtRegisterValidPath; - SQLiteStmt stmtUpdatePathInfo; - SQLiteStmt stmtAddReference; - SQLiteStmt stmtQueryPathInfo; - SQLiteStmt stmtQueryReferences; - SQLiteStmt stmtQueryReferrers; - SQLiteStmt stmtInvalidatePath; - SQLiteStmt stmtRegisterFailedPath; - SQLiteStmt stmtHasPathFailed; - SQLiteStmt stmtQueryFailedPaths; - SQLiteStmt stmtClearFailedPath; - SQLiteStmt stmtAddDerivationOutput; - SQLiteStmt stmtQueryValidDerivers; - SQLiteStmt stmtQueryDerivationOutputs; - SQLiteStmt stmtQueryPathFromHashPart; - - /* Cache for pathContentsGood(). */ - std::map<Path, bool> pathContentsGoodCache; - - bool didSetSubstituterEnv; - - /* The file to which we write our temporary roots. 
*/ - Path fnTempRoots; - AutoCloseFD fdTempRoots; - int getSchema(); - void openDB(bool create); + void openDB(State & state, bool create); void makeStoreWritable(); - unsigned long long queryValidPathId(const Path & path); - - unsigned long long addValidPath(const ValidPathInfo & info, bool checkOutputs = true); + uint64_t queryValidPathId(State & state, const Path & path); - void addReference(unsigned long long referrer, unsigned long long reference); + uint64_t addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs = true); - void appendReferrer(const Path & from, const Path & to, bool lock); - - void rewriteReferrers(const Path & path, bool purge, PathSet referrers); - - void invalidatePath(const Path & path); + void invalidatePath(State & state, const Path & path); /* Delete a path from the Nix store. */ void invalidatePathChecked(const Path & path); @@ -272,7 +201,7 @@ private: void verifyPath(const Path & path, const PathSet & store, PathSet & done, PathSet & validPaths, bool repair, bool & errors); - void updatePathInfo(const ValidPathInfo & info); + void updatePathInfo(State & state, const ValidPathInfo & info); void upgradeStore6(); void upgradeStore7(); @@ -294,19 +223,14 @@ private: int openGCLock(LockType lockType); - void removeUnusedLinks(const GCState & state); + void findRoots(const Path & path, unsigned char type, Roots & roots); - void startSubstituter(const Path & substituter, - RunningSubstituter & runningSubstituter); + void findRuntimeRoots(PathSet & roots); - string getLineFromSubstituter(RunningSubstituter & run); - - template<class T> T getIntLineFromSubstituter(RunningSubstituter & run); + void removeUnusedLinks(const GCState & state); Path createTempDirInStore(); - Path importPath(bool requireSignature, Source & source); - void checkDerivationOutputs(const Path & drvPath, const Derivation & drv); typedef std::unordered_set<ino_t> InodeHash; @@ -316,8 +240,15 @@ private: void optimisePath_(OptimiseStats & stats, const 
Path & path, InodeHash & inodeHash); // Internal versions that are not wrapped in retry_sqlite. - bool isValidPath_(const Path & path); - void queryReferrers_(const Path & path, PathSet & referrers); + bool isValidPath_(State & state, const Path & path); + void queryReferrers(State & state, const Path & path, PathSet & referrers); + + /* Add signatures to a ValidPathInfo using the secret keys + specified by the ‘secret-key-files’ option. */ + void signPathInfo(ValidPathInfo & info); + + friend class DerivationGoal; + friend class SubstitutionGoal; }; diff --git a/src/libstore/local.mk b/src/libstore/local.mk index 771c06753a65..9d5c04dca0c5 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -8,7 +8,11 @@ libstore_SOURCES := $(wildcard $(d)/*.cc) libstore_LIBS = libutil libformat -libstore_LDFLAGS = -lsqlite3 -lbz2 +libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread + +ifeq ($(ENABLE_S3), 1) + libstore_LDFLAGS += -laws-cpp-sdk-s3 -laws-cpp-sdk-core +endif ifeq ($(OS), SunOS) libstore_LDFLAGS += -lsocket @@ -33,3 +37,4 @@ $(d)/local-store.cc: $(d)/schema.sql.hh clean-files += $(d)/schema.sql.hh $(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644)) +$(eval $(call install-file-in, $(d)/sandbox-defaults.sb, $(datadir)/nix, 0644)) diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index 736434ca4895..5c284d1b9ab2 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -1,21 +1,21 @@ -#include "misc.hh" -#include "store-api.hh" -#include "local-store.hh" +#include "derivations.hh" #include "globals.hh" +#include "local-store.hh" +#include "store-api.hh" namespace nix { -Derivation derivationFromPath(StoreAPI & store, const Path & drvPath) +Derivation Store::derivationFromPath(const Path & drvPath) { assertStorePath(drvPath); - store.ensurePath(drvPath); + ensurePath(drvPath); return readDerivation(drvPath); } -void computeFSClosure(StoreAPI & store, const Path & path, +void 
Store::computeFSClosure(const Path & path, PathSet & paths, bool flipDirection, bool includeOutputs, bool includeDerivers) { if (paths.find(path) != paths.end()) return; @@ -24,50 +24,41 @@ void computeFSClosure(StoreAPI & store, const Path & path, PathSet edges; if (flipDirection) { - store.queryReferrers(path, edges); + queryReferrers(path, edges); if (includeOutputs) { - PathSet derivers = store.queryValidDerivers(path); - foreach (PathSet::iterator, i, derivers) - edges.insert(*i); + PathSet derivers = queryValidDerivers(path); + for (auto & i : derivers) + edges.insert(i); } if (includeDerivers && isDerivation(path)) { - PathSet outputs = store.queryDerivationOutputs(path); - foreach (PathSet::iterator, i, outputs) - if (store.isValidPath(*i) && store.queryDeriver(*i) == path) - edges.insert(*i); + PathSet outputs = queryDerivationOutputs(path); + for (auto & i : outputs) + if (isValidPath(i) && queryPathInfo(i)->deriver == path) + edges.insert(i); } } else { - store.queryReferences(path, edges); + auto info = queryPathInfo(path); + edges = info->references; if (includeOutputs && isDerivation(path)) { - PathSet outputs = store.queryDerivationOutputs(path); - foreach (PathSet::iterator, i, outputs) - if (store.isValidPath(*i)) edges.insert(*i); + PathSet outputs = queryDerivationOutputs(path); + for (auto & i : outputs) + if (isValidPath(i)) edges.insert(i); } - if (includeDerivers) { - Path deriver = store.queryDeriver(path); - if (store.isValidPath(deriver)) edges.insert(deriver); - } + if (includeDerivers && isValidPath(info->deriver)) + edges.insert(info->deriver); } - foreach (PathSet::iterator, i, edges) - computeFSClosure(store, *i, paths, flipDirection, includeOutputs, includeDerivers); -} - - -Path findOutput(const Derivation & drv, string id) -{ - foreach (DerivationOutputs::const_iterator, i, drv.outputs) - if (i->first == id) return i->second.path; - throw Error(format("derivation has no output ‘%1%’") % id); + for (auto & i : edges) + 
computeFSClosure(i, paths, flipDirection, includeOutputs, includeDerivers); } -void queryMissing(StoreAPI & store, const PathSet & targets, +void Store::queryMissing(const PathSet & targets, PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown, unsigned long long & downloadSize, unsigned long long & narSize) { @@ -98,60 +89,60 @@ void queryMissing(StoreAPI & store, const PathSet & targets, PathSet query, todoDrv, todoNonDrv; - foreach (PathSet::iterator, i, todo) { - if (done.find(*i) != done.end()) continue; - done.insert(*i); + for (auto & i : todo) { + if (done.find(i) != done.end()) continue; + done.insert(i); - DrvPathWithOutputs i2 = parseDrvPathWithOutputs(*i); + DrvPathWithOutputs i2 = parseDrvPathWithOutputs(i); if (isDerivation(i2.first)) { - if (!store.isValidPath(i2.first)) { + if (!isValidPath(i2.first)) { // FIXME: we could try to substitute p. - unknown.insert(*i); + unknown.insert(i); continue; } - Derivation drv = derivationFromPath(store, i2.first); + Derivation drv = derivationFromPath(i2.first); PathSet invalid; - foreach (DerivationOutputs::iterator, j, drv.outputs) - if (wantOutput(j->first, i2.second) - && !store.isValidPath(j->second.path)) - invalid.insert(j->second.path); + for (auto & j : drv.outputs) + if (wantOutput(j.first, i2.second) + && !isValidPath(j.second.path)) + invalid.insert(j.second.path); if (invalid.empty()) continue; - todoDrv.insert(*i); - if (settings.useSubstitutes && substitutesAllowed(drv)) + todoDrv.insert(i); + if (settings.useSubstitutes && drv.substitutesAllowed()) query.insert(invalid.begin(), invalid.end()); } else { - if (store.isValidPath(*i)) continue; - query.insert(*i); - todoNonDrv.insert(*i); + if (isValidPath(i)) continue; + query.insert(i); + todoNonDrv.insert(i); } } todo.clear(); SubstitutablePathInfos infos; - store.querySubstitutablePathInfos(query, infos); + querySubstitutablePathInfos(query, infos); - foreach (PathSet::iterator, i, todoDrv) { - DrvPathWithOutputs i2 = 
parseDrvPathWithOutputs(*i); + for (auto & i : todoDrv) { + DrvPathWithOutputs i2 = parseDrvPathWithOutputs(i); // FIXME: cache this - Derivation drv = derivationFromPath(store, i2.first); + Derivation drv = derivationFromPath(i2.first); PathSet outputs; bool mustBuild = false; - if (settings.useSubstitutes && substitutesAllowed(drv)) { - foreach (DerivationOutputs::iterator, j, drv.outputs) { - if (!wantOutput(j->first, i2.second)) continue; - if (!store.isValidPath(j->second.path)) { - if (infos.find(j->second.path) == infos.end()) + if (settings.useSubstitutes && drv.substitutesAllowed()) { + for (auto & j : drv.outputs) { + if (!wantOutput(j.first, i2.second)) continue; + if (!isValidPath(j.second.path)) { + if (infos.find(j.second.path) == infos.end()) mustBuild = true; else - outputs.insert(j->second.path); + outputs.insert(j.second.path); } } } else @@ -160,59 +151,61 @@ void queryMissing(StoreAPI & store, const PathSet & targets, if (mustBuild) { willBuild.insert(i2.first); todo.insert(drv.inputSrcs.begin(), drv.inputSrcs.end()); - foreach (DerivationInputs::iterator, j, drv.inputDrvs) - todo.insert(makeDrvPathWithOutputs(j->first, j->second)); + for (auto & j : drv.inputDrvs) + todo.insert(makeDrvPathWithOutputs(j.first, j.second)); } else todoNonDrv.insert(outputs.begin(), outputs.end()); } - foreach (PathSet::iterator, i, todoNonDrv) { - done.insert(*i); - SubstitutablePathInfos::iterator info = infos.find(*i); + for (auto & i : todoNonDrv) { + done.insert(i); + SubstitutablePathInfos::iterator info = infos.find(i); if (info != infos.end()) { - willSubstitute.insert(*i); + willSubstitute.insert(i); downloadSize += info->second.downloadSize; narSize += info->second.narSize; todo.insert(info->second.references.begin(), info->second.references.end()); } else - unknown.insert(*i); + unknown.insert(i); } } } -static void dfsVisit(StoreAPI & store, const PathSet & paths, - const Path & path, PathSet & visited, Paths & sorted, - PathSet & parents) +Paths 
Store::topoSortPaths(const PathSet & paths) { - if (parents.find(path) != parents.end()) - throw BuildError(format("cycle detected in the references of ‘%1%’") % path); + Paths sorted; + PathSet visited, parents; - if (visited.find(path) != visited.end()) return; - visited.insert(path); - parents.insert(path); + std::function<void(const Path & path)> dfsVisit; - PathSet references; - if (store.isValidPath(path)) - store.queryReferences(path, references); + dfsVisit = [&](const Path & path) { + if (parents.find(path) != parents.end()) + throw BuildError(format("cycle detected in the references of ‘%1%’") % path); - foreach (PathSet::iterator, i, references) - /* Don't traverse into paths that don't exist. That can - happen due to substitutes for non-existent paths. */ - if (*i != path && paths.find(*i) != paths.end()) - dfsVisit(store, paths, *i, visited, sorted, parents); + if (visited.find(path) != visited.end()) return; + visited.insert(path); + parents.insert(path); - sorted.push_front(path); - parents.erase(path); -} + PathSet references; + try { + references = queryPathInfo(path)->references; + } catch (InvalidPath &) { + } + for (auto & i : references) + /* Don't traverse into paths that don't exist. That can + happen due to substitutes for non-existent paths. */ + if (i != path && paths.find(i) != paths.end()) + dfsVisit(i); + + sorted.push_front(path); + parents.erase(path); + }; + + for (auto & i : paths) + dfsVisit(i); -Paths topoSortPaths(StoreAPI & store, const PathSet & paths) -{ - Paths sorted; - PathSet visited, parents; - foreach (PathSet::const_iterator, i, paths) - dfsVisit(store, paths, *i, visited, sorted, parents); return sorted; } diff --git a/src/libstore/misc.hh b/src/libstore/misc.hh deleted file mode 100644 index d3e31d51f72c..000000000000 --- a/src/libstore/misc.hh +++ /dev/null @@ -1,40 +0,0 @@ -#pragma once - -#include "derivations.hh" - - -namespace nix { - - -/* Read a derivation, after ensuring its existence through - ensurePath(). 
*/ -Derivation derivationFromPath(StoreAPI & store, const Path & drvPath); - -/* Place in `paths' the set of all store paths in the file system - closure of `storePath'; that is, all paths than can be directly or - indirectly reached from it. `paths' is not cleared. If - `flipDirection' is true, the set of paths that can reach - `storePath' is returned; that is, the closures under the - `referrers' relation instead of the `references' relation is - returned. */ -void computeFSClosure(StoreAPI & store, const Path & path, - PathSet & paths, bool flipDirection = false, - bool includeOutputs = false, bool includeDerivers = false); - -/* Return the path corresponding to the output identifier `id' in the - given derivation. */ -Path findOutput(const Derivation & drv, string id); - -/* Given a set of paths that are to be built, return the set of - derivations that will be built, and the set of output paths that - will be substituted. */ -void queryMissing(StoreAPI & store, const PathSet & targets, - PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown, - unsigned long long & downloadSize, unsigned long long & narSize); - -bool willBuildLocally(const Derivation & drv); - -bool substitutesAllowed(const Derivation & drv); - - -} diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc new file mode 100644 index 000000000000..8896862be149 --- /dev/null +++ b/src/libstore/nar-accessor.cc @@ -0,0 +1,141 @@ +#include "nar-accessor.hh" +#include "archive.hh" + +#include <map> + +namespace nix { + +struct NarMember +{ + FSAccessor::Type type; + + bool isExecutable; + + /* If this is a regular file, position of the contents of this + file in the NAR. */ + size_t start, size; + + std::string target; +}; + +struct NarIndexer : ParseSink, StringSource +{ + // FIXME: should store this as a tree. Now we're vulnerable to + // O(nm) memory consumption (e.g. for x_0/.../x_n/{y_0..y_m}). 
+ typedef std::map<Path, NarMember> Members; + Members members; + + Path currentPath; + std::string currentStart; + bool isExec; + + NarIndexer(const std::string & nar) : StringSource(nar) + { + } + + void createDirectory(const Path & path) override + { + members.emplace(path, + NarMember{FSAccessor::Type::tDirectory, false, 0, 0}); + } + + void createRegularFile(const Path & path) override + { + currentPath = path; + } + + void isExecutable() override + { + isExec = true; + } + + void preallocateContents(unsigned long long size) override + { + currentStart = string(s, pos, 16); + members.emplace(currentPath, + NarMember{FSAccessor::Type::tRegular, isExec, pos, size}); + } + + void receiveContents(unsigned char * data, unsigned int len) override + { + // Sanity check + if (!currentStart.empty()) { + assert(len < 16 || currentStart == string((char *) data, 16)); + currentStart.clear(); + } + } + + void createSymlink(const Path & path, const string & target) override + { + members.emplace(path, + NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target}); + } + + Members::iterator find(const Path & path) + { + auto i = members.find(path); + if (i == members.end()) + throw Error(format("NAR file does not contain path ‘%1%’") % path); + return i; + } +}; + +struct NarAccessor : public FSAccessor +{ + ref<const std::string> nar; + NarIndexer indexer; + + NarAccessor(ref<const std::string> nar) : nar(nar), indexer(*nar) + { + parseDump(indexer, indexer); + } + + Stat stat(const Path & path) override + { + auto i = indexer.members.find(path); + if (i == indexer.members.end()) + return {FSAccessor::Type::tMissing, 0, false}; + return {i->second.type, i->second.size, i->second.isExecutable}; + } + + StringSet readDirectory(const Path & path) override + { + auto i = indexer.find(path); + + if (i->second.type != FSAccessor::Type::tDirectory) + throw Error(format("path ‘%1%’ inside NAR file is not a directory") % path); + + ++i; + StringSet res; + while (i != 
indexer.members.end() && isInDir(i->first, path)) { + // FIXME: really bad performance. + if (i->first.find('/', path.size() + 1) == std::string::npos) + res.insert(std::string(i->first, path.size() + 1)); + ++i; + } + return res; + } + + std::string readFile(const Path & path) override + { + auto i = indexer.find(path); + if (i->second.type != FSAccessor::Type::tRegular) + throw Error(format("path ‘%1%’ inside NAR file is not a regular file") % path); + return std::string(*nar, i->second.start, i->second.size); + } + + std::string readLink(const Path & path) override + { + auto i = indexer.find(path); + if (i->second.type != FSAccessor::Type::tSymlink) + throw Error(format("path ‘%1%’ inside NAR file is not a symlink") % path); + return i->second.target; + } +}; + +ref<FSAccessor> makeNarAccessor(ref<const std::string> nar) +{ + return make_ref<NarAccessor>(nar); +} + +} diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh new file mode 100644 index 000000000000..83c570be4c7b --- /dev/null +++ b/src/libstore/nar-accessor.hh @@ -0,0 +1,11 @@ +#pragma once + +#include "fs-accessor.hh" + +namespace nix { + +/* Return an object that provides access to the contents of a NAR + file. 
*/ +ref<FSAccessor> makeNarAccessor(ref<const std::string> nar); + +} diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc new file mode 100644 index 000000000000..ae368e152866 --- /dev/null +++ b/src/libstore/nar-info-disk-cache.cc @@ -0,0 +1,224 @@ +#include "nar-info-disk-cache.hh" +#include "sync.hh" +#include "sqlite.hh" +#include "globals.hh" + +#include <sqlite3.h> + +namespace nix { + +static const char * schema = R"sql( + +create table if not exists BinaryCaches ( + id integer primary key autoincrement not null, + url text unique not null, + timestamp integer not null, + storeDir text not null, + wantMassQuery integer not null, + priority integer not null +); + +create table if not exists NARs ( + cache integer not null, + hashPart text not null, + namePart text not null, + url text, + compression text, + fileHash text, + fileSize integer, + narHash text, + narSize integer, + refs text, + deriver text, + sigs text, + timestamp integer not null, + primary key (cache, hashPart), + foreign key (cache) references BinaryCaches(id) on delete cascade +); + +create table if not exists NARExistence ( + cache integer not null, + storePath text not null, + exist integer not null, + timestamp integer not null, + primary key (cache, storePath), + foreign key (cache) references BinaryCaches(id) on delete cascade +); + +)sql"; + +class NarInfoDiskCacheImpl : public NarInfoDiskCache +{ +public: + + /* How long negative lookups are valid. 
*/ + const int ttlNegative = 3600; + + struct State + { + SQLite db; + SQLiteStmt insertCache, queryCache, insertNAR, queryNAR, insertNARExistence, queryNARExistence; + std::map<std::string, int> caches; + }; + + Sync<State> _state; + + NarInfoDiskCacheImpl() + { + auto state(_state.lock()); + + Path dbPath = getCacheDir() + "/nix/binary-cache-v4.sqlite"; + createDirs(dirOf(dbPath)); + + if (sqlite3_open_v2(dbPath.c_str(), &state->db.db, + SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0) != SQLITE_OK) + throw Error(format("cannot open store cache ‘%s’") % dbPath); + + if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK) + throwSQLiteError(state->db, "setting timeout"); + + // We can always reproduce the cache. + if (sqlite3_exec(state->db, "pragma synchronous = off", 0, 0, 0) != SQLITE_OK) + throwSQLiteError(state->db, "making database asynchronous"); + if (sqlite3_exec(state->db, "pragma main.journal_mode = truncate", 0, 0, 0) != SQLITE_OK) + throwSQLiteError(state->db, "setting journal mode"); + + if (sqlite3_exec(state->db, schema, 0, 0, 0) != SQLITE_OK) + throwSQLiteError(state->db, "initialising database schema"); + + state->insertCache.create(state->db, + "insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)"); + + state->queryCache.create(state->db, + "select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?"); + + state->insertNAR.create(state->db, + "insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, " + "narSize, refs, deriver, sigs, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"); + + state->queryNAR.create(state->db, + "select * from NARs where cache = ? 
and hashPart = ?"); + + state->insertNARExistence.create(state->db, + "insert or replace into NARExistence(cache, storePath, exist, timestamp) values (?, ?, ?, ?)"); + + state->queryNARExistence.create(state->db, + "select exist, timestamp from NARExistence where cache = ? and storePath = ?"); + } + + int uriToInt(State & state, const std::string & uri) + { + auto i = state.caches.find(uri); + if (i == state.caches.end()) abort(); + return i->second; + } + + void createCache(const std::string & uri, bool wantMassQuery, int priority) override + { + auto state(_state.lock()); + + // FIXME: race + + state->insertCache.use()(uri)(time(0))(settings.nixStore)(wantMassQuery)(priority).exec(); + assert(sqlite3_changes(state->db) == 1); + state->caches[uri] = sqlite3_last_insert_rowid(state->db); + } + + bool cacheExists(const std::string & uri) override + { + auto state(_state.lock()); + + auto i = state->caches.find(uri); + if (i != state->caches.end()) return true; + + auto queryCache(state->queryCache.use()(uri)); + + if (queryCache.next()) { + state->caches[uri] = queryCache.getInt(0); + return true; + } + + return false; + } + + std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo( + const std::string & uri, const std::string & hashPart) override + { + auto state(_state.lock()); + + auto queryNAR(state->queryNAR.use() + (uriToInt(*state, uri)) + (hashPart)); + + if (!queryNAR.next()) + // FIXME: check NARExistence + return {oUnknown, 0}; + + auto narInfo = make_ref<NarInfo>(); + + // FIXME: implement TTL. + + auto namePart = queryNAR.getStr(2); + narInfo->path = settings.nixStore + "/" + + hashPart + (namePart.empty() ? 
"" : "-" + namePart); + narInfo->url = queryNAR.getStr(3); + narInfo->compression = queryNAR.getStr(4); + if (!queryNAR.isNull(5)) + narInfo->fileHash = parseHash(queryNAR.getStr(5)); + narInfo->fileSize = queryNAR.getInt(6); + narInfo->narHash = parseHash(queryNAR.getStr(7)); + narInfo->narSize = queryNAR.getInt(8); + for (auto & r : tokenizeString<Strings>(queryNAR.getStr(9), " ")) + narInfo->references.insert(settings.nixStore + "/" + r); + if (!queryNAR.isNull(10)) + narInfo->deriver = settings.nixStore + "/" + queryNAR.getStr(10); + for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(11), " ")) + narInfo->sigs.insert(sig); + + return {oValid, narInfo}; + } + + void upsertNarInfo( + const std::string & uri, const std::string & hashPart, + std::shared_ptr<ValidPathInfo> info) override + { + auto state(_state.lock()); + + if (info) { + + auto narInfo = std::dynamic_pointer_cast<NarInfo>(info); + + assert(hashPart == storePathToHash(info->path)); + + state->insertNAR.use() + (uriToInt(*state, uri)) + (hashPart) + (storePathToName(info->path)) + (narInfo ? narInfo->url : "", narInfo != 0) + (narInfo ? narInfo->compression : "", narInfo != 0) + (narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash) + (narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize) + (info->narHash.to_string()) + (info->narSize) + (concatStringsSep(" ", info->shortRefs())) + (info->deriver != "" ? 
baseNameOf(info->deriver) : "", info->deriver != "") + (concatStringsSep(" ", info->sigs)) + (time(0)).exec(); + + } else { + // not implemented + abort(); + } + } +}; + +ref<NarInfoDiskCache> getNarInfoDiskCache() +{ + static Sync<std::shared_ptr<NarInfoDiskCache>> cache; + + auto cache_(cache.lock()); + if (!*cache_) *cache_ = std::make_shared<NarInfoDiskCacheImpl>(); + return ref<NarInfoDiskCache>(*cache_); +} + +} diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh new file mode 100644 index 000000000000..ce5da062c5e3 --- /dev/null +++ b/src/libstore/nar-info-disk-cache.hh @@ -0,0 +1,29 @@ +#pragma once + +#include "ref.hh" +#include "nar-info.hh" + +namespace nix { + +class NarInfoDiskCache +{ +public: + typedef enum { oValid, oInvalid, oUnknown } Outcome; + + virtual void createCache(const std::string & uri, bool wantMassQuery, int priority) = 0; + + virtual bool cacheExists(const std::string & uri) = 0; + + virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo( + const std::string & uri, const std::string & hashPart) = 0; + + virtual void upsertNarInfo( + const std::string & uri, const std::string & hashPart, + std::shared_ptr<ValidPathInfo> info) = 0; +}; + +/* Return a singleton cache object that can be used concurrently by + multiple threads. 
*/ +ref<NarInfoDiskCache> getNarInfoDiskCache(); + +} diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc new file mode 100644 index 000000000000..c0c5cecd1730 --- /dev/null +++ b/src/libstore/nar-info.cc @@ -0,0 +1,106 @@ +#include "globals.hh" +#include "nar-info.hh" + +namespace nix { + +NarInfo::NarInfo(const std::string & s, const std::string & whence) +{ + auto corrupt = [&]() [[noreturn]] { + throw Error("NAR info file ‘%1%’ is corrupt"); + }; + + auto parseHashField = [&](const string & s) { + try { + return parseHash(s); + } catch (BadHash &) { + corrupt(); + } + }; + + size_t pos = 0; + while (pos < s.size()) { + + size_t colon = s.find(':', pos); + if (colon == std::string::npos) corrupt(); + + std::string name(s, pos, colon - pos); + + size_t eol = s.find('\n', colon + 2); + if (eol == std::string::npos) corrupt(); + + std::string value(s, colon + 2, eol - colon - 2); + + if (name == "StorePath") { + if (!isStorePath(value)) corrupt(); + path = value; + } + else if (name == "URL") + url = value; + else if (name == "Compression") + compression = value; + else if (name == "FileHash") + fileHash = parseHashField(value); + else if (name == "FileSize") { + if (!string2Int(value, fileSize)) corrupt(); + } + else if (name == "NarHash") + narHash = parseHashField(value); + else if (name == "NarSize") { + if (!string2Int(value, narSize)) corrupt(); + } + else if (name == "References") { + auto refs = tokenizeString<Strings>(value, " "); + if (!references.empty()) corrupt(); + for (auto & r : refs) { + auto r2 = settings.nixStore + "/" + r; + if (!isStorePath(r2)) corrupt(); + references.insert(r2); + } + } + else if (name == "Deriver") { + auto p = settings.nixStore + "/" + value; + if (!isStorePath(p)) corrupt(); + deriver = p; + } + else if (name == "System") + system = value; + else if (name == "Sig") + sigs.insert(value); + + pos = eol + 1; + } + + if (compression == "") compression = "bzip2"; + + if (path.empty() || url.empty()) corrupt(); +} + 
+std::string NarInfo::to_string() const +{ + std::string res; + res += "StorePath: " + path + "\n"; + res += "URL: " + url + "\n"; + assert(compression != ""); + res += "Compression: " + compression + "\n"; + assert(fileHash.type == htSHA256); + res += "FileHash: sha256:" + printHash32(fileHash) + "\n"; + res += "FileSize: " + std::to_string(fileSize) + "\n"; + assert(narHash.type == htSHA256); + res += "NarHash: sha256:" + printHash32(narHash) + "\n"; + res += "NarSize: " + std::to_string(narSize) + "\n"; + + res += "References: " + concatStringsSep(" ", shortRefs()) + "\n"; + + if (!deriver.empty()) + res += "Deriver: " + baseNameOf(deriver) + "\n"; + + if (!system.empty()) + res += "System: " + system + "\n"; + + for (auto sig : sigs) + res += "Sig: " + sig + "\n"; + + return res; +} + +} diff --git a/src/libstore/nar-info.hh b/src/libstore/nar-info.hh new file mode 100644 index 000000000000..6bc2f03b139b --- /dev/null +++ b/src/libstore/nar-info.hh @@ -0,0 +1,24 @@ +#pragma once + +#include "types.hh" +#include "hash.hh" +#include "store-api.hh" + +namespace nix { + +struct NarInfo : ValidPathInfo +{ + std::string url; + std::string compression; + Hash fileHash; + uint64_t fileSize = 0; + std::string system; + + NarInfo() { } + NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { } + NarInfo(const std::string & s, const std::string & whence); + + std::string to_string() const; +}; + +} diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 55c252b9b2e3..ad7fe0e8bebf 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -99,8 +99,8 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa if (S_ISDIR(st.st_mode)) { Strings names = readDirectoryIgnoringInodes(path, inodeHash); - foreach (Strings::iterator, i, names) - optimisePath_(stats, path + "/" + *i, inodeHash); + for (auto & i : names) + optimisePath_(stats, path + "/" + i, inodeHash); return; } @@ -120,9 +120,9 @@ 
void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa return; } - /* This can still happen on top-level files */ + /* This can still happen on top-level files. */ if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) { - printMsg(lvlDebug, format("‘%1%’ is already linked, with %2% other file(s).") % path % (st.st_nlink - 2)); + printMsg(lvlDebug, format("‘%1%’ is already linked, with %2% other file(s)") % path % (st.st_nlink - 2)); return; } @@ -141,6 +141,7 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa /* Check if this is a known hash. */ Path linkPath = linksDir + "/" + printHash32(hash); + retry: if (!pathExists(linkPath)) { /* Nope, create a hard link in the links directory. */ if (link(path.c_str(), linkPath.c_str()) == 0) { @@ -164,6 +165,12 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa return; } + if (st.st_size != stLink.st_size) { + printMsg(lvlError, format("removing corrupted link ‘%1%’") % linkPath); + unlink(linkPath.c_str()); + goto retry; + } + printMsg(lvlTalkative, format("linking ‘%1%’ to ‘%2%’") % path % linkPath); /* Make the containing directory writable, but only if it's not @@ -218,11 +225,11 @@ void LocalStore::optimiseStore(OptimiseStats & stats) PathSet paths = queryAllValidPaths(); InodeHash inodeHash = loadInodeHash(); - foreach (PathSet::iterator, i, paths) { - addTempRoot(*i); - if (!isValidPath(*i)) continue; /* path was GC'ed, probably */ - startNest(nest, lvlChatty, format("hashing files in ‘%1%’") % *i); - optimisePath_(stats, *i, inodeHash); + for (auto & i : paths) { + addTempRoot(i); + if (!isValidPath(i)) continue; /* path was GC'ed, probably */ + Activity act(*logger, lvlChatty, format("hashing files in ‘%1%’") % i); + optimisePath_(stats, i, inodeHash); } } diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc index 9db37e8f9aaa..eddf5bcbda65 100644 --- a/src/libstore/pathlocks.cc +++ b/src/libstore/pathlocks.cc @@ 
-60,7 +60,7 @@ bool lockFile(int fd, LockType lockType, bool wait) while (fcntl(fd, F_SETLK, &lock) != 0) { checkInterrupt(); if (errno == EACCES || errno == EAGAIN) return false; - if (errno != EINTR) + if (errno != EINTR) throw SysError(format("acquiring/releasing lock")); } } @@ -94,7 +94,7 @@ bool PathLocks::lockPaths(const PathSet & _paths, const string & waitMsg, bool wait) { assert(fds.empty()); - + /* Note that `fds' is built incrementally so that the destructor will only release those locks that we have already acquired. */ @@ -102,11 +102,10 @@ bool PathLocks::lockPaths(const PathSet & _paths, the same order, thus preventing deadlocks. */ Paths paths(_paths.begin(), _paths.end()); paths.sort(); - + /* Acquire the lock for each path. */ - foreach (Paths::iterator, i, paths) { + for (auto & path : paths) { checkInterrupt(); - Path path = *i; Path lockPath = path + ".lock"; debug(format("locking path ‘%1%’") % path); @@ -115,11 +114,11 @@ bool PathLocks::lockPaths(const PathSet & _paths, throw Error("deadlock: trying to re-acquire self-held lock"); AutoCloseFD fd; - + while (1) { /* Open/create the lock file. */ - fd = openLockFile(lockPath, true); + fd = openLockFile(lockPath, true); /* Acquire an exclusive lock. */ if (!lockFile(fd, ltWrite, false)) { @@ -162,21 +161,25 @@ bool PathLocks::lockPaths(const PathSet & _paths, PathLocks::~PathLocks() { - unlock(); + try { + unlock(); + } catch (...) 
{ + ignoreException(); + } } void PathLocks::unlock() { - foreach (list<FDPair>::iterator, i, fds) { - if (deletePaths) deleteLockFile(i->second, i->first); + for (auto & i : fds) { + if (deletePaths) deleteLockFile(i.second, i.first); - lockedPaths.erase(i->second); - if (close(i->first) == -1) + lockedPaths.erase(i.second); + if (close(i.first) == -1) printMsg(lvlError, - format("error (ignored): cannot close lock file on ‘%1%’") % i->second); + format("error (ignored): cannot close lock file on ‘%1%’") % i.second); - debug(format("lock released on ‘%1%’") % i->second); + debug(format("lock released on ‘%1%’") % i.second); } fds.clear(); @@ -195,5 +198,5 @@ bool pathIsLockedByMe(const Path & path) return lockedPaths.find(lockPath) != lockedPaths.end(); } - + } diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc index da3f7da9d19d..18e3bcbec4a3 100644 --- a/src/libstore/profiles.cc +++ b/src/libstore/profiles.cc @@ -74,7 +74,7 @@ static void makeName(const Path & profile, unsigned int num, } -Path createGeneration(Path profile, Path outPath) +Path createGeneration(ref<Store> store, Path profile, Path outPath) { /* The new generation number should be higher than old the previous ones. */ @@ -108,7 +108,7 @@ Path createGeneration(Path profile, Path outPath) user environment etc. we've just built. 
*/ Path generation; makeName(profile, num + 1, generation); - addPermRoot(*store, outPath, generation, false, true); + store->addPermRoot(outPath, generation, false, true); return generation; } @@ -222,8 +222,7 @@ void switchLink(Path link, Path target) void lockProfile(PathLocks & lock, const Path & profile) { - lock.lockPaths(singleton<PathSet>(profile), - (format("waiting for lock on profile ‘%1%’") % profile).str()); + lock.lockPaths({profile}, (format("waiting for lock on profile ‘%1%’") % profile).str()); lock.setDeletion(true); } diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh index e99bbf398a86..d758d94b603c 100644 --- a/src/libstore/profiles.hh +++ b/src/libstore/profiles.hh @@ -31,7 +31,9 @@ typedef list<Generation> Generations; profile, sorted by generation number. */ Generations findGenerations(Path profile, int & curGen); -Path createGeneration(Path profile, Path outPath); +class Store; + +Path createGeneration(ref<Store> store, Path profile, Path outPath); void deleteGeneration(const Path & profile, unsigned int gen); diff --git a/src/libstore/references.cc b/src/libstore/references.cc index 521244a31377..33eab5a240b5 100644 --- a/src/libstore/references.cc +++ b/src/libstore/references.cc @@ -13,7 +13,7 @@ namespace nix { static unsigned int refLength = 32; /* characters */ -static void search(const unsigned char * s, unsigned int len, +static void search(const unsigned char * s, unsigned int len, StringSet & hashes, StringSet & seen) { static bool initialised = false; @@ -24,7 +24,7 @@ static void search(const unsigned char * s, unsigned int len, isBase32[(unsigned char) base32Chars[i]] = true; initialised = true; } - + for (unsigned int i = 0; i + refLength <= len; ) { int j; bool match = true; @@ -56,7 +56,7 @@ struct RefScanSink : Sink string tail; RefScanSink() : hashSink(htSHA256) { } - + void operator () (const unsigned char * data, size_t len); }; @@ -89,17 +89,17 @@ PathSet scanForReferences(const string & path, /* For 
efficiency (and a higher hit rate), just search for the hash part of the file name. (This assumes that all references have the form `HASH-bla'). */ - foreach (PathSet::const_iterator, i, refs) { - string baseName = baseNameOf(*i); + for (auto & i : refs) { + string baseName = baseNameOf(i); string::size_type pos = baseName.find('-'); if (pos == string::npos) - throw Error(format("bad reference ‘%1%’") % *i); + throw Error(format("bad reference ‘%1%’") % i); string s = string(baseName, 0, pos); assert(s.size() == refLength); assert(backMap.find(s) == backMap.end()); // parseHash(htSHA256, s); sink.hashes.insert(s); - backMap[s] = *i; + backMap[s] = i; } /* Look for the hashes in the NAR dump of the path. */ @@ -107,14 +107,14 @@ PathSet scanForReferences(const string & path, /* Map the hashes found back to their store paths. */ PathSet found; - foreach (StringSet::iterator, i, sink.seen) { + for (auto & i : sink.seen) { std::map<string, Path>::iterator j; - if ((j = backMap.find(*i)) == backMap.end()) abort(); + if ((j = backMap.find(i)) == backMap.end()) abort(); found.insert(j->second); } hash = sink.hashSink.finish(); - + return found; } diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index ab87d9d8b16f..9a00a6ed9910 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -5,6 +5,8 @@ #include "archive.hh" #include "affinity.hh" #include "globals.hh" +#include "derivations.hh" +#include "pool.hh" #include <sys/types.h> #include <sys/stat.h> @@ -12,9 +14,8 @@ #include <sys/un.h> #include <errno.h> #include <fcntl.h> - -#include <iostream> #include <unistd.h> + #include <cstring> namespace nix { @@ -31,205 +32,171 @@ Path readStorePath(Source & from) template<class T> T readStorePaths(Source & from) { T paths = readStrings<T>(from); - foreach (typename T::iterator, i, paths) assertStorePath(*i); + for (auto & i : paths) assertStorePath(i); return paths; } template PathSet readStorePaths(Source & from); 
-RemoteStore::RemoteStore() +RemoteStore::RemoteStore(size_t maxConnections) + : connections(make_ref<Pool<Connection>>( + maxConnections, + [this]() { return openConnection(); }, + [](const ref<Connection> & r) { return r->to.good() && r->from.good(); } + )) +{ +} + + +std::string RemoteStore::getUri() { - initialised = false; + return "daemon"; } -void RemoteStore::openConnection(bool reserveSpace) +ref<RemoteStore::Connection> RemoteStore::openConnection() { - if (initialised) return; - initialised = true; + auto conn = make_ref<Connection>(); - string remoteMode = getEnv("NIX_REMOTE"); + /* Connect to a daemon that does the privileged work for us. */ + conn->fd = socket(PF_UNIX, SOCK_STREAM, 0); + if (conn->fd == -1) + throw SysError("cannot create Unix domain socket"); + closeOnExec(conn->fd); + + string socketPath = settings.nixDaemonSocketFile; - if (remoteMode == "daemon") - /* Connect to a daemon that does the privileged work for - us. */ - connectToDaemon(); - else - throw Error(format("invalid setting for NIX_REMOTE, ‘%1%’") % remoteMode); + struct sockaddr_un addr; + addr.sun_family = AF_UNIX; + if (socketPath.size() + 1 >= sizeof(addr.sun_path)) + throw Error(format("socket path ‘%1%’ is too long") % socketPath); + strcpy(addr.sun_path, socketPath.c_str()); - from.fd = fdSocket; - to.fd = fdSocket; + if (connect(conn->fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) + throw SysError(format("cannot connect to daemon at ‘%1%’") % socketPath); + + conn->from.fd = conn->fd; + conn->to.fd = conn->fd; /* Send the magic greeting, check for the reply. 
*/ try { - writeInt(WORKER_MAGIC_1, to); - to.flush(); - unsigned int magic = readInt(from); + conn->to << WORKER_MAGIC_1; + conn->to.flush(); + unsigned int magic = readInt(conn->from); if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch"); - daemonVersion = readInt(from); - if (GET_PROTOCOL_MAJOR(daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION)) + conn->daemonVersion = readInt(conn->from); + if (GET_PROTOCOL_MAJOR(conn->daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION)) throw Error("Nix daemon protocol version not supported"); - writeInt(PROTOCOL_VERSION, to); + conn->to << PROTOCOL_VERSION; - if (GET_PROTOCOL_MINOR(daemonVersion) >= 14) { + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 14) { int cpu = settings.lockCPU ? lockToCurrentCPU() : -1; - if (cpu != -1) { - writeInt(1, to); - writeInt(cpu, to); - } else - writeInt(0, to); + if (cpu != -1) + conn->to << 1 << cpu; + else + conn->to << 0; } - if (GET_PROTOCOL_MINOR(daemonVersion) >= 11) - writeInt(reserveSpace, to); + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 11) + conn->to << false; - processStderr(); + conn->processStderr(); } catch (Error & e) { throw Error(format("cannot start daemon worker: %1%") % e.msg()); } - setOptions(); -} - - -void RemoteStore::connectToDaemon() -{ - fdSocket = socket(PF_UNIX, SOCK_STREAM, 0); - if (fdSocket == -1) - throw SysError("cannot create Unix domain socket"); - closeOnExec(fdSocket); - - string socketPath = settings.nixDaemonSocketFile; - - /* Urgh, sockaddr_un allows path names of only 108 characters. So - chdir to the socket directory so that we can pass a relative - path name. !!! this is probably a bad idea in multi-threaded - applications... 
*/ - AutoCloseFD fdPrevDir = open(".", O_RDONLY); - if (fdPrevDir == -1) throw SysError("couldn't open current directory"); - if (chdir(dirOf(socketPath).c_str()) == -1) throw SysError(format("couldn't change to directory of ‘%1%’") % socketPath); - Path socketPathRel = "./" + baseNameOf(socketPath); - - struct sockaddr_un addr; - addr.sun_family = AF_UNIX; - if (socketPathRel.size() >= sizeof(addr.sun_path)) - throw Error(format("socket path ‘%1%’ is too long") % socketPathRel); - using namespace std; - strcpy(addr.sun_path, socketPathRel.c_str()); - - if (connect(fdSocket, (struct sockaddr *) &addr, sizeof(addr)) == -1) - throw SysError(format("cannot connect to daemon at ‘%1%’") % socketPath); + setOptions(conn); - if (fchdir(fdPrevDir) == -1) - throw SysError("couldn't change back to previous directory"); + return conn; } -RemoteStore::~RemoteStore() +void RemoteStore::setOptions(ref<Connection> conn) { - try { - to.flush(); - fdSocket.close(); - } catch (...) { - ignoreException(); - } -} - + conn->to << wopSetOptions + << settings.keepFailed + << settings.keepGoing + << settings.tryFallback + << verbosity + << settings.maxBuildJobs + << settings.maxSilentTime; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 2) + conn->to << settings.useBuildHook; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 4) + conn->to << (settings.verboseBuild ? 
lvlError : lvlVomit) + << 0 // obsolete log type + << 0 /* obsolete print build trace */; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 6) + conn->to << settings.buildCores; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 10) + conn->to << settings.useSubstitutes; -void RemoteStore::setOptions() -{ - writeInt(wopSetOptions, to); - - writeInt(settings.keepFailed, to); - writeInt(settings.keepGoing, to); - writeInt(settings.tryFallback, to); - writeInt(verbosity, to); - writeInt(settings.maxBuildJobs, to); - writeInt(settings.maxSilentTime, to); - if (GET_PROTOCOL_MINOR(daemonVersion) >= 2) - writeInt(settings.useBuildHook, to); - if (GET_PROTOCOL_MINOR(daemonVersion) >= 4) { - writeInt(settings.buildVerbosity, to); - writeInt(logType, to); - writeInt(settings.printBuildTrace, to); - } - if (GET_PROTOCOL_MINOR(daemonVersion) >= 6) - writeInt(settings.buildCores, to); - if (GET_PROTOCOL_MINOR(daemonVersion) >= 10) - writeInt(settings.useSubstitutes, to); - - if (GET_PROTOCOL_MINOR(daemonVersion) >= 12) { + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 12) { Settings::SettingsMap overrides = settings.getOverrides(); if (overrides["ssh-auth-sock"] == "") overrides["ssh-auth-sock"] = getEnv("SSH_AUTH_SOCK"); - writeInt(overrides.size(), to); - foreach (Settings::SettingsMap::iterator, i, overrides) { - writeString(i->first, to); - writeString(i->second, to); - } + conn->to << overrides.size(); + for (auto & i : overrides) + conn->to << i.first << i.second; } - processStderr(); + conn->processStderr(); } -bool RemoteStore::isValidPath(const Path & path) +bool RemoteStore::isValidPathUncached(const Path & path) { - openConnection(); - writeInt(wopIsValidPath, to); - writeString(path, to); - processStderr(); - unsigned int reply = readInt(from); + auto conn(connections->get()); + conn->to << wopIsValidPath << path; + conn->processStderr(); + unsigned int reply = readInt(conn->from); return reply != 0; } PathSet RemoteStore::queryValidPaths(const PathSet & paths) { - 
openConnection(); - if (GET_PROTOCOL_MINOR(daemonVersion) < 12) { + auto conn(connections->get()); + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { PathSet res; - foreach (PathSet::const_iterator, i, paths) - if (isValidPath(*i)) res.insert(*i); + for (auto & i : paths) + if (isValidPath(i)) res.insert(i); return res; } else { - writeInt(wopQueryValidPaths, to); - writeStrings(paths, to); - processStderr(); - return readStorePaths<PathSet>(from); + conn->to << wopQueryValidPaths << paths; + conn->processStderr(); + return readStorePaths<PathSet>(conn->from); } } PathSet RemoteStore::queryAllValidPaths() { - openConnection(); - writeInt(wopQueryAllValidPaths, to); - processStderr(); - return readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryAllValidPaths; + conn->processStderr(); + return readStorePaths<PathSet>(conn->from); } PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths) { - openConnection(); - if (GET_PROTOCOL_MINOR(daemonVersion) < 12) { + auto conn(connections->get()); + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { PathSet res; - foreach (PathSet::const_iterator, i, paths) { - writeInt(wopHasSubstitutes, to); - writeString(*i, to); - processStderr(); - if (readInt(from)) res.insert(*i); + for (auto & i : paths) { + conn->to << wopHasSubstitutes << i; + conn->processStderr(); + if (readInt(conn->from)) res.insert(i); } return res; } else { - writeInt(wopQuerySubstitutablePaths, to); - writeStrings(paths, to); - processStderr(); - return readStorePaths<PathSet>(from); + conn->to << wopQuerySubstitutablePaths << paths; + conn->processStderr(); + return readStorePaths<PathSet>(conn->from); } } @@ -239,187 +206,163 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, { if (paths.empty()) return; - openConnection(); + auto conn(connections->get()); - if (GET_PROTOCOL_MINOR(daemonVersion) < 3) return; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 3) return; - if 
(GET_PROTOCOL_MINOR(daemonVersion) < 12) { + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { - foreach (PathSet::const_iterator, i, paths) { + for (auto & i : paths) { SubstitutablePathInfo info; - writeInt(wopQuerySubstitutablePathInfo, to); - writeString(*i, to); - processStderr(); - unsigned int reply = readInt(from); + conn->to << wopQuerySubstitutablePathInfo << i; + conn->processStderr(); + unsigned int reply = readInt(conn->from); if (reply == 0) continue; - info.deriver = readString(from); + info.deriver = readString(conn->from); if (info.deriver != "") assertStorePath(info.deriver); - info.references = readStorePaths<PathSet>(from); - info.downloadSize = readLongLong(from); - info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0; - infos[*i] = info; + info.references = readStorePaths<PathSet>(conn->from); + info.downloadSize = readLongLong(conn->from); + info.narSize = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 7 ? readLongLong(conn->from) : 0; + infos[i] = info; } } else { - writeInt(wopQuerySubstitutablePathInfos, to); - writeStrings(paths, to); - processStderr(); - unsigned int count = readInt(from); + conn->to << wopQuerySubstitutablePathInfos << paths; + conn->processStderr(); + unsigned int count = readInt(conn->from); for (unsigned int n = 0; n < count; n++) { - Path path = readStorePath(from); + Path path = readStorePath(conn->from); SubstitutablePathInfo & info(infos[path]); - info.deriver = readString(from); + info.deriver = readString(conn->from); if (info.deriver != "") assertStorePath(info.deriver); - info.references = readStorePaths<PathSet>(from); - info.downloadSize = readLongLong(from); - info.narSize = readLongLong(from); + info.references = readStorePaths<PathSet>(conn->from); + info.downloadSize = readLongLong(conn->from); + info.narSize = readLongLong(conn->from); } } } -ValidPathInfo RemoteStore::queryPathInfo(const Path & path) +std::shared_ptr<ValidPathInfo> RemoteStore::queryPathInfoUncached(const Path 
& path) { - openConnection(); - writeInt(wopQueryPathInfo, to); - writeString(path, to); - processStderr(); - ValidPathInfo info; - info.path = path; - info.deriver = readString(from); - if (info.deriver != "") assertStorePath(info.deriver); - info.hash = parseHash(htSHA256, readString(from)); - info.references = readStorePaths<PathSet>(from); - info.registrationTime = readInt(from); - info.narSize = readLongLong(from); + auto conn(connections->get()); + conn->to << wopQueryPathInfo << path; + try { + conn->processStderr(); + } catch (Error & e) { + // Ugly backwards compatibility hack. + if (e.msg().find("is not valid") != std::string::npos) + throw InvalidPath(e.what()); + throw; + } + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) { + bool valid = readInt(conn->from) != 0; + if (!valid) throw InvalidPath(format("path ‘%s’ is not valid") % path); + } + auto info = std::make_shared<ValidPathInfo>(); + info->path = path; + info->deriver = readString(conn->from); + if (info->deriver != "") assertStorePath(info->deriver); + info->narHash = parseHash(htSHA256, readString(conn->from)); + info->references = readStorePaths<PathSet>(conn->from); + info->registrationTime = readInt(conn->from); + info->narSize = readLongLong(conn->from); + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) { + info->ultimate = readInt(conn->from) != 0; + info->sigs = readStrings<StringSet>(conn->from); + } return info; } -Hash RemoteStore::queryPathHash(const Path & path) -{ - openConnection(); - writeInt(wopQueryPathHash, to); - writeString(path, to); - processStderr(); - string hash = readString(from); - return parseHash(htSHA256, hash); -} - - -void RemoteStore::queryReferences(const Path & path, - PathSet & references) -{ - openConnection(); - writeInt(wopQueryReferences, to); - writeString(path, to); - processStderr(); - PathSet references2 = readStorePaths<PathSet>(from); - references.insert(references2.begin(), references2.end()); -} - - void RemoteStore::queryReferrers(const 
Path & path, PathSet & referrers) { - openConnection(); - writeInt(wopQueryReferrers, to); - writeString(path, to); - processStderr(); - PathSet referrers2 = readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryReferrers << path; + conn->processStderr(); + PathSet referrers2 = readStorePaths<PathSet>(conn->from); referrers.insert(referrers2.begin(), referrers2.end()); } -Path RemoteStore::queryDeriver(const Path & path) -{ - openConnection(); - writeInt(wopQueryDeriver, to); - writeString(path, to); - processStderr(); - Path drvPath = readString(from); - if (drvPath != "") assertStorePath(drvPath); - return drvPath; -} - - PathSet RemoteStore::queryValidDerivers(const Path & path) { - openConnection(); - writeInt(wopQueryValidDerivers, to); - writeString(path, to); - processStderr(); - return readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryValidDerivers << path; + conn->processStderr(); + return readStorePaths<PathSet>(conn->from); } PathSet RemoteStore::queryDerivationOutputs(const Path & path) { - openConnection(); - writeInt(wopQueryDerivationOutputs, to); - writeString(path, to); - processStderr(); - return readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryDerivationOutputs << path; + conn->processStderr(); + return readStorePaths<PathSet>(conn->from); } PathSet RemoteStore::queryDerivationOutputNames(const Path & path) { - openConnection(); - writeInt(wopQueryDerivationOutputNames, to); - writeString(path, to); - processStderr(); - return readStrings<PathSet>(from); + auto conn(connections->get()); + conn->to << wopQueryDerivationOutputNames << path; + conn->processStderr(); + return readStrings<PathSet>(conn->from); } Path RemoteStore::queryPathFromHashPart(const string & hashPart) { - openConnection(); - writeInt(wopQueryPathFromHashPart, to); - writeString(hashPart, to); - processStderr(); - Path path = readString(from); + auto 
conn(connections->get()); + conn->to << wopQueryPathFromHashPart << hashPart; + conn->processStderr(); + Path path = readString(conn->from); if (!path.empty()) assertStorePath(path); return path; } +void RemoteStore::addToStore(const ValidPathInfo & info, const std::string & nar, bool repair) +{ + throw Error("RemoteStore::addToStore() not implemented"); +} + + Path RemoteStore::addToStore(const string & name, const Path & _srcPath, bool recursive, HashType hashAlgo, PathFilter & filter, bool repair) { if (repair) throw Error("repairing is not supported when building through the Nix daemon"); - openConnection(); + auto conn(connections->get()); Path srcPath(absPath(_srcPath)); - writeInt(wopAddToStore, to); - writeString(name, to); - /* backwards compatibility hack */ - writeInt((hashAlgo == htSHA256 && recursive) ? 0 : 1, to); - writeInt(recursive ? 1 : 0, to); - writeString(printHashType(hashAlgo), to); + conn->to << wopAddToStore << name + << ((hashAlgo == htSHA256 && recursive) ? 0 : 1) /* backwards compatibility hack */ + << (recursive ? 1 : 0) + << printHashType(hashAlgo); try { - to.written = 0; - to.warn = true; - dumpPath(srcPath, to, filter); - to.warn = false; - processStderr(); + conn->to.written = 0; + conn->to.warn = true; + dumpPath(srcPath, conn->to, filter); + conn->to.warn = false; + conn->processStderr(); } catch (SysError & e) { /* Daemon closed while we were sending the path. Probably OOM or I/O error. 
*/ if (e.errNo == EPIPE) try { - processStderr(); + conn->processStderr(); } catch (EndOfFile & e) { } throw; } - return readStorePath(from); + return readStorePath(conn->from); } @@ -428,109 +371,100 @@ Path RemoteStore::addTextToStore(const string & name, const string & s, { if (repair) throw Error("repairing is not supported when building through the Nix daemon"); - openConnection(); - writeInt(wopAddTextToStore, to); - writeString(name, to); - writeString(s, to); - writeStrings(references, to); - - processStderr(); - return readStorePath(from); -} - + auto conn(connections->get()); + conn->to << wopAddTextToStore << name << s << references; -void RemoteStore::exportPath(const Path & path, bool sign, - Sink & sink) -{ - openConnection(); - writeInt(wopExportPath, to); - writeString(path, to); - writeInt(sign ? 1 : 0, to); - processStderr(&sink); /* sink receives the actual data */ - readInt(from); -} - - -Paths RemoteStore::importPaths(bool requireSignature, Source & source) -{ - openConnection(); - writeInt(wopImportPaths, to); - /* We ignore requireSignature, since the worker forces it to true - anyway. */ - processStderr(0, &source); - return readStorePaths<Paths>(from); + conn->processStderr(); + return readStorePath(conn->from); } void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) { - if (buildMode != bmNormal) throw Error("repairing or checking is not supported when building through the Nix daemon"); - openConnection(); - writeInt(wopBuildPaths, to); - if (GET_PROTOCOL_MINOR(daemonVersion) >= 13) - writeStrings(drvPaths, to); - else { + auto conn(connections->get()); + conn->to << wopBuildPaths; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) { + conn->to << drvPaths; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15) + conn->to << buildMode; + else + /* Old daemons did not take a 'buildMode' parameter, so we + need to validate it here on the client side. 
*/ + if (buildMode != bmNormal) + throw Error("repairing or checking is not supported when building through the Nix daemon"); + } else { /* For backwards compatibility with old daemons, strip output identifiers. */ PathSet drvPaths2; - foreach (PathSet::const_iterator, i, drvPaths) - drvPaths2.insert(string(*i, 0, i->find('!'))); - writeStrings(drvPaths2, to); + for (auto & i : drvPaths) + drvPaths2.insert(string(i, 0, i.find('!'))); + conn->to << drvPaths2; } - processStderr(); - readInt(from); + conn->processStderr(); + readInt(conn->from); +} + + +BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv, + BuildMode buildMode) +{ + auto conn(connections->get()); + conn->to << wopBuildDerivation << drvPath << drv << buildMode; + conn->processStderr(); + BuildResult res; + unsigned int status; + conn->from >> status >> res.errorMsg; + res.status = (BuildResult::Status) status; + return res; } void RemoteStore::ensurePath(const Path & path) { - openConnection(); - writeInt(wopEnsurePath, to); - writeString(path, to); - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopEnsurePath << path; + conn->processStderr(); + readInt(conn->from); } void RemoteStore::addTempRoot(const Path & path) { - openConnection(); - writeInt(wopAddTempRoot, to); - writeString(path, to); - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopAddTempRoot << path; + conn->processStderr(); + readInt(conn->from); } void RemoteStore::addIndirectRoot(const Path & path) { - openConnection(); - writeInt(wopAddIndirectRoot, to); - writeString(path, to); - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopAddIndirectRoot << path; + conn->processStderr(); + readInt(conn->from); } void RemoteStore::syncWithGC() { - openConnection(); - writeInt(wopSyncWithGC, to); - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopSyncWithGC; + 
conn->processStderr(); + readInt(conn->from); } Roots RemoteStore::findRoots() { - openConnection(); - writeInt(wopFindRoots, to); - processStderr(); - unsigned int count = readInt(from); + auto conn(connections->get()); + conn->to << wopFindRoots; + conn->processStderr(); + unsigned int count = readInt(conn->from); Roots result; while (count--) { - Path link = readString(from); - Path target = readStorePath(from); + Path link = readString(conn->from); + Path target = readStorePath(conn->from); result[link] = target; } return result; @@ -539,65 +473,66 @@ Roots RemoteStore::findRoots() void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) { - openConnection(false); + auto conn(connections->get()); - writeInt(wopCollectGarbage, to); - writeInt(options.action, to); - writeStrings(options.pathsToDelete, to); - writeInt(options.ignoreLiveness, to); - writeLongLong(options.maxFreed, to); - writeInt(0, to); - if (GET_PROTOCOL_MINOR(daemonVersion) >= 5) { + conn->to << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness + << options.maxFreed << 0; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 5) /* removed options */ - writeInt(0, to); - writeInt(0, to); - } + conn->to << 0 << 0; - processStderr(); + conn->processStderr(); - results.paths = readStrings<PathSet>(from); - results.bytesFreed = readLongLong(from); - readLongLong(from); // obsolete + results.paths = readStrings<PathSet>(conn->from); + results.bytesFreed = readLongLong(conn->from); + readLongLong(conn->from); // obsolete + + { + auto state_(Store::state.lock()); + state_->pathInfoCache.clear(); + } } -PathSet RemoteStore::queryFailedPaths() +void RemoteStore::optimiseStore() { - openConnection(); - writeInt(wopQueryFailedPaths, to); - processStderr(); - return readStorePaths<PathSet>(from); + auto conn(connections->get()); + conn->to << wopOptimiseStore; + conn->processStderr(); + readInt(conn->from); } -void RemoteStore::clearFailedPaths(const 
PathSet & paths) +bool RemoteStore::verifyStore(bool checkContents, bool repair) { - openConnection(); - writeInt(wopClearFailedPaths, to); - writeStrings(paths, to); - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopVerifyStore << checkContents << repair; + conn->processStderr(); + return readInt(conn->from) != 0; } -void RemoteStore::optimiseStore() + +void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs) { - openConnection(); - writeInt(wopOptimiseStore, to); - processStderr(); - readInt(from); + auto conn(connections->get()); + conn->to << wopAddSignatures << storePath << sigs; + conn->processStderr(); + readInt(conn->from); } -bool RemoteStore::verifyStore(bool checkContents, bool repair) + +RemoteStore::Connection::~Connection() { - openConnection(); - writeInt(wopVerifyStore, to); - writeInt(checkContents, to); - writeInt(repair, to); - processStderr(); - return readInt(from) != 0; + try { + to.flush(); + fd.close(); + } catch (...) 
{ + ignoreException(); + } } -void RemoteStore::processStderr(Sink * sink, Source * source) + +void RemoteStore::Connection::processStderr(Sink * sink, Source * source) { to.flush(); unsigned int msg; @@ -606,7 +541,7 @@ void RemoteStore::processStderr(Sink * sink, Source * source) if (msg == STDERR_WRITE) { string s = readString(from); if (!sink) throw Error("no sink"); - (*sink)((const unsigned char *) s.data(), s.size()); + (*sink)(s); } else if (msg == STDERR_READ) { if (!source) throw Error("no source"); @@ -616,10 +551,8 @@ void RemoteStore::processStderr(Sink * sink, Source * source) writeString(buf, source->read(buf, len), to); to.flush(); } - else { - string s = readString(from); - writeToStderr(s); - } + else + printMsg(lvlError, chomp(readString(from))); } if (msg == STDERR_ERROR) { string error = readString(from); diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 030120db4067..0757f82e8964 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -1,5 +1,6 @@ #pragma once +#include <limits> #include <string> #include "store-api.hh" @@ -12,94 +13,96 @@ class Pipe; class Pid; struct FdSink; struct FdSource; +template<typename T> class Pool; -class RemoteStore : public StoreAPI +/* FIXME: RemoteStore is a misnomer - should be something like + DaemonStore. */ +class RemoteStore : public LocalFSStore { public: - RemoteStore(); - - ~RemoteStore(); + RemoteStore(size_t maxConnections = std::numeric_limits<size_t>::max()); /* Implementations of abstract store API methods. 
*/ - bool isValidPath(const Path & path); - - PathSet queryValidPaths(const PathSet & paths); + std::string getUri() override; - PathSet queryAllValidPaths(); + bool isValidPathUncached(const Path & path) override; - ValidPathInfo queryPathInfo(const Path & path); + PathSet queryValidPaths(const PathSet & paths) override; - Hash queryPathHash(const Path & path); + PathSet queryAllValidPaths() override; - void queryReferences(const Path & path, PathSet & references); + std::shared_ptr<ValidPathInfo> queryPathInfoUncached(const Path & path) override; - void queryReferrers(const Path & path, PathSet & referrers); + void queryReferrers(const Path & path, PathSet & referrers) override; - Path queryDeriver(const Path & path); + PathSet queryValidDerivers(const Path & path) override; - PathSet queryValidDerivers(const Path & path); + PathSet queryDerivationOutputs(const Path & path) override; - PathSet queryDerivationOutputs(const Path & path); + StringSet queryDerivationOutputNames(const Path & path) override; - StringSet queryDerivationOutputNames(const Path & path); + Path queryPathFromHashPart(const string & hashPart) override; - Path queryPathFromHashPart(const string & hashPart); - - PathSet querySubstitutablePaths(const PathSet & paths); + PathSet querySubstitutablePaths(const PathSet & paths) override; void querySubstitutablePathInfos(const PathSet & paths, - SubstitutablePathInfos & infos); + SubstitutablePathInfos & infos) override; + + void addToStore(const ValidPathInfo & info, const std::string & nar, + bool repair) override; Path addToStore(const string & name, const Path & srcPath, bool recursive = true, HashType hashAlgo = htSHA256, - PathFilter & filter = defaultPathFilter, bool repair = false); + PathFilter & filter = defaultPathFilter, bool repair = false) override; Path addTextToStore(const string & name, const string & s, - const PathSet & references, bool repair = false); + const PathSet & references, bool repair = false) override; - void 
exportPath(const Path & path, bool sign, - Sink & sink); + void buildPaths(const PathSet & paths, BuildMode buildMode) override; - Paths importPaths(bool requireSignature, Source & source); + BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv, + BuildMode buildMode) override; - void buildPaths(const PathSet & paths, BuildMode buildMode); + void ensurePath(const Path & path) override; - void ensurePath(const Path & path); + void addTempRoot(const Path & path) override; - void addTempRoot(const Path & path); + void addIndirectRoot(const Path & path) override; - void addIndirectRoot(const Path & path); + void syncWithGC() override; - void syncWithGC(); + Roots findRoots() override; - Roots findRoots(); + void collectGarbage(const GCOptions & options, GCResults & results) override; - void collectGarbage(const GCOptions & options, GCResults & results); + void optimiseStore() override; - PathSet queryFailedPaths(); + bool verifyStore(bool checkContents, bool repair) override; - void clearFailedPaths(const PathSet & paths); + void addSignatures(const Path & storePath, const StringSet & sigs) override; - void optimiseStore(); - - bool verifyStore(bool checkContents, bool repair); private: - AutoCloseFD fdSocket; - FdSink to; - FdSource from; - unsigned int daemonVersion; - bool initialised; - void openConnection(bool reserveSpace = true); + struct Connection + { + AutoCloseFD fd; + FdSink to; + FdSource from; + unsigned int daemonVersion; + + ~Connection(); + + void processStderr(Sink * sink = 0, Source * source = 0); + }; - void processStderr(Sink * sink = 0, Source * source = 0); + ref<Pool<Connection>> connections; - void connectToDaemon(); + ref<Connection> openConnection(); - void setOptions(); + void setOptions(ref<Connection> conn); }; diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc new file mode 100644 index 000000000000..6ee27b48d61d --- /dev/null +++ b/src/libstore/s3-binary-cache-store.cc @@ 
-0,0 +1,258 @@ +#include "config.h" + +#if ENABLE_S3 + +#include "s3-binary-cache-store.hh" +#include "nar-info.hh" +#include "nar-info-disk-cache.hh" +#include "globals.hh" + +#include <aws/core/client/ClientConfiguration.h> +#include <aws/s3/S3Client.h> +#include <aws/s3/model/CreateBucketRequest.h> +#include <aws/s3/model/GetBucketLocationRequest.h> +#include <aws/s3/model/GetObjectRequest.h> +#include <aws/s3/model/HeadObjectRequest.h> +#include <aws/s3/model/PutObjectRequest.h> +#include <aws/s3/model/ListObjectsRequest.h> + +namespace nix { + +struct S3Error : public Error +{ + Aws::S3::S3Errors err; + S3Error(Aws::S3::S3Errors err, const FormatOrString & fs) + : Error(fs), err(err) { }; +}; + +/* Helper: given an Outcome<R, E>, return R in case of success, or + throw an exception in case of an error. */ +template<typename R, typename E> +R && checkAws(const FormatOrString & fs, Aws::Utils::Outcome<R, E> && outcome) +{ + if (!outcome.IsSuccess()) + throw S3Error( + outcome.GetError().GetErrorType(), + fs.s + ": " + outcome.GetError().GetMessage()); + return outcome.GetResultWithOwnership(); +} + +struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore +{ + std::string bucketName; + + ref<Aws::Client::ClientConfiguration> config; + ref<Aws::S3::S3Client> client; + + Stats stats; + + S3BinaryCacheStoreImpl( + const StoreParams & params, const std::string & bucketName) + : S3BinaryCacheStore(params) + , bucketName(bucketName) + , config(makeConfig()) + , client(make_ref<Aws::S3::S3Client>(*config)) + { + diskCache = getNarInfoDiskCache(); + } + + std::string getUri() + { + return "s3://" + bucketName; + } + + ref<Aws::Client::ClientConfiguration> makeConfig() + { + auto res = make_ref<Aws::Client::ClientConfiguration>(); + res->region = Aws::Region::US_EAST_1; // FIXME: make configurable + res->requestTimeoutMs = 600 * 1000; + return res; + } + + void init() + { + if (!diskCache->cacheExists(getUri())) { + + /* Create the bucket if it doesn't already exist. 
*/ + // FIXME: HeadBucket would be more appropriate, but doesn't return + // an easily parsed 404 message. + auto res = client->GetBucketLocation( + Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName)); + + if (!res.IsSuccess()) { + if (res.GetError().GetErrorType() != Aws::S3::S3Errors::NO_SUCH_BUCKET) + throw Error(format("AWS error checking bucket ‘%s’: %s") % bucketName % res.GetError().GetMessage()); + + checkAws(format("AWS error creating bucket ‘%s’") % bucketName, + client->CreateBucket( + Aws::S3::Model::CreateBucketRequest() + .WithBucket(bucketName) + .WithCreateBucketConfiguration( + Aws::S3::Model::CreateBucketConfiguration() + /* .WithLocationConstraint( + Aws::S3::Model::BucketLocationConstraint::US) */ ))); + } + + BinaryCacheStore::init(); + + diskCache->createCache(getUri(), wantMassQuery_, priority); + } + } + + const Stats & getS3Stats() + { + return stats; + } + + /* This is a specialisation of isValidPath() that optimistically + fetches the .narinfo file, rather than first checking for its + existence via a HEAD request. Since .narinfos are small, doing + a GET is unlikely to be slower than HEAD. 
*/ + bool isValidPathUncached(const Path & storePath) + { + try { + queryPathInfo(storePath); + return true; + } catch (InvalidPath & e) { + return false; + } + } + + bool fileExists(const std::string & path) + { + stats.head++; + + auto res = client->HeadObject( + Aws::S3::Model::HeadObjectRequest() + .WithBucket(bucketName) + .WithKey(path)); + + if (!res.IsSuccess()) { + auto & error = res.GetError(); + if (error.GetErrorType() == Aws::S3::S3Errors::UNKNOWN // FIXME + && error.GetMessage().find("404") != std::string::npos) + return false; + throw Error(format("AWS error fetching ‘%s’: %s") % path % error.GetMessage()); + } + + return true; + } + + void upsertFile(const std::string & path, const std::string & data) + { + auto request = + Aws::S3::Model::PutObjectRequest() + .WithBucket(bucketName) + .WithKey(path); + + auto stream = std::make_shared<std::stringstream>(data); + + request.SetBody(stream); + + stats.put++; + stats.putBytes += data.size(); + + auto now1 = std::chrono::steady_clock::now(); + + auto result = checkAws(format("AWS error uploading ‘%s’") % path, + client->PutObject(request)); + + auto now2 = std::chrono::steady_clock::now(); + + auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count(); + + printMsg(lvlInfo, format("uploaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms") + % bucketName % path % data.size() % duration); + + stats.putTimeMs += duration; + } + + std::shared_ptr<std::string> getFile(const std::string & path) + { + debug(format("fetching ‘s3://%1%/%2%’...") % bucketName % path); + + auto request = + Aws::S3::Model::GetObjectRequest() + .WithBucket(bucketName) + .WithKey(path); + + request.SetResponseStreamFactory([&]() { + return Aws::New<std::stringstream>("STRINGSTREAM"); + }); + + stats.get++; + + try { + + auto now1 = std::chrono::steady_clock::now(); + + auto result = checkAws(format("AWS error fetching ‘%s’") % path, + client->GetObject(request)); + + auto now2 = std::chrono::steady_clock::now(); 
+ + auto res = dynamic_cast<std::stringstream &>(result.GetBody()).str(); + + auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count(); + + printMsg(lvlTalkative, format("downloaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms") + % bucketName % path % res.size() % duration); + + stats.getBytes += res.size(); + stats.getTimeMs += duration; + + return std::make_shared<std::string>(res); + + } catch (S3Error & e) { + if (e.err == Aws::S3::S3Errors::NO_SUCH_KEY) return 0; + throw; + } + } + + PathSet queryAllValidPaths() override + { + PathSet paths; + std::string marker; + + do { + debug(format("listing bucket ‘s3://%s’ from key ‘%s’...") % bucketName % marker); + + auto res = checkAws(format("AWS error listing bucket ‘%s’") % bucketName, + client->ListObjects( + Aws::S3::Model::ListObjectsRequest() + .WithBucket(bucketName) + .WithDelimiter("/") + .WithMarker(marker))); + + auto & contents = res.GetContents(); + + debug(format("got %d keys, next marker ‘%s’") + % contents.size() % res.GetNextMarker()); + + for (auto object : contents) { + auto & key = object.GetKey(); + if (key.size() != 40 || !hasSuffix(key, ".narinfo")) continue; + paths.insert(settings.nixStore + "/" + key.substr(0, key.size() - 8)); + } + + marker = res.GetNextMarker(); + } while (!marker.empty()); + + return paths; + } + +}; + +static RegisterStoreImplementation regStore([]( + const std::string & uri, const StoreParams & params) + -> std::shared_ptr<Store> +{ + if (std::string(uri, 0, 5) != "s3://") return 0; + auto store = std::make_shared<S3BinaryCacheStoreImpl>(params, std::string(uri, 5)); + store->init(); + return store; +}); + +} + +#endif diff --git a/src/libstore/s3-binary-cache-store.hh b/src/libstore/s3-binary-cache-store.hh new file mode 100644 index 000000000000..3f9bd891274b --- /dev/null +++ b/src/libstore/s3-binary-cache-store.hh @@ -0,0 +1,33 @@ +#pragma once + +#include "binary-cache-store.hh" + +#include <atomic> + +namespace nix { + +class 
S3BinaryCacheStore : public BinaryCacheStore +{ +protected: + + S3BinaryCacheStore(const StoreParams & params) + : BinaryCacheStore(params) + { } + +public: + + struct Stats + { + std::atomic<uint64_t> put{0}; + std::atomic<uint64_t> putBytes{0}; + std::atomic<uint64_t> putTimeMs{0}; + std::atomic<uint64_t> get{0}; + std::atomic<uint64_t> getBytes{0}; + std::atomic<uint64_t> getTimeMs{0}; + std::atomic<uint64_t> head{0}; + }; + + const Stats & getS3Stats(); +}; + +} diff --git a/src/libstore/sandbox-defaults.sb.in b/src/libstore/sandbox-defaults.sb.in new file mode 100644 index 000000000000..b5e80085fbe2 --- /dev/null +++ b/src/libstore/sandbox-defaults.sb.in @@ -0,0 +1,63 @@ +(allow file-read* file-write-data (literal "/dev/null")) +(allow ipc-posix*) +(allow mach-lookup (global-name "com.apple.SecurityServer")) + +(allow file-read* + (literal "/dev/dtracehelper") + (literal "/dev/tty") + (literal "/dev/autofs_nowait") + (literal "/System/Library/CoreServices/SystemVersion.plist") + (literal "/private/var/run/systemkeychaincheck.done") + (literal "/private/etc/protocols") + (literal "/private/var/tmp") + (literal "/private/var/db") + (subpath "/private/var/db/mds")) + +(allow file-read* + (subpath "/usr/share/icu") + (subpath "/usr/share/locale") + (subpath "/usr/share/zoneinfo")) + +(allow file-write* + (literal "/dev/tty") + (literal "/dev/dtracehelper") + (literal "/mds")) + +(allow file-ioctl (literal "/dev/dtracehelper")) + +(allow file-read-metadata + (literal "/var") + (literal "/tmp") + ; symlinks + (literal "@sysconfdir@") + (literal "@sysconfdir@/nix") + (literal "@sysconfdir@/nix/nix.conf") + (literal "/etc/resolv.conf") + (literal "/private/etc/resolv.conf")) + +(allow file-read* + (literal "/private@sysconfdir@/nix/nix.conf") + (literal "/private/var/run/resolv.conf")) + +; some builders use filehandles other than stdin/stdout +(allow file* + (subpath "/dev/fd") + (literal "/dev/ptmx") + (regex #"^/dev/[pt]ty.*$")) + +; allow everything inside TMP 
+(allow file* process-exec + (subpath (param "_GLOBAL_TMP_DIR")) + (subpath "/private/tmp")) + +(allow process-fork) +(allow sysctl-read) +(allow signal (target same-sandbox)) + +; allow getpwuid (for git and other packages) +(allow mach-lookup + (global-name "com.apple.system.notification_center") + (global-name "com.apple.system.opendirectoryd.libinfo")) + +; allow local networking +(allow network* (local ip) (remote unix-socket)) diff --git a/src/libstore/schema.sql b/src/libstore/schema.sql index c1b4a689afcb..91878af1580d 100644 --- a/src/libstore/schema.sql +++ b/src/libstore/schema.sql @@ -4,7 +4,9 @@ create table if not exists ValidPaths ( hash text not null, registrationTime integer not null, deriver text, - narSize integer + narSize integer, + ultimate integer, -- null implies "false" + sigs text -- space-separated ); create table if not exists Refs ( @@ -37,8 +39,3 @@ create table if not exists DerivationOutputs ( ); create index if not exists IndexDerivationOutputs on DerivationOutputs(path); - -create table if not exists FailedPaths ( - path text primary key not null, - time integer not null -); diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc new file mode 100644 index 000000000000..816f9984d6eb --- /dev/null +++ b/src/libstore/sqlite.cc @@ -0,0 +1,172 @@ +#include "sqlite.hh" +#include "util.hh" + +#include <sqlite3.h> + +namespace nix { + +[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f) +{ + int err = sqlite3_errcode(db); + if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) { + if (err == SQLITE_PROTOCOL) + printMsg(lvlError, "warning: SQLite database is busy (SQLITE_PROTOCOL)"); + else { + static bool warned = false; + if (!warned) { + printMsg(lvlError, "warning: SQLite database is busy"); + warned = true; + } + } + /* Sleep for a while since retrying the transaction right away + is likely to fail again. 
*/ + checkInterrupt(); +#if HAVE_NANOSLEEP + struct timespec t; + t.tv_sec = 0; + t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */ + nanosleep(&t, 0); +#else + sleep(1); +#endif + throw SQLiteBusy(format("%1%: %2%") % f.str() % sqlite3_errmsg(db)); + } + else + throw SQLiteError(format("%1%: %2%") % f.str() % sqlite3_errmsg(db)); +} + +SQLite::~SQLite() +{ + try { + if (db && sqlite3_close(db) != SQLITE_OK) + throwSQLiteError(db, "closing database"); + } catch (...) { + ignoreException(); + } +} + +void SQLiteStmt::create(sqlite3 * db, const string & s) +{ + checkInterrupt(); + assert(!stmt); + if (sqlite3_prepare_v2(db, s.c_str(), -1, &stmt, 0) != SQLITE_OK) + throwSQLiteError(db, "creating statement"); + this->db = db; +} + +SQLiteStmt::~SQLiteStmt() +{ + try { + if (stmt && sqlite3_finalize(stmt) != SQLITE_OK) + throwSQLiteError(db, "finalizing statement"); + } catch (...) { + ignoreException(); + } +} + +SQLiteStmt::Use::Use(SQLiteStmt & stmt) + : stmt(stmt) +{ + assert(stmt.stmt); + /* Note: sqlite3_reset() returns the error code for the most + recent call to sqlite3_step(). So ignore it. 
*/ + sqlite3_reset(stmt); +} + +SQLiteStmt::Use::~Use() +{ + sqlite3_reset(stmt); +} + +SQLiteStmt::Use & SQLiteStmt::Use::operator () (const std::string & value, bool notNull) +{ + if (notNull) { + if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT) != SQLITE_OK) + throwSQLiteError(stmt.db, "binding argument"); + } else + bind(); + return *this; +} + +SQLiteStmt::Use & SQLiteStmt::Use::operator () (int64_t value, bool notNull) +{ + if (notNull) { + if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK) + throwSQLiteError(stmt.db, "binding argument"); + } else + bind(); + return *this; +} + +SQLiteStmt::Use & SQLiteStmt::Use::bind() +{ + if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK) + throwSQLiteError(stmt.db, "binding argument"); + return *this; +} + +int SQLiteStmt::Use::step() +{ + return sqlite3_step(stmt); +} + +void SQLiteStmt::Use::exec() +{ + int r = step(); + assert(r != SQLITE_ROW); + if (r != SQLITE_DONE) + throwSQLiteError(stmt.db, "executing SQLite statement"); +} + +bool SQLiteStmt::Use::next() +{ + int r = step(); + if (r != SQLITE_DONE && r != SQLITE_ROW) + throwSQLiteError(stmt.db, "executing SQLite query"); + return r == SQLITE_ROW; +} + +std::string SQLiteStmt::Use::getStr(int col) +{ + auto s = (const char *) sqlite3_column_text(stmt, col); + assert(s); + return s; +} + +int64_t SQLiteStmt::Use::getInt(int col) +{ + // FIXME: detect nulls? 
+ return sqlite3_column_int64(stmt, col); +} + +bool SQLiteStmt::Use::isNull(int col) +{ + return sqlite3_column_type(stmt, col) == SQLITE_NULL; +} + +SQLiteTxn::SQLiteTxn(sqlite3 * db) +{ + this->db = db; + if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK) + throwSQLiteError(db, "starting transaction"); + active = true; +} + +void SQLiteTxn::commit() +{ + if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK) + throwSQLiteError(db, "committing transaction"); + active = false; +} + +SQLiteTxn::~SQLiteTxn() +{ + try { + if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK) + throwSQLiteError(db, "aborting transaction"); + } catch (...) { + ignoreException(); + } +} + +} diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh new file mode 100644 index 000000000000..d6b4a8d9117b --- /dev/null +++ b/src/libstore/sqlite.hh @@ -0,0 +1,103 @@ +#pragma once + +#include <functional> +#include <string> + +#include "types.hh" + +class sqlite3; +class sqlite3_stmt; + +namespace nix { + +/* RAII wrapper to close a SQLite database automatically. */ +struct SQLite +{ + sqlite3 * db; + SQLite() { db = 0; } + ~SQLite(); + operator sqlite3 * () { return db; } +}; + +/* RAII wrapper to create and destroy SQLite prepared statements. */ +struct SQLiteStmt +{ + sqlite3 * db = 0; + sqlite3_stmt * stmt = 0; + SQLiteStmt() { } + void create(sqlite3 * db, const std::string & s); + ~SQLiteStmt(); + operator sqlite3_stmt * () { return stmt; } + + /* Helper for binding / executing statements. */ + class Use + { + friend struct SQLiteStmt; + private: + SQLiteStmt & stmt; + unsigned int curArg = 1; + Use(SQLiteStmt & stmt); + + public: + + ~Use(); + + /* Bind the next parameter. */ + Use & operator () (const std::string & value, bool notNull = true); + Use & operator () (int64_t value, bool notNull = true); + Use & bind(); // null + + int step(); + + /* Execute a statement that does not return rows. */ + void exec(); + + /* For statements that return 0 or more rows. 
Returns true iff + a row is available. */ + bool next(); + + std::string getStr(int col); + int64_t getInt(int col); + bool isNull(int col); + }; + + Use use() + { + return Use(*this); + } +}; + +/* RAII helper that ensures transactions are aborted unless explicitly + committed. */ +struct SQLiteTxn +{ + bool active = false; + sqlite3 * db; + + SQLiteTxn(sqlite3 * db); + + void commit(); + + ~SQLiteTxn(); +}; + + +MakeError(SQLiteError, Error); +MakeError(SQLiteBusy, SQLiteError); + +[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f); + +/* Convenience function for retrying a SQLite transaction when the + database is busy. */ +template<typename T> +T retrySQLite(std::function<T()> fun) +{ + while (true) { + try { + return fun(); + } catch (SQLiteBusy & e) { + } + } +} + +} diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index d3cbd1e7dee2..f39d6b54787c 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -1,21 +1,13 @@ -#include "store-api.hh" +#include "crypto.hh" #include "globals.hh" +#include "store-api.hh" #include "util.hh" - -#include <climits> +#include "nar-info-disk-cache.hh" namespace nix { -GCOptions::GCOptions() -{ - action = gcDeleteDead; - ignoreLiveness = false; - maxFreed = ULLONG_MAX; -} - - bool isInStore(const Path & path) { return isInDir(path, settings.nixStore); @@ -25,6 +17,7 @@ bool isInStore(const Path & path) bool isStorePath(const Path & path) { return isInStore(path) + && path.size() >= settings.nixStore.size() + 1 + storePathHashLen && path.find('/', settings.nixStore.size() + 1) == Path::npos; } @@ -71,7 +64,17 @@ Path followLinksToStorePath(const Path & path) string storePathToName(const Path & path) { assertStorePath(path); - return string(path, settings.nixStore.size() + 34); + auto l = settings.nixStore.size() + 1 + storePathHashLen; + assert(path.size() >= l); + return path.size() == l ? 
"" : string(path, l + 1); +} + + +string storePathToHash(const Path & path) +{ + assertStorePath(path); + assert(path.size() >= settings.nixStore.size() + 1 + storePathHashLen); + return string(path, settings.nixStore.size() + 1, storePathHashLen); } @@ -82,14 +85,14 @@ void checkStoreName(const string & name) reasons (e.g., "." and ".."). */ if (string(name, 0, 1) == ".") throw Error(format("illegal name: ‘%1%’") % name); - foreach (string::const_iterator, i, name) - if (!((*i >= 'A' && *i <= 'Z') || - (*i >= 'a' && *i <= 'z') || - (*i >= '0' && *i <= '9') || - validChars.find(*i) != string::npos)) + for (auto & i : name) + if (!((i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + validChars.find(i) != string::npos)) { throw Error(format("invalid character ‘%1%’ in name ‘%2%’") - % *i % name); + % i % name); } } @@ -101,22 +104,22 @@ void checkStoreName(const string & name) where <store> = the location of the Nix store, usually /nix/store - + <name> = a human readable name for the path, typically obtained from the name attribute of the derivation, or the name of the source file from which the store path is created. For derivation outputs other than the default "out" output, the string "-<id>" is suffixed to <name>. - + <h> = base-32 representation of the first 160 bits of a SHA-256 hash of <s>; the hash part of the store name - + <s> = the string "<type>:sha256:<h2>:<store>:<name>"; note that it includes the location of the store as well as the name to make sure that changes to either of those are reflected in the hash (e.g. you won't get /nix/store/<h>-name1 and /nix/store/<h>-name2 with equal hash parts). 
- + <type> = one of: "text:<r1>:<r2>:...<rN>" for plain text files written to the store using @@ -138,14 +141,14 @@ void checkStoreName(const string & name) if <type> = "source": the serialisation of the path from which this store path is copied, as returned by hashPath() - if <type> = "output:out": + if <type> = "output:<id>": for non-fixed derivation outputs: the derivation (see hashDerivationModulo() in primops.cc) for paths copied by addToStore() or produced by fixed-output derivations: the string "fixed:out:<rec><algo>:<hash>:", where - <rec> = "r:" for recursive (path) hashes, or "" or flat + <rec> = "r:" for recursive (path) hashes, or "" for flat (file) hashes <algo> = "md5", "sha1" or "sha256" <hash> = base-16 representation of the path or flat hash of @@ -219,45 +222,153 @@ Path computeStorePathForText(const string & name, const string & s, hacky, but we can't put them in `s' since that would be ambiguous. */ string type = "text"; - foreach (PathSet::const_iterator, i, references) { + for (auto & i : references) { type += ":"; - type += *i; + type += i; } return makeStorePath(type, hash, name); } +std::string Store::getUri() +{ + return ""; +} + + +bool Store::isValidPath(const Path & storePath) +{ + auto hashPart = storePathToHash(storePath); + + { + auto state_(state.lock()); + auto res = state_->pathInfoCache.get(hashPart); + if (res) { + stats.narInfoReadAverted++; + return *res != 0; + } + } + + if (diskCache) { + auto res = diskCache->lookupNarInfo(getUri(), hashPart); + if (res.first != NarInfoDiskCache::oUnknown) { + stats.narInfoReadAverted++; + auto state_(state.lock()); + state_->pathInfoCache.upsert(hashPart, + res.first == NarInfoDiskCache::oInvalid ? 0 : res.second); + return res.first == NarInfoDiskCache::oValid; + } + } + + return isValidPathUncached(storePath); + + // FIXME: insert result into NARExistence table of diskCache. 
+} + + +ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath) +{ + auto hashPart = storePathToHash(storePath); + + { + auto state_(state.lock()); + auto res = state_->pathInfoCache.get(hashPart); + if (res) { + stats.narInfoReadAverted++; + if (!*res) + throw InvalidPath(format("path ‘%s’ is not valid") % storePath); + return ref<ValidPathInfo>(*res); + } + } + + if (diskCache) { + auto res = diskCache->lookupNarInfo(getUri(), hashPart); + if (res.first != NarInfoDiskCache::oUnknown) { + stats.narInfoReadAverted++; + auto state_(state.lock()); + state_->pathInfoCache.upsert(hashPart, + res.first == NarInfoDiskCache::oInvalid ? 0 : res.second); + if (res.first == NarInfoDiskCache::oInvalid || + (res.second->path != storePath && storePathToName(storePath) != "")) + throw InvalidPath(format("path ‘%s’ is not valid") % storePath); + return ref<ValidPathInfo>(res.second); + } + } + + auto info = queryPathInfoUncached(storePath); + + if (diskCache && info) + diskCache->upsertNarInfo(getUri(), hashPart, info); + + { + auto state_(state.lock()); + state_->pathInfoCache.upsert(hashPart, info); + } + + if (!info + || (info->path != storePath && storePathToName(storePath) != "")) + { + stats.narInfoMissing++; + throw InvalidPath(format("path ‘%s’ is not valid") % storePath); + } + + return ref<ValidPathInfo>(info); +} + + /* Return a string accepted by decodeValidPathInfo() that registers the specified paths as valid. Note: it's the responsibility of the caller to provide a closure. 
*/ -string StoreAPI::makeValidityRegistration(const PathSet & paths, +string Store::makeValidityRegistration(const PathSet & paths, bool showDerivers, bool showHash) { string s = ""; - - foreach (PathSet::iterator, i, paths) { - s += *i + "\n"; - ValidPathInfo info = queryPathInfo(*i); + for (auto & i : paths) { + s += i + "\n"; + + auto info = queryPathInfo(i); if (showHash) { - s += printHash(info.hash) + "\n"; - s += (format("%1%\n") % info.narSize).str(); + s += printHash(info->narHash) + "\n"; + s += (format("%1%\n") % info->narSize).str(); } - Path deriver = showDerivers ? info.deriver : ""; + Path deriver = showDerivers ? info->deriver : ""; s += deriver + "\n"; - s += (format("%1%\n") % info.references.size()).str(); + s += (format("%1%\n") % info->references.size()).str(); - foreach (PathSet::iterator, j, info.references) - s += *j + "\n"; + for (auto & j : info->references) + s += j + "\n"; } return s; } +const Store::Stats & Store::getStats() +{ + { + auto state_(state.lock()); + stats.pathInfoCacheSize = state_->pathInfoCache.size(); + } + return stats; +} + + +void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, + const Path & storePath, bool repair) +{ + auto info = srcStore->queryPathInfo(storePath); + + StringSink sink; + srcStore->narFromPath({storePath}, sink); + + dstStore->addToStore(*info, *sink.s, repair); +} + + ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven) { ValidPathInfo info; @@ -266,7 +377,7 @@ ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven) if (hashGiven) { string s; getline(str, s); - info.hash = parseHash(htSHA256, s); + info.narHash = parseHash(htSHA256, s); getline(str, s); if (!string2Int(s, info.narSize)) throw Error("number expected"); } @@ -286,22 +397,55 @@ ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven) string showPaths(const PathSet & paths) { string s; - foreach (PathSet::const_iterator, i, paths) { + for (auto & i : paths) { if (s.size() != 0) s += 
", "; - s += "‘" + *i + "’"; + s += "‘" + i + "’"; } return s; } -void exportPaths(StoreAPI & store, const Paths & paths, - bool sign, Sink & sink) +std::string ValidPathInfo::fingerprint() const { - foreach (Paths::const_iterator, i, paths) { - writeInt(1, sink); - store.exportPath(*i, sign, sink); - } - writeInt(0, sink); + if (narSize == 0 || !narHash) + throw Error(format("cannot calculate fingerprint of path ‘%s’ because its size/hash is not known") + % path); + return + "1;" + path + ";" + + printHashType(narHash.type) + ":" + printHash32(narHash) + ";" + + std::to_string(narSize) + ";" + + concatStringsSep(",", references); +} + + +void ValidPathInfo::sign(const SecretKey & secretKey) +{ + sigs.insert(secretKey.signDetached(fingerprint())); +} + + +unsigned int ValidPathInfo::checkSignatures(const PublicKeys & publicKeys) const +{ + unsigned int good = 0; + for (auto & sig : sigs) + if (checkSignature(publicKeys, sig)) + good++; + return good; +} + + +bool ValidPathInfo::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const +{ + return verifyDetached(fingerprint(), sig, publicKeys); +} + + +Strings ValidPathInfo::shortRefs() const +{ + Strings refs; + for (auto & r : references) + refs.push_back(baseNameOf(r)); + return refs; } @@ -309,22 +453,102 @@ void exportPaths(StoreAPI & store, const Paths & paths, #include "local-store.hh" -#include "serialise.hh" #include "remote-store.hh" namespace nix { -std::shared_ptr<StoreAPI> store; +RegisterStoreImplementation::Implementations * RegisterStoreImplementation::implementations = 0; -std::shared_ptr<StoreAPI> openStore(bool reserveSpace) +ref<Store> openStoreAt(const std::string & uri_) { - if (getEnv("NIX_REMOTE") == "") - return std::shared_ptr<StoreAPI>(new LocalStore(reserveSpace)); - else - return std::shared_ptr<StoreAPI>(new RemoteStore()); + auto uri(uri_); + StoreParams params; + auto q = uri.find('?'); + if (q != std::string::npos) { + for (auto s : 
tokenizeString<Strings>(uri.substr(q + 1), "&")) { + auto e = s.find('='); + if (e != std::string::npos) + params[s.substr(0, e)] = s.substr(e + 1); + } + uri = uri_.substr(0, q); + } + + for (auto fun : *RegisterStoreImplementation::implementations) { + auto store = fun(uri, params); + if (store) return ref<Store>(store); + } + + throw Error(format("don't know how to open Nix store ‘%s’") % uri); +} + + +ref<Store> openStore() +{ + return openStoreAt(getEnv("NIX_REMOTE")); +} + + +static RegisterStoreImplementation regStore([]( + const std::string & uri, const StoreParams & params) + -> std::shared_ptr<Store> +{ + enum { mDaemon, mLocal, mAuto } mode; + + if (uri == "daemon") mode = mDaemon; + else if (uri == "local") mode = mLocal; + else if (uri == "") mode = mAuto; + else return 0; + + if (mode == mAuto) { + if (LocalStore::haveWriteAccess()) + mode = mLocal; + else if (pathExists(settings.nixDaemonSocketFile)) + mode = mDaemon; + else + mode = mLocal; + } + + return mode == mDaemon + ? 
std::shared_ptr<Store>(std::make_shared<RemoteStore>()) + : std::shared_ptr<Store>(std::make_shared<LocalStore>()); +}); + + +std::list<ref<Store>> getDefaultSubstituters() +{ + struct State { + bool done = false; + std::list<ref<Store>> stores; + }; + static Sync<State> state_; + + auto state(state_.lock()); + + if (state->done) return state->stores; + + StringSet done; + + auto addStore = [&](const std::string & uri) { + if (done.count(uri)) return; + done.insert(uri); + state->stores.push_back(openStoreAt(uri)); + }; + + for (auto uri : settings.get("substituters", Strings())) + addStore(uri); + + for (auto uri : settings.get("binary-caches", Strings())) + addStore(uri); + + for (auto uri : settings.get("extra-binary-caches", Strings())) + addStore(uri); + + state->done = true; + + return state->stores; } diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 3764f3e54242..8c618bf3e771 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -2,15 +2,27 @@ #include "hash.hh" #include "serialise.hh" +#include "crypto.hh" +#include "lru-cache.hh" +#include "sync.hh" -#include <string> +#include <atomic> +#include <limits> #include <map> #include <memory> +#include <string> namespace nix { +/* Size of the hash part of store paths, in base-32 characters. */ +const size_t storePathHashLen = 32; // i.e. 160 bits + +/* Magic header of exportPath() output (obsolete). */ +const uint32_t exportMagic = 0x4558494e; + + typedef std::map<Path, Path> Roots; @@ -36,21 +48,19 @@ struct GCOptions gcDeleteSpecific, } GCAction; - GCAction action; + GCAction action{gcDeleteDead}; /* If `ignoreLiveness' is set, then reachability from the roots is ignored (dangerous!). However, the paths must still be unreferenced *within* the store (i.e., there can be no other store paths that depend on them). */ - bool ignoreLiveness; + bool ignoreLiveness{false}; /* For `gcDeleteSpecific', the paths to delete. 
*/ PathSet pathsToDelete; /* Stop after at least `maxFreed' bytes have been freed. */ - unsigned long long maxFreed; - - GCOptions(); + unsigned long long maxFreed{std::numeric_limits<unsigned long long>::max()}; }; @@ -86,55 +96,138 @@ struct ValidPathInfo { Path path; Path deriver; - Hash hash; + Hash narHash; PathSet references; - time_t registrationTime; - unsigned long long narSize; // 0 = unknown - unsigned long long id; // internal use only - ValidPathInfo() : registrationTime(0), narSize(0) { } + time_t registrationTime = 0; + uint64_t narSize = 0; // 0 = unknown + uint64_t id; // internal use only + + /* Whether the path is ultimately trusted, that is, it was built + locally or is content-addressable (e.g. added via addToStore() + or the result of a fixed-output derivation). */ + bool ultimate = false; + + StringSet sigs; // note: not necessarily verified + + bool operator == (const ValidPathInfo & i) const + { + return + path == i.path + && narHash == i.narHash + && references == i.references; + } + + /* Return a fingerprint of the store path to be used in binary + cache signatures. It contains the store path, the base-32 + SHA-256 hash of the NAR serialisation of the path, the size of + the NAR, and the sorted references. The size field is strictly + speaking superfluous, but might prevent endless/excessive data + attacks. */ + std::string fingerprint() const; + + void sign(const SecretKey & secretKey); + + /* Return the number of signatures on this .narinfo that were + produced by one of the specified keys. */ + unsigned int checkSignatures(const PublicKeys & publicKeys) const; + + /* Verify a single signature. 
*/ + bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const; + + Strings shortRefs() const; + + virtual ~ValidPathInfo() { } }; typedef list<ValidPathInfo> ValidPathInfos; -enum BuildMode { bmNormal, bmRepair, bmCheck }; +enum BuildMode { bmNormal, bmRepair, bmCheck, bmHash }; -class StoreAPI +struct BuildResult { + enum Status { + Built = 0, + Substituted, + AlreadyValid, + PermanentFailure, + InputRejected, + OutputRejected, + TransientFailure, // possibly transient + TimedOut, + MiscFailure, + DependencyFailed, + LogLimitExceeded, + NotDeterministic, + } status = MiscFailure; + std::string errorMsg; + //time_t startTime = 0, stopTime = 0; + bool success() { + return status == Built || status == Substituted || status == AlreadyValid; + } +}; + + +struct BasicDerivation; +struct Derivation; +class FSAccessor; +class NarInfoDiskCache; + + +class Store : public std::enable_shared_from_this<Store> +{ +protected: + + struct State + { + LRUCache<std::string, std::shared_ptr<ValidPathInfo>> pathInfoCache{64 * 1024}; + }; + + Sync<State> state; + + std::shared_ptr<NarInfoDiskCache> diskCache; + public: - virtual ~StoreAPI() { } + virtual ~Store() { } + + virtual std::string getUri() = 0; /* Check whether a path is valid. */ - virtual bool isValidPath(const Path & path) = 0; + bool isValidPath(const Path & path); + +protected: + + virtual bool isValidPathUncached(const Path & path) = 0; + +public: /* Query which of the given paths is valid. */ virtual PathSet queryValidPaths(const PathSet & paths) = 0; - /* Query the set of all valid paths. */ + /* Query the set of all valid paths. Note that for some store + backends, the name part of store paths may be omitted + (i.e. you'll get /nix/store/<hash> rather than + /nix/store/<hash>-<name>). Use queryPathInfo() to obtain the + full store path. */ virtual PathSet queryAllValidPaths() = 0; - /* Query information about a valid path. 
*/ - virtual ValidPathInfo queryPathInfo(const Path & path) = 0; + /* Query information about a valid path. It is permitted to omit + the name part of the store path. */ + ref<const ValidPathInfo> queryPathInfo(const Path & path); - /* Query the hash of a valid path. */ - virtual Hash queryPathHash(const Path & path) = 0; +protected: - /* Query the set of outgoing FS references for a store path. The - result is not cleared. */ - virtual void queryReferences(const Path & path, - PathSet & references) = 0; + virtual std::shared_ptr<ValidPathInfo> queryPathInfoUncached(const Path & path) = 0; + +public: /* Queries the set of incoming FS references for a store path. The result is not cleared. */ virtual void queryReferrers(const Path & path, PathSet & referrers) = 0; - /* Query the deriver of a store path. Return the empty string if - no deriver has been set. */ - virtual Path queryDeriver(const Path & path) = 0; - /* Return all currently valid derivations that have `path' as an output. (Note that the result of `queryDeriver()' is the derivation that was actually used to produce `path', which may @@ -160,6 +253,12 @@ public: virtual void querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos) = 0; + virtual bool wantMassQuery() { return false; } + + /* Import a path into the store. */ + virtual void addToStore(const ValidPathInfo & info, const std::string & nar, + bool repair = false) = 0; + /* Copy the contents of a path to the store and register the validity the resulting path. The resulting path is returned. The function object `filter' can be used to exclude files (see @@ -173,16 +272,8 @@ public: virtual Path addTextToStore(const string & name, const string & s, const PathSet & references, bool repair = false) = 0; - /* Export a store path, that is, create a NAR dump of the store - path and append its references and its deriver. Optionally, a - cryptographic signature (created by OpenSSL) of the preceding - data is attached. 
*/ - virtual void exportPath(const Path & path, bool sign, - Sink & sink) = 0; - - /* Import a sequence of NAR dumps created by exportPaths() into - the Nix store. */ - virtual Paths importPaths(bool requireSignature, Source & source) = 0; + /* Write a NAR dump of a store path. */ + virtual void narFromPath(const Path & path, Sink & sink) = 0; /* For each path, if it's a derivation, build it. Building a derivation means ensuring that the output paths are valid. If @@ -194,6 +285,12 @@ public: not derivations, substitute them. */ virtual void buildPaths(const PathSet & paths, BuildMode buildMode = bmNormal) = 0; + /* Build a single non-materialized derivation (i.e. not from an + on-disk .drv file). Note that ‘drvPath’ is only used for + informational purposes. */ + virtual BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv, + BuildMode buildMode = bmNormal) = 0; + /* Ensure that a path is valid. If it is not currently valid, it may be made valid by running a substitute (if defined for the path). */ @@ -210,6 +307,10 @@ public: `path' has disappeared. */ virtual void addIndirectRoot(const Path & path) = 0; + /* Register a permanent GC root. */ + Path addPermRoot(const Path & storePath, + const Path & gcRoot, bool indirect, bool allowOutsideRootsDir = false); + /* Acquire the global GC lock, then immediately release it. This function must be called after registering a new permanent root, but before exiting. Otherwise, it is possible that a running @@ -238,13 +339,6 @@ public: /* Perform a garbage collection. */ virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0; - /* Return the set of paths that have failed to build.*/ - virtual PathSet queryFailedPaths() = 0; - - /* Clear the "failed" status of the given paths. The special - value `*' causes all failed paths to be cleared. 
*/ - virtual void clearFailedPaths(const PathSet & paths) = 0; - /* Return a string representing information about the path that can be loaded into the database using `nix-store --load-db' or `nix-store --register-validity'. */ @@ -258,6 +352,86 @@ public: /* Check the integrity of the Nix store. Returns true if errors remain. */ virtual bool verifyStore(bool checkContents, bool repair) = 0; + + /* Return an object to access files in the Nix store. */ + virtual ref<FSAccessor> getFSAccessor() = 0; + + /* Add signatures to the specified store path. The signatures are + not verified. */ + virtual void addSignatures(const Path & storePath, const StringSet & sigs) = 0; + + /* Utility functions. */ + + /* Read a derivation, after ensuring its existence through + ensurePath(). */ + Derivation derivationFromPath(const Path & drvPath); + + /* Place in `paths' the set of all store paths in the file system + closure of `storePath'; that is, all paths that can be directly + or indirectly reached from it. `paths' is not cleared. If + `flipDirection' is true, the set of paths that can reach + `storePath' is returned; that is, the closures under the + `referrers' relation instead of the `references' relation is + returned. */ + void computeFSClosure(const Path & path, + PathSet & paths, bool flipDirection = false, + bool includeOutputs = false, bool includeDerivers = false); + + /* Given a set of paths that are to be built, return the set of + derivations that will be built, and the set of output paths + that will be substituted. */ + void queryMissing(const PathSet & targets, + PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown, + unsigned long long & downloadSize, unsigned long long & narSize); + + /* Sort a set of paths topologically under the references + relation. If p refers to q, then p precedes q in this list. */ + Paths topoSortPaths(const PathSet & paths); + + /* Export multiple paths in the format expected by ‘nix-store + --import’. 
*/ + void exportPaths(const Paths & paths, Sink & sink); + + void exportPath(const Path & path, Sink & sink); + + /* Import a sequence of NAR dumps created by exportPaths() into + the Nix store. Optionally, the contents of the NARs are + preloaded into the specified FS accessor to speed up subsequent + access. */ + Paths importPaths(Source & source, + std::shared_ptr<FSAccessor> accessor); + + struct Stats + { + std::atomic<uint64_t> narInfoRead{0}; + std::atomic<uint64_t> narInfoReadAverted{0}; + std::atomic<uint64_t> narInfoMissing{0}; + std::atomic<uint64_t> narInfoWrite{0}; + std::atomic<uint64_t> pathInfoCacheSize{0}; + std::atomic<uint64_t> narRead{0}; + std::atomic<uint64_t> narReadBytes{0}; + std::atomic<uint64_t> narReadCompressedBytes{0}; + std::atomic<uint64_t> narWrite{0}; + std::atomic<uint64_t> narWriteAverted{0}; + std::atomic<uint64_t> narWriteBytes{0}; + std::atomic<uint64_t> narWriteCompressedBytes{0}; + std::atomic<uint64_t> narWriteCompressionTimeMs{0}; + }; + + const Stats & getStats(); + +protected: + + Stats stats; + +}; + + +class LocalFSStore : public Store +{ +public: + void narFromPath(const Path & path, Sink & sink) override; + ref<FSAccessor> getFSAccessor() override; }; @@ -272,6 +446,9 @@ bool isStorePath(const Path & path); /* Extract the name part of the given store path. */ string storePathToName(const Path & path); +/* Extract the hash part of the given store path. */ +string storePathToHash(const Path & path); + void checkStoreName(const string & name); @@ -326,30 +503,62 @@ Path computeStorePathForText(const string & name, const string & s, const PathSet & references); +/* Copy a path from one store to another. */ +void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, + const Path & storePath, bool repair = false); + + /* Remove the temporary roots file for this process. Any temporary root becomes garbage after this point unless it has been registered as a (permanent) root. 
*/ void removeTempRoots(); -/* Register a permanent GC root. */ -Path addPermRoot(StoreAPI & store, const Path & storePath, - const Path & gcRoot, bool indirect, bool allowOutsideRootsDir = false); +/* Return a Store object to access the Nix store denoted by + ‘uri’ (slight misnomer...). Supported values are: + + * ‘direct’: The Nix store in /nix/store and database in + /nix/var/nix/db, accessed directly. + + * ‘daemon’: The Nix store accessed via a Unix domain socket + connection to nix-daemon. + + * ‘file://<path>’: A binary cache stored in <path>. + + If ‘uri’ is empty, it defaults to ‘direct’ or ‘daemon’ depending on + whether the user has write access to the local Nix store/database. */ +ref<Store> openStoreAt(const std::string & uri); -/* Sort a set of paths topologically under the references relation. - If p refers to q, then p preceeds q in this list. */ -Paths topoSortPaths(StoreAPI & store, const PathSet & paths); +/* Open the store indicated by the ‘NIX_REMOTE’ environment variable. */ +ref<Store> openStore(); -/* For now, there is a single global store API object, but we'll - purify that in the future. */ -extern std::shared_ptr<StoreAPI> store; +/* Return the default substituter stores, defined by the + ‘substituters’ option and various legacy options like + ‘binary-caches’. */ +std::list<ref<Store>> getDefaultSubstituters(); -/* Factory method: open the Nix database, either through the local or - remote implementation. */ -std::shared_ptr<StoreAPI> openStore(bool reserveSpace = true); +/* Store implementation registration. 
*/ +typedef std::map<std::string, std::string> StoreParams; + +typedef std::function<std::shared_ptr<Store>( + const std::string & uri, const StoreParams & params)> OpenStore; + +struct RegisterStoreImplementation +{ + typedef std::vector<OpenStore> Implementations; + static Implementations * implementations; + + RegisterStoreImplementation(OpenStore fun) + { + if (!implementations) implementations = new Implementations; + implementations->push_back(fun); + } +}; + /* Display a set of paths in human-readable form (i.e., between quotes @@ -361,14 +570,9 @@ ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven = false); -/* Export multiple paths in the format expected by ‘nix-store - --import’. */ -void exportPaths(StoreAPI & store, const Paths & paths, - bool sign, Sink & sink); - - MakeError(SubstError, Error) MakeError(BuildError, Error) /* denotes a permanent build failure */ +MakeError(InvalidPath, Error) } diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index d037d7402ede..7ff0553a016c 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -6,7 +6,7 @@ namespace nix { #define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_2 0x6478696f -#define PROTOCOL_VERSION 0x10e +#define PROTOCOL_VERSION 0x111 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) @@ -14,8 +14,8 @@ namespace nix { typedef enum { wopIsValidPath = 1, wopHasSubstitutes = 3, - wopQueryPathHash = 4, - wopQueryReferences = 5, + wopQueryPathHash = 4, // obsolete + wopQueryReferences = 5, // obsolete wopQueryReferrers = 6, wopAddToStore = 7, wopAddTextToStore = 8, @@ -25,8 +25,8 @@ typedef enum { wopAddIndirectRoot = 12, wopSyncWithGC = 13, wopFindRoots = 14, - wopExportPath = 16, - wopQueryDeriver = 18, + wopExportPath = 16, // obsolete + wopQueryDeriver = 18, // obsolete wopSetOptions = 19, wopCollectGarbage = 20, wopQuerySubstitutablePathInfo = 21, @@ -35,7 +35,7 @@ typedef enum { 
wopQueryFailedPaths = 24, wopClearFailedPaths = 25, wopQueryPathInfo = 26, - wopImportPaths = 27, + wopImportPaths = 27, // obsolete wopQueryDerivationOutputNames = 28, wopQueryPathFromHashPart = 29, wopQuerySubstitutablePathInfos = 30, @@ -43,7 +43,9 @@ typedef enum { wopQuerySubstitutablePaths = 32, wopQueryValidDerivers = 33, wopOptimiseStore = 34, - wopVerifyStore = 35 + wopVerifyStore = 35, + wopBuildDerivation = 36, + wopAddSignatures = 37, } WorkerOp; |