Diffstat (limited to 'src/libstore')
34 files changed, 942 insertions, 564 deletions
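A note on the recurring pattern in this changeset: the paired success/failure std::function arguments of getFile(), queryPathInfoUncached() and Downloader::enqueueDownload() are replaced by a single Callback<T>. Callback itself is defined in libutil and is not part of this diff; what follows is a minimal sketch of the pattern, assuming Callback<T> merely wraps one completion function taking a std::future<T>, together with the promise/future bridging the diff uses to keep the synchronous entry points (e.g. BinaryCacheStore::getFile and the future-returning enqueueDownload overload) working. The names getFileAsync and the example path are illustrative only.

#include <exception>
#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <string>

/* Hypothetical stand-in for nix::Callback<T> (the real one lives in
   libutil, outside this diff): a single completion function receiving a
   std::future<T>, so results and exceptions travel through one path. */
template<typename T>
struct Callback
{
    std::function<void(std::future<T>)> fun;

    Callback(std::function<void(std::future<T>)> fun) : fun(fun) { }

    /* Deliver a result. */
    void operator()(T x) const
    {
        std::promise<T> p;
        p.set_value(std::move(x));
        fun(p.get_future());
    }

    /* Deliver an exception. */
    void rethrow(std::exception_ptr e) const
    {
        std::promise<T> p;
        p.set_exception(e);
        fun(p.get_future());
    }
};

/* Toy asynchronous fetch; a real subclass may invoke the callback from
   another thread (as CurlDownloader does). */
static void getFileAsync(const std::string & path,
    Callback<std::shared_ptr<std::string>> callback)
{
    try {
        callback(std::make_shared<std::string>("contents of " + path));
    } catch (...) {
        callback.rethrow(std::current_exception());
    }
}

int main()
{
    /* Bridge async back to sync the way the diff does: fill a promise
       from inside the callback, then wait on its future. */
    std::promise<std::shared_ptr<std::string>> promise;
    getFileAsync("nar/example.nar.xz",
        {[&](std::future<std::shared_ptr<std::string>> result) {
            try { promise.set_value(result.get()); }
            catch (...) { promise.set_exception(std::current_exception()); }
        }});
    std::cout << *promise.get_future().get() << std::endl;
}

Funnelling both values and errors through a std::future<T> is what lets call sites write a single try/catch around result.get() instead of maintaining separate success and failure lambdas, which is exactly the simplification visible throughout the hunks below.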
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index d1b278b8efbe..11fa3cae27a5 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -54,17 +54,38 @@ void BinaryCacheStore::init() } } -std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path) +void BinaryCacheStore::getFile(const std::string & path, + Callback<std::shared_ptr<std::string>> callback) +{ + try { + callback(getFile(path)); + } catch (...) { callback.rethrow(); } +} + +void BinaryCacheStore::getFile(const std::string & path, Sink & sink) { std::promise<std::shared_ptr<std::string>> promise; getFile(path, - [&](std::shared_ptr<std::string> result) { - promise.set_value(result); - }, - [&](std::exception_ptr exc) { - promise.set_exception(exc); - }); - return promise.get_future().get(); + {[&](std::future<std::shared_ptr<std::string>> result) { + try { + promise.set_value(result.get()); + } catch (...) { + promise.set_exception(std::current_exception()); + } + }}); + auto data = promise.get_future().get(); + sink((unsigned char *) data->data(), data->size()); +} + +std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path) +{ + StringSink sink; + try { + getFile(path, sink); + } catch (NoSuchBinaryCacheFile &) { + return nullptr; + } + return sink.s; } Path BinaryCacheStore::narInfoFileFor(const Path & storePath) @@ -196,34 +217,27 @@ void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink) { auto info = queryPathInfo(storePath).cast<const NarInfo>(); - auto nar = getFile(info->url); - - if (!nar) throw Error(format("file '%s' missing from binary cache") % info->url); + auto source = sinkToSource([this, url{info->url}](Sink & sink) { + getFile(url, sink); + }); stats.narRead++; - stats.narReadCompressedBytes += nar->size(); + //stats.narReadCompressedBytes += nar->size(); // FIXME - /* Decompress the NAR. FIXME: would be nice to have the remote - side do this. 
*/ - try { - nar = decompress(info->compression, *nar); - } catch (UnknownCompressionMethod &) { - throw Error(format("binary cache path '%s' uses unknown compression method '%s'") - % storePath % info->compression); - } - - stats.narReadBytes += nar->size(); + uint64_t narSize = 0; - printMsg(lvlTalkative, format("exporting path '%1%' (%2% bytes)") % storePath % nar->size()); + LambdaSink wrapperSink([&](const unsigned char * data, size_t len) { + sink(data, len); + narSize += len; + }); - assert(nar->size() % 8 == 0); + decompress(info->compression, *source, wrapperSink); - sink((unsigned char *) nar->c_str(), nar->size()); + stats.narReadBytes += narSize; } void BinaryCacheStore::queryPathInfoUncached(const Path & storePath, - std::function<void(std::shared_ptr<ValidPathInfo>)> success, - std::function<void(std::exception_ptr exc)> failure) + Callback<std::shared_ptr<ValidPathInfo>> callback) { auto uri = getUri(); auto act = std::make_shared<Activity>(*logger, lvlTalkative, actQueryPathInfo, @@ -233,17 +247,22 @@ void BinaryCacheStore::queryPathInfoUncached(const Path & storePath, auto narInfoFile = narInfoFileFor(storePath); getFile(narInfoFile, - [=](std::shared_ptr<std::string> data) { - if (!data) return success(0); + {[=](std::future<std::shared_ptr<std::string>> fut) { + try { + auto data = fut.get(); - stats.narInfoRead++; + if (!data) return callback(nullptr); - callSuccess(success, failure, (std::shared_ptr<ValidPathInfo>) - std::make_shared<NarInfo>(*this, *data, narInfoFile)); + stats.narInfoRead++; - (void) act; // force Activity into this lambda to ensure it stays alive - }, - failure); + callback((std::shared_ptr<ValidPathInfo>) + std::make_shared<NarInfo>(*this, *data, narInfoFile)); + + (void) act; // force Activity into this lambda to ensure it stays alive + } catch (...) { + callback.rethrow(); + } + }}); } Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath, diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh index e20b968442b7..6bc83fc50ca1 100644 --- a/src/libstore/binary-cache-store.hh +++ b/src/libstore/binary-cache-store.hh @@ -38,11 +38,16 @@ public: const std::string & data, const std::string & mimeType) = 0; - /* Return the contents of the specified file, or null if it - doesn't exist. */ + /* Note: subclasses must implement at least one of the two + following getFile() methods. */ + + /* Dump the contents of the specified file to a sink. */ + virtual void getFile(const std::string & path, Sink & sink); + + /* Fetch the specified file and call the specified callback with + the result. A subclass may implement this asynchronously. 
*/ virtual void getFile(const std::string & path, - std::function<void(std::shared_ptr<std::string>)> success, - std::function<void(std::exception_ptr exc)> failure) = 0; + Callback<std::shared_ptr<std::string>> callback); std::shared_ptr<std::string> getFile(const std::string & path); @@ -71,8 +76,7 @@ public: { unsupported(); } void queryPathInfoUncached(const Path & path, - std::function<void(std::shared_ptr<ValidPathInfo>)> success, - std::function<void(std::exception_ptr exc)> failure) override; + Callback<std::shared_ptr<ValidPathInfo>> callback) override; void queryReferrers(const Path & path, PathSet & referrers) override @@ -131,4 +135,6 @@ public: }; +MakeError(NoSuchBinaryCacheFile, Error); + } diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 1d611ffbaba5..07b533783931 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -156,7 +156,7 @@ public: abort(); } - void trace(const format & f); + void trace(const FormatOrString & fs); string getName() { @@ -417,9 +417,9 @@ void Goal::amDone(ExitCode result) } -void Goal::trace(const format & f) +void Goal::trace(const FormatOrString & fs) { - debug(format("%1%: %2%") % name % f); + debug("%1%: %2%", name, fs.s); } @@ -652,6 +652,11 @@ HookInstance::HookInstance() if (dup2(builderOut.writeSide.get(), 4) == -1) throw SysError("dupping builder's stdout/stderr"); + /* Hack: pass the read side of that fd to allow build-remote + to read SSH error messages. */ + if (dup2(builderOut.readSide.get(), 5) == -1) + throw SysError("dupping builder's stdout/stderr"); + Strings args = { baseNameOf(settings.buildHook), std::to_string(verbosity), @@ -667,8 +672,10 @@ HookInstance::HookInstance() toHook.readSide = -1; sink = FdSink(toHook.writeSide.get()); - for (auto & setting : settings.getSettings()) - sink << 1 << setting.first << setting.second; + std::map<std::string, Config::SettingInfo> settings; + globalConfig.getSettings(settings); + for (auto & setting : settings) + sink << 1 << setting.first << setting.second.value; sink << 0; } @@ -837,9 +844,9 @@ private: BuildResult result; /* The current round, if we're building multiple times. */ - unsigned int curRound = 1; + size_t curRound = 1; - unsigned int nrRounds; + size_t nrRounds; /* Path registration info from the previous round, if we're building multiple times. Since this contains the hash, it @@ -959,6 +966,8 @@ private: } void done(BuildResult::Status status, const string & msg = ""); + + PathSet exportReferences(PathSet storePaths); }; @@ -1162,7 +1171,7 @@ void DerivationGoal::outputsSubstituted() return; } - unsigned int nrInvalid = checkPathValidity(false, buildMode == bmRepair).size(); + auto nrInvalid = checkPathValidity(false, buildMode == bmRepair).size(); if (buildMode == bmNormal && nrInvalid == 0) { done(BuildResult::Substituted); return; @@ -1189,7 +1198,7 @@ void DerivationGoal::outputsSubstituted() for (auto & i : drv->inputSrcs) { if (worker.store.isValidPath(i)) continue; if (!settings.useSubstitutes) - throw Error(format("dependency of '%1%' of '%2%' does not exist, and substitution is disabled") + throw Error(format("dependency '%1%' of '%2%' does not exist, and substitution is disabled") % i % drvPath); addWaitee(worker.makeSubstitutionGoal(i)); } @@ -1458,7 +1467,7 @@ void replaceValidPath(const Path & storePath, const Path tmpPath) tmpPath (the replacement), so we have to move it out of the way first. We'd better not be interrupted here, because if we're repairing (say) Glibc, we end up with a broken system. 
*/ - Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % rand()).str(); + Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % random()).str(); if (pathExists(storePath)) rename(storePath.c_str(), oldPath.c_str()); if (rename(tmpPath.c_str(), storePath.c_str()) == -1) @@ -1725,22 +1734,23 @@ int childEntry(void * arg) } -PathSet exportReferences(Store & store, PathSet storePaths) +PathSet DerivationGoal::exportReferences(PathSet storePaths) { PathSet paths; for (auto storePath : storePaths) { /* Check that the store path is valid. */ - if (!store.isInStore(storePath)) + if (!worker.store.isInStore(storePath)) throw BuildError(format("'exportReferencesGraph' contains a non-store path '%1%'") % storePath); - storePath = store.toStorePath(storePath); - if (!store.isValidPath(storePath)) - throw BuildError(format("'exportReferencesGraph' contains an invalid path '%1%'") - % storePath); - store.computeFSClosure(storePath, paths); + storePath = worker.store.toStorePath(storePath); + + if (!inputPaths.count(storePath)) + throw BuildError("cannot export references of path '%s' because it is not in the input closure of the derivation", storePath); + + worker.store.computeFSClosure(storePath, paths); } /* If there are derivations in the graph, then include their @@ -1751,9 +1761,9 @@ PathSet exportReferences(Store & store, PathSet storePaths) for (auto & j : paths2) { if (isDerivation(j)) { - Derivation drv = store.derivationFromPath(j); + Derivation drv = worker.store.derivationFromPath(j); for (auto & k : drv.outputs) - store.computeFSClosure(k.second.path, paths); + worker.store.computeFSClosure(k.second.path, paths); } } @@ -1877,7 +1887,7 @@ void DerivationGoal::startBuilder() /* Write closure info to <fileName>. */ writeFile(tmpDir + "/" + fileName, worker.store.makeValidityRegistration( - exportReferences(worker.store, {storePath}), false, false)); + exportReferences({storePath}), false, false)); } } @@ -2379,7 +2389,7 @@ void DerivationGoal::writeStructuredAttrs() for (auto & p : *i) storePaths.insert(p.get<std::string>()); worker.store.pathInfoToJSON(jsonRoot, - exportReferences(worker.store, storePaths), false, true); + exportReferences(storePaths), false, true); } json[i.key()] = nlohmann::json::parse(str.str()); // urgh } @@ -2491,6 +2501,10 @@ void setupSeccomp() seccomp_arch_add(ctx, SCMP_ARCH_X32) != 0) throw SysError("unable to add X32 seccomp architecture"); + if (settings.thisSystem == "aarch64-linux" && + seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0) + printError("unsable to add ARM seccomp architecture; this may result in spurious build failures if running 32-bit ARM processes."); + /* Prevent builders from creating setuid/setgid binaries. 
*/ for (int perm : { S_ISUID, S_ISGID }) { if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1, @@ -2691,8 +2705,8 @@ void DerivationGoal::runChild() } else { if (errno != EINVAL) throw SysError("mounting /dev/pts"); - doBind("/dev/pts", "/dev/pts"); - doBind("/dev/ptmx", "/dev/ptmx"); + doBind("/dev/pts", chrootRootDir + "/dev/pts"); + doBind("/dev/ptmx", chrootRootDir + "/dev/ptmx"); } } @@ -2944,6 +2958,8 @@ void DerivationGoal::runChild() if (drv->builder == "builtin:fetchurl") builtinFetchurl(drv2, netrcData); + else if (drv->builder == "builtin:buildenv") + builtinBuildenv(drv2); else throw Error(format("unsupported builtin function '%1%'") % string(drv->builder, 8)); _exit(0); @@ -3235,6 +3251,8 @@ void DerivationGoal::registerOutputs() info.ultimate = true; worker.store.signPathInfo(info); + if (!info.references.empty()) info.ca.clear(); + infos.push_back(info); } @@ -3680,7 +3698,7 @@ void SubstitutionGoal::tryNext() only after we've downloaded the path. */ if (worker.store.requireSigs && !sub->isTrusted - && !info->checkSignatures(worker.store, worker.store.publicKeys)) + && !info->checkSignatures(worker.store, worker.store.getPublicKeys())) { printError("warning: substituter '%s' does not have a valid signature for path '%s'", sub->getUri(), storePath); @@ -4154,10 +4172,10 @@ void Worker::waitForInput() assert(goal); set<int> fds2(j->fds); + std::vector<unsigned char> buffer(4096); for (auto & k : fds2) { if (FD_ISSET(k, &fds)) { - unsigned char buffer[4096]; - ssize_t rd = read(k, buffer, sizeof(buffer)); + ssize_t rd = read(k, buffer.data(), buffer.size()); if (rd == -1) { if (errno != EINTR) throw SysError(format("reading from %1%") @@ -4169,7 +4187,7 @@ void Worker::waitForInput() } else { printMsg(lvlVomit, format("%1%: read %2% bytes") % goal->getName() % rd); - string data((char *) buffer, rd); + string data((char *) buffer.data(), rd); j->lastOutput = after; goal->handleChildOutput(k, data); } diff --git a/src/libstore/builtins.cc b/src/libstore/builtins.cc deleted file mode 100644 index 4ca4a838e3c4..000000000000 --- a/src/libstore/builtins.cc +++ /dev/null @@ -1,73 +0,0 @@ -#include "builtins.hh" -#include "download.hh" -#include "store-api.hh" -#include "archive.hh" -#include "compression.hh" - -namespace nix { - -void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData) -{ - /* Make the host's netrc data available. Too bad curl requires - this to be stored in a file. It would be nice if we could just - pass a pointer to the data. */ - if (netrcData != "") { - settings.netrcFile = "netrc"; - writeFile(settings.netrcFile, netrcData, 0600); - } - - auto getAttr = [&](const string & name) { - auto i = drv.env.find(name); - if (i == drv.env.end()) throw Error(format("attribute '%s' missing") % name); - return i->second; - }; - - auto fetch = [&](const string & url) { - /* No need to do TLS verification, because we check the hash of - the result anyway. */ - DownloadRequest request(url); - request.verifyTLS = false; - request.decompress = false; - - /* Note: have to use a fresh downloader here because we're in - a forked process. 
*/ - auto data = makeDownloader()->download(request); - assert(data.data); - - return data.data; - }; - - std::shared_ptr<std::string> data; - - if (getAttr("outputHashMode") == "flat") - for (auto hashedMirror : settings.hashedMirrors.get()) - try { - if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/'; - auto ht = parseHashType(getAttr("outputHashAlgo")); - data = fetch(hashedMirror + printHashType(ht) + "/" + Hash(getAttr("outputHash"), ht).to_string(Base16, false)); - break; - } catch (Error & e) { - debug(e.what()); - } - - if (!data) data = fetch(getAttr("url")); - - Path storePath = getAttr("out"); - - auto unpack = drv.env.find("unpack"); - if (unpack != drv.env.end() && unpack->second == "1") { - if (string(*data, 0, 6) == string("\xfd" "7zXZ\0", 6)) - data = decompress("xz", *data); - StringSource source(*data); - restorePath(storePath, source); - } else - writeFile(storePath, *data); - - auto executable = drv.env.find("executable"); - if (executable != drv.env.end() && executable->second == "1") { - if (chmod(storePath.c_str(), 0755) == -1) - throw SysError(format("making '%1%' executable") % storePath); - } -} - -} diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh index 0cc6ba31f658..0d2da873ece4 100644 --- a/src/libstore/builtins.hh +++ b/src/libstore/builtins.hh @@ -4,6 +4,8 @@ namespace nix { +// TODO: make pluggable. void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData); +void builtinBuildenv(const BasicDerivation & drv); } diff --git a/src/libstore/builtins/buildenv.cc b/src/libstore/builtins/buildenv.cc new file mode 100644 index 000000000000..74e706664694 --- /dev/null +++ b/src/libstore/builtins/buildenv.cc @@ -0,0 +1,204 @@ +#include "builtins.hh" + +#include <sys/stat.h> +#include <sys/types.h> +#include <fcntl.h> +#include <algorithm> + +namespace nix { + +typedef std::map<Path,int> Priorities; + +// FIXME: change into local variables. + +static Priorities priorities; + +static unsigned long symlinks; + +/* For each activated package, create symlinks */ +static void createLinks(const Path & srcDir, const Path & dstDir, int priority) +{ + DirEntries srcFiles; + + try { + srcFiles = readDirectory(srcDir); + } catch (SysError & e) { + if (e.errNo == ENOTDIR) { + printError("warning: not including '%s' in the user environment because it's not a directory", srcDir); + return; + } + throw; + } + + for (const auto & ent : srcFiles) { + if (ent.name[0] == '.') + /* not matched by glob */ + continue; + auto srcFile = srcDir + "/" + ent.name; + auto dstFile = dstDir + "/" + ent.name; + + struct stat srcSt; + try { + if (stat(srcFile.c_str(), &srcSt) == -1) + throw SysError("getting status of '%1%'", srcFile); + } catch (SysError & e) { + if (e.errNo == ENOENT || e.errNo == ENOTDIR) { + printError("warning: skipping dangling symlink '%s'", dstFile); + continue; + } + throw; + } + + /* The files below are special-cased to that they don't show up + * in user profiles, either because they are useless, or + * because they would cauase pointless collisions (e.g., each + * Python package brings its own + * `$out/lib/pythonX.Y/site-packages/easy-install.pth'.) 
+ */ + if (hasSuffix(srcFile, "/propagated-build-inputs") || + hasSuffix(srcFile, "/nix-support") || + hasSuffix(srcFile, "/perllocal.pod") || + hasSuffix(srcFile, "/info/dir") || + hasSuffix(srcFile, "/log")) + continue; + + else if (S_ISDIR(srcSt.st_mode)) { + struct stat dstSt; + auto res = lstat(dstFile.c_str(), &dstSt); + if (res == 0) { + if (S_ISDIR(dstSt.st_mode)) { + createLinks(srcFile, dstFile, priority); + continue; + } else if (S_ISLNK(dstSt.st_mode)) { + auto target = canonPath(dstFile, true); + if (!S_ISDIR(lstat(target).st_mode)) + throw Error("collision between '%1%' and non-directory '%2%'", srcFile, target); + if (unlink(dstFile.c_str()) == -1) + throw SysError(format("unlinking '%1%'") % dstFile); + if (mkdir(dstFile.c_str(), 0755) == -1) + throw SysError(format("creating directory '%1%'")); + createLinks(target, dstFile, priorities[dstFile]); + createLinks(srcFile, dstFile, priority); + continue; + } + } else if (errno != ENOENT) + throw SysError(format("getting status of '%1%'") % dstFile); + } + + else { + struct stat dstSt; + auto res = lstat(dstFile.c_str(), &dstSt); + if (res == 0) { + if (S_ISLNK(dstSt.st_mode)) { + auto prevPriority = priorities[dstFile]; + if (prevPriority == priority) + throw Error( + "packages '%1%' and '%2%' have the same priority %3%; " + "use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' " + "to change the priority of one of the conflicting packages" + " (0 being the highest priority)", + srcFile, readLink(dstFile), priority); + if (prevPriority < priority) + continue; + if (unlink(dstFile.c_str()) == -1) + throw SysError(format("unlinking '%1%'") % dstFile); + } else if (S_ISDIR(dstSt.st_mode)) + throw Error("collision between non-directory '%1%' and directory '%2%'", srcFile, dstFile); + } else if (errno != ENOENT) + throw SysError(format("getting status of '%1%'") % dstFile); + } + + createSymlink(srcFile, dstFile); + priorities[dstFile] = priority; + symlinks++; + } +} + +typedef std::set<Path> FileProp; + +static FileProp done; +static FileProp postponed = FileProp{}; + +static Path out; + +static void addPkg(const Path & pkgDir, int priority) +{ + if (done.count(pkgDir)) return; + done.insert(pkgDir); + createLinks(pkgDir, out, priority); + + try { + for (const auto & p : tokenizeString<std::vector<string>>( + readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n")) + if (!done.count(p)) + postponed.insert(p); + } catch (SysError & e) { + if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw; + } +} + +struct Package { + Path path; + bool active; + int priority; + Package(Path path, bool active, int priority) : path{path}, active{active}, priority{priority} {} +}; + +typedef std::vector<Package> Packages; + +void builtinBuildenv(const BasicDerivation & drv) +{ + auto getAttr = [&](const string & name) { + auto i = drv.env.find(name); + if (i == drv.env.end()) throw Error("attribute '%s' missing", name); + return i->second; + }; + + out = getAttr("out"); + createDirs(out); + + /* Convert the stuff we get from the environment back into a + * coherent data type. */ + Packages pkgs; + auto derivations = tokenizeString<Strings>(getAttr("derivations")); + while (!derivations.empty()) { + /* !!! 
We're trusting the caller to structure derivations env var correctly */ + auto active = derivations.front(); derivations.pop_front(); + auto priority = stoi(derivations.front()); derivations.pop_front(); + auto outputs = stoi(derivations.front()); derivations.pop_front(); + for (auto n = 0; n < outputs; n++) { + auto path = derivations.front(); derivations.pop_front(); + pkgs.emplace_back(path, active != "false", priority); + } + } + + /* Symlink to the packages that have been installed explicitly by the + * user. Process in priority order to reduce unnecessary + * symlink/unlink steps. + */ + std::sort(pkgs.begin(), pkgs.end(), [](const Package & a, const Package & b) { + return a.priority < b.priority || (a.priority == b.priority && a.path < b.path); + }); + for (const auto & pkg : pkgs) + if (pkg.active) + addPkg(pkg.path, pkg.priority); + + /* Symlink to the packages that have been "propagated" by packages + * installed by the user (i.e., package X declares that it wants Y + * installed as well). We do these later because they have a lower + * priority in case of collisions. + */ + auto priorityCounter = 1000; + while (!postponed.empty()) { + auto pkgDirs = postponed; + postponed = FileProp{}; + for (const auto & pkgDir : pkgDirs) + addPkg(pkgDir, priorityCounter++); + } + + printError("created %d symlinks in user environment", symlinks); + + createSymlink(getAttr("manifest"), out + "/manifest.nix"); +} + +} diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc new file mode 100644 index 000000000000..1f4abd374f54 --- /dev/null +++ b/src/libstore/builtins/fetchurl.cc @@ -0,0 +1,81 @@ +#include "builtins.hh" +#include "download.hh" +#include "store-api.hh" +#include "archive.hh" +#include "compression.hh" + +namespace nix { + +void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData) +{ + /* Make the host's netrc data available. Too bad curl requires + this to be stored in a file. It would be nice if we could just + pass a pointer to the data. */ + if (netrcData != "") { + settings.netrcFile = "netrc"; + writeFile(settings.netrcFile, netrcData, 0600); + } + + auto getAttr = [&](const string & name) { + auto i = drv.env.find(name); + if (i == drv.env.end()) throw Error(format("attribute '%s' missing") % name); + return i->second; + }; + + Path storePath = getAttr("out"); + auto mainUrl = getAttr("url"); + + /* Note: have to use a fresh downloader here because we're in + a forked process. */ + auto downloader = makeDownloader(); + + auto fetch = [&](const std::string & url) { + + auto source = sinkToSource([&](Sink & sink) { + + /* No need to do TLS verification, because we check the hash of + the result anyway. */ + DownloadRequest request(url); + request.verifyTLS = false; + request.decompress = false; + + downloader->download(std::move(request), sink); + }); + + if (get(drv.env, "unpack", "") == "1") { + + if (hasSuffix(mainUrl, ".xz")) { + auto source2 = sinkToSource([&](Sink & sink) { + decompress("xz", *source, sink); + }); + restorePath(storePath, *source2); + } else + restorePath(storePath, *source); + + } else + writeFile(storePath, *source); + + auto executable = drv.env.find("executable"); + if (executable != drv.env.end() && executable->second == "1") { + if (chmod(storePath.c_str(), 0755) == -1) + throw SysError(format("making '%1%' executable") % storePath); + } + }; + + /* Try the hashed mirrors first. 
*/ + if (getAttr("outputHashMode") == "flat") + for (auto hashedMirror : settings.hashedMirrors.get()) + try { + if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/'; + auto ht = parseHashType(getAttr("outputHashAlgo")); + fetch(hashedMirror + printHashType(ht) + "/" + Hash(getAttr("outputHash"), ht).to_string(Base16, false)); + return; + } catch (Error & e) { + debug(e.what()); + } + + /* Otherwise try the specified URL. */ + fetch(mainUrl); +} + +} diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index a0a0d78b7d30..74b861281ee0 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -57,16 +57,8 @@ bool BasicDerivation::isBuiltin() const bool BasicDerivation::canBuildLocally() const { return platform == settings.thisSystem - || isBuiltin() -#if __linux__ - || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux") - || (platform == "armv6l-linux" && settings.thisSystem == "armv7l-linux") - || (platform == "armv5tel-linux" && (settings.thisSystem == "armv7l-linux" || settings.thisSystem == "armv6l-linux")) -#elif __FreeBSD__ - || (platform == "i686-linux" && settings.thisSystem == "x86_64-freebsd") - || (platform == "i686-linux" && settings.thisSystem == "i686-freebsd") -#endif - ; + || settings.extraPlatforms.get().count(platform) > 0 + || isBuiltin(); } diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 5ab625f42288..72a08ef0089c 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -7,6 +7,7 @@ #include "s3.hh" #include "compression.hh" #include "pathlocks.hh" +#include "finally.hh" #ifdef ENABLE_S3 #include <aws/core/client/ClientConfiguration.h> @@ -29,12 +30,25 @@ using namespace std::string_literals; namespace nix { -double getTime() +struct DownloadSettings : Config { - struct timeval tv; - gettimeofday(&tv, 0); - return tv.tv_sec + (tv.tv_usec / 1000000.0); -} + Setting<bool> enableHttp2{this, true, "http2", + "Whether to enable HTTP/2 support."}; + + Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix", + "String appended to the user agent in HTTP requests."}; + + Setting<size_t> httpConnections{this, 25, "http-connections", + "Number of parallel HTTP connections.", + {"binary-caches-parallel-connections"}}; + + Setting<unsigned long> connectTimeout{this, 0, "connect-timeout", + "Timeout for connecting to servers during downloads. 
0 means use curl's builtin default."}; +}; + +static DownloadSettings downloadSettings; + +static GlobalConfig::Register r1(&downloadSettings); std::string resolveUri(const std::string & uri) { @@ -61,8 +75,6 @@ struct CurlDownloader : public Downloader std::random_device rd; std::mt19937 mt19937; - bool enableHttp2; - struct DownloadItem : public std::enable_shared_from_this<DownloadItem> { CurlDownloader & downloader; @@ -70,8 +82,7 @@ struct CurlDownloader : public Downloader DownloadResult result; Activity act; bool done = false; // whether either the success or failure function has been called - std::function<void(const DownloadResult &)> success; - std::function<void(std::exception_ptr exc)> failure; + Callback<DownloadResult> callback; CURL * req = 0; bool active = false; // whether the handle has been added to the multi object std::string status; @@ -86,10 +97,13 @@ struct CurlDownloader : public Downloader std::string encoding; - DownloadItem(CurlDownloader & downloader, const DownloadRequest & request) + DownloadItem(CurlDownloader & downloader, + const DownloadRequest & request, + Callback<DownloadResult> callback) : downloader(downloader) , request(request) , act(*logger, lvlTalkative, actDownload, fmt("downloading '%s'", request.uri), {request.uri}, request.parentAct) + , callback(callback) { if (!request.expectedETag.empty()) requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str()); @@ -118,13 +132,16 @@ struct CurlDownloader : public Downloader { assert(!done); done = true; - callFailure(failure, std::make_exception_ptr(e)); + callback.rethrow(std::make_exception_ptr(e)); } size_t writeCallback(void * contents, size_t size, size_t nmemb) { size_t realSize = size * nmemb; - result.data->append((char *) contents, realSize); + if (request.dataCallback) + request.dataCallback((char *) contents, realSize); + else + result.data->append((char *) contents, realSize); return realSize; } @@ -173,7 +190,11 @@ struct CurlDownloader : public Downloader int progressCallback(double dltotal, double dlnow) { - act.progress(dlnow, dltotal); + try { + act.progress(dlnow, dltotal); + } catch (nix::Interrupted &) { + assert(_isInterrupted); + } return _isInterrupted; } @@ -195,6 +216,7 @@ struct CurlDownloader : public Downloader if (readOffset == request.data->length()) return 0; auto count = std::min(size * nitems, request.data->length() - readOffset); + assert(count); memcpy(buffer, request.data->data() + readOffset, count); readOffset += count; return count; @@ -223,12 +245,12 @@ struct CurlDownloader : public Downloader curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(req, CURLOPT_USERAGENT, ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + - (settings.userAgentSuffix != "" ? " " + settings.userAgentSuffix.get() : "")).c_str()); + (downloadSettings.userAgentSuffix != "" ? 
" " + downloadSettings.userAgentSuffix.get() : "")).c_str()); #if LIBCURL_VERSION_NUM >= 0x072b00 curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1); #endif #if LIBCURL_VERSION_NUM >= 0x072f00 - if (downloader.enableHttp2) + if (downloadSettings.enableHttp2) curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS); #endif curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, DownloadItem::writeCallbackWrapper); @@ -260,7 +282,7 @@ struct CurlDownloader : public Downloader curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0); } - curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, settings.connectTimeout.get()); + curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, downloadSettings.connectTimeout.get()); curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L); curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, lowSpeedTimeout); @@ -300,11 +322,11 @@ struct CurlDownloader : public Downloader try { if (request.decompress) result.data = decodeContent(encoding, ref<std::string>(result.data)); - callSuccess(success, failure, const_cast<const DownloadResult &>(result)); act.progress(result.data->size(), result.data->size()); + callback(std::move(result)); } catch (...) { done = true; - callFailure(failure, std::current_exception()); + callback.rethrow(); } } else { // We treat most errors as transient, but won't retry when hopeless @@ -339,6 +361,7 @@ struct CurlDownloader : public Downloader case CURLE_BAD_FUNCTION_ARGUMENT: case CURLE_INTERFACE_FAILED: case CURLE_UNKNOWN_OPTION: + case CURLE_SSL_CACERT_BADFILE: err = Misc; break; default: // Shut up warnings @@ -402,11 +425,9 @@ struct CurlDownloader : public Downloader #endif #if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0 curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS, - settings.binaryCachesParallelConnections.get()); + downloadSettings.httpConnections.get()); #endif - enableHttp2 = settings.enableHttp2; - wakeupPipe.create(); fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK); @@ -555,13 +576,12 @@ struct CurlDownloader : public Downloader } void enqueueDownload(const DownloadRequest & request, - std::function<void(const DownloadResult &)> success, - std::function<void(std::exception_ptr exc)> failure) override + Callback<DownloadResult> callback) override { /* Ugly hack to support s3:// URIs. */ if (hasPrefix(request.uri, "s3://")) { // FIXME: do this on a worker thread - sync2async<DownloadResult>(success, failure, [&]() -> DownloadResult { + try { #ifdef ENABLE_S3 S3Helper s3Helper("", Aws::Region::US_EAST_1); // FIXME: make configurable auto slash = request.uri.find('/', 5); @@ -575,27 +595,22 @@ struct CurlDownloader : public Downloader if (!s3Res.data) throw DownloadError(NotFound, fmt("S3 object '%s' does not exist", request.uri)); res.data = s3Res.data; - return res; + callback(std::move(res)); #else throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri); #endif - }); + } catch (...) 
{ callback.rethrow(); } return; } - auto item = std::make_shared<DownloadItem>(*this, request); - item->success = success; - item->failure = failure; - enqueueItem(item); + enqueueItem(std::make_shared<DownloadItem>(*this, request, callback)); } }; ref<Downloader> getDownloader() { - static std::shared_ptr<Downloader> downloader; - static std::once_flag downloaderCreated; - std::call_once(downloaderCreated, [&]() { downloader = makeDownloader(); }); - return ref<Downloader>(downloader); + static ref<Downloader> downloader = makeDownloader(); + return downloader; } ref<Downloader> makeDownloader() @@ -607,8 +622,13 @@ std::future<DownloadResult> Downloader::enqueueDownload(const DownloadRequest & { auto promise = std::make_shared<std::promise<DownloadResult>>(); enqueueDownload(request, - [promise](const DownloadResult & result) { promise->set_value(result); }, - [promise](std::exception_ptr exc) { promise->set_exception(exc); }); + {[promise](std::future<DownloadResult> fut) { + try { + promise->set_value(fut.get()); + } catch (...) { + promise->set_exception(std::current_exception()); + } + }}); return promise->get_future(); } @@ -617,7 +637,93 @@ DownloadResult Downloader::download(const DownloadRequest & request) return enqueueDownload(request).get(); } -Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl) +void Downloader::download(DownloadRequest && request, Sink & sink) +{ + /* Note: we can't call 'sink' via request.dataCallback, because + that would cause the sink to execute on the downloader + thread. If 'sink' is a coroutine, this will fail. Also, if the + sink is expensive (e.g. one that does decompression and writing + to the Nix store), it would stall the download thread too much. + Therefore we use a buffer to communicate data between the + download thread and the calling thread. */ + + struct State { + bool quit = false; + std::exception_ptr exc; + std::string data; + std::condition_variable avail, request; + }; + + auto _state = std::make_shared<Sync<State>>(); + + /* In case of an exception, wake up the download thread. FIXME: + abort the download request. */ + Finally finally([&]() { + auto state(_state->lock()); + state->quit = true; + state->request.notify_one(); + }); + + request.dataCallback = [_state](char * buf, size_t len) { + + auto state(_state->lock()); + + if (state->quit) return; + + /* If the buffer is full, then go to sleep until the calling + thread wakes us up (i.e. when it has removed data from the + buffer). Note: this does stall the download thread. */ + while (state->data.size() > 1024 * 1024) { + if (state->quit) return; + debug("download buffer is full; going to sleep"); + state.wait(state->request); + } + + /* Append data to the buffer and wake up the calling + thread. */ + state->data.append(buf, len); + state->avail.notify_one(); + }; + + enqueueDownload(request, + {[_state](std::future<DownloadResult> fut) { + auto state(_state->lock()); + state->quit = true; + try { + fut.get(); + } catch (...) { + state->exc = std::current_exception(); + } + state->avail.notify_one(); + state->request.notify_one(); + }}); + + auto state(_state->lock()); + + while (true) { + checkInterrupt(); + + if (state->quit) { + if (state->exc) std::rethrow_exception(state->exc); + break; + } + + /* If no data is available, then wait for the download thread + to wake us up. 
*/ + if (state->data.empty()) + state.wait(state->avail); + + /* If data is available, then flush it to the sink and wake up + the download thread if it's blocked on a full buffer. */ + if (!state->data.empty()) { + sink((unsigned char *) state->data.data(), state->data.size()); + state->data.clear(); + state->request.notify_one(); + } + } +} + +Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl, int ttl) { auto url = resolveUri(url_); @@ -630,7 +736,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa if (expectedHash) { expectedStorePath = store->makeFixedOutputPath(unpack, expectedHash, name); if (store->isValidPath(expectedStorePath)) - return expectedStorePath; + return store->toRealPath(expectedStorePath); } Path cacheDir = getCacheDir() + "/nix/tarballs"; @@ -647,7 +753,6 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa string expectedETag; - int ttl = settings.tarballTtl; bool skip = false; if (pathExists(fileLink) && pathExists(dataFile)) { @@ -724,8 +829,13 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa storePath = unpackedStorePath; } - if (expectedStorePath != "" && storePath != expectedStorePath) - throw nix::Error("store path mismatch in file downloaded from '%s'", url); + if (expectedStorePath != "" && storePath != expectedStorePath) { + Hash gotHash = unpack + ? hashPath(expectedHash.type, store->toRealPath(storePath)).first + : hashFile(expectedHash.type, store->toRealPath(storePath)); + throw nix::Error("hash mismatch in file downloaded from '%s': got hash '%s' instead of the expected hash '%s'", + url, gotHash.to_string(), expectedHash.to_string()); + } return store->toRealPath(storePath); } diff --git a/src/libstore/download.hh b/src/libstore/download.hh index d9d525d4e65f..f56274b2353c 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -2,6 +2,7 @@ #include "types.hh" #include "hash.hh" +#include "globals.hh" #include <string> #include <future> @@ -20,9 +21,10 @@ struct DownloadRequest bool decompress = true; std::shared_ptr<std::string> data; std::string mimeType; + std::function<void(char *, size_t)> dataCallback; DownloadRequest(const std::string & uri) - : uri(uri), parentAct(curActivity) { } + : uri(uri), parentAct(getCurActivity()) { } }; struct DownloadResult @@ -41,20 +43,23 @@ struct Downloader the download. The future may throw a DownloadError exception. */ virtual void enqueueDownload(const DownloadRequest & request, - std::function<void(const DownloadResult &)> success, - std::function<void(std::exception_ptr exc)> failure) = 0; + Callback<DownloadResult> callback) = 0; std::future<DownloadResult> enqueueDownload(const DownloadRequest & request); /* Synchronously download a file. */ DownloadResult download(const DownloadRequest & request); + /* Download a file, writing its data to a sink. The sink will be + invoked on the thread of the caller. */ + void download(DownloadRequest && request, Sink & sink); + /* Check if the specified file is already in ~/.cache/nix/tarballs and is more recent than โtarball-ttlโ seconds. Otherwise, use the recorded ETag to verify if the server has a more recent version, and if so, download it to the Nix store. 
*/ Path downloadCached(ref<Store> store, const string & uri, bool unpack, string name = "", - const Hash & expectedHash = Hash(), string * effectiveUri = nullptr); + const Hash & expectedHash = Hash(), string * effectiveUri = nullptr, int ttl = settings.tarballTtl); enum Error { NotFound, Forbidden, Misc, Transient, Interrupted }; }; diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 943b16c28fa3..ba49749d830a 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -59,7 +59,7 @@ static void makeSymlink(const Path & link, const Path & target) /* Create the new symlink. */ Path tempLink = (format("%1%.tmp-%2%-%3%") - % link % getpid() % rand()).str(); + % link % getpid() % random()).str(); createSymlink(target, tempLink); /* Atomically replace the old one. */ diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index f46e8326235f..d95db56726cb 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -28,9 +28,10 @@ namespace nix { Settings settings; +static GlobalConfig::Register r1(&settings); + Settings::Settings() - : Config({}) - , nixPrefix(NIX_PREFIX) + : nixPrefix(NIX_PREFIX) , nixStore(canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)))) , nixDataDir(canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR))) , nixLogDir(canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR))) @@ -69,20 +70,15 @@ Settings::Settings() allowedImpureHostPrefixes = tokenizeString<StringSet>(DEFAULT_ALLOWED_IMPURE_PREFIXES); } -void Settings::loadConfFile() +void loadConfFile() { - applyConfigFile(nixConfDir + "/nix.conf"); + globalConfig.applyConfigFile(settings.nixConfDir + "/nix.conf"); /* We only want to send overrides to the daemon, i.e. stuff from ~/.nix/nix.conf or the command line. */ - resetOverriden(); + globalConfig.resetOverriden(); - applyConfigFile(getConfigDir() + "/nix/nix.conf"); -} - -void Settings::set(const string & name, const string & value) -{ - Config::set(name, value); + globalConfig.applyConfigFile(getConfigDir() + "/nix/nix.conf"); } unsigned int Settings::getDefaultCores() @@ -159,26 +155,14 @@ void initPlugins() void *handle = dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL); if (!handle) - throw Error("could not dynamically open plugin file '%s%': %s%", file, dlerror()); + throw Error("could not dynamically open plugin file '%s': %s", file, dlerror()); } } - /* We handle settings registrations here, since plugins can add settings */ - if (RegisterSetting::settingRegistrations) { - for (auto & registration : *RegisterSetting::settingRegistrations) - settings.addSetting(registration); - delete RegisterSetting::settingRegistrations; - } - settings.handleUnknownSettings(); -} - -RegisterSetting::SettingRegistrations * RegisterSetting::settingRegistrations; -RegisterSetting::RegisterSetting(AbstractSetting * s) -{ - if (!settingRegistrations) - settingRegistrations = new SettingRegistrations; - settingRegistrations->emplace_back(s); + /* Since plugins can add settings, try to re-apply previously + unknown settings. 
*/ + globalConfig.reapplyUnknownSettings(); + globalConfig.warnUnknownSettings(); } - } diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index dd01f832df0c..f589078dbb98 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -13,26 +13,6 @@ namespace nix { typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode; -extern bool useCaseHack; // FIXME - -struct CaseHackSetting : public BaseSetting<bool> -{ - CaseHackSetting(Config * options, - const std::string & name, - const std::string & description, - const std::set<std::string> & aliases = {}) - : BaseSetting<bool>(useCaseHack, name, description, aliases) - { - options->addSetting(this); - } - - void set(const std::string & str) override - { - BaseSetting<bool>::set(str); - nix::useCaseHack = true; - } -}; - struct MaxBuildJobsSetting : public BaseSetting<unsigned int> { MaxBuildJobsSetting(Config * options, @@ -56,10 +36,6 @@ public: Settings(); - void loadConfFile(); - - void set(const string & name, const string & value); - Path nixPrefix; /* The directory where we store sources and derived files. */ @@ -217,9 +193,6 @@ public: Setting<bool> showTrace{this, false, "show-trace", "Whether to show a stack trace on evaluation errors."}; - Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation", - "Whether builtin functions that allow executing native code should be enabled."}; - Setting<SandboxMode> sandboxMode{this, smDisabled, "sandbox", "Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".", {"build-use-chroot", "build-use-sandbox"}}; @@ -232,13 +205,6 @@ public: "Additional paths to make available inside the build sandbox.", {"build-extra-chroot-dirs", "build-extra-sandbox-paths"}}; - Setting<bool> restrictEval{this, false, "restrict-eval", - "Whether to restrict file system access to paths in $NIX_PATH, " - "and network access to the URI prefixes listed in 'allowed-uris'."}; - - Setting<bool> pureEval{this, false, "pure-eval", - "Whether to restrict file system and network access to files specified by cryptographic hash."}; - Setting<size_t> buildRepeat{this, 0, "repeat", "The number of times to repeat a build in order to verify determinism.", {"build-repeat"}}; @@ -280,13 +246,6 @@ public: Setting<Strings> secretKeyFiles{this, {}, "secret-key-files", "Secret keys with which to sign local builds."}; - Setting<size_t> binaryCachesParallelConnections{this, 25, "http-connections", - "Number of parallel HTTP connections.", - {"binary-caches-parallel-connections"}}; - - Setting<bool> enableHttp2{this, true, "http2", - "Whether to enable HTTP/2 support."}; - Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl", "How soon to expire files fetched by builtins.fetchTarball and builtins.fetchurl."}; @@ -295,6 +254,13 @@ public: "Nix store has a valid signature (that is, one signed using a key " "listed in 'trusted-public-keys'."}; + Setting<StringSet> extraPlatforms{this, + std::string{SYSTEM} == "x86_64-linux" ? StringSet{"i686-linux"} : StringSet{}, + "extra-platforms", + "Additional platforms that can be built on the local system. " + "These may be supported natively (e.g. armv7 on some aarch64 CPUs " + "or using hacks like qemu-user."}; + Setting<Strings> substituters{this, nixStore == "/nix/store" ? 
Strings{"https://cache.nixos.org/"} : Strings(), "substituters", @@ -313,6 +279,14 @@ public: Setting<Strings> trustedUsers{this, {"root"}, "trusted-users", "Which users or groups are trusted to ask the daemon to do unsafe things."}; + Setting<unsigned int> ttlNegativeNarInfoCache{this, 3600, "narinfo-cache-negative-ttl", + "The TTL in seconds for negative lookups in the disk cache i.e binary cache lookups that " + "return an invalid path result"}; + + Setting<unsigned int> ttlPositiveNarInfoCache{this, 30 * 24 * 3600, "narinfo-cache-positive-ttl", + "The TTL in seconds for positive lookups in the disk cache i.e binary cache lookups that " + "return a valid path result."}; + /* ?Who we trust to use the daemon in safe ways */ Setting<Strings> allowedUsers{this, {"*"}, "allowed-users", "Which users or groups are allowed to connect to the daemon."}; @@ -335,18 +309,6 @@ public: /* Path to the SSL CA file used */ Path caFile; - Setting<bool> enableImportFromDerivation{this, true, "allow-import-from-derivation", - "Whether the evaluator allows importing the result of a derivation."}; - - CaseHackSetting useCaseHack{this, "use-case-hack", - "Whether to enable a Darwin-specific hack for dealing with file name collisions."}; - - Setting<unsigned long> connectTimeout{this, 0, "connect-timeout", - "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."}; - - Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix", - "String appended to the user agent in HTTP requests."}; - #if __linux__ Setting<bool> filterSyscalls{this, true, "filter-syscalls", "Whether to prevent certain dangerous system calls, such as " @@ -368,9 +330,6 @@ public: Setting<uint64_t> maxFree{this, std::numeric_limits<uint64_t>::max(), "max-free", "Stop deleting garbage when free disk space is above the specified amount."}; - Setting<Strings> allowedUris{this, {}, "allowed-uris", - "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."}; - Setting<Paths> pluginFiles{this, {}, "plugin-files", "Plugins to dynamically load at nix initialization time."}; }; @@ -383,15 +342,8 @@ extern Settings settings; anything else */ void initPlugins(); +void loadConfFile(); extern const string nixVersion; -struct RegisterSetting -{ - typedef std::vector<AbstractSetting *> SettingRegistrations; - static SettingRegistrations * settingRegistrations; - RegisterSetting(AbstractSetting * s); -}; - - } diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index b9e9cd5daba5..6fdae40e3603 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -77,28 +77,42 @@ protected: } } - void getFile(const std::string & path, - std::function<void(std::shared_ptr<std::string>)> success, - std::function<void(std::exception_ptr exc)> failure) override + DownloadRequest makeRequest(const std::string & path) { DownloadRequest request(cacheUri + "/" + path); request.tries = 8; + return request; + } + + void getFile(const std::string & path, Sink & sink) override + { + auto request(makeRequest(path)); + try { + getDownloader()->download(std::move(request), sink); + } catch (DownloadError & e) { + if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) + throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri()); + throw; + } + } + + void getFile(const std::string & path, + Callback<std::shared_ptr<std::string>> callback) override + { + auto 
request(makeRequest(path)); getDownloader()->enqueueDownload(request, - [success](const DownloadResult & result) { - success(result.data); - }, - [success, failure](std::exception_ptr exc) { + {[callback](std::future<DownloadResult> result) { try { - std::rethrow_exception(exc); + callback(result.get().data); } catch (DownloadError & e) { if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) - return success(0); - failure(exc); + return callback(std::shared_ptr<std::string>()); + callback.rethrow(); } catch (...) { - failure(exc); + callback.rethrow(); } - }); + }}); } }; diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index dfefdb9bc874..02d91ded04cd 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -16,6 +16,7 @@ struct LegacySSHStore : public Store const Setting<int> maxConnections{this, 1, "max-connections", "maximum number of concurrent SSH connections"}; const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"}; const Setting<bool> compress{this, false, "compress", "whether to compress the connection"}; + const Setting<Path> remoteProgram{this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"}; // Hack for getting remote build log output. const Setting<int> logFD{this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"}; @@ -55,7 +56,7 @@ struct LegacySSHStore : public Store ref<Connection> openConnection() { auto conn = make_ref<Connection>(); - conn->sshConn = master.startCommand("nix-store --serve --write"); + conn->sshConn = master.startCommand(fmt("%s --serve --write", remoteProgram)); conn->to = FdSink(conn->sshConn->in.get()); conn->from = FdSource(conn->sshConn->out.get()); @@ -83,10 +84,9 @@ struct LegacySSHStore : public Store } void queryPathInfoUncached(const Path & path, - std::function<void(std::shared_ptr<ValidPathInfo>)> success, - std::function<void(std::exception_ptr exc)> failure) override + Callback<std::shared_ptr<ValidPathInfo>> callback) override { - sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() -> std::shared_ptr<ValidPathInfo> { + try { auto conn(connections->get()); debug("querying remote host '%s' for info on '%s'", host, path); @@ -96,7 +96,7 @@ struct LegacySSHStore : public Store auto info = std::make_shared<ValidPathInfo>(); conn->from >> info->path; - if (info->path.empty()) return nullptr; + if (info->path.empty()) return callback(nullptr); assert(path == info->path); PathSet references; @@ -115,11 +115,11 @@ struct LegacySSHStore : public Store auto s = readString(conn->from); assert(s == ""); - return info; - }); + callback(std::move(info)); + } catch (...) { callback.rethrow(); } } - void addToStore(const ValidPathInfo & info, const ref<std::string> & nar, + void addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) override { @@ -130,7 +130,7 @@ struct LegacySSHStore : public Store conn->to << cmdImportPaths << 1; - conn->to(*nar); + copyNAR(source, conn->to); conn->to << exportMagic << info.path @@ -150,12 +150,7 @@ struct LegacySSHStore : public Store conn->to << cmdDumpStorePath << path; conn->to.flush(); - - /* FIXME: inefficient. 
*/ - ParseSink parseSink; /* null sink; just parse the NAR */ - TeeSource savedNAR(conn->from); - parseDump(parseSink, savedNAR); - sink(*savedNAR.data); + copyNAR(conn->from, sink); } PathSet queryAllValidPaths() override { unsupported(); } diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc index 2577e90aef23..b7001795be4d 100644 --- a/src/libstore/local-binary-cache-store.cc +++ b/src/libstore/local-binary-cache-store.cc @@ -34,18 +34,14 @@ protected: const std::string & data, const std::string & mimeType) override; - void getFile(const std::string & path, - std::function<void(std::shared_ptr<std::string>)> success, - std::function<void(std::exception_ptr exc)> failure) override + void getFile(const std::string & path, Sink & sink) override { - sync2async<std::shared_ptr<std::string>>(success, failure, [&]() { - try { - return std::make_shared<std::string>(readFile(binaryCacheDir + "/" + path)); - } catch (SysError & e) { - if (e.errNo == ENOENT) return std::shared_ptr<std::string>(); - throw; - } - }); + try { + readFile(binaryCacheDir + "/" + path, sink); + } catch (SysError & e) { + if (e.errNo == ENOENT) + throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path); + } } PathSet queryAllValidPaths() override diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 4afe51ea91ec..3b2ba65f3b46 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -53,7 +53,6 @@ LocalStore::LocalStore(const Params & params) , trashDir(realStoreDir + "/trash") , tempRootsDir(stateDir + "/temproots") , fnTempRoots(fmt("%s/%d", tempRootsDir, getpid())) - , publicKeys(getDefaultPublicKeys()) { auto state(_state.lock()); @@ -582,7 +581,8 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & uint64_t LocalStore::addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs) { - assert(info.ca == "" || info.isContentAddressed(*this)); + if (info.ca != "" && !info.isContentAddressed(*this)) + throw Error("cannot add path '%s' to the Nix store because it claims to be content-addressed but isn't", info.path); state.stmtRegisterValidPath.use() (info.path) @@ -629,17 +629,15 @@ uint64_t LocalStore::addValidPath(State & state, void LocalStore::queryPathInfoUncached(const Path & path, - std::function<void(std::shared_ptr<ValidPathInfo>)> success, - std::function<void(std::exception_ptr exc)> failure) + Callback<std::shared_ptr<ValidPathInfo>> callback) { - sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() { - + try { auto info = std::make_shared<ValidPathInfo>(); info->path = path; assertStorePath(path); - return retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() { + callback(retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() { auto state(_state.lock()); /* Get the path info. */ @@ -679,8 +677,9 @@ void LocalStore::queryPathInfoUncached(const Path & path, info->references.insert(useQueryReferences.getStr(0)); return info; - }); - }); + })); + + } catch (...) 
{ callback.rethrow(); } } @@ -964,21 +963,22 @@ void LocalStore::invalidatePath(State & state, const Path & path) } -void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar, - RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) +const PublicKeys & LocalStore::getPublicKeys() { - assert(info.narHash); + auto state(_state.lock()); + if (!state->publicKeys) + state->publicKeys = std::make_unique<PublicKeys>(getDefaultPublicKeys()); + return *state->publicKeys; +} - Hash h = hashString(htSHA256, *nar); - if (h != info.narHash) - throw Error("hash mismatch importing path '%s'; expected hash '%s', got '%s'", - info.path, info.narHash.to_string(), h.to_string()); - if (nar->size() != info.narSize) - throw Error("size mismatch importing path '%s'; expected %s, got %s", - info.path, info.narSize, nar->size()); +void LocalStore::addToStore(const ValidPathInfo & info, Source & source, + RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) +{ + if (!info.narHash) + throw Error("cannot add path '%s' because it lacks a hash", info.path); - if (requireSigs && checkSigs && !info.checkSignatures(*this, publicKeys)) + if (requireSigs && checkSigs && !info.checkSignatures(*this, getPublicKeys())) throw Error("cannot add path '%s' because it lacks a valid signature", info.path); addTempRoot(info.path); @@ -999,8 +999,27 @@ void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> & deletePath(realPath); - StringSource source(*nar); - restorePath(realPath, source); + /* While restoring the path from the NAR, compute the hash + of the NAR. */ + HashSink hashSink(htSHA256); + + LambdaSource wrapperSource([&](unsigned char * data, size_t len) -> size_t { + size_t n = source.read(data, len); + hashSink(data, n); + return n; + }); + + restorePath(realPath, wrapperSource); + + auto hashResult = hashSink.finish(); + + if (hashResult.first != info.narHash) + throw Error("hash mismatch importing path '%s'; expected hash '%s', got '%s'", + info.path, info.narHash.to_string(), hashResult.first.to_string()); + + if (hashResult.second != info.narSize) + throw Error("size mismatch importing path '%s'; expected %s, got %s", + info.path, info.narSize, hashResult.second); autoGC(); @@ -1215,7 +1234,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) /* Check the content hash (optionally - slow). */ printMsg(lvlTalkative, format("checking contents of '%1%'") % i); - HashResult current = hashPath(info->narHash.type, i); + HashResult current = hashPath(info->narHash.type, toRealPath(i)); if (info->narHash != nullHash && info->narHash != current.first) { printError(format("path '%1%' was modified! " diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index bbd50e1c1451..746bdbeed793 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -77,6 +77,8 @@ private: minFree but not much below availAfterGC, then there is no point in starting a new GC. 
*/ uint64_t availAfterGC = std::numeric_limits<uint64_t>::max(); + + std::unique_ptr<PublicKeys> publicKeys; }; Sync<State, std::recursive_mutex> _state; @@ -100,7 +102,7 @@ private: settings.requireSigs, "require-sigs", "whether store paths should have a trusted signature on import"}; - PublicKeys publicKeys; + const PublicKeys & getPublicKeys(); public: @@ -125,8 +127,7 @@ public: PathSet queryAllValidPaths() override; void queryPathInfoUncached(const Path & path, - std::function<void(std::shared_ptr<ValidPathInfo>)> success, - std::function<void(std::exception_ptr exc)> failure) override; + Callback<std::shared_ptr<ValidPathInfo>> callback) override; void queryReferrers(const Path & path, PathSet & referrers) override; @@ -143,7 +144,7 @@ public: void querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos) override; - void addToStore(const ValidPathInfo & info, const ref<std::string> & nar, + void addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) override; diff --git a/src/libstore/local.mk b/src/libstore/local.mk index e11efa5c2b54..3799257f83ff 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -4,7 +4,7 @@ libstore_NAME = libnixstore libstore_DIR := $(d) -libstore_SOURCES := $(wildcard $(d)/*.cc) +libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc) libstore_LIBS = libutil libformat @@ -18,7 +18,7 @@ libstore_FILES = sandbox-defaults.sb sandbox-minimal.sb sandbox-network.sb $(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox))) ifeq ($(ENABLE_S3), 1) - libstore_LDFLAGS += -laws-cpp-sdk-s3 -laws-cpp-sdk-core + libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core endif ifeq ($(OS), SunOS) diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index a82aa4e9cfa5..adcce026fa1d 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -33,9 +33,11 @@ void Store::computeFSClosure(const PathSet & startPaths, state->pending++; } - queryPathInfo(path, - [&, path](ref<ValidPathInfo> info) { - // FIXME: calls to isValidPath() should be async + queryPathInfo(path, {[&, path](std::future<ref<ValidPathInfo>> fut) { + // FIXME: calls to isValidPath() should be async + + try { + auto info = fut.get(); if (flipDirection) { @@ -75,14 +77,13 @@ void Store::computeFSClosure(const PathSet & startPaths, if (!--state->pending) done.notify_one(); } - }, - - [&, path](std::exception_ptr exc) { + } catch (...) { auto state(state_.lock()); - if (!state->exc) state->exc = exc; + if (!state->exc) state->exc = std::current_exception(); assert(state->pending); if (!--state->pending) done.notify_one(); - }); + }; + }}); }; for (auto & startPath : startPaths) diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 6e155e877803..35403e5df56f 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -1,6 +1,7 @@ #include "nar-info-disk-cache.hh" #include "sync.hh" #include "sqlite.hh" +#include "globals.hh" #include <sqlite3.h> @@ -47,10 +48,6 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache { public: - /* How long negative and positive lookups are valid. */ - const int ttlNegative = 3600; - const int ttlPositive = 30 * 24 * 3600; - /* How often to purge expired entries from the cache. 
*/ const int purgeInterval = 24 * 3600; @@ -116,8 +113,8 @@ public: SQLiteStmt(state->db, "delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))") .use() - (now - ttlNegative) - (now - ttlPositive) + (now - settings.ttlNegativeNarInfoCache) + (now - settings.ttlPositiveNarInfoCache) .exec(); debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db)); @@ -186,8 +183,8 @@ public: auto queryNAR(state->queryNAR.use() (cache.id) (hashPart) - (now - ttlNegative) - (now - ttlPositive)); + (now - settings.ttlNegativeNarInfoCache) + (now - settings.ttlPositiveNarInfoCache)); if (!queryNAR.next()) return {oUnknown, 0}; @@ -260,11 +257,8 @@ public: ref<NarInfoDiskCache> getNarInfoDiskCache() { - static Sync<std::shared_ptr<NarInfoDiskCache>> cache; - - auto cache_(cache.lock()); - if (!*cache_) *cache_ = std::make_shared<NarInfoDiskCacheImpl>(); - return ref<NarInfoDiskCache>(*cache_); + static ref<NarInfoDiskCache> cache = make_ref<NarInfoDiskCacheImpl>(); + return cache; } } diff --git a/src/libstore/nix-store.pc.in b/src/libstore/nix-store.pc.in index 3f1a2d83d2f2..5cf22faadcbe 100644 --- a/src/libstore/nix-store.pc.in +++ b/src/libstore/nix-store.pc.in @@ -5,5 +5,5 @@ includedir=@includedir@ Name: Nix Description: Nix Package Manager Version: @PACKAGE_VERSION@ -Libs: -L${libdir} -lnixstore -lnixutil -lnixformat -Cflags: -I${includedir}/nix +Libs: -L${libdir} -lnixstore -lnixutil +Cflags: -I${includedir}/nix -std=c++14 diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 891540ae4c1d..7840167d7772 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -213,7 +213,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats, MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : ""); Path tempLink = (format("%1%/.tmp-link-%2%-%3%") - % realStoreDir % getpid() % rand()).str(); + % realStoreDir % getpid() % random()).str(); if (link(linkPath.c_str(), tempLink.c_str()) == -1) { if (errno == EMLINK) { diff --git a/src/libstore/references.cc b/src/libstore/references.cc index ba9f18b9ca5e..5b7eb1f846af 100644 --- a/src/libstore/references.cc +++ b/src/libstore/references.cc @@ -13,7 +13,7 @@ namespace nix { static unsigned int refLength = 32; /* characters */ -static void search(const unsigned char * s, unsigned int len, +static void search(const unsigned char * s, size_t len, StringSet & hashes, StringSet & seen) { static bool initialised = false; @@ -25,7 +25,7 @@ static void search(const unsigned char * s, unsigned int len, initialised = true; } - for (unsigned int i = 0; i + refLength <= len; ) { + for (size_t i = 0; i + refLength <= len; ) { int j; bool match = true; for (j = refLength - 1; j >= 0; --j) @@ -73,7 +73,7 @@ void RefScanSink::operator () (const unsigned char * data, size_t len) search(data, len, hashes, seen); - unsigned int tailLen = len <= refLength ? len : refLength; + size_t tailLen = len <= refLength ? len : refLength; tail = string(tail, tail.size() < refLength - tailLen ? 
0 : tail.size() - (refLength - tailLen)) + string((const char *) data + len - tailLen, tailLen); diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 8f0b65557ac4..ea86ef052f53 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -7,6 +7,7 @@ #include "globals.hh" #include "derivations.hh" #include "pool.hh" +#include "finally.hh" #include <sys/types.h> #include <sys/stat.h> @@ -187,10 +188,11 @@ void RemoteStore::setOptions(Connection & conn) << settings.useSubstitutes; if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) { - auto overrides = settings.getSettings(true); + std::map<std::string, Config::SettingInfo> overrides; + globalConfig.getSettings(overrides, true); conn.to << overrides.size(); for (auto & i : overrides) - conn.to << i.first << i.second; + conn.to << i.first << i.second.value; } conn.processStderr(); @@ -293,38 +295,40 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, void RemoteStore::queryPathInfoUncached(const Path & path, - std::function<void(std::shared_ptr<ValidPathInfo>)> success, - std::function<void(std::exception_ptr exc)> failure) + Callback<std::shared_ptr<ValidPathInfo>> callback) { - sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() { - auto conn(connections->get()); - conn->to << wopQueryPathInfo << path; - try { - conn->processStderr(); - } catch (Error & e) { - // Ugly backwards compatibility hack. - if (e.msg().find("is not valid") != std::string::npos) - throw InvalidPath(e.what()); - throw; - } - if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) { - bool valid; conn->from >> valid; - if (!valid) throw InvalidPath(format("path '%s' is not valid") % path); - } - auto info = std::make_shared<ValidPathInfo>(); - info->path = path; - info->deriver = readString(conn->from); - if (info->deriver != "") assertStorePath(info->deriver); - info->narHash = Hash(readString(conn->from), htSHA256); - info->references = readStorePaths<PathSet>(*this, conn->from); - conn->from >> info->registrationTime >> info->narSize; - if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) { - conn->from >> info->ultimate; - info->sigs = readStrings<StringSet>(conn->from); - conn->from >> info->ca; + try { + std::shared_ptr<ValidPathInfo> info; + { + auto conn(connections->get()); + conn->to << wopQueryPathInfo << path; + try { + conn->processStderr(); + } catch (Error & e) { + // Ugly backwards compatibility hack. + if (e.msg().find("is not valid") != std::string::npos) + throw InvalidPath(e.what()); + throw; + } + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) { + bool valid; conn->from >> valid; + if (!valid) throw InvalidPath(format("path '%s' is not valid") % path); + } + info = std::make_shared<ValidPathInfo>(); + info->path = path; + info->deriver = readString(conn->from); + if (info->deriver != "") assertStorePath(info->deriver); + info->narHash = Hash(readString(conn->from), htSHA256); + info->references = readStorePaths<PathSet>(*this, conn->from); + conn->from >> info->registrationTime >> info->narSize; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) { + conn->from >> info->ultimate; + info->sigs = readStrings<StringSet>(conn->from); + conn->from >> info->ca; + } } - return info; - }); + callback(std::move(info)); + } catch (...) 
{ callback.rethrow(); } } @@ -377,7 +381,7 @@ Path RemoteStore::queryPathFromHashPart(const string & hashPart) } -void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar, +void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) { auto conn(connections->get()); @@ -385,22 +389,21 @@ void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string> if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) { conn->to << wopImportPaths; - StringSink sink; - sink << 1 // == path follows - ; - assert(nar->size() % 8 == 0); - sink((unsigned char *) nar->data(), nar->size()); - sink - << exportMagic - << info.path - << info.references - << info.deriver - << 0 // == no legacy signature - << 0 // == no path follows - ; - - StringSource source(*sink.s); - conn->processStderr(0, &source); + auto source2 = sinkToSource([&](Sink & sink) { + sink << 1 // == path follows + ; + copyNAR(source, sink); + sink + << exportMagic + << info.path + << info.references + << info.deriver + << 0 // == no legacy signature + << 0 // == no path follows + ; + }); + + conn->processStderr(0, source2.get()); auto importedPaths = readStorePaths<PathSet>(*this, conn->from); assert(importedPaths.size() <= 1); @@ -412,8 +415,9 @@ void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string> << info.references << info.registrationTime << info.narSize << info.ultimate << info.sigs << info.ca << repair << !checkSigs; - conn->to(*nar); - conn->processStderr(); + bool tunnel = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21; + if (!tunnel) copyNAR(source, conn->to); + conn->processStderr(0, tunnel ? &source : nullptr); } } @@ -436,8 +440,10 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath, conn->to.written = 0; conn->to.warn = true; connections->incCapacity(); - dumpPath(srcPath, conn->to, filter); - connections->decCapacity(); + { + Finally cleanup([&]() { connections->decCapacity(); }); + dumpPath(srcPath, conn->to, filter); + } conn->to.warn = false; conn->processStderr(); } catch (SysError & e) { diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 7f36e206416b..b488e34ce263 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -40,8 +40,7 @@ public: PathSet queryAllValidPaths() override; void queryPathInfoUncached(const Path & path, - std::function<void(std::shared_ptr<ValidPathInfo>)> success, - std::function<void(std::exception_ptr exc)> failure) override; + Callback<std::shared_ptr<ValidPathInfo>> callback) override; void queryReferrers(const Path & path, PathSet & referrers) override; @@ -58,7 +57,7 @@ public: void querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos) override; - void addToStore(const ValidPathInfo & info, const ref<std::string> & nar, + void addToStore(const ValidPathInfo & info, Source & nar, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) override; @@ -122,11 +121,12 @@ protected: ref<Pool<Connection>> connections; + virtual void setOptions(Connection & conn); + private: std::atomic_bool failed{false}; - void setOptions(Connection & conn); }; class UDSRemoteStore : public LocalFSStore, public RemoteStore diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 23af452094cf..239739bae832 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ 
b/src/libstore/s3-binary-cache-store.cc @@ -17,6 +17,7 @@ #include <aws/core/client/DefaultRetryStrategy.h> #include <aws/core/utils/logging/FormattedLogSystem.h> #include <aws/core/utils/logging/LogMacros.h> +#include <aws/core/utils/threading/Executor.h> #include <aws/s3/S3Client.h> #include <aws/s3/model/CreateBucketRequest.h> #include <aws/s3/model/GetBucketLocationRequest.h> @@ -24,6 +25,9 @@ #include <aws/s3/model/HeadObjectRequest.h> #include <aws/s3/model/ListObjectsRequest.h> #include <aws/s3/model/PutObjectRequest.h> +#include <aws/transfer/TransferManager.h> + +using namespace Aws::Transfer; namespace nix { @@ -169,6 +173,8 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"}; const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"}; const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"}; + const Setting<uint64_t> bufferSize{ + this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"}; std::string bucketName; @@ -271,34 +277,76 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore const std::string & mimeType, const std::string & contentEncoding) { - auto request = - Aws::S3::Model::PutObjectRequest() - .WithBucket(bucketName) - .WithKey(path); + auto stream = std::make_shared<istringstream_nocopy>(data); - request.SetContentType(mimeType); + auto maxThreads = std::thread::hardware_concurrency(); - if (contentEncoding != "") - request.SetContentEncoding(contentEncoding); + static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor> + executor = std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(maxThreads); - auto stream = std::make_shared<istringstream_nocopy>(data); + TransferManagerConfiguration transferConfig(executor.get()); - request.SetBody(stream); + transferConfig.s3Client = s3Helper.client; + transferConfig.bufferSize = bufferSize; - stats.put++; - stats.putBytes += data.size(); + if (contentEncoding != "") + transferConfig.createMultipartUploadTemplate.SetContentEncoding( + contentEncoding); + + transferConfig.uploadProgressCallback = + [&](const TransferManager *transferManager, + const std::shared_ptr<const TransferHandle> + &transferHandle) { + //FIXME: find a way to properly abort the multipart upload. 
+ checkInterrupt(); + printTalkative("upload progress ('%s'): '%d' of '%d' bytes", + path, + transferHandle->GetBytesTransferred(), + transferHandle->GetBytesTotalSize()); + }; + + transferConfig.transferStatusUpdatedCallback = + [&](const TransferManager *, + const std::shared_ptr<const TransferHandle> + &transferHandle) { + switch (transferHandle->GetStatus()) { + case TransferStatus::COMPLETED: + printTalkative("upload of '%s' completed", path); + stats.put++; + stats.putBytes += data.size(); + break; + case TransferStatus::IN_PROGRESS: + break; + case TransferStatus::FAILED: + throw Error("AWS error: failed to upload 's3://%s/%s'", + bucketName, path); + break; + default: + throw Error("AWS error: transfer status of 's3://%s/%s' " + "in unexpected state", + bucketName, path); + }; + }; + + std::shared_ptr<TransferManager> transferManager = + TransferManager::Create(transferConfig); auto now1 = std::chrono::steady_clock::now(); - auto result = checkAws(format("AWS error uploading '%s'") % path, - s3Helper.client->PutObject(request)); + std::shared_ptr<TransferHandle> transferHandle = + transferManager->UploadFile(stream, bucketName, path, mimeType, + Aws::Map<Aws::String, Aws::String>()); + + transferHandle->WaitUntilFinished(); auto now2 = std::chrono::steady_clock::now(); - auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count(); + auto duration = + std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1) + .count(); - printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") - % bucketName % path % data.size() % duration); + printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") % + bucketName % path % data.size() % duration); stats.putTimeMs += duration; } @@ -316,24 +364,23 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore uploadFile(path, data, mimeType, ""); } - void getFile(const std::string & path, - std::function<void(std::shared_ptr<std::string>)> success, - std::function<void(std::exception_ptr exc)> failure) override + void getFile(const std::string & path, Sink & sink) override { - sync2async<std::shared_ptr<std::string>>(success, failure, [&]() { - stats.get++; + stats.get++; - auto res = s3Helper.getObject(bucketName, path); + // FIXME: stream output to sink. + auto res = s3Helper.getObject(bucketName, path); - stats.getBytes += res.data ? res.data->size() : 0; - stats.getTimeMs += res.durationMs; + stats.getBytes += res.data ? 
res.data->size() : 0; + stats.getTimeMs += res.durationMs; - if (res.data) - printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms", - bucketName, path, res.data->size(), res.durationMs); + if (res.data) { + printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms", + bucketName, path, res.data->size(), res.durationMs); - return res.data; - }); + sink((unsigned char *) res.data->data(), res.data->size()); + } else + throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri()); } PathSet queryAllValidPaths() override diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc index b13001b06d57..a061d64f36d8 100644 --- a/src/libstore/sqlite.cc +++ b/src/libstore/sqlite.cc @@ -7,9 +7,10 @@ namespace nix { -[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f) +[[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs) { int err = sqlite3_errcode(db); + int exterr = sqlite3_extended_errcode(db); auto path = sqlite3_db_filename(db, nullptr); if (!path) path = "(in-memory)"; @@ -21,7 +22,7 @@ namespace nix { : fmt("SQLite database '%s' is busy", path)); } else - throw SQLiteError("%s: %s (in '%s')", f.str(), sqlite3_errstr(err), path); + throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(exterr), path); } SQLite::SQLite(const Path & path) diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh index 14a7a0dd8996..115679b84159 100644 --- a/src/libstore/sqlite.hh +++ b/src/libstore/sqlite.hh @@ -93,7 +93,7 @@ struct SQLiteTxn MakeError(SQLiteError, Error); MakeError(SQLiteBusy, SQLiteError); -[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f); +[[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs); void handleSQLiteBusy(const SQLiteBusy & e); diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index 107c6e1ecb4d..39205ae2ce12 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -51,21 +51,16 @@ private: std::string host; SSHMaster master; -}; - -class ForwardSource : public Source -{ - Source & readSource; - Sink & writeSink; -public: - ForwardSource(Source & readSource, Sink & writeSink) : readSource(readSource), writeSink(writeSink) {} - size_t read(unsigned char * data, size_t len) override + void setOptions(RemoteStore::Connection & conn) override { - auto n = readSource.read(data, len); - writeSink(data, n); - return n; - } + /* TODO Add a way to explicitly ask for some options to be + forwarded. One option: A way to query the daemon for its + settings, and then a series of params to SSHStore like + forward-cores or forward-overridden-cores that only + override the requested settings. 
+ */ + }; }; void SSHStore::narFromPath(const Path & path, Sink & sink) @@ -73,9 +68,7 @@ void SSHStore::narFromPath(const Path & path, Sink & sink) auto conn(connections->get()); conn->to << wopNarFromPath << path; conn->processStderr(); - ParseSink ps; - auto fwd = ForwardSource(conn->from, sink); - parseDump(ps, fwd); + copyNAR(conn->from, sink); } ref<FSAccessor> SSHStore::getFSAccessor() diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 7ff7a9bffc49..033c580936ad 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -49,6 +49,8 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string addCommonSSHOpts(args); if (socketPath != "") args.insert(args.end(), {"-S", socketPath}); + if (verbosity >= lvlChatty) + args.push_back("-v"); args.push_back(command); execvp(args.begin()->c_str(), stringsToCharPtrs(args).data()); @@ -93,6 +95,8 @@ Path SSHMaster::startMaster() , "-o", "LocalCommand=echo started" , "-o", "PermitLocalCommand=yes" }; + if (verbosity >= lvlChatty) + args.push_back("-v"); addCommonSSHOpts(args); execvp(args.begin()->c_str(), stringsToCharPtrs(args).data()); diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 8830edcc3449..9b0b7d6327e0 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -253,6 +253,8 @@ std::string Store::getUri() bool Store::isValidPath(const Path & storePath) { + assertStorePath(storePath); + auto hashPart = storePathToHash(storePath); { @@ -303,20 +305,20 @@ ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath) std::promise<ref<ValidPathInfo>> promise; queryPathInfo(storePath, - [&](ref<ValidPathInfo> info) { - promise.set_value(info); - }, - [&](std::exception_ptr exc) { - promise.set_exception(exc); - }); + {[&](std::future<ref<ValidPathInfo>> result) { + try { + promise.set_value(result.get()); + } catch (...) { + promise.set_exception(std::current_exception()); + } + }}); return promise.get_future().get(); } void Store::queryPathInfo(const Path & storePath, - std::function<void(ref<ValidPathInfo>)> success, - std::function<void(std::exception_ptr exc)> failure) + Callback<ref<ValidPathInfo>> callback) { auto hashPart = storePathToHash(storePath); @@ -328,7 +330,7 @@ void Store::queryPathInfo(const Path & storePath, stats.narInfoReadAverted++; if (!*res) throw InvalidPath(format("path '%s' is not valid") % storePath); - return success(ref<ValidPathInfo>(*res)); + return callback(ref<ValidPathInfo>(*res)); } } @@ -344,35 +346,36 @@ void Store::queryPathInfo(const Path & storePath, (res.second->path != storePath && storePathToName(storePath) != "")) throw InvalidPath(format("path '%s' is not valid") % storePath); } - return success(ref<ValidPathInfo>(res.second)); + return callback(ref<ValidPathInfo>(res.second)); } } - } catch (std::exception & e) { - return callFailure(failure); - } + } catch (...) 
{ return callback.rethrow(); } queryPathInfoUncached(storePath, - [this, storePath, hashPart, success, failure](std::shared_ptr<ValidPathInfo> info) { + {[this, storePath, hashPart, callback](std::future<std::shared_ptr<ValidPathInfo>> fut) { - if (diskCache) - diskCache->upsertNarInfo(getUri(), hashPart, info); + try { + auto info = fut.get(); - { - auto state_(state.lock()); - state_->pathInfoCache.upsert(hashPart, info); - } + if (diskCache) + diskCache->upsertNarInfo(getUri(), hashPart, info); - if (!info - || (info->path != storePath && storePathToName(storePath) != "")) - { - stats.narInfoMissing++; - return failure(std::make_exception_ptr(InvalidPath(format("path '%s' is not valid") % storePath))); - } + { + auto state_(state.lock()); + state_->pathInfoCache.upsert(hashPart, info); + } - callSuccess(success, failure, ref<ValidPathInfo>(info)); + if (!info + || (info->path != storePath && storePathToName(storePath) != "")) + { + stats.narInfoMissing++; + throw InvalidPath("path '%s' is not valid", storePath); + } - }, failure); + callback(ref<ValidPathInfo>(info)); + } catch (...) { callback.rethrow(); } + }}); } @@ -392,26 +395,19 @@ PathSet Store::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubsti auto doQuery = [&](const Path & path ) { checkInterrupt(); - queryPathInfo(path, - [path, &state_, &wakeup](ref<ValidPathInfo> info) { - auto state(state_.lock()); + queryPathInfo(path, {[path, &state_, &wakeup](std::future<ref<ValidPathInfo>> fut) { + auto state(state_.lock()); + try { + auto info = fut.get(); state->valid.insert(path); - assert(state->left); - if (!--state->left) - wakeup.notify_one(); - }, - [path, &state_, &wakeup](std::exception_ptr exc) { - auto state(state_.lock()); - try { - std::rethrow_exception(exc); - } catch (InvalidPath &) { - } catch (...) { - state->exc = exc; - } - assert(state->left); - if (!--state->left) - wakeup.notify_one(); - }); + } catch (InvalidPath &) { + } catch (...) 
{ + state->exc = std::current_exception(); + } + assert(state->left); + if (!--state->left) + wakeup.notify_one(); + }}); }; for (auto & path : paths) @@ -590,32 +586,15 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, uint64_t total = 0; - auto progress = [&](size_t len) { - total += len; - act.progress(total, info->narSize); - }; - - struct MyStringSink : StringSink - { - typedef std::function<void(size_t)> Callback; - Callback callback; - MyStringSink(Callback callback) : callback(callback) { } - void operator () (const unsigned char * data, size_t len) override - { - StringSink::operator ()(data, len); - callback(len); - }; - }; - - MyStringSink sink(progress); - srcStore->narFromPath({storePath}, sink); - + // FIXME +#if 0 if (!info->narHash) { auto info2 = make_ref<ValidPathInfo>(*info); info2->narHash = hashString(htSHA256, *sink.s); if (!info->narSize) info2->narSize = sink.s->size(); info = info2; } +#endif if (info->ultimate) { auto info2 = make_ref<ValidPathInfo>(*info); @@ -623,7 +602,16 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, info = info2; } - dstStore->addToStore(*info, sink.s, repair, checkSigs); + auto source = sinkToSource([&](Sink & sink) { + LambdaSink wrapperSink([&](const unsigned char * data, size_t len) { + sink(data, len); + total += len; + act.progress(total, info->narSize); + }); + srcStore->narFromPath({storePath}, wrapperSink); + }); + + dstStore->addToStore(*info, *source, repair, checkSigs); } @@ -765,7 +753,8 @@ bool ValidPathInfo::isContentAddressed(const Store & store) const else if (hasPrefix(ca, "fixed:")) { bool recursive = ca.compare(6, 2, "r:") == 0; Hash hash(std::string(ca, recursive ? 8 : 6)); - if (store.makeFixedOutputPath(recursive, hash, storePathToName(path)) == path) + if (references.empty() && + store.makeFixedOutputPath(recursive, hash, storePathToName(path)) == path) return true; else warn(); @@ -808,6 +797,21 @@ std::string makeFixedOutputCA(bool recursive, const Hash & hash) } +void Store::addToStore(const ValidPathInfo & info, Source & narSource, + RepairFlag repair, CheckSigsFlag checkSigs, + std::shared_ptr<FSAccessor> accessor) +{ + addToStore(info, make_ref<std::string>(narSource.drain()), repair, checkSigs, accessor); +} + +void Store::addToStore(const ValidPathInfo & info, const ref<std::string> & nar, + RepairFlag repair, CheckSigsFlag checkSigs, + std::shared_ptr<FSAccessor> accessor) +{ + StringSource source(*nar); + addToStore(info, source, repair, checkSigs, accessor); +} + } @@ -839,7 +843,7 @@ ref<Store> openStore(const std::string & uri_, for (auto fun : *RegisterStoreImplementation::implementations) { auto store = fun(uri, params); if (store) { - store->handleUnknownSettings(); + store->warnUnknownSettings(); return ref<Store>(store); } } diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 563aa566bd37..6ee2d550679b 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -355,14 +355,12 @@ public: /* Asynchronous version of queryPathInfo(). 
*/ void queryPathInfo(const Path & path, - std::function<void(ref<ValidPathInfo>)> success, - std::function<void(std::exception_ptr exc)> failure); + Callback<ref<ValidPathInfo>> callback); protected: virtual void queryPathInfoUncached(const Path & path, - std::function<void(std::shared_ptr<ValidPathInfo>)> success, - std::function<void(std::exception_ptr exc)> failure) = 0; + Callback<std::shared_ptr<ValidPathInfo>> callback) = 0; public: @@ -399,9 +397,14 @@ public: virtual bool wantMassQuery() { return false; } /* Import a path into the store. */ + virtual void addToStore(const ValidPathInfo & info, Source & narSource, + RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs, + std::shared_ptr<FSAccessor> accessor = 0); + + // FIXME: remove virtual void addToStore(const ValidPathInfo & info, const ref<std::string> & nar, RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs, - std::shared_ptr<FSAccessor> accessor = 0) = 0; + std::shared_ptr<FSAccessor> accessor = 0); /* Copy the contents of a path to the store and register the validity the resulting path. The resulting path is returned. diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index 996e1d25355f..5ebdfaf134d6 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -6,7 +6,7 @@ namespace nix { #define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_2 0x6478696f -#define PROTOCOL_VERSION 0x114 +#define PROTOCOL_VERSION 0x115 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) |
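
For downstream callers, the new asynchronous interface composes with std::promise in the same way Store::queryPathInfo() does internally in store-api.cc above. What follows is a minimal sketch, not part of the patch: it assumes only the libstore declarations shown in this diff (Store, ValidPathInfo, Path, ref<> and Callback<> are all visible via store-api.hh) and an already-opened store; the helper name queryPathInfoBlocking is illustrative.

    #include "store-api.hh"   // nix::Store, nix::ValidPathInfo, nix::Callback, nix::ref, nix::Path
    #include <future>

    using namespace nix;

    /* Drive the asynchronous queryPathInfo() overload and block until the
       result (or exception) arrives, mirroring the promise/future pattern
       used by the synchronous wrapper in store-api.cc. */
    static ref<const ValidPathInfo> queryPathInfoBlocking(Store & store, const Path & path)
    {
        std::promise<ref<ValidPathInfo>> promise;

        store.queryPathInfo(path,
            {[&](std::future<ref<ValidPathInfo>> result) {
                try {
                    promise.set_value(result.get());                  // success: forward the path info
                } catch (...) {
                    promise.set_exception(std::current_exception());  // failure: forward the exception
                }
            }});

        return promise.get_future().get();
    }

The same adapter pattern is what binary-cache-store.cc uses for the two getFile() variants: a subclass that only implements the callback form still serves synchronous callers through a promise that the callback completes.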