Diffstat (limited to 'src'): 64 files changed, 1649 insertions, 734 deletions
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc index 2ce20882da17..d7aee288670a 100644 --- a/src/build-remote/build-remote.cc +++ b/src/build-remote/build-remote.cc @@ -17,13 +17,12 @@ #include "derivations.hh" using namespace nix; -using std::cerr; using std::cin; -static void handle_alarm(int sig) { +static void handleAlarm(int sig) { } -class machine { +class Machine { const std::set<string> supportedFeatures; const std::set<string> mandatoryFeatures; @@ -31,8 +30,8 @@ public: const string hostName; const std::vector<string> systemTypes; const string sshKey; - const unsigned long long maxJobs; - const unsigned long long speedFactor; + const unsigned int maxJobs; + const unsigned int speedFactor; bool enabled; bool allSupported(const std::set<string> & features) const { @@ -50,28 +49,29 @@ public: }); } - machine(decltype(hostName) hostName, + Machine(decltype(hostName) hostName, decltype(systemTypes) systemTypes, decltype(sshKey) sshKey, decltype(maxJobs) maxJobs, decltype(speedFactor) speedFactor, decltype(supportedFeatures) supportedFeatures, decltype(mandatoryFeatures) mandatoryFeatures) : - supportedFeatures{std::move(supportedFeatures)}, - mandatoryFeatures{std::move(mandatoryFeatures)}, - hostName{std::move(hostName)}, - systemTypes{std::move(systemTypes)}, - sshKey{std::move(sshKey)}, - maxJobs{std::move(maxJobs)}, - speedFactor{speedFactor == 0 ? 1 : std::move(speedFactor)}, - enabled{true} {}; + supportedFeatures(supportedFeatures), + mandatoryFeatures(mandatoryFeatures), + hostName(hostName), + systemTypes(systemTypes), + sshKey(sshKey), + maxJobs(maxJobs), + speedFactor(std::max(1U, speedFactor)), + enabled(true) + {}; };; -static std::vector<machine> read_conf() +static std::vector<Machine> readConf() { auto conf = getEnv("NIX_REMOTE_SYSTEMS", SYSCONFDIR "/nix/machines"); - auto machines = std::vector<machine>{}; + auto machines = std::vector<Machine>{}; auto lines = std::vector<string>{}; try { lines = tokenizeString<std::vector<string>>(readFile(conf), "\n"); @@ -87,10 +87,8 @@ static std::vector<machine> read_conf() } auto tokens = tokenizeString<std::vector<string>>(line); auto sz = tokens.size(); - if (sz < 4) { - throw new FormatError(format("Bad machines.conf file %1%") - % conf); - } + if (sz < 4) + throw FormatError("bad machines.conf file ‘%1%’", conf); machines.emplace_back(tokens[0], tokenizeString<std::vector<string>>(tokens[1], ","), tokens[2], @@ -108,7 +106,7 @@ static std::vector<machine> read_conf() static string currentLoad; -static AutoCloseFD openSlotLock(const machine & m, unsigned long long slot) +static AutoCloseFD openSlotLock(const Machine & m, unsigned long long slot) { std::ostringstream fn_stream(currentLoad, std::ios_base::ate | std::ios_base::out); fn_stream << "/"; @@ -126,15 +124,14 @@ int main (int argc, char * * argv) { return handleExceptions(argv[0], [&]() { initNix(); + /* Ensure we don't get any SSH passphrase or host key popups. 
*/ if (putenv(display_env) == -1 || - putenv(ssh_env) == -1) { - throw SysError("Setting SSH env vars"); - } + putenv(ssh_env) == -1) + throw SysError("setting SSH env vars"); - if (argc != 4) { + if (argc != 4) throw UsageError("called without required arguments"); - } auto store = openStore(); @@ -147,15 +144,14 @@ int main (int argc, char * * argv) std::shared_ptr<Store> sshStore; AutoCloseFD bestSlotLock; - auto machines = read_conf(); + auto machines = readConf(); string drvPath; string hostName; for (string line; getline(cin, line);) { auto tokens = tokenizeString<std::vector<string>>(line); auto sz = tokens.size(); - if (sz != 3 && sz != 4) { - throw Error(format("invalid build hook line %1%") % line); - } + if (sz != 3 && sz != 4) + throw Error("invalid build hook line ‘%1%’", line); auto amWilling = tokens[0] == "1"; auto neededSystem = tokens[1]; drvPath = tokens[2]; @@ -174,7 +170,7 @@ int main (int argc, char * * argv) bool rightType = false; - machine * bestMachine = nullptr; + Machine * bestMachine = nullptr; unsigned long long bestLoad = 0; for (auto & m : machines) { if (m.enabled && std::find(m.systemTypes.begin(), @@ -221,11 +217,10 @@ int main (int argc, char * * argv) } if (!bestSlotLock) { - if (rightType && !canBuildLocally) { - cerr << "# postpone\n"; - } else { - cerr << "# decline\n"; - } + if (rightType && !canBuildLocally) + std::cerr << "# postpone\n"; + else + std::cerr << "# decline\n"; break; } @@ -238,47 +233,51 @@ int main (int argc, char * * argv) lock = -1; try { - sshStore = openStore("ssh://" + bestMachine->hostName + "?key=" + bestMachine->sshKey); + sshStore = openStore("ssh-ng://" + bestMachine->hostName, + { {"ssh-key", bestMachine->sshKey }, + {"max-connections", "1" } }); hostName = bestMachine->hostName; } catch (std::exception & e) { - cerr << e.what() << '\n'; - cerr << "unable to open SSH connection to ‘" << bestMachine->hostName << "’, trying other available machines...\n"; + printError("unable to open SSH connection to ‘%s’: %s; trying other available machines...", + bestMachine->hostName, e.what()); bestMachine->enabled = false; continue; } goto connected; } } + connected: - cerr << "# accept\n"; + std::cerr << "# accept\n"; string line; - if (!getline(cin, line)) { + if (!getline(cin, line)) throw Error("hook caller didn't send inputs"); - } - auto inputs = tokenizeString<std::list<string>>(line); - if (!getline(cin, line)) { + auto inputs = tokenizeString<PathSet>(line); + if (!getline(cin, line)) throw Error("hook caller didn't send outputs"); - } - auto outputs = tokenizeString<Strings>(line); + auto outputs = tokenizeString<PathSet>(line); AutoCloseFD uploadLock = openLockFile(currentLoad + "/" + hostName + ".upload-lock", true); - auto old = signal(SIGALRM, handle_alarm); + auto old = signal(SIGALRM, handleAlarm); alarm(15 * 60); - if (!lockFile(uploadLock.get(), ltWrite, true)) { - cerr << "somebody is hogging the upload lock for " << hostName << ", continuing...\n"; - } + if (!lockFile(uploadLock.get(), ltWrite, true)) + printError("somebody is hogging the upload lock for ‘%s’, continuing..."); alarm(0); signal(SIGALRM, old); copyPaths(store, ref<Store>(sshStore), inputs); uploadLock = -1; - cerr << "building ‘" << drvPath << "’ on ‘" << hostName << "’\n"; + printError("building ‘%s’ on ‘%s’", drvPath, hostName); sshStore->buildDerivation(drvPath, readDerivation(drvPath)); - std::remove_if(outputs.begin(), outputs.end(), [=](const Path & path) { return store->isValidPath(path); }); - if (!outputs.empty()) { - 
setenv("NIX_HELD_LOCKS", concatStringsSep(" ", outputs).c_str(), 1); /* FIXME: ugly */ - copyPaths(ref<Store>(sshStore), store, outputs); + PathSet missing; + for (auto & path : outputs) + if (!store->isValidPath(path)) missing.insert(path); + + if (!missing.empty()) { + setenv("NIX_HELD_LOCKS", concatStringsSep(" ", missing).c_str(), 1); /* FIXME: ugly */ + copyPaths(ref<Store>(sshStore), store, missing); } + return; }); } diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 5a570cefb2fa..615cc8138433 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -59,6 +59,8 @@ void EvalState::realiseContext(const PathSet & context) drvs.insert(decoded.first + "!" + decoded.second); } if (!drvs.empty()) { + if (!settings.enableImportFromDerivation) + throw EvalError(format("attempted to realize ‘%1%’ during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin())); /* For performance, prefetch all substitute info. */ PathSet willBuild, willSubstitute, unknown; unsigned long long downloadSize, narSize; @@ -176,6 +178,45 @@ static void prim_importNative(EvalState & state, const Pos & pos, Value * * args } +/* Execute a program and parse its output */ +static void prim_exec(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + state.forceList(*args[0], pos); + auto elems = args[0]->listElems(); + auto count = args[0]->listSize(); + if (count == 0) { + throw EvalError(format("at least one argument to 'exec' required, at %1%") % pos); + } + PathSet context; + auto program = state.coerceToString(pos, *elems[0], context, false, false); + Strings commandArgs; + for (unsigned int i = 1; i < args[0]->listSize(); ++i) { + commandArgs.emplace_back(state.coerceToString(pos, *elems[i], context, false, false)); + } + try { + state.realiseContext(context); + } catch (InvalidPathError & e) { + throw EvalError(format("cannot execute ‘%1%’, since path ‘%2%’ is not valid, at %3%") + % program % e.path % pos); + } + + auto output = runProgram(program, true, commandArgs); + Expr * parsed; + try { + parsed = state.parseExprFromString(output, pos.file); + } catch (Error & e) { + e.addPrefix(format("While parsing the output from ‘%1%’, at %2%\n") % program % pos); + throw; + } + try { + state.eval(parsed, v); + } catch (Error & e) { + e.addPrefix(format("While evaluating the output from ‘%1%’, at %2%\n") % program % pos); + throw; + } +} + + /* Return a string representing the type of the expression. 
*/ static void prim_typeOf(EvalState & state, const Pos & pos, Value * * args, Value & v) { @@ -1901,8 +1942,10 @@ void EvalState::createBaseEnv() mkApp(v, *baseEnv.values[baseEnvDispl - 1], *v2); forceValue(v); addConstant("import", v); - if (settings.enableImportNative) + if (settings.enableNativeCode) { addPrimOp("__importNative", 2, prim_importNative); + addPrimOp("__exec", 1, prim_exec); + } addPrimOp("__typeOf", 1, prim_typeOf); addPrimOp("isNull", 1, prim_isNull); addPrimOp("__isFunction", 1, prim_isFunction); diff --git a/src/libexpr/primops/fetchgit.cc b/src/libexpr/primops/fetchgit.cc index bd440c8c62ad..09e2c077baba 100644 --- a/src/libexpr/primops/fetchgit.cc +++ b/src/libexpr/primops/fetchgit.cc @@ -58,12 +58,14 @@ static void prim_fetchgit(EvalState & state, const Pos & pos, Value * * args, Va for (auto & attr : *args[0]->attrs) { string name(attr.name); - if (name == "url") - url = state.forceStringNoCtx(*attr.value, *attr.pos); - else if (name == "rev") + if (name == "url") { + PathSet context; + url = state.coerceToString(*attr.pos, *attr.value, context, false, false); + if (hasPrefix(url, "/")) url = "file://" + url; + } else if (name == "rev") rev = state.forceStringNoCtx(*attr.value, *attr.pos); else - throw EvalError(format("unsupported argument ‘%1%’ to ‘fetchgit’, at %3%") % attr.name % attr.pos); + throw EvalError("unsupported argument ‘%s’ to ‘fetchgit’, at %s", attr.name, *attr.pos); } if (url.empty()) diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh index 81f918d48de7..802e8ed2ee75 100644 --- a/src/libexpr/value.hh +++ b/src/libexpr/value.hh @@ -256,7 +256,7 @@ size_t valueSize(Value & v); #if HAVE_BOEHMGC typedef std::vector<Value *, gc_allocator<Value *> > ValueVector; -typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<Value *> > ValueMap; +typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<std::pair<const Symbol, Value *> > > ValueMap; #else typedef std::vector<Value *> ValueVector; typedef std::map<Symbol, Value *> ValueMap; diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 53fa83fe0dee..c1828aa7db88 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -106,8 +106,6 @@ void initNix() std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf)); #endif - logger = makeDefaultLogger(); - /* Initialise OpenSSL locking. */ opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks()); CRYPTO_set_locking_callback(opensslLockCallback); @@ -167,6 +165,10 @@ struct LegacyArgs : public MixCommonArgs settings.set("build-fallback", "true"); }); + mkFlag1('j', "max-jobs", "jobs", "maximum number of parallel builds", [=](std::string s) { + settings.set("build-max-jobs", s); + }); + auto intSettingAlias = [&](char shortName, const std::string & longName, const std::string & description, const std::string & dest) { mkFlag<unsigned int>(shortName, longName, description, [=](unsigned int n) { @@ -174,7 +176,6 @@ struct LegacyArgs : public MixCommonArgs }); }; - intSettingAlias('j', "max-jobs", "maximum number of parallel builds", "build-max-jobs"); intSettingAlias(0, "cores", "maximum number of CPU cores to use inside a build", "build-cores"); intSettingAlias(0, "max-silent-time", "number of seconds of silence before a build is killed", "build-max-silent-time"); intSettingAlias(0, "timeout", "number of seconds before a build is killed", "build-timeout"); @@ -329,11 +330,7 @@ RunPager::~RunPager() pid.wait(); } } catch (...) { - try { - pid.kill(true); - } catch (...) 
{ - ignoreException(); - } + ignoreException(); } } diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 3e07a2aa2b60..b536c6c00044 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -79,10 +79,7 @@ struct BinaryCacheStoreAccessor : public FSAccessor BinaryCacheStore::BinaryCacheStore(const Params & params) : Store(params) - , compression(get(params, "compression", "xz")) - , writeNARListing(get(params, "write-nar-listing", "0") == "1") { - auto secretKeyFile = get(params, "secret-key", ""); if (secretKeyFile != "") secretKey = std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile))); @@ -97,7 +94,7 @@ void BinaryCacheStore::init() auto cacheInfo = getFile(cacheInfoFile); if (!cacheInfo) { - upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n"); + upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info"); } else { for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) { size_t colon = line.find(':'); @@ -224,7 +221,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str } } - upsertFile(storePathToHash(info.path) + ".ls.xz", *compress("xz", jsonOut.str())); + upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(), "application/json"); } else { @@ -250,10 +247,11 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str narInfo->url = "nar/" + printHash32(narInfo->fileHash) + ".nar" + (compression == "xz" ? ".xz" : compression == "bzip2" ? ".bz2" : + compression == "br" ? ".br" : ""); if (repair || !fileExists(narInfo->url)) { stats.narWrite++; - upsertFile(narInfo->url, *narCompressed); + upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar"); } else stats.narWriteAverted++; @@ -264,7 +262,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str /* Atomically write the NAR info file.*/ if (secretKey) narInfo->sign(*secretKey); - upsertFile(narInfoFile, narInfo->to_string()); + upsertFile(narInfoFile, narInfo->to_string(), "text/x-nix-narinfo"); auto hashPart = storePathToHash(narInfo->path); @@ -382,4 +380,28 @@ ref<FSAccessor> BinaryCacheStore::getFSAccessor() return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this())); } +std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path & path) +{ + Path drvPath; + + if (isDerivation(path)) + drvPath = path; + else { + try { + auto info = queryPathInfo(path); + // FIXME: add a "Log" field to .narinfo + if (info->deriver == "") return nullptr; + drvPath = info->deriver; + } catch (InvalidPath &) { + return nullptr; + } + } + + auto logPath = "log/" + baseNameOf(drvPath); + + debug("fetching build log from binary cache ‘%s/%s’", getUri(), logPath); + + return getFile(logPath); +} + } diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh index a70d50d4949c..5c2d0acfdbb7 100644 --- a/src/libstore/binary-cache-store.hh +++ b/src/libstore/binary-cache-store.hh @@ -13,13 +13,15 @@ struct NarInfo; class BinaryCacheStore : public Store { -private: +public: - std::unique_ptr<SecretKey> secretKey; + const Setting<std::string> compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"}; + const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"}; + const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"}; - std::string 
compression; +private: - bool writeNARListing; + std::unique_ptr<SecretKey> secretKey; protected: @@ -31,7 +33,9 @@ public: virtual bool fileExists(const std::string & path) = 0; - virtual void upsertFile(const std::string & path, const std::string & data) = 0; + virtual void upsertFile(const std::string & path, + const std::string & data, + const std::string & mimeType) = 0; /* Return the contents of the specified file, or null if it doesn't exist. */ @@ -122,6 +126,8 @@ public: void addSignatures(const Path & storePath, const StringSet & sigs) override { notImpl(); } + std::shared_ptr<std::string> getBuildLog(const Path & path) override; + }; } diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 4a7e1a62b505..b23447fa0735 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -635,7 +635,7 @@ HookInstance::~HookInstance() { try { toHook.writeSide = -1; - if (pid != -1) pid.kill(true); + if (pid != -1) pid.kill(); } catch (...) { ignoreException(); } @@ -1430,7 +1430,7 @@ void DerivationGoal::buildDone() to have terminated. In fact, the builder could also have simply have closed its end of the pipe, so just to be sure, kill it. */ - int status = hook ? hook->pid.kill(true) : pid.kill(true); + int status = hook ? hook->pid.kill() : pid.kill(); debug(format("builder process for ‘%1%’ finished") % drvPath); @@ -1573,36 +1573,48 @@ HookReply DerivationGoal::tryBuildHook() if (!worker.hook) worker.hook = std::make_unique<HookInstance>(); - /* Tell the hook about system features (beyond the system type) - required from the build machine. (The hook could parse the - drv file itself, but this is easier.) */ - Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures")); - for (auto & i : features) checkStoreName(i); /* !!! abuse */ - - /* Send the request to the hook. */ - writeLine(worker.hook->toHook.writeSide.get(), (format("%1% %2% %3% %4%") - % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0") - % drv->platform % drvPath % concatStringsSep(",", features)).str()); + try { - /* Read the first line of input, which should be a word indicating - whether the hook wishes to perform the build. */ - string reply; - while (true) { - string s = readLine(worker.hook->fromHook.readSide.get()); - if (string(s, 0, 2) == "# ") { - reply = string(s, 2); - break; + /* Tell the hook about system features (beyond the system type) + required from the build machine. (The hook could parse the + drv file itself, but this is easier.) */ + Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures")); + for (auto & i : features) checkStoreName(i); /* !!! abuse */ + + /* Send the request to the hook. */ + writeLine(worker.hook->toHook.writeSide.get(), (format("%1% %2% %3% %4%") + % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0") + % drv->platform % drvPath % concatStringsSep(",", features)).str()); + + /* Read the first line of input, which should be a word indicating + whether the hook wishes to perform the build. */ + string reply; + while (true) { + string s = readLine(worker.hook->fromHook.readSide.get()); + if (string(s, 0, 2) == "# ") { + reply = string(s, 2); + break; + } + s += "\n"; + writeToStderr(s); } - s += "\n"; - writeToStderr(s); - } - debug(format("hook reply is ‘%1%’") % reply); + debug(format("hook reply is ‘%1%’") % reply); + + if (reply == "decline" || reply == "postpone") + return reply == "decline" ? 
rpDecline : rpPostpone; + else if (reply != "accept") + throw Error(format("bad hook reply ‘%1%’") % reply); - if (reply == "decline" || reply == "postpone") - return reply == "decline" ? rpDecline : rpPostpone; - else if (reply != "accept") - throw Error(format("bad hook reply ‘%1%’") % reply); + } catch (SysError & e) { + if (e.errNo == EPIPE) { + printError("build hook died unexpectedly: %s", + chomp(drainFD(worker.hook->fromHook.readSide.get()))); + worker.hook = 0; + return rpDecline; + } else + throw; + } printMsg(lvlTalkative, format("using hook to build path(s) %1%") % showPaths(missingPaths)); @@ -2358,8 +2370,6 @@ void DerivationGoal::runChild() ss.push_back("/dev/tty"); ss.push_back("/dev/urandom"); ss.push_back("/dev/zero"); - ss.push_back("/dev/ptmx"); - ss.push_back("/dev/pts"); createSymlink("/proc/self/fd", chrootRootDir + "/dev/fd"); createSymlink("/proc/self/fd/0", chrootRootDir + "/dev/stdin"); createSymlink("/proc/self/fd/1", chrootRootDir + "/dev/stdout"); @@ -2374,10 +2384,11 @@ void DerivationGoal::runChild() ss.push_back("/etc/nsswitch.conf"); ss.push_back("/etc/services"); ss.push_back("/etc/hosts"); - ss.push_back("/var/run/nscd/socket"); + if (pathExists("/var/run/nscd/socket")) + ss.push_back("/var/run/nscd/socket"); } - for (auto & i : ss) dirsInChroot[i] = i; + for (auto & i : ss) dirsInChroot.emplace(i, i); /* Bind-mount all the directories from the "host" filesystem that we want in the chroot @@ -2415,17 +2426,13 @@ void DerivationGoal::runChild() fmt("size=%s", settings.sandboxShmSize).c_str()) == -1) throw SysError("mounting /dev/shm"); -#if 0 - // FIXME: can't figure out how to do this in a user - // namespace. - /* Mount a new devpts on /dev/pts. Note that this requires the kernel to be compiled with CONFIG_DEVPTS_MULTIPLE_INSTANCES=y (which is the case if /dev/ptx/ptmx exists). */ if (pathExists("/dev/pts/ptmx") && !pathExists(chrootRootDir + "/dev/ptmx") - && dirsInChroot.find("/dev/pts") == dirsInChroot.end()) + && !dirsInChroot.count("/dev/pts")) { if (mount("none", (chrootRootDir + "/dev/pts").c_str(), "devpts", 0, "newinstance,mode=0620") == -1) throw SysError("mounting /dev/pts"); @@ -2435,7 +2442,6 @@ void DerivationGoal::runChild() Linux versions, it is created with permissions 0. */ chmod_(chrootRootDir + "/dev/pts/ptmx", 0666); } -#endif /* Do the chroot(). */ if (chdir(chrootRootDir.c_str()) == -1) @@ -2732,6 +2738,8 @@ void DerivationGoal::registerOutputs() Path path = i.second.path; if (missingPaths.find(path) == missingPaths.end()) continue; + ValidPathInfo info; + Path actualPath = path; if (useChroot) { actualPath = chrootRootDir + path; @@ -2834,6 +2842,8 @@ void DerivationGoal::registerOutputs() format("output path ‘%1%’ has %2% hash ‘%3%’ when ‘%4%’ was expected") % path % i.second.hashAlgo % printHash16or32(h2) % printHash16or32(h)); } + + info.ca = makeFixedOutputCA(recursive, h2); } /* Get rid of all weird permissions. This also checks that @@ -2933,7 +2943,6 @@ void DerivationGoal::registerOutputs() worker.markContentsGood(path); } - ValidPathInfo info; info.path = path; info.narHash = hash.first; info.narSize = hash.second; @@ -3012,9 +3021,6 @@ void DerivationGoal::registerOutputs() } -string drvsLogDir = "drvs"; - - Path DerivationGoal::openLogFile() { logSize = 0; @@ -3024,13 +3030,11 @@ Path DerivationGoal::openLogFile() string baseName = baseNameOf(drvPath); /* Create a log file. 
*/ - Path dir = (format("%1%/%2%/%3%/") % worker.store.logDir % drvsLogDir % string(baseName, 0, 2)).str(); + Path dir = fmt("%s/%s/%s/", worker.store.logDir, worker.store.drvsLogDir, string(baseName, 0, 2)); createDirs(dir); - Path logFileName = (format("%1%/%2%%3%") - % dir - % string(baseName, 2) - % (settings.compressLog ? ".bz2" : "")).str(); + Path logFileName = fmt("%s/%s%s", dir, string(baseName, 2), + settings.compressLog ? ".bz2" : ""); fdLogFile = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0666); if (!fdLogFile) throw SysError(format("creating log file ‘%1%’") % logFileName); diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 79526c594f71..0c6ceb9f6741 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -4,7 +4,7 @@ #include "util.hh" #include "worker-protocol.hh" #include "fs-accessor.hh" - +#include "istringstream_nocopy.hh" namespace nix { @@ -152,7 +152,7 @@ static StringSet parseStrings(std::istream & str, bool arePaths) static Derivation parseDerivation(const string & s) { Derivation drv; - std::istringstream str(s); + istringstream_nocopy str(s); expect(str, "Derive(["); /* Parse the list of outputs. */ @@ -397,8 +397,8 @@ PathSet BasicDerivation::outputPaths() const Source & readDerivation(Source & in, Store & store, BasicDerivation & drv) { drv.outputs.clear(); - auto nr = readInt(in); - for (unsigned int n = 0; n < nr; n++) { + auto nr = readNum<size_t>(in); + for (size_t n = 0; n < nr; n++) { auto name = readString(in); DerivationOutput o; in >> o.path >> o.hashAlgo >> o.hash; @@ -410,8 +410,8 @@ Source & readDerivation(Source & in, Store & store, BasicDerivation & drv) in >> drv.platform >> drv.builder; drv.args = readStrings<Strings>(in); - nr = readInt(in); - for (unsigned int n = 0; n < nr; n++) { + nr = readNum<size_t>(in); + for (size_t n = 0; n < nr; n++) { auto key = readString(in); auto value = readString(in); drv.env[key] = value; diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 661ee2ed54bb..95e6f7bace08 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -5,6 +5,11 @@ #include "store-api.hh" #include "archive.hh" #include "s3.hh" +#include "compression.hh" + +#ifdef ENABLE_S3 +#include <aws/core/client/ClientConfiguration.h> +#endif #include <unistd.h> #include <fcntl.h> @@ -34,6 +39,16 @@ std::string resolveUri(const std::string & uri) return uri; } +ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data) +{ + if (encoding == "") + return data; + else if (encoding == "br") + return decompress(encoding, *data); + else + throw Error("unsupported Content-Encoding ‘%s’", encoding); +} + struct CurlDownloader : public Downloader { CURLM * curlm = 0; @@ -67,6 +82,8 @@ struct CurlDownloader : public Downloader struct curl_slist * requestHeaders = 0; + std::string encoding; + DownloadItem(CurlDownloader & downloader, const DownloadRequest & request) : downloader(downloader), request(request) { @@ -124,6 +141,7 @@ struct CurlDownloader : public Downloader auto ss = tokenizeString<vector<string>>(line, " "); status = ss.size() >= 2 ? 
ss[1] : ""; result.data = std::make_shared<std::string>(); + encoding = ""; } else { auto i = line.find(':'); if (i != string::npos) { @@ -139,7 +157,8 @@ struct CurlDownloader : public Downloader debug(format("shutting down on 200 HTTP response with expected ETag")); return 0; } - } + } else if (name == "content-encoding") + encoding = trim(string(line, i + 1));; } } return realSize; @@ -224,8 +243,7 @@ struct CurlDownloader : public Downloader curl_easy_setopt(req, CURLOPT_NOBODY, 1); if (request.verifyTLS) - curl_easy_setopt(req, CURLOPT_CAINFO, - getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt")).c_str()); + curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str()); else { curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0); curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0); @@ -266,14 +284,34 @@ struct CurlDownloader : public Downloader { result.cached = httpStatus == 304; done = true; - callSuccess(success, failure, const_cast<const DownloadResult &>(result)); + + try { + result.data = decodeContent(encoding, ref<std::string>(result.data)); + callSuccess(success, failure, const_cast<const DownloadResult &>(result)); + } catch (...) { + done = true; + callFailure(failure, std::current_exception()); + } } else { Error err = (httpStatus == 404 || code == CURLE_FILE_COULDNT_READ_FILE) ? NotFound : httpStatus == 403 ? Forbidden : (httpStatus == 408 || httpStatus == 500 || httpStatus == 503 || httpStatus == 504 || httpStatus == 522 || httpStatus == 524 - || code == CURLE_COULDNT_RESOLVE_HOST || code == CURLE_RECV_ERROR) ? Transient : + || code == CURLE_COULDNT_RESOLVE_HOST + || code == CURLE_RECV_ERROR + + // this seems to occur occasionally for retriable reasons, and shows up in an error like this: + // curl: (23) Failed writing body (315 != 16366) + || code == CURLE_WRITE_ERROR + + // this is a generic SSL failure that in some cases (e.g., certificate error) is permanent but also appears in transient cases, so we consider it retryable + || code == CURLE_SSL_CONNECT_ERROR +#if LIBCURL_VERSION_NUM >= 0x073200 + || code == CURLE_HTTP2 + || code == CURLE_HTTP2_STREAM +#endif + ) ? Transient : Misc; attempt++; @@ -491,7 +529,7 @@ struct CurlDownloader : public Downloader // FIXME: do this on a worker thread sync2async<DownloadResult>(success, failure, [&]() -> DownloadResult { #ifdef ENABLE_S3 - S3Helper s3Helper; + S3Helper s3Helper(Aws::Region::US_EAST_1); // FIXME: make configurable auto slash = request.uri.find('/', 5); if (slash == std::string::npos) throw nix::Error("bad S3 URI ‘%s’", request.uri); @@ -612,6 +650,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa Hash hash = hashString(expectedHash ? expectedHash.type : htSHA256, *res.data); info.path = store->makeFixedOutputPath(false, hash, name); info.narHash = hashString(htSHA256, *sink.s); + info.ca = makeFixedOutputCA(false, hash); store->addToStore(info, sink.s, false, true); storePath = info.path; } @@ -640,7 +679,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa Path tmpDir = createTempDir(); AutoDelete autoDelete(tmpDir, true); // FIXME: this requires GNU tar for decompression. 
- runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"}, ""); + runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"}); unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, false); } replaceSymlink(unpackedStorePath, unpackedLink); diff --git a/src/libstore/download.hh b/src/libstore/download.hh index bdb5011e7830..62f3860b9dae 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -15,7 +15,7 @@ struct DownloadRequest bool verifyTLS = true; enum { yes, no, automatic } showProgress = yes; bool head = false; - size_t tries = 1; + size_t tries = 5; unsigned int baseRetryTimeMs = 250; DownloadRequest(const std::string & uri) : uri(uri) { } @@ -73,4 +73,7 @@ public: bool isUri(const string & s); +/* Decode data according to the Content-Encoding header. */ +ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data); + } diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc index c5618c826c54..2b8ab063e18e 100644 --- a/src/libstore/export-import.cc +++ b/src/libstore/export-import.cc @@ -61,39 +61,17 @@ void Store::exportPath(const Path & path, Sink & sink) hashAndWriteSink << exportMagic << path << info->references << info->deriver << 0; } -struct TeeSource : Source -{ - Source & readSource; - ref<std::string> data; - TeeSource(Source & readSource) - : readSource(readSource) - , data(make_ref<std::string>()) - { - } - size_t read(unsigned char * data, size_t len) - { - size_t n = readSource.read(data, len); - this->data->append((char *) data, n); - return n; - } -}; - -struct NopSink : ParseSink -{ -}; - Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor, bool dontCheckSigs) { Paths res; while (true) { - unsigned long long n = readLongLong(source); + auto n = readNum<uint64_t>(source); if (n == 0) break; if (n != 1) throw Error("input doesn't look like something created by ‘nix-store --export’"); /* Extract the NAR from the source. */ - TeeSource tee(source); - NopSink sink; - parseDump(sink, tee); + TeeSink tee(source); + parseDump(tee, tee.source); uint32_t magic = readInt(source); if (magic != exportMagic) @@ -110,14 +88,14 @@ Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor, info.deriver = readString(source); if (info.deriver != "") assertStorePath(info.deriver); - info.narHash = hashString(htSHA256, *tee.data); - info.narSize = tee.data->size(); + info.narHash = hashString(htSHA256, *tee.source.data); + info.narSize = tee.source.data->size(); // Ignore optional legacy signature. 
if (readInt(source) == 1) readString(source); - addToStore(info, tee.data, false, dontCheckSigs, accessor); + addToStore(info, tee.source.data, false, dontCheckSigs, accessor); res.push_back(info.path); } diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 8e90913cc3f1..0b03d61a789a 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -679,7 +679,7 @@ void LocalStore::removeUnusedLinks(const GCState & state) if (unlink(path.c_str()) == -1) throw SysError(format("deleting ‘%1%’") % path); - state.results.bytesFreed += st.st_blocks * 512; + state.results.bytesFreed += st.st_blocks * 512ULL; } struct stat st; diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 62ed0376d711..b9f4fada59f6 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -2,9 +2,9 @@ #include "util.hh" #include "archive.hh" -#include <map> #include <algorithm> -#include <unistd.h> +#include <map> +#include <thread> namespace nix { @@ -53,11 +53,7 @@ Settings::Settings() keepGoing = false; tryFallback = false; maxBuildJobs = 1; - buildCores = 1; -#ifdef _SC_NPROCESSORS_ONLN - long res = sysconf(_SC_NPROCESSORS_ONLN); - if (res > 0) buildCores = res; -#endif + buildCores = std::max(1U, std::thread::hardware_concurrency()); readOnlyMode = false; thisSystem = SYSTEM; maxSilentTime = 0; @@ -82,8 +78,10 @@ Settings::Settings() envKeepDerivations = false; lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1"; showTrace = false; - enableImportNative = false; + enableNativeCode = false; netrcFile = fmt("%s/%s", nixConfDir, "netrc"); + caFile = getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt")); + enableImportFromDerivation = true; useSandbox = "false"; // TODO: make into an enum #if __linux__ @@ -98,14 +96,14 @@ Settings::Settings() runDiffHook = false; diffHook = ""; enforceDeterminism = true; - binaryCachePublicKeys = Strings(); + binaryCachePublicKeys = Strings{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="}; secretKeyFiles = Strings(); binaryCachesParallelConnections = 25; enableHttp2 = true; tarballTtl = 60 * 60; signedBinaryCaches = ""; substituters = Strings(); - binaryCaches = Strings(); + binaryCaches = nixStore == "/nix/store" ? 
Strings{"https://cache.nixos.org/"} : Strings(); extraBinaryCaches = Strings(); trustedUsers = Strings({"root"}); allowedUsers = Strings({"*"}); @@ -155,7 +153,15 @@ void Settings::set(const string & name, const string & value) void Settings::update() { _get(tryFallback, "build-fallback"); - _get(maxBuildJobs, "build-max-jobs"); + + std::string s = "1"; + _get(s, "build-max-jobs"); + if (s == "auto") + maxBuildJobs = std::max(1U, std::thread::hardware_concurrency()); + else + if (!string2Int(s, maxBuildJobs)) + throw Error("configuration setting ‘build-max-jobs’ should be ‘auto’ or an integer"); + _get(buildCores, "build-cores"); _get(thisSystem, "system"); _get(maxSilentTime, "build-max-silent-time"); @@ -178,13 +184,13 @@ void Settings::update() _get(envKeepDerivations, "env-keep-derivations"); _get(sshSubstituterHosts, "ssh-substituter-hosts"); _get(useSshSubstituter, "use-ssh-substituter"); - _get(logServers, "log-servers"); - _get(enableImportNative, "allow-unsafe-native-code-during-evaluation"); + _get(enableNativeCode, "allow-unsafe-native-code-during-evaluation"); _get(useCaseHack, "use-case-hack"); _get(preBuildHook, "pre-build-hook"); _get(keepGoing, "keep-going"); _get(keepFailed, "keep-failed"); _get(netrcFile, "netrc-file"); + _get(enableImportFromDerivation, "allow-import-from-derivation"); _get(useSandbox, "build-use-sandbox", "build-use-chroot"); _get(sandboxPaths, "build-sandbox-paths", "build-chroot-dirs"); _get(extraSandboxPaths, "build-extra-sandbox-paths", "build-extra-chroot-dirs"); diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index d74488a41b30..d47fdb7c9de9 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -177,11 +177,8 @@ struct Settings { /* Whether to show a stack trace if Nix evaluation fails. */ bool showTrace; - /* A list of URL prefixes that can return Nix build logs. */ - Strings logServers; - - /* Whether the importNative primop should be enabled */ - bool enableImportNative; + /* Whether native-code enabling primops should be enabled */ + bool enableNativeCode; /* Whether to enable sandboxed builds (string until we get an enum for true/false/relaxed) */ string useSandbox; @@ -260,6 +257,12 @@ struct Settings { downloads. 
*/ Path netrcFile; + /* Path to the SSL CA file used */ + Path caFile; + + /* Whether we allow import-from-derivation */ + bool enableImportFromDerivation; + private: StringSet deprecatedOptions; SettingsMap settings, overrides; diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 9d31f77c921f..37a7d6ace142 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -64,7 +64,9 @@ protected: } } - void upsertFile(const std::string & path, const std::string & data) override + void upsertFile(const std::string & path, + const std::string & data, + const std::string & mimeType) override { throw UploadToHTTP("uploading to an HTTP binary cache is not supported"); } diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index b20ff185f9ba..befc560bfcec 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -4,80 +4,54 @@ #include "serve-protocol.hh" #include "store-api.hh" #include "worker-protocol.hh" +#include "ssh.hh" namespace nix { -static std::string uriScheme = "legacy-ssh://"; +static std::string uriScheme = "ssh://"; struct LegacySSHStore : public Store { - string host; + const Setting<int> maxConnections{this, 1, "max-connections", "maximum number of concurrent SSH connections"}; + const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"}; + const Setting<bool> compress{this, false, "compress", "whether to compress the connection"}; struct Connection { - Pid sshPid; - AutoCloseFD out; - AutoCloseFD in; + std::unique_ptr<SSHMaster::Connection> sshConn; FdSink to; FdSource from; }; - AutoDelete tmpDir; - - Path socketPath; - - Pid sshMaster; + std::string host; ref<Pool<Connection>> connections; - Path key; + SSHMaster master; - LegacySSHStore(const string & host, const Params & params, - size_t maxConnections = std::numeric_limits<size_t>::max()) + LegacySSHStore(const string & host, const Params & params) : Store(params) , host(host) - , tmpDir(createTempDir("", "nix", true, true, 0700)) - , socketPath((Path) tmpDir + "/ssh.sock") , connections(make_ref<Pool<Connection>>( - maxConnections, + std::max(1, (int) maxConnections), [this]() { return openConnection(); }, [](const ref<Connection> & r) { return true; } )) - , key(get(params, "ssh-key", "")) + , master( + host, + sshKey, + // Use SSH master only if using more than 1 connection. 
+ connections->capacity() > 1, + compress) { } ref<Connection> openConnection() { - if ((pid_t) sshMaster == -1) { - sshMaster = startProcess([&]() { - restoreSignals(); - Strings args{ "ssh", "-M", "-S", socketPath, "-N", "-x", "-a", host }; - if (!key.empty()) - args.insert(args.end(), {"-i", key}); - execvp("ssh", stringsToCharPtrs(args).data()); - throw SysError("starting SSH master connection to host ‘%s’", host); - }); - } - auto conn = make_ref<Connection>(); - Pipe in, out; - in.create(); - out.create(); - conn->sshPid = startProcess([&]() { - if (dup2(in.readSide.get(), STDIN_FILENO) == -1) - throw SysError("duping over STDIN"); - if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) - throw SysError("duping over STDOUT"); - execlp("ssh", "ssh", "-S", socketPath.c_str(), host.c_str(), "nix-store", "--serve", "--write", nullptr); - throw SysError("executing ‘nix-store --serve’ on remote host ‘%s’", host); - }); - in.readSide = -1; - out.writeSide = -1; - conn->out = std::move(out.readSide); - conn->in = std::move(in.writeSide); - conn->to = FdSink(conn->in.get()); - conn->from = FdSource(conn->out.get()); + conn->sshConn = master.startCommand("nix-store --serve --write"); + conn->to = FdSink(conn->sshConn->in.get()); + conn->from = FdSource(conn->sshConn->out.get()); int remoteVersion; @@ -169,9 +143,9 @@ struct LegacySSHStore : public Store /* FIXME: inefficient. */ ParseSink parseSink; /* null sink; just parse the NAR */ - SavingSourceAdapter savedNAR(conn->from); + TeeSource savedNAR(conn->from); parseDump(parseSink, savedNAR); - sink(savedNAR.s); + sink(*savedNAR.data); } /* Unsupported methods. */ @@ -234,6 +208,41 @@ struct LegacySSHStore : public Store bool isTrusted() override { return true; } + void computeFSClosure(const PathSet & paths, + PathSet & out, bool flipDirection = false, + bool includeOutputs = false, bool includeDerivers = false) override + { + if (flipDirection || includeDerivers) { + Store::computeFSClosure(paths, out, flipDirection, includeOutputs, includeDerivers); + return; + } + + auto conn(connections->get()); + + conn->to + << cmdQueryClosure + << includeOutputs + << paths; + conn->to.flush(); + + auto res = readStorePaths<PathSet>(*this, conn->from); + + out.insert(res.begin(), res.end()); + } + + PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override + { + auto conn(connections->get()); + + conn->to + << cmdQueryValidPaths + << false // lock + << maybeSubstitute + << paths; + conn->to.flush(); + + return readStorePaths<PathSet>(*this, conn->from); + } }; static RegisterStoreImplementation regStore([]( diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc index 0f377989bd89..aff22f9fcc22 100644 --- a/src/libstore/local-binary-cache-store.cc +++ b/src/libstore/local-binary-cache-store.cc @@ -30,7 +30,9 @@ protected: bool fileExists(const std::string & path) override; - void upsertFile(const std::string & path, const std::string & data) override; + void upsertFile(const std::string & path, + const std::string & data, + const std::string & mimeType) override; void getFile(const std::string & path, std::function<void(std::shared_ptr<std::string>)> success, @@ -83,7 +85,9 @@ bool LocalBinaryCacheStore::fileExists(const std::string & path) return pathExists(binaryCacheDir + "/" + path); } -void LocalBinaryCacheStore::upsertFile(const std::string & path, const std::string & data) +void LocalBinaryCacheStore::upsertFile(const std::string & path, + const std::string & data, + const 
std::string & mimeType) { atomicWrite(binaryCacheDir + "/" + path, data); } diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index 4571a2211cd2..bf247903c9db 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -2,14 +2,13 @@ #include "fs-accessor.hh" #include "store-api.hh" #include "globals.hh" +#include "compression.hh" +#include "derivations.hh" namespace nix { LocalFSStore::LocalFSStore(const Params & params) : Store(params) - , rootDir(get(params, "root")) - , stateDir(canonPath(get(params, "state", rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir))) - , logDir(canonPath(get(params, "log", rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir))) { } @@ -84,4 +83,48 @@ void LocalFSStore::narFromPath(const Path & path, Sink & sink) dumpPath(getRealStoreDir() + std::string(path, storeDir.size()), sink); } +const string LocalFSStore::drvsLogDir = "drvs"; + + + +std::shared_ptr<std::string> LocalFSStore::getBuildLog(const Path & path_) +{ + auto path(path_); + + assertStorePath(path); + + + if (!isDerivation(path)) { + try { + path = queryPathInfo(path)->deriver; + } catch (InvalidPath &) { + return nullptr; + } + if (path == "") return nullptr; + } + + string baseName = baseNameOf(path); + + for (int j = 0; j < 2; j++) { + + Path logPath = + j == 0 + ? fmt("%s/%s/%s/%s", logDir, drvsLogDir, string(baseName, 0, 2), string(baseName, 2)) + : fmt("%s/%s/%s", logDir, drvsLogDir, baseName); + Path logBz2Path = logPath + ".bz2"; + + if (pathExists(logPath)) + return std::make_shared<std::string>(readFile(logPath)); + + else if (pathExists(logBz2Path)) { + try { + return decompress("bzip2", readFile(logBz2Path)); + } catch (Error &) { } + } + + } + + return nullptr; +} + } diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index afcda6e2be7b..9111a45f8869 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -38,13 +38,14 @@ namespace nix { LocalStore::LocalStore(const Params & params) : Store(params) , LocalFSStore(params) - , realStoreDir(get(params, "real", rootDir != "" ? rootDir + "/nix/store" : storeDir)) + , realStoreDir_{this, false, rootDir != "" ? 
rootDir + "/nix/store" : storeDir, "real", + "physical path to the Nix store"} + , realStoreDir(realStoreDir_) , dbDir(stateDir + "/db") , linksDir(realStoreDir + "/.links") , reservedPath(dbDir + "/reserved") , schemaPath(dbDir + "/schema") , trashDir(realStoreDir + "/trash") - , requireSigs(trim(settings.signedBinaryCaches) != "") // FIXME: rename option , publicKeys(getDefaultPublicKeys()) { auto state(_state.lock()); @@ -519,6 +520,8 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & uint64_t LocalStore::addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs) { + assert(info.ca == "" || info.isContentAddressed(*this)); + state.stmtRegisterValidPath.use() (info.path) ("sha256:" + printHash(info.narHash)) @@ -667,7 +670,7 @@ bool LocalStore::isValidPathUncached(const Path & path) } -PathSet LocalStore::queryValidPaths(const PathSet & paths) +PathSet LocalStore::queryValidPaths(const PathSet & paths, bool maybeSubstitute) { PathSet res; for (auto & i : paths) @@ -918,7 +921,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> & info.path % info.narHash.to_string() % h.to_string()); if (requireSigs && !dontCheckSigs && !info.checkSignatures(*this, publicKeys)) - throw Error(format("cannot import path ‘%s’ because it lacks a valid signature") % info.path); + throw Error("cannot add path ‘%s’ because it lacks a valid signature", info.path); addTempRoot(info.path); @@ -1002,7 +1005,7 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name, info.narHash = hash.first; info.narSize = hash.second; info.ultimate = true; - info.ca = "fixed:" + (recursive ? (std::string) "r:" : "") + h.to_string(); + info.ca = makeFixedOutputCA(recursive, h); registerValidPath(info); } diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 511209d8404a..f2c40e96464b 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -21,22 +21,14 @@ namespace nix { const int nixSchemaVersion = 10; -extern string drvsLogDir; - - struct Derivation; struct OptimiseStats { - unsigned long filesLinked; - unsigned long long bytesFreed; - unsigned long long blocksFreed; - OptimiseStats() - { - filesLinked = 0; - bytesFreed = blocksFreed = 0; - } + unsigned long filesLinked = 0; + unsigned long long bytesFreed = 0; + unsigned long long blocksFreed = 0; }; @@ -75,6 +67,8 @@ private: public: + PathSetting realStoreDir_; + const Path realStoreDir; const Path dbDir; const Path linksDir; @@ -84,7 +78,9 @@ public: private: - bool requireSigs; + Setting<bool> requireSigs{(Store*) this, + settings.signedBinaryCaches != "", // FIXME + "require-sigs", "whether store paths should have a trusted signature on import"}; PublicKeys publicKeys; @@ -102,7 +98,7 @@ public: bool isValidPathUncached(const Path & path) override; - PathSet queryValidPaths(const PathSet & paths) override; + PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override; PathSet queryAllValidPaths() override; diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 13b67b81f35e..180a936edb85 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -106,25 +106,27 @@ public: "select * from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))"); /* Periodically purge expired entries from the database. 
*/ - auto now = time(0); - - SQLiteStmt queryLastPurge(state->db, "select value from LastPurge"); - auto queryLastPurge_(queryLastPurge.use()); - - if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) { - SQLiteStmt(state->db, - "delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))") - .use() - (now - ttlNegative) - (now - ttlPositive) - .exec(); - - debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db)); - - SQLiteStmt(state->db, - "insert or replace into LastPurge(dummy, value) values ('', ?)") - .use()(now).exec(); - } + retrySQLite<void>([&]() { + auto now = time(0); + + SQLiteStmt queryLastPurge(state->db, "select value from LastPurge"); + auto queryLastPurge_(queryLastPurge.use()); + + if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) { + SQLiteStmt(state->db, + "delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))") + .use() + (now - ttlNegative) + (now - ttlPositive) + .exec(); + + debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db)); + + SQLiteStmt(state->db, + "insert or replace into LastPurge(dummy, value) values ('', ?)") + .use()(now).exec(); + } + }); } Cache & getCache(State & state, const std::string & uri) @@ -136,114 +138,123 @@ public: void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override { - auto state(_state.lock()); + retrySQLite<void>([&]() { + auto state(_state.lock()); - // FIXME: race + // FIXME: race - state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec(); - assert(sqlite3_changes(state->db) == 1); - state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority}; + state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec(); + assert(sqlite3_changes(state->db) == 1); + state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority}; + }); } bool cacheExists(const std::string & uri, bool & wantMassQuery, int & priority) override { - auto state(_state.lock()); + return retrySQLite<bool>([&]() { + auto state(_state.lock()); - auto i = state->caches.find(uri); - if (i == state->caches.end()) { - auto queryCache(state->queryCache.use()(uri)); - if (!queryCache.next()) return false; - state->caches.emplace(uri, - Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)}); - } + auto i = state->caches.find(uri); + if (i == state->caches.end()) { + auto queryCache(state->queryCache.use()(uri)); + if (!queryCache.next()) return false; + state->caches.emplace(uri, + Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)}); + } - auto & cache(getCache(*state, uri)); + auto & cache(getCache(*state, uri)); - wantMassQuery = cache.wantMassQuery; - priority = cache.priority; + wantMassQuery = cache.wantMassQuery; + priority = cache.priority; - return true; + return true; + }); } std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo( const std::string & uri, const std::string & hashPart) override { - auto state(_state.lock()); + return retrySQLite<std::pair<Outcome, std::shared_ptr<NarInfo>>>( + [&]() -> std::pair<Outcome, std::shared_ptr<NarInfo>> { + auto state(_state.lock()); + + auto & cache(getCache(*state, uri)); - auto & cache(getCache(*state, uri)); - - auto now = time(0); - - auto 
queryNAR(state->queryNAR.use() - (cache.id) - (hashPart) - (now - ttlNegative) - (now - ttlPositive)); - - if (!queryNAR.next()) - return {oUnknown, 0}; - - if (!queryNAR.getInt(13)) - return {oInvalid, 0}; - - auto narInfo = make_ref<NarInfo>(); - - auto namePart = queryNAR.getStr(2); - narInfo->path = cache.storeDir + "/" + - hashPart + (namePart.empty() ? "" : "-" + namePart); - narInfo->url = queryNAR.getStr(3); - narInfo->compression = queryNAR.getStr(4); - if (!queryNAR.isNull(5)) - narInfo->fileHash = parseHash(queryNAR.getStr(5)); - narInfo->fileSize = queryNAR.getInt(6); - narInfo->narHash = parseHash(queryNAR.getStr(7)); - narInfo->narSize = queryNAR.getInt(8); - for (auto & r : tokenizeString<Strings>(queryNAR.getStr(9), " ")) - narInfo->references.insert(cache.storeDir + "/" + r); - if (!queryNAR.isNull(10)) - narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(10); - for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(11), " ")) - narInfo->sigs.insert(sig); - - return {oValid, narInfo}; + auto now = time(0); + + auto queryNAR(state->queryNAR.use() + (cache.id) + (hashPart) + (now - ttlNegative) + (now - ttlPositive)); + + if (!queryNAR.next()) + return {oUnknown, 0}; + + if (!queryNAR.getInt(13)) + return {oInvalid, 0}; + + auto narInfo = make_ref<NarInfo>(); + + auto namePart = queryNAR.getStr(2); + narInfo->path = cache.storeDir + "/" + + hashPart + (namePart.empty() ? "" : "-" + namePart); + narInfo->url = queryNAR.getStr(3); + narInfo->compression = queryNAR.getStr(4); + if (!queryNAR.isNull(5)) + narInfo->fileHash = parseHash(queryNAR.getStr(5)); + narInfo->fileSize = queryNAR.getInt(6); + narInfo->narHash = parseHash(queryNAR.getStr(7)); + narInfo->narSize = queryNAR.getInt(8); + for (auto & r : tokenizeString<Strings>(queryNAR.getStr(9), " ")) + narInfo->references.insert(cache.storeDir + "/" + r); + if (!queryNAR.isNull(10)) + narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(10); + for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(11), " ")) + narInfo->sigs.insert(sig); + + return {oValid, narInfo}; + }); } void upsertNarInfo( const std::string & uri, const std::string & hashPart, std::shared_ptr<ValidPathInfo> info) override { - auto state(_state.lock()); - - auto & cache(getCache(*state, uri)); - - if (info) { - - auto narInfo = std::dynamic_pointer_cast<NarInfo>(info); - - assert(hashPart == storePathToHash(info->path)); - - state->insertNAR.use() - (cache.id) - (hashPart) - (storePathToName(info->path)) - (narInfo ? narInfo->url : "", narInfo != 0) - (narInfo ? narInfo->compression : "", narInfo != 0) - (narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash) - (narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize) - (info->narHash.to_string()) - (info->narSize) - (concatStringsSep(" ", info->shortRefs())) - (info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "") - (concatStringsSep(" ", info->sigs)) - (time(0)).exec(); - - } else { - state->insertMissingNAR.use() - (cache.id) - (hashPart) - (time(0)).exec(); - } + retrySQLite<void>([&]() { + auto state(_state.lock()); + + auto & cache(getCache(*state, uri)); + + if (info) { + + auto narInfo = std::dynamic_pointer_cast<NarInfo>(info); + + assert(hashPart == storePathToHash(info->path)); + + state->insertNAR.use() + (cache.id) + (hashPart) + (storePathToName(info->path)) + (narInfo ? narInfo->url : "", narInfo != 0) + (narInfo ? narInfo->compression : "", narInfo != 0) + (narInfo && narInfo->fileHash ? 
narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash) + (narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize) + (info->narHash.to_string()) + (info->narSize) + (concatStringsSep(" ", info->shortRefs())) + (info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "") + (concatStringsSep(" ", info->sigs)) + (time(0)).exec(); + + } else { + state->insertMissingNAR.use() + (cache.id) + (hashPart) + (time(0)).exec(); + } + }); } }; diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index 201cac671a55..d1042c6de25e 100644 --- a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -59,9 +59,11 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & } } else if (name == "Deriver") { - auto p = store.storeDir + "/" + value; - if (!store.isStorePath(p)) corrupt(); - deriver = p; + if (value != "unknown-deriver") { + auto p = store.storeDir + "/" + value; + if (!store.isStorePath(p)) corrupt(); + deriver = p; + } } else if (name == "System") system = value; diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 42c09ec7e0b6..e1df137e4dbe 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -40,21 +40,34 @@ template PathSet readStorePaths(Store & store, Source & from); template Paths readStorePaths(Store & store, Source & from); /* TODO: Separate these store impls into different files, give them better names */ -RemoteStore::RemoteStore(const Params & params, size_t maxConnections) +RemoteStore::RemoteStore(const Params & params) : Store(params) , connections(make_ref<Pool<Connection>>( - maxConnections, - [this]() { return openConnection(); }, + std::max(1, (int) maxConnections), + [this]() { return openConnectionWrapper(); }, [](const ref<Connection> & r) { return r->to.good() && r->from.good(); } )) { } -UDSRemoteStore::UDSRemoteStore(const Params & params, size_t maxConnections) +ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper() +{ + if (failed) + throw Error("opening a connection to remote store ‘%s’ previously failed", getUri()); + try { + return openConnection(); + } catch (...) 
{ + failed = true; + throw; + } +} + + +UDSRemoteStore::UDSRemoteStore(const Params & params) : Store(params) , LocalFSStore(params) - , RemoteStore(params, maxConnections) + , RemoteStore(params) { } @@ -108,7 +121,7 @@ void RemoteStore::initConnection(Connection & conn) unsigned int magic = readInt(conn.from); if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch"); - conn.daemonVersion = readInt(conn.from); + conn.from >> conn.daemonVersion; if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION)) throw Error("Nix daemon protocol version not supported"); if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10) @@ -129,7 +142,7 @@ void RemoteStore::initConnection(Connection & conn) conn.processStderr(); } catch (Error & e) { - throw Error(format("cannot start daemon worker: %1%") % e.msg()); + throw Error("cannot open connection to remote store ‘%s’: %s", getUri(), e.what()); } setOptions(conn); @@ -170,12 +183,11 @@ bool RemoteStore::isValidPathUncached(const Path & path) auto conn(connections->get()); conn->to << wopIsValidPath << path; conn->processStderr(); - unsigned int reply = readInt(conn->from); - return reply != 0; + return readInt(conn->from); } -PathSet RemoteStore::queryValidPaths(const PathSet & paths) +PathSet RemoteStore::queryValidPaths(const PathSet & paths, bool maybeSubstitute) { auto conn(connections->get()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { @@ -246,8 +258,8 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, conn->to << wopQuerySubstitutablePathInfos << paths; conn->processStderr(); - unsigned int count = readInt(conn->from); - for (unsigned int n = 0; n < count; n++) { + size_t count = readNum<size_t>(conn->from); + for (size_t n = 0; n < count; n++) { Path path = readStorePath(*this, conn->from); SubstitutablePathInfo & info(infos[path]); info.deriver = readString(conn->from); @@ -277,7 +289,7 @@ void RemoteStore::queryPathInfoUncached(const Path & path, throw; } if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) { - bool valid = readInt(conn->from) != 0; + bool valid; conn->from >> valid; if (!valid) throw InvalidPath(format("path ‘%s’ is not valid") % path); } auto info = std::make_shared<ValidPathInfo>(); @@ -286,12 +298,11 @@ void RemoteStore::queryPathInfoUncached(const Path & path, if (info->deriver != "") assertStorePath(info->deriver); info->narHash = parseHash(htSHA256, readString(conn->from)); info->references = readStorePaths<PathSet>(*this, conn->from); - info->registrationTime = readInt(conn->from); - info->narSize = readLongLong(conn->from); + conn->from >> info->registrationTime >> info->narSize; if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) { - info->ultimate = readInt(conn->from) != 0; + conn->from >> info->ultimate; info->sigs = readStrings<StringSet>(conn->from); - info->ca = readString(conn->from); + conn->from >> info->ca; } return info; }); @@ -380,8 +391,9 @@ void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string> conn->to << wopAddToStoreNar << info.path << info.deriver << printHash(info.narHash) << info.references << info.registrationTime << info.narSize - << info.ultimate << info.sigs << *nar << repair << dontCheckSigs; - // FIXME: don't send nar as a string + << info.ultimate << info.sigs << info.ca + << repair << dontCheckSigs; + conn->to(*nar); conn->processStderr(); } } @@ -515,7 +527,7 @@ Roots RemoteStore::findRoots() auto conn(connections->get()); conn->to << wopFindRoots; conn->processStderr(); - unsigned int count = readInt(conn->from); + 
size_t count = readNum<size_t>(conn->from); Roots result; while (count--) { Path link = readString(conn->from); @@ -563,7 +575,7 @@ bool RemoteStore::verifyStore(bool checkContents, bool repair) auto conn(connections->get()); conn->to << wopVerifyStore << checkContents << repair; conn->processStderr(); - return readInt(conn->from) != 0; + return readInt(conn->from); } @@ -576,6 +588,31 @@ void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs) } +void RemoteStore::queryMissing(const PathSet & targets, + PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown, + unsigned long long & downloadSize, unsigned long long & narSize) +{ + { + auto conn(connections->get()); + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19) + // Don't hold the connection handle in the fallback case + // to prevent a deadlock. + goto fallback; + conn->to << wopQueryMissing << targets; + conn->processStderr(); + willBuild = readStorePaths<PathSet>(*this, conn->from); + willSubstitute = readStorePaths<PathSet>(*this, conn->from); + unknown = readStorePaths<PathSet>(*this, conn->from); + conn->from >> downloadSize >> narSize; + return; + } + + fallback: + return Store::queryMissing(targets, willBuild, willSubstitute, + unknown, downloadSize, narSize); +} + + RemoteStore::Connection::~Connection() { try { @@ -599,7 +636,7 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source) } else if (msg == STDERR_READ) { if (!source) throw Error("no source"); - size_t len = readInt(from); + size_t len = readNum<size_t>(from); auto buf = std::make_unique<unsigned char[]>(len); writeString(buf.get(), source->read(buf.get(), len), to); to.flush(); diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 40f17da300d0..479cf3a7909d 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -22,13 +22,16 @@ class RemoteStore : public virtual Store { public: - RemoteStore(const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max()); + const Setting<int> maxConnections{(Store*) this, 1, + "max-connections", "maximum number of concurrent connections to the Nix daemon"}; + + RemoteStore(const Params & params); /* Implementations of abstract store API methods. 
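
The maxConnections field added to RemoteStore above is an ordinary Setting, so it can be supplied as a store parameter at open time. A minimal sketch, assuming the openStore(uri, params) overload introduced later in this patch and the usual ‘daemon’ URI; the function name and the value are illustrative only:

    #include "store-api.hh"

    using namespace nix;

    // Open the local daemon store, capping the connection pool at four
    // concurrent worker connections (the value is parsed by Setting<int>::set()).
    ref<Store> openLimitedDaemon()
    {
        Store::Params params;
        params["max-connections"] = "4";
        return openStore("daemon", params);
    }
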
*/ bool isValidPathUncached(const Path & path) override; - PathSet queryValidPaths(const PathSet & paths) override; + PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override; PathSet queryAllValidPaths() override; @@ -85,6 +88,10 @@ public: void addSignatures(const Path & storePath, const StringSet & sigs) override; + void queryMissing(const PathSet & targets, + PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown, + unsigned long long & downloadSize, unsigned long long & narSize) override; + protected: struct Connection @@ -98,6 +105,8 @@ protected: void processStderr(Sink * sink = 0, Source * source = 0); }; + ref<Connection> openConnectionWrapper(); + virtual ref<Connection> openConnection() = 0; void initConnection(Connection & conn); @@ -106,6 +115,8 @@ protected: private: + std::atomic_bool failed{false}; + void setOptions(Connection & conn); }; @@ -113,7 +124,7 @@ class UDSRemoteStore : public LocalFSStore, public RemoteStore { public: - UDSRemoteStore(const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max()); + UDSRemoteStore(const Params & params); std::string getUri() override; diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 041c68c6816f..245455296013 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -5,6 +5,9 @@ #include "nar-info.hh" #include "nar-info-disk-cache.hh" #include "globals.hh" +#include "compression.hh" +#include "download.hh" +#include "istringstream_nocopy.hh" #include <aws/core/Aws.h> #include <aws/core/client/ClientConfiguration.h> @@ -52,8 +55,8 @@ static void initAWS() }); } -S3Helper::S3Helper() - : config(makeConfig()) +S3Helper::S3Helper(const string & region) + : config(makeConfig(region)) , client(make_ref<Aws::S3::S3Client>(*config)) { } @@ -70,13 +73,14 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy } }; -ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig() +ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region) { initAWS(); auto res = make_ref<Aws::Client::ClientConfiguration>(); - res->region = Aws::Region::US_EAST_1; // FIXME: make configurable + res->region = region; res->requestTimeoutMs = 600 * 1000; res->retryStrategy = std::make_shared<RetryStrategy>(); + res->caFile = settings.caFile; return res; } @@ -103,8 +107,10 @@ S3Helper::DownloadResult S3Helper::getObject( auto result = checkAws(fmt("AWS error fetching ‘%s’", key), client->GetObject(request)); - res.data = std::make_shared<std::string>( - dynamic_cast<std::stringstream &>(result.GetBody()).str()); + res.data = decodeContent( + result.GetContentEncoding(), + make_ref<std::string>( + dynamic_cast<std::stringstream &>(result.GetBody()).str())); } catch (S3Error & e) { if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw; @@ -117,19 +123,13 @@ S3Helper::DownloadResult S3Helper::getObject( return res; } -#if __linux__ - -struct istringstream_nocopy : public std::stringstream -{ - istringstream_nocopy(const std::string & s) - { - rdbuf()->pubsetbuf( - (char *) s.data(), s.size()); - } -}; - struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore { + const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}}; + const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"}; + const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"}; + const 
Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"}; + std::string bucketName; Stats stats; @@ -140,6 +140,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore const Params & params, const std::string & bucketName) : S3BinaryCacheStore(params) , bucketName(bucketName) + , s3Helper(region) { diskCache = getNarInfoDiskCache(); } @@ -218,13 +219,20 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore return true; } - void upsertFile(const std::string & path, const std::string & data) override + void uploadFile(const std::string & path, const std::string & data, + const std::string & mimeType, + const std::string & contentEncoding) { auto request = Aws::S3::Model::PutObjectRequest() .WithBucket(bucketName) .WithKey(path); + request.SetContentType(mimeType); + + if (contentEncoding != "") + request.SetContentEncoding(contentEncoding); + auto stream = std::make_shared<istringstream_nocopy>(data); request.SetBody(stream); @@ -247,6 +255,19 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore stats.putTimeMs += duration; } + void upsertFile(const std::string & path, const std::string & data, + const std::string & mimeType) override + { + if (narinfoCompression != "" && hasSuffix(path, ".narinfo")) + uploadFile(path, *compress(narinfoCompression, data), mimeType, narinfoCompression); + else if (lsCompression != "" && hasSuffix(path, ".ls")) + uploadFile(path, *compress(lsCompression, data), mimeType, lsCompression); + else if (logCompression != "" && hasPrefix(path, "log/")) + uploadFile(path, *compress(logCompression, data), mimeType, logCompression); + else + uploadFile(path, data, mimeType, ""); + } + void getFile(const std::string & path, std::function<void(std::shared_ptr<std::string>)> success, std::function<void(std::exception_ptr exc)> failure) override @@ -313,8 +334,6 @@ static RegisterStoreImplementation regStore([]( return store; }); -#endif - } #endif diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh index 5d5d3475c449..08a7fbf96e98 100644 --- a/src/libstore/s3.hh +++ b/src/libstore/s3.hh @@ -14,9 +14,9 @@ struct S3Helper ref<Aws::Client::ClientConfiguration> config; ref<Aws::S3::S3Client> client; - S3Helper(); + S3Helper(const std::string & region); - ref<Aws::Client::ClientConfiguration> makeConfig(); + ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region); struct DownloadResult { diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc index 0197b091cd12..a81e62dbd6eb 100644 --- a/src/libstore/sqlite.cc +++ b/src/libstore/sqlite.cc @@ -3,36 +3,25 @@ #include <sqlite3.h> +#include <atomic> + namespace nix { [[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f) { int err = sqlite3_errcode(db); + + auto path = sqlite3_db_filename(db, nullptr); + if (!path) path = "(in-memory)"; + if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) { - if (err == SQLITE_PROTOCOL) - printError("warning: SQLite database is busy (SQLITE_PROTOCOL)"); - else { - static bool warned = false; - if (!warned) { - printError("warning: SQLite database is busy"); - warned = true; - } - } - /* Sleep for a while since retrying the transaction right away - is likely to fail again. */ - checkInterrupt(); -#if HAVE_NANOSLEEP - struct timespec t; - t.tv_sec = 0; - t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */ - nanosleep(&t, 0); -#else - sleep(1); -#endif - throw SQLiteBusy(format("%1%: %2%") % f.str() % sqlite3_errmsg(db)); + throw SQLiteBusy( + err == SQLITE_PROTOCOL + ? 
fmt("SQLite database ‘%s’ is busy (SQLITE_PROTOCOL)", path) + : fmt("SQLite database ‘%s’ is busy", path)); } else - throw SQLiteError(format("%1%: %2%") % f.str() % sqlite3_errmsg(db)); + throw SQLiteError("%s: %s (in ‘%s’)", f.str(), sqlite3_errstr(err), path); } SQLite::SQLite(const Path & path) @@ -54,24 +43,27 @@ SQLite::~SQLite() void SQLite::exec(const std::string & stmt) { - if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK) - throwSQLiteError(db, format("executing SQLite statement ‘%s’") % stmt); + retrySQLite<void>([&]() { + if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK) + throwSQLiteError(db, format("executing SQLite statement ‘%s’") % stmt); + }); } -void SQLiteStmt::create(sqlite3 * db, const string & s) +void SQLiteStmt::create(sqlite3 * db, const string & sql) { checkInterrupt(); assert(!stmt); - if (sqlite3_prepare_v2(db, s.c_str(), -1, &stmt, 0) != SQLITE_OK) - throwSQLiteError(db, "creating statement"); + if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, 0) != SQLITE_OK) + throwSQLiteError(db, fmt("creating statement ‘%s’", sql)); this->db = db; + this->sql = sql; } SQLiteStmt::~SQLiteStmt() { try { if (stmt && sqlite3_finalize(stmt) != SQLITE_OK) - throwSQLiteError(db, "finalizing statement"); + throwSQLiteError(db, fmt("finalizing statement ‘%s’", sql)); } catch (...) { ignoreException(); } @@ -128,14 +120,14 @@ void SQLiteStmt::Use::exec() int r = step(); assert(r != SQLITE_ROW); if (r != SQLITE_DONE) - throwSQLiteError(stmt.db, "executing SQLite statement"); + throwSQLiteError(stmt.db, fmt("executing SQLite statement ‘%s’", stmt.sql)); } bool SQLiteStmt::Use::next() { int r = step(); if (r != SQLITE_DONE && r != SQLITE_ROW) - throwSQLiteError(stmt.db, "executing SQLite query"); + throwSQLiteError(stmt.db, fmt("executing SQLite query ‘%s’", stmt.sql)); return r == SQLITE_ROW; } @@ -182,4 +174,24 @@ SQLiteTxn::~SQLiteTxn() } } +void handleSQLiteBusy(const SQLiteBusy & e) +{ + static std::atomic<time_t> lastWarned{0}; + + time_t now = time(0); + + if (now > lastWarned + 10) { + lastWarned = now; + printError("warning: %s", e.what()); + } + + /* Sleep for a while since retrying the transaction right away + is likely to fail again. */ + checkInterrupt(); + struct timespec t; + t.tv_sec = 0; + t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */ + nanosleep(&t, 0); +} + } diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh index 4d347a2e56ab..14a7a0dd8996 100644 --- a/src/libstore/sqlite.hh +++ b/src/libstore/sqlite.hh @@ -30,8 +30,9 @@ struct SQLiteStmt { sqlite3 * db = 0; sqlite3_stmt * stmt = 0; + std::string sql; SQLiteStmt() { } - SQLiteStmt(sqlite3 * db, const std::string & s) { create(db, s); } + SQLiteStmt(sqlite3 * db, const std::string & sql) { create(db, sql); } void create(sqlite3 * db, const std::string & s); ~SQLiteStmt(); operator sqlite3_stmt * () { return stmt; } @@ -94,6 +95,8 @@ MakeError(SQLiteBusy, SQLiteError); [[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f); +void handleSQLiteBusy(const SQLiteBusy & e); + /* Convenience function for retrying a SQLite transaction when the database is busy. 
*/ template<typename T> @@ -103,6 +106,7 @@ T retrySQLite(std::function<T()> fun) try { return fun(); } catch (SQLiteBusy & e) { + handleSQLiteBusy(e); } } } diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index 6f1862afa899..bb536fadfd51 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -4,18 +4,36 @@ #include "archive.hh" #include "worker-protocol.hh" #include "pool.hh" +#include "ssh.hh" namespace nix { -static std::string uriScheme = "ssh://"; +static std::string uriScheme = "ssh-ng://"; class SSHStore : public RemoteStore { public: - SSHStore(string host, const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max()); + const Setting<Path> sshKey{(Store*) this, "", "ssh-key", "path to an SSH private key"}; + const Setting<bool> compress{(Store*) this, false, "compress", "whether to compress the connection"}; + + SSHStore(const std::string & host, const Params & params) + : Store(params) + , RemoteStore(params) + , host(host) + , master( + host, + sshKey, + // Use SSH master only if using more than 1 connection. + connections->capacity() > 1, + compress) + { + } - std::string getUri() override; + std::string getUri() override + { + return uriScheme + host; + } void narFromPath(const Path & path, Sink & sink) override; @@ -25,43 +43,16 @@ private: struct Connection : RemoteStore::Connection { - Pid sshPid; - AutoCloseFD out; - AutoCloseFD in; + std::unique_ptr<SSHMaster::Connection> sshConn; }; ref<RemoteStore::Connection> openConnection() override; - AutoDelete tmpDir; - - Path socketPath; - - Pid sshMaster; - - string host; - - Path key; + std::string host; - bool compress; + SSHMaster master; }; -SSHStore::SSHStore(string host, const Params & params, size_t maxConnections) - : Store(params) - , RemoteStore(params, maxConnections) - , tmpDir(createTempDir("", "nix", true, true, 0700)) - , socketPath((Path) tmpDir + "/ssh.sock") - , host(std::move(host)) - , key(get(params, "ssh-key", "")) - , compress(get(params, "compress", "") == "true") -{ - /* open a connection and perform the handshake to verify all is well */ - connections->get(); -} - -string SSHStore::getUri() -{ - return uriScheme + host; -} class ForwardSource : public Source { @@ -94,35 +85,10 @@ ref<FSAccessor> SSHStore::getFSAccessor() ref<RemoteStore::Connection> SSHStore::openConnection() { - if ((pid_t) sshMaster == -1) { - sshMaster = startProcess([&]() { - restoreSignals(); - if (key.empty()) - execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), host.c_str(), NULL); - else - execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), "-i", key.c_str(), host.c_str(), NULL); - throw SysError("starting ssh master"); - }); - } - auto conn = make_ref<Connection>(); - Pipe in, out; - in.create(); - out.create(); - conn->sshPid = startProcess([&]() { - if (dup2(in.readSide.get(), STDIN_FILENO) == -1) - throw SysError("duping over STDIN"); - if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) - throw SysError("duping over STDOUT"); - execlp("ssh", "ssh", "-S", socketPath.c_str(), host.c_str(), "nix-daemon", "--stdio", NULL); - throw SysError("executing nix-daemon --stdio over ssh"); - }); - in.readSide = -1; - out.writeSide = -1; - conn->out = std::move(out.readSide); - conn->in = std::move(in.writeSide); - conn->to = FdSink(conn->in.get()); - conn->from = FdSource(conn->out.get()); + conn->sshConn = master.startCommand("nix-daemon --stdio"); + conn->to = FdSink(conn->sshConn->in.get()); + conn->from = FdSource(conn->sshConn->out.get()); 
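
Stepping back to sqlite.hh for a moment: the retrySQLite<T>() / handleSQLiteBusy() pair defined above is meant to wrap an entire database interaction so that SQLITE_BUSY simply restarts it after a short sleep. A minimal sketch of the intended call pattern; the statement, its binding and the function name are hypothetical:

    #include "sqlite.hh"

    using namespace nix;

    // Returns whether ‘path’ has a row behind some prepared statement,
    // transparently retrying if the database is busy.
    bool pathKnown(SQLiteStmt & queryPath, const std::string & path)
    {
        return retrySQLite<bool>([&]() {
            return queryPath.use()(path).next();
        });
    }
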
initConnection(*conn); return conn; } diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc new file mode 100644 index 000000000000..e54f3f4ba284 --- /dev/null +++ b/src/libstore/ssh.cc @@ -0,0 +1,102 @@ +#include "ssh.hh" + +namespace nix { + +void SSHMaster::addCommonSSHOpts(Strings & args) +{ + for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS"))) + args.push_back(i); + if (!keyFile.empty()) + args.insert(args.end(), {"-i", keyFile}); + if (compress) + args.push_back("-C"); +} + +std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string & command) +{ + Path socketPath = startMaster(); + + Pipe in, out; + in.create(); + out.create(); + + auto conn = std::make_unique<Connection>(); + conn->sshPid = startProcess([&]() { + restoreSignals(); + + close(in.writeSide.get()); + close(out.readSide.get()); + + if (dup2(in.readSide.get(), STDIN_FILENO) == -1) + throw SysError("duping over stdin"); + if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) + throw SysError("duping over stdout"); + + Strings args = { "ssh", host.c_str(), "-x", "-a" }; + addCommonSSHOpts(args); + if (socketPath != "") + args.insert(args.end(), {"-S", socketPath}); + args.push_back(command); + execvp(args.begin()->c_str(), stringsToCharPtrs(args).data()); + + throw SysError("executing ‘%s’ on ‘%s’", command, host); + }); + + + in.readSide = -1; + out.writeSide = -1; + + conn->out = std::move(out.readSide); + conn->in = std::move(in.writeSide); + + return conn; +} + +Path SSHMaster::startMaster() +{ + if (!useMaster) return ""; + + auto state(state_.lock()); + + if (state->sshMaster != -1) return state->socketPath; + + state->tmpDir = std::make_unique<AutoDelete>(createTempDir("", "nix", true, true, 0700)); + + state->socketPath = (Path) *state->tmpDir + "/ssh.sock"; + + Pipe out; + out.create(); + + state->sshMaster = startProcess([&]() { + restoreSignals(); + + close(out.readSide.get()); + + if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) + throw SysError("duping over stdout"); + + Strings args = + { "ssh", host.c_str(), "-M", "-N", "-S", state->socketPath + , "-o", "LocalCommand=echo started" + , "-o", "PermitLocalCommand=yes" + }; + addCommonSSHOpts(args); + execvp(args.begin()->c_str(), stringsToCharPtrs(args).data()); + + throw SysError("starting SSH master"); + }); + + out.writeSide = -1; + + std::string reply; + try { + reply = readLine(out.readSide.get()); + } catch (EndOfFile & e) { } + + if (reply != "started") + throw Error("failed to start SSH master connection to ‘%s’", host); + + return state->socketPath; +} + +} diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh new file mode 100644 index 000000000000..b4396467e54e --- /dev/null +++ b/src/libstore/ssh.hh @@ -0,0 +1,49 @@ +#pragma once + +#include "util.hh" +#include "sync.hh" + +namespace nix { + +class SSHMaster +{ +private: + + const std::string host; + const std::string keyFile; + const bool useMaster; + const bool compress; + + struct State + { + Pid sshMaster; + std::unique_ptr<AutoDelete> tmpDir; + Path socketPath; + }; + + Sync<State> state_; + + void addCommonSSHOpts(Strings & args); + +public: + + SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress) + : host(host) + , keyFile(keyFile) + , useMaster(useMaster) + , compress(compress) + { + } + + struct Connection + { + Pid sshPid; + AutoCloseFD out, in; + }; + + std::unique_ptr<Connection> startCommand(const std::string & command); + + Path startMaster(); +}; + +} diff --git a/src/libstore/store-api.cc 
b/src/libstore/store-api.cc index 603424277519..514d1c2ff8b3 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -241,7 +241,8 @@ Path Store::computeStorePathForText(const string & name, const string & s, Store::Store(const Params & params) - : storeDir(get(params, "store", settings.nixStore)) + : Config(params) + , state({(size_t) pathInfoCacheSize}) { } @@ -377,7 +378,7 @@ void Store::queryPathInfo(const Path & storePath, } -PathSet Store::queryValidPaths(const PathSet & paths) +PathSet Store::queryValidPaths(const PathSet & paths, bool maybeSubstitute) { struct State { @@ -550,6 +551,8 @@ void copyClosure(ref<Store> srcStore, ref<Store> dstStore, for (auto & path : storePaths) srcStore->computeFSClosure(path, closure); + // FIXME: use copyStorePaths() + PathSet valid = dstStore->queryValidPaths(closure); if (valid.size() == closure.size()) return; @@ -676,6 +679,12 @@ Strings ValidPathInfo::shortRefs() const } +std::string makeFixedOutputCA(bool recursive, const Hash & hash) +{ + return "fixed:" + (recursive ? (std::string) "r:" : "") + hash.to_string(); +} + + } @@ -702,10 +711,17 @@ ref<Store> openStore(const std::string & uri_) } uri = uri_.substr(0, q); } + return openStore(uri, params); +} +ref<Store> openStore(const std::string & uri, const Store::Params & params) +{ for (auto fun : *RegisterStoreImplementation::implementations) { auto store = fun(uri, params); - if (store) return ref<Store>(store); + if (store) { + store->warnUnused(); + return ref<Store>(store); + } } throw Error(format("don't know how to open Nix store ‘%s’") % uri); @@ -718,7 +734,7 @@ StoreType getStoreType(const std::string & uri, const std::string & stateDir) return tDaemon; } else if (uri == "local") { return tLocal; - } else if (uri == "") { + } else if (uri == "" || uri == "auto") { if (access(stateDir.c_str(), R_OK | W_OK) == 0) return tLocal; else if (pathExists(settings.nixDaemonSocketFile)) @@ -781,37 +797,25 @@ std::list<ref<Store>> getDefaultSubstituters() } -void copyPaths(ref<Store> from, ref<Store> to, const Paths & storePaths, bool substitute) -{ - if (substitute) { - /* Filter out .drv files (we don't want to build anything). */ - PathSet paths2; - for (auto & path : storePaths) - if (!isDerivation(path)) paths2.insert(path); - unsigned long long downloadSize, narSize; - PathSet willBuild, willSubstitute, unknown; - to->queryMissing(PathSet(paths2.begin(), paths2.end()), - willBuild, willSubstitute, unknown, downloadSize, narSize); - /* FIXME: should use ensurePath(), but it only - does one path at a time. 
*/ - if (!willSubstitute.empty()) - try { - to->buildPaths(willSubstitute); - } catch (Error & e) { - printMsg(lvlError, format("warning: %1%") % e.msg()); - } - } +void copyPaths(ref<Store> from, ref<Store> to, const PathSet & storePaths, bool substitute) +{ + PathSet valid = to->queryValidPaths(storePaths, substitute); + + PathSet missing; + for (auto & path : storePaths) + if (!valid.count(path)) missing.insert(path); std::string copiedLabel = "copied"; - logger->setExpected(copiedLabel, storePaths.size()); + logger->setExpected(copiedLabel, missing.size()); ThreadPool pool; processGraph<Path>(pool, - PathSet(storePaths.begin(), storePaths.end()), + PathSet(missing.begin(), missing.end()), [&](const Path & storePath) { + if (to->isValidPath(storePath)) return PathSet(); return from->queryPathInfo(storePath)->references; }, diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index d03e70849f93..067309c9e956 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -6,6 +6,7 @@ #include "lru-cache.hh" #include "sync.hh" #include "globals.hh" +#include "config.hh" #include <atomic> #include <limits> @@ -81,12 +82,7 @@ struct GCResults /* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the number of bytes that would be or was freed. */ - unsigned long long bytesFreed; - - GCResults() - { - bytesFreed = 0; - } + unsigned long long bytesFreed = 0; }; @@ -128,7 +124,7 @@ struct ValidPathInfo of an output path of a derivation were actually produced by that derivation. In the intensional model, we have to trust that a particular output path was produced by a derivation; the - path name then implies the contents.) + path then implies the contents.) Ideally, the content-addressability assertion would just be a Boolean, and the store path would be computed from @@ -229,19 +225,23 @@ struct BuildResult }; -class Store : public std::enable_shared_from_this<Store> +class Store : public std::enable_shared_from_this<Store>, public Config { public: typedef std::map<std::string, std::string> Params; - const Path storeDir; + const PathSetting storeDir_{this, false, settings.nixStore, + "store", "path to the Nix store"}; + const Path storeDir = storeDir_; + + const Setting<int> pathInfoCacheSize{this, 65536, "path-info-cache-size", "size of the in-memory store path information cache"}; protected: struct State { - LRUCache<std::string, std::shared_ptr<ValidPathInfo>> pathInfoCache{64 * 1024}; + LRUCache<std::string, std::shared_ptr<ValidPathInfo>> pathInfoCache; }; Sync<State> state; @@ -324,8 +324,10 @@ protected: public: - /* Query which of the given paths is valid. */ - virtual PathSet queryValidPaths(const PathSet & paths); + /* Query which of the given paths is valid. Optionally, try to + substitute missing paths. */ + virtual PathSet queryValidPaths(const PathSet & paths, + bool maybeSubstitute = false); /* Query the set of all valid paths. Note that for some store backends, the name part of store paths may be omitted @@ -511,7 +513,7 @@ public: `storePath' is returned; that is, the closures under the `referrers' relation instead of the `references' relation is returned. */ - void computeFSClosure(const PathSet & paths, + virtual void computeFSClosure(const PathSet & paths, PathSet & out, bool flipDirection = false, bool includeOutputs = false, bool includeDerivers = false); @@ -522,7 +524,7 @@ public: /* Given a set of paths that are to be built, return the set of derivations that will be built, and the set of output paths that will be substituted. 
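
The rewritten copyPaths() above first asks the destination which paths are already valid (optionally letting it substitute them) and then copies only what is missing, in parallel via processGraph(). A minimal sketch of calling it between two stores; the remote URI is purely illustrative and assumes the ssh-ng scheme registered by ssh-store.cc:

    #include "store-api.hh"

    using namespace nix;

    void pushClosure(const Path & path)
    {
        auto from = openStore();                       // the local/default store
        auto to = openStore("ssh-ng://example-host");  // hypothetical remote

        PathSet closure;
        from->computeFSClosure(PathSet{path}, closure);

        // With substitute = true the destination may fetch missing paths from
        // its own substituters instead of receiving them from ‘from’.
        copyPaths(from, to, closure, true);
    }
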
*/ - void queryMissing(const PathSet & targets, + virtual void queryMissing(const PathSet & targets, PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown, unsigned long long & downloadSize, unsigned long long & narSize); @@ -566,6 +568,18 @@ public: if they lack a signature. */ virtual bool isTrusted() { return false; } + /* Return the build log of the specified store path, if available, + or null otherwise. */ + virtual std::shared_ptr<std::string> getBuildLog(const Path & path) + { return nullptr; } + + /* Hack to allow long-running processes like hydra-queue-runner to + occasionally flush their path info cache. */ + void clearPathInfoCache() + { + state.lock()->pathInfoCache.clear(); + } + protected: Stats stats; @@ -576,9 +590,20 @@ protected: class LocalFSStore : public virtual Store { public: - const Path rootDir; - const Path stateDir; - const Path logDir; + + // FIXME: the (Store*) cast works around a bug in gcc that causes + // it to emit the call to the Option constructor. Clang works fine + // either way. + const PathSetting rootDir{(Store*) this, true, "", + "root", "directory prefixed to all other paths"}; + const PathSetting stateDir{(Store*) this, false, + rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir, + "state", "directory where Nix will store state"}; + const PathSetting logDir{(Store*) this, false, + rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir, + "log", "directory where Nix will store state"}; + + const static string drvsLogDir; LocalFSStore(const Params & params); @@ -595,6 +620,8 @@ public: { return getRealStoreDir() + "/" + baseNameOf(storePath); } + + std::shared_ptr<std::string> getBuildLog(const Path & path) override; }; @@ -642,8 +669,10 @@ void removeTempRoots(); set to true *unless* you're going to collect garbage. */ ref<Store> openStore(const std::string & uri = getEnv("NIX_REMOTE")); +ref<Store> openStore(const std::string & uri, const Store::Params & params); -void copyPaths(ref<Store> from, ref<Store> to, const Paths & storePaths, bool substitute = false); + +void copyPaths(ref<Store> from, ref<Store> to, const PathSet & storePaths, bool substitute = false); enum StoreType { tDaemon, @@ -687,6 +716,11 @@ ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven = false); +/* Compute the content-addressability assertion (ValidPathInfo::ca) + for paths created by makeFixedOutputPath() / addToStore(). 
*/ +std::string makeFixedOutputCA(bool recursive, const Hash & hash); + + MakeError(SubstError, Error) MakeError(BuildError, Error) /* denotes a permanent build failure */ MakeError(InvalidPath, Error) diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index 6a4ed47cc9fa..6c6766b36124 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -6,7 +6,7 @@ namespace nix { #define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_2 0x6478696f -#define PROTOCOL_VERSION 0x112 +#define PROTOCOL_VERSION 0x113 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) @@ -47,7 +47,8 @@ typedef enum { wopBuildDerivation = 36, wopAddSignatures = 37, wopNarFromPath = 38, - wopAddToStoreNar = 39 + wopAddToStoreNar = 39, + wopQueryMissing = 40, } WorkerOp; diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh index d58b91df0461..607ebf8b28f9 100644 --- a/src/libutil/archive.hh +++ b/src/libutil/archive.hh @@ -70,6 +70,13 @@ struct ParseSink virtual void createSymlink(const Path & path, const string & target) { }; }; +struct TeeSink : ParseSink +{ + TeeSource source; + + TeeSink(Source & source) : source(source) { } +}; + void parseDump(ParseSink & sink, Source & source); void restorePath(const Path & path, Source & source); diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index a3bbb5170d9f..b0b1d709fa44 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -18,7 +18,7 @@ static ref<std::string> decompressXZ(const std::string & in) lzma_ret ret = lzma_stream_decoder( &strm, UINT64_MAX, LZMA_CONCATENATED); if (ret != LZMA_OK) - throw Error("unable to initialise lzma decoder"); + throw CompressionError("unable to initialise lzma decoder"); Finally free([&]() { lzma_end(&strm); }); @@ -48,7 +48,7 @@ static ref<std::string> decompressXZ(const std::string & in) return res; if (ret != LZMA_OK) - throw Error("error while decompressing xz file"); + throw CompressionError("error %d while decompressing xz file", ret); } } @@ -59,7 +59,7 @@ static ref<std::string> decompressBzip2(const std::string & in) int ret = BZ2_bzDecompressInit(&strm, 0, 0); if (ret != BZ_OK) - throw Error("unable to initialise bzip2 decoder"); + throw CompressionError("unable to initialise bzip2 decoder"); Finally free([&]() { BZ2_bzDecompressEnd(&strm); }); @@ -85,10 +85,19 @@ static ref<std::string> decompressBzip2(const std::string & in) return res; if (ret != BZ_OK) - throw Error("error while decompressing bzip2 file"); + throw CompressionError("error while decompressing bzip2 file"); + + if (strm.avail_in == 0) + throw CompressionError("bzip2 data ends prematurely"); } } +static ref<std::string> decompressBrotli(const std::string & in) +{ + // FIXME: use libbrotli + return make_ref<std::string>(runProgram(BRO, true, {"-d"}, {in})); +} + ref<std::string> compress(const std::string & method, const std::string & in) { StringSink ssink; @@ -106,6 +115,8 @@ ref<std::string> decompress(const std::string & method, const std::string & in) return decompressXZ(in); else if (method == "bzip2") return decompressBzip2(in); + else if (method == "br") + return decompressBrotli(in); else throw UnknownCompressionMethod(format("unknown compression method ‘%s’") % method); } @@ -130,7 +141,7 @@ struct XzSink : CompressionSink lzma_ret ret = lzma_easy_encoder( &strm, 6, LZMA_CHECK_CRC64); if (ret != LZMA_OK) - throw Error("unable to initialise lzma encoder"); + throw CompressionError("unable to initialise lzma encoder"); // 
FIXME: apply the x86 BCJ filter? strm.next_out = outbuf; @@ -139,7 +150,6 @@ struct XzSink : CompressionSink ~XzSink() { - assert(finished); lzma_end(&strm); } @@ -155,7 +165,7 @@ struct XzSink : CompressionSink lzma_ret ret = lzma_code(&strm, LZMA_FINISH); if (ret != LZMA_OK && ret != LZMA_STREAM_END) - throw Error("error while flushing xz file"); + throw CompressionError("error while flushing xz file"); if (strm.avail_out == 0 || ret == LZMA_STREAM_END) { nextSink(outbuf, sizeof(outbuf) - strm.avail_out); @@ -179,7 +189,7 @@ struct XzSink : CompressionSink lzma_ret ret = lzma_code(&strm, LZMA_RUN); if (ret != LZMA_OK) - throw Error("error while compressing xz file"); + throw CompressionError("error while compressing xz file"); if (strm.avail_out == 0) { nextSink(outbuf, sizeof(outbuf)); @@ -202,7 +212,7 @@ struct BzipSink : CompressionSink memset(&strm, 0, sizeof(strm)); int ret = BZ2_bzCompressInit(&strm, 9, 0, 30); if (ret != BZ_OK) - throw Error("unable to initialise bzip2 encoder"); + throw CompressionError("unable to initialise bzip2 encoder"); strm.next_out = outbuf; strm.avail_out = sizeof(outbuf); @@ -210,7 +220,6 @@ struct BzipSink : CompressionSink ~BzipSink() { - assert(finished); BZ2_bzCompressEnd(&strm); } @@ -226,7 +235,7 @@ struct BzipSink : CompressionSink int ret = BZ2_bzCompress(&strm, BZ_FINISH); if (ret != BZ_FINISH_OK && ret != BZ_STREAM_END) - throw Error("error while flushing bzip2 file"); + throw CompressionError("error while flushing bzip2 file"); if (strm.avail_out == 0 || ret == BZ_STREAM_END) { nextSink((unsigned char *) outbuf, sizeof(outbuf) - strm.avail_out); @@ -250,7 +259,7 @@ struct BzipSink : CompressionSink int ret = BZ2_bzCompress(&strm, BZ_RUN); if (ret != BZ_OK) - Error("error while compressing bzip2 file"); + CompressionError("error while compressing bzip2 file"); if (strm.avail_out == 0) { nextSink((unsigned char *) outbuf, sizeof(outbuf)); @@ -261,6 +270,34 @@ struct BzipSink : CompressionSink } }; +struct BrotliSink : CompressionSink +{ + Sink & nextSink; + std::string data; + + BrotliSink(Sink & nextSink) : nextSink(nextSink) + { + } + + ~BrotliSink() + { + } + + // FIXME: use libbrotli + + void finish() override + { + flush(); + nextSink(runProgram(BRO, true, {}, data)); + } + + void write(const unsigned char * data, size_t len) override + { + checkInterrupt(); + this->data.append((const char *) data, len); + } +}; + ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink) { if (method == "none") @@ -269,6 +306,8 @@ ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & next return make_ref<XzSink>(nextSink); else if (method == "bzip2") return make_ref<BzipSink>(nextSink); + else if (method == "br") + return make_ref<BrotliSink>(nextSink); else throw UnknownCompressionMethod(format("unknown compression method ‘%s’") % method); } diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh index eacf559d65e9..e3e6f5a99303 100644 --- a/src/libutil/compression.hh +++ b/src/libutil/compression.hh @@ -21,4 +21,6 @@ ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & next MakeError(UnknownCompressionMethod, Error); +MakeError(CompressionError, Error); + } diff --git a/src/libutil/config.cc b/src/libutil/config.cc new file mode 100644 index 000000000000..2f9f988607ea --- /dev/null +++ b/src/libutil/config.cc @@ -0,0 +1,112 @@ +#include "config.hh" +#include "args.hh" + +namespace nix { + +void Config::set(const std::string & name, const std::string & value) +{ + auto i 
= _settings.find(name); + if (i == _settings.end()) + throw UsageError("unknown setting '%s'", name); + i->second.setting->set(value); +} + +void Config::add(AbstractSetting * setting) +{ + _settings.emplace(setting->name, Config::SettingData{false, setting}); + for (auto & alias : setting->aliases) + _settings.emplace(alias, Config::SettingData{true, setting}); + + bool set = false; + + auto i = initials.find(setting->name); + if (i != initials.end()) { + setting->set(i->second); + initials.erase(i); + set = true; + } + + for (auto & alias : setting->aliases) { + auto i = initials.find(alias); + if (i != initials.end()) { + if (set) + warn("setting '%s' is set, but it's an alias of '%s' which is also set", + alias, setting->name); + else { + setting->set(i->second); + initials.erase(i); + set = true; + } + } + } +} + +void Config::warnUnused() +{ + for (auto & i : initials) + warn("unknown setting '%s'", i.first); +} + +std::string Config::dump() +{ + std::string res; + for (auto & opt : _settings) + if (!opt.second.isAlias) + res += opt.first + " = " + opt.second.setting->to_string() + "\n"; + return res; +} + +AbstractSetting::AbstractSetting( + const std::string & name, + const std::string & description, + const std::set<std::string> & aliases) + : name(name), description(description), aliases(aliases) +{ +} + +template<> void Setting<std::string>::set(const std::string & str) +{ + value = str; +} + +template<> std::string Setting<std::string>::to_string() +{ + return value; +} + +template<> void Setting<int>::set(const std::string & str) +{ + try { + value = std::stoi(str); + } catch (...) { + throw UsageError("setting '%s' has invalid value '%s'", name, str); + } +} + +template<> std::string Setting<int>::to_string() +{ + return std::to_string(value); +} + +template<> void Setting<bool>::set(const std::string & str) +{ + value = str == "true" || str == "1"; +} + +template<> std::string Setting<bool>::to_string() +{ + return value ? "true" : "false"; +} + +void PathSetting::set(const std::string & str) +{ + if (str == "") { + if (allowEmpty) + value = ""; + else + throw UsageError("setting '%s' cannot be empty", name); + } else + value = canonPath(str); +} + +} diff --git a/src/libutil/config.hh b/src/libutil/config.hh new file mode 100644 index 000000000000..fb2d48e9c834 --- /dev/null +++ b/src/libutil/config.hh @@ -0,0 +1,151 @@ +#include <map> +#include <set> + +#include "types.hh" + +#pragma once + +namespace nix { + +class Args; +class AbstractSetting; + +/* A class to simplify providing configuration settings. 
The typical + use is to inherit Config and add Setting<T> members: + + class MyClass : private Config + { + Setting<int> foo{this, 123, "foo", "the number of foos to use"}; + Setting<std::string> bar{this, "blabla", "bar", "the name of the bar"}; + + MyClass() : Config(readConfigFile("/etc/my-app.conf")) + { + std::cout << foo << "\n"; // will print 123 unless overriden + } + }; +*/ + +class Config +{ + friend class AbstractSetting; + + struct SettingData + { + bool isAlias = false; + AbstractSetting * setting; + }; + + std::map<std::string, SettingData> _settings; + + StringMap initials; + +public: + + Config(const StringMap & initials) + : initials(initials) + { } + + void set(const std::string & name, const std::string & value); + + void add(AbstractSetting * setting); + + void warnUnused(); + + std::string dump(); +}; + +class AbstractSetting +{ + friend class Config; + +public: + + const std::string name; + const std::string description; + const std::set<std::string> aliases; + + int created = 123; + +protected: + + AbstractSetting( + const std::string & name, + const std::string & description, + const std::set<std::string> & aliases); + + virtual ~AbstractSetting() + { + // Check against a gcc miscompilation causing our constructor + // not to run. + assert(created == 123); + } + + virtual void set(const std::string & value) = 0; + + virtual std::string to_string() = 0; +}; + +/* A setting of type T. */ +template<typename T> +class Setting : public AbstractSetting +{ +protected: + + T value; + +public: + + Setting(Config * options, + const T & def, + const std::string & name, + const std::string & description, + const std::set<std::string> & aliases = {}) + : AbstractSetting(name, description, aliases) + , value(def) + { + options->add(this); + } + + operator const T &() const { return value; } + bool operator ==(const T & v2) const { return value == v2; } + bool operator !=(const T & v2) const { return value != v2; } + void operator =(const T & v) { value = v; } + + void set(const std::string & str) override; + + std::string to_string() override; +}; + +template<typename T> +std::ostream & operator <<(std::ostream & str, const Setting<T> & opt) +{ + str << (const T &) opt; + return str; +} + +/* A special setting for Paths. These are automatically canonicalised + (e.g. "/foo//bar/" becomes "/foo/bar"). 
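
The header comment above already shows the basic Config/Setting pattern; as a small complementary sketch, this is roughly how aliases, set() and dump() from config.cc interact. The class and setting names here are invented for illustration:

    #include "config.hh"
    #include <iostream>

    using namespace nix;

    struct DemoConfig : Config
    {
        Setting<int> retries{this, 3, "retries", "how often to retry", {"max-retries"}};
        Setting<bool> verbose{this, false, "verbose", "enable chatty output"};

        DemoConfig(const StringMap & initials) : Config(initials) { }
    };

    int main()
    {
        DemoConfig cfg({{"max-retries", "5"}});  // applied through the alias
        cfg.set("verbose", "true");
        std::cout << cfg.dump();                 // retries = 5, verbose = true
        return 0;
    }
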
*/ +class PathSetting : public Setting<Path> +{ + bool allowEmpty; + +public: + + PathSetting(Config * options, + bool allowEmpty, + const Path & def, + const std::string & name, + const std::string & description, + const std::set<std::string> & aliases = {}) + : Setting<Path>(options, def, name, description, aliases) + , allowEmpty(allowEmpty) + { + set(value); + } + + void set(const std::string & str) override; + + Path operator +(const char * p) const { return value + p; } +}; + +} diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index f447c80c5d81..9f4afd93c2fc 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -7,12 +7,12 @@ #include "hash.hh" #include "archive.hh" #include "util.hh" +#include "istringstream_nocopy.hh" #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> - namespace nix { @@ -104,7 +104,7 @@ Hash parseHash(HashType ht, const string & s) string s2(s, i * 2, 2); if (!isxdigit(s2[0]) || !isxdigit(s2[1])) throw BadHash(format("invalid hash ‘%1%’") % s); - std::istringstream str(s2); + istringstream_nocopy str(s2); int n; str >> std::hex >> n; hash.hash[i] = n; diff --git a/src/libutil/istringstream_nocopy.hh b/src/libutil/istringstream_nocopy.hh new file mode 100644 index 000000000000..f7beac578e39 --- /dev/null +++ b/src/libutil/istringstream_nocopy.hh @@ -0,0 +1,92 @@ +/* This file provides a variant of std::istringstream that doesn't + copy its string argument. This is useful for large strings. The + caller must ensure that the string object is not destroyed while + it's referenced by this object. */ + +#pragma once + +#include <string> +#include <iostream> + +template <class CharT, class Traits = std::char_traits<CharT>, class Allocator = std::allocator<CharT>> +class basic_istringbuf_nocopy : public std::basic_streambuf<CharT, Traits> +{ +public: + typedef std::basic_string<CharT, Traits, Allocator> string_type; + + typedef typename std::basic_streambuf<CharT, Traits>::off_type off_type; + + typedef typename std::basic_streambuf<CharT, Traits>::pos_type pos_type; + + typedef typename std::basic_streambuf<CharT, Traits>::int_type int_type; + + typedef typename std::basic_streambuf<CharT, Traits>::traits_type traits_type; + +private: + const string_type & s; + + off_type off; + +public: + basic_istringbuf_nocopy(const string_type & s) : s{s}, off{0} + { + } + +private: + pos_type seekoff(off_type off, std::ios_base::seekdir dir, std::ios_base::openmode which) + { + if (which & std::ios_base::in) { + this->off = dir == std::ios_base::beg + ? off + : (dir == std::ios_base::end + ? 
s.size() + off + : this->off + off); + } + return pos_type(this->off); + } + + pos_type seekpos(pos_type pos, std::ios_base::openmode which) + { + return seekoff(pos, std::ios_base::beg, which); + } + + std::streamsize showmanyc() + { + return s.size() - off; + } + + int_type underflow() + { + if (typename string_type::size_type(off) == s.size()) + return traits_type::eof(); + return traits_type::to_int_type(s[off]); + } + + int_type uflow() + { + if (typename string_type::size_type(off) == s.size()) + return traits_type::eof(); + return traits_type::to_int_type(s[off++]); + } + + int_type pbackfail(int_type ch) + { + if (off == 0 || (ch != traits_type::eof() && ch != s[off - 1])) + return traits_type::eof(); + + return traits_type::to_int_type(s[--off]); + } + +}; + +template <class CharT, class Traits = std::char_traits<CharT>, class Allocator = std::allocator<CharT>> +class basic_istringstream_nocopy : public std::basic_iostream<CharT, Traits> +{ + typedef basic_istringbuf_nocopy<CharT, Traits, Allocator> buf_type; + buf_type buf; +public: + basic_istringstream_nocopy(const typename buf_type::string_type & s) : + std::basic_iostream<CharT, Traits>(&buf), buf(s) {}; +}; + +typedef basic_istringstream_nocopy<char> istringstream_nocopy; diff --git a/src/libutil/local.mk b/src/libutil/local.mk index cac5c8795db7..0721b21c2089 100644 --- a/src/libutil/local.mk +++ b/src/libutil/local.mk @@ -9,3 +9,5 @@ libutil_SOURCES := $(wildcard $(d)/*.cc) libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) libutil_LIBS = libformat + +libutil_CXXFLAGS = -DBRO=\"$(bro)\" diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index d9e8d22d7685..afcc2ec58543 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -3,7 +3,12 @@ namespace nix { -Logger * logger = 0; +Logger * logger = makeDefaultLogger(); + +void Logger::warn(const std::string & msg) +{ + log(lvlInfo, ANSI_RED "warning:" ANSI_NORMAL " " + msg); +} class SimpleLogger : public Logger { @@ -52,7 +57,7 @@ Verbosity verbosity = lvlInfo; void warnOnce(bool & haveWarned, const FormatOrString & fs) { if (!haveWarned) { - printError(format("warning: %1%") % fs.s); + warn(fs.s); haveWarned = true; } } diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh index 3f83664794f7..81aebccdca45 100644 --- a/src/libutil/logging.hh +++ b/src/libutil/logging.hh @@ -30,6 +30,8 @@ public: log(lvlInfo, fs); } + virtual void warn(const std::string & msg); + virtual void setExpected(const std::string & label, uint64_t value = 1) { } virtual void setProgress(const std::string & label, uint64_t value = 1) { } virtual void incExpected(const std::string & label, uint64_t value = 1) { } @@ -82,6 +84,14 @@ extern Verbosity verbosity; /* suppress msgs > this */ #define debug(args...) printMsg(lvlDebug, args) #define vomit(args...) printMsg(lvlVomit, args) +template<typename... Args> +inline void warn(const std::string & fs, Args... args) +{ + boost::format f(fs); + formatHelper(f, args...); + logger->warn(f.str()); +} + void warnOnce(bool & haveWarned, const FormatOrString & fs); void writeToStderr(const string & s); diff --git a/src/libutil/lru-cache.hh b/src/libutil/lru-cache.hh index 35983aa2c918..3cb5d50889d9 100644 --- a/src/libutil/lru-cache.hh +++ b/src/libutil/lru-cache.hh @@ -11,7 +11,7 @@ class LRUCache { private: - size_t maxSize; + size_t capacity; // Stupid wrapper to get around circular dependency between Data // and LRU. 
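
The warn() helper added to logging.hh above gives call sites a one-liner for the red “warning:” prefix that Logger::warn() now produces. A minimal sketch; the condition, threshold and message are examples only:

    #include "logging.hh"

    using namespace nix;

    void checkFreeSpace(unsigned long long bytes)
    {
        if (bytes < 100 * 1024 * 1024)
            warn("only %d bytes of free space left on the store volume", bytes);
    }
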
@@ -27,14 +27,16 @@ private: public: - LRUCache(size_t maxSize) : maxSize(maxSize) { } + LRUCache(size_t capacity) : capacity(capacity) { } /* Insert or upsert an item in the cache. */ void upsert(const Key & key, const Value & value) { + if (capacity == 0) return; + erase(key); - if (data.size() >= maxSize) { + if (data.size() >= capacity) { /* Retire the oldest item. */ auto oldest = lru.begin(); data.erase(*oldest); diff --git a/src/libutil/pool.hh b/src/libutil/pool.hh index f291cd578388..20df21948849 100644 --- a/src/libutil/pool.hh +++ b/src/libutil/pool.hh @@ -137,15 +137,21 @@ public: } catch (...) { auto state_(state.lock()); state_->inUse--; + wakeup.notify_one(); throw; } } - unsigned int count() + size_t count() { auto state_(state.lock()); return state_->idle.size() + state_->inUse; } + + size_t capacity() + { + return state.lock()->max; + } }; } diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index a68f7a0fa8ee..950e6362a245 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -194,39 +194,9 @@ void readPadding(size_t len, Source & source) } -unsigned int readInt(Source & source) -{ - unsigned char buf[8]; - source(buf, sizeof(buf)); - if (buf[4] || buf[5] || buf[6] || buf[7]) - throw SerialisationError("implementation cannot deal with > 32-bit integers"); - return - buf[0] | - (buf[1] << 8) | - (buf[2] << 16) | - (buf[3] << 24); -} - - -unsigned long long readLongLong(Source & source) -{ - unsigned char buf[8]; - source(buf, sizeof(buf)); - return - ((unsigned long long) buf[0]) | - ((unsigned long long) buf[1] << 8) | - ((unsigned long long) buf[2] << 16) | - ((unsigned long long) buf[3] << 24) | - ((unsigned long long) buf[4] << 32) | - ((unsigned long long) buf[5] << 40) | - ((unsigned long long) buf[6] << 48) | - ((unsigned long long) buf[7] << 56); -} - - size_t readString(unsigned char * buf, size_t max, Source & source) { - size_t len = readInt(source); + auto len = readNum<size_t>(source); if (len > max) throw Error("string is too long"); source(buf, len); readPadding(len, source); @@ -236,11 +206,11 @@ size_t readString(unsigned char * buf, size_t max, Source & source) string readString(Source & source) { - size_t len = readInt(source); - auto buf = std::make_unique<unsigned char[]>(len); - source(buf.get(), len); + auto len = readNum<size_t>(source); + std::string res(len, 0); + source((unsigned char*) res.data(), len); readPadding(len, source); - return string((char *) buf.get(), len); + return res; } Source & operator >> (Source & in, string & s) @@ -250,16 +220,9 @@ Source & operator >> (Source & in, string & s) } -Source & operator >> (Source & in, unsigned int & n) -{ - n = readInt(in); - return in; -} - - template<class T> T readStrings(Source & source) { - unsigned int count = readInt(source); + auto count = readNum<size_t>(source); T ss; while (count--) ss.insert(ss.end(), readString(source)); diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 5646d08c1314..2bdee70807be 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -140,15 +140,16 @@ struct StringSource : Source /* Adapter class of a Source that saves all data read to `s'. 
*/ -struct SavingSourceAdapter : Source +struct TeeSource : Source { Source & orig; - string s; - SavingSourceAdapter(Source & orig) : orig(orig) { } + ref<std::string> data; + TeeSource(Source & orig) + : orig(orig), data(make_ref<std::string>()) { } size_t read(unsigned char * data, size_t len) { size_t n = orig.read(data, len); - s.append((const char *) data, n); + this->data->append((const char *) data, n); return n; } }; @@ -177,18 +178,64 @@ Sink & operator << (Sink & sink, const Strings & s); Sink & operator << (Sink & sink, const StringSet & s); +MakeError(SerialisationError, Error) + + +template<typename T> +T readNum(Source & source) +{ + unsigned char buf[8]; + source(buf, sizeof(buf)); + + uint64_t n = + ((unsigned long long) buf[0]) | + ((unsigned long long) buf[1] << 8) | + ((unsigned long long) buf[2] << 16) | + ((unsigned long long) buf[3] << 24) | + ((unsigned long long) buf[4] << 32) | + ((unsigned long long) buf[5] << 40) | + ((unsigned long long) buf[6] << 48) | + ((unsigned long long) buf[7] << 56); + + if (n > std::numeric_limits<T>::max()) + throw SerialisationError("serialised integer %d is too large for type ‘%s’", n, typeid(T).name()); + + return n; +} + + +inline unsigned int readInt(Source & source) +{ + return readNum<unsigned int>(source); +} + + +inline uint64_t readLongLong(Source & source) +{ + return readNum<uint64_t>(source); +} + + void readPadding(size_t len, Source & source); -unsigned int readInt(Source & source); -unsigned long long readLongLong(Source & source); size_t readString(unsigned char * buf, size_t max, Source & source); string readString(Source & source); template<class T> T readStrings(Source & source); Source & operator >> (Source & in, string & s); -Source & operator >> (Source & in, unsigned int & n); +template<typename T> +Source & operator >> (Source & in, T & n) +{ + n = readNum<T>(in); + return in; +} -MakeError(SerialisationError, Error) +template<typename T> +Source & operator >> (Source & in, bool & b) +{ + b = readNum<uint64_t>(in); + return in; +} } diff --git a/src/libutil/sync.hh b/src/libutil/sync.hh index 2aa074299b23..611c900e0a32 100644 --- a/src/libutil/sync.hh +++ b/src/libutil/sync.hh @@ -33,6 +33,7 @@ public: Sync() { } Sync(const T & data) : data(data) { } + Sync(T && data) noexcept : data(std::move(data)) { } class Lock { diff --git a/src/libutil/types.hh b/src/libutil/types.hh index 97d79af9b5d6..1429c238513b 100644 --- a/src/libutil/types.hh +++ b/src/libutil/types.hh @@ -7,6 +7,7 @@ #include <list> #include <set> #include <memory> +#include <map> #include <boost/format.hpp> @@ -141,6 +142,7 @@ private: typedef list<string> Strings; typedef set<string> StringSet; +typedef std::map<std::string, std::string> StringMap; /* Paths are just strings. 
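
readNum<T>() above replaces the old readInt()/readLongLong() pair and range-checks the 64-bit wire value against the requested type, while the templated operator >> lets protocol code chain reads. A small sketch of the calling style; the field layout is made up:

    #include "serialise.hh"

    using namespace nix;

    void readExampleHeader(Source & from)
    {
        bool valid;
        uint64_t size;
        std::string name;

        // Integers are 64-bit little-endian words; strings are
        // length-prefixed and padded.
        from >> valid >> size >> name;

        auto count = readNum<size_t>(from);  // throws SerialisationError if
        while (count--)                      // the value doesn't fit size_t
            readString(from);
    }
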
*/ diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 0a5f796e4eaa..0bd51afd1a9f 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -1,6 +1,7 @@ #include "util.hh" #include "affinity.hh" #include "sync.hh" +#include "finally.hh" #include <cctype> #include <cerrno> @@ -10,6 +11,7 @@ #include <iostream> #include <sstream> #include <thread> +#include <future> #include <sys/wait.h> #include <unistd.h> @@ -94,6 +96,8 @@ Path absPath(Path path, Path dir) Path canonPath(const Path & path, bool resolveSymlinks) { + assert(path != ""); + string s; if (path[0] != '/') @@ -485,6 +489,7 @@ void readFull(int fd, unsigned char * buf, size_t count) void writeFull(int fd, const unsigned char * buf, size_t count, bool allowInterrupts) { while (count) { + if (allowInterrupts) checkInterrupt(); ssize_t res = write(fd, (char *) buf, count); if (res == -1 && errno != EINTR) throw SysError("writing to file"); @@ -492,7 +497,6 @@ void writeFull(int fd, const unsigned char * buf, size_t count, bool allowInterr count -= res; buf += res; } - if (allowInterrupts) checkInterrupt(); } } @@ -676,12 +680,11 @@ Pid::operator pid_t() } -int Pid::kill(bool quiet) +int Pid::kill() { assert(pid != -1); - if (!quiet) - printError(format("killing process %1%") % pid); + debug(format("killing process %1%") % pid); /* Send the requested signal to the child. If it has its own process group, send the signal to every process in the child @@ -837,23 +840,21 @@ std::vector<char *> stringsToCharPtrs(const Strings & ss) string runProgram(Path program, bool searchPath, const Strings & args, - const string & input) + const std::experimental::optional<std::string> & input) { checkInterrupt(); /* Create a pipe. */ Pipe out, in; out.create(); - if (!input.empty()) in.create(); + if (input) in.create(); /* Fork. */ Pid pid = startProcess([&]() { if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) throw SysError("dupping stdout"); - if (!input.empty()) { - if (dup2(in.readSide.get(), STDIN_FILENO) == -1) - throw SysError("dupping stdin"); - } + if (input && dup2(in.readSide.get(), STDIN_FILENO) == -1) + throw SysError("dupping stdin"); Strings args_(args); args_.push_front(program); @@ -870,11 +871,27 @@ string runProgram(Path program, bool searchPath, const Strings & args, out.writeSide = -1; - /* FIXME: This can deadlock if the input is too long. */ - if (!input.empty()) { + std::thread writerThread; + + std::promise<void> promise; + + Finally doJoin([&]() { + if (writerThread.joinable()) + writerThread.join(); + }); + + + if (input) { in.readSide = -1; - writeFull(in.writeSide.get(), input); - in.writeSide = -1; + writerThread = std::thread([&]() { + try { + writeFull(in.writeSide.get(), *input); + promise.set_value(); + } catch (...) { + promise.set_exception(std::current_exception()); + } + in.writeSide = -1; + }); } string result = drainFD(out.readSide.get()); @@ -885,6 +902,9 @@ string runProgram(Path program, bool searchPath, const Strings & args, throw ExecError(status, format("program ‘%1%’ %2%") % program % statusToString(status)); + /* Wait for the writer thread to finish. 
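
runProgram() above now takes its stdin data as an std::experimental::optional and feeds it to the child from a separate writer thread, so large inputs no longer risk the old pipe deadlock. A minimal usage sketch mirroring the brotli helper in compression.cc; the ‘bro’ binary name follows the BRO macro in this tree and the wrapper name is hypothetical:

    #include "util.hh"

    using namespace nix;

    std::string decompressBrotliExample(const std::string & data)
    {
        // searchPath = true; ‘data’ is written to the child's stdin by the
        // background writer thread while stdout is drained.
        return runProgram("bro", true, {"-d"}, data);
    }
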
*/ + if (input) promise.get_future().get(); + return result; } @@ -1194,7 +1214,7 @@ static void signalHandlerThread(sigset_t set) void triggerInterrupt() { - _isInterrupted = 1; + _isInterrupted = true; { auto interruptCallbacks(_interruptCallbacks.lock()); diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 2950f7daa5ec..0e6941e4a8db 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -13,6 +13,8 @@ #include <limits> #include <cstdio> #include <map> +#include <sstream> +#include <experimental/optional> #ifndef HAVE_STRUCT_DIRENT_D_TYPE #define DT_UNKNOWN 0 @@ -201,7 +203,7 @@ public: ~Pid(); void operator =(pid_t pid); operator pid_t(); - int kill(bool quiet = false); + int kill(); int wait(); void setSeparatePG(bool separatePG); @@ -231,7 +233,8 @@ pid_t startProcess(std::function<void()> fun, const ProcessOptions & options = P /* Run a program and return its stdout in a string (i.e., like the shell backtick operator). */ string runProgram(Path program, bool searchPath = false, - const Strings & args = Strings(), const string & input = ""); + const Strings & args = Strings(), + const std::experimental::optional<std::string> & input = {}); class ExecError : public Error { @@ -448,5 +451,4 @@ struct ReceiveInterrupts { } }; - } diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index ee030c57b6b3..b4206033cf5f 100755 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -394,7 +394,7 @@ int main(int argc, char ** argv) auto tmp = getEnv("TMPDIR", getEnv("XDG_RUNTIME_DIR", "/tmp")); if (pure) { - std::set<string> keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL"}; + std::set<string> keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL"}; decltype(env) newEnv; for (auto & i : env) if (keepVars.count(i.first)) @@ -408,7 +408,7 @@ int main(int argc, char ** argv) env["NIX_STORE"] = store->storeDir; for (auto & var : drv.env) - env.emplace(var); + env[var.first] = var.second; restoreAffinity(); @@ -448,15 +448,17 @@ int main(int argc, char ** argv) auto envPtrs = stringsToCharPtrs(envStrs); + auto shell = getEnv("NIX_BUILD_SHELL", "bash"); + environ = envPtrs.data(); auto argPtrs = stringsToCharPtrs(args); restoreSignals(); - execvp(getEnv("NIX_BUILD_SHELL", "bash").c_str(), argPtrs.data()); + execvp(shell.c_str(), argPtrs.data()); - throw SysError("executing shell"); + throw SysError("executing shell ‘%s’", shell); } // Ugly hackery to make "nix-build -A foo.all" produce symlinks diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc index 4340443b5cc2..ed43bffbc8c8 100755 --- a/src/nix-copy-closure/nix-copy-closure.cc +++ b/src/nix-copy-closure/nix-copy-closure.cc @@ -47,13 +47,17 @@ int main(int argc, char ** argv) if (sshHost.empty()) throw UsageError("no host name specified"); - auto remoteUri = "legacy-ssh://" + sshHost + (gzip ? "?compress=true" : ""); + auto remoteUri = "ssh://" + sshHost + (gzip ? "?compress=true" : ""); auto to = toMode ? openStore(remoteUri) : openStore(); auto from = toMode ? 
openStore() : openStore(remoteUri); + PathSet storePaths2; + for (auto & path : storePaths) + storePaths2.insert(from->followLinksToStorePath(path)); + PathSet closure; - from->computeFSClosure(storePaths, closure, false, includeOutputs); + from->computeFSClosure(storePaths2, closure, false, includeOutputs); - copyPaths(from, to, Paths(closure.begin(), closure.end()), useSubstitutes); + copyPaths(from, to, closure, useSubstitutes); }); } diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc index 2fcb5b565a9f..f4285693f7fb 100644 --- a/src/nix-daemon/nix-daemon.cc +++ b/src/nix-daemon/nix-daemon.cc @@ -273,10 +273,9 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe } case wopAddToStore: { - string baseName = readString(from); - bool fixed = readInt(from) == 1; /* obsolete */ - bool recursive = readInt(from) == 1; - string s = readString(from); + bool fixed, recursive; + std::string s, baseName; + from >> baseName >> fixed /* obsolete */ >> recursive >> s; /* Compatibility hack. */ if (!fixed) { s = "sha256"; @@ -284,7 +283,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe } HashType hashAlgo = parseHashType(s); - SavingSourceAdapter savedNAR(from); + TeeSource savedNAR(from); RetrieveRegularNARSink savedRegular; if (recursive) { @@ -298,7 +297,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe startWork(); if (!savedRegular.regular) throw Error("regular file expected"); - Path path = store->addToStoreFromDump(recursive ? savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo); + Path path = store->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo); stopWork(); to << path; @@ -340,7 +339,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe PathSet drvs = readStorePaths<PathSet>(*store, from); BuildMode mode = bmNormal; if (GET_PROTOCOL_MINOR(clientVersion) >= 15) { - mode = (BuildMode)readInt(from); + mode = (BuildMode) readInt(from); /* Repairing is not atomic, so disallowed for "untrusted" clients. */ @@ -417,8 +416,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe GCOptions options; options.action = (GCOptions::GCAction) readInt(from); options.pathsToDelete = readStorePaths<PathSet>(*store, from); - options.ignoreLiveness = readInt(from); - options.maxFreed = readLongLong(from); + from >> options.ignoreLiveness >> options.maxFreed; // obsolete fields readInt(from); readInt(from); @@ -438,8 +436,8 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe } case wopSetOptions: { - settings.keepFailed = readInt(from) != 0; - settings.keepGoing = readInt(from) != 0; + from >> settings.keepFailed; + from >> settings.keepGoing; settings.set("build-fallback", readInt(from) ? 
"true" : "false"); verbosity = (Verbosity) readInt(from); settings.set("build-max-jobs", std::to_string(readInt(from))); @@ -539,8 +537,8 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe break; case wopVerifyStore: { - bool checkContents = readInt(from) != 0; - bool repair = readInt(from) != 0; + bool checkContents, repair; + from >> checkContents >> repair; startWork(); if (repair && !trusted) throw Error("you are not privileged to repair paths"); @@ -571,25 +569,37 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe } case wopAddToStoreNar: { + bool repair, dontCheckSigs; ValidPathInfo info; info.path = readStorePath(*store, from); - info.deriver = readString(from); + from >> info.deriver; if (!info.deriver.empty()) store->assertStorePath(info.deriver); info.narHash = parseHash(htSHA256, readString(from)); info.references = readStorePaths<PathSet>(*store, from); - info.registrationTime = readInt(from); - info.narSize = readLongLong(from); - info.ultimate = readLongLong(from); + from >> info.registrationTime >> info.narSize >> info.ultimate; info.sigs = readStrings<StringSet>(from); - auto nar = make_ref<std::string>(readString(from)); - auto repair = readInt(from) ? true : false; - auto dontCheckSigs = readInt(from) ? true : false; + from >> info.ca >> repair >> dontCheckSigs; if (!trusted && dontCheckSigs) dontCheckSigs = false; + + TeeSink tee(from); + parseDump(tee, tee.source); + + startWork(); + store->addToStore(info, tee.source.data, repair, dontCheckSigs, nullptr); + stopWork(); + break; + } + + case wopQueryMissing: { + PathSet targets = readStorePaths<PathSet>(*store, from); startWork(); - store->addToStore(info, nar, repair, dontCheckSigs, nullptr); + PathSet willBuild, willSubstitute, unknown; + unsigned long long downloadSize, narSize; + store->queryMissing(targets, willBuild, willSubstitute, unknown, downloadSize, narSize); stopWork(); + to << willBuild << willSubstitute << unknown << downloadSize << narSize; break; } @@ -638,7 +648,10 @@ static void processConnection(bool trusted) #endif /* Open the store. */ - auto store = make_ref<LocalStore>(Store::Params()); // FIXME: get params from somewhere + Store::Params params; // FIXME: get params from somewhere + // Disable caching since the client already does that. + params["path-info-cache-size"] = "0"; + auto store = make_ref<LocalStore>(params); stopWork(); to.flush(); diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc index acf603025690..b3b2fcac7132 100644 --- a/src/nix-prefetch-url/nix-prefetch-url.cc +++ b/src/nix-prefetch-url/nix-prefetch-url.cc @@ -170,10 +170,10 @@ int main(int argc, char * * argv) Path unpacked = (Path) tmpDir + "/unpacked"; createDirs(unpacked); if (hasSuffix(baseNameOf(uri), ".zip")) - runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked}, ""); + runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked}); else // FIXME: this requires GNU tar for decompression. - runProgram("tar", true, {"xf", tmpFile, "-C", unpacked}, ""); + runProgram("tar", true, {"xf", tmpFile, "-C", unpacked}); /* If the archive unpacks to a single file/directory, then use that as the top-level. 
*/ diff --git a/src/nix-store/local.mk b/src/nix-store/local.mk index 84ff15b241f3..ade0b233adf3 100644 --- a/src/nix-store/local.mk +++ b/src/nix-store/local.mk @@ -7,5 +7,3 @@ nix-store_SOURCES := $(wildcard $(d)/*.cc) nix-store_LIBS = libmain libstore libutil libformat nix-store_LDFLAGS = -lbz2 -pthread $(SODIUM_LIBS) - -nix-store_CXXFLAGS = -DCURL=\"$(curl)\" diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index bb3b430c9ec1..a40cca9824ee 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -9,7 +9,6 @@ #include "util.hh" #include "worker-protocol.hh" #include "xmlgraph.hh" -#include "compression.hh" #include <iostream> #include <algorithm> @@ -482,58 +481,12 @@ static void opReadLog(Strings opFlags, Strings opArgs) RunPager pager; - // FIXME: move getting logs into Store. - auto store2 = std::dynamic_pointer_cast<LocalFSStore>(store); - if (!store2) throw Error(format("store ‘%s’ does not support reading logs") % store->getUri()); - for (auto & i : opArgs) { - Path path = useDeriver(store->followLinksToStorePath(i)); - - string baseName = baseNameOf(path); - bool found = false; - - for (int j = 0; j < 2; j++) { - - Path logPath = - j == 0 - ? (format("%1%/%2%/%3%/%4%") % store2->logDir % drvsLogDir % string(baseName, 0, 2) % string(baseName, 2)).str() - : (format("%1%/%2%/%3%") % store2->logDir % drvsLogDir % baseName).str(); - Path logBz2Path = logPath + ".bz2"; - - if (pathExists(logPath)) { - /* !!! Make this run in O(1) memory. */ - string log = readFile(logPath); - writeFull(STDOUT_FILENO, log); - found = true; - break; - } - - else if (pathExists(logBz2Path)) { - std::cout << *decompress("bzip2", readFile(logBz2Path)); - found = true; - break; - } - } - - if (!found) { - for (auto & i : settings.logServers) { - string prefix = i; - if (!prefix.empty() && prefix.back() != '/') prefix += '/'; - string url = prefix + baseName; - try { - string log = runProgram(CURL, true, {"--fail", "--location", "--silent", "--", url}); - std::cout << "(using build log from " << url << ")" << std::endl; - std::cout << log; - found = true; - break; - } catch (ExecError & e) { - /* Ignore errors from curl. FIXME: actually, might be - nice to print a warning on HTTP status != 404. 
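
All of the log-file probing and curl fallback being deleted here (together with the CURL define dropped from local.mk above) is replaced by a single call, store->getBuildLog(path), visible just below. That method is not part of this excerpt; judging from the removed code it relocates the same lookup behind the Store interface, along the lines of the following sketch (the helper name and the single logDir parameter are simplifications):

#include "util.hh"           /* pathExists, readFile, format */
#include "compression.hh"    /* decompress */

using namespace nix;

/* Probe the split (xx/yyyy...) and flat layouts that the removed code
   handled inline, decompress the .bz2 variant, and return null if no log
   exists for this derivation. */
std::shared_ptr<std::string> findBuildLog(const Path & logDir, const string & baseName)
{
    Path split = (format("%1%/%2%/%3%") % logDir % string(baseName, 0, 2) % string(baseName, 2)).str();
    Path flat  = (format("%1%/%2%") % logDir % baseName).str();

    for (auto & logPath : {split, flat}) {
        if (pathExists(logPath))
            return std::make_shared<string>(readFile(logPath));
        if (pathExists(logPath + ".bz2"))
            return std::make_shared<string>(*decompress("bzip2", readFile(logPath + ".bz2")));
    }
    return nullptr;
}
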
*/ - } - } - } - - if (!found) throw Error(format("build log of derivation ‘%1%’ is not available") % path); + auto path = store->followLinksToStorePath(i); + auto log = store->getBuildLog(path); + if (!log) + throw Error("build log of derivation ‘%s’ is not available", path); + std::cout << *log; } } @@ -708,6 +661,9 @@ static void opExport(Strings opFlags, Strings opArgs) for (auto & i : opFlags) throw UsageError(format("unknown flag ‘%1%’") % i); + for (auto & i : opArgs) + i = store->followLinksToStorePath(i); + FdSink sink(STDOUT_FILENO); store->exportPaths(opArgs, sink); } @@ -721,7 +677,7 @@ static void opImport(Strings opFlags, Strings opArgs) if (!opArgs.empty()) throw UsageError("no arguments expected"); FdSource source(STDIN_FILENO); - Paths paths = store->importPaths(source, 0); + Paths paths = store->importPaths(source, nullptr, true); for (auto & i : paths) cout << format("%1%\n") % i << std::flush; @@ -839,7 +795,7 @@ static void opServe(Strings opFlags, Strings opArgs) settings.maxSilentTime = readInt(in); settings.buildTimeout = readInt(in); if (GET_PROTOCOL_MINOR(clientVersion) >= 2) - settings.maxLogSize = readInt(in); + in >> settings.maxLogSize; if (GET_PROTOCOL_MINOR(clientVersion) >= 3) { settings.set("build-repeat", std::to_string(readInt(in))); settings.set("enforce-determinism", readInt(in) != 0 ? "true" : "false"); @@ -926,7 +882,7 @@ static void opServe(Strings opFlags, Strings opArgs) break; } - case cmdBuildPaths: { /* Used by build-remote.pl. */ + case cmdBuildPaths: { if (!writeAllowed) throw Error("building paths is not allowed"); PathSet paths = readStorePaths<PathSet>(*store, in); diff --git a/src/nix/command.cc b/src/nix/command.cc index 5a8288da912f..a1b2c120a5d9 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -79,9 +79,14 @@ StoreCommand::StoreCommand() mkFlag(0, "store", "store-uri", "URI of the Nix store to use", &storeUri); } +ref<Store> StoreCommand::createStore() +{ + return openStore(storeUri); +} + void StoreCommand::run() { - run(openStore(storeUri)); + run(createStore()); } StorePathsCommand::StorePathsCommand() diff --git a/src/nix/command.hh b/src/nix/command.hh index a29cdcf7f50f..fa6c21abf8ad 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -33,6 +33,7 @@ struct StoreCommand : virtual Command std::string storeUri; StoreCommand(); void run() override; + virtual ref<Store> createStore(); virtual void run(ref<Store>) = 0; }; diff --git a/src/nix/copy.cc b/src/nix/copy.cc index 976b0d3e0b81..b2165cb8f85c 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -38,15 +38,19 @@ struct CmdCopy : StorePathsCommand }; } - void run(ref<Store> store, Paths storePaths) override + ref<Store> createStore() override + { + return srcUri.empty() ? StoreCommand::createStore() : openStore(srcUri); + } + + void run(ref<Store> srcStore, Paths storePaths) override { if (srcUri.empty() && dstUri.empty()) throw UsageError("you must pass ‘--from’ and/or ‘--to’"); - ref<Store> srcStore = srcUri.empty() ? store : openStore(srcUri); - ref<Store> dstStore = dstUri.empty() ? store : openStore(dstUri); + ref<Store> dstStore = dstUri.empty() ? 
openStore() : openStore(dstUri); - copyPaths(srcStore, dstStore, storePaths); + copyPaths(srcStore, dstStore, PathSet(storePaths.begin(), storePaths.end())); } }; diff --git a/src/nix/log.cc b/src/nix/log.cc new file mode 100644 index 000000000000..d8a3830e91c8 --- /dev/null +++ b/src/nix/log.cc @@ -0,0 +1,57 @@ +#include "command.hh" +#include "common-args.hh" +#include "installables.hh" +#include "shared.hh" +#include "store-api.hh" + +using namespace nix; + +struct CmdLog : StoreCommand, MixInstallables +{ + CmdLog() + { + } + + std::string name() override + { + return "log"; + } + + std::string description() override + { + return "show the build log of the specified packages or paths"; + } + + void run(ref<Store> store) override + { + auto elems = evalInstallables(store); + + PathSet paths; + + for (auto & elem : elems) { + if (elem.isDrv) + paths.insert(elem.drvPath); + else + paths.insert(elem.outPaths.begin(), elem.outPaths.end()); + } + + auto subs = getDefaultSubstituters(); + + subs.push_front(store); + + for (auto & path : paths) { + bool found = false; + for (auto & sub : subs) { + auto log = sub->getBuildLog(path); + if (!log) continue; + std::cout << *log; + found = true; + break; + } + if (!found) + throw Error("build log of path ‘%s’ is not available", path); + } + } +}; + +static RegisterCommand r1(make_ref<CmdLog>()); diff --git a/src/nix/main.cc b/src/nix/main.cc index 440ced97dfcc..fdb8f6e3a197 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -42,6 +42,7 @@ void mainWrapped(int argc, char * * argv) NixArgs args; args.parseCmdline(argvToStrings(argc, argv)); + settings.update(); assert(args.command); |
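
Two small command-infrastructure changes tie the new nix log together with the existing plumbing: StoreCommand::run() now goes through a virtual createStore(), and CmdCopy overrides it so that the paths it is given are resolved against the --from store rather than the default one. The same hook is available to any other subcommand; a minimal illustrative subclass (not part of the patch, flag registration omitted) looks like this:

#include "command.hh"
#include "store-api.hh"

using namespace nix;

/* Hypothetical subcommand demonstrating the createStore() hook: whatever
   store this returns is what the base class hands to run(ref<Store>). */
struct CmdOnOtherStore : StoreCommand
{
    std::string otherUri;   /* would be registered with mkFlag() in practice */

    std::string name() override { return "on-other-store"; }
    std::string description() override { return "operate on an explicitly chosen store"; }

    ref<Store> createStore() override
    {
        return otherUri.empty() ? StoreCommand::createStore() : openStore(otherUri);
    }

    void run(ref<Store> store) override
    {
        /* `store` is the one selected above, not necessarily --store. */
    }
};

The final main.cc hunk, settings.update(), applies any settings gathered while parsing the command line before a command runs; the new nix log then consults the local store first and each default substituter afterwards, so a log served by a binary cache can be printed even for a path that was never built locally.
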