Diffstat (limited to 'src/libstore')
-rw-r--r-- | src/libstore/build.cc                   | 436
-rw-r--r-- | src/libstore/builtins/fetchurl.cc       |   5
-rw-r--r-- | src/libstore/derivations.cc             |  14
-rw-r--r-- | src/libstore/derivations.hh             |   4
-rw-r--r-- | src/libstore/download.cc                |  47
-rw-r--r-- | src/libstore/globals.cc                 |  15
-rw-r--r-- | src/libstore/globals.hh                 |  12
-rw-r--r-- | src/libstore/http-binary-cache-store.cc |  41
-rw-r--r-- | src/libstore/local-store.cc             |   6
-rw-r--r-- | src/libstore/parsed-derivations.cc      | 111
-rw-r--r-- | src/libstore/parsed-derivations.hh      |  35
-rw-r--r-- | src/libstore/remote-store.cc            | 171
-rw-r--r-- | src/libstore/remote-store.hh            |  13
-rw-r--r-- | src/libstore/s3-binary-cache-store.cc   |  28
-rw-r--r-- | src/libstore/store-api.cc               |   4
-rw-r--r-- | src/libstore/store-api.hh               |   1
16 files changed, 647 insertions(+), 296 deletions(-)
diff --git a/src/libstore/build.cc b/src/libstore/build.cc index cd37f7a3fc08..cf4218a261fa 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -11,6 +11,7 @@ #include "compression.hh" #include "json.hh" #include "nar-info.hh" +#include "parsed-derivations.hh" #include <algorithm> #include <iostream> @@ -20,6 +21,7 @@ #include <future> #include <chrono> #include <regex> +#include <queue> #include <limits.h> #include <sys/time.h> @@ -740,6 +742,8 @@ private: /* The derivation stored at drvPath. */ std::unique_ptr<BasicDerivation> drv; + std::unique_ptr<ParsedDerivation> parsedDrv; + /* The remainder is state held during the build. */ /* Locks on the output paths. */ @@ -854,7 +858,7 @@ private: building multiple times. Since this contains the hash, it allows us to compare whether two rounds produced the same result. */ - ValidPathInfos prevInfos; + std::map<Path, ValidPathInfo> prevInfos; const uid_t sandboxUid = 1000; const gid_t sandboxGid = 100; @@ -935,6 +939,11 @@ private: as valid. */ void registerOutputs(); + /* Check that an output meets the requirements specified by the + 'outputChecks' attribute (or the legacy + '{allowed,disallowed}{References,Requisites}' attributes). */ + void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs); + /* Open a log file and a pipe to it. */ Path openLogFile(); @@ -1139,6 +1148,8 @@ void DerivationGoal::haveDerivation() return; } + parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *drv); + /* We are first going to try to create the invalid output paths through substitutes. If that doesn't work, we'll build them. */ @@ -1395,7 +1406,7 @@ void DerivationGoal::tryToBuild() /* Don't do a remote build if the derivation has the attribute `preferLocalBuild' set. Also, check and repair modes are only supported for local builds. */ - bool buildLocally = buildMode != bmNormal || drv->willBuildLocally(); + bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(); auto started = [&]() { auto msg = fmt( @@ -1641,19 +1652,13 @@ HookReply DerivationGoal::tryBuildHook() try { - /* Tell the hook about system features (beyond the system type) - required from the build machine. (The hook could parse the - drv file itself, but this is easier.) */ - Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures")); - for (auto & i : features) checkStoreName(i); /* !!! abuse */ - /* Send the request to the hook. */ worker.hook->sink << "try" << (worker.getNrLocalBuilds() < settings.maxBuildJobs ? 1 : 0) << drv->platform << drvPath - << features; + << parsedDrv->getRequiredSystemFeatures(); worker.hook->sink.flush(); /* Read the first line of input, which should be a word indicating @@ -1793,23 +1798,26 @@ static void preloadNSS() { void DerivationGoal::startBuilder() { /* Right platform? */ - if (!drv->canBuildLocally()) { - throw Error( - format("a '%1%' is required to build '%3%', but I am a '%2%'") - % drv->platform % settings.thisSystem % drvPath); - } + if (!parsedDrv->canBuildLocally()) + throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}", + drv->platform, + concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()), + drvPath, + settings.thisSystem, + concatStringsSep(", ", settings.systemFeatures)); if (drv->isBuiltin()) preloadNSS(); #if __APPLE__ - additionalSandboxProfile = get(drv->env, "__sandboxProfile"); + additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or(""); #endif /* Are we doing a chroot build? 
*/ { + auto noChroot = parsedDrv->getBoolAttr("__noChroot"); if (settings.sandboxMode == smEnabled) { - if (get(drv->env, "__noChroot") == "1") + if (noChroot) throw Error(format("derivation '%1%' has '__noChroot' set, " "but that's not allowed when 'sandbox' is 'true'") % drvPath); #if __APPLE__ @@ -1822,7 +1830,7 @@ void DerivationGoal::startBuilder() else if (settings.sandboxMode == smDisabled) useChroot = false; else if (settings.sandboxMode == smRelaxed) - useChroot = !fixedOutput && get(drv->env, "__noChroot") != "1"; + useChroot = !fixedOutput && !noChroot; } if (worker.store.storeDir != worker.store.realStoreDir) { @@ -1873,7 +1881,7 @@ void DerivationGoal::startBuilder() writeStructuredAttrs(); /* Handle exportReferencesGraph(), if set. */ - if (!drv->env.count("__json")) { + if (!parsedDrv->getStructuredAttrs()) { /* The `exportReferencesGraph' feature allows the references graph to be passed to a builder. This attribute should be a list of pairs [name1 path1 name2 path2 ...]. The references graph of @@ -1938,7 +1946,7 @@ void DerivationGoal::startBuilder() PathSet allowedPaths = settings.allowedImpureHostPrefixes; /* This works like the above, except on a per-derivation level */ - Strings impurePaths = tokenizeString<Strings>(get(drv->env, "__impureHostDeps")); + auto impurePaths = parsedDrv->getStringsAttr("__impureHostDeps").value_or(Strings()); for (auto & i : impurePaths) { bool found = false; @@ -2306,7 +2314,7 @@ void DerivationGoal::initEnv() passAsFile is ignored in structure mode because it's not needed (attributes are not passed through the environment, so there is no size constraint). */ - if (!drv->env.count("__json")) { + if (!parsedDrv->getStructuredAttrs()) { StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile")); int fileNr = 0; @@ -2353,8 +2361,8 @@ void DerivationGoal::initEnv() fixed-output derivations is by definition pure (since we already know the cryptographic hash of the output). */ if (fixedOutput) { - Strings varNames = tokenizeString<Strings>(get(drv->env, "impureEnvVars")); - for (auto & i : varNames) env[i] = getEnv(i); + for (auto & i : parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings())) + env[i] = getEnv(i); } /* Currently structured log messages piggyback on stderr, but we @@ -2369,111 +2377,103 @@ static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*"); void DerivationGoal::writeStructuredAttrs() { - auto jsonAttr = drv->env.find("__json"); - if (jsonAttr == drv->env.end()) return; - - try { + auto & structuredAttrs = parsedDrv->getStructuredAttrs(); + if (!structuredAttrs) return; - auto jsonStr = rewriteStrings(jsonAttr->second, inputRewrites); + auto json = *structuredAttrs; - auto json = nlohmann::json::parse(jsonStr); - - /* Add an "outputs" object containing the output paths. */ - nlohmann::json outputs; - for (auto & i : drv->outputs) - outputs[i.first] = rewriteStrings(i.second.path, inputRewrites); - json["outputs"] = outputs; - - /* Handle exportReferencesGraph. */ - auto e = json.find("exportReferencesGraph"); - if (e != json.end() && e->is_object()) { - for (auto i = e->begin(); i != e->end(); ++i) { - std::ostringstream str; - { - JSONPlaceholder jsonRoot(str, true); - PathSet storePaths; - for (auto & p : *i) - storePaths.insert(p.get<std::string>()); - worker.store.pathInfoToJSON(jsonRoot, - exportReferences(storePaths), false, true); - } - json[i.key()] = nlohmann::json::parse(str.str()); // urgh + /* Add an "outputs" object containing the output paths. 
*/ + nlohmann::json outputs; + for (auto & i : drv->outputs) + outputs[i.first] = rewriteStrings(i.second.path, inputRewrites); + json["outputs"] = outputs; + + /* Handle exportReferencesGraph. */ + auto e = json.find("exportReferencesGraph"); + if (e != json.end() && e->is_object()) { + for (auto i = e->begin(); i != e->end(); ++i) { + std::ostringstream str; + { + JSONPlaceholder jsonRoot(str, true); + PathSet storePaths; + for (auto & p : *i) + storePaths.insert(p.get<std::string>()); + worker.store.pathInfoToJSON(jsonRoot, + exportReferences(storePaths), false, true); } + json[i.key()] = nlohmann::json::parse(str.str()); // urgh } + } - writeFile(tmpDir + "/.attrs.json", json.dump()); - - /* As a convenience to bash scripts, write a shell file that - maps all attributes that are representable in bash - - namely, strings, integers, nulls, Booleans, and arrays and - objects consisting entirely of those values. (So nested - arrays or objects are not supported.) */ + writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites)); - auto handleSimpleType = [](const nlohmann::json & value) -> std::experimental::optional<std::string> { - if (value.is_string()) - return shellEscape(value); + /* As a convenience to bash scripts, write a shell file that + maps all attributes that are representable in bash - + namely, strings, integers, nulls, Booleans, and arrays and + objects consisting entirely of those values. (So nested + arrays or objects are not supported.) */ - if (value.is_number()) { - auto f = value.get<float>(); - if (std::ceil(f) == f) - return std::to_string(value.get<int>()); - } + auto handleSimpleType = [](const nlohmann::json & value) -> std::experimental::optional<std::string> { + if (value.is_string()) + return shellEscape(value); - if (value.is_null()) - return std::string("''"); + if (value.is_number()) { + auto f = value.get<float>(); + if (std::ceil(f) == f) + return std::to_string(value.get<int>()); + } - if (value.is_boolean()) - return value.get<bool>() ? std::string("1") : std::string(""); + if (value.is_null()) + return std::string("''"); - return {}; - }; + if (value.is_boolean()) + return value.get<bool>() ? 
std::string("1") : std::string(""); - std::string jsonSh; + return {}; + }; - for (auto i = json.begin(); i != json.end(); ++i) { + std::string jsonSh; - if (!std::regex_match(i.key(), shVarName)) continue; + for (auto i = json.begin(); i != json.end(); ++i) { - auto & value = i.value(); + if (!std::regex_match(i.key(), shVarName)) continue; - auto s = handleSimpleType(value); - if (s) - jsonSh += fmt("declare %s=%s\n", i.key(), *s); + auto & value = i.value(); - else if (value.is_array()) { - std::string s2; - bool good = true; + auto s = handleSimpleType(value); + if (s) + jsonSh += fmt("declare %s=%s\n", i.key(), *s); - for (auto i = value.begin(); i != value.end(); ++i) { - auto s3 = handleSimpleType(i.value()); - if (!s3) { good = false; break; } - s2 += *s3; s2 += ' '; - } + else if (value.is_array()) { + std::string s2; + bool good = true; - if (good) - jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2); + for (auto i = value.begin(); i != value.end(); ++i) { + auto s3 = handleSimpleType(i.value()); + if (!s3) { good = false; break; } + s2 += *s3; s2 += ' '; } - else if (value.is_object()) { - std::string s2; - bool good = true; + if (good) + jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2); + } - for (auto i = value.begin(); i != value.end(); ++i) { - auto s3 = handleSimpleType(i.value()); - if (!s3) { good = false; break; } - s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3); - } + else if (value.is_object()) { + std::string s2; + bool good = true; - if (good) - jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2); + for (auto i = value.begin(); i != value.end(); ++i) { + auto s3 = handleSimpleType(i.value()); + if (!s3) { good = false; break; } + s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3); } - } - - writeFile(tmpDir + "/.attrs.sh", jsonSh); - } catch (std::exception & e) { - throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what()); + if (good) + jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2); + } } + + writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites)); } @@ -2628,7 +2628,7 @@ void DerivationGoal::runChild() createDirs(chrootRootDir + "/dev/shm"); createDirs(chrootRootDir + "/dev/pts"); ss.push_back("/dev/full"); - if (pathExists("/dev/kvm")) + if (settings.systemFeatures.get().count("kvm") && pathExists("/dev/kvm")) ss.push_back("/dev/kvm"); ss.push_back("/dev/null"); ss.push_back("/dev/random"); @@ -2917,7 +2917,7 @@ void DerivationGoal::runChild() writeFile(sandboxFile, sandboxProfile); - bool allowLocalNetworking = get(drv->env, "__darwinAllowLocalNetworking") == "1"; + bool allowLocalNetworking = parsedDrv->getBoolAttr("__darwinAllowLocalNetworking"); /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */ @@ -2989,10 +2989,9 @@ void DerivationGoal::runChild() /* Parse a list of reference specifiers. Each element must either be a store path, or the symbolic name of the output of the derivation (such as `out'). 
*/ -PathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv, string attr) +PathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv, const Strings & paths) { PathSet result; - Paths paths = tokenizeString<Paths>(attr); for (auto & i : paths) { if (store.isStorePath(i)) result.insert(i); @@ -3017,7 +3016,7 @@ void DerivationGoal::registerOutputs() if (allValid) return; } - ValidPathInfos infos; + std::map<std::string, ValidPathInfo> infos; /* Set of inodes seen during calls to canonicalisePathMetaData() for this build's outputs. This needs to be shared between @@ -3121,7 +3120,7 @@ void DerivationGoal::registerOutputs() the derivation to its content-addressed location. */ Hash h2 = recursive ? hashPath(h.type, actualPath).first : hashFile(h.type, actualPath); - Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]); + Path dest = worker.store.makeFixedOutputPath(recursive, h2, storePathToName(path)); if (h != h2) { @@ -3202,48 +3201,6 @@ void DerivationGoal::registerOutputs() debug(format("referenced input: '%1%'") % i); } - /* Enforce `allowedReferences' and friends. */ - auto checkRefs = [&](const string & attrName, bool allowed, bool recursive) { - if (drv->env.find(attrName) == drv->env.end()) return; - - PathSet spec = parseReferenceSpecifiers(worker.store, *drv, get(drv->env, attrName)); - - PathSet used; - if (recursive) { - /* Our requisites are the union of the closures of our references. */ - for (auto & i : references) - /* Don't call computeFSClosure on ourselves. */ - if (path != i) - worker.store.computeFSClosure(i, used); - } else - used = references; - - PathSet badPaths; - - for (auto & i : used) - if (allowed) { - if (spec.find(i) == spec.end()) - badPaths.insert(i); - } else { - if (spec.find(i) != spec.end()) - badPaths.insert(i); - } - - if (!badPaths.empty()) { - string badPathsStr; - for (auto & i : badPaths) { - badPathsStr += "\n\t"; - badPathsStr += i; - } - throw BuildError(format("output '%1%' is not allowed to refer to the following paths:%2%") % actualPath % badPathsStr); - } - }; - - checkRefs("allowedReferences", true, false); - checkRefs("allowedRequisites", true, true); - checkRefs("disallowedReferences", false, false); - checkRefs("disallowedRequisites", false, true); - if (curRound == nrRounds) { worker.store.optimisePath(actualPath); // FIXME: combine with scanForReferences() worker.markContentsGood(path); @@ -3259,11 +3216,14 @@ void DerivationGoal::registerOutputs() if (!info.references.empty()) info.ca.clear(); - infos.push_back(info); + infos[i.first] = info; } if (buildMode == bmCheck) return; + /* Apply output checks. */ + checkOutputs(infos); + /* Compare the result with the previous round, and report which path is different, if any.*/ if (curRound > 1 && prevInfos != infos) { @@ -3271,16 +3231,16 @@ void DerivationGoal::registerOutputs() for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j) if (!(*i == *j)) { result.isNonDeterministic = true; - Path prev = i->path + checkSuffix; + Path prev = i->second.path + checkSuffix; bool prevExists = keepPreviousRound && pathExists(prev); auto msg = prevExists - ? fmt("output '%1%' of '%2%' differs from '%3%' from previous round", i->path, drvPath, prev) - : fmt("output '%1%' of '%2%' differs from previous round", i->path, drvPath); + ? 
fmt("output '%1%' of '%2%' differs from '%3%' from previous round", i->second.path, drvPath, prev) + : fmt("output '%1%' of '%2%' differs from previous round", i->second.path, drvPath); auto diffHook = settings.diffHook; if (prevExists && diffHook != "" && runDiffHook) { try { - auto diff = runProgram(diffHook, true, {prev, i->path}); + auto diff = runProgram(diffHook, true, {prev, i->second.path}); if (diff != "") printError(chomp(diff)); } catch (Error & error) { @@ -3325,7 +3285,11 @@ void DerivationGoal::registerOutputs() /* Register each output path as valid, and register the sets of paths referenced by each of them. If there are cycles in the outputs, this will fail. */ - worker.store.registerValidPaths(infos); + { + ValidPathInfos infos2; + for (auto & i : infos) infos2.push_back(i.second); + worker.store.registerValidPaths(infos2); + } /* In case of a fixed-output derivation hash mismatch, throw an exception now that we have registered the output as valid. */ @@ -3334,6 +3298,153 @@ void DerivationGoal::registerOutputs() } +void DerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & outputs) +{ + std::map<Path, const ValidPathInfo &> outputsByPath; + for (auto & output : outputs) + outputsByPath.emplace(output.second.path, output.second); + + for (auto & output : outputs) { + auto & outputName = output.first; + auto & info = output.second; + + struct Checks + { + std::experimental::optional<uint64_t> maxSize, maxClosureSize; + std::experimental::optional<Strings> allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites; + }; + + /* Compute the closure and closure size of some output. This + is slightly tricky because some of its references (namely + other outputs) may not be valid yet. */ + auto getClosure = [&](const Path & path) + { + uint64_t closureSize = 0; + PathSet pathsDone; + std::queue<Path> pathsLeft; + pathsLeft.push(path); + + while (!pathsLeft.empty()) { + auto path = pathsLeft.front(); + pathsLeft.pop(); + if (!pathsDone.insert(path).second) continue; + + auto i = outputsByPath.find(path); + if (i != outputsByPath.end()) { + closureSize += i->second.narSize; + for (auto & ref : i->second.references) + pathsLeft.push(ref); + } else { + auto info = worker.store.queryPathInfo(path); + closureSize += info->narSize; + for (auto & ref : info->references) + pathsLeft.push(ref); + } + } + + return std::make_pair(pathsDone, closureSize); + }; + + auto checkRefs = [&](const std::experimental::optional<Strings> & value, bool allowed, bool recursive) + { + if (!value) return; + + PathSet spec = parseReferenceSpecifiers(worker.store, *drv, *value); + + PathSet used = recursive ? 
getClosure(info.path).first : info.references; + + PathSet badPaths; + + for (auto & i : used) + if (allowed) { + if (spec.find(i) == spec.end()) + badPaths.insert(i); + } else { + if (spec.find(i) != spec.end()) + badPaths.insert(i); + } + + if (!badPaths.empty()) { + string badPathsStr; + for (auto & i : badPaths) { + badPathsStr += "\n "; + badPathsStr += i; + } + throw BuildError("output '%s' is not allowed to refer to the following paths:%s", info.path, badPathsStr); + } + }; + + auto applyChecks = [&](const Checks & checks) + { + if (checks.maxSize && info.narSize > *checks.maxSize) + throw BuildError("path '%s' is too large at %d bytes; limit is %d bytes", + info.path, info.narSize, *checks.maxSize); + + if (checks.maxClosureSize) { + uint64_t closureSize = getClosure(info.path).second; + if (closureSize > *checks.maxClosureSize) + throw BuildError("closure of path '%s' is too large at %d bytes; limit is %d bytes", + info.path, closureSize, *checks.maxClosureSize); + } + + checkRefs(checks.allowedReferences, true, false); + checkRefs(checks.allowedRequisites, true, true); + checkRefs(checks.disallowedReferences, false, false); + checkRefs(checks.disallowedRequisites, false, true); + }; + + if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) { + auto outputChecks = structuredAttrs->find("outputChecks"); + if (outputChecks != structuredAttrs->end()) { + auto output = outputChecks->find(outputName); + + if (output != outputChecks->end()) { + Checks checks; + + auto maxSize = output->find("maxSize"); + if (maxSize != output->end()) + checks.maxSize = maxSize->get<uint64_t>(); + + auto maxClosureSize = output->find("maxClosureSize"); + if (maxClosureSize != output->end()) + checks.maxClosureSize = maxClosureSize->get<uint64_t>(); + + auto get = [&](const std::string & name) -> std::experimental::optional<Strings> { + auto i = output->find(name); + if (i != output->end()) { + Strings res; + for (auto j = i->begin(); j != i->end(); ++j) { + if (!j->is_string()) + throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath); + res.push_back(j->get<std::string>()); + } + checks.disallowedRequisites = res; + return res; + } + return {}; + }; + + checks.allowedReferences = get("allowedReferences"); + checks.allowedRequisites = get("allowedRequisites"); + checks.disallowedReferences = get("disallowedReferences"); + checks.disallowedRequisites = get("disallowedRequisites"); + + applyChecks(checks); + } + } + } else { + // legacy non-structured-attributes case + Checks checks; + checks.allowedReferences = parsedDrv->getStringsAttr("allowedReferences"); + checks.allowedRequisites = parsedDrv->getStringsAttr("allowedRequisites"); + checks.disallowedReferences = parsedDrv->getStringsAttr("disallowedReferences"); + checks.disallowedRequisites = parsedDrv->getStringsAttr("disallowedRequisites"); + applyChecks(checks); + } + } +} + + Path DerivationGoal::openLogFile() { logSize = 0; @@ -3682,6 +3793,19 @@ void SubstitutionGoal::tryNext() } catch (InvalidPath &) { tryNext(); return; + } catch (SubstituterDisabled &) { + if (settings.tryFallback) { + tryNext(); + return; + } + throw; + } catch (Error & e) { + if (settings.tryFallback) { + printError(e.what()); + tryNext(); + return; + } + throw; } /* Update the total expected download size. 
*/ diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index b4dcb35f951a..92aec63a0379 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -24,6 +24,7 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData) Path storePath = getAttr("out"); auto mainUrl = getAttr("url"); + bool unpack = get(drv.env, "unpack", "") == "1"; /* Note: have to use a fresh downloader here because we're in a forked process. */ @@ -40,12 +41,12 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData) request.decompress = false; auto decompressor = makeDecompressionSink( - hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink); + unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink); downloader->download(std::move(request), *decompressor); decompressor->finish(); }); - if (get(drv.env, "unpack", "") == "1") + if (unpack) restorePath(storePath, *source); else writeFile(storePath, *source); diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 1e187ec5e954..3961126fff9c 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -36,12 +36,6 @@ Path BasicDerivation::findOutput(const string & id) const } -bool BasicDerivation::willBuildLocally() const -{ - return get(env, "preferLocalBuild") == "1" && canBuildLocally(); -} - - bool BasicDerivation::substitutesAllowed() const { return get(env, "allowSubstitutes", "1") == "1"; @@ -54,14 +48,6 @@ bool BasicDerivation::isBuiltin() const } -bool BasicDerivation::canBuildLocally() const -{ - return platform == settings.thisSystem - || settings.extraPlatforms.get().count(platform) > 0 - || isBuiltin(); -} - - Path writeDerivation(ref<Store> store, const Derivation & drv, const string & name, RepairFlag repair) { diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh index 7b97730d3bf2..9753e796db5f 100644 --- a/src/libstore/derivations.hh +++ b/src/libstore/derivations.hh @@ -56,14 +56,10 @@ struct BasicDerivation the given derivation. */ Path findOutput(const string & id) const; - bool willBuildLocally() const; - bool substitutesAllowed() const; bool isBuiltin() const; - bool canBuildLocally() const; - /* Return true iff this is a fixed-output derivation. */ bool isFixedOutput() const; diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 973fca0b130f..f44f1836b31e 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -345,7 +345,7 @@ struct CurlDownloader : public Downloader done = true; try { - act.progress(result.data->size(), result.data->size()); + act.progress(result.bodySize, result.bodySize); callback(std::move(result)); } catch (...) { done = true; @@ -710,11 +710,12 @@ void Downloader::download(DownloadRequest && request, Sink & sink) /* If the buffer is full, then go to sleep until the calling thread wakes us up (i.e. when it has removed data from the - buffer). Note: this does stall the download thread. */ - while (state->data.size() > 1024 * 1024) { - if (state->quit) return; + buffer). We don't wait forever to prevent stalling the + download thread. (Hopefully sleeping will throttle the + sender.) 
*/ + if (state->data.size() > 1024 * 1024) { debug("download buffer is full; going to sleep"); - state.wait(state->request); + state.wait_for(state->request, std::chrono::seconds(10)); } /* Append data to the buffer and wake up the calling @@ -736,30 +737,36 @@ void Downloader::download(DownloadRequest && request, Sink & sink) state->request.notify_one(); }}); - auto state(_state->lock()); - while (true) { checkInterrupt(); - /* If no data is available, then wait for the download thread - to wake us up. */ - if (state->data.empty()) { + std::string chunk; + + /* Grab data if available, otherwise wait for the download + thread to wake us up. */ + { + auto state(_state->lock()); + + while (state->data.empty()) { - if (state->quit) { - if (state->exc) std::rethrow_exception(state->exc); - break; + if (state->quit) { + if (state->exc) std::rethrow_exception(state->exc); + return; + } + + state.wait(state->avail); } - state.wait(state->avail); - } + chunk = std::move(state->data); - /* If data is available, then flush it to the sink and wake up - the download thread if it's blocked on a full buffer. */ - if (!state->data.empty()) { - sink((unsigned char *) state->data.data(), state->data.size()); - state->data.clear(); state->request.notify_one(); } + + /* Flush the data to the sink and wake up the download thread + if it's blocked on a full buffer. We don't hold the state + lock while doing this to prevent blocking the download + thread if sink() takes a long time. */ + sink((unsigned char *) chunk.data(), chunk.size()); } } diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index d95db56726cb..a9c07b23a6f3 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -86,6 +86,21 @@ unsigned int Settings::getDefaultCores() return std::max(1U, std::thread::hardware_concurrency()); } +StringSet Settings::getDefaultSystemFeatures() +{ + /* For backwards compatibility, accept some "features" that are + used in Nixpkgs to route builds to certain machines but don't + actually require anything special on the machines. */ + StringSet features{"nixos-test", "benchmark", "big-parallel"}; + + #if __linux__ + if (access("/dev/kvm", R_OK | W_OK) == 0) + features.insert("kvm"); + #endif + + return features; +} + const string nixVersion = PACKAGE_VERSION; template<> void BaseSetting<SandboxMode>::set(const std::string & str) diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index f589078dbb98..6b3e204536f1 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -32,6 +32,8 @@ class Settings : public Config { unsigned int getDefaultCores(); + StringSet getDefaultSystemFeatures(); + public: Settings(); @@ -80,9 +82,9 @@ public: /* Whether to show build log output in real time. */ bool verboseBuild = true; - /* If verboseBuild is false, the number of lines of the tail of - the log to show if a build fails. */ - size_t logLines = 10; + Setting<size_t> logLines{this, 10, "log-lines", + "If verbose-build is false, the number of lines of the tail of " + "the log to show if a build fails."}; MaxBuildJobsSetting maxBuildJobs{this, 1, "max-jobs", "Maximum number of parallel build jobs. \"auto\" means use number of cores.", @@ -261,6 +263,10 @@ public: "These may be supported natively (e.g. 
armv7 on some aarch64 CPUs " "or using hacks like qemu-user."}; + Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(), + "system-features", + "Optional features that this system implements (like \"kvm\")."}; + Setting<Strings> substituters{this, nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(), "substituters", diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index ab524d523cf2..8da0e2f9d82a 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -13,6 +13,14 @@ private: Path cacheUri; + struct State + { + bool enabled = true; + std::chrono::steady_clock::time_point disabledUntil; + }; + + Sync<State> _state; + public: HttpBinaryCacheStore( @@ -46,8 +54,33 @@ public: protected: + void maybeDisable() + { + auto state(_state.lock()); + if (state->enabled && settings.tryFallback) { + int t = 60; + printError("disabling binary cache '%s' for %s seconds", getUri(), t); + state->enabled = false; + state->disabledUntil = std::chrono::steady_clock::now() + std::chrono::seconds(t); + } + } + + void checkEnabled() + { + auto state(_state.lock()); + if (state->enabled) return; + if (std::chrono::steady_clock::now() > state->disabledUntil) { + state->enabled = true; + debug("re-enabling binary cache '%s'", getUri()); + return; + } + throw SubstituterDisabled("substituter '%s' is disabled", getUri()); + } + bool fileExists(const std::string & path) override { + checkEnabled(); + try { DownloadRequest request(cacheUri + "/" + path); request.head = true; @@ -59,6 +92,7 @@ protected: bucket is unlistable, so treat 403 as 404. */ if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) return false; + maybeDisable(); throw; } } @@ -86,12 +120,14 @@ protected: void getFile(const std::string & path, Sink & sink) override { + checkEnabled(); auto request(makeRequest(path)); try { getDownloader()->download(std::move(request), sink); } catch (DownloadError & e) { if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri()); + maybeDisable(); throw; } } @@ -99,15 +135,18 @@ protected: void getFile(const std::string & path, Callback<std::shared_ptr<std::string>> callback) override { + checkEnabled(); + auto request(makeRequest(path)); getDownloader()->enqueueDownload(request, - {[callback](std::future<DownloadResult> result) { + {[callback, this](std::future<DownloadResult> result) { try { callback(result.get().data); } catch (DownloadError & e) { if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) return callback(std::shared_ptr<std::string>()); + maybeDisable(); callback.rethrow(); } catch (...) { callback.rethrow(); diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index c8117c0c6508..216f3417c4a8 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -880,6 +880,12 @@ void LocalStore::querySubstitutablePathInfos(const PathSet & paths, narInfo ? 
narInfo->fileSize : 0, info->narSize}; } catch (InvalidPath) { + } catch (SubstituterDisabled) { + } catch (Error & e) { + if (settings.tryFallback) + printError(e.what()); + else + throw; } } } diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc new file mode 100644 index 000000000000..dc3286482736 --- /dev/null +++ b/src/libstore/parsed-derivations.cc @@ -0,0 +1,111 @@ +#include "parsed-derivations.hh" + +namespace nix { + +ParsedDerivation::ParsedDerivation(const Path & drvPath, BasicDerivation & drv) + : drvPath(drvPath), drv(drv) +{ + /* Parse the __json attribute, if any. */ + auto jsonAttr = drv.env.find("__json"); + if (jsonAttr != drv.env.end()) { + try { + structuredAttrs = nlohmann::json::parse(jsonAttr->second); + } catch (std::exception & e) { + throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what()); + } + } +} + +std::experimental::optional<std::string> ParsedDerivation::getStringAttr(const std::string & name) const +{ + if (structuredAttrs) { + auto i = structuredAttrs->find(name); + if (i == structuredAttrs->end()) + return {}; + else { + if (!i->is_string()) + throw Error("attribute '%s' of derivation '%s' must be a string", name, drvPath); + return i->get<std::string>(); + } + } else { + auto i = drv.env.find(name); + if (i == drv.env.end()) + return {}; + else + return i->second; + } +} + +bool ParsedDerivation::getBoolAttr(const std::string & name, bool def) const +{ + if (structuredAttrs) { + auto i = structuredAttrs->find(name); + if (i == structuredAttrs->end()) + return def; + else { + if (!i->is_boolean()) + throw Error("attribute '%s' of derivation '%s' must be a Boolean", name, drvPath); + return i->get<bool>(); + } + } else { + auto i = drv.env.find(name); + if (i == drv.env.end()) + return def; + else + return i->second == "1"; + } +} + +std::experimental::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name) const +{ + if (structuredAttrs) { + auto i = structuredAttrs->find(name); + if (i == structuredAttrs->end()) + return {}; + else { + if (!i->is_array()) + throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath); + Strings res; + for (auto j = i->begin(); j != i->end(); ++j) { + if (!j->is_string()) + throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath); + res.push_back(j->get<std::string>()); + } + return res; + } + } else { + auto i = drv.env.find(name); + if (i == drv.env.end()) + return {}; + else + return tokenizeString<Strings>(i->second); + } +} + +StringSet ParsedDerivation::getRequiredSystemFeatures() const +{ + StringSet res; + for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings())) + res.insert(i); + return res; +} + +bool ParsedDerivation::canBuildLocally() const +{ + if (drv.platform != settings.thisSystem.get() + && !settings.extraPlatforms.get().count(drv.platform) + && !drv.isBuiltin()) + return false; + + for (auto & feature : getRequiredSystemFeatures()) + if (!settings.systemFeatures.get().count(feature)) return false; + + return true; +} + +bool ParsedDerivation::willBuildLocally() const +{ + return getBoolAttr("preferLocalBuild") && canBuildLocally(); +} + +} diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh new file mode 100644 index 000000000000..0a82c146172b --- /dev/null +++ b/src/libstore/parsed-derivations.hh @@ -0,0 +1,35 @@ +#include "derivations.hh" + +#include <nlohmann/json.hpp> + +namespace nix { + +class ParsedDerivation 
+{ + Path drvPath; + BasicDerivation & drv; + std::experimental::optional<nlohmann::json> structuredAttrs; + +public: + + ParsedDerivation(const Path & drvPath, BasicDerivation & drv); + + const std::experimental::optional<nlohmann::json> & getStructuredAttrs() const + { + return structuredAttrs; + } + + std::experimental::optional<std::string> getStringAttr(const std::string & name) const; + + bool getBoolAttr(const std::string & name, bool def = false) const; + + std::experimental::optional<Strings> getStringsAttr(const std::string & name) const; + + StringSet getRequiredSystemFeatures() const; + + bool canBuildLocally() const; + + bool willBuildLocally() const; +}; + +} diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index eff5d252419f..def140cfbe18 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -161,7 +161,8 @@ void RemoteStore::initConnection(Connection & conn) if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11) conn.to << false; - conn.processStderr(); + auto ex = conn.processStderr(); + if (ex) std::rethrow_exception(ex); } catch (Error & e) { throw Error("cannot open connection to remote store '%s': %s", getUri(), e.what()); @@ -195,22 +196,68 @@ void RemoteStore::setOptions(Connection & conn) conn.to << i.first << i.second.value; } - conn.processStderr(); + auto ex = conn.processStderr(); + if (ex) std::rethrow_exception(ex); +} + + +/* A wrapper around Pool<RemoteStore::Connection>::Handle that marks + the connection as bad (causing it to be closed) if a non-daemon + exception is thrown before the handle is closed. Such an exception + causes a deviation from the expected protocol and therefore a + desynchronization between the client and daemon. */ +struct ConnectionHandle +{ + Pool<RemoteStore::Connection>::Handle handle; + bool daemonException = false; + + ConnectionHandle(Pool<RemoteStore::Connection>::Handle && handle) + : handle(std::move(handle)) + { } + + ConnectionHandle(ConnectionHandle && h) + : handle(std::move(h.handle)) + { } + + ~ConnectionHandle() + { + if (!daemonException && std::uncaught_exception()) { + handle.markBad(); + debug("closing daemon connection because of an exception"); + } + } + + RemoteStore::Connection * operator -> () { return &*handle; } + + void processStderr(Sink * sink = 0, Source * source = 0) + { + auto ex = handle->processStderr(sink, source); + if (ex) { + daemonException = true; + std::rethrow_exception(ex); + } + } +}; + + +ConnectionHandle RemoteStore::getConnection() +{ + return ConnectionHandle(connections->get()); } bool RemoteStore::isValidPathUncached(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopIsValidPath << path; - conn->processStderr(); + conn.processStderr(); return readInt(conn->from); } PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubstitute) { - auto conn(connections->get()); + auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { PathSet res; for (auto & i : paths) @@ -218,7 +265,7 @@ PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybe return res; } else { conn->to << wopQueryValidPaths << paths; - conn->processStderr(); + conn.processStderr(); return readStorePaths<PathSet>(*this, conn->from); } } @@ -226,27 +273,27 @@ PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybe PathSet RemoteStore::queryAllValidPaths() { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << 
wopQueryAllValidPaths; - conn->processStderr(); + conn.processStderr(); return readStorePaths<PathSet>(*this, conn->from); } PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths) { - auto conn(connections->get()); + auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { PathSet res; for (auto & i : paths) { conn->to << wopHasSubstitutes << i; - conn->processStderr(); + conn.processStderr(); if (readInt(conn->from)) res.insert(i); } return res; } else { conn->to << wopQuerySubstitutablePaths << paths; - conn->processStderr(); + conn.processStderr(); return readStorePaths<PathSet>(*this, conn->from); } } @@ -257,14 +304,14 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, { if (paths.empty()) return; - auto conn(connections->get()); + auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { for (auto & i : paths) { SubstitutablePathInfo info; conn->to << wopQuerySubstitutablePathInfo << i; - conn->processStderr(); + conn.processStderr(); unsigned int reply = readInt(conn->from); if (reply == 0) continue; info.deriver = readString(conn->from); @@ -278,7 +325,7 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, } else { conn->to << wopQuerySubstitutablePathInfos << paths; - conn->processStderr(); + conn.processStderr(); size_t count = readNum<size_t>(conn->from); for (size_t n = 0; n < count; n++) { Path path = readStorePath(*this, conn->from); @@ -300,10 +347,10 @@ void RemoteStore::queryPathInfoUncached(const Path & path, try { std::shared_ptr<ValidPathInfo> info; { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryPathInfo << path; try { - conn->processStderr(); + conn.processStderr(); } catch (Error & e) { // Ugly backwards compatibility hack. 
if (e.msg().find("is not valid") != std::string::npos) @@ -335,9 +382,9 @@ void RemoteStore::queryPathInfoUncached(const Path & path, void RemoteStore::queryReferrers(const Path & path, PathSet & referrers) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryReferrers << path; - conn->processStderr(); + conn.processStderr(); PathSet referrers2 = readStorePaths<PathSet>(*this, conn->from); referrers.insert(referrers2.begin(), referrers2.end()); } @@ -345,36 +392,36 @@ void RemoteStore::queryReferrers(const Path & path, PathSet RemoteStore::queryValidDerivers(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryValidDerivers << path; - conn->processStderr(); + conn.processStderr(); return readStorePaths<PathSet>(*this, conn->from); } PathSet RemoteStore::queryDerivationOutputs(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryDerivationOutputs << path; - conn->processStderr(); + conn.processStderr(); return readStorePaths<PathSet>(*this, conn->from); } PathSet RemoteStore::queryDerivationOutputNames(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryDerivationOutputNames << path; - conn->processStderr(); + conn.processStderr(); return readStrings<PathSet>(conn->from); } Path RemoteStore::queryPathFromHashPart(const string & hashPart) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryPathFromHashPart << hashPart; - conn->processStderr(); + conn.processStderr(); Path path = readString(conn->from); if (!path.empty()) assertStorePath(path); return path; @@ -384,7 +431,7 @@ Path RemoteStore::queryPathFromHashPart(const string & hashPart) void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) { - auto conn(connections->get()); + auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) { conn->to << wopImportPaths; @@ -403,7 +450,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, ; }); - conn->processStderr(0, source2.get()); + conn.processStderr(0, source2.get()); auto importedPaths = readStorePaths<PathSet>(*this, conn->from); assert(importedPaths.size() <= 1); @@ -417,7 +464,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, << repair << !checkSigs; bool tunnel = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21; if (!tunnel) copyNAR(source, conn->to); - conn->processStderr(0, tunnel ? &source : nullptr); + conn.processStderr(0, tunnel ? &source : nullptr); } } @@ -427,7 +474,7 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath, { if (repair) throw Error("repairing is not supported when building through the Nix daemon"); - auto conn(connections->get()); + auto conn(getConnection()); Path srcPath(absPath(_srcPath)); @@ -445,13 +492,13 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath, dumpPath(srcPath, conn->to, filter); } conn->to.warn = false; - conn->processStderr(); + conn.processStderr(); } catch (SysError & e) { /* Daemon closed while we were sending the path. Probably OOM or I/O error. 
*/ if (e.errNo == EPIPE) try { - conn->processStderr(); + conn.processStderr(); } catch (EndOfFile & e) { } throw; } @@ -465,17 +512,17 @@ Path RemoteStore::addTextToStore(const string & name, const string & s, { if (repair) throw Error("repairing is not supported when building through the Nix daemon"); - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopAddTextToStore << name << s << references; - conn->processStderr(); + conn.processStderr(); return readStorePath(*this, conn->from); } void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopBuildPaths; if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) { conn->to << drvPaths; @@ -494,7 +541,7 @@ void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) drvPaths2.insert(string(i, 0, i.find('!'))); conn->to << drvPaths2; } - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } @@ -502,9 +549,9 @@ void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv, BuildMode buildMode) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopBuildDerivation << drvPath << drv << buildMode; - conn->processStderr(); + conn.processStderr(); BuildResult res; unsigned int status; conn->from >> status >> res.errorMsg; @@ -515,45 +562,45 @@ BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDeriva void RemoteStore::ensurePath(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopEnsurePath << path; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } void RemoteStore::addTempRoot(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopAddTempRoot << path; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } void RemoteStore::addIndirectRoot(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopAddIndirectRoot << path; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } void RemoteStore::syncWithGC() { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopSyncWithGC; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } Roots RemoteStore::findRoots() { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopFindRoots; - conn->processStderr(); + conn.processStderr(); size_t count = readNum<size_t>(conn->from); Roots result; while (count--) { @@ -567,7 +614,7 @@ Roots RemoteStore::findRoots() void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness @@ -575,7 +622,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) /* removed options */ << 0 << 0 << 0; - conn->processStderr(); + conn.processStderr(); results.paths = readStrings<PathSet>(conn->from); results.bytesFreed = readLongLong(conn->from); @@ -590,27 +637,27 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) void RemoteStore::optimiseStore() { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopOptimiseStore; - conn->processStderr(); + conn.processStderr(); 
readInt(conn->from); } bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopVerifyStore << checkContents << repair; - conn->processStderr(); + conn.processStderr(); return readInt(conn->from); } void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopAddSignatures << storePath << sigs; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } @@ -620,13 +667,13 @@ void RemoteStore::queryMissing(const PathSet & targets, unsigned long long & downloadSize, unsigned long long & narSize) { { - auto conn(connections->get()); + auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19) // Don't hold the connection handle in the fallback case // to prevent a deadlock. goto fallback; conn->to << wopQueryMissing << targets; - conn->processStderr(); + conn.processStderr(); willBuild = readStorePaths<PathSet>(*this, conn->from); willSubstitute = readStorePaths<PathSet>(*this, conn->from); unknown = readStorePaths<PathSet>(*this, conn->from); @@ -642,7 +689,7 @@ void RemoteStore::queryMissing(const PathSet & targets, void RemoteStore::connect() { - auto conn(connections->get()); + auto conn(getConnection()); } @@ -686,7 +733,7 @@ static Logger::Fields readFields(Source & from) } -void RemoteStore::Connection::processStderr(Sink * sink, Source * source) +std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source) { to.flush(); @@ -711,7 +758,7 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source) else if (msg == STDERR_ERROR) { string error = readString(from); unsigned int status = readInt(from); - throw Error(status, error); + return std::make_exception_ptr(Error(status, error)); } else if (msg == STDERR_NEXT) @@ -745,6 +792,8 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source) else throw Error("got unknown message type %x from Nix daemon", msg); } + + return nullptr; } static std::string uriScheme = "unix://"; diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 16daee8b6731..4f554b5980e8 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -14,6 +14,7 @@ class Pid; struct FdSink; struct FdSource; template<typename T> class Pool; +struct ConnectionHandle; /* FIXME: RemoteStore is a misnomer - should be something like @@ -105,6 +106,7 @@ protected: struct Connection { + AutoCloseFD fd; FdSink to; FdSource from; unsigned int daemonVersion; @@ -112,7 +114,7 @@ protected: virtual ~Connection(); - void processStderr(Sink * sink = 0, Source * source = 0); + std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0); }; ref<Connection> openConnectionWrapper(); @@ -125,6 +127,10 @@ protected: virtual void setOptions(Connection & conn); + ConnectionHandle getConnection(); + + friend struct ConnectionHandle; + private: std::atomic_bool failed{false}; @@ -142,11 +148,6 @@ public: private: - struct Connection : RemoteStore::Connection - { - AutoCloseFD fd; - }; - ref<RemoteStore::Connection> openConnection() override; std::experimental::optional<std::string> path; }; diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 7711388f05a9..ba11ce6bb6de 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -19,8 +19,6 @@ #include <aws/core/utils/logging/LogMacros.h> 
#include <aws/core/utils/threading/Executor.h> #include <aws/s3/S3Client.h> -#include <aws/s3/model/CreateBucketRequest.h> -#include <aws/s3/model/GetBucketLocationRequest.h> #include <aws/s3/model/GetObjectRequest.h> #include <aws/s3/model/HeadObjectRequest.h> #include <aws/s3/model/ListObjectsRequest.h> @@ -202,32 +200,6 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore { if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) { - /* Create the bucket if it doesn't already exists. */ - // FIXME: HeadBucket would be more appropriate, but doesn't return - // an easily parsed 404 message. - auto res = s3Helper.client->GetBucketLocation( - Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName)); - - if (!res.IsSuccess()) { - if (res.GetError().GetErrorType() != Aws::S3::S3Errors::NO_SUCH_BUCKET) - throw Error(format("AWS error checking bucket '%s': %s") % bucketName % res.GetError().GetMessage()); - - printInfo("creating S3 bucket '%s'...", bucketName); - - // Stupid S3 bucket locations. - auto bucketConfig = Aws::S3::Model::CreateBucketConfiguration(); - if (s3Helper.config->region != "us-east-1") - bucketConfig.SetLocationConstraint( - Aws::S3::Model::BucketLocationConstraintMapper::GetBucketLocationConstraintForName( - s3Helper.config->region)); - - checkAws(format("AWS error creating bucket '%s'") % bucketName, - s3Helper.client->CreateBucket( - Aws::S3::Model::CreateBucketRequest() - .WithBucket(bucketName) - .WithCreateBucketConfiguration(bucketConfig))); - } - BinaryCacheStore::init(); diskCache->createCache(getUri(), storeDir, wantMassQuery_, priority); diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 1f42097fccfb..92e2685f7f66 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -320,6 +320,8 @@ ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath) void Store::queryPathInfo(const Path & storePath, Callback<ref<ValidPathInfo>> callback) { + assertStorePath(storePath); + auto hashPart = storePathToHash(storePath); try { @@ -610,7 +612,7 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, }); srcStore->narFromPath({storePath}, wrapperSink); }, [&]() { - throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri()); + throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri()); }); dstStore->addToStore(*info, *source, repair, checkSigs); diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index c2f964e11f76..106b2be5e6b2 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -23,6 +23,7 @@ MakeError(BuildError, Error) /* denotes a permanent build failure */ MakeError(InvalidPath, Error) MakeError(Unsupported, Error) MakeError(SubstituteGone, Error) +MakeError(SubstituterDisabled, Error) struct BasicDerivation; |
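The new src/libstore/parsed-derivations.{cc,hh} files above centralize derivation-attribute lookups, so callers no longer read drv.env directly and plain-environment and __json (structured-attributes) derivations go through one interface. The following usage sketch is hypothetical, not part of the commit; it assumes compilation inside the Nix source tree at this revision (same headers and 'nix' namespace as build.cc) and mirrors the checks that DerivationGoal::startBuilder() now performs:

    #include "parsed-derivations.hh"
    #include "globals.hh"
    #include "util.hh"

    namespace nix {

    /* Refuse to build unless the platform and required system features
       match this machine, and respect '__noChroot' vs. the sandbox mode. */
    void ensureBuildableHere(const Path & drvPath, BasicDerivation & drv)
    {
        ParsedDerivation parsed(drvPath, drv);

        if (!parsed.canBuildLocally())
            throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
                drv.platform,
                concatStringsSep(", ", parsed.getRequiredSystemFeatures()),
                drvPath,
                settings.thisSystem,
                concatStringsSep(", ", settings.systemFeatures));

        /* Boolean attributes take a default, so a missing '__noChroot' reads as false. */
        if (parsed.getBoolAttr("__noChroot") && settings.sandboxMode == smEnabled)
            throw Error("derivation '%s' has '__noChroot' set, but that's not allowed when 'sandbox' is 'true'", drvPath);
    }

    }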
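The registerOutputs()/checkOutputs() changes in build.cc read per-output limits from an "outputChecks" object inside the structured (__json) attributes, and otherwise fall back to the legacy allowedReferences/allowedRequisites/disallowedReferences/disallowedRequisites environment attributes. This stand-alone sketch only illustrates the JSON shape that checkOutputs() looks for; the concrete names and values are invented:

    #include <nlohmann/json.hpp>
    #include <iostream>

    int main()
    {
        nlohmann::json attrs;

        /* Per-output checks, keyed by output name, as consumed by
           DerivationGoal::checkOutputs(). */
        auto & out = attrs["outputChecks"]["out"];
        out["maxSize"] = 1024 * 1024;                 /* NAR size limit, in bytes */
        out["maxClosureSize"] = 256 * 1024 * 1024;    /* limit on the output's closure size */
        out["allowedReferences"] = nlohmann::json::array({"out"});   /* store paths or output names */
        out["disallowedRequisites"] = nlohmann::json::array();

        std::cout << attrs.dump(2) << std::endl;
    }

The reference lists accept the same specifiers as before (store paths or output names such as "out"); maxSize and maxClosureSize are new and apply to the output's NAR size and closure size respectively.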
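remote-store.cc now funnels every daemon round-trip through a ConnectionHandle whose destructor marks the pooled connection bad when a non-daemon exception is propagating, since such an exception likely leaves client and daemon desynchronized; to make that distinction possible, Connection::processStderr() returns a std::exception_ptr for daemon-reported errors instead of throwing. A self-contained sketch of that RAII pattern, using stand-in types rather than the real Pool/Connection classes:

    #include <exception>
    #include <iostream>
    #include <stdexcept>

    struct Connection { bool bad = false; };   /* stand-in for the pooled daemon connection */

    struct Handle
    {
        Connection & conn;
        bool daemonException = false;          /* set when the daemon itself reported the error */

        explicit Handle(Connection & c) : conn(c) { }

        ~Handle()
        {
            /* Only protocol-level (non-daemon) exceptions poison the connection;
               std::uncaught_exception() mirrors what the commit uses. */
            if (!daemonException && std::uncaught_exception()) {
                conn.bad = true;
                std::cerr << "closing daemon connection because of an exception\n";
            }
        }
    };

    int main()
    {
        Connection c;
        try {
            Handle h(c);
            throw std::runtime_error("protocol desync");  /* ~Handle runs while unwinding */
        } catch (...) { }
        std::cout << "connection marked bad: " << std::boolalpha << c.bad << "\n";
    }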