Diffstat (limited to 'src')
43 files changed, 782 insertions, 1967 deletions
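For orientation before the hunks: the download.hh/download.cc changes below add a DownloadOptions struct (expectedETag, verifyTLS, forceProgress) and change downloadFile() to take it, and a new libutil compression.hh provides decompressXZ(). The following is a minimal sketch of a hypothetical caller of these APIs, assuming only the signatures shown in those hunks plus the existing writeFile() from libutil; the function name fetchExample and its arguments are illustrative, not part of the diff:

    #include "download.hh"
    #include "compression.hh"
    #include "util.hh"

    using namespace nix;

    /* Hypothetical caller: download a URL the way builtinFetchurl below
       configures it, skipping TLS verification (the result is assumed to
       be verified by hash elsewhere) and forcing the progress indicator
       even when stderr is not a tty. */
    void fetchExample(const std::string & url, const Path & dest)
    {
        DownloadOptions options;
        options.verifyTLS = false;     /* hash check replaces TLS verification */
        options.forceProgress = true;  /* show progress without a tty */

        auto res = downloadFile(url, options);

        /* If the payload is xz-compressed (magic bytes FD 37 7A 58 5A 00),
           decompress it with the new libutil helper before writing. */
        if (std::string(res.data, 0, 6) == std::string("\xfd" "7zXZ\0", 6))
            res.data = decompressXZ(res.data);

        writeFile(dest, res.data);
    }

Unlike builtinFetchurl in the diff, which feeds the decompressed data to restorePath() when "unpack" is set (the payload is then a NAR), this sketch simply writes the decompressed bytes to a plain file.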
diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh index c8d83814f3ca..0748fbd3f3e1 100644 --- a/src/libexpr/eval-inline.hh +++ b/src/libexpr/eval-inline.hh @@ -78,5 +78,4 @@ inline void EvalState::forceList(Value & v, const Pos & pos) throwTypeError("value is %1% while a list was expected, at %2%", v, pos); } - } diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 74f88e498fed..acf1fbdc1356 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -1291,10 +1291,16 @@ bool EvalState::forceBool(Value & v) } +bool EvalState::isFunctor(Value & fun) +{ + return fun.type == tAttrs && fun.attrs->find(sFunctor) != fun.attrs->end(); +} + + void EvalState::forceFunction(Value & v, const Pos & pos) { forceValue(v); - if (v.type != tLambda && v.type != tPrimOp && v.type != tPrimOpApp) + if (v.type != tLambda && v.type != tPrimOp && v.type != tPrimOpApp && !isFunctor(v)) throwTypeError("value is %1% while a function was expected, at %2%", v, pos); } @@ -1386,7 +1392,7 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context, shell scripting convenience, just like `null'. */ if (v.type == tBool && v.boolean) return "1"; if (v.type == tBool && !v.boolean) return ""; - if (v.type == tInt) return int2String(v.integer); + if (v.type == tInt) return std::to_string(v.integer); if (v.type == tNull) return ""; if (v.isList()) { diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 8df0084fd57a..eb55f6d4d431 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -213,6 +213,8 @@ public: elements and attributes are compared recursively. */ bool eqValues(Value & v1, Value & v2); + bool isFunctor(Value & fun); + void callFunction(Value & fun, Value & arg, Value & v, const Pos & pos); void callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos); diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk index 4c1f4de19187..5de9ccc6d011 100644 --- a/src/libexpr/local.mk +++ b/src/libexpr/local.mk @@ -6,9 +6,14 @@ libexpr_DIR := $(d) libexpr_SOURCES := $(wildcard $(d)/*.cc) $(d)/lexer-tab.cc $(d)/parser-tab.cc +libexpr_CXXFLAGS := -Wno-deprecated-register + libexpr_LIBS = libutil libstore libformat -libexpr_LDFLAGS = -ldl +libexpr_LDFLAGS = +ifneq ($(OS), FreeBSD) + libexpr_LDFLAGS += -ldl +endif # The dependency on libgc must be propagated (i.e. meaning that # programs/libraries that use libexpr must explicitly pass -lgc), diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh index a7e6ef762120..65b288e1ff3e 100644 --- a/src/libmain/shared.hh +++ b/src/libmain/shared.hh @@ -88,7 +88,7 @@ extern volatile ::sig_atomic_t blockInt; string showBytes(unsigned long long bytes); -class GCResults; +struct GCResults; struct PrintFreed { diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 2e3a2221857e..e1ccb1eaf136 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -62,7 +62,7 @@ #define DEFAULT_ALLOWED_IMPURE_PREFIXES "/System/Library /usr/lib /dev /bin/sh" #else #define SANDBOX_ENABLED 0 - #define DEFAULT_ALLOWED_IMPURE_PREFIXES "/bin" "/usr/bin" + #define DEFAULT_ALLOWED_IMPURE_PREFIXES "" #endif #if CHROOT_ENABLED @@ -74,6 +74,7 @@ #if __linux__ #include <sys/personality.h> +#include <sys/mman.h> #endif #if HAVE_STATVFS @@ -766,9 +767,6 @@ private: /* RAII object to delete the chroot directory. */ std::shared_ptr<AutoDelete> autoDelChroot; - /* All inputs that are regular files. */ - PathSet regularInputPaths; - /* Whether this is a fixed-output derivation. 
*/ bool fixedOutput; @@ -780,6 +778,12 @@ private: DirsInChroot dirsInChroot; typedef map<string, string> Environment; Environment env; +#if SANDBOX_ENABLED + typedef string SandboxProfile; + SandboxProfile additionalSandboxProfile; + + AutoDelete autoDelSandbox; +#endif /* Hash rewriting. */ HashRewrites rewritesToTmp, rewritesFromTmp; @@ -793,13 +797,19 @@ private: temporary paths. */ PathSet redirectedBadOutputs; - /* Set of inodes seen during calls to canonicalisePathMetaData() - for this build's outputs. This needs to be shared between - outputs to allow hard links between outputs. */ - InodesSeen inodesSeen; - BuildResult result; + /* The current round, if we're building multiple times. */ + unsigned int curRound = 1; + + unsigned int nrRounds; + + /* Path registration info from the previous round, if we're + building multiple times. Since this contains the hash, it + allows us to compare whether two rounds produced the same + result. */ + ValidPathInfos prevInfos; + public: DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode = bmNormal); @@ -809,7 +819,7 @@ public: void timedOut() override; - string key() + string key() override { /* Ensure that derivations get built in order of their name, i.e. a derivation named "aardvark" always comes before @@ -818,7 +828,7 @@ public: return "b$" + storePathToName(drvPath) + "$" + drvPath; } - void work(); + void work() override; Path getDrvPath() { @@ -866,8 +876,8 @@ private: void deleteTmpDir(bool force); /* Callback used by the worker to write to the log. */ - void handleChildOutput(int fd, const string & data); - void handleEOF(int fd); + void handleChildOutput(int fd, const string & data) override; + void handleEOF(int fd) override; /* Return the set of (in)valid paths. */ PathSet checkPathValidity(bool returnValid, bool checkHash); @@ -1240,6 +1250,10 @@ void DerivationGoal::inputsRealised() for (auto & i : drv->outputs) if (i.second.hash == "") fixedOutput = false; + /* Don't repeat fixed-output derivations since they're already + verified by their output hash.*/ + nrRounds = fixedOutput ? 1 : settings.get("build-repeat", 0) + 1; + /* Okay, try to build. Note that here we don't wait for a build slot to become available, since we don't need one if there is a build hook. 
*/ @@ -1248,11 +1262,22 @@ void DerivationGoal::inputsRealised() } -static bool canBuildLocally(const string & platform) +static bool isBuiltin(const BasicDerivation & drv) +{ + return string(drv.builder, 0, 8) == "builtin:"; +} + + +static bool canBuildLocally(const BasicDerivation & drv) { - return platform == settings.thisSystem + return drv.platform == settings.thisSystem + || isBuiltin(drv) #if __linux__ - || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux") + || (drv.platform == "i686-linux" && settings.thisSystem == "x86_64-linux") + || (drv.platform == "armv6l-linux" && settings.thisSystem == "armv7l-linux") +#elif __FreeBSD__ + || (drv.platform == "i686-linux" && settings.thisSystem == "x86_64-freebsd") + || (drv.platform == "i686-linux" && settings.thisSystem == "i686-freebsd") #endif ; } @@ -1267,7 +1292,7 @@ static string get(const StringPairs & map, const string & key, const string & de bool willBuildLocally(const BasicDerivation & drv) { - return get(drv.env, "preferLocalBuild") == "1" && canBuildLocally(drv.platform); + return get(drv.env, "preferLocalBuild") == "1" && canBuildLocally(drv); } @@ -1277,12 +1302,6 @@ bool substitutesAllowed(const BasicDerivation & drv) } -static bool isBuiltin(const BasicDerivation & drv) -{ - return string(drv.builder, 0, 8) == "builtin:"; -} - - void DerivationGoal::tryToBuild() { trace("trying to build"); @@ -1420,6 +1439,9 @@ void replaceValidPath(const Path & storePath, const Path tmpPath) } +MakeError(NotDeterministic, BuildError) + + void DerivationGoal::buildDone() { trace("build done"); @@ -1519,6 +1541,15 @@ void DerivationGoal::buildDone() deleteTmpDir(true); + /* Repeat the build if necessary. */ + if (curRound++ < nrRounds) { + outputLocks.unlock(); + buildUser.release(); + state = &DerivationGoal::tryToBuild; + worker.wakeUp(shared_from_this()); + return; + } + /* It is now safe to delete the lock files, since all future lockers will see that the output paths are valid; they will not create new lock files with the same names as the old @@ -1552,6 +1583,7 @@ void DerivationGoal::buildDone() % drvPath % 1 % e.msg()); st = + dynamic_cast<NotDeterministic*>(&e) ? BuildResult::NotDeterministic : statusOk(status) ? BuildResult::OutputRejected : fixedOutput || diskFull ? BuildResult::TransientFailure : BuildResult::PermanentFailure; @@ -1678,13 +1710,16 @@ int childEntry(void * arg) void DerivationGoal::startBuilder() { - startNest(nest, lvlInfo, format( - buildMode == bmRepair ? "repairing path(s) %1%" : - buildMode == bmCheck ? "checking path(s) %1%" : - "building path(s) %1%") % showPaths(missingPaths)); + auto f = format( + buildMode == bmRepair ? "repairing path(s) %1%" : + buildMode == bmCheck ? "checking path(s) %1%" : + nrRounds > 1 ? "building path(s) %1% (round %2%/%3%)" : + "building path(s) %1%"); + f.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit); + startNest(nest, lvlInfo, f % showPaths(missingPaths) % curRound % nrRounds); /* Right platform? */ - if (!canBuildLocally(drv->platform)) { + if (!canBuildLocally(*drv)) { if (settings.printBuildTrace) printMsg(lvlError, format("@ unsupported-platform %1% %2%") % drvPath % drv->platform); throw Error( @@ -1693,6 +1728,7 @@ void DerivationGoal::startBuilder() } /* Construct the environment passed to the builder. */ + env.clear(); /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when PATH is not set. 
We don't want this, so we fill it in with some dummy @@ -1732,7 +1768,7 @@ void DerivationGoal::startBuilder() if (passAsFile.find(i.first) == passAsFile.end()) { env[i.first] = i.second; } else { - Path p = tmpDir + "/.attr-" + int2String(fileNr++); + Path p = tmpDir + "/.attr-" + std::to_string(fileNr++); writeFile(p, i.second); filesToChown.insert(p); env[i.first + "Path"] = p; @@ -1845,12 +1881,14 @@ void DerivationGoal::startBuilder() work properly. Purity checking for fixed-output derivations is somewhat pointless anyway. */ { - string x = settings.get("build-use-chroot", string("false")); + string x = settings.get("build-use-sandbox", + /* deprecated alias */ + settings.get("build-use-chroot", string("false"))); if (x != "true" && x != "false" && x != "relaxed") - throw Error("option ‘build-use-chroot’ must be set to one of ‘true’, ‘false’ or ‘relaxed’"); + throw Error("option ‘build-use-sandbox’ must be set to one of ‘true’, ‘false’ or ‘relaxed’"); if (x == "true") { if (get(drv->env, "__noChroot") == "1") - throw Error(format("derivation ‘%1%’ has ‘__noChroot’ set, but that's not allowed when ‘build-use-chroot’ is ‘true’") % drvPath); + throw Error(format("derivation ‘%1%’ has ‘__noChroot’ set, but that's not allowed when ‘build-use-sandbox’ is ‘true’") % drvPath); useChroot = true; } else if (x == "false") @@ -1869,10 +1907,17 @@ void DerivationGoal::startBuilder() /* Allow a user-configurable set of directories from the host file system. */ - PathSet dirs = tokenizeString<StringSet>(settings.get("build-chroot-dirs", defaultChrootDirs)); - PathSet dirs2 = tokenizeString<StringSet>(settings.get("build-extra-chroot-dirs", string(""))); + PathSet dirs = tokenizeString<StringSet>( + settings.get("build-sandbox-paths", + /* deprecated alias with lower priority */ + settings.get("build-chroot-dirs", defaultChrootDirs))); + PathSet dirs2 = tokenizeString<StringSet>( + settings.get("build-extra-chroot-dirs", + settings.get("build-extra-sandbox-paths", string("")))); dirs.insert(dirs2.begin(), dirs2.end()); + dirsInChroot.clear(); + for (auto & i : dirs) { size_t p = i.find('='); if (p == string::npos) @@ -1890,6 +1935,9 @@ void DerivationGoal::startBuilder() for (auto & i : closure) dirsInChroot[i] = i; +#if SANDBOX_ENABLED + additionalSandboxProfile = get(drv->env, "__sandboxProfile"); +#endif string allowed = settings.get("allowed-impure-host-deps", string(DEFAULT_ALLOWED_IMPURE_PREFIXES)); PathSet allowedPaths = tokenizeString<StringSet>(allowed); @@ -1998,24 +2046,22 @@ void DerivationGoal::startBuilder() StringSource source(sink.s); restorePath(p, source); } - - regularInputPaths.insert(i); } } - /* If we're repairing or checking, it's possible that we're + /* If we're repairing, checking or rebuilding part of a + multiple-outputs derivation, it's possible that we're rebuilding a path that is in settings.dirsInChroot (typically the dependencies of /bin/sh). Throw them out. */ - if (buildMode != bmNormal) - for (auto & i : drv->outputs) - dirsInChroot.erase(i.second.path); + for (auto & i : drv->outputs) + dirsInChroot.erase(i.second.path); #elif SANDBOX_ENABLED /* We don't really have any parent prep work to do (yet?) All work happens in the child, instead. 
*/ #else - throw Error("chroot builds are not supported on this platform"); + throw Error("sandboxing builds is not supported on this platform"); #endif } @@ -2061,10 +2107,10 @@ void DerivationGoal::startBuilder() auto lastPos = std::string::size_type{0}; for (auto nlPos = lines.find('\n'); nlPos != string::npos; nlPos = lines.find('\n', lastPos)) { - auto line = std::string{lines, lastPos, nlPos}; + auto line = std::string{lines, lastPos, nlPos - lastPos}; lastPos = nlPos + 1; if (state == stBegin) { - if (line == "extra-chroot-dirs") { + if (line == "extra-sandbox-paths" || line == "extra-chroot-dirs") { state = stExtraChrootDirs; } else { throw Error(format("unknown pre-build hook command ‘%1%’") @@ -2132,16 +2178,19 @@ void DerivationGoal::startBuilder() ProcessOptions options; options.allowVfork = false; Pid helper = startProcess([&]() { - char stack[32 * 1024]; + size_t stackSize = 1 * 1024 * 1024; + char * stack = (char *) mmap(0, stackSize, + PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); + if (!stack) throw SysError("allocating stack"); int flags = CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD; if (!fixedOutput) flags |= CLONE_NEWNET; - pid_t child = clone(childEntry, stack + sizeof(stack) - 8, flags, this); + pid_t child = clone(childEntry, stack + stackSize, flags, this); if (child == -1 && errno == EINVAL) /* Fallback for Linux < 2.13 where CLONE_NEWPID and CLONE_PARENT are not allowed together. */ - child = clone(childEntry, stack + sizeof(stack) - 8, flags & ~CLONE_NEWPID, this); + child = clone(childEntry, stack + stackSize, flags & ~CLONE_NEWPID, this); if (child == -1) throw SysError("cloning builder process"); - writeFull(builderOut.writeSide, int2String(child) + "\n"); + writeFull(builderOut.writeSide, std::to_string(child) + "\n"); _exit(0); }, options); if (helper.wait(true) != 0) @@ -2409,9 +2458,10 @@ void DerivationGoal::runChild() const char *builder = "invalid"; string sandboxProfile; - if (isBuiltin(*drv)) + if (isBuiltin(*drv)) { ; - else if (useChroot && SANDBOX_ENABLED) { +#if SANDBOX_ENABLED + } else if (useChroot) { /* Lots and lots and lots of file functions freak out if they can't stat their full ancestry */ PathSet ancestry; @@ -2438,7 +2488,7 @@ void DerivationGoal::runChild() for (auto & i : inputPaths) dirsInChroot[i] = i; - /* TODO: we should factor out the policy cleanly, so we don't have to repeat the constants every time... */ + /* This has to appear before import statements */ sandboxProfile += "(version 1)\n"; /* Violations will go to the syslog if you set this. Unfortunately the destination does not appear to be configurable */ @@ -2448,15 +2498,6 @@ void DerivationGoal::runChild() sandboxProfile += "(deny default (with no-log))\n"; } - sandboxProfile += "(allow file-read* file-write-data (literal \"/dev/null\"))\n"; - - sandboxProfile += "(allow file-read-metadata\n" - "\t(literal \"/var\")\n" - "\t(literal \"/tmp\")\n" - "\t(literal \"/etc\")\n" - "\t(literal \"/etc/nix\")\n" - "\t(literal \"/etc/nix/nix.conf\"))\n"; - /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. 
*/ Path globalTmpDir = canonPath(getEnv("TMPDIR", "/tmp"), true); @@ -2464,20 +2505,6 @@ void DerivationGoal::runChild() /* They don't like trailing slashes on subpath directives */ if (globalTmpDir.back() == '/') globalTmpDir.pop_back(); - /* This is where our temp folders are and where most of the building will happen, so we want rwx on it. */ - sandboxProfile += (format("(allow file-read* file-write* process-exec (subpath \"%1%\") (subpath \"/private/tmp\"))\n") % globalTmpDir).str(); - - sandboxProfile += "(allow process-fork)\n"; - sandboxProfile += "(allow sysctl-read)\n"; - sandboxProfile += "(allow signal (target same-sandbox))\n"; - - /* Enables getpwuid (used by git and others) */ - sandboxProfile += "(allow mach-lookup (global-name \"com.apple.system.notification_center\") (global-name \"com.apple.system.opendirectoryd.libinfo\"))\n"; - - /* Allow local networking operations, mostly because lots of test suites use it and it seems mostly harmless */ - sandboxProfile += "(allow network* (local ip) (remote unix-socket))"; - - /* Our rwx outputs */ sandboxProfile += "(allow file-read* file-write* process-exec\n"; for (auto & i : missingPaths) { @@ -2486,11 +2513,9 @@ void DerivationGoal::runChild() sandboxProfile += ")\n"; /* Our inputs (transitive dependencies and any impurities computed above) - Note that the sandbox profile allows file-write* even though it isn't seemingly necessary. First of all, nix's standard user permissioning - mechanism still prevents builders from writing to input directories, so no security/purity is lost. The reason we allow file-write* is that - denying it means the `access` syscall will return EPERM instead of EACCESS, which confuses a few programs that assume (understandably, since - it appears to be a violation of the POSIX spec) that `access` won't do that, and don't deal with it nicely if it does. The most notable of - these is the entire GHC Haskell ecosystem. */ + + without file-write* allowed, access() incorrectly returns EPERM + */ sandboxProfile += "(allow file-read* file-write* process-exec\n"; for (auto & i : dirsInChroot) { if (i.first != i.second) @@ -2507,22 +2532,32 @@ void DerivationGoal::runChild() } sandboxProfile += ")\n"; - /* Our ancestry. N.B: this uses literal on folders, instead of subpath. Without that, - you open up the entire filesystem because you end up with (subpath "/") */ - sandboxProfile += "(allow file-read-metadata\n"; + /* Allow file-read* on full directory hierarchy to self. Allows realpath() */ + sandboxProfile += "(allow file-read*\n"; for (auto & i : ancestry) { sandboxProfile += (format("\t(literal \"%1%\")\n") % i.c_str()).str(); } sandboxProfile += ")\n"; + sandboxProfile += additionalSandboxProfile; + debug("Generated sandbox profile:"); debug(sandboxProfile); + Path sandboxFile = drvPath + ".sb"; + if (pathExists(sandboxFile)) deletePath(sandboxFile); + autoDelSandbox.reset(sandboxFile, false); + + writeFile(sandboxFile, sandboxProfile); + builder = "/usr/bin/sandbox-exec"; args.push_back("sandbox-exec"); - args.push_back("-p"); - args.push_back(sandboxProfile); + args.push_back("-f"); + args.push_back(sandboxFile); + args.push_back("-D"); + args.push_back("_GLOBAL_TMP_DIR=" + globalTmpDir); args.push_back(drv->builder); +#endif } else { builder = drv->builder.c_str(); string builderBasename = baseNameOf(drv->builder); @@ -2596,6 +2631,11 @@ void DerivationGoal::registerOutputs() ValidPathInfos infos; + /* Set of inodes seen during calls to canonicalisePathMetaData() + for this build's outputs. 
This needs to be shared between + outputs to allow hard links between outputs. */ + InodesSeen inodesSeen; + /* Check whether the output paths were created, and grep each output path to determine what other paths it references. Also make all output paths read-only. */ @@ -2612,7 +2652,7 @@ void DerivationGoal::registerOutputs() replaceValidPath(path, actualPath); else if (buildMode != bmCheck && rename(actualPath.c_str(), path.c_str()) == -1) - throw SysError(format("moving build output ‘%1%’ from the chroot to the Nix store") % path); + throw SysError(format("moving build output ‘%1%’ from the sandbox to the Nix store") % path); } if (buildMode != bmCheck) actualPath = path; } else { @@ -2688,7 +2728,7 @@ void DerivationGoal::registerOutputs() Hash h2 = recursive ? hashPath(ht, actualPath).first : hashFile(ht, actualPath); if (h != h2) throw BuildError( - format("output path ‘%1%’ should have %2% hash ‘%3%’, instead has ‘%4%’") + format("Nix expects output path ‘%1%’ to have %2% hash ‘%3%’, instead it has ‘%4%’") % path % i.second.hashAlgo % printHash16or32(h) % printHash16or32(h2)); } @@ -2767,6 +2807,16 @@ void DerivationGoal::registerOutputs() if (buildMode == bmCheck) return; + if (curRound > 1 && prevInfos != infos) + throw NotDeterministic( + format("result of ‘%1%’ differs from previous round; rejecting as non-deterministic") + % drvPath); + + if (curRound < nrRounds) { + prevInfos = infos; + return; + } + /* Register each output path as valid, and register the sets of paths referenced by each of them. If there are cycles in the outputs, this will fail. */ @@ -2859,7 +2909,8 @@ void DerivationGoal::handleChildOutput(int fd, const string & data) printMsg(lvlError, format("%1% killed after writing more than %2% bytes of log output") % getName() % settings.maxLogSize); - timedOut(); // not really a timeout, but close enough + killChild(); + done(BuildResult::LogLimitExceeded); return; } if (verbosity >= settings.buildVerbosity) diff --git a/src/libstore/builtins.cc b/src/libstore/builtins.cc index 25e2e7df30e7..a1c4b48bf62e 100644 --- a/src/libstore/builtins.cc +++ b/src/libstore/builtins.cc @@ -1,5 +1,8 @@ #include "builtins.hh" #include "download.hh" +#include "store-api.hh" +#include "archive.hh" +#include "compression.hh" namespace nix { @@ -7,17 +10,36 @@ void builtinFetchurl(const BasicDerivation & drv) { auto url = drv.env.find("url"); if (url == drv.env.end()) throw Error("attribute ‘url’ missing"); - printMsg(lvlInfo, format("downloading ‘%1%’...") % url->second); - auto data = downloadFile(url->second); // FIXME: show progress + + /* No need to do TLS verification, because we check the hash of + the result anyway. */ + DownloadOptions options; + options.verifyTLS = false; + + /* Show a progress indicator, even though stderr is not a tty. 
*/ + options.forceProgress = true; + + auto data = downloadFile(url->second, options); auto out = drv.env.find("out"); if (out == drv.env.end()) throw Error("attribute ‘url’ missing"); - writeFile(out->second, data.data); + + Path storePath = out->second; + assertStorePath(storePath); + + auto unpack = drv.env.find("unpack"); + if (unpack != drv.env.end() && unpack->second == "1") { + if (string(data.data, 0, 6) == string("\xfd" "7zXZ\0", 6)) + data.data = decompressXZ(data.data); + StringSource source(data.data); + restorePath(storePath, source); + } else + writeFile(storePath, data.data); auto executable = drv.env.find("executable"); if (executable != drv.env.end() && executable->second == "1") { - if (chmod(out->second.c_str(), 0755) == -1) - throw SysError(format("making ‘%1%’ executable") % out->second); + if (chmod(storePath.c_str(), 0755) == -1) + throw SysError(format("making ‘%1%’ executable") % storePath); } } diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 9bf3e13aa9da..822e9a8db867 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -6,8 +6,18 @@ #include <curl/curl.h> +#include <iostream> + + namespace nix { +double getTime() +{ + struct timeval tv; + gettimeofday(&tv, 0); + return tv.tv_sec + (tv.tv_usec / 1000000.0); +} + struct Curl { CURL * curl; @@ -16,6 +26,10 @@ struct Curl struct curl_slist * requestHeaders; + bool showProgress; + double prevProgressTime{0}, startTime{0}; + unsigned int moveBack{1}; + static size_t writeCallback(void * contents, size_t size, size_t nmemb, void * userp) { Curl & c(* (Curl *) userp); @@ -56,11 +70,30 @@ struct Curl return realSize; } - static int progressCallback(void * clientp, double dltotal, double dlnow, double ultotal, double ulnow) + int progressCallback(double dltotal, double dlnow) { + if (showProgress) { + double now = getTime(); + if (prevProgressTime <= now - 1) { + string s = (format(" [%1$.0f/%2$.0f KiB, %3$.1f KiB/s]") + % (dlnow / 1024.0) + % (dltotal / 1024.0) + % (now == startTime ? 
0 : dlnow / 1024.0 / (now - startTime))).str(); + std::cerr << "\e[" << moveBack << "D" << s; + moveBack = s.size(); + std::cerr.flush(); + prevProgressTime = now; + } + } return _isInterrupted; } + static int progressCallback_(void * userp, double dltotal, double dlnow, double ultotal, double ulnow) + { + Curl & c(* (Curl *) userp); + return c.progressCallback(dltotal, dlnow); + } + Curl() { requestHeaders = 0; @@ -69,7 +102,6 @@ struct Curl if (!curl) throw Error("unable to initialize curl"); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); - curl_easy_setopt(curl, CURLOPT_CAINFO, getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt").c_str()); curl_easy_setopt(curl, CURLOPT_USERAGENT, ("Nix/" + nixVersion).c_str()); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); @@ -79,7 +111,8 @@ struct Curl curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, headerCallback); curl_easy_setopt(curl, CURLOPT_HEADERDATA, (void *) &curl); - curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progressCallback); + curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progressCallback_); + curl_easy_setopt(curl, CURLOPT_PROGRESSDATA, (void *) &curl); curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0); } @@ -89,10 +122,19 @@ struct Curl if (requestHeaders) curl_slist_free_all(requestHeaders); } - bool fetch(const string & url, const string & expectedETag = "") + bool fetch(const string & url, const DownloadOptions & options) { + showProgress = options.forceProgress || isatty(STDERR_FILENO); + curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); + if (options.verifyTLS) + curl_easy_setopt(curl, CURLOPT_CAINFO, getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt").c_str()); + else { + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0); + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0); + } + data.clear(); if (requestHeaders) { @@ -100,16 +142,25 @@ struct Curl requestHeaders = 0; } - if (!expectedETag.empty()) { - this->expectedETag = expectedETag; - requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + expectedETag).c_str()); + if (!options.expectedETag.empty()) { + this->expectedETag = options.expectedETag; + requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + options.expectedETag).c_str()); } curl_easy_setopt(curl, CURLOPT_HTTPHEADER, requestHeaders); + if (showProgress) { + std::cerr << (format("downloading ‘%1%’... 
") % url); + std::cerr.flush(); + startTime = getTime(); + } + CURLcode res = curl_easy_perform(curl); + if (showProgress) + //std::cerr << "\e[" << moveBack << "D\e[K\n"; + std::cerr << "\n"; checkInterrupt(); - if (res == CURLE_WRITE_ERROR && etag == expectedETag) return false; + if (res == CURLE_WRITE_ERROR && etag == options.expectedETag) return false; if (res != CURLE_OK) throw DownloadError(format("unable to download ‘%1%’: %2% (%3%)") % url % curl_easy_strerror(res) % res); @@ -123,11 +174,11 @@ struct Curl }; -DownloadResult downloadFile(string url, string expectedETag) +DownloadResult downloadFile(string url, const DownloadOptions & options) { DownloadResult res; Curl curl; - if (curl.fetch(url, expectedETag)) { + if (curl.fetch(url, options)) { res.cached = false; res.data = curl.data; } else @@ -178,13 +229,10 @@ Path downloadFileCached(const string & url, bool unpack) if (!skip) { - if (storePath.empty()) - printMsg(lvlInfo, format("downloading ‘%1%’...") % url); - else - printMsg(lvlInfo, format("checking ‘%1%’...") % url); - try { - auto res = downloadFile(url, expectedETag); + DownloadOptions options; + options.expectedETag = expectedETag; + auto res = downloadFile(url, options); if (!res.cached) storePath = store->addTextToStore(name, res.data, PathSet(), false); @@ -192,7 +240,7 @@ Path downloadFileCached(const string & url, bool unpack) assert(!storePath.empty()); replaceSymlink(storePath, fileLink); - writeFile(dataFile, url + "\n" + res.etag + "\n" + int2String(time(0)) + "\n"); + writeFile(dataFile, url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n"); } catch (DownloadError & e) { if (storePath.empty()) throw; printMsg(lvlError, format("warning: %1%; using cached result") % e.msg()); diff --git a/src/libstore/download.hh b/src/libstore/download.hh index 28c9117e4227..c1cb25b90c32 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -5,13 +5,20 @@ namespace nix { +struct DownloadOptions +{ + string expectedETag; + bool verifyTLS{true}; + bool forceProgress{false}; +}; + struct DownloadResult { bool cached; string data, etag; }; -DownloadResult downloadFile(string url, string expectedETag = ""); +DownloadResult downloadFile(string url, const DownloadOptions & options); Path downloadFileCached(const string & url, bool unpack); diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 73f8489438fc..e704837e8798 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -77,6 +77,11 @@ void Settings::processEnvironment() nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)); nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)); nixDaemonSocketFile = canonPath(nixStateDir + DEFAULT_SOCKET_PATH); + + // should be set with the other config options, but depends on nixLibexecDir +#ifdef __APPLE__ + preBuildHook = nixLibexecDir + "/nix/resolve-system-dependencies.pl"; +#endif } diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 105f3592c685..ebdf19bf1359 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -98,41 +98,41 @@ public: /* Implementations of abstract store API methods. 
*/ - bool isValidPath(const Path & path); + bool isValidPath(const Path & path) override; - PathSet queryValidPaths(const PathSet & paths); + PathSet queryValidPaths(const PathSet & paths) override; - PathSet queryAllValidPaths(); + PathSet queryAllValidPaths() override; - ValidPathInfo queryPathInfo(const Path & path); + ValidPathInfo queryPathInfo(const Path & path) override; - Hash queryPathHash(const Path & path); + Hash queryPathHash(const Path & path) override; - void queryReferences(const Path & path, PathSet & references); + void queryReferences(const Path & path, PathSet & references) override; - void queryReferrers(const Path & path, PathSet & referrers); + void queryReferrers(const Path & path, PathSet & referrers) override; - Path queryDeriver(const Path & path); + Path queryDeriver(const Path & path) override; - PathSet queryValidDerivers(const Path & path); + PathSet queryValidDerivers(const Path & path) override; - PathSet queryDerivationOutputs(const Path & path); + PathSet queryDerivationOutputs(const Path & path) override; - StringSet queryDerivationOutputNames(const Path & path); + StringSet queryDerivationOutputNames(const Path & path) override; - Path queryPathFromHashPart(const string & hashPart); + Path queryPathFromHashPart(const string & hashPart) override; - PathSet querySubstitutablePaths(const PathSet & paths); + PathSet querySubstitutablePaths(const PathSet & paths) override; void querySubstitutablePathInfos(const Path & substituter, PathSet & paths, SubstitutablePathInfos & infos); void querySubstitutablePathInfos(const PathSet & paths, - SubstitutablePathInfos & infos); + SubstitutablePathInfos & infos) override; Path addToStore(const string & name, const Path & srcPath, bool recursive = true, HashType hashAlgo = htSHA256, - PathFilter & filter = defaultPathFilter, bool repair = false); + PathFilter & filter = defaultPathFilter, bool repair = false) override; /* Like addToStore(), but the contents of the path are contained in `dump', which is either a NAR serialisation (if recursive == @@ -142,43 +142,43 @@ public: bool recursive = true, HashType hashAlgo = htSHA256, bool repair = false); Path addTextToStore(const string & name, const string & s, - const PathSet & references, bool repair = false); + const PathSet & references, bool repair = false) override; void exportPath(const Path & path, bool sign, - Sink & sink); + Sink & sink) override; - Paths importPaths(bool requireSignature, Source & source); + Paths importPaths(bool requireSignature, Source & source) override; - void buildPaths(const PathSet & paths, BuildMode buildMode); + void buildPaths(const PathSet & paths, BuildMode buildMode) override; BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv, BuildMode buildMode) override; - void ensurePath(const Path & path); + void ensurePath(const Path & path) override; - void addTempRoot(const Path & path); + void addTempRoot(const Path & path) override; - void addIndirectRoot(const Path & path); + void addIndirectRoot(const Path & path) override; - void syncWithGC(); + void syncWithGC() override; - Roots findRoots(); + Roots findRoots() override; - void collectGarbage(const GCOptions & options, GCResults & results); + void collectGarbage(const GCOptions & options, GCResults & results) override; /* Optimise the disk space usage of the Nix store by hard-linking files with the same contents. */ void optimiseStore(OptimiseStats & stats); /* Generic variant of the above method. 
*/ - void optimiseStore(); + void optimiseStore() override; /* Optimise a single store path. */ void optimisePath(const Path & path); /* Check the integrity of the Nix store. Returns true if errors remain. */ - bool verifyStore(bool checkContents, bool repair); + bool verifyStore(bool checkContents, bool repair) override; /* Register the validity of a path, i.e., that `path' exists, that the paths referenced by it exists, and in the case of an output @@ -197,9 +197,9 @@ public: /* Query whether `path' previously failed to build. */ bool hasPathFailed(const Path & path); - PathSet queryFailedPaths(); + PathSet queryFailedPaths() override; - void clearFailedPaths(const PathSet & paths); + void clearFailedPaths(const PathSet & paths) override; void vacuumDB(); diff --git a/src/libstore/local.mk b/src/libstore/local.mk index bf5c256c949e..e78f47949ad3 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -8,7 +8,7 @@ libstore_SOURCES := $(wildcard $(d)/*.cc) libstore_LIBS = libutil libformat -libstore_LDFLAGS = -lsqlite3 -lbz2 -lcurl +libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) ifeq ($(OS), SunOS) libstore_LDFLAGS += -lsocket @@ -33,3 +33,4 @@ $(d)/local-store.cc: $(d)/schema.sql.hh clean-files += $(d)/schema.sql.hh $(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644)) +$(eval $(call install-file-in, $(d)/sandbox-defaults.sb, $(datadir)/nix, 0644)) diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 6f66961792fb..23cbe7e26b47 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -120,9 +120,9 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa return; } - /* This can still happen on top-level files */ + /* This can still happen on top-level files. */ if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) { - printMsg(lvlDebug, format("‘%1%’ is already linked, with %2% other file(s).") % path % (st.st_nlink - 2)); + printMsg(lvlDebug, format("‘%1%’ is already linked, with %2% other file(s)") % path % (st.st_nlink - 2)); return; } @@ -141,6 +141,7 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa /* Check if this is a known hash. */ Path linkPath = linksDir + "/" + printHash32(hash); + retry: if (!pathExists(linkPath)) { /* Nope, create a hard link in the links directory. 
*/ if (link(path.c_str(), linkPath.c_str()) == 0) { @@ -164,6 +165,12 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa return; } + if (st.st_size != stLink.st_size) { + printMsg(lvlError, format("removing corrupted link ‘%1%’") % linkPath); + unlink(linkPath.c_str()); + goto retry; + } + printMsg(lvlTalkative, format("linking ‘%1%’ to ‘%2%’") % path % linkPath); /* Make the containing directory writable, but only if it's not diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index fdb0975ac0fa..ed9895ac8133 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -5,6 +5,7 @@ #include "archive.hh" #include "affinity.hh" #include "globals.hh" +#include "derivations.hh" #include <sys/types.h> #include <sys/stat.h> @@ -458,7 +459,14 @@ void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv, BuildMode buildMode) { - throw Error("not implemented"); + openConnection(); + to << wopBuildDerivation << drvPath << drv << buildMode; + processStderr(); + BuildResult res; + unsigned int status; + from >> status >> res.errorMsg; + res.status = (BuildResult::Status) status; + return res; } diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 09e250386c5c..b0787c072904 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -24,71 +24,72 @@ public: /* Implementations of abstract store API methods. */ - bool isValidPath(const Path & path); + bool isValidPath(const Path & path) override; - PathSet queryValidPaths(const PathSet & paths); + PathSet queryValidPaths(const PathSet & paths) override; - PathSet queryAllValidPaths(); + PathSet queryAllValidPaths() override; - ValidPathInfo queryPathInfo(const Path & path); + ValidPathInfo queryPathInfo(const Path & path) override; - Hash queryPathHash(const Path & path); + Hash queryPathHash(const Path & path) override; - void queryReferences(const Path & path, PathSet & references); + void queryReferences(const Path & path, PathSet & references) override; - void queryReferrers(const Path & path, PathSet & referrers); + void queryReferrers(const Path & path, PathSet & referrers) override; - Path queryDeriver(const Path & path); + Path queryDeriver(const Path & path) override; - PathSet queryValidDerivers(const Path & path); + PathSet queryValidDerivers(const Path & path) override; - PathSet queryDerivationOutputs(const Path & path); + PathSet queryDerivationOutputs(const Path & path) override; - StringSet queryDerivationOutputNames(const Path & path); + StringSet queryDerivationOutputNames(const Path & path) override; - Path queryPathFromHashPart(const string & hashPart); + Path queryPathFromHashPart(const string & hashPart) override; - PathSet querySubstitutablePaths(const PathSet & paths); + PathSet querySubstitutablePaths(const PathSet & paths) override; void querySubstitutablePathInfos(const PathSet & paths, - SubstitutablePathInfos & infos); + SubstitutablePathInfos & infos) override; Path addToStore(const string & name, const Path & srcPath, bool recursive = true, HashType hashAlgo = htSHA256, - PathFilter & filter = defaultPathFilter, bool repair = false); + PathFilter & filter = defaultPathFilter, bool repair = false) override; Path addTextToStore(const string & name, const string & s, - const PathSet & references, bool repair = false); + const PathSet & references, bool repair = false) override; void exportPath(const 
Path & path, bool sign, - Sink & sink); + Sink & sink) override; - Paths importPaths(bool requireSignature, Source & source); + Paths importPaths(bool requireSignature, Source & source) override; - void buildPaths(const PathSet & paths, BuildMode buildMode); + void buildPaths(const PathSet & paths, BuildMode buildMode) override; BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv, BuildMode buildMode) override; - void ensurePath(const Path & path); + void ensurePath(const Path & path) override; - void addTempRoot(const Path & path); + void addTempRoot(const Path & path) override; - void addIndirectRoot(const Path & path); + void addIndirectRoot(const Path & path) override; - void syncWithGC(); + void syncWithGC() override; - Roots findRoots(); + Roots findRoots() override; - void collectGarbage(const GCOptions & options, GCResults & results); + void collectGarbage(const GCOptions & options, GCResults & results) override; - PathSet queryFailedPaths(); + PathSet queryFailedPaths() override; - void clearFailedPaths(const PathSet & paths); + void clearFailedPaths(const PathSet & paths) override; - void optimiseStore(); + void optimiseStore() override; + + bool verifyStore(bool checkContents, bool repair) override; - bool verifyStore(bool checkContents, bool repair); private: AutoCloseFD fdSocket; FdSink to; diff --git a/src/libstore/sandbox-defaults.sb.in b/src/libstore/sandbox-defaults.sb.in new file mode 100644 index 000000000000..b5e80085fbe2 --- /dev/null +++ b/src/libstore/sandbox-defaults.sb.in @@ -0,0 +1,63 @@ +(allow file-read* file-write-data (literal "/dev/null")) +(allow ipc-posix*) +(allow mach-lookup (global-name "com.apple.SecurityServer")) + +(allow file-read* + (literal "/dev/dtracehelper") + (literal "/dev/tty") + (literal "/dev/autofs_nowait") + (literal "/System/Library/CoreServices/SystemVersion.plist") + (literal "/private/var/run/systemkeychaincheck.done") + (literal "/private/etc/protocols") + (literal "/private/var/tmp") + (literal "/private/var/db") + (subpath "/private/var/db/mds")) + +(allow file-read* + (subpath "/usr/share/icu") + (subpath "/usr/share/locale") + (subpath "/usr/share/zoneinfo")) + +(allow file-write* + (literal "/dev/tty") + (literal "/dev/dtracehelper") + (literal "/mds")) + +(allow file-ioctl (literal "/dev/dtracehelper")) + +(allow file-read-metadata + (literal "/var") + (literal "/tmp") + ; symlinks + (literal "@sysconfdir@") + (literal "@sysconfdir@/nix") + (literal "@sysconfdir@/nix/nix.conf") + (literal "/etc/resolv.conf") + (literal "/private/etc/resolv.conf")) + +(allow file-read* + (literal "/private@sysconfdir@/nix/nix.conf") + (literal "/private/var/run/resolv.conf")) + +; some builders use filehandles other than stdin/stdout +(allow file* + (subpath "/dev/fd") + (literal "/dev/ptmx") + (regex #"^/dev/[pt]ty.*$")) + +; allow everything inside TMP +(allow file* process-exec + (subpath (param "_GLOBAL_TMP_DIR")) + (subpath "/private/tmp")) + +(allow process-fork) +(allow sysctl-read) +(allow signal (target same-sandbox)) + +; allow getpwuid (for git and other packages) +(allow mach-lookup + (global-name "com.apple.system.notification_center") + (global-name "com.apple.system.opendirectoryd.libinfo")) + +; allow local networking +(allow network* (local ip) (remote unix-socket)) diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 235017503213..9cc5fd45b7c4 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -87,10 +87,17 @@ struct ValidPathInfo Path deriver; Hash hash; 
PathSet references; - time_t registrationTime; - unsigned long long narSize; // 0 = unknown + time_t registrationTime = 0; + unsigned long long narSize = 0; // 0 = unknown unsigned long long id; // internal use only - ValidPathInfo() : registrationTime(0), narSize(0) { } + + bool operator == (const ValidPathInfo & i) const + { + return + path == i.path + && hash == i.hash + && references == i.references; + } }; typedef list<ValidPathInfo> ValidPathInfos; @@ -112,7 +119,9 @@ struct BuildResult CachedFailure, TimedOut, MiscFailure, - DependencyFailed + DependencyFailed, + LogLimitExceeded, + NotDeterministic, } status = MiscFailure; std::string errorMsg; //time_t startTime = 0, stopTime = 0; diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index d037d7402ede..5a0ef5117aa6 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -43,7 +43,8 @@ typedef enum { wopQuerySubstitutablePaths = 32, wopQueryValidDerivers = 33, wopOptimiseStore = 34, - wopVerifyStore = 35 + wopVerifyStore = 35, + wopBuildDerivation = 36, } WorkerOp; diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 0187f062b21d..6ee7981432b6 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -243,7 +243,7 @@ static void parse(ParseSink & sink, Source & source, const Path & path) if (i != names.end()) { printMsg(lvlDebug, format("case collision between ‘%1%’ and ‘%2%’") % i->first % name); name += caseHackSuffix; - name += int2String(++i->second); + name += std::to_string(++i->second); } else names[name] = 0; } diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc new file mode 100644 index 000000000000..446fcb781564 --- /dev/null +++ b/src/libutil/compression.cc @@ -0,0 +1,46 @@ +#include "compression.hh" +#include "types.hh" + +#include <lzma.h> + +namespace nix { + +std::string decompressXZ(const std::string & in) +{ + lzma_stream strm = LZMA_STREAM_INIT; + + lzma_ret ret = lzma_stream_decoder( + &strm, UINT64_MAX, LZMA_CONCATENATED); + if (ret != LZMA_OK) + throw Error("unable to initialise lzma decoder"); + + lzma_action action = LZMA_RUN; + uint8_t outbuf[BUFSIZ]; + string res; + strm.next_in = (uint8_t *) in.c_str(); + strm.avail_in = in.size(); + strm.next_out = outbuf; + strm.avail_out = sizeof(outbuf); + + while (true) { + + if (strm.avail_in == 0) + action = LZMA_FINISH; + + lzma_ret ret = lzma_code(&strm, action); + + if (strm.avail_out == 0 || ret == LZMA_STREAM_END) { + res.append((char *) outbuf, sizeof(outbuf) - strm.avail_out); + strm.next_out = outbuf; + strm.avail_out = sizeof(outbuf); + } + + if (ret == LZMA_STREAM_END) + return res; + + if (ret != LZMA_OK) + throw Error("error while decompressing xz file"); + } +} + +} diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh new file mode 100644 index 000000000000..962ce5ac7767 --- /dev/null +++ b/src/libutil/compression.hh @@ -0,0 +1,9 @@ +#pragma once + +#include <string> + +namespace nix { + +std::string decompressXZ(const std::string & in); + +} diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index a83ba0a81817..2d97c5e6b6a7 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -3,16 +3,8 @@ #include <iostream> #include <cstring> -#ifdef HAVE_OPENSSL #include <openssl/md5.h> #include <openssl/sha.h> -#else -extern "C" { -#include "md5.h" -#include "sha1.h" -#include "sha256.h" -} -#endif #include "hash.hh" #include "archive.hh" @@ -40,6 +32,7 @@ Hash::Hash(HashType type) if (type == htMD5) hashSize = md5HashSize; else if (type == 
htSHA1) hashSize = sha1HashSize; else if (type == htSHA256) hashSize = sha256HashSize; + else if (type == htSHA512) hashSize = sha512HashSize; else throw Error("unknown hash type"); assert(hashSize <= maxHashSize); memset(hash, 0, maxHashSize); @@ -115,7 +108,6 @@ const string base32Chars = "0123456789abcdfghijklmnpqrsvwxyz"; string printHash32(const Hash & hash) { - Hash hash2(hash); unsigned int len = hashLength32(hash); string s; @@ -199,6 +191,7 @@ union Ctx MD5_CTX md5; SHA_CTX sha1; SHA256_CTX sha256; + SHA512_CTX sha512; }; @@ -207,6 +200,7 @@ static void start(HashType ht, Ctx & ctx) if (ht == htMD5) MD5_Init(&ctx.md5); else if (ht == htSHA1) SHA1_Init(&ctx.sha1); else if (ht == htSHA256) SHA256_Init(&ctx.sha256); + else if (ht == htSHA512) SHA512_Init(&ctx.sha512); } @@ -216,6 +210,7 @@ static void update(HashType ht, Ctx & ctx, if (ht == htMD5) MD5_Update(&ctx.md5, bytes, len); else if (ht == htSHA1) SHA1_Update(&ctx.sha1, bytes, len); else if (ht == htSHA256) SHA256_Update(&ctx.sha256, bytes, len); + else if (ht == htSHA512) SHA512_Update(&ctx.sha512, bytes, len); } @@ -224,6 +219,7 @@ static void finish(HashType ht, Ctx & ctx, unsigned char * hash) if (ht == htMD5) MD5_Final(hash, &ctx.md5); else if (ht == htSHA1) SHA1_Final(hash, &ctx.sha1); else if (ht == htSHA256) SHA256_Final(hash, &ctx.sha256); + else if (ht == htSHA512) SHA512_Final(hash, &ctx.sha512); } @@ -321,6 +317,7 @@ HashType parseHashType(const string & s) if (s == "md5") return htMD5; else if (s == "sha1") return htSHA1; else if (s == "sha256") return htSHA256; + else if (s == "sha512") return htSHA512; else return htUnknown; } @@ -330,6 +327,7 @@ string printHashType(HashType ht) if (ht == htMD5) return "md5"; else if (ht == htSHA1) return "sha1"; else if (ht == htSHA256) return "sha256"; + else if (ht == htSHA512) return "sha512"; else throw Error("cannot print unknown hash type"); } diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh index 2c6f176ec74c..841b4cb2936c 100644 --- a/src/libutil/hash.hh +++ b/src/libutil/hash.hh @@ -7,19 +7,20 @@ namespace nix { -typedef enum { htUnknown, htMD5, htSHA1, htSHA256 } HashType; +typedef enum { htUnknown, htMD5, htSHA1, htSHA256, htSHA512 } HashType; const int md5HashSize = 16; const int sha1HashSize = 20; const int sha256HashSize = 32; +const int sha512HashSize = 64; extern const string base32Chars; struct Hash { - static const unsigned int maxHashSize = 32; + static const unsigned int maxHashSize = 64; unsigned int hashSize; unsigned char hash[maxHashSize]; diff --git a/src/libutil/local.mk b/src/libutil/local.mk index 8af2e78d9ce4..4dae3305433f 100644 --- a/src/libutil/local.mk +++ b/src/libutil/local.mk @@ -6,10 +6,6 @@ libutil_DIR := $(d) libutil_SOURCES := $(wildcard $(d)/*.cc) -ifeq ($(HAVE_OPENSSL), 1) - libutil_LDFLAGS = $(OPENSSL_LIBS) -else - libutil_SOURCES += $(d)/md5.c $(d)/sha1.c $(d)/sha256.c -endif +libutil_LDFLAGS = -llzma $(OPENSSL_LIBS) libutil_LIBS = libformat diff --git a/src/libutil/md32_common.h b/src/libutil/md32_common.h deleted file mode 100644 index 0cbcfaf8a20b..000000000000 --- a/src/libutil/md32_common.h +++ /dev/null @@ -1,620 +0,0 @@ -/* crypto/md32_common.h */ -/* ==================================================================== - * Copyright (c) 1999-2002 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * licensing@OpenSSL.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ - -/* - * This is a generic 32 bit "collector" for message digest algorithms. - * Whenever needed it collects input character stream into chunks of - * 32 bit values and invokes a block function that performs actual hash - * calculations. - * - * Porting guide. - * - * Obligatory macros: - * - * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN - * this macro defines byte order of input stream. - * HASH_CBLOCK - * size of a unit chunk HASH_BLOCK operates on. - * HASH_LONG - * has to be at lest 32 bit wide, if it's wider, then - * HASH_LONG_LOG2 *has to* be defined along - * HASH_CTX - * context structure that at least contains following - * members: - * typedef struct { - * ... - * HASH_LONG Nl,Nh; - * HASH_LONG data[HASH_LBLOCK]; - * unsigned int num; - * ... - * } HASH_CTX; - * HASH_UPDATE - * name of "Update" function, implemented here. - * HASH_TRANSFORM - * name of "Transform" function, implemented here. - * HASH_FINAL - * name of "Final" function, implemented here. - * HASH_BLOCK_HOST_ORDER - * name of "block" function treating *aligned* input message - * in host byte order, implemented externally. 
- * HASH_BLOCK_DATA_ORDER - * name of "block" function treating *unaligned* input message - * in original (data) byte order, implemented externally (it - * actually is optional if data and host are of the same - * "endianess"). - * HASH_MAKE_STRING - * macro convering context variables to an ASCII hash string. - * - * Optional macros: - * - * B_ENDIAN or L_ENDIAN - * defines host byte-order. - * HASH_LONG_LOG2 - * defaults to 2 if not states otherwise. - * HASH_LBLOCK - * assumed to be HASH_CBLOCK/4 if not stated otherwise. - * HASH_BLOCK_DATA_ORDER_ALIGNED - * alternative "block" function capable of treating - * aligned input message in original (data) order, - * implemented externally. - * - * MD5 example: - * - * #define DATA_ORDER_IS_LITTLE_ENDIAN - * - * #define HASH_LONG MD5_LONG - * #define HASH_LONG_LOG2 MD5_LONG_LOG2 - * #define HASH_CTX MD5_CTX - * #define HASH_CBLOCK MD5_CBLOCK - * #define HASH_LBLOCK MD5_LBLOCK - * #define HASH_UPDATE MD5_Update - * #define HASH_TRANSFORM MD5_Transform - * #define HASH_FINAL MD5_Final - * #define HASH_BLOCK_HOST_ORDER md5_block_host_order - * #define HASH_BLOCK_DATA_ORDER md5_block_data_order - * - * <appro@fy.chalmers.se> - */ - -#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN) -#error "DATA_ORDER must be defined!" -#endif - -#ifndef HASH_CBLOCK -#error "HASH_CBLOCK must be defined!" -#endif -#ifndef HASH_LONG -#error "HASH_LONG must be defined!" -#endif -#ifndef HASH_CTX -#error "HASH_CTX must be defined!" -#endif - -#ifndef HASH_UPDATE -#error "HASH_UPDATE must be defined!" -#endif -#ifndef HASH_TRANSFORM -#error "HASH_TRANSFORM must be defined!" -#endif -#ifndef HASH_FINAL -#error "HASH_FINAL must be defined!" -#endif - -#ifndef HASH_BLOCK_HOST_ORDER -#error "HASH_BLOCK_HOST_ORDER must be defined!" -#endif - -#if 0 -/* - * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED - * isn't defined. - */ -#ifndef HASH_BLOCK_DATA_ORDER -#error "HASH_BLOCK_DATA_ORDER must be defined!" -#endif -#endif - -#ifndef HASH_LBLOCK -#define HASH_LBLOCK (HASH_CBLOCK/4) -#endif - -#ifndef HASH_LONG_LOG2 -#define HASH_LONG_LOG2 2 -#endif - -/* - * Engage compiler specific rotate intrinsic function if available. - */ -#undef ROTATE -#ifndef PEDANTIC -# if defined(_MSC_VER) || defined(__ICC) -# define ROTATE(a,n) _lrotl(a,n) -# elif defined(__MWERKS__) -# if defined(__POWERPC__) -# define ROTATE(a,n) __rlwinm(a,n,0,31) -# elif defined(__MC68K__) - /* Motorola specific tweak. <appro@fy.chalmers.se> */ -# define ROTATE(a,n) ( n<24 ? __rol(a,n) : __ror(a,32-n) ) -# else -# define ROTATE(a,n) __rol(a,n) -# endif -# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) - /* - * Some GNU C inline assembler templates. Note that these are - * rotates by *constant* number of bits! But that's exactly - * what we need here... 
- * <appro@fy.chalmers.se> - */ -# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__) -# define ROTATE(a,n) ({ register unsigned int ret; \ - asm ( \ - "roll %1,%0" \ - : "=r"(ret) \ - : "I"(n), "0"(a) \ - : "cc"); \ - ret; \ - }) -# elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__) -# define ROTATE(a,n) ({ register unsigned int ret; \ - asm ( \ - "rlwinm %0,%1,%2,0,31" \ - : "=r"(ret) \ - : "r"(a), "I"(n)); \ - ret; \ - }) -# endif -# endif -#endif /* PEDANTIC */ - -#if HASH_LONG_LOG2==2 /* Engage only if sizeof(HASH_LONG)== 4 */ -/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */ -#ifdef ROTATE -/* 5 instructions with rotate instruction, else 9 */ -#define REVERSE_FETCH32(a,l) ( \ - l=*(const HASH_LONG *)(a), \ - ((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24))) \ - ) -#else -/* 6 instructions with rotate instruction, else 8 */ -#define REVERSE_FETCH32(a,l) ( \ - l=*(const HASH_LONG *)(a), \ - l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)), \ - ROTATE(l,16) \ - ) -/* - * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|... - * It's rewritten as above for two reasons: - * - RISCs aren't good at long constants and have to explicitely - * compose 'em with several (well, usually 2) instructions in a - * register before performing the actual operation and (as you - * already realized:-) having same constant should inspire the - * compiler to permanently allocate the only register for it; - * - most modern CPUs have two ALUs, but usually only one has - * circuitry for shifts:-( this minor tweak inspires compiler - * to schedule shift instructions in a better way... - * - * <appro@fy.chalmers.se> - */ -#endif -#endif - -#ifndef ROTATE -#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n)))) -#endif - -/* - * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED - * and HASH_BLOCK_HOST_ORDER ought to be the same if input data - * and host are of the same "endianess". It's possible to mask - * this with blank #define HASH_BLOCK_DATA_ORDER though... - * - * <appro@fy.chalmers.se> - */ -#if defined(B_ENDIAN) -# if defined(DATA_ORDER_IS_BIG_ENDIAN) -# if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2 -# define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER -# endif -# endif -#elif defined(L_ENDIAN) -# if defined(DATA_ORDER_IS_LITTLE_ENDIAN) -# if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2 -# define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER -# endif -# endif -#endif - -#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) -#ifndef HASH_BLOCK_DATA_ORDER -#error "HASH_BLOCK_DATA_ORDER must be defined!" -#endif -#endif - -#if defined(DATA_ORDER_IS_BIG_ENDIAN) - -#ifndef PEDANTIC -# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) -# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__) - /* - * This gives ~30-40% performance improvement in SHA-256 compiled - * with gcc [on P4]. Well, first macro to be frank. We can pull - * this trick on x86* platforms only, because these CPUs can fetch - * unaligned data without raising an exception. 
- */ -# define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \ - asm ("bswapl %0":"=r"(r):"0"(r)); \ - (c)+=4; (l)=r; }) -# define HOST_l2c(l,c) ({ unsigned int r=(l); \ - asm ("bswapl %0":"=r"(r):"0"(r)); \ - *((unsigned int *)(c))=r; (c)+=4; r; }) -# endif -# endif -#endif - -#ifndef HOST_c2l -#define HOST_c2l(c,l) (l =(((unsigned long)(*((c)++)))<<24), \ - l|=(((unsigned long)(*((c)++)))<<16), \ - l|=(((unsigned long)(*((c)++)))<< 8), \ - l|=(((unsigned long)(*((c)++))) ), \ - l) -#endif -#define HOST_p_c2l(c,l,n) { \ - switch (n) { \ - case 0: l =((unsigned long)(*((c)++)))<<24; \ - case 1: l|=((unsigned long)(*((c)++)))<<16; \ - case 2: l|=((unsigned long)(*((c)++)))<< 8; \ - case 3: l|=((unsigned long)(*((c)++))); \ - } } -#define HOST_p_c2l_p(c,l,sc,len) { \ - switch (sc) { \ - case 0: l =((unsigned long)(*((c)++)))<<24; \ - if (--len == 0) break; \ - case 1: l|=((unsigned long)(*((c)++)))<<16; \ - if (--len == 0) break; \ - case 2: l|=((unsigned long)(*((c)++)))<< 8; \ - } } -/* NOTE the pointer is not incremented at the end of this */ -#define HOST_c2l_p(c,l,n) { \ - l=0; (c)+=n; \ - switch (n) { \ - case 3: l =((unsigned long)(*(--(c))))<< 8; \ - case 2: l|=((unsigned long)(*(--(c))))<<16; \ - case 1: l|=((unsigned long)(*(--(c))))<<24; \ - } } -#ifndef HOST_l2c -#define HOST_l2c(l,c) (*((c)++)=(unsigned char)(((l)>>24)&0xff), \ - *((c)++)=(unsigned char)(((l)>>16)&0xff), \ - *((c)++)=(unsigned char)(((l)>> 8)&0xff), \ - *((c)++)=(unsigned char)(((l) )&0xff), \ - l) -#endif - -#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN) - -#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__) - /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */ -# define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l) -# define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l) -#endif - -#ifndef HOST_c2l -#define HOST_c2l(c,l) (l =(((unsigned long)(*((c)++))) ), \ - l|=(((unsigned long)(*((c)++)))<< 8), \ - l|=(((unsigned long)(*((c)++)))<<16), \ - l|=(((unsigned long)(*((c)++)))<<24), \ - l) -#endif -#define HOST_p_c2l(c,l,n) { \ - switch (n) { \ - case 0: l =((unsigned long)(*((c)++))); \ - case 1: l|=((unsigned long)(*((c)++)))<< 8; \ - case 2: l|=((unsigned long)(*((c)++)))<<16; \ - case 3: l|=((unsigned long)(*((c)++)))<<24; \ - } } -#define HOST_p_c2l_p(c,l,sc,len) { \ - switch (sc) { \ - case 0: l =((unsigned long)(*((c)++))); \ - if (--len == 0) break; \ - case 1: l|=((unsigned long)(*((c)++)))<< 8; \ - if (--len == 0) break; \ - case 2: l|=((unsigned long)(*((c)++)))<<16; \ - } } -/* NOTE the pointer is not incremented at the end of this */ -#define HOST_c2l_p(c,l,n) { \ - l=0; (c)+=n; \ - switch (n) { \ - case 3: l =((unsigned long)(*(--(c))))<<16; \ - case 2: l|=((unsigned long)(*(--(c))))<< 8; \ - case 1: l|=((unsigned long)(*(--(c)))); \ - } } -#ifndef HOST_l2c -#define HOST_l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \ - *((c)++)=(unsigned char)(((l)>> 8)&0xff), \ - *((c)++)=(unsigned char)(((l)>>16)&0xff), \ - *((c)++)=(unsigned char)(((l)>>24)&0xff), \ - l) -#endif - -#endif - -/* - * Time for some action:-) - */ - -int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len) - { - const unsigned char *data=data_; - register HASH_LONG * p; - register HASH_LONG l; - size_t sw,sc,ew,ec; - - if (len==0) return 1; - - l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL; - /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to - * Wei Dai <weidai@eskimo.com> for pointing it out. 
*/ - if (l < c->Nl) /* overflow */ - c->Nh++; - c->Nh+=(len>>29); /* might cause compiler warning on 16-bit */ - c->Nl=l; - - if (c->num != 0) - { - p=c->data; - sw=c->num>>2; - sc=c->num&0x03; - - if ((c->num+len) >= HASH_CBLOCK) - { - l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l; - for (; sw<HASH_LBLOCK; sw++) - { - HOST_c2l(data,l); p[sw]=l; - } - HASH_BLOCK_HOST_ORDER (c,p,1); - len-=(HASH_CBLOCK-c->num); - c->num=0; - /* drop through and do the rest */ - } - else - { - c->num+=(unsigned int)len; - if ((sc+len) < 4) /* ugly, add char's to a word */ - { - l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l; - } - else - { - ew=(c->num>>2); - ec=(c->num&0x03); - if (sc) - l=p[sw]; - HOST_p_c2l(data,l,sc); - p[sw++]=l; - for (; sw < ew; sw++) - { - HOST_c2l(data,l); p[sw]=l; - } - if (ec) - { - HOST_c2l_p(data,l,ec); p[sw]=l; - } - } - return 1; - } - } - - sw=len/HASH_CBLOCK; - if (sw > 0) - { -#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED) - /* - * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined - * only if sizeof(HASH_LONG)==4. - */ - if ((((size_t)data)%4) == 0) - { - /* data is properly aligned so that we can cast it: */ - HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw); - sw*=HASH_CBLOCK; - data+=sw; - len-=sw; - } - else -#if !defined(HASH_BLOCK_DATA_ORDER) - while (sw--) - { - memcpy (p=c->data,data,HASH_CBLOCK); - HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1); - data+=HASH_CBLOCK; - len-=HASH_CBLOCK; - } -#endif -#endif -#if defined(HASH_BLOCK_DATA_ORDER) - { - HASH_BLOCK_DATA_ORDER(c,data,sw); - sw*=HASH_CBLOCK; - data+=sw; - len-=sw; - } -#endif - } - - if (len!=0) - { - p = c->data; - c->num = len; - ew=len>>2; /* words to copy */ - ec=len&0x03; - for (; ew; ew--,p++) - { - HOST_c2l(data,l); *p=l; - } - HOST_c2l_p(data,l,ec); - *p=l; - } - return 1; - } - - -void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data) - { -#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED) - if ((((size_t)data)%4) == 0) - /* data is properly aligned so that we can cast it: */ - HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1); - else -#if !defined(HASH_BLOCK_DATA_ORDER) - { - memcpy (c->data,data,HASH_CBLOCK); - HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1); - } -#endif -#endif -#if defined(HASH_BLOCK_DATA_ORDER) - HASH_BLOCK_DATA_ORDER (c,data,1); -#endif - } - - -int HASH_FINAL (unsigned char *md, HASH_CTX *c) - { - register HASH_LONG *p; - register unsigned long l; - register int i,j; - static const unsigned char end[4]={0x80,0x00,0x00,0x00}; - const unsigned char *cp=end; - - /* c->num should definitly have room for at least one more byte. */ - p=c->data; - i=c->num>>2; - j=c->num&0x03; - -#if 0 - /* purify often complains about the following line as an - * Uninitialized Memory Read. While this can be true, the - * following p_c2l macro will reset l when that case is true. - * This is because j&0x03 contains the number of 'valid' bytes - * already in p[i]. If and only if j&0x03 == 0, the UMR will - * occur but this is also the only time p_c2l will do - * l= *(cp++) instead of l|= *(cp++) - * Many thanks to Alex Tang <altitude@cic.net> for pickup this - * 'potential bug' */ -#ifdef PURIFY - if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */ -#endif - l=p[i]; -#else - l = (j==0) ? 
0 : p[i]; -#endif - HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */ - - if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */ - { - if (i<HASH_LBLOCK) p[i]=0; - HASH_BLOCK_HOST_ORDER (c,p,1); - i=0; - } - for (; i<(HASH_LBLOCK-2); i++) - p[i]=0; - -#if defined(DATA_ORDER_IS_BIG_ENDIAN) - p[HASH_LBLOCK-2]=c->Nh; - p[HASH_LBLOCK-1]=c->Nl; -#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN) - p[HASH_LBLOCK-2]=c->Nl; - p[HASH_LBLOCK-1]=c->Nh; -#endif - HASH_BLOCK_HOST_ORDER (c,p,1); - -#ifndef HASH_MAKE_STRING -#error "HASH_MAKE_STRING must be defined!" -#else - HASH_MAKE_STRING(c,md); -#endif - - c->num=0; - /* clear stuff, HASH_BLOCK may be leaving some stuff on the stack - * but I'm not worried :-) - OPENSSL_cleanse((void *)c,sizeof(HASH_CTX)); - */ - return 1; - } - -#ifndef MD32_REG_T -#define MD32_REG_T long -/* - * This comment was originaly written for MD5, which is why it - * discusses A-D. But it basically applies to all 32-bit digests, - * which is why it was moved to common header file. - * - * In case you wonder why A-D are declared as long and not - * as MD5_LONG. Doing so results in slight performance - * boost on LP64 architectures. The catch is we don't - * really care if 32 MSBs of a 64-bit register get polluted - * with eventual overflows as we *save* only 32 LSBs in - * *either* case. Now declaring 'em long excuses the compiler - * from keeping 32 MSBs zeroed resulting in 13% performance - * improvement under SPARC Solaris7/64 and 5% under AlphaLinux. - * Well, to be honest it should say that this *prevents* - * performance degradation. - * <appro@fy.chalmers.se> - * Apparently there're LP64 compilers that generate better - * code if A-D are declared int. Most notably GCC-x86_64 - * generates better code. - * <appro@fy.chalmers.se> - */ -#endif diff --git a/src/libutil/md5.c b/src/libutil/md5.c deleted file mode 100644 index b31640cdcced..000000000000 --- a/src/libutil/md5.c +++ /dev/null @@ -1,365 +0,0 @@ -/* Functions to compute MD5 message digest of files or memory blocks. - according to the definition of MD5 in RFC 1321 from April 1992. - Copyright (C) 1995,1996,1997,1999,2000,2001 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 USA. */ - -/* Written by Ulrich Drepper <drepper@gnu.ai.mit.edu>, 1995. 
*/ - -#include <sys/types.h> - -#include <stdlib.h> -#include <string.h> - -#include "md5.h" - - -static md5_uint32 SWAP(md5_uint32 n) -{ - static int checked = 0; - static int bigendian = 0; - static md5_uint32 test; - - if (!checked) { - test = 1; - if (* (char *) &test == 0) - bigendian = 1; - checked = 1; - } - - if (bigendian) - return (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24)); - else - return n; -} - - -/* This array contains the bytes used to pad the buffer to the next - 64-byte boundary. (RFC 1321, 3.1: Step 1) */ -static const unsigned char fillbuf[64] = { 0x80, 0 /* , 0, 0, ... */ }; - - -/* Initialize structure containing state of computation. - (RFC 1321, 3.3: Step 3) */ -void -MD5_Init (ctx) - struct MD5_CTX *ctx; -{ - ctx->A = 0x67452301; - ctx->B = 0xefcdab89; - ctx->C = 0x98badcfe; - ctx->D = 0x10325476; - - ctx->total[0] = ctx->total[1] = 0; - ctx->buflen = 0; -} - -/* Put result from CTX in first 16 bytes following RESBUF. The result - must be in little endian byte order. - - IMPORTANT: On some systems it is required that RESBUF is correctly - aligned for a 32 bits value. */ -void * -md5_read_ctx (ctx, resbuf) - const struct MD5_CTX *ctx; - void *resbuf; -{ - ((md5_uint32 *) resbuf)[0] = SWAP (ctx->A); - ((md5_uint32 *) resbuf)[1] = SWAP (ctx->B); - ((md5_uint32 *) resbuf)[2] = SWAP (ctx->C); - ((md5_uint32 *) resbuf)[3] = SWAP (ctx->D); - - return resbuf; -} - -/* Process the remaining bytes in the internal buffer and the usual - prolog according to the standard and write the result to RESBUF. - - IMPORTANT: On some systems it is required that RESBUF is correctly - aligned for a 32 bits value. */ -void * -MD5_Final (resbuf, ctx) - void *resbuf; - struct MD5_CTX *ctx; -{ - /* Take yet unprocessed bytes into account. */ - md5_uint32 bytes = ctx->buflen; - size_t pad; - - /* Now count remaining bytes. */ - ctx->total[0] += bytes; - if (ctx->total[0] < bytes) - ++ctx->total[1]; - - pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes; - memcpy (&ctx->buffer[bytes], fillbuf, pad); - - /* Put the 64-bit file length in *bits* at the end of the buffer. */ - *(md5_uint32 *) &ctx->buffer[bytes + pad] = SWAP (ctx->total[0] << 3); - *(md5_uint32 *) &ctx->buffer[bytes + pad + 4] = SWAP ((ctx->total[1] << 3) | - (ctx->total[0] >> 29)); - - /* Process last bytes. */ - md5_process_block (ctx->buffer, bytes + pad + 8, ctx); - - return md5_read_ctx (ctx, resbuf); -} - -void -MD5_Update (ctx, buffer, len) - struct MD5_CTX *ctx; - const void *buffer; - size_t len; -{ - /* When we already have some bits in our internal buffer concatenate - both inputs first. */ - if (ctx->buflen != 0) - { - size_t left_over = ctx->buflen; - size_t add = 128 - left_over > len ? len : 128 - left_over; - - memcpy (&ctx->buffer[left_over], buffer, add); - ctx->buflen += add; - - if (ctx->buflen > 64) - { - md5_process_block (ctx->buffer, ctx->buflen & ~63, ctx); - - ctx->buflen &= 63; - /* The regions in the following copy operation cannot overlap. */ - memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63], - ctx->buflen); - } - - buffer = (const char *) buffer + add; - len -= add; - } - - /* Process available complete blocks. */ - if (len >= 64) - { -#if !_STRING_ARCH_unaligned -/* To check alignment gcc has an appropriate operator. Other - compilers don't. 
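The buffering in MD5_Update follows the usual streaming-hash shape: top up the partially filled internal buffer first, run whole 64-byte blocks straight from the caller's data, then stash any tail bytes for the next call. A reduced sketch of that shape (hypothetical names; a process_block callback stands in for md5_process_block, and the alignment handling shown in the real code is omitted):

    #include <cstring>
    #include <cstddef>

    void stream_update(unsigned char buf[64], std::size_t & buflen,
                       const unsigned char * data, std::size_t len,
                       void (*process_block)(const unsigned char *, std::size_t))
    {
        if (buflen != 0) {                       // finish the buffered block first
            std::size_t take = 64 - buflen < len ? 64 - buflen : len;
            std::memcpy(buf + buflen, data, take);
            buflen += take; data += take; len -= take;
            if (buflen == 64) { process_block(buf, 64); buflen = 0; }
        }
        if (len >= 64) {                         // whole blocks straight from the input
            process_block(data, len & ~(std::size_t) 63);
            data += len & ~(std::size_t) 63; len &= 63;
        }
        if (len > 0) {                           // keep the remainder buffered
            std::memcpy(buf + buflen, data, len);
            buflen += len;
        }
    }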
*/ -# if __GNUC__ >= 2 -# define UNALIGNED_P(p) (((md5_uintptr) p) % __alignof__ (md5_uint32) != 0) -# else -# define UNALIGNED_P(p) (((md5_uintptr) p) % sizeof (md5_uint32) != 0) -# endif - if (UNALIGNED_P (buffer)) - while (len > 64) - { - md5_process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx); - buffer = (const char *) buffer + 64; - len -= 64; - } - else -#endif - { - md5_process_block (buffer, len & ~63, ctx); - buffer = (const char *) buffer + (len & ~63); - len &= 63; - } - } - - /* Move remaining bytes in internal buffer. */ - if (len > 0) - { - size_t left_over = ctx->buflen; - - memcpy (&ctx->buffer[left_over], buffer, len); - left_over += len; - if (left_over >= 64) - { - md5_process_block (ctx->buffer, 64, ctx); - left_over -= 64; - memcpy (ctx->buffer, &ctx->buffer[64], left_over); - } - ctx->buflen = left_over; - } -} - - -/* These are the four functions used in the four steps of the MD5 algorithm - and defined in the RFC 1321. The first function is a little bit optimized - (as found in Colin Plumbs public domain implementation). */ -/* #define FF(b, c, d) ((b & c) | (~b & d)) */ -#define FF(b, c, d) (d ^ (b & (c ^ d))) -#define FG(b, c, d) FF (d, b, c) -#define FH(b, c, d) (b ^ c ^ d) -#define FI(b, c, d) (c ^ (b | ~d)) - -/* Process LEN bytes of BUFFER, accumulating context into CTX. - It is assumed that LEN % 64 == 0. */ - -void -md5_process_block (buffer, len, ctx) - const void *buffer; - size_t len; - struct MD5_CTX *ctx; -{ - md5_uint32 correct_words[16]; - const md5_uint32 *words = buffer; - size_t nwords = len / sizeof (md5_uint32); - const md5_uint32 *endp = words + nwords; - md5_uint32 A = ctx->A; - md5_uint32 B = ctx->B; - md5_uint32 C = ctx->C; - md5_uint32 D = ctx->D; - - /* First increment the byte count. RFC 1321 specifies the possible - length of the file up to 2^64 bits. Here we only compute the - number of bytes. Do a double word increment. */ - ctx->total[0] += len; - if (ctx->total[0] < len) - ++ctx->total[1]; - - /* Process all bytes in the buffer with 64 bytes in each round of - the loop. */ - while (words < endp) - { - md5_uint32 *cwp = correct_words; - md5_uint32 A_save = A; - md5_uint32 B_save = B; - md5_uint32 C_save = C; - md5_uint32 D_save = D; - - /* First round: using the given function, the context and a constant - the next context is computed. Because the algorithms processing - unit is a 32-bit word and it is determined to work on words in - little endian byte order we perhaps have to change the byte order - before the computation. To reduce the work for the next steps - we store the swapped words in the array CORRECT_WORDS. */ - -#define OP(a, b, c, d, s, T) \ - do \ - { \ - a += FF (b, c, d) + (*cwp++ = SWAP (*words)) + T; \ - ++words; \ - CYCLIC (a, s); \ - a += b; \ - } \ - while (0) - - /* It is unfortunate that C does not provide an operator for - cyclic rotation. Hope the C compiler is smart enough. */ -#define CYCLIC(w, s) (w = (w << s) | (w >> (32 - s))) - - /* Before we start, one word to the strange constants. - They are defined in RFC 1321 as - - T[i] = (int) (4294967296.0 * fabs (sin (i))), i=1..64 - */ - - /* Round 1. 
*/ - OP (A, B, C, D, 7, 0xd76aa478); - OP (D, A, B, C, 12, 0xe8c7b756); - OP (C, D, A, B, 17, 0x242070db); - OP (B, C, D, A, 22, 0xc1bdceee); - OP (A, B, C, D, 7, 0xf57c0faf); - OP (D, A, B, C, 12, 0x4787c62a); - OP (C, D, A, B, 17, 0xa8304613); - OP (B, C, D, A, 22, 0xfd469501); - OP (A, B, C, D, 7, 0x698098d8); - OP (D, A, B, C, 12, 0x8b44f7af); - OP (C, D, A, B, 17, 0xffff5bb1); - OP (B, C, D, A, 22, 0x895cd7be); - OP (A, B, C, D, 7, 0x6b901122); - OP (D, A, B, C, 12, 0xfd987193); - OP (C, D, A, B, 17, 0xa679438e); - OP (B, C, D, A, 22, 0x49b40821); - - /* For the second to fourth round we have the possibly swapped words - in CORRECT_WORDS. Redefine the macro to take an additional first - argument specifying the function to use. */ -#undef OP -#define OP(f, a, b, c, d, k, s, T) \ - do \ - { \ - a += f (b, c, d) + correct_words[k] + T; \ - CYCLIC (a, s); \ - a += b; \ - } \ - while (0) - - /* Round 2. */ - OP (FG, A, B, C, D, 1, 5, 0xf61e2562); - OP (FG, D, A, B, C, 6, 9, 0xc040b340); - OP (FG, C, D, A, B, 11, 14, 0x265e5a51); - OP (FG, B, C, D, A, 0, 20, 0xe9b6c7aa); - OP (FG, A, B, C, D, 5, 5, 0xd62f105d); - OP (FG, D, A, B, C, 10, 9, 0x02441453); - OP (FG, C, D, A, B, 15, 14, 0xd8a1e681); - OP (FG, B, C, D, A, 4, 20, 0xe7d3fbc8); - OP (FG, A, B, C, D, 9, 5, 0x21e1cde6); - OP (FG, D, A, B, C, 14, 9, 0xc33707d6); - OP (FG, C, D, A, B, 3, 14, 0xf4d50d87); - OP (FG, B, C, D, A, 8, 20, 0x455a14ed); - OP (FG, A, B, C, D, 13, 5, 0xa9e3e905); - OP (FG, D, A, B, C, 2, 9, 0xfcefa3f8); - OP (FG, C, D, A, B, 7, 14, 0x676f02d9); - OP (FG, B, C, D, A, 12, 20, 0x8d2a4c8a); - - /* Round 3. */ - OP (FH, A, B, C, D, 5, 4, 0xfffa3942); - OP (FH, D, A, B, C, 8, 11, 0x8771f681); - OP (FH, C, D, A, B, 11, 16, 0x6d9d6122); - OP (FH, B, C, D, A, 14, 23, 0xfde5380c); - OP (FH, A, B, C, D, 1, 4, 0xa4beea44); - OP (FH, D, A, B, C, 4, 11, 0x4bdecfa9); - OP (FH, C, D, A, B, 7, 16, 0xf6bb4b60); - OP (FH, B, C, D, A, 10, 23, 0xbebfbc70); - OP (FH, A, B, C, D, 13, 4, 0x289b7ec6); - OP (FH, D, A, B, C, 0, 11, 0xeaa127fa); - OP (FH, C, D, A, B, 3, 16, 0xd4ef3085); - OP (FH, B, C, D, A, 6, 23, 0x04881d05); - OP (FH, A, B, C, D, 9, 4, 0xd9d4d039); - OP (FH, D, A, B, C, 12, 11, 0xe6db99e5); - OP (FH, C, D, A, B, 15, 16, 0x1fa27cf8); - OP (FH, B, C, D, A, 2, 23, 0xc4ac5665); - - /* Round 4. */ - OP (FI, A, B, C, D, 0, 6, 0xf4292244); - OP (FI, D, A, B, C, 7, 10, 0x432aff97); - OP (FI, C, D, A, B, 14, 15, 0xab9423a7); - OP (FI, B, C, D, A, 5, 21, 0xfc93a039); - OP (FI, A, B, C, D, 12, 6, 0x655b59c3); - OP (FI, D, A, B, C, 3, 10, 0x8f0ccc92); - OP (FI, C, D, A, B, 10, 15, 0xffeff47d); - OP (FI, B, C, D, A, 1, 21, 0x85845dd1); - OP (FI, A, B, C, D, 8, 6, 0x6fa87e4f); - OP (FI, D, A, B, C, 15, 10, 0xfe2ce6e0); - OP (FI, C, D, A, B, 6, 15, 0xa3014314); - OP (FI, B, C, D, A, 13, 21, 0x4e0811a1); - OP (FI, A, B, C, D, 4, 6, 0xf7537e82); - OP (FI, D, A, B, C, 11, 10, 0xbd3af235); - OP (FI, C, D, A, B, 2, 15, 0x2ad7d2bb); - OP (FI, B, C, D, A, 9, 21, 0xeb86d391); - - /* Add the starting values of the context. */ - A += A_save; - B += B_save; - C += C_save; - D += D_save; - } - - /* Put checksum in context given as argument. */ - ctx->A = A; - ctx->B = B; - ctx->C = C; - ctx->D = D; -} diff --git a/src/libutil/md5.h b/src/libutil/md5.h deleted file mode 100644 index 228d4972320f..000000000000 --- a/src/libutil/md5.h +++ /dev/null @@ -1,82 +0,0 @@ -/* Declaration of functions and data types used for MD5 sum computing - library functions. - Copyright (C) 1995,1996,1997,1999,2000,2001 Free Software Foundation, Inc. 
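The OP and CYCLIC macros in md5_process_block above consume one message word per step. Written out as a plain function (illustrative name only), a single round-1 step is:

    #include <cstdint>

    static inline void md5_step(uint32_t & a, uint32_t b, uint32_t c, uint32_t d,
                                uint32_t x, unsigned s, uint32_t t)
    {
        a += (d ^ (b & (c ^ d))) + x + t;   // FF(b,c,d) + message word + constant T[i]
        a = (a << s) | (a >> (32 - s));     // CYCLIC: rotate left by s
        a += b;
    }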
- This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 USA. */ - -#ifndef _MD5_H -#define _MD5_H 1 - -#include <inttypes.h> -typedef uint32_t md5_uint32; -typedef uintptr_t md5_uintptr; - -/* Structure to save state of computation between the single steps. */ -struct MD5_CTX -{ - md5_uint32 A; - md5_uint32 B; - md5_uint32 C; - md5_uint32 D; - - md5_uint32 total[2]; - md5_uint32 buflen; - char buffer[128] __attribute__ ((__aligned__ (__alignof__ (md5_uint32)))); -}; - -/* - * The following three functions are build up the low level used in - * the functions `md5_stream' and `md5_buffer'. - */ - -/* Initialize structure containing state of computation. - (RFC 1321, 3.3: Step 3) */ -extern void MD5_Init (struct MD5_CTX *ctx); - -/* Starting with the result of former calls of this function (or the - initialization function update the context for the next LEN bytes - starting at BUFFER. - It is necessary that LEN is a multiple of 64!!! */ -extern void md5_process_block (const void *buffer, size_t len, - struct MD5_CTX *ctx); - -/* Starting with the result of former calls of this function (or the - initialization function update the context for the next LEN bytes - starting at BUFFER. - It is NOT required that LEN is a multiple of 64. */ -extern void MD5_Update (struct MD5_CTX *ctx, const void *buffer, size_t len); - -/* Process the remaining bytes in the buffer and put result from CTX - in first 16 bytes following RESBUF. The result is always in little - endian byte order, so that a byte-wise output yields to the wanted - ASCII representation of the message digest. - - IMPORTANT: On some systems it is required that RESBUF is correctly - aligned for a 32 bits value. */ -extern void *MD5_Final (void *resbuf, struct MD5_CTX *ctx); - - -/* Put result from CTX in first 16 bytes following RESBUF. The result is - always in little endian byte order, so that a byte-wise output yields - to the wanted ASCII representation of the message digest. - - IMPORTANT: On some systems it is required that RESBUF is correctly - aligned for a 32 bits value. 
*/ -extern void *md5_read_ctx (const struct MD5_CTX *ctx, void *resbuf); - - -#endif /* md5.h */ diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index f8e9d00c12b8..f136a13248ba 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -248,6 +248,13 @@ Source & operator >> (Source & in, string & s) } +Source & operator >> (Source & in, unsigned int & n) +{ + n = readInt(in); + return in; +} + + template<class T> T readStrings(Source & source) { unsigned int count = readInt(source); diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 97ac3e912f85..979ff849fcaf 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -143,6 +143,7 @@ string readString(Source & source); template<class T> T readStrings(Source & source); Source & operator >> (Source & in, string & s); +Source & operator >> (Source & in, unsigned int & n); MakeError(SerialisationError, Error) diff --git a/src/libutil/sha1.c b/src/libutil/sha1.c deleted file mode 100644 index d9d294d15540..000000000000 --- a/src/libutil/sha1.c +++ /dev/null @@ -1,369 +0,0 @@ -/* $Id$ */ - -/* sha.c - Implementation of the Secure Hash Algorithm - * - * Copyright (C) 1995, A.M. Kuchling - * - * Distribute and use freely; there are no restrictions on further - * dissemination and usage except those imposed by the laws of your - * country of residence. - * - * Adapted to pike and some cleanup by Niels Möller. - */ - -/* $Id$ */ - -/* SHA: NIST's Secure Hash Algorithm */ - -/* Based on SHA code originally posted to sci.crypt by Peter Gutmann - in message <30ajo5$oe8@ccu2.auckland.ac.nz>. - Modified to test for endianness on creation of SHA objects by AMK. - Also, the original specification of SHA was found to have a weakness - by NSA/NIST. This code implements the fixed version of SHA. -*/ - -/* Here's the first paragraph of Peter Gutmann's posting: - -The following is my SHA (FIPS 180) code updated to allow use of the "fixed" -SHA, thanks to Jim Gillogly and an anonymous contributor for the information on -what's changed in the new version. The fix is a simple change which involves -adding a single rotate in the initial expansion function. It is unknown -whether this is an optimal solution to the problem which was discovered in the -SHA or whether it's simply a bandaid which fixes the problem with a minimum of -effort (for example the reengineering of a great many Capstone chips). -*/ - -#include "sha1.h" - -#include <string.h> - -void sha_copy(struct SHA_CTX *dest, struct SHA_CTX *src) -{ - unsigned int i; - - dest->count_l=src->count_l; - dest->count_h=src->count_h; - for(i=0; i<SHA_DIGESTLEN; i++) - dest->digest[i]=src->digest[i]; - for(i=0; i < src->index; i++) - dest->block[i] = src->block[i]; - dest->index = src->index; -} - - -/* The SHA f()-functions. 
The f1 and f3 functions can be optimized to - save one boolean operation each - thanks to Rich Schroeppel, - rcs@cs.arizona.edu for discovering this */ - -/*#define f1(x,y,z) ( ( x & y ) | ( ~x & z ) ) // Rounds 0-19 */ -#define f1(x,y,z) ( z ^ ( x & ( y ^ z ) ) ) /* Rounds 0-19 */ -#define f2(x,y,z) ( x ^ y ^ z ) /* Rounds 20-39 */ -/*#define f3(x,y,z) ( ( x & y ) | ( x & z ) | ( y & z ) ) // Rounds 40-59 */ -#define f3(x,y,z) ( ( x & y ) | ( z & ( x | y ) ) ) /* Rounds 40-59 */ -#define f4(x,y,z) ( x ^ y ^ z ) /* Rounds 60-79 */ - -/* The SHA Mysterious Constants */ - -#define K1 0x5A827999L /* Rounds 0-19 */ -#define K2 0x6ED9EBA1L /* Rounds 20-39 */ -#define K3 0x8F1BBCDCL /* Rounds 40-59 */ -#define K4 0xCA62C1D6L /* Rounds 60-79 */ - -/* SHA initial values */ - -#define h0init 0x67452301L -#define h1init 0xEFCDAB89L -#define h2init 0x98BADCFEL -#define h3init 0x10325476L -#define h4init 0xC3D2E1F0L - -/* 32-bit rotate left - kludged with shifts */ - -#define ROTL(n,X) ( ( (X) << (n) ) | ( (X) >> ( 32 - (n) ) ) ) - -/* The initial expanding function. The hash function is defined over an - 80-word expanded input array W, where the first 16 are copies of the input - data, and the remaining 64 are defined by - - W[ i ] = W[ i - 16 ] ^ W[ i - 14 ] ^ W[ i - 8 ] ^ W[ i - 3 ] - - This implementation generates these values on the fly in a circular - buffer - thanks to Colin Plumb, colin@nyx10.cs.du.edu for this - optimization. - - The updated SHA changes the expanding function by adding a rotate of 1 - bit. Thanks to Jim Gillogly, jim@rand.org, and an anonymous contributor - for this information */ - -#define expand(W,i) ( W[ i & 15 ] = \ - ROTL( 1, ( W[ i & 15 ] ^ W[ (i - 14) & 15 ] ^ \ - W[ (i - 8) & 15 ] ^ W[ (i - 3) & 15 ] ) ) ) - - -/* The prototype SHA sub-round. The fundamental sub-round is: - - a' = e + ROTL( 5, a ) + f( b, c, d ) + k + data; - b' = a; - c' = ROTL( 30, b ); - d' = c; - e' = d; - - but this is implemented by unrolling the loop 5 times and renaming the - variables ( e, a, b, c, d ) = ( a', b', c', d', e' ) each iteration. - This code is then replicated 20 times for each of the 4 functions, using - the next 20 values from the W[] array each time */ - -#define subRound(a, b, c, d, e, f, k, data) \ - ( e += ROTL( 5, a ) + f( b, c, d ) + k + data, b = ROTL( 30, b ) ) - -/* Initialize the SHA values */ - -void SHA1_Init(struct SHA_CTX *ctx) -{ - /* Set the h-vars to their initial values */ - ctx->digest[ 0 ] = h0init; - ctx->digest[ 1 ] = h1init; - ctx->digest[ 2 ] = h2init; - ctx->digest[ 3 ] = h3init; - ctx->digest[ 4 ] = h4init; - - /* Initialize bit count */ - ctx->count_l = ctx->count_h = 0; - - /* Initialize buffer */ - ctx->index = 0; -} - -/* Perform the SHA transformation. Note that this code, like MD5, seems to - break some optimizing compilers due to the complexity of the expressions - and the size of the basic block. It may be necessary to split it into - sections, e.g. based on the four subrounds - - Note that this function destroys the data area */ - -static void sha_transform(struct SHA_CTX *ctx, uint32_t *data ) -{ - uint32_t A, B, C, D, E; /* Local vars */ - - /* Set up first buffer and local data buffer */ - A = ctx->digest[0]; - B = ctx->digest[1]; - C = ctx->digest[2]; - D = ctx->digest[3]; - E = ctx->digest[4]; - - /* Heavy mangling, in 4 sub-rounds of 20 interations each. 
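The expand() and subRound() macros defined above drive the 80 unrolled steps that follow. A rough stand-alone rendering, with hypothetical names, f(b,c,d) evaluated by the caller, and the same 16-word circular buffer for the message schedule (only meaningful for step numbers i >= 16):

    #include <cstdint>

    static inline uint32_t rotl32(uint32_t x, unsigned n)
    {
        return (x << n) | (x >> (32 - n));
    }

    // W[i] = ROTL1(W[i-3] ^ W[i-8] ^ W[i-14] ^ W[i-16]), written back into
    // the 16-entry window at position i & 15 (W[i & 15] still holds W[i-16]).
    static inline uint32_t sha1_expand(uint32_t W[16], unsigned i)
    {
        return W[i & 15] = rotl32(W[(i - 3) & 15] ^ W[(i - 8) & 15] ^
                                  W[(i - 14) & 15] ^ W[i & 15], 1);
    }

    // One sub-round: e += ROTL5(a) + f(b,c,d) + K + W; b = ROTL30(b).
    // The unrolled subRound() calls below perform the variable renaming
    // (e, a, b, c, d) = (a', b', c', d', e') by permuting the arguments.
    static inline void sha1_subround(uint32_t a, uint32_t & b, uint32_t & e,
                                     uint32_t f_bcd, uint32_t k, uint32_t w)
    {
        e += rotl32(a, 5) + f_bcd + k + w;
        b = rotl32(b, 30);
    }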
*/ - subRound( A, B, C, D, E, f1, K1, data[ 0] ); - subRound( E, A, B, C, D, f1, K1, data[ 1] ); - subRound( D, E, A, B, C, f1, K1, data[ 2] ); - subRound( C, D, E, A, B, f1, K1, data[ 3] ); - subRound( B, C, D, E, A, f1, K1, data[ 4] ); - subRound( A, B, C, D, E, f1, K1, data[ 5] ); - subRound( E, A, B, C, D, f1, K1, data[ 6] ); - subRound( D, E, A, B, C, f1, K1, data[ 7] ); - subRound( C, D, E, A, B, f1, K1, data[ 8] ); - subRound( B, C, D, E, A, f1, K1, data[ 9] ); - subRound( A, B, C, D, E, f1, K1, data[10] ); - subRound( E, A, B, C, D, f1, K1, data[11] ); - subRound( D, E, A, B, C, f1, K1, data[12] ); - subRound( C, D, E, A, B, f1, K1, data[13] ); - subRound( B, C, D, E, A, f1, K1, data[14] ); - subRound( A, B, C, D, E, f1, K1, data[15] ); - subRound( E, A, B, C, D, f1, K1, expand( data, 16 ) ); - subRound( D, E, A, B, C, f1, K1, expand( data, 17 ) ); - subRound( C, D, E, A, B, f1, K1, expand( data, 18 ) ); - subRound( B, C, D, E, A, f1, K1, expand( data, 19 ) ); - - subRound( A, B, C, D, E, f2, K2, expand( data, 20 ) ); - subRound( E, A, B, C, D, f2, K2, expand( data, 21 ) ); - subRound( D, E, A, B, C, f2, K2, expand( data, 22 ) ); - subRound( C, D, E, A, B, f2, K2, expand( data, 23 ) ); - subRound( B, C, D, E, A, f2, K2, expand( data, 24 ) ); - subRound( A, B, C, D, E, f2, K2, expand( data, 25 ) ); - subRound( E, A, B, C, D, f2, K2, expand( data, 26 ) ); - subRound( D, E, A, B, C, f2, K2, expand( data, 27 ) ); - subRound( C, D, E, A, B, f2, K2, expand( data, 28 ) ); - subRound( B, C, D, E, A, f2, K2, expand( data, 29 ) ); - subRound( A, B, C, D, E, f2, K2, expand( data, 30 ) ); - subRound( E, A, B, C, D, f2, K2, expand( data, 31 ) ); - subRound( D, E, A, B, C, f2, K2, expand( data, 32 ) ); - subRound( C, D, E, A, B, f2, K2, expand( data, 33 ) ); - subRound( B, C, D, E, A, f2, K2, expand( data, 34 ) ); - subRound( A, B, C, D, E, f2, K2, expand( data, 35 ) ); - subRound( E, A, B, C, D, f2, K2, expand( data, 36 ) ); - subRound( D, E, A, B, C, f2, K2, expand( data, 37 ) ); - subRound( C, D, E, A, B, f2, K2, expand( data, 38 ) ); - subRound( B, C, D, E, A, f2, K2, expand( data, 39 ) ); - - subRound( A, B, C, D, E, f3, K3, expand( data, 40 ) ); - subRound( E, A, B, C, D, f3, K3, expand( data, 41 ) ); - subRound( D, E, A, B, C, f3, K3, expand( data, 42 ) ); - subRound( C, D, E, A, B, f3, K3, expand( data, 43 ) ); - subRound( B, C, D, E, A, f3, K3, expand( data, 44 ) ); - subRound( A, B, C, D, E, f3, K3, expand( data, 45 ) ); - subRound( E, A, B, C, D, f3, K3, expand( data, 46 ) ); - subRound( D, E, A, B, C, f3, K3, expand( data, 47 ) ); - subRound( C, D, E, A, B, f3, K3, expand( data, 48 ) ); - subRound( B, C, D, E, A, f3, K3, expand( data, 49 ) ); - subRound( A, B, C, D, E, f3, K3, expand( data, 50 ) ); - subRound( E, A, B, C, D, f3, K3, expand( data, 51 ) ); - subRound( D, E, A, B, C, f3, K3, expand( data, 52 ) ); - subRound( C, D, E, A, B, f3, K3, expand( data, 53 ) ); - subRound( B, C, D, E, A, f3, K3, expand( data, 54 ) ); - subRound( A, B, C, D, E, f3, K3, expand( data, 55 ) ); - subRound( E, A, B, C, D, f3, K3, expand( data, 56 ) ); - subRound( D, E, A, B, C, f3, K3, expand( data, 57 ) ); - subRound( C, D, E, A, B, f3, K3, expand( data, 58 ) ); - subRound( B, C, D, E, A, f3, K3, expand( data, 59 ) ); - - subRound( A, B, C, D, E, f4, K4, expand( data, 60 ) ); - subRound( E, A, B, C, D, f4, K4, expand( data, 61 ) ); - subRound( D, E, A, B, C, f4, K4, expand( data, 62 ) ); - subRound( C, D, E, A, B, f4, K4, expand( data, 63 ) ); - subRound( B, C, D, E, A, f4, K4, expand( data, 64 ) ); 
- subRound( A, B, C, D, E, f4, K4, expand( data, 65 ) ); - subRound( E, A, B, C, D, f4, K4, expand( data, 66 ) ); - subRound( D, E, A, B, C, f4, K4, expand( data, 67 ) ); - subRound( C, D, E, A, B, f4, K4, expand( data, 68 ) ); - subRound( B, C, D, E, A, f4, K4, expand( data, 69 ) ); - subRound( A, B, C, D, E, f4, K4, expand( data, 70 ) ); - subRound( E, A, B, C, D, f4, K4, expand( data, 71 ) ); - subRound( D, E, A, B, C, f4, K4, expand( data, 72 ) ); - subRound( C, D, E, A, B, f4, K4, expand( data, 73 ) ); - subRound( B, C, D, E, A, f4, K4, expand( data, 74 ) ); - subRound( A, B, C, D, E, f4, K4, expand( data, 75 ) ); - subRound( E, A, B, C, D, f4, K4, expand( data, 76 ) ); - subRound( D, E, A, B, C, f4, K4, expand( data, 77 ) ); - subRound( C, D, E, A, B, f4, K4, expand( data, 78 ) ); - subRound( B, C, D, E, A, f4, K4, expand( data, 79 ) ); - - /* Build message digest */ - ctx->digest[0] += A; - ctx->digest[1] += B; - ctx->digest[2] += C; - ctx->digest[3] += D; - ctx->digest[4] += E; -} - -#if 1 - -#ifndef EXTRACT_UCHAR -#define EXTRACT_UCHAR(p) (*(unsigned char *)(p)) -#endif - -#define STRING2INT(s) ((((((EXTRACT_UCHAR(s) << 8) \ - | EXTRACT_UCHAR(s+1)) << 8) \ - | EXTRACT_UCHAR(s+2)) << 8) \ - | EXTRACT_UCHAR(s+3)) -#else -uint32_t STRING2INT(unsigned char *s) -{ - uint32_t r; - unsigned int i; - - for (i = 0, r = 0; i < 4; i++, s++) - r = (r << 8) | *s; - return r; -} -#endif - -static void sha_block(struct SHA_CTX *ctx, const unsigned char *block) -{ - uint32_t data[SHA_DATALEN]; - unsigned int i; - - /* Update block count */ - if (!++ctx->count_l) - ++ctx->count_h; - - /* Endian independent conversion */ - for (i = 0; i<SHA_DATALEN; i++, block += 4) - data[i] = STRING2INT(block); - - sha_transform(ctx, data); -} - -void SHA1_Update(struct SHA_CTX *ctx, const unsigned char *buffer, uint32_t len) -{ - if (ctx->index) - { /* Try to fill partial block */ - unsigned left = SHA_DATASIZE - ctx->index; - if (len < left) - { - memcpy(ctx->block + ctx->index, buffer, len); - ctx->index += len; - return; /* Finished */ - } - else - { - memcpy(ctx->block + ctx->index, buffer, left); - sha_block(ctx, ctx->block); - buffer += left; - len -= left; - } - } - while (len >= SHA_DATASIZE) - { - sha_block(ctx, buffer); - buffer += SHA_DATASIZE; - len -= SHA_DATASIZE; - } - if ((ctx->index = len)) /* This assignment is intended */ - /* Buffer leftovers */ - memcpy(ctx->block, buffer, len); -} - -/* Final wrapup - pad to SHA_DATASIZE-byte boundary with the bit pattern - 1 0* (64-bit count of bits processed, MSB-first) */ - -void SHA1_Final(unsigned char *s, struct SHA_CTX *ctx) -{ - uint32_t data[SHA_DATALEN]; - unsigned int i; - unsigned int words; - - i = ctx->index; - /* Set the first char of padding to 0x80. This is safe since there is - always at least one byte free */ - ctx->block[i++] = 0x80; - - /* Fill rest of word */ - for( ; i & 3; i++) - ctx->block[i] = 0; - - /* i is now a multiple of the word size 4 */ - words = i >> 2; - for (i = 0; i < words; i++) - data[i] = STRING2INT(ctx->block + 4*i); - - if (words > (SHA_DATALEN-2)) - { /* No room for length in this block. 
Process it and - * pad with another one */ - for (i = words ; i < SHA_DATALEN; i++) - data[i] = 0; - sha_transform(ctx, data); - for (i = 0; i < (SHA_DATALEN-2); i++) - data[i] = 0; - } - else - for (i = words ; i < SHA_DATALEN - 2; i++) - data[i] = 0; - /* Theres 512 = 2^9 bits in one block */ - data[SHA_DATALEN-2] = (ctx->count_h << 9) | (ctx->count_l >> 23); - data[SHA_DATALEN-1] = (ctx->count_l << 9) | (ctx->index << 3); - sha_transform(ctx, data); - sha_digest(ctx, s); -} - -void sha_digest(struct SHA_CTX *ctx, unsigned char *s) -{ - unsigned int i; - - for (i = 0; i < SHA_DIGESTLEN; i++) - { - *s++ = ctx->digest[i] >> 24; - *s++ = 0xff & (ctx->digest[i] >> 16); - *s++ = 0xff & (ctx->digest[i] >> 8); - *s++ = 0xff & ctx->digest[i]; - } -} diff --git a/src/libutil/sha1.h b/src/libutil/sha1.h deleted file mode 100644 index 715040dd48df..000000000000 --- a/src/libutil/sha1.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _SHA_H -#define _SHA_H - -#include <inttypes.h> - -/* The SHA block size and message digest sizes, in bytes */ - -#define SHA_DATASIZE 64 -#define SHA_DATALEN 16 -#define SHA_DIGESTSIZE 20 -#define SHA_DIGESTLEN 5 -/* The structure for storing SHA info */ - -struct SHA_CTX { - uint32_t digest[SHA_DIGESTLEN]; /* Message digest */ - uint32_t count_l, count_h; /* 64-bit block count */ - uint8_t block[SHA_DATASIZE]; /* SHA data buffer */ - unsigned int index; /* index into buffer */ -}; - -void SHA1_Init(struct SHA_CTX *ctx); -void SHA1_Update(struct SHA_CTX *ctx, const unsigned char *buffer, uint32_t len); -void SHA1_Final(unsigned char *s, struct SHA_CTX *ctx); -void sha_digest(struct SHA_CTX *ctx, unsigned char *s); -void sha_copy(struct SHA_CTX *dest, struct SHA_CTX *src); - - -#endif /* !_SHA_H */ diff --git a/src/libutil/sha256.c b/src/libutil/sha256.c deleted file mode 100644 index 63ed0ba43011..000000000000 --- a/src/libutil/sha256.c +++ /dev/null @@ -1,238 +0,0 @@ -/* crypto/sha/sha256.c */ -/* ==================================================================== - * Copyright (c) 2004 The OpenSSL Project. All rights reserved - * according to the OpenSSL license [found in ./md32_common.h]. 
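One detail of SHA1_Final above that is easy to misread: count_l/count_h hold a 64-bit count of processed 512-bit blocks and index the number of buffered bytes, so the trailing length field is count*512 + index*8 bits. A small sketch of how the two length words are packed (hypothetical helper name):

    #include <cstdint>

    static void sha1_length_words(uint32_t count_h, uint32_t count_l,
                                  uint32_t index, uint32_t out[2])
    {
        out[0] = (count_h << 9) | (count_l >> 23);   // high 32 bits of the bit count
        out[1] = (count_l << 9) | (index << 3);      // low 32 bits of the bit count
    }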
- * ==================================================================== - */ - -#include <stdlib.h> -#include <string.h> - -#include "sha256.h" - -int SHA224_Init (SHA256_CTX *c) - { - c->h[0]=0xc1059ed8UL; c->h[1]=0x367cd507UL; - c->h[2]=0x3070dd17UL; c->h[3]=0xf70e5939UL; - c->h[4]=0xffc00b31UL; c->h[5]=0x68581511UL; - c->h[6]=0x64f98fa7UL; c->h[7]=0xbefa4fa4UL; - c->Nl=0; c->Nh=0; - c->num=0; c->md_len=SHA224_DIGEST_LENGTH; - return 1; - } - -int SHA256_Init (SHA256_CTX *c) - { - c->h[0]=0x6a09e667UL; c->h[1]=0xbb67ae85UL; - c->h[2]=0x3c6ef372UL; c->h[3]=0xa54ff53aUL; - c->h[4]=0x510e527fUL; c->h[5]=0x9b05688cUL; - c->h[6]=0x1f83d9abUL; c->h[7]=0x5be0cd19UL; - c->Nl=0; c->Nh=0; - c->num=0; c->md_len=SHA256_DIGEST_LENGTH; - return 1; - } - -unsigned char *SHA224(const unsigned char *d, size_t n, unsigned char *md) - { - SHA256_CTX c; - static unsigned char m[SHA224_DIGEST_LENGTH]; - - if (md == NULL) md=m; - SHA224_Init(&c); - SHA256_Update(&c,d,n); - SHA256_Final(md,&c); - return(md); - } - -unsigned char *SHA256(const unsigned char *d, size_t n, unsigned char *md) - { - SHA256_CTX c; - static unsigned char m[SHA256_DIGEST_LENGTH]; - - if (md == NULL) md=m; - SHA256_Init(&c); - SHA256_Update(&c,d,n); - SHA256_Final(md,&c); - return(md); - } - -int SHA224_Update(SHA256_CTX *c, const void *data, size_t len) -{ return SHA256_Update (c,data,len); } -int SHA224_Final (unsigned char *md, SHA256_CTX *c) -{ return SHA256_Final (md,c); } - -#define DATA_ORDER_IS_BIG_ENDIAN - -#define HASH_LONG uint32_t -#define HASH_LONG_LOG2 2 -#define HASH_CTX SHA256_CTX -#define HASH_CBLOCK SHA_CBLOCK -#define HASH_LBLOCK SHA_LBLOCK -/* - * Note that FIPS180-2 discusses "Truncation of the Hash Function Output." - * default: case below covers for it. It's not clear however if it's - * permitted to truncate to amount of bytes not divisible by 4. I bet not, - * but if it is, then default: case shall be extended. For reference. - * Idea behind separate cases for pre-defined lenghts is to let the - * compiler decide if it's appropriate to unroll small loops. 
- */ -#define HASH_MAKE_STRING(c,s) do { \ - unsigned long ll; \ - unsigned int n; \ - switch ((c)->md_len) \ - { case SHA224_DIGEST_LENGTH: \ - for (n=0;n<SHA224_DIGEST_LENGTH/4;n++) \ - { ll=(c)->h[n]; HOST_l2c(ll,(s)); } \ - break; \ - case SHA256_DIGEST_LENGTH: \ - for (n=0;n<SHA256_DIGEST_LENGTH/4;n++) \ - { ll=(c)->h[n]; HOST_l2c(ll,(s)); } \ - break; \ - default: \ - if ((c)->md_len > SHA256_DIGEST_LENGTH) \ - return 0; \ - for (n=0;n<(c)->md_len/4;n++) \ - { ll=(c)->h[n]; HOST_l2c(ll,(s)); } \ - break; \ - } \ - } while (0) - -#define HASH_UPDATE SHA256_Update -#define HASH_TRANSFORM SHA256_Transform -#define HASH_FINAL SHA256_Final -#define HASH_BLOCK_HOST_ORDER sha256_block_host_order -#define HASH_BLOCK_DATA_ORDER sha256_block_data_order -void sha256_block_host_order (SHA256_CTX *ctx, const void *in, size_t num); -void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num); - -#include "md32_common.h" - -static const uint32_t K256[64] = { - 0x428a2f98UL,0x71374491UL,0xb5c0fbcfUL,0xe9b5dba5UL, - 0x3956c25bUL,0x59f111f1UL,0x923f82a4UL,0xab1c5ed5UL, - 0xd807aa98UL,0x12835b01UL,0x243185beUL,0x550c7dc3UL, - 0x72be5d74UL,0x80deb1feUL,0x9bdc06a7UL,0xc19bf174UL, - 0xe49b69c1UL,0xefbe4786UL,0x0fc19dc6UL,0x240ca1ccUL, - 0x2de92c6fUL,0x4a7484aaUL,0x5cb0a9dcUL,0x76f988daUL, - 0x983e5152UL,0xa831c66dUL,0xb00327c8UL,0xbf597fc7UL, - 0xc6e00bf3UL,0xd5a79147UL,0x06ca6351UL,0x14292967UL, - 0x27b70a85UL,0x2e1b2138UL,0x4d2c6dfcUL,0x53380d13UL, - 0x650a7354UL,0x766a0abbUL,0x81c2c92eUL,0x92722c85UL, - 0xa2bfe8a1UL,0xa81a664bUL,0xc24b8b70UL,0xc76c51a3UL, - 0xd192e819UL,0xd6990624UL,0xf40e3585UL,0x106aa070UL, - 0x19a4c116UL,0x1e376c08UL,0x2748774cUL,0x34b0bcb5UL, - 0x391c0cb3UL,0x4ed8aa4aUL,0x5b9cca4fUL,0x682e6ff3UL, - 0x748f82eeUL,0x78a5636fUL,0x84c87814UL,0x8cc70208UL, - 0x90befffaUL,0xa4506cebUL,0xbef9a3f7UL,0xc67178f2UL }; - -/* - * FIPS specification refers to right rotations, while our ROTATE macro - * is left one. This is why you might notice that rotation coefficients - * differ from those observed in FIPS document by 32-N... 
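The note just above about rotation direction can be checked against the Sigma definitions that follow: the file's ROTATE macro rotates left, so each right rotation by N from FIPS 180-2 shows up as a left rotation by 32-N. For example, Sigma0 below (left rotates by 30, 19 and 10) equals the FIPS form ROTR 2 ^ ROTR 13 ^ ROTR 22; a sketch of the equivalence with illustrative names:

    #include <cstdint>

    static inline uint32_t rotr32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    // FIPS-style Sigma0; identical to ROTATE(x,30) ^ ROTATE(x,19) ^ ROTATE(x,10),
    // because a left rotate by 32-n is the same as a right rotate by n.
    static inline uint32_t Sigma0_fips(uint32_t x)
    {
        return rotr32(x, 2) ^ rotr32(x, 13) ^ rotr32(x, 22);
    }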
- */ -#define Sigma0(x) (ROTATE((x),30) ^ ROTATE((x),19) ^ ROTATE((x),10)) -#define Sigma1(x) (ROTATE((x),26) ^ ROTATE((x),21) ^ ROTATE((x),7)) -#define sigma0(x) (ROTATE((x),25) ^ ROTATE((x),14) ^ ((x)>>3)) -#define sigma1(x) (ROTATE((x),15) ^ ROTATE((x),13) ^ ((x)>>10)) - -#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z))) -#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) - -#define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \ - T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i]; \ - h = Sigma0(a) + Maj(a,b,c); \ - d += T1; h += T1; } while (0) - -#define ROUND_16_63(i,a,b,c,d,e,f,g,h,X) do { \ - s0 = X[(i+1)&0x0f]; s0 = sigma0(s0); \ - s1 = X[(i+14)&0x0f]; s1 = sigma1(s1); \ - T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f]; \ - ROUND_00_15(i,a,b,c,d,e,f,g,h); } while (0) - -static void sha256_block (SHA256_CTX *ctx, const void *in, size_t num, int host) - { - uint32_t a,b,c,d,e,f,g,h,s0,s1,T1; - uint32_t X[16]; - int i; - const unsigned char *data=in; - - while (num--) { - - a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3]; - e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7]; - - if (host) - { - const uint32_t *W=(const uint32_t *)data; - - T1 = X[0] = W[0]; ROUND_00_15(0,a,b,c,d,e,f,g,h); - T1 = X[1] = W[1]; ROUND_00_15(1,h,a,b,c,d,e,f,g); - T1 = X[2] = W[2]; ROUND_00_15(2,g,h,a,b,c,d,e,f); - T1 = X[3] = W[3]; ROUND_00_15(3,f,g,h,a,b,c,d,e); - T1 = X[4] = W[4]; ROUND_00_15(4,e,f,g,h,a,b,c,d); - T1 = X[5] = W[5]; ROUND_00_15(5,d,e,f,g,h,a,b,c); - T1 = X[6] = W[6]; ROUND_00_15(6,c,d,e,f,g,h,a,b); - T1 = X[7] = W[7]; ROUND_00_15(7,b,c,d,e,f,g,h,a); - T1 = X[8] = W[8]; ROUND_00_15(8,a,b,c,d,e,f,g,h); - T1 = X[9] = W[9]; ROUND_00_15(9,h,a,b,c,d,e,f,g); - T1 = X[10] = W[10]; ROUND_00_15(10,g,h,a,b,c,d,e,f); - T1 = X[11] = W[11]; ROUND_00_15(11,f,g,h,a,b,c,d,e); - T1 = X[12] = W[12]; ROUND_00_15(12,e,f,g,h,a,b,c,d); - T1 = X[13] = W[13]; ROUND_00_15(13,d,e,f,g,h,a,b,c); - T1 = X[14] = W[14]; ROUND_00_15(14,c,d,e,f,g,h,a,b); - T1 = X[15] = W[15]; ROUND_00_15(15,b,c,d,e,f,g,h,a); - - data += SHA256_CBLOCK; - } - else - { - uint32_t l; - - HOST_c2l(data,l); T1 = X[0] = l; ROUND_00_15(0,a,b,c,d,e,f,g,h); - HOST_c2l(data,l); T1 = X[1] = l; ROUND_00_15(1,h,a,b,c,d,e,f,g); - HOST_c2l(data,l); T1 = X[2] = l; ROUND_00_15(2,g,h,a,b,c,d,e,f); - HOST_c2l(data,l); T1 = X[3] = l; ROUND_00_15(3,f,g,h,a,b,c,d,e); - HOST_c2l(data,l); T1 = X[4] = l; ROUND_00_15(4,e,f,g,h,a,b,c,d); - HOST_c2l(data,l); T1 = X[5] = l; ROUND_00_15(5,d,e,f,g,h,a,b,c); - HOST_c2l(data,l); T1 = X[6] = l; ROUND_00_15(6,c,d,e,f,g,h,a,b); - HOST_c2l(data,l); T1 = X[7] = l; ROUND_00_15(7,b,c,d,e,f,g,h,a); - HOST_c2l(data,l); T1 = X[8] = l; ROUND_00_15(8,a,b,c,d,e,f,g,h); - HOST_c2l(data,l); T1 = X[9] = l; ROUND_00_15(9,h,a,b,c,d,e,f,g); - HOST_c2l(data,l); T1 = X[10] = l; ROUND_00_15(10,g,h,a,b,c,d,e,f); - HOST_c2l(data,l); T1 = X[11] = l; ROUND_00_15(11,f,g,h,a,b,c,d,e); - HOST_c2l(data,l); T1 = X[12] = l; ROUND_00_15(12,e,f,g,h,a,b,c,d); - HOST_c2l(data,l); T1 = X[13] = l; ROUND_00_15(13,d,e,f,g,h,a,b,c); - HOST_c2l(data,l); T1 = X[14] = l; ROUND_00_15(14,c,d,e,f,g,h,a,b); - HOST_c2l(data,l); T1 = X[15] = l; ROUND_00_15(15,b,c,d,e,f,g,h,a); - } - - for (i=16;i<64;i+=8) - { - ROUND_16_63(i+0,a,b,c,d,e,f,g,h,X); - ROUND_16_63(i+1,h,a,b,c,d,e,f,g,X); - ROUND_16_63(i+2,g,h,a,b,c,d,e,f,X); - ROUND_16_63(i+3,f,g,h,a,b,c,d,e,X); - ROUND_16_63(i+4,e,f,g,h,a,b,c,d,X); - ROUND_16_63(i+5,d,e,f,g,h,a,b,c,X); - ROUND_16_63(i+6,c,d,e,f,g,h,a,b,X); - ROUND_16_63(i+7,b,c,d,e,f,g,h,a,X); - } - - ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; 
ctx->h[3] += d; - ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h; - - } - } - -/* - * Idea is to trade couple of cycles for some space. On IA-32 we save - * about 4K in "big footprint" case. In "small footprint" case any gain - * is appreciated:-) - */ -void HASH_BLOCK_HOST_ORDER (SHA256_CTX *ctx, const void *in, size_t num) -{ sha256_block (ctx,in,num,1); } - -void HASH_BLOCK_DATA_ORDER (SHA256_CTX *ctx, const void *in, size_t num) -{ sha256_block (ctx,in,num,0); } - - diff --git a/src/libutil/sha256.h b/src/libutil/sha256.h deleted file mode 100644 index 0686b84f0e08..000000000000 --- a/src/libutil/sha256.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef _SHA256_H -#define _SHA256_H 1 - -#include <inttypes.h> - -#define SHA_LBLOCK 16 -#define SHA_CBLOCK (SHA_LBLOCK*4) /* SHA treats input data as a - * contiguous array of 32 bit - * wide big-endian values. */ - -#define SHA256_CBLOCK (SHA_LBLOCK*4) /* SHA-256 treats input data as a - * contiguous array of 32 bit - * wide big-endian values. */ -#define SHA224_DIGEST_LENGTH 28 -#define SHA256_DIGEST_LENGTH 32 - -typedef struct SHA256state_st - { - uint32_t h[8]; - uint32_t Nl,Nh; - uint32_t data[SHA_LBLOCK]; - unsigned int num,md_len; - } SHA256_CTX; - -int SHA224_Init(SHA256_CTX *c); -int SHA224_Update(SHA256_CTX *c, const void *data, size_t len); -int SHA224_Final(unsigned char *md, SHA256_CTX *c); -unsigned char *SHA224(const unsigned char *d, size_t n,unsigned char *md); -int SHA256_Init(SHA256_CTX *c); -int SHA256_Update(SHA256_CTX *c, const void *data, size_t len); -int SHA256_Final(unsigned char *md, SHA256_CTX *c); -unsigned char *SHA256(const unsigned char *d, size_t n,unsigned char *md); -void SHA256_Transform(SHA256_CTX *c, const unsigned char *data); - -#endif diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 11c75d2cda4c..75032bf90d0b 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -453,7 +453,7 @@ Nest::~Nest() static string escVerbosity(Verbosity level) { - return int2String((int) level); + return std::to_string((int) level); } @@ -599,6 +599,8 @@ string drainFD(int fd) ////////////////////////////////////////////////////////////////////// +AutoDelete::AutoDelete() : del{false} {} + AutoDelete::AutoDelete(const string & p, bool recursive) : path(p) { del = true; @@ -626,6 +628,12 @@ void AutoDelete::cancel() del = false; } +void AutoDelete::reset(const Path & p, bool recursive) { + path = p; + this->recursive = recursive; + del = true; +} + ////////////////////////////////////////////////////////////////////// diff --git a/src/libutil/util.hh b/src/libutil/util.hh index b2fb59d6f2d7..f4026a0a884b 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -150,8 +150,8 @@ void printMsg_(Verbosity level, const FormatOrString & fs); #define printMsg(level, f) \ do { \ - if (level <= verbosity) { \ - printMsg_(level, (f)); \ + if (level <= nix::verbosity) { \ + nix::printMsg_(level, (f)); \ } \ } while (0) @@ -199,9 +199,12 @@ class AutoDelete bool del; bool recursive; public: + AutoDelete(); AutoDelete(const Path & p, bool recursive = true); ~AutoDelete(); void cancel(); + void reset(const Path & p, bool recursive = true); + operator Path() const { return path; } }; @@ -355,13 +358,6 @@ template<class N> bool string2Int(const string & s, N & n) return str && str.get() == EOF; } -template<class N> string int2String(N n) -{ - std::ostringstream str; - str << n; - return str.str(); -} - /* Return true iff `s' ends in `suffix'. 
*/ bool hasSuffix(const string & s, const string & suffix); diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index bb4789aeac82..5a72cb712014 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -4,6 +4,7 @@ #include "globals.hh" #include <iostream> +#include <cerrno> using namespace nix; diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc index 199d3288f4d6..e97d1dab17b2 100644 --- a/src/nix-daemon/nix-daemon.cc +++ b/src/nix-daemon/nix-daemon.cc @@ -7,6 +7,7 @@ #include "affinity.hh" #include "globals.hh" #include "monitor-fd.hh" +#include "derivations.hh" #include <algorithm> @@ -325,6 +326,20 @@ static void performOp(bool trusted, unsigned int clientVersion, break; } + case wopBuildDerivation: { + Path drvPath = readStorePath(from); + BasicDerivation drv; + from >> drv; + BuildMode buildMode = (BuildMode) readInt(from); + startWork(); + if (!trusted) + throw Error("you are not privileged to build derivations"); + auto res = store->buildDerivation(drvPath, drv, buildMode); + stopWork(); + to << res.status << res.errorMsg; + break; + } + case wopEnsurePath: { Path path = readStorePath(from); startWork(); @@ -401,8 +416,8 @@ static void performOp(bool trusted, unsigned int clientVersion, settings.keepGoing = readInt(from) != 0; settings.set("build-fallback", readInt(from) ? "true" : "false"); verbosity = (Verbosity) readInt(from); - settings.set("build-max-jobs", int2String(readInt(from))); - settings.set("build-max-silent-time", int2String(readInt(from))); + settings.set("build-max-jobs", std::to_string(readInt(from))); + settings.set("build-max-silent-time", std::to_string(readInt(from))); if (GET_PROTOCOL_MINOR(clientVersion) >= 2) settings.useBuildHook = readInt(from) != 0; if (GET_PROTOCOL_MINOR(clientVersion) >= 4) { @@ -411,7 +426,7 @@ static void performOp(bool trusted, unsigned int clientVersion, settings.printBuildTrace = readInt(from) != 0; } if (GET_PROTOCOL_MINOR(clientVersion) >= 6) - settings.set("build-cores", int2String(readInt(from))); + settings.set("build-cores", std::to_string(readInt(from))); if (GET_PROTOCOL_MINOR(clientVersion) >= 10) settings.set("build-use-substitutes", readInt(from) ? "true" : "false"); if (GET_PROTOCOL_MINOR(clientVersion) >= 12) { @@ -677,6 +692,10 @@ static PeerInfo getPeerInfo(int remote) #elif defined(LOCAL_PEERCRED) +#if !defined(SOL_LOCAL) +#define SOL_LOCAL 0 +#endif + xucred cred; socklen_t credLen = sizeof(cred); if (getsockopt(remote, SOL_LOCAL, LOCAL_PEERCRED, &cred, &credLen) == -1) @@ -705,7 +724,7 @@ static void daemonLoop(char * * argv) /* Handle socket-based activation by systemd. */ if (getEnv("LISTEN_FDS") != "") { - if (getEnv("LISTEN_PID") != int2String(getpid()) || getEnv("LISTEN_FDS") != "1") + if (getEnv("LISTEN_PID") != std::to_string(getpid()) || getEnv("LISTEN_FDS") != "1") throw Error("unexpected systemd environment variables"); fdSocket = SD_LISTEN_FDS_START; } @@ -781,10 +800,10 @@ static void daemonLoop(char * * argv) PeerInfo peer = getPeerInfo(remote); struct passwd * pw = peer.uidKnown ? getpwuid(peer.uid) : 0; - string user = pw ? pw->pw_name : int2String(peer.uid); + string user = pw ? pw->pw_name : std::to_string(peer.uid); struct group * gr = peer.gidKnown ? getgrgid(peer.gid) : 0; - string group = gr ? gr->gr_name : int2String(peer.gid); + string group = gr ? 
gr->gr_name : std::to_string(peer.gid); Strings trustedUsers = settings.get("trusted-users", Strings({"root"})); Strings allowedUsers = settings.get("allowed-users", Strings({"*"})); @@ -796,7 +815,7 @@ static void daemonLoop(char * * argv) throw Error(format("user ‘%1%’ is not allowed to connect to the Nix daemon") % user); printMsg(lvlInfo, format((string) "accepted connection from pid %1%, user %2%" + (trusted ? " (trusted)" : "")) - % (peer.pidKnown ? int2String(peer.pid) : "<unknown>") + % (peer.pidKnown ? std::to_string(peer.pid) : "<unknown>") % (peer.uidKnown ? user : "<unknown>")); /* Fork a child to handle the connection. */ @@ -817,7 +836,7 @@ static void daemonLoop(char * * argv) /* For debugging, stuff the pid into argv[1]. */ if (peer.pidKnown && argv[1]) { - string processName = int2String(peer.pid); + string processName = std::to_string(peer.pid); strncpy(argv[1], processName.c_str(), strlen(argv[1])); } diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 97a2bbdb7de0..02a9f25a7a4e 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -570,14 +570,16 @@ static void upgradeDerivations(Globals & globals, constraints specified by upgradeType. If there are multiple matches, take the one with the highest priority. If there are still multiple matches, - take the one with the highest version. */ + take the one with the highest version. + Do not upgrade if it would decrease the priority. */ DrvInfos::iterator bestElem = availElems.end(); - DrvName bestName; + string bestVersion; for (auto j = availElems.begin(); j != availElems.end(); ++j) { + if (comparePriorities(*globals.state, i, *j) > 0) + continue; DrvName newName(j->name); if (newName.name == drvName.name) { - int d = comparePriorities(*globals.state, i, *j); - if (d == 0) d = compareVersions(drvName.version, newName.version); + int d = compareVersions(drvName.version, newName.version); if ((upgradeType == utLt && d < 0) || (upgradeType == utLeq && d <= 0) || (upgradeType == utEq && d == 0) || @@ -586,11 +588,11 @@ static void upgradeDerivations(Globals & globals, int d2 = -1; if (bestElem != availElems.end()) { d2 = comparePriorities(*globals.state, *bestElem, *j); - if (d2 == 0) d2 = compareVersions(bestName.version, newName.version); + if (d2 == 0) d2 = compareVersions(bestVersion, newName.version); } if (d2 < 0 && (!globals.prebuiltOnly || isPrebuilt(*globals.state, *j))) { bestElem = j; - bestName = newName; + bestVersion = newName.version; } } } @@ -600,9 +602,11 @@ static void upgradeDerivations(Globals & globals, i.queryOutPath() != bestElem->queryOutPath()) { + const char * action = compareVersions(drvName.version, bestVersion) <= 0 + ? 
"upgrading" : "downgrading"; printMsg(lvlInfo, - format("upgrading ‘%1%’ to ‘%2%’") - % i.name % bestElem->name); + format("%1% ‘%2%’ to ‘%3%’") + % action % i.name % bestElem->name); newElems.push_back(*bestElem); } else newElems.push_back(i); @@ -1136,7 +1140,19 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs) attrs3["value"] = v->listElems()[j]->string.s; xml.writeEmptyElement("string", attrs3); } + } else if (v->type == tAttrs) { + attrs2["type"] = "strings"; + XMLOpenElement m(xml, "meta", attrs2); + Bindings & attrs = *v->attrs; + for (auto &i : attrs) { + Attr & a(*attrs.find(i.name)); + if(a.value->type != tString) continue; + XMLAttrs attrs3; + attrs3["type"] = i.name; + attrs3["value"] = a.value->string.s; + xml.writeEmptyElement("string", attrs3); } + } } } } @@ -1273,7 +1289,7 @@ static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opAr std::set<unsigned int> gens; for (auto & i : opArgs) { unsigned int n; - if (!string2Int(i, n) || n < 0) + if (!string2Int(i, n)) throw UsageError(format("invalid generation number ‘%1%’") % i); gens.insert(n); } diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index b6845197ec71..13a145a3b53e 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -79,7 +79,7 @@ void processExpr(EvalState & state, const Strings & attrPaths, printGCWarning(); else { Path rootName = gcRoot; - if (++rootNr > 1) rootName += "-" + int2String(rootNr); + if (++rootNr > 1) rootName += "-" + std::to_string(rootNr); drvPath = addPermRoot(*store, drvPath, rootName, indirectRoot); } std::cout << format("%1%%2%\n") % drvPath % (outputName != "out" ? "!" + outputName : ""); diff --git a/src/nix-prefetch-url/local.mk b/src/nix-prefetch-url/local.mk new file mode 100644 index 000000000000..3e7735406af0 --- /dev/null +++ b/src/nix-prefetch-url/local.mk @@ -0,0 +1,7 @@ +programs += nix-prefetch-url + +nix-prefetch-url_DIR := $(d) + +nix-prefetch-url_SOURCES := $(d)/nix-prefetch-url.cc + +nix-prefetch-url_LIBS = libmain libexpr libstore libutil libformat diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc new file mode 100644 index 000000000000..73a2845e07a5 --- /dev/null +++ b/src/nix-prefetch-url/nix-prefetch-url.cc @@ -0,0 +1,210 @@ +#include "hash.hh" +#include "shared.hh" +#include "download.hh" +#include "store-api.hh" +#include "eval.hh" +#include "eval-inline.hh" +#include "common-opts.hh" +#include "attr-path.hh" + +#include <iostream> + +using namespace nix; + + +/* If ‘uri’ starts with ‘mirror://’, then resolve it using the list of + mirrors defined in Nixpkgs. 
+string resolveMirrorUri(EvalState & state, string uri)
+{
+    if (string(uri, 0, 9) != "mirror://") return uri;
+
+    string s(uri, 9);
+    auto p = s.find('/');
+    if (p == string::npos) throw Error("invalid mirror URI");
+    string mirrorName(s, 0, p);
+
+    Value vMirrors;
+    state.eval(state.parseExprFromString("import <nixpkgs/pkgs/build-support/fetchurl/mirrors.nix>", "."), vMirrors);
+    state.forceAttrs(vMirrors);
+
+    auto mirrorList = vMirrors.attrs->find(state.symbols.create(mirrorName));
+    if (mirrorList == vMirrors.attrs->end())
+        throw Error(format("unknown mirror name ‘%1%’") % mirrorName);
+    state.forceList(*mirrorList->value);
+
+    if (mirrorList->value->listSize() < 1)
+        throw Error(format("mirror URI ‘%1%’ did not expand to anything") % uri);
+
+    string mirror = state.forceString(*mirrorList->value->listElems()[0]);
+    return mirror + (hasSuffix(mirror, "/") ? "" : "/") + string(s, p + 1);
+}
+
+
+int main(int argc, char * * argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+        initGC();
+
+        HashType ht = htSHA256;
+        std::vector<string> args;
+        Strings searchPath;
+        bool printPath = getEnv("PRINT_PATH") != "";
+        bool fromExpr = false;
+        string attrPath;
+        std::map<string, string> autoArgs_;
+        bool unpack = false;
+        string name;
+
+        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+            if (*arg == "--help")
+                showManPage("nix-prefetch-url");
+            else if (*arg == "--version")
+                printVersion("nix-prefetch-url");
+            else if (*arg == "--type") {
+                string s = getArg(*arg, arg, end);
+                ht = parseHashType(s);
+                if (ht == htUnknown)
+                    throw UsageError(format("unknown hash type ‘%1%’") % s);
+            }
+            else if (*arg == "--print-path")
+                printPath = true;
+            else if (*arg == "--attr" || *arg == "-A") {
+                fromExpr = true;
+                attrPath = getArg(*arg, arg, end);
+            }
+            else if (*arg == "--unpack")
+                unpack = true;
+            else if (*arg == "--name")
+                name = getArg(*arg, arg, end);
+            else if (parseAutoArgs(arg, end, autoArgs_))
+                ;
+            else if (parseSearchPathArg(arg, end, searchPath))
+                ;
+            else if (*arg != "" && arg->at(0) == '-')
+                return false;
+            else
+                args.push_back(*arg);
+            return true;
+        });
+
+        if (args.size() > 2)
+            throw UsageError("too many arguments");
+
+        store = openStore();
+        EvalState state(searchPath);
+
+        Bindings & autoArgs(*evalAutoArgs(state, autoArgs_));
+
+        /* If -A is given, get the URI from the specified Nix
+           expression. */
+        string uri;
+        if (!fromExpr) {
+            if (args.empty())
+                throw UsageError("you must specify a URI");
+            uri = args[0];
+        } else {
+            Path path = resolveExprPath(lookupFileArg(state, args.empty() ? "." : args[0]));
+            Value vRoot;
+            state.evalFile(path, vRoot);
+            Value & v(*findAlongAttrPath(state, attrPath, autoArgs, vRoot));
+            state.forceAttrs(v);
+
+            /* Extract the URI. */
+            auto attr = v.attrs->find(state.symbols.create("urls"));
+            if (attr == v.attrs->end())
+                throw Error("attribute set does not contain a ‘urls’ attribute");
+            state.forceList(*attr->value);
+            if (attr->value->listSize() < 1)
+                throw Error("‘urls’ list is empty");
+            uri = state.forceString(*attr->value->listElems()[0]);
+
+            /* Extract the hash mode. */
+            attr = v.attrs->find(state.symbols.create("outputHashMode"));
+            if (attr == v.attrs->end())
+                printMsg(lvlInfo, "warning: this does not look like a fetchurl call");
+            else
+                unpack = state.forceString(*attr->value) == "recursive";
+
+            /* Extract the name. */
+            if (name.empty()) {
+                attr = v.attrs->find(state.symbols.create("name"));
+                if (attr != v.attrs->end())
+                    name = state.forceString(*attr->value);
+            }
+        }
+
+        /* Figure out a name in the Nix store. */
+        if (name.empty())
+            name = baseNameOf(uri);
+        if (name.empty())
+            throw Error(format("cannot figure out file name for ‘%1%’") % uri);
+
+        /* If an expected hash is given, the file may already exist in
+           the store. */
+        Hash hash, expectedHash(ht);
+        Path storePath;
+        if (args.size() == 2) {
+            expectedHash = parseHash16or32(ht, args[1]);
+            storePath = makeFixedOutputPath(unpack, ht, expectedHash, name);
+            if (store->isValidPath(storePath))
+                hash = expectedHash;
+            else
+                storePath.clear();
+        }
+
+        if (storePath.empty()) {
+
+            auto actualUri = resolveMirrorUri(state, uri);
+
+            /* Download the file. */
+            auto result = downloadFile(actualUri, DownloadOptions());
+
+            AutoDelete tmpDir(createTempDir(), true);
+            Path tmpFile = (Path) tmpDir + "/tmp";
+            writeFile(tmpFile, result.data);
+
+            /* Optionally unpack the file. */
+            if (unpack) {
+                printMsg(lvlInfo, "unpacking...");
+                Path unpacked = (Path) tmpDir + "/unpacked";
+                createDirs(unpacked);
+                if (hasSuffix(baseNameOf(uri), ".zip"))
+                    runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked}, "");
+                else
+                    // FIXME: this requires GNU tar for decompression.
+                    runProgram("tar", true, {"xf", tmpFile, "-C", unpacked}, "");
+
+                /* If the archive unpacks to a single file/directory, then use
+                   that as the top-level. */
+                auto entries = readDirectory(unpacked);
+                if (entries.size() == 1)
+                    tmpFile = unpacked + "/" + entries[0].name;
+                else
+                    tmpFile = unpacked;
+            }
+
+            /* FIXME: inefficient; addToStore() will also hash
+               this. */
+            hash = unpack ? hashPath(ht, tmpFile).first : hashString(ht, result.data);
+
+            if (expectedHash != Hash(ht) && expectedHash != hash)
+                throw Error(format("hash mismatch for ‘%1%’") % uri);
+
+            /* Copy the file to the Nix store. FIXME: if RemoteStore
+               implemented addToStoreFromDump() and downloadFile()
+               supported a sink, we could stream the download directly
+               into the Nix store. */
+            storePath = store->addToStore(name, tmpFile, unpack, ht);
+
+            assert(storePath == makeFixedOutputPath(unpack, ht, hash, name));
+        }
+
+        if (!printPath)
+            printMsg(lvlInfo, format("path is ‘%1%’") % storePath);
+
+        std::cout << printHash16or32(hash) << std::endl;
+        if (printPath)
+            std::cout << storePath << std::endl;
+    });
+}
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index d541b7b7d00a..14354f86e228 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -81,7 +81,7 @@ static PathSet realisePath(Path path, bool build = true)
                 printGCWarning();
             else {
                 Path rootName = gcRoot;
-                if (rootNr > 1) rootName += "-" + int2String(rootNr);
+                if (rootNr > 1) rootName += "-" + std::to_string(rootNr);
                 if (i->first != "out") rootName += "-" + i->first;
                 outPath = addPermRoot(*store, outPath, rootName, indirectRoot);
             }
@@ -98,7 +98,7 @@ static PathSet realisePath(Path path, bool build = true)
         else {
             Path rootName = gcRoot;
             rootNr++;
-            if (rootNr > 1) rootName += "-" + int2String(rootNr);
+            if (rootNr > 1) rootName += "-" + std::to_string(rootNr);
             path = addPermRoot(*store, path, rootName, indirectRoot);
         }
         return singleton<PathSet>(path);
@@ -853,7 +853,7 @@ static void opServe(Strings opFlags, Strings opArgs)
     if (magic != SERVE_MAGIC_1) throw Error("protocol mismatch");
     out << SERVE_MAGIC_2 << SERVE_PROTOCOL_VERSION;
     out.flush();
-    readInt(in); // Client version, unused for now
+    unsigned int clientVersion = readInt(in);
 
     auto getBuildSettings = [&]() {
         // FIXME: changing options here doesn't work if we're
@@ -863,6 +863,8 @@ static void opServe(Strings opFlags, Strings opArgs)
         settings.useSubstitutes = false;
         settings.maxSilentTime = readInt(in);
         settings.buildTimeout = readInt(in);
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
+            settings.maxLogSize = readInt(in);
     };
 
     while (true) {
diff --git a/src/nix-store/serve-protocol.hh b/src/nix-store/serve-protocol.hh
index f7f151d4603f..c4e2a370300b 100644
--- a/src/nix-store/serve-protocol.hh
+++ b/src/nix-store/serve-protocol.hh
@@ -5,7 +5,7 @@ namespace nix {
 #define SERVE_MAGIC_1 0x390c9deb
 #define SERVE_MAGIC_2 0x5452eecb
 
-#define SERVE_PROTOCOL_VERSION 0x201
+#define SERVE_PROTOCOL_VERSION 0x202
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
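
Note on the serve-protocol change above: the version constant only bumps the minor number (0x201 to 0x202), and the new maxLogSize setting is read only when GET_PROTOCOL_MINOR(clientVersion) >= 2, so older clients continue to work unchanged. The following stand-alone sketch is not part of the patch; the clientVersion values are illustrative. It only shows how the two macros split a version word and how a minor-version feature gate of this kind behaves for an old and a new client.

#include <cstdio>

// Same macro definitions as in the serve-protocol.hh hunk above.
#define SERVE_PROTOCOL_VERSION 0x202
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

int main()
{
    // Hypothetical client versions: a pre-patch client (0x201) and a patched one (0x202).
    unsigned int clients[] = { 0x201, SERVE_PROTOCOL_VERSION };

    for (unsigned int clientVersion : clients) {
        // Mirrors the gate added in opServe(): the extra maxLogSize field is
        // only exchanged with clients that speak minor version 2 or later.
        bool expectsMaxLogSize = GET_PROTOCOL_MINOR(clientVersion) >= 2;
        std::printf("client 0x%03x: major 0x%03x, minor %u, maxLogSize field: %s\n",
            clientVersion,
            GET_PROTOCOL_MAJOR(clientVersion),
            GET_PROTOCOL_MINOR(clientVersion),
            expectsMaxLogSize ? "read" : "skipped");
    }
    return 0;
}

Running it prints "maxLogSize field: skipped" for 0x201 and "read" for 0x202, which is the backwards-compatibility behaviour the protocol bump relies on.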