Diffstat (limited to 'src/libstore')
-rw-r--r--  src/libstore/build.cc                 224
-rw-r--r--  src/libstore/globals.hh                 6
-rw-r--r--  src/libstore/legacy-ssh-store.cc        6
-rw-r--r--  src/libstore/local-store.cc             6
-rw-r--r--  src/libstore/local-store.hh             2
-rw-r--r--  src/libstore/remote-store.cc            7
-rw-r--r--  src/libstore/remote-store.hh            4
-rw-r--r--  src/libstore/s3-binary-cache-store.cc  88
-rw-r--r--  src/libstore/store-api.cc              10
-rw-r--r--  src/libstore/store-api.hh               6
10 files changed, 269 insertions, 90 deletions
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index 0073b9b727ec..676ad5856b13 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -21,6 +21,7 @@
 #include <future>
 #include <chrono>
 #include <regex>
+#include <queue>
 
 #include <limits.h>
 #include <sys/time.h>
@@ -857,7 +858,7 @@ private:
        building multiple times. Since this contains the hash, it
        allows us to compare whether two rounds produced the same
        result. */
-    ValidPathInfos prevInfos;
+    std::map<Path, ValidPathInfo> prevInfos;
 
     const uid_t sandboxUid = 1000;
     const gid_t sandboxGid = 100;
@@ -938,6 +939,11 @@ private:
        as valid. */
     void registerOutputs();
 
+    /* Check that an output meets the requirements specified by the
+       'outputChecks' attribute (or the legacy
+       '{allowed,disallowed}{References,Requisites}' attributes). */
+    void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
+
     /* Open a log file and a pipe to it. */
     Path openLogFile();
 
@@ -3010,7 +3016,7 @@ void DerivationGoal::registerOutputs()
         if (allValid) return;
     }
 
-    ValidPathInfos infos;
+    std::map<std::string, ValidPathInfo> infos;
 
     /* Set of inodes seen during calls to canonicalisePathMetaData()
        for this build's outputs.  This needs to be shared between
@@ -3195,49 +3201,6 @@ void DerivationGoal::registerOutputs()
                 debug(format("referenced input: '%1%'") % i);
         }
 
-        /* Enforce `allowedReferences' and friends. */
-        auto checkRefs = [&](const string & attrName, bool allowed, bool recursive) {
-            auto value = parsedDrv->getStringsAttr(attrName);
-            if (!value) return;
-
-            PathSet spec = parseReferenceSpecifiers(worker.store, *drv, *value);
-
-            PathSet used;
-            if (recursive) {
-                /* Our requisites are the union of the closures of our references. */
-                for (auto & i : references)
-                    /* Don't call computeFSClosure on ourselves. */
-                    if (path != i)
-                        worker.store.computeFSClosure(i, used);
-            } else
-                used = references;
-
-            PathSet badPaths;
-
-            for (auto & i : used)
-                if (allowed) {
-                    if (spec.find(i) == spec.end())
-                        badPaths.insert(i);
-                } else {
-                    if (spec.find(i) != spec.end())
-                        badPaths.insert(i);
-                }
-
-            if (!badPaths.empty()) {
-                string badPathsStr;
-                for (auto & i : badPaths) {
-                    badPathsStr += "\n\t";
-                    badPathsStr += i;
-                }
-                throw BuildError(format("output '%1%' is not allowed to refer to the following paths:%2%") % actualPath % badPathsStr);
-            }
-        };
-
-        checkRefs("allowedReferences", true, false);
-        checkRefs("allowedRequisites", true, true);
-        checkRefs("disallowedReferences", false, false);
-        checkRefs("disallowedRequisites", false, true);
-
         if (curRound == nrRounds) {
             worker.store.optimisePath(actualPath); // FIXME: combine with scanForReferences()
             worker.markContentsGood(path);
@@ -3253,11 +3216,14 @@ void DerivationGoal::registerOutputs()
 
         if (!info.references.empty()) info.ca.clear();
 
-        infos.push_back(info);
+        infos[i.first] = info;
     }
 
     if (buildMode == bmCheck) return;
 
+    /* Apply output checks. */
+    checkOutputs(infos);
+
     /* Compare the result with the previous round, and report which
        path is different, if any.*/
     if (curRound > 1 && prevInfos != infos) {
@@ -3265,16 +3231,16 @@ void DerivationGoal::registerOutputs()
         for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
             if (!(*i == *j)) {
                 result.isNonDeterministic = true;
-                Path prev = i->path + checkSuffix;
+                Path prev = i->second.path + checkSuffix;
                 bool prevExists = keepPreviousRound && pathExists(prev);
                 auto msg = prevExists
-                    ? fmt("output '%1%' of '%2%' differs from '%3%' from previous round", i->path, drvPath, prev)
-                    : fmt("output '%1%' of '%2%' differs from previous round", i->path, drvPath);
+                    ? fmt("output '%1%' of '%2%' differs from '%3%' from previous round", i->second.path, drvPath, prev)
+                    : fmt("output '%1%' of '%2%' differs from previous round", i->second.path, drvPath);
 
                 auto diffHook = settings.diffHook;
                 if (prevExists && diffHook != "" && runDiffHook) {
                     try {
-                        auto diff = runProgram(diffHook, true, {prev, i->path});
+                        auto diff = runProgram(diffHook, true, {prev, i->second.path});
                         if (diff != "")
                             printError(chomp(diff));
                     } catch (Error & error) {
@@ -3319,7 +3285,11 @@ void DerivationGoal::registerOutputs()
     /* Register each output path as valid, and register the sets of
        paths referenced by each of them.  If there are cycles in the
        outputs, this will fail. */
-    worker.store.registerValidPaths(infos);
+    {
+        ValidPathInfos infos2;
+        for (auto & i : infos) infos2.push_back(i.second);
+        worker.store.registerValidPaths(infos2);
+    }
 
     /* In case of a fixed-output derivation hash mismatch, throw an
        exception now that we have registered the output as valid. */
@@ -3328,6 +3298,158 @@ void DerivationGoal::registerOutputs()
 }
 
 
+void DerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & outputs)
+{
+    std::map<Path, const ValidPathInfo &> outputsByPath;
+    for (auto & output : outputs)
+        outputsByPath.emplace(output.second.path, output.second);
+
+    for (auto & output : outputs) {
+        auto & outputName = output.first;
+        auto & info = output.second;
+
+        struct Checks
+        {
+            bool ignoreSelfRefs = false;
+            std::experimental::optional<uint64_t> maxSize, maxClosureSize;
+            std::experimental::optional<Strings> allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites;
+        };
+
+        /* Compute the closure and closure size of some output. This
+           is slightly tricky because some of its references (namely
+           other outputs) may not be valid yet. */
+        auto getClosure = [&](const Path & path)
+        {
+            uint64_t closureSize = 0;
+            PathSet pathsDone;
+            std::queue<Path> pathsLeft;
+            pathsLeft.push(path);
+
+            while (!pathsLeft.empty()) {
+                auto path = pathsLeft.front();
+                pathsLeft.pop();
+                if (!pathsDone.insert(path).second) continue;
+
+                auto i = outputsByPath.find(path);
+                if (i != outputsByPath.end()) {
+                    closureSize += i->second.narSize;
+                    for (auto & ref : i->second.references)
+                        pathsLeft.push(ref);
+                } else {
+                    auto info = worker.store.queryPathInfo(path);
+                    closureSize += info->narSize;
+                    for (auto & ref : info->references)
+                        pathsLeft.push(ref);
+                }
+            }
+
+            return std::make_pair(pathsDone, closureSize);
+        };
+
+        auto applyChecks = [&](const Checks & checks)
+        {
+            if (checks.maxSize && info.narSize > *checks.maxSize)
+                throw BuildError("path '%s' is too large at %d bytes; limit is %d bytes",
+                    info.path, info.narSize, *checks.maxSize);
+
+            if (checks.maxClosureSize) {
+                uint64_t closureSize = getClosure(info.path).second;
+                if (closureSize > *checks.maxClosureSize)
+                    throw BuildError("closure of path '%s' is too large at %d bytes; limit is %d bytes",
+                        info.path, closureSize, *checks.maxClosureSize);
+            }
+
+            auto checkRefs = [&](const std::experimental::optional<Strings> & value, bool allowed, bool recursive)
+            {
+                if (!value) return;
+
+                PathSet spec = parseReferenceSpecifiers(worker.store, *drv, *value);
+
+                PathSet used = recursive ? getClosure(info.path).first : info.references;
+
+                if (recursive && checks.ignoreSelfRefs)
+                    used.erase(info.path);
+
+                PathSet badPaths;
+
+                for (auto & i : used)
+                    if (allowed) {
+                        if (!spec.count(i))
+                            badPaths.insert(i);
+                    } else {
+                        if (spec.count(i))
+                            badPaths.insert(i);
+                    }
+
+                if (!badPaths.empty()) {
+                    string badPathsStr;
+                    for (auto & i : badPaths) {
+                        badPathsStr += "\n  ";
+                        badPathsStr += i;
+                    }
+                    throw BuildError("output '%s' is not allowed to refer to the following paths:%s", info.path, badPathsStr);
+                }
+            };
+
+            checkRefs(checks.allowedReferences, true, false);
+            checkRefs(checks.allowedRequisites, true, true);
+            checkRefs(checks.disallowedReferences, false, false);
+            checkRefs(checks.disallowedRequisites, false, true);
+        };
+
+        if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) {
+            auto outputChecks = structuredAttrs->find("outputChecks");
+            if (outputChecks != structuredAttrs->end()) {
+                auto output = outputChecks->find(outputName);
+
+                if (output != outputChecks->end()) {
+                    Checks checks;
+
+                    auto maxSize = output->find("maxSize");
+                    if (maxSize != output->end())
+                        checks.maxSize = maxSize->get<uint64_t>();
+
+                    auto maxClosureSize = output->find("maxClosureSize");
+                    if (maxClosureSize != output->end())
+                        checks.maxClosureSize = maxClosureSize->get<uint64_t>();
+
+                    auto get = [&](const std::string & name) -> std::experimental::optional<Strings> {
+                        auto i = output->find(name);
+                        if (i != output->end()) {
+                            Strings res;
+                            for (auto j = i->begin(); j != i->end(); ++j) {
+                                if (!j->is_string())
+                                    throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
+                                res.push_back(j->get<std::string>());
+                            }
+                            return res;
+                        }
+                        return {};
+                    };
+
+                    checks.allowedReferences = get("allowedReferences");
+                    checks.allowedRequisites = get("allowedRequisites");
+                    checks.disallowedReferences = get("disallowedReferences");
+                    checks.disallowedRequisites = get("disallowedRequisites");
+
+                    applyChecks(checks);
+                }
+            }
+        } else {
+            // legacy non-structured-attributes case
+            Checks checks;
+            checks.ignoreSelfRefs = true;
+            checks.allowedReferences = parsedDrv->getStringsAttr("allowedReferences");
+            checks.allowedRequisites = parsedDrv->getStringsAttr("allowedRequisites");
+            checks.disallowedReferences = parsedDrv->getStringsAttr("disallowedReferences");
+            checks.disallowedRequisites = parsedDrv->getStringsAttr("disallowedRequisites");
+            applyChecks(checks);
+        }
+    }
+}
+
+
 Path DerivationGoal::openLogFile()
 {
     logSize = 0;
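
The new checkOutputs() routine above reads its per-output checks from the derivation's structured attributes. The following standalone sketch shows the JSON shape it expects and parses it in the same style; it assumes the structured attrs are exposed as a nlohmann::json object (which the find()/get<T>()/is_string() calls above suggest), and the output name "out", the size limits and the store path are illustrative only, not taken from the patch.

    // Sketch only: the attribute values below are made up for illustration.
    #include <nlohmann/json.hpp>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        auto structuredAttrs = nlohmann::json::parse(R"({
            "outputChecks": {
                "out": {
                    "maxSize": 268435456,
                    "maxClosureSize": 1073741824,
                    "allowedReferences": ["/nix/store/00000000000000000000000000000000-example-dep"]
                }
            }
        })");

        auto outputChecks = structuredAttrs.find("outputChecks");
        if (outputChecks != structuredAttrs.end()) {
            auto output = outputChecks->find("out");
            if (output != outputChecks->end()) {
                auto maxSize = output->find("maxSize");
                if (maxSize != output->end())
                    std::cout << "maxSize = " << maxSize->get<uint64_t>() << "\n";
                auto allowed = output->find("allowedReferences");
                if (allowed != output->end())
                    for (auto & r : *allowed)
                        std::cout << "allowed: " << r.get<std::string>() << "\n";
            }
        }
    }

Derivations without __structuredAttrs fall through to the legacy branch at the end of checkOutputs(), where the same checks are taken from the plain 'allowedReferences'-style attributes and self-references are ignored.
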
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index cf4ae63cdc2f..6b3e204536f1 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -82,9 +82,9 @@ public:
     /* Whether to show build log output in real time. */
     bool verboseBuild = true;
 
-    /* If verboseBuild is false, the number of lines of the tail of
-       the log to show if a build fails. */
-    size_t logLines = 10;
+    Setting<size_t> logLines{this, 10, "log-lines",
+        "If verbose-build is false, the number of lines of the tail of "
+        "the log to show if a build fails."};
 
     MaxBuildJobsSetting maxBuildJobs{this, 1, "max-jobs",
         "Maximum number of parallel build jobs. \"auto\" means use number of cores.",
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index 88d2574e86ef..26e1851981db 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -303,6 +303,12 @@ struct LegacySSHStore : public Store
     {
         auto conn(connections->get());
     }
+
+    unsigned int getProtocol() override
+    {
+        auto conn(connections->get());
+        return conn->remoteVersion;
+    }
 };
 
 static RegisterStoreImplementation regStore([](
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 197b9d78995b..216f3417c4a8 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -1338,6 +1338,12 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store,
 }
 
 
+unsigned int LocalStore::getProtocol()
+{
+    return PROTOCOL_VERSION;
+}
+
+
 #if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL)
 
 static void makeMutable(const Path & path)
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 746bdbeed793..fce963433a5e 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -209,6 +209,8 @@ public:
 
     void registerValidPaths(const ValidPathInfos & infos);
 
+    unsigned int getProtocol() override;
+
     void vacuumDB();
 
     /* Repair the contents of the given path by redownloading it using
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index ef8b0e53b808..def140cfbe18 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -693,6 +693,13 @@ void RemoteStore::connect()
 }
 
 
+unsigned int RemoteStore::getProtocol()
+{
+    auto conn(connections->get());
+    return conn->daemonVersion;
+}
+
+
 void RemoteStore::flushBadConnections()
 {
     connections->flushBad();
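
getProtocol() is the new hook this patch threads through the store hierarchy: LegacySSHStore and RemoteStore report the negotiated protocol version of their connection, LocalStore reports PROTOCOL_VERSION, and the Store base class (see the store-api.hh hunk further down) defaults to 0. A hypothetical caller-side sketch, not part of this patch — the feature name and version threshold are invented, and GET_PROTOCOL_MINOR is assumed to be the macro from worker-protocol.hh:

    #include "store-api.hh"
    #include "worker-protocol.hh"

    /* Gate a hypothetical feature on the store's protocol version. */
    static bool supportsSomeFeature(nix::Store & store)
    {
        auto proto = store.getProtocol();
        if (proto == 0) return false;            /* store type without a protocol */
        return GET_PROTOCOL_MINOR(proto) >= 21;  /* illustrative threshold */
    }
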
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 7f9d7d1f56d6..4f554b5980e8 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -98,6 +98,8 @@ public:
 
     void connect() override;
 
+    unsigned int getProtocol() override;
+
     void flushBadConnections();
 
 protected:
@@ -127,7 +129,7 @@ protected:
 
     ConnectionHandle getConnection();
 
-    friend class ConnectionHandle;
+    friend struct ConnectionHandle;
 
 private:
 
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index ba11ce6bb6de..1f755ba9eee7 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -173,6 +173,8 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
     const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
     const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
     const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
+    const Setting<bool> multipartUpload{
+        this, false, "multipart-upload", "whether to use multi-part uploads"};
     const Setting<uint64_t> bufferSize{
         this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"};
 
@@ -261,48 +263,70 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor>
             executor = std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(maxThreads);
 
-        std::call_once(transferManagerCreated, [&]() {
+        std::call_once(transferManagerCreated, [&]()
+        {
+            if (multipartUpload) {
+                TransferManagerConfiguration transferConfig(executor.get());
+
+                transferConfig.s3Client = s3Helper.client;
+                transferConfig.bufferSize = bufferSize;
+
+                transferConfig.uploadProgressCallback =
+                    [](const TransferManager *transferManager,
+                        const std::shared_ptr<const TransferHandle>
+                        &transferHandle)
+                    {
+                        //FIXME: find a way to properly abort the multipart upload.
+                        //checkInterrupt();
+                        debug("upload progress ('%s'): '%d' of '%d' bytes",
+                            transferHandle->GetKey(),
+                            transferHandle->GetBytesTransferred(),
+                            transferHandle->GetBytesTotalSize());
+                    };
+
+                transferManager = TransferManager::Create(transferConfig);
+            }
+        });
 
-            TransferManagerConfiguration transferConfig(executor.get());
+        auto now1 = std::chrono::steady_clock::now();
 
-            transferConfig.s3Client = s3Helper.client;
-            transferConfig.bufferSize = bufferSize;
+        if (transferManager) {
 
-            transferConfig.uploadProgressCallback =
-                [&](const TransferManager *transferManager,
-                    const std::shared_ptr<const TransferHandle>
-                    &transferHandle)
-                {
-                    //FIXME: find a way to properly abort the multipart upload.
-                    //checkInterrupt();
-                    debug("upload progress ('%s'): '%d' of '%d' bytes",
-                        path,
-                        transferHandle->GetBytesTransferred(),
-                        transferHandle->GetBytesTotalSize());
-                };
+            std::shared_ptr<TransferHandle> transferHandle =
+                transferManager->UploadFile(
+                    stream, bucketName, path, mimeType,
+                    Aws::Map<Aws::String, Aws::String>(),
+                    nullptr, contentEncoding);
 
-            transferManager = TransferManager::Create(transferConfig);
-        });
+            transferHandle->WaitUntilFinished();
 
-        auto now1 = std::chrono::steady_clock::now();
+            if (transferHandle->GetStatus() == TransferStatus::FAILED)
+                throw Error("AWS error: failed to upload 's3://%s/%s': %s",
+                    bucketName, path, transferHandle->GetLastError().GetMessage());
 
-        std::shared_ptr<TransferHandle> transferHandle =
-            transferManager->UploadFile(
-                stream, bucketName, path, mimeType,
-                Aws::Map<Aws::String, Aws::String>(),
-                nullptr, contentEncoding);
+            if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
+                throw Error("AWS error: transfer status of 's3://%s/%s' is in an unexpected state",
+                    bucketName, path);
 
-        transferHandle->WaitUntilFinished();
+        } else {
 
-        if (transferHandle->GetStatus() == TransferStatus::FAILED)
-            throw Error("AWS error: failed to upload 's3://%s/%s': %s",
-                bucketName, path, transferHandle->GetLastError().GetMessage());
+            auto request =
+                Aws::S3::Model::PutObjectRequest()
+                .WithBucket(bucketName)
+                .WithKey(path);
 
-        if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
-            throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state",
-                bucketName, path);
+            request.SetContentType(mimeType);
 
-        printTalkative("upload of '%s' completed", path);
+            if (contentEncoding != "")
+                request.SetContentEncoding(contentEncoding);
+
+            auto stream = std::make_shared<istringstream_nocopy>(data);
+
+            request.SetBody(stream);
+
+            auto result = checkAws(fmt("AWS error uploading '%s'", path),
+                s3Helper.client->PutObject(request));
+        }
 
         auto now2 = std::chrono::steady_clock::now();
 
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 92e2685f7f66..dc54c735fdb1 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -588,15 +588,19 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
 
     uint64_t total = 0;
 
-    // FIXME
-#if 0
     if (!info->narHash) {
+        StringSink sink;
+        srcStore->narFromPath({storePath}, sink);
         auto info2 = make_ref<ValidPathInfo>(*info);
         info2->narHash = hashString(htSHA256, *sink.s);
         if (!info->narSize) info2->narSize = sink.s->size();
+        if (info->ultimate) info2->ultimate = false;
         info = info2;
+
+        StringSource source(*sink.s);
+        dstStore->addToStore(*info, source, repair, checkSigs);
+        return;
     }
-#endif
 
     if (info->ultimate) {
         auto info2 = make_ref<ValidPathInfo>(*info);
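
The previously disabled (#if 0) block in copyStorePath() is now live: when the source path info lacks a NAR hash, the path is dumped as a NAR, hashed, and streamed straight to the destination store. A minimal sketch of that hashing pattern, assuming the same libstore interfaces the hunk itself uses (StringSink, narFromPath, hashString); the helper name is hypothetical:

    #include "hash.hh"
    #include "serialise.hh"
    #include "store-api.hh"

    /* Compute the SHA-256 hash of a store path's NAR serialisation. */
    nix::Hash narHashOf(nix::ref<nix::Store> store, const nix::Path & storePath)
    {
        nix::StringSink sink;
        store->narFromPath(storePath, sink);             /* dump the path as a NAR */
        return nix::hashString(nix::htSHA256, *sink.s);  /* hash the NAR bytes */
    }
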
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 099818ed6f69..106b2be5e6b2 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -599,6 +599,12 @@ public:
        a notion of connection. Otherwise this is a no-op. */
     virtual void connect() { };
 
+    /* Get the protocol version of this store or its connection. */
+    virtual unsigned int getProtocol()
+    {
+        return 0;
+    };
+
     /* Get the priority of the store, used to order substituters. In
        particular, binary caches can specify a priority field in their
        "nix-cache-info" file. Lower value means higher priority. */