From 49cd7387ad6546571ca31a41f208091b482defaa Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 28 Jun 2012 17:19:32 -0400 Subject: nix-push: create a manifest-less binary cache Manifests are a huge pain, since users need to run nix-pull directly or indirectly to obtain them. They tend to be large and lag behind the available binaries; also, the downloaded manifests in /nix/var/nix/manifest need to be in sync with the Nixpkgs sources. So we want to get rid of them. The idea of manifest-free operation works as follows. Nix is configured with a set of URIs of binary caches, e.g. http://nixos.org/binary-cache Whenever Nix needs a store path X, it checks each binary cache for the existence of a file /.narinfo, e.g. http://nixos.org/binary-cache/bi1gh9...ia17.narinfo The .narinfo file contains the necessary information about the store path that was formerly kept in the manifest, i.e., (relative) URI of the compressed NAR, references, size, hash, etc. For example: StorePath: /nix/store/xqp4l88cr9bxv01jinkz861mnc9p7qfi-neon-0.29.6 URL: 1bjxbg52l32wj8ww47sw9f4qz0r8n5vs71l93lcbgk2506v3cpfd.nar.bz2 CompressedHash: sha256:1bjxbg52l32wj8ww47sw9f4qz0r8n5vs71l93lcbgk2506v3cpfd CompressedSize: 202542 NarHash: sha256:1af26536781e6134ab84201b33408759fc59b36cc5530f57c0663f67b588e15f NarSize: 700440 References: 043zrsanirjh8nbc5vqpjn93hhrf107f-bash-4.2-p24 cj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13 ... Deriver: 4idz1bgi58h3pazxr3akrw4fsr6zrf3r-neon-0.29.6.drv System: x86_64-linux Nix then knows that it needs to download http://nixos.org/binary-cache/1bjxbg52l32wj8ww47sw9f4qz0r8n5vs71l93lcbgk2506v3cpfd.nar.bz2 to substitute the store path. Note that the store directory is omitted from the References and Deriver fields to save space, and that the URL can be relative to the binary cache prefix. This patch just makes nix-push create binary caches in this format. The next step is to make a substituter that supports them. 
--- scripts/nix-push.in | 252 +++++++++++++++++++--------------------------------- 1 file changed, 93 insertions(+), 159 deletions(-) diff --git a/scripts/nix-push.in b/scripts/nix-push.in index a1c02190bd6c..db94b51fd3b2 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -1,10 +1,13 @@ #! @perl@ -w @perlFlags@ use strict; +use File::Basename; use File::Temp qw(tempdir); +use File::Path qw(mkpath); use File::stat; +use File::Copy; use Nix::Config; -use Nix::Manifest; +use Nix::Store; my $hashAlgo = "sha256"; @@ -12,7 +15,6 @@ my $tmpDir = tempdir("nix-push.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die "cannot create a temporary directory"; my $nixExpr = "$tmpDir/create-nars.nix"; -my $manifest = "$tmpDir/MANIFEST"; my $curl = "$Nix::Config::curl --fail --silent"; my $extraCurlFlags = ${ENV{'CURL_FLAGS'}}; @@ -22,18 +24,14 @@ $curl = "$curl $extraCurlFlags" if defined $extraCurlFlags; # Parse the command line. my $localCopy; my $localArchivesDir; -my $localManifestFile; - -my $targetArchivesUrl; my $archivesPutURL; my $archivesGetURL; -my $manifestPutURL; sub showSyntax { print STDERR <$nixExpr"; print NIX "["; @@ -112,172 +101,117 @@ print NIX "]"; close NIX; -# Instantiate store derivations from the Nix expression. -my @storeExprs; -print STDERR "instantiating store derivations...\n"; -my $pid = open(READ, "$Nix::Config::binDir/nix-instantiate $nixExpr|") - or die "cannot run nix-instantiate"; +# Build the Nix expression. +print STDERR "building compressed archives...\n"; +my @narPaths; +my $pid = open(READ, "$Nix::Config::binDir/nix-build $nixExpr|") + or die "cannot run nix-build"; while () { chomp; die unless /^\//; - push @storeExprs, $_; -} -close READ or die "nix-instantiate failed: $?"; - - -# Build the derivations. 
-print STDERR "creating archives...\n"; - -my @narPaths; - -my @tmp = @storeExprs; -while (scalar @tmp > 0) { - my $n = scalar @tmp; - if ($n > 256) { $n = 256 }; - my @tmp2 = @tmp[0..$n - 1]; - @tmp = @tmp[$n..scalar @tmp - 1]; - - my $pid = open(READ, "$Nix::Config::binDir/nix-store --realise @tmp2|") - or die "cannot run nix-store"; - while () { - chomp; - die unless (/^\//); - push @narPaths, "$_"; - } - close READ or die "nix-store failed: $?"; + push @narPaths, $_; } +close READ or die "nix-build failed: $?"; -# Create the manifest. -print STDERR "creating manifest...\n"; +# Upload the archives and the corresponding info files. +print STDERR "uploading/copying archives...\n"; -my %narFiles; -my %patches; +my $totalNarSize = 0; +my $totalNarBz2Size = 0; -my @narArchives; for (my $n = 0; $n < scalar @storePaths; $n++) { my $storePath = $storePaths[$n]; my $narDir = $narPaths[$n]; - - $storePath =~ /\/([^\/]*)$/; - my $basename = $1; - defined $basename or die; - - open HASH, "$narDir/narbz2-hash" or die "cannot open narbz2-hash"; - my $narbz2Hash = ; - chomp $narbz2Hash; - $narbz2Hash =~ /^[0-9a-z]+$/ or die "invalid hash"; - close HASH; - - my $narName = "$narbz2Hash.nar.bz2"; - - my $narFile = "$narDir/$narName"; - (-f $narFile) or die "narfile for $storePath not found"; - push @narArchives, $narFile; - - my $narbz2Size = stat($narFile)->size; - - my $references = `$Nix::Config::binDir/nix-store --query --references '$storePath'`; - die "cannot query references for `$storePath'" if $? != 0; - $references = join(" ", split(" ", $references)); - - my $deriver = `$Nix::Config::binDir/nix-store --query --deriver '$storePath'`; - die "cannot query deriver for `$storePath'" if $? != 0; - chomp $deriver; - $deriver = "" if $deriver eq "unknown-deriver"; + my $baseName = basename $storePath; - my $narHash = `$Nix::Config::binDir/nix-store --query --hash '$storePath'`; - die "cannot query hash for `$storePath'" if $? 
!= 0; - chomp $narHash; + # Get info about the store path. + my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath); # In some exceptional cases (such as VM tests that use the Nix # store of the host), the database doesn't contain the hash. So # compute it. if ($narHash =~ /^sha256:0*$/) { - $narHash = `$Nix::Config::binDir/nix-hash --type sha256 --base32 '$storePath'`; - die "cannot hash `$storePath'" if $? != 0; + my $nar = "$tmpDir/nar"; + system("$Nix::Config::binDir/nix-store --dump $storePath > $nar") == 0 + or die "cannot dump $storePath\n"; + $narHash = `$Nix::Config::binDir/nix-hash --type sha256 --flat $nar`; + die "cannot hash `$nar'" if $? != 0; chomp $narHash; $narHash = "sha256:$narHash"; + $narSize = stat("$nar")->size; + unlink $nar or die; } - my $narSize = `$Nix::Config::binDir/nix-store --query --size '$storePath'`; - die "cannot query size for `$storePath'" if $? != 0; - chomp $narSize; - - my $url; - if ($localCopy) { - $url = "$targetArchivesUrl/$narName"; - } else { - $url = "$archivesGetURL/$narName"; - } - $narFiles{$storePath} = [ - { url => $url - , hash => "$hashAlgo:$narbz2Hash" - , size => $narbz2Size - , narHash => "$narHash" - , narSize => $narSize - , references => $references - , deriver => $deriver - } - ]; -} - -writeManifest $manifest, \%narFiles, \%patches; - - -sub copyFile { - my $src = shift; - my $dst = shift; - my $tmp = "$dst.tmp.$$"; - system("@coreutils@/cp", $src, $tmp) == 0 or die "cannot copy file"; - rename($tmp, $dst) or die "cannot rename file: $!"; -} - + $totalNarSize += $narSize; + + # Get info about the compressed NAR. + open HASH, "$narDir/narbz2-hash" or die "cannot open narbz2-hash"; + my $narBz2Hash = ; + chomp $narBz2Hash; + $narBz2Hash =~ /^[0-9a-z]+$/ or die "invalid hash"; + close HASH; -# Upload/copy the archives. 
-print STDERR "uploading/copying archives...\n"; + my $narName = "$narBz2Hash.nar.bz2"; -sub archiveExists { - my $name = shift; - print STDERR " HEAD on $archivesGetURL/$name\n"; - return system("$curl --head $archivesGetURL/$name > /dev/null") == 0; -} + my $narFile = "$narDir/$narName"; + (-f $narFile) or die "NAR file for $storePath not found"; -foreach my $narArchive (@narArchives) { + my $narBz2Size = stat($narFile)->size; + $totalNarBz2Size += $narBz2Size; - $narArchive =~ /\/([^\/]*)$/; - my $basename = $1; + printf STDERR "%s [%.2f MiB, %.1f%%]\n", $storePath, + $narBz2Size / (1024 * 1024), $narBz2Size / $narSize * 100; + # Upload the compressed NAR. if ($localCopy) { - # Since nix-push creates $dst atomically, if it exists we - # don't have to copy again. - my $dst = "$localArchivesDir/$basename"; - if (! -f "$localArchivesDir/$basename") { - print STDERR " $narArchive\n"; - copyFile $narArchive, $dst; + my $dst = "$localArchivesDir/$narName"; + if (! -f $dst) { + my $tmp = "$localArchivesDir/.tmp.$$.$narName"; + copy($narFile, $tmp) or die "cannot copy $narFile to $tmp: $!\n"; + rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n"; } + } else { + die "unimplemented"; + #if (!archiveExists("$basename")) { + # system("$curl --show-error --upload-file " . + # "'$narArchive' '$archivesPutURL/$basename' > /dev/null") == 0 or + # die "curl failed on $narArchive: $?"; + #} } - else { - if (!archiveExists("$basename")) { - print STDERR " $narArchive\n"; - system("$curl --show-error --upload-file " . - "'$narArchive' '$archivesPutURL/$basename' > /dev/null") == 0 or - die "curl failed on $narArchive: $?"; + + # Upload the info file. + my $info; + $info .= "StorePath: $storePath\n"; + $info .= "URL: $narName\n"; + $info .= "CompressedHash: sha256:$narBz2Hash\n"; + $info .= "CompressedSize: $narBz2Size\n"; + $info .= "NarHash: $narHash\n"; + $info .= "NarSize: $narSize\n"; + $info .= "References: " . join(" ", map { basename $_ } @{$refs}) . 
"\n"; + if (defined $deriver) { + $info .= "Deriver: " . basename $deriver, "\n"; + if (isValidPath($deriver)) { + my $drv = derivationFromPath($deriver); + $info .= "System: $drv->{platform}\n"; } } -} - -# Upload the manifest. -print STDERR "uploading manifest...\n"; -if ($localCopy) { - copyFile $manifest, $localManifestFile; - copyFile "$manifest.bz2", "$localManifestFile.bz2"; -} else { - system("$curl --show-error --upload-file " . - "'$manifest' '$manifestPutURL' > /dev/null") == 0 or - die "curl failed on $manifest: $?"; - system("$curl --show-error --upload-file " . - "'$manifest'.bz2 '$manifestPutURL'.bz2 > /dev/null") == 0 or - die "curl failed on $manifest: $?"; + my $infoName = hashString("sha256", 1, $storePath); + + if ($localCopy) { + my $dst = "$localArchivesDir/$infoName.narinfo"; + if (! -f $dst) { + my $tmp = "$localArchivesDir/.tmp.$$.$infoName"; + open INFO, ">$tmp" or die; + print INFO "$info" or die; + close INFO or die; + rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n"; + } + } else { + die "unimplemented"; + } } + +printf STDERR "total compressed size %.2f MiB, %.1f%%\n", + $totalNarBz2Size / (1024 * 1024), $totalNarBz2Size / $totalNarSize * 100; -- cgit 1.4.1 From 4911a10a4e51102a21a5d123a852c75d2ec92dbc Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 29 Jun 2012 14:26:31 -0400 Subject: Use XZ compression in binary caches XZ compresses significantly better than bzip2. Here are the compression ratios and execution times (using 4 cores in parallel) on my /var/run/current-system (3.1 GiB): bzip2: total compressed size 849.56 MiB, 30.8% [2m08] xz -6: total compressed size 641.84 MiB, 23.4% [6m53] xz -7: total compressed size 621.82 MiB, 22.6% [7m19] xz -8: total compressed size 599.33 MiB, 21.8% [7m18] xz -9: total compressed size 588.18 MiB, 21.4% [7m40] Note that compression takes much longer. 
More importantly, however, decompression is much faster: bzip2: 1m47.274s xz -6: 0m55.446s xz -7: 0m54.119s xz -8: 0m52.388s xz -9: 0m51.842s The only downside to using -9 is that decompression takes a fair amount (~65 MB) of memory. --- configure.ac | 1 + corepkgs/config.nix.in | 3 ++- corepkgs/nar.nix | 9 +++++---- perl/lib/Nix/Config.pm.in | 3 ++- scripts/nix-push.in | 26 +++++++++++++------------- substitute.mk | 1 + 6 files changed, 24 insertions(+), 19 deletions(-) diff --git a/configure.ac b/configure.ac index 7b814dedcbfd..28959198d598 100644 --- a/configure.ac +++ b/configure.ac @@ -161,6 +161,7 @@ NEED_PROG(perl, perl) NEED_PROG(sed, sed) NEED_PROG(tar, tar) NEED_PROG(bzip2, bzip2) +NEED_PROG(xz, xz) AC_PATH_PROG(dot, dot) AC_PATH_PROG(dblatex, dblatex) AC_PATH_PROG(gzip, gzip) diff --git a/corepkgs/config.nix.in b/corepkgs/config.nix.in index b324d732a1a2..1be4bd0904ba 100644 --- a/corepkgs/config.nix.in +++ b/corepkgs/config.nix.in @@ -6,7 +6,8 @@ in { perl = "@perl@"; shell = "@shell@"; coreutils = "@coreutils@"; - bzip2 = fromEnv "NIX_BZIP2" "@bzip2@"; + bzip2 = "@bzip2@"; + xz = "@xz@"; tar = "@tar@"; tr = "@tr@"; nixBinDir = fromEnv "NIX_BIN_DIR" "@bindir@"; diff --git a/corepkgs/nar.nix b/corepkgs/nar.nix index 4747dc31def2..c5155dcc917c 100644 --- a/corepkgs/nar.nix +++ b/corepkgs/nar.nix @@ -8,14 +8,15 @@ let echo "packing ‘$storePath’..." 
mkdir $out - dst=$out/tmp.nar.bz2 + dst=$out/tmp.nar.xz set -o pipefail - nix-store --dump "$storePath" | ${bzip2} > $dst + nix-store --dump "$storePath" | ${xz} -9 > $dst - nix-hash --flat --type $hashAlgo --base32 $dst > $out/narbz2-hash + hash=$(nix-hash --flat --type $hashAlgo --base32 $dst) + echo -n $hash > $out/nar-compressed-hash - mv $out/tmp.nar.bz2 $out/$(cat $out/narbz2-hash).nar.bz2 + mv $dst $out/$hash.nar.xz ''; in diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in index b6d346651807..5adc1ffba9e9 100644 --- a/perl/lib/Nix/Config.pm.in +++ b/perl/lib/Nix/Config.pm.in @@ -7,7 +7,8 @@ $manifestDir = $ENV{"NIX_MANIFESTS_DIR"} || "@localstatedir@/nix/manifests"; $logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix"; $confDir = $ENV{"NIX_CONF_DIR"} || "@sysconfdir@/nix"; -$bzip2 = $ENV{"NIX_BZIP2"} || "@bzip2@"; +$bzip2 = "@bzip2@"; +$xz = "@xz@"; $curl = "@curl@"; $useBindings = "@perlbindings@" eq "yes"; diff --git a/scripts/nix-push.in b/scripts/nix-push.in index db94b51fd3b2..9edd8731951c 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -118,7 +118,7 @@ close READ or die "nix-build failed: $?"; print STDERR "uploading/copying archives...\n"; my $totalNarSize = 0; -my $totalNarBz2Size = 0; +my $totalCompressedSize = 0; for (my $n = 0; $n < scalar @storePaths; $n++) { my $storePath = $storePaths[$n]; @@ -146,22 +146,22 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { $totalNarSize += $narSize; # Get info about the compressed NAR. 
- open HASH, "$narDir/narbz2-hash" or die "cannot open narbz2-hash"; - my $narBz2Hash = ; - chomp $narBz2Hash; - $narBz2Hash =~ /^[0-9a-z]+$/ or die "invalid hash"; + open HASH, "$narDir/nar-compressed-hash" or die "cannot open nar-compressed-hash"; + my $compressedHash = ; + chomp $compressedHash; + $compressedHash =~ /^[0-9a-z]+$/ or die "invalid hash"; close HASH; - my $narName = "$narBz2Hash.nar.bz2"; + my $narName = "$compressedHash.nar.bz2"; my $narFile = "$narDir/$narName"; (-f $narFile) or die "NAR file for $storePath not found"; - my $narBz2Size = stat($narFile)->size; - $totalNarBz2Size += $narBz2Size; + my $compressedSize = stat($narFile)->size; + $totalCompressedSize += $compressedSize; printf STDERR "%s [%.2f MiB, %.1f%%]\n", $storePath, - $narBz2Size / (1024 * 1024), $narBz2Size / $narSize * 100; + $compressedSize / (1024 * 1024), $compressedSize / $narSize * 100; # Upload the compressed NAR. if ($localCopy) { @@ -184,13 +184,13 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { my $info; $info .= "StorePath: $storePath\n"; $info .= "URL: $narName\n"; - $info .= "CompressedHash: sha256:$narBz2Hash\n"; - $info .= "CompressedSize: $narBz2Size\n"; + $info .= "CompressedHash: sha256:$compressedHash\n"; + $info .= "CompressedSize: $compressedSize\n"; $info .= "NarHash: $narHash\n"; $info .= "NarSize: $narSize\n"; $info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n"; if (defined $deriver) { - $info .= "Deriver: " . basename $deriver, "\n"; + $info .= "Deriver: " . basename $deriver . 
"\n"; if (isValidPath($deriver)) { my $drv = derivationFromPath($deriver); $info .= "System: $drv->{platform}\n"; @@ -214,4 +214,4 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { } printf STDERR "total compressed size %.2f MiB, %.1f%%\n", - $totalNarBz2Size / (1024 * 1024), $totalNarBz2Size / $totalNarSize * 100; + $totalCompressedSize / (1024 * 1024), $totalCompressedSize / $totalNarSize * 100; diff --git a/substitute.mk b/substitute.mk index eb489c97a509..77c5afc28117 100644 --- a/substitute.mk +++ b/substitute.mk @@ -16,6 +16,7 @@ -e "s^@shell\@^$(bash)^g" \ -e "s^@curl\@^$(curl)^g" \ -e "s^@bzip2\@^$(bzip2)^g" \ + -e "s^@xz\@^$(xz)^g" \ -e "s^@perl\@^$(perl)^g" \ -e "s^@perlFlags\@^$(perlFlags)^g" \ -e "s^@coreutils\@^$(coreutils)^g" \ -- cgit 1.4.1 From 91b8814f0d275bbb6c7de9546e5e8cb784dad10b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 29 Jun 2012 17:16:00 -0400 Subject: Doh --- scripts/nix-push.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/nix-push.in b/scripts/nix-push.in index 9edd8731951c..4a8bdc81b50d 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -152,7 +152,7 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { $compressedHash =~ /^[0-9a-z]+$/ or die "invalid hash"; close HASH; - my $narName = "$compressedHash.nar.bz2"; + my $narName = "$compressedHash.nar.xz"; my $narFile = "$narDir/$narName"; (-f $narFile) or die "NAR file for $storePath not found"; -- cgit 1.4.1 From 37f70984645b4fc5e6ed9f30a858ba6fbf402441 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 29 Jun 2012 18:28:52 -0400 Subject: First attempt at the manifest-less substituter --- .gitignore | 1 + perl/lib/Nix/Config.pm.in | 1 + scripts/Makefile.am | 8 +-- scripts/download-from-binary-cache.pl.in | 118 +++++++++++++++++++++++++++++++ 4 files changed, 123 insertions(+), 5 deletions(-) create mode 100644 scripts/download-from-binary-cache.pl.in diff --git a/.gitignore b/.gitignore index d7f151507f9b..bcc7a79f8a30 100644 --- 
a/.gitignore +++ b/.gitignore @@ -60,6 +60,7 @@ Makefile.in /scripts/GeneratePatches.pm /scripts/download-using-manifests.pl /scripts/copy-from-other-stores.pl +/scripts/download-from-binary-cache.pl /scripts/find-runtime-roots.pl /scripts/build-remote.pl /scripts/nix-reduce-build diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in index 5adc1ffba9e9..64aaccd7cd31 100644 --- a/perl/lib/Nix/Config.pm.in +++ b/perl/lib/Nix/Config.pm.in @@ -6,6 +6,7 @@ $stateDir = $ENV{"NIX_STATE_DIR"} || "@localstatedir@/nix"; $manifestDir = $ENV{"NIX_MANIFESTS_DIR"} || "@localstatedir@/nix/manifests"; $logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix"; $confDir = $ENV{"NIX_CONF_DIR"} || "@sysconfdir@/nix"; +$storeDir = $ENV{"NIX_STORE_DIR"} || "@storedir@"; $bzip2 = "@bzip2@"; $xz = "@xz@"; diff --git a/scripts/Makefile.am b/scripts/Makefile.am index 18a59dbdb6f6..506b1aeb454f 100644 --- a/scripts/Makefile.am +++ b/scripts/Makefile.am @@ -7,17 +7,14 @@ noinst_SCRIPTS = nix-profile.sh \ find-runtime-roots.pl build-remote.pl nix-reduce-build \ copy-from-other-stores.pl nix-http-export.cgi -nix-pull nix-push: download-using-manifests.pl - -install-exec-local: download-using-manifests.pl copy-from-other-stores.pl find-runtime-roots.pl +install-exec-local: download-using-manifests.pl copy-from-other-stores.pl download-from-binary-cache.pl find-runtime-roots.pl $(INSTALL) -d $(DESTDIR)$(sysconfdir)/profile.d $(INSTALL_DATA) nix-profile.sh $(DESTDIR)$(sysconfdir)/profile.d/nix.sh $(INSTALL) -d $(DESTDIR)$(libexecdir)/nix $(INSTALL_PROGRAM) find-runtime-roots.pl $(DESTDIR)$(libexecdir)/nix $(INSTALL_PROGRAM) build-remote.pl $(DESTDIR)$(libexecdir)/nix $(INSTALL) -d $(DESTDIR)$(libexecdir)/nix/substituters - $(INSTALL_PROGRAM) download-using-manifests.pl $(DESTDIR)$(libexecdir)/nix/substituters - $(INSTALL_PROGRAM) copy-from-other-stores.pl $(DESTDIR)$(libexecdir)/nix/substituters + $(INSTALL_PROGRAM) download-using-manifests.pl copy-from-other-stores.pl 
download-from-binary-cache.pl $(DESTDIR)$(libexecdir)/nix/substituters $(INSTALL) -d $(DESTDIR)$(sysconfdir)/nix include ../substitute.mk @@ -29,6 +26,7 @@ EXTRA_DIST = nix-collect-garbage.in \ nix-build.in \ download-using-manifests.pl.in \ copy-from-other-stores.pl.in \ + download-from-binary-cache.pl.in \ nix-copy-closure.in \ find-runtime-roots.pl.in \ build-remote.pl.in \ diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in new file mode 100644 index 000000000000..ccd28eafc6d4 --- /dev/null +++ b/scripts/download-from-binary-cache.pl.in @@ -0,0 +1,118 @@ +#! @perl@ -w @perlFlags@ + +use strict; +use Nix::Config; +use Nix::Store; + +my @binaryCacheUrls = ("file:///tmp/binary-cache"); + +sub getInfoFrom { + my ($storePath, $pathHash, $binaryCacheUrl) = @_; + my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; + #print STDERR "checking $infoUrl...\n"; + my $s = `$Nix::Config::curl --fail --silent --location ${infoUrl}`; + if ($? != 0) { + print STDERR "GOT CURL REPLY ", $? 
>> 8, "\n"; + return undef; + } + my ($storePath2, $url, $fileHash, $fileSize, $narHash, $narSize, $deriver); + my @refs; + foreach my $line (split "\n", $s) { + $line =~ /^(.*): (.*)$/ or return undef; + if ($1 eq "StorePath") { $storePath2 = $2; } + elsif ($1 eq "URL") { $url = $2; } + elsif ($1 eq "CompressedHash") { $fileHash = $2; } + elsif ($1 eq "CompressedSize") { $fileSize = int($2); } + elsif ($1 eq "NarHash") { $narHash = $2; } + elsif ($1 eq "NarSize") { $narSize = int($2); } + elsif ($1 eq "References") { @refs = split / /, $2; } + elsif ($1 eq "Deriver") { $deriver = $2; } + } + if ($storePath ne $storePath2 || !defined $url || !defined $narHash || !defined $narSize) { + print STDERR "bad NAR info file ‘$infoUrl’\n"; + return undef + } + return + { url => $url + , fileHash => $fileHash + , fileSize => $fileSize + , narHash => $narHash + , narSize => $narSize + , refs => [ map { "$Nix::Config::storeDir/$_" } @refs ] + , deriver => "$Nix::Config::storeDir/$deriver" + } +} + +sub getInfo { + my ($storePath) = @_; + + my $pathHash = hashString("sha256", 1, $storePath); + + cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { + my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); + return $info if defined $info; + } + + return undef; +} + +sub downloadBinary { + my ($storePath) = @_; + + my $pathHash = hashString("sha256", 1, $storePath); + + cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { + my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); + if (defined $info) { + if (system("$Nix::Config::curl --fail --location $binaryCacheUrl/$info->{url} | $Nix::Config::xz -d | $Nix::Config::binDir/nix-store --restore $storePath") == 0) { + return 1; + } + } + } + + return 0; +} + +if ($ARGV[0] eq "--query") { + + while () { + my $cmd = $_; chomp $cmd; + + if ($cmd eq "have") { + my $storePath = ; chomp $storePath; + # FIXME: want to give correct info here, but it's too slow. 
+ print "0\n"; + #my $info = getInfo($storePath); + #if (defined $info) { print "1\n"; } else { print "0\n"; } + } + + elsif ($cmd eq "info") { + my $storePath = ; chomp $storePath; + my $info = getInfo($storePath); + if (defined $info) { + print "1\n"; + print $info->{deriver} || "", "\n"; + print scalar @{$info->{refs}}, "\n"; + print "$_\n" foreach @{$info->{refs}}; + print $info->{fileSize} || 0, "\n"; + print $info->{narSize}, "\n"; + } else { + print "0\n"; + } + } + + else { die "unknown command `$cmd'"; } + + flush STDOUT; + } + +} + +elsif ($ARGV[0] eq "--substitute") { + my $storePath = $ARGV[1] or die; + exit 1 unless downloadBinary($storePath); +} + +else { + die; +} -- cgit 1.4.1 From 000132cbd1624a681a8114a117de07a56a7eed4e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 29 Jun 2012 18:30:28 -0400 Subject: nix-push: Don't pollute the current directory with result symlink --- scripts/nix-push.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/nix-push.in b/scripts/nix-push.in index 4a8bdc81b50d..9f73b812705a 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -104,7 +104,7 @@ close NIX; # Build the Nix expression. 
print STDERR "building compressed archives...\n"; my @narPaths; -my $pid = open(READ, "$Nix::Config::binDir/nix-build $nixExpr|") +my $pid = open(READ, "$Nix::Config::binDir/nix-build $nixExpr -o $tmpDir/result |") or die "cannot run nix-build"; while () { chomp; -- cgit 1.4.1 From f4190c38bac1efdbfec9b1ff9c524808d23fe1cc Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sun, 1 Jul 2012 18:46:38 -0400 Subject: Allow both bzip2 and xz compression --- corepkgs/nar.nix | 18 +++++++--- scripts/download-from-binary-cache.pl.in | 19 +++++++--- scripts/nix-push.in | 61 +++++++++++++++++++++----------- 3 files changed, 68 insertions(+), 30 deletions(-) diff --git a/corepkgs/nar.nix b/corepkgs/nar.nix index c5155dcc917c..cf8851d50978 100644 --- a/corepkgs/nar.nix +++ b/corepkgs/nar.nix @@ -6,29 +6,37 @@ let '' export PATH=${nixBinDir}:${coreutils} + if [ $compressionType = "xz" ]; then + ext=xz + compressor=${xz} -9 + else + ext=bz2 + compressor=${bzip2} + fi + echo "packing ‘$storePath’..." mkdir $out - dst=$out/tmp.nar.xz + dst=$out/tmp.nar.$ext set -o pipefail - nix-store --dump "$storePath" | ${xz} -9 > $dst + nix-store --dump "$storePath" | $compressor -9 > $dst hash=$(nix-hash --flat --type $hashAlgo --base32 $dst) echo -n $hash > $out/nar-compressed-hash - mv $dst $out/$hash.nar.xz + mv $dst $out/$hash.nar.$ext ''; in -{ storePath, hashAlgo }: +{ storePath, hashAlgo, compressionType }: derivation { name = "nar"; system = builtins.currentSystem; builder = shell; args = [ "-e" builder ]; - inherit storePath hashAlgo; + inherit storePath hashAlgo compressionType; # Don't build in a chroot because Nix's dependencies may not be there. 
__noChroot = true; diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index ccd28eafc6d4..d121f2fc3151 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -4,7 +4,7 @@ use strict; use Nix::Config; use Nix::Store; -my @binaryCacheUrls = ("file:///tmp/binary-cache"); +my @binaryCacheUrls = ("file:///tmp/binary-cache2"); sub getInfoFrom { my ($storePath, $pathHash, $binaryCacheUrl) = @_; @@ -15,14 +15,15 @@ sub getInfoFrom { print STDERR "GOT CURL REPLY ", $? >> 8, "\n"; return undef; } - my ($storePath2, $url, $fileHash, $fileSize, $narHash, $narSize, $deriver); + my ($storePath2, $url, $compression, $fileHash, $fileSize, $narHash, $narSize, $deriver); my @refs; foreach my $line (split "\n", $s) { $line =~ /^(.*): (.*)$/ or return undef; if ($1 eq "StorePath") { $storePath2 = $2; } elsif ($1 eq "URL") { $url = $2; } - elsif ($1 eq "CompressedHash") { $fileHash = $2; } - elsif ($1 eq "CompressedSize") { $fileSize = int($2); } + elsif ($1 eq "Compression") { $compression = $2; } + elsif ($1 eq "FileHash") { $fileHash = $2; } + elsif ($1 eq "FileSize") { $fileSize = int($2); } elsif ($1 eq "NarHash") { $narHash = $2; } elsif ($1 eq "NarSize") { $narSize = int($2); } elsif ($1 eq "References") { @refs = split / /, $2; } @@ -34,6 +35,7 @@ sub getInfoFrom { } return { url => $url + , compression => ($compression || "bzip2") , fileHash => $fileHash , fileSize => $fileSize , narHash => $narHash @@ -64,7 +66,14 @@ sub downloadBinary { cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); if (defined $info) { - if (system("$Nix::Config::curl --fail --location $binaryCacheUrl/$info->{url} | $Nix::Config::xz -d | $Nix::Config::binDir/nix-store --restore $storePath") == 0) { + my $decompressor; + if ($info->{compression} eq "bzip2") { $decompressor = "$Nix::Config::bzip2 -d"; } + elsif ($info->{compression} eq "xz") { 
$decompressor = "$Nix::Config::xz -d"; } + else { + print STDERR "unknown compression method ‘$info->{compression}’\n"; + next; + } + if (system("$Nix::Config::curl --fail --location $binaryCacheUrl/$info->{url} | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") == 0) { return 1; } } diff --git a/scripts/nix-push.in b/scripts/nix-push.in index 9f73b812705a..c388429ec2ef 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -22,16 +22,21 @@ $curl = "$curl $extraCurlFlags" if defined $extraCurlFlags; # Parse the command line. +my $compressionType = "xz"; +my $force = 0; + my $localCopy; my $localArchivesDir; my $archivesPutURL; my $archivesGetURL; +my @roots; + sub showSyntax { print STDERR <$tmp" or die; print INFO "$info" or die; -- cgit 1.4.1 From 440adfbdd1c99fdf7929db66b64ccf4dcd9097e3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sun, 1 Jul 2012 21:55:36 -0400 Subject: Add an environment variable $NIX_BINARY_CACHES specifying URLs of binary caches --- scripts/download-from-binary-cache.pl.in | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index d121f2fc3151..b930fd8d72c1 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -4,7 +4,9 @@ use strict; use Nix::Config; use Nix::Store; -my @binaryCacheUrls = ("file:///tmp/binary-cache2"); + +my @binaryCacheUrls = split / /, ($ENV{"NIX_BINARY_CACHES"} || ""); + sub getInfoFrom { my ($storePath, $pathHash, $binaryCacheUrl) = @_; @@ -12,7 +14,7 @@ sub getInfoFrom { #print STDERR "checking $infoUrl...\n"; my $s = `$Nix::Config::curl --fail --silent --location ${infoUrl}`; if ($? != 0) { - print STDERR "GOT CURL REPLY ", $? >> 8, "\n"; + #print STDERR "GOT CURL REPLY ", $? 
>> 8, "\n"; return undef; } my ($storePath2, $url, $compression, $fileHash, $fileSize, $narHash, $narSize, $deriver); @@ -45,6 +47,7 @@ sub getInfoFrom { } } + sub getInfo { my ($storePath) = @_; @@ -58,6 +61,7 @@ sub getInfo { return undef; } + sub downloadBinary { my ($storePath) = @_; @@ -82,6 +86,7 @@ sub downloadBinary { return 0; } + if ($ARGV[0] eq "--query") { while () { -- cgit 1.4.1 From 6b1e671ac6cc0376474dd8c1403582ae1db01576 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sun, 1 Jul 2012 21:57:25 -0400 Subject: Fix xz compression --- corepkgs/nar.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/corepkgs/nar.nix b/corepkgs/nar.nix index cf8851d50978..fc9687af776c 100644 --- a/corepkgs/nar.nix +++ b/corepkgs/nar.nix @@ -8,10 +8,10 @@ let if [ $compressionType = "xz" ]; then ext=xz - compressor=${xz} -9 + compressor="${xz} -9" else ext=bz2 - compressor=${bzip2} + compressor="${bzip2}" fi echo "packing ‘$storePath’..." @@ -19,7 +19,7 @@ let dst=$out/tmp.nar.$ext set -o pipefail - nix-store --dump "$storePath" | $compressor -9 > $dst + nix-store --dump "$storePath" | $compressor > $dst hash=$(nix-hash --flat --type $hashAlgo --base32 $dst) echo -n $hash > $out/nar-compressed-hash -- cgit 1.4.1 From 6ec7460af1103a23e4d331e0c8a56283350175c5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Jul 2012 12:42:58 -0400 Subject: Binary caches: use a better key Use the hash part of the store path as a key rather than a hash of the store path. This is enough to get the desired privacy property. --- scripts/download-from-binary-cache.pl.in | 15 ++++++++++----- scripts/nix-push.in | 6 +++--- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index b930fd8d72c1..10b7c0175e3a 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -1,6 +1,7 @@ #! 
@perl@ -w @perlFlags@ use strict; +use File::Basename; use Nix::Config; use Nix::Store; @@ -12,9 +13,11 @@ sub getInfoFrom { my ($storePath, $pathHash, $binaryCacheUrl) = @_; my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; #print STDERR "checking $infoUrl...\n"; - my $s = `$Nix::Config::curl --fail --silent --location ${infoUrl}`; + my $s = `$Nix::Config::curl --fail --silent --location $infoUrl`; if ($? != 0) { - #print STDERR "GOT CURL REPLY ", $? >> 8, "\n"; + my $status = $? >> 8; + print STDERR "could not download ‘$infoUrl’ (curl returned status ", $? >> 8, ")\n" + if $status != 22 && $status != 37; return undef; } my ($storePath2, $url, $compression, $fileHash, $fileSize, $narHash, $narSize, $deriver); @@ -51,7 +54,7 @@ sub getInfoFrom { sub getInfo { my ($storePath) = @_; - my $pathHash = hashString("sha256", 1, $storePath); + my $pathHash = substr(basename($storePath), 0, 32); cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); @@ -65,7 +68,7 @@ sub getInfo { sub downloadBinary { my ($storePath) = @_; - my $pathHash = hashString("sha256", 1, $storePath); + my $pathHash = substr(basename($storePath), 0, 32); cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); @@ -124,7 +127,9 @@ if ($ARGV[0] eq "--query") { elsif ($ARGV[0] eq "--substitute") { my $storePath = $ARGV[1] or die; - exit 1 unless downloadBinary($storePath); + if (!downloadBinary($storePath)) { + print STDERR "could not download ‘$storePath’ from any binary cache\n"; + } } else { diff --git a/scripts/nix-push.in b/scripts/nix-push.in index c388429ec2ef..51106657bb0c 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -218,12 +218,12 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { } } - my $infoName = hashString("sha256", 1, $storePath); + my $pathHash = substr(basename($storePath), 0, 32); if ($localCopy) { - my $dst = 
"$localArchivesDir/$infoName.narinfo"; + my $dst = "$localArchivesDir/$pathHash.narinfo"; if ($force || ! -f $dst) { - my $tmp = "$localArchivesDir/.tmp.$$.$infoName"; + my $tmp = "$localArchivesDir/.tmp.$$.$pathHash.narinfo"; open INFO, ">$tmp" or die; print INFO "$info" or die; close INFO or die; -- cgit 1.4.1 From cf49472d601a3d9d34ba92def1eca9a2d2c7c391 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Jul 2012 18:05:57 -0400 Subject: nix-push: Always generate base-32 hashes --- perl/lib/Nix/Store.xs | 4 ++-- scripts/nix-push.in | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index 8ca72b62a31f..2ebff5575616 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -86,7 +86,7 @@ SV * queryDeriver(char * path) } -SV * queryPathInfo(char * path) +SV * queryPathInfo(char * path, int base32) PPCODE: try { doInit(); @@ -95,7 +95,7 @@ SV * queryPathInfo(char * path) XPUSHs(&PL_sv_undef); else XPUSHs(sv_2mortal(newSVpv(info.deriver.c_str(), 0))); - string s = "sha256:" + printHash(info.hash); + string s = "sha256:" + (base32 ? printHash32(info.hash) : printHash(info.hash)); XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0))); mXPUSHi(info.registrationTime); mXPUSHi(info.narSize); diff --git a/scripts/nix-push.in b/scripts/nix-push.in index 51106657bb0c..35ad43d283bb 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -146,7 +146,7 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { my $baseName = basename $storePath; # Get info about the store path. - my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath); + my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1); # In some exceptional cases (such as VM tests that use the Nix # store of the host), the database doesn't contain the hash. 
So @@ -155,7 +155,7 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { my $nar = "$tmpDir/nar"; system("$Nix::Config::binDir/nix-store --dump $storePath > $nar") == 0 or die "cannot dump $storePath\n"; - $narHash = `$Nix::Config::binDir/nix-hash --type sha256 --flat $nar`; + $narHash = `$Nix::Config::binDir/nix-hash --type sha256 --base32 --flat $nar`; die "cannot hash `$nar'" if $? != 0; chomp $narHash; $narHash = "sha256:$narHash"; -- cgit 1.4.1 From 8319b1ab9f1e79ad32871dae602a59df5874d1a9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Jul 2012 18:53:04 -0400 Subject: download-from-binary-cache: Verify NAR hashes --- scripts/download-from-binary-cache.pl.in | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 10b7c0175e3a..57e3e072560a 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -34,9 +34,9 @@ sub getInfoFrom { elsif ($1 eq "References") { @refs = split / /, $2; } elsif ($1 eq "Deriver") { $deriver = $2; } } - if ($storePath ne $storePath2 || !defined $url || !defined $narHash || !defined $narSize) { + if ($storePath ne $storePath2 || !defined $url || !defined $narHash) { print STDERR "bad NAR info file ‘$infoUrl’\n"; - return undef + return undef; } return { url => $url @@ -46,7 +46,7 @@ sub getInfoFrom { , narHash => $narHash , narSize => $narSize , refs => [ map { "$Nix::Config::storeDir/$_" } @refs ] - , deriver => "$Nix::Config::storeDir/$deriver" + , deriver => defined $deriver ? 
"$Nix::Config::storeDir/$deriver" : undef } } @@ -80,9 +80,18 @@ sub downloadBinary { print STDERR "unknown compression method ‘$info->{compression}’\n"; next; } - if (system("$Nix::Config::curl --fail --location $binaryCacheUrl/$info->{url} | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") == 0) { - return 1; + if (system("$Nix::Config::curl --fail --location $binaryCacheUrl/$info->{url} | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) { + die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0; + next; } + # The hash in the manifest can be either in base-16 or + # base-32. Handle both. + $info->{narHash} =~ /^sha256:(.*)$/ or die "invalid hash"; + my $hash = $1; + my $hash2 = hashPath("sha256", 1, $storePath); + die "hash mismatch in downloaded path ‘$storePath’; expected $hash, got $hash2\n" + if $hash ne $hash2; + return 1; } } @@ -112,7 +121,7 @@ if ($ARGV[0] eq "--query") { print scalar @{$info->{refs}}, "\n"; print "$_\n" foreach @{$info->{refs}}; print $info->{fileSize} || 0, "\n"; - print $info->{narSize}, "\n"; + print $info->{narSize} || 0, "\n"; } else { print "0\n"; } -- cgit 1.4.1 From d694c599e2b9eee71ade8cc4befb46ed911f4a10 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 3 Jul 2012 17:29:33 -0400 Subject: download-from-binary-cache: cache binary cache info in a SQLite DB --- scripts/download-from-binary-cache.pl.in | 125 +++++++++++++++++++++++++++++-- 1 file changed, 117 insertions(+), 8 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 57e3e072560a..d073f5bfdcbe 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -4,15 +4,66 @@ use strict; use File::Basename; use Nix::Config; use Nix::Store; +use DBI; my @binaryCacheUrls = split / /, ($ENV{"NIX_BINARY_CACHES"} || ""); +my ($dbh, $insertNAR, $queryNAR); +my %cacheIds; + + +sub initCache { + my 
$dbPath = "$Nix::Config::stateDir/binary-cache-v1.sqlite"; + + # Open/create the database. + $dbh = DBI->connect("dbi:SQLite:dbname=$dbPath", "", "") + or die "cannot open database `$dbPath'"; + $dbh->{RaiseError} = 1; + $dbh->{PrintError} = 0; + + $dbh->do("pragma synchronous = off"); # we can always reproduce the cache + $dbh->do("pragma journal_mode = truncate"); + + # Initialise the database schema, if necessary. + $dbh->do(<do(<prepare( + "insert or replace into NARs(cache, storePath, url, compression, fileHash, fileSize, narHash, " . + "narSize, refs, deriver, system, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die; + + $queryNAR = $dbh->prepare("select * from NARs where cache = ? and storePath = ?") or die; +} + sub getInfoFrom { my ($storePath, $pathHash, $binaryCacheUrl) = @_; my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; - #print STDERR "checking $infoUrl...\n"; + print STDERR "checking $infoUrl...\n"; my $s = `$Nix::Config::curl --fail --silent --location $infoUrl`; if ($? != 0) { my $status = $? >> 8; @@ -34,6 +85,7 @@ sub getInfoFrom { elsif ($1 eq "References") { @refs = split / /, $2; } elsif ($1 eq "Deriver") { $deriver = $2; } } + return undef if $storePath ne $storePath2; if ($storePath ne $storePath2 || !defined $url || !defined $narHash) { print STDERR "bad NAR info file ‘$infoUrl’\n"; return undef; @@ -45,9 +97,63 @@ sub getInfoFrom { , fileSize => $fileSize , narHash => $narHash , narSize => $narSize - , refs => [ map { "$Nix::Config::storeDir/$_" } @refs ] - , deriver => defined $deriver ? "$Nix::Config::storeDir/$deriver" : undef - } + , refs => [ @refs ] + , deriver => $deriver + }; +} + + +sub getCacheId { + my ($binaryCacheUrl) = @_; + + my $cacheId = $cacheIds{$binaryCacheUrl}; + return $cacheId if defined $cacheId; + + # FIXME: not atomic. 
+ my @res = @{$dbh->selectcol_arrayref("select id from BinaryCaches where url = ?", {}, $binaryCacheUrl)}; + if (scalar @res == 1) { + $cacheId = $res[0]; + } else { + $dbh->do("insert into BinaryCaches(url) values (?)", + {}, $binaryCacheUrl); + $cacheId = $dbh->last_insert_id("", "", "", ""); + } + + $cacheIds{$binaryCacheUrl} = $cacheId; + return $cacheId; +} + + +sub cachedGetInfoFrom { + my ($storePath, $pathHash, $binaryCacheUrl) = @_; + + my $cacheId = getCacheId($binaryCacheUrl); + + # Look up $storePath in the SQLite cache. + $queryNAR->execute($cacheId, basename($storePath)); + my $res = $queryNAR->fetchrow_hashref(); + return + { url => $res->{url} + , compression => $res->{compression} + , fileHash => $res->{fileHash} + , fileSize => $res->{fileSize} + , narHash => $res->{narHash} + , narSize => $res->{narSize} + , refs => [ split " ", $res->{refs} ] + , deriver => $res->{deriver} + } if defined $res; + + # Not found, so do an HTTP request to get the info. + my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); + + # Cache the result. 
+ $insertNAR->execute( + $cacheId, basename($storePath), $info->{url}, $info->{compression}, $info->{fileHash}, $info->{fileSize}, + $info->{narHash}, $info->{narSize}, join(" ", @{$info->{refs}}), + $info->{deriver}, $info->{system}, time()) + if defined $info; + + return $info; } @@ -57,7 +163,7 @@ sub getInfo { my $pathHash = substr(basename($storePath), 0, 32); cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); + my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl); return $info if defined $info; } @@ -71,7 +177,7 @@ sub downloadBinary { my $pathHash = substr(basename($storePath), 0, 32); cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); + my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl); if (defined $info) { my $decompressor; if ($info->{compression} eq "bzip2") { $decompressor = "$Nix::Config::bzip2 -d"; } @@ -99,6 +205,9 @@ sub downloadBinary { } +initCache(); + + if ($ARGV[0] eq "--query") { while () { @@ -117,9 +226,9 @@ if ($ARGV[0] eq "--query") { my $info = getInfo($storePath); if (defined $info) { print "1\n"; - print $info->{deriver} || "", "\n"; + print $info->{deriver} ? 
"$Nix::Config::storeDir/$info->{deriver}" : "", "\n"; print scalar @{$info->{refs}}, "\n"; - print "$_\n" foreach @{$info->{refs}}; + print "$Nix::Config::storeDir/$_\n" foreach @{$info->{refs}}; print $info->{fileSize} || 0, "\n"; print $info->{narSize} || 0, "\n"; } else { -- cgit 1.4.1 From 2a8e5c8b11d23f3d56cc7548e21f47325a736b79 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 3 Jul 2012 17:47:01 -0400 Subject: download-from-binary-cache: strip trailing / from URLs --- scripts/download-from-binary-cache.pl.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index d073f5bfdcbe..3303199dacd2 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -7,7 +7,7 @@ use Nix::Store; use DBI; -my @binaryCacheUrls = split / /, ($ENV{"NIX_BINARY_CACHES"} || ""); +my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, ($ENV{"NIX_BINARY_CACHES"} || "")); my ($dbh, $insertNAR, $queryNAR); my %cacheIds; -- cgit 1.4.1 From 89380c03e99dc5ae8a4dcf9edb4f14483bd2f5fc Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 3 Jul 2012 18:35:39 -0400 Subject: download-from-binary-cache: in queries, preferred cached info --- scripts/download-from-binary-cache.pl.in | 48 +++++++++++++++++++------------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 3303199dacd2..26437b064971 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -61,7 +61,8 @@ EOF sub getInfoFrom { - my ($storePath, $pathHash, $binaryCacheUrl) = @_; + my ($storePath, $pathHash, $binaryCacheUrl, $cacheId) = @_; + my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; print STDERR "checking $infoUrl...\n"; my $s = `$Nix::Config::curl --fail --silent --location $infoUrl`; @@ -71,7 +72,9 @@ sub getInfoFrom { if $status != 
22 && $status != 37; return undef; } - my ($storePath2, $url, $compression, $fileHash, $fileSize, $narHash, $narSize, $deriver); + + my ($storePath2, $url, $fileHash, $fileSize, $narHash, $narSize, $deriver, $system); + my $compression = "bzip2"; my @refs; foreach my $line (split "\n", $s) { $line =~ /^(.*): (.*)$/ or return undef; @@ -84,21 +87,29 @@ sub getInfoFrom { elsif ($1 eq "NarSize") { $narSize = int($2); } elsif ($1 eq "References") { @refs = split / /, $2; } elsif ($1 eq "Deriver") { $deriver = $2; } + elsif ($1 eq "System") { $system = $2; } } return undef if $storePath ne $storePath2; if ($storePath ne $storePath2 || !defined $url || !defined $narHash) { print STDERR "bad NAR info file ‘$infoUrl’\n"; return undef; } + + # Cache the result. + $insertNAR->execute( + getCacheId($binaryCacheUrl), basename($storePath), $url, $compression, $fileHash, $fileSize, + $narHash, $narSize, join(" ", @refs), $deriver, $system, time()); + return { url => $url - , compression => ($compression || "bzip2") + , compression => $compression , fileHash => $fileHash , fileSize => $fileSize , narHash => $narHash , narSize => $narSize , refs => [ @refs ] , deriver => $deriver + , system => $system }; } @@ -127,11 +138,10 @@ sub getCacheId { sub cachedGetInfoFrom { my ($storePath, $pathHash, $binaryCacheUrl) = @_; - my $cacheId = getCacheId($binaryCacheUrl); - - # Look up $storePath in the SQLite cache. - $queryNAR->execute($cacheId, basename($storePath)); + $queryNAR->execute(getCacheId($binaryCacheUrl), basename($storePath)); my $res = $queryNAR->fetchrow_hashref(); + return undef unless defined $res; + return { url => $res->{url} , compression => $res->{compression} @@ -142,18 +152,6 @@ sub cachedGetInfoFrom { , refs => [ split " ", $res->{refs} ] , deriver => $res->{deriver} } if defined $res; - - # Not found, so do an HTTP request to get the info. - my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); - - # Cache the result. 
- $insertNAR->execute( - $cacheId, basename($storePath), $info->{url}, $info->{compression}, $info->{fileHash}, $info->{fileSize}, - $info->{narHash}, $info->{narSize}, join(" ", @{$info->{refs}}), - $info->{deriver}, $info->{system}, time()) - if defined $info; - - return $info; } @@ -162,11 +160,18 @@ sub getInfo { my $pathHash = substr(basename($storePath), 0, 32); - cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { + # First look if we have cached info for one of the URLs. + foreach my $binaryCacheUrl (@binaryCacheUrls) { my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl); return $info if defined $info; } + # No, so do an HTTP request until we get a hit. + foreach my $binaryCacheUrl (@binaryCacheUrls) { + my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); + return $info if defined $info; + } + return undef; } @@ -178,6 +183,7 @@ sub downloadBinary { cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl); + $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl) unless defined $info; if (defined $info) { my $decompressor; if ($info->{compression} eq "bzip2") { $decompressor = "$Nix::Config::bzip2 -d"; } @@ -186,6 +192,7 @@ sub downloadBinary { print STDERR "unknown compression method ‘$info->{compression}’\n"; next; } + print STDERR "\n*** Downloading ‘$info->{url}’ into ‘$storePath’...\n"; if (system("$Nix::Config::curl --fail --location $binaryCacheUrl/$info->{url} | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) { die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? 
== 0; next; @@ -197,6 +204,7 @@ sub downloadBinary { my $hash2 = hashPath("sha256", 1, $storePath); die "hash mismatch in downloaded path ‘$storePath’; expected $hash, got $hash2\n" if $hash ne $hash2; + print STDERR "\n"; return 1; } } -- cgit 1.4.1 From ae60643c15a2eab2cf53230aa7c5fbc8af3430d1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 3 Jul 2012 18:54:46 -0400 Subject: download-from-binary-cache: do negative NAR info caching I.e. if a NAR info file does *not* exist, we record it in the cache DB so that we don't retry it later. --- scripts/download-from-binary-cache.pl.in | 34 +++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 26437b064971..ba8d44fe24d2 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -9,7 +9,7 @@ use DBI; my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, ($ENV{"NIX_BINARY_CACHES"} || "")); -my ($dbh, $insertNAR, $queryNAR); +my ($dbh, $insertNAR, $queryNAR, $insertNegativeNAR, $queryNegativeNAR); my %cacheIds; @@ -52,24 +52,48 @@ EOF ); EOF + $dbh->do(<prepare( "insert or replace into NARs(cache, storePath, url, compression, fileHash, fileSize, narHash, " . "narSize, refs, deriver, system, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die; $queryNAR = $dbh->prepare("select * from NARs where cache = ? and storePath = ?") or die; + + $insertNegativeNAR = $dbh->prepare( + "insert or replace into NegativeNARs(cache, storePath, timestamp) values (?, ?, ?)") or die; + + $queryNegativeNAR = $dbh->prepare("select 1 from NegativeNARs where cache = ? and storePath = ?") or die; } sub getInfoFrom { - my ($storePath, $pathHash, $binaryCacheUrl, $cacheId) = @_; + my ($storePath, $pathHash, $binaryCacheUrl) = @_; + + my $cacheId = getCacheId($binaryCacheUrl); + + # Bail out if there is a negative cache entry. 
+ $queryNegativeNAR->execute($cacheId, basename($storePath)); + return undef if @{$queryNegativeNAR->fetchall_arrayref()} != 0; my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; print STDERR "checking $infoUrl...\n"; my $s = `$Nix::Config::curl --fail --silent --location $infoUrl`; if ($? != 0) { my $status = $? >> 8; - print STDERR "could not download ‘$infoUrl’ (curl returned status ", $? >> 8, ")\n" - if $status != 22 && $status != 37; + if ($status != 22 && $status != 37) { + print STDERR "could not download ‘$infoUrl’ (curl returned status ", $? >> 8, ")\n"; + } else { + $insertNegativeNAR->execute($cacheId, basename($storePath), time()); + } return undef; } @@ -97,7 +121,7 @@ sub getInfoFrom { # Cache the result. $insertNAR->execute( - getCacheId($binaryCacheUrl), basename($storePath), $url, $compression, $fileHash, $fileSize, + $cacheId, basename($storePath), $url, $compression, $fileHash, $fileSize, $narHash, $narSize, join(" ", @refs), $deriver, $system, time()); return -- cgit 1.4.1 From cd94665f38fbadde38d5d8ae5c9c14dff9aea0ac Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 6 Jul 2012 00:30:40 -0400 Subject: download-from-binary-cache: use WWW::Curl Using WWW::Curl rather than running an external curl process for every NAR info file halves the time it takes to get info thanks to libcurl's support for persistent HTTP connections. (We save a roundtrip per file.) But the real gain will come from using parallel and/or pipelined requests. --- scripts/download-from-binary-cache.pl.in | 79 +++++++++++++++++++++++++++----- 1 file changed, 68 insertions(+), 11 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index ba8d44fe24d2..ea37c818d311 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -1,10 +1,13 @@ #! 
@perl@ -w @perlFlags@ -use strict; +use DBI; use File::Basename; +use IO::Select; use Nix::Config; use Nix::Store; -use DBI; +use WWW::Curl::Easy; +use WWW::Curl::Multi; +use strict; my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, ($ENV{"NIX_BINARY_CACHES"} || "")); @@ -12,6 +15,58 @@ my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, ($ENV{"NIX_BINARY_CACHES"} my ($dbh, $insertNAR, $queryNAR, $insertNegativeNAR, $queryNegativeNAR); my %cacheIds; +my $curlm = WWW::Curl::Multi->new; +my $activeRequests = 0; +my $curlIdCount = 1; +my %curlHandles; +my $caBundle = $ENV{"CURL_CA_BUNDLE"} || $ENV{"OPENSSL_X509_CERT_FILE"}; + + +sub addRequest { + my ($url) = @_; + + my $curl = WWW::Curl::Easy->new; + my $curlId = $curlIdCount++; + $curlHandles{$curlId} = { handle => $curl, content => "" }; + + $curl->setopt(CURLOPT_PRIVATE, $curlId); + $curl->setopt(CURLOPT_URL, $url); + $curl->setopt(CURLOPT_WRITEDATA, \$curlHandles{$curlId}->{content}); + $curl->setopt(CURLOPT_FOLLOWLOCATION, 1); + $curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle; + + $curlm->add_handle($curl); + $activeRequests++; + + return $curlHandles{$curlId}; +} + + +sub processRequests { + while ($activeRequests) { + my ($rfds, $wfds, $efds) = $curlm->fdset(); + #print STDERR "R = @{$rfds}, W = @{$wfds}, E = @{$efds}\n"; + + # Sleep until we can read or write some data. 
+ if (scalar @{$rfds} + scalar @{$wfds} + scalar @{$efds} > 0) { + IO::Select->select(IO::Select->new(@{$rfds}), IO::Select->new(@{$wfds}), IO::Select->new(@{$efds}), 0.1); + } + + if ($curlm->perform() != $activeRequests) { + while (my ($id, $result) = $curlm->info_read) { + if ($id) { + my $handle = $curlHandles{$id}->{handle}; + $curlHandles{$id}->{result} = $result; + $curlHandles{$id}->{httpStatus} = $handle->getinfo(CURLINFO_HTTP_CODE); + #print STDERR "\nRequest completed ($id, $result, $curlHandles{$id}->{httpStatus})\n"; + $activeRequests--; + delete $curlHandles{$id}->{handle}; + } + } + } + } +} + sub initCache { my $dbPath = "$Nix::Config::stateDir/binary-cache-v1.sqlite"; @@ -86,11 +141,13 @@ sub getInfoFrom { my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; print STDERR "checking $infoUrl...\n"; - my $s = `$Nix::Config::curl --fail --silent --location $infoUrl`; - if ($? != 0) { - my $status = $? >> 8; - if ($status != 22 && $status != 37) { - print STDERR "could not download ‘$infoUrl’ (curl returned status ", $? >> 8, ")\n"; + my $request = addRequest($infoUrl); + processRequests; + + if ($request->{result} != 0 || $request->{httpStatus} != 200) { + if ($request->{httpStatus} != 404) { + print STDERR "could not download ‘$infoUrl’ (" . + ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . 
")\n"; } else { $insertNegativeNAR->execute($cacheId, basename($storePath), time()); } @@ -100,7 +157,7 @@ sub getInfoFrom { my ($storePath2, $url, $fileHash, $fileSize, $narHash, $narSize, $deriver, $system); my $compression = "bzip2"; my @refs; - foreach my $line (split "\n", $s) { + foreach my $line (split "\n", $request->{content}) { $line =~ /^(.*): (.*)$/ or return undef; if ($1 eq "StorePath") { $storePath2 = $2; } elsif ($1 eq "URL") { $url = $2; } @@ -248,9 +305,9 @@ if ($ARGV[0] eq "--query") { if ($cmd eq "have") { my $storePath = ; chomp $storePath; # FIXME: want to give correct info here, but it's too slow. - print "0\n"; - #my $info = getInfo($storePath); - #if (defined $info) { print "1\n"; } else { print "0\n"; } + #print "0\n"; + my $info = getInfo($storePath); + if (defined $info) { print "1\n"; } else { print "0\n"; } } elsif ($cmd eq "info") { -- cgit 1.4.1 From 11800e61983677f92fd5a08f51beb9036f947d6e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 6 Jul 2012 19:08:20 -0400 Subject: download-from-binary-cache: parallelise fetching of NAR info files Getting substitute information using the binary cache substituter has non-trivial latency overhead. A package or NixOS system configuration can have hundreds of dependencies, and in the worst case (when the local info cache is empty) we have to do a separate HTTP request for each of these. If the ping time to the server is t, getting N info files will take tN seconds; e.g., with a ping time of 0.1s to nixos.org, sequentially downloading 1000 info files (a typical NixOS config) will take at least 100 seconds. To fix this problem, the binary cache substituter can now perform requests in parallel. This required changing the substituter interface to support a function querySubstitutablePathInfos() that queries multiple paths at the same time, and rewriting queryMissing() to take advantage of parallelism. 
(Due to local caching, parallelising queryMissing() is sufficient for most use cases, since it's almost always called before building a derivation and thus fills the local info cache.) For example, parallelism speeds up querying all 1056 paths in a particular NixOS system configuration from 116s to 2.6s. It works so well because the eccentricity of the top-level derivation in the dependency graph is only 9. So we only need 10 round-trips (when using an unlimited number of parallel connections) to get everything. Currently we do a maximum of 150 parallel connections to the server. Thus it's important that the binary cache server (e.g. nixos.org) has a high connection limit. Alternatively we could use HTTP pipelining, but WWW::Curl doesn't support it and libcurl has a hard-coded limit of 5 requests per pipeline. --- scripts/download-from-binary-cache.pl.in | 204 +++++++++++++++++++------------ src/libstore/build.cc | 10 +- src/libstore/globals.cc | 5 +- src/libstore/local-store.cc | 60 ++++++--- src/libstore/local-store.hh | 7 +- src/libstore/misc.cc | 106 ++++++++++++---- src/libstore/remote-store.cc | 13 ++ src/libstore/remote-store.hh | 3 + src/libstore/store-api.hh | 5 + 9 files changed, 282 insertions(+), 131 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index ea37c818d311..a67818e7f5b4 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -12,33 +12,40 @@ use strict; my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, ($ENV{"NIX_BINARY_CACHES"} || "")); +my $maxParallelRequests = 150; + my ($dbh, $insertNAR, $queryNAR, $insertNegativeNAR, $queryNegativeNAR); my %cacheIds; my $curlm = WWW::Curl::Multi->new; my $activeRequests = 0; my $curlIdCount = 1; -my %curlHandles; +my %requests; +my %scheduled; my $caBundle = $ENV{"CURL_CA_BUNDLE"} || $ENV{"OPENSSL_X509_CERT_FILE"}; sub addRequest { - my ($url) = @_; + my ($storePath, $url) = @_; my $curl = 
WWW::Curl::Easy->new; my $curlId = $curlIdCount++; - $curlHandles{$curlId} = { handle => $curl, content => "" }; + $requests{$curlId} = { storePath => $storePath, url => $url, handle => $curl, content => "" }; $curl->setopt(CURLOPT_PRIVATE, $curlId); $curl->setopt(CURLOPT_URL, $url); - $curl->setopt(CURLOPT_WRITEDATA, \$curlHandles{$curlId}->{content}); + $curl->setopt(CURLOPT_WRITEDATA, \$requests{$curlId}->{content}); $curl->setopt(CURLOPT_FOLLOWLOCATION, 1); $curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle; - $curlm->add_handle($curl); - $activeRequests++; + if ($activeRequests >= $maxParallelRequests) { + $scheduled{$curlId} = 1; + } else { + $curlm->add_handle($curl); + $activeRequests++; + } - return $curlHandles{$curlId}; + return $requests{$curlId}; } @@ -55,12 +62,20 @@ sub processRequests { if ($curlm->perform() != $activeRequests) { while (my ($id, $result) = $curlm->info_read) { if ($id) { - my $handle = $curlHandles{$id}->{handle}; - $curlHandles{$id}->{result} = $result; - $curlHandles{$id}->{httpStatus} = $handle->getinfo(CURLINFO_HTTP_CODE); - #print STDERR "\nRequest completed ($id, $result, $curlHandles{$id}->{httpStatus})\n"; + my $handle = $requests{$id}->{handle}; + $requests{$id}->{result} = $result; + $requests{$id}->{httpStatus} = $handle->getinfo(CURLINFO_HTTP_CODE); + #print STDERR "\nRequest completed ($id, $result, $requests{$id}->{httpStatus})\n"; $activeRequests--; - delete $curlHandles{$id}->{handle}; + delete $requests{$id}->{handle}; + + if (scalar(keys %scheduled) > 0) { + my $id2 = (keys %scheduled)[0]; + $curlm->add_handle($requests{$id2}->{handle}); + $activeRequests++; + delete $scheduled{$id2}; + } + } } } @@ -130,23 +145,21 @@ EOF } -sub getInfoFrom { - my ($storePath, $pathHash, $binaryCacheUrl) = @_; +sub negativeHit { + my ($storePath, $binaryCacheUrl) = @_; + $queryNegativeNAR->execute(getCacheId($binaryCacheUrl), basename($storePath)); + return @{$queryNegativeNAR->fetchall_arrayref()} != 0; +} - my $cacheId 
= getCacheId($binaryCacheUrl); - # Bail out if there is a negative cache entry. - $queryNegativeNAR->execute($cacheId, basename($storePath)); - return undef if @{$queryNegativeNAR->fetchall_arrayref()} != 0; +sub processNARInfo { + my ($storePath, $binaryCacheUrl, $request) = @_; - my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; - print STDERR "checking $infoUrl...\n"; - my $request = addRequest($infoUrl); - processRequests; + my $cacheId = getCacheId($binaryCacheUrl); if ($request->{result} != 0 || $request->{httpStatus} != 200) { if ($request->{httpStatus} != 404) { - print STDERR "could not download ‘$infoUrl’ (" . + print STDERR "could not download ‘$request->{url}’ (" . ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n"; } else { $insertNegativeNAR->execute($cacheId, basename($storePath), time()); @@ -172,7 +185,7 @@ sub getInfoFrom { } return undef if $storePath ne $storePath2; if ($storePath ne $storePath2 || !defined $url || !defined $narHash) { - print STDERR "bad NAR info file ‘$infoUrl’\n"; + print STDERR "bad NAR info file ‘$request->{url}’\n"; return undef; } @@ -236,24 +249,65 @@ sub cachedGetInfoFrom { } -sub getInfo { - my ($storePath) = @_; +sub printInfo { + my ($storePath, $info) = @_; + print "$storePath\n"; + print $info->{deriver} ? "$Nix::Config::storeDir/$info->{deriver}" : "", "\n"; + print scalar @{$info->{refs}}, "\n"; + print "$Nix::Config::storeDir/$_\n" foreach @{$info->{refs}}; + print $info->{fileSize} || 0, "\n"; + print $info->{narSize} || 0, "\n"; +} - my $pathHash = substr(basename($storePath), 0, 32); - # First look if we have cached info for one of the URLs. - foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl); - return $info if defined $info; +sub printInfoParallel { + my @paths = @_; + + # First print all paths for which we have cached info. 
+ my @left; + foreach my $storePath (@paths) { + my $pathHash = substr(basename($storePath), 0, 32); + my $found = 0; + foreach my $binaryCacheUrl (@binaryCacheUrls) { + my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl); + if (defined $info) { + printInfo($storePath, $info); + $found = 1; + last; + } + } + push @left, $storePath if !$found; } - # No, so do an HTTP request until we get a hit. + return if scalar @left == 0; + foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl); - return $info if defined $info; - } - return undef; + my @left2; + %requests = (); + foreach my $storePath (@left) { + my $pathHash = substr(basename($storePath), 0, 32); + if (negativeHit($storePath, $binaryCacheUrl)) { + push @left2, $storePath; + next; + } + my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; + addRequest($storePath, $infoUrl); + } + + processRequests; + + foreach my $request (values %requests) { + my $info = processNARInfo($request->{storePath}, $binaryCacheUrl, $request); + if (defined $info) { + printInfo($request->{storePath}, $info); + } else { + push @left2, $request->{storePath}; + } + } + + @left = @left2; + } } @@ -264,30 +318,37 @@ sub downloadBinary { cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl); - $info = getInfoFrom($storePath, $pathHash, $binaryCacheUrl) unless defined $info; - if (defined $info) { - my $decompressor; - if ($info->{compression} eq "bzip2") { $decompressor = "$Nix::Config::bzip2 -d"; } - elsif ($info->{compression} eq "xz") { $decompressor = "$Nix::Config::xz -d"; } - else { - print STDERR "unknown compression method ‘$info->{compression}’\n"; - next; - } - print STDERR "\n*** Downloading ‘$info->{url}’ into ‘$storePath’...\n"; - if (system("$Nix::Config::curl --fail --location $binaryCacheUrl/$info->{url} | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) { - 
die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0; - next; - } - # The hash in the manifest can be either in base-16 or - # base-32. Handle both. - $info->{narHash} =~ /^sha256:(.*)$/ or die "invalid hash"; - my $hash = $1; - my $hash2 = hashPath("sha256", 1, $storePath); - die "hash mismatch in downloaded path ‘$storePath’; expected $hash, got $hash2\n" - if $hash ne $hash2; - print STDERR "\n"; - return 1; + + unless (defined $info) { + next if negativeHit($storePath, $binaryCacheUrl); + my $request = addRequest($storePath, "$binaryCacheUrl/$pathHash.narinfo"); + processRequests; + $info = processNARInfo($storePath, $binaryCacheUrl, $request); + } + + next unless defined $info; + + my $decompressor; + if ($info->{compression} eq "bzip2") { $decompressor = "$Nix::Config::bzip2 -d"; } + elsif ($info->{compression} eq "xz") { $decompressor = "$Nix::Config::xz -d"; } + else { + print STDERR "unknown compression method ‘$info->{compression}’\n"; + next; + } + print STDERR "\n*** Downloading ‘$info->{url}’ into ‘$storePath’...\n"; + if (system("$Nix::Config::curl --fail --location $binaryCacheUrl/$info->{url} | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) { + die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0; + next; } + # The hash in the manifest can be either in base-16 or + # base-32. Handle both. + $info->{narHash} =~ /^sha256:(.*)$/ or die "invalid hash"; + my $hash = $1; + my $hash2 = hashPath("sha256", 1, $storePath); + die "hash mismatch in downloaded path ‘$storePath’; expected $hash, got $hash2\n" + if $hash ne $hash2; + print STDERR "\n"; + return 1; } return 0; @@ -300,29 +361,20 @@ initCache(); if ($ARGV[0] eq "--query") { while () { - my $cmd = $_; chomp $cmd; - + chomp; + my ($cmd, @args) = split " ", $_; + if ($cmd eq "have") { my $storePath = ; chomp $storePath; # FIXME: want to give correct info here, but it's too slow. 
- #print "0\n"; - my $info = getInfo($storePath); - if (defined $info) { print "1\n"; } else { print "0\n"; } + print "0\n"; + #my $info = getInfo($storePath); + #if (defined $info) { print "1\n"; } else { print "0\n"; } } elsif ($cmd eq "info") { - my $storePath = ; chomp $storePath; - my $info = getInfo($storePath); - if (defined $info) { - print "1\n"; - print $info->{deriver} ? "$Nix::Config::storeDir/$info->{deriver}" : "", "\n"; - print scalar @{$info->{refs}}, "\n"; - print "$Nix::Config::storeDir/$_\n" foreach @{$info->{refs}}; - print $info->{fileSize} || 0, "\n"; - print $info->{narSize} || 0, "\n"; - } else { - print "0\n"; - } + printInfoParallel(@args); + print "\n"; } else { die "unknown command `$cmd'"; } diff --git a/src/libstore/build.cc b/src/libstore/build.cc index d5bbd540b34d..1c84e5b9f92d 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -2352,10 +2352,12 @@ void SubstitutionGoal::tryNext() sub = subs.front(); subs.pop_front(); - if (!worker.store.querySubstitutablePathInfo(sub, storePath, info)) { - tryNext(); - return; - } + SubstitutablePathInfos infos; + PathSet dummy(singleton(storePath)); + worker.store.querySubstitutablePathInfos(sub, dummy, infos); + SubstitutablePathInfos::iterator k = infos.find(storePath); + if (k == infos.end()) { tryNext(); return; } + info = k->second; /* To maintain the closure invariant, we first have to realise the paths referenced by this one. 
*/ diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 5c22f1406649..f660ed68df0d 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -155,8 +155,9 @@ void setDefaultsFromEnvironment() string subs = getEnv("NIX_SUBSTITUTERS", "default"); if (subs == "default") { - substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl"); - substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl"); + //substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl"); + //substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl"); + substituters.push_back(nixLibexecDir + "/nix/substituters/download-from-binary-cache.pl"); } else substituters = tokenizeString(subs, ":"); diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 1ce62aeafcef..b4ea4b748178 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -936,37 +936,57 @@ bool LocalStore::hasSubstitutes(const Path & path) } -bool LocalStore::querySubstitutablePathInfo(const Path & substituter, - const Path & path, SubstitutablePathInfo & info) +void LocalStore::querySubstitutablePathInfos(const Path & substituter, + PathSet & paths, SubstitutablePathInfos & infos) { RunningSubstituter & run(runningSubstituters[substituter]); startSubstituter(substituter, run); - writeLine(run.to, "info\n" + path); + string s = "info "; + foreach (PathSet::const_iterator, i, paths) + if (infos.find(*i) == infos.end()) { s += *i; s += " "; } + writeLine(run.to, s); - if (!getIntLine(run.from)) return false; - - info.deriver = readLine(run.from); - if (info.deriver != "") assertStorePath(info.deriver); - int nrRefs = getIntLine(run.from); - while (nrRefs--) { - Path p = readLine(run.from); - assertStorePath(p); - info.references.insert(p); + while (true) { + Path path = readLine(run.from); + if (path == "") break; + assert(paths.find(path) != paths.end()); + 
paths.erase(path); + SubstitutablePathInfo & info(infos[path]); + info.deriver = readLine(run.from); + if (info.deriver != "") assertStorePath(info.deriver); + int nrRefs = getIntLine(run.from); + while (nrRefs--) { + Path p = readLine(run.from); + assertStorePath(p); + info.references.insert(p); + } + info.downloadSize = getIntLine(run.from); + info.narSize = getIntLine(run.from); } - info.downloadSize = getIntLine(run.from); - info.narSize = getIntLine(run.from); - - return true; } bool LocalStore::querySubstitutablePathInfo(const Path & path, SubstitutablePathInfo & info) { - foreach (Paths::iterator, i, substituters) - if (querySubstitutablePathInfo(*i, path, info)) return true; - return false; + SubstitutablePathInfos infos; + querySubstitutablePathInfos(singleton(path), infos); + SubstitutablePathInfos::iterator i = infos.find(path); + if (i == infos.end()) return false; + info = i->second; + return true; +} + + +void LocalStore::querySubstitutablePathInfos(const PathSet & paths, + SubstitutablePathInfos & infos) +{ + PathSet todo = paths; + foreach (Paths::iterator, i, substituters) { + if (todo.empty()) break; + querySubstitutablePathInfos(*i, todo, infos); + } } @@ -1110,7 +1130,7 @@ Path LocalStore::addToStore(const Path & _srcPath, method for very large paths, but `copyPath' is mainly used for small files. 
*/ StringSink sink; - if (recursive) + if (recursive) dumpPath(srcPath, sink, filter); else sink.s = readFile(srcPath); diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index aa8e8582fb0d..c4d8be692a36 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -128,8 +128,11 @@ public: bool querySubstitutablePathInfo(const Path & path, SubstitutablePathInfo & info); - bool querySubstitutablePathInfo(const Path & substituter, - const Path & path, SubstitutablePathInfo & info); + void querySubstitutablePathInfos(const Path & substituter, + PathSet & paths, SubstitutablePathInfos & infos); + + void querySubstitutablePathInfos(const PathSet & paths, + SubstitutablePathInfos & infos); Path addToStore(const Path & srcPath, bool recursive = true, HashType hashAlgo = htSHA256, diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index 093499936349..aa5f6ff727c9 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -55,45 +55,97 @@ void queryMissing(StoreAPI & store, const PathSet & targets, PathSet todo(targets.begin(), targets.end()), done; + bool useSubstitutes = queryBoolSetting("build-use-substitutes", true); + + /* Getting substitute info has high latency when using the binary + cache substituter. Thus it's essential to do substitute + queries in parallel as much as possible. To accomplish this + we do the following: + + - For all paths still to be processed (‘todo’), we add all + paths for which we need info to the set ‘query’. For an + unbuilt derivation this is the output paths; otherwise, it's + the path itself. + + - We get info about all paths in ‘query’ in parallel. + + - We process the results and add new items to ‘todo’ if + necessary. E.g. if a path is substitutable, then we need to + get info on its references. + + - Repeat until ‘todo’ is empty. 
+ */ + while (!todo.empty()) { - Path p = *(todo.begin()); - todo.erase(p); - if (done.find(p) != done.end()) continue; - done.insert(p); - - if (isDerivation(p)) { - if (!store.isValidPath(p)) { - unknown.insert(p); - continue; + + PathSet query, todoDrv, todoNonDrv; + + foreach (PathSet::iterator, i, todo) { + if (done.find(*i) != done.end()) continue; + done.insert(*i); + + if (isDerivation(*i)) { + if (!store.isValidPath(*i)) { + // FIXME: we could try to substitute p. + unknown.insert(*i); + continue; + } + Derivation drv = derivationFromPath(store, *i); + + PathSet invalid; + foreach (DerivationOutputs::iterator, j, drv.outputs) + if (!store.isValidPath(j->second.path)) invalid.insert(j->second.path); + if (invalid.empty()) continue; + + todoDrv.insert(*i); + if (useSubstitutes) query.insert(invalid.begin(), invalid.end()); + } + + else { + if (store.isValidPath(*i)) continue; + query.insert(*i); + todoNonDrv.insert(*i); } - Derivation drv = derivationFromPath(store, p); + } + + todo.clear(); + + SubstitutablePathInfos infos; + store.querySubstitutablePathInfos(query, infos); + + foreach (PathSet::iterator, i, todoDrv) { + // FIXME: cache this + Derivation drv = derivationFromPath(store, *i); bool mustBuild = false; - foreach (DerivationOutputs::iterator, i, drv.outputs) - if (!store.isValidPath(i->second.path) && - !(queryBoolSetting("build-use-substitutes", true) && store.hasSubstitutes(i->second.path))) - mustBuild = true; + if (useSubstitutes) { + foreach (DerivationOutputs::iterator, j, drv.outputs) + if (!store.isValidPath(j->second.path) && + infos.find(j->second.path) == infos.end()) + mustBuild = true; + } else + mustBuild = true; if (mustBuild) { - willBuild.insert(p); + willBuild.insert(*i); todo.insert(drv.inputSrcs.begin(), drv.inputSrcs.end()); foreach (DerivationInputs::iterator, i, drv.inputDrvs) todo.insert(i->first); - } else + } else foreach (DerivationOutputs::iterator, i, drv.outputs) - todo.insert(i->second.path); + 
todoNonDrv.insert(i->second.path); } - - else { - if (store.isValidPath(p)) continue; - SubstitutablePathInfo info; - if (store.querySubstitutablePathInfo(p, info)) { - willSubstitute.insert(p); - downloadSize += info.downloadSize; - narSize += info.narSize; - todo.insert(info.references.begin(), info.references.end()); + + foreach (PathSet::iterator, i, todoNonDrv) { + done.insert(*i); + SubstitutablePathInfos::iterator info = infos.find(*i); + if (info != infos.end()) { + willSubstitute.insert(*i); + downloadSize += info->second.downloadSize; + narSize += info->second.narSize; + todo.insert(info->second.references.begin(), info->second.references.end()); } else - unknown.insert(p); + unknown.insert(*i); } } } diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 5e5561a6aecf..1cf67d3731d1 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -256,6 +256,19 @@ bool RemoteStore::querySubstitutablePathInfo(const Path & path, } +void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, + SubstitutablePathInfos & infos) +{ + if (paths.empty()) return; + printMsg(lvlError, format("QUERYING %1% (REMOTE)") % showPaths(paths)); + foreach (PathSet::const_iterator, i, paths) { + SubstitutablePathInfo info; + if (querySubstitutablePathInfo(*i, info)) + infos[*i] = info; + } +} + + ValidPathInfo RemoteStore::queryPathInfo(const Path & path) { openConnection(); diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index e9f40da6dbf4..1056a6115849 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -48,6 +48,9 @@ public: bool querySubstitutablePathInfo(const Path & path, SubstitutablePathInfo & info); + void querySubstitutablePathInfos(const PathSet & paths, + SubstitutablePathInfos & infos); + Path addToStore(const Path & srcPath, bool recursive = true, HashType hashAlgo = htSHA256, PathFilter & filter = defaultPathFilter); diff --git a/src/libstore/store-api.hh 
b/src/libstore/store-api.hh index bf3269f57818..92b2ddb1e730 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -86,6 +86,8 @@ struct SubstitutablePathInfo unsigned long long narSize; /* 0 = unknown */ }; +typedef std::map SubstitutablePathInfos; + struct ValidPathInfo { @@ -147,6 +149,9 @@ public: substitutable path. */ virtual bool querySubstitutablePathInfo(const Path & path, SubstitutablePathInfo & info) = 0; + + virtual void querySubstitutablePathInfos(const PathSet & paths, + SubstitutablePathInfos & infos) = 0; /* Copy the contents of a path to the store and register the validity the resulting path. The resulting path is returned. -- cgit 1.4.1 From 400e556b34ff0563f34b191de5f632dc4377f0cd Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sun, 8 Jul 2012 18:39:07 -0400 Subject: Cleanup --- scripts/download-from-binary-cache.pl.in | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index a67818e7f5b4..37f8db0a9905 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -230,7 +230,7 @@ sub getCacheId { sub cachedGetInfoFrom { - my ($storePath, $pathHash, $binaryCacheUrl) = @_; + my ($storePath, $binaryCacheUrl) = @_; $queryNAR->execute(getCacheId($binaryCacheUrl), basename($storePath)); my $res = $queryNAR->fetchrow_hashref(); @@ -260,16 +260,22 @@ sub printInfo { } +sub infoUrl { + my ($binaryCacheUrl, $storePath) = @_; + my $pathHash = substr(basename($storePath), 0, 32); + my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; +} + + sub printInfoParallel { my @paths = @_; # First print all paths for which we have cached info. 
my @left; foreach my $storePath (@paths) { - my $pathHash = substr(basename($storePath), 0, 32); my $found = 0; foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl); + my $info = cachedGetInfoFrom($storePath, $binaryCacheUrl); if (defined $info) { printInfo($storePath, $info); $found = 1; @@ -286,13 +292,11 @@ sub printInfoParallel { my @left2; %requests = (); foreach my $storePath (@left) { - my $pathHash = substr(basename($storePath), 0, 32); if (negativeHit($storePath, $binaryCacheUrl)) { push @left2, $storePath; next; } - my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; - addRequest($storePath, $infoUrl); + addRequest($storePath, infoUrl($binaryCacheUrl, $storePath)); } processRequests; @@ -314,14 +318,12 @@ sub printInfoParallel { sub downloadBinary { my ($storePath) = @_; - my $pathHash = substr(basename($storePath), 0, 32); - - cache: foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl); + foreach my $binaryCacheUrl (@binaryCacheUrls) { + my $info = cachedGetInfoFrom($storePath, $binaryCacheUrl); unless (defined $info) { next if negativeHit($storePath, $binaryCacheUrl); - my $request = addRequest($storePath, "$binaryCacheUrl/$pathHash.narinfo"); + my $request = addRequest($storePath, infoUrl($binaryCacheUrl, $storePath)); processRequests; $info = processNARInfo($storePath, $binaryCacheUrl, $request); } @@ -368,8 +370,6 @@ if ($ARGV[0] eq "--query") { my $storePath = ; chomp $storePath; # FIXME: want to give correct info here, but it's too slow. 
print "0\n"; - #my $info = getInfo($storePath); - #if (defined $info) { print "1\n"; } else { print "0\n"; } } elsif ($cmd eq "info") { -- cgit 1.4.1 From 425cc612ad4835d29bce081a67ad161d06063b51 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sun, 8 Jul 2012 18:39:24 -0400 Subject: build.cc: Don't use hasSubstitute() Instead make a single call to querySubstitutablePathInfo() per derivation output. This is faster and prevents having to implement the "have" function in the binary cache substituter. --- src/libstore/build.cc | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 1c84e5b9f92d..76e77b8f01e4 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -94,7 +94,7 @@ typedef map WeakGoalMap; class Goal : public boost::enable_shared_from_this { public: - typedef enum {ecBusy, ecSuccess, ecFailed} ExitCode; + typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters} ExitCode; protected: @@ -111,6 +111,10 @@ protected: /* Number of goals we are/were waiting for that have failed. */ unsigned int nrFailed; + /* Number of substitution goals we are/were waiting for that + failed because there are no substituters. */ + unsigned int nrNoSubstituters; + /* Name of this goal for debugging purposes. 
*/ string name; @@ -119,7 +123,7 @@ protected: Goal(Worker & worker) : worker(worker) { - nrFailed = 0; + nrFailed = nrNoSubstituters = 0; exitCode = ecBusy; } @@ -306,7 +310,9 @@ void Goal::waiteeDone(GoalPtr waitee, ExitCode result) trace(format("waitee `%1%' done; %2% left") % waitee->name % waitees.size()); - if (result == ecFailed) ++nrFailed; + if (result == ecFailed || result == ecNoSubstituters) ++nrFailed; + + if (result == ecNoSubstituters) ++nrNoSubstituters; if (waitees.empty() || (result == ecFailed && !keepGoing)) { @@ -330,7 +336,7 @@ void Goal::amDone(ExitCode result) { trace("done"); assert(exitCode == ecBusy); - assert(result == ecSuccess || result == ecFailed); + assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters); exitCode = result; foreach (WeakGoals::iterator, i, waiters) { GoalPtr goal = i->lock(); @@ -736,6 +742,8 @@ HookInstance::~HookInstance() typedef enum {rpAccept, rpDecline, rpPostpone} HookReply; +class SubstitutionGoal; + class DerivationGoal : public Goal { private: @@ -985,10 +993,8 @@ void DerivationGoal::haveDerivation() /* We are first going to try to create the invalid output paths through substitutes. If that doesn't work, we'll build them. */ - foreach (PathSet::iterator, i, invalidOutputs) - /* Don't bother creating a substitution goal if there are no - substitutes. 
*/ - if (queryBoolSetting("build-use-substitutes", true) && worker.store.hasSubstitutes(*i)) + if (queryBoolSetting("build-use-substitutes", true)) + foreach (PathSet::iterator, i, invalidOutputs) addWaitee(worker.makeSubstitutionGoal(*i)); if (waitees.empty()) /* to prevent hang (no wake-up event) */ @@ -1002,10 +1008,10 @@ void DerivationGoal::outputsSubstituted() { trace("all outputs substituted (maybe)"); - if (nrFailed > 0 && !tryFallback) + if (nrFailed > 0 && nrFailed > nrNoSubstituters && !tryFallback) throw Error(format("some substitutes for the outputs of derivation `%1%' failed; try `--fallback'") % drvPath); - nrFailed = 0; + nrFailed = nrNoSubstituters = 0; if (checkPathValidity(false).size() == 0) { amDone(ecSuccess); @@ -2241,6 +2247,9 @@ private: /* The current substituter. */ Path sub; + /* Whether any substituter can realise this path */ + bool hasSubstitute; + /* Path info returned by the substituter's query info operation. */ SubstitutablePathInfo info; @@ -2282,6 +2291,7 @@ public: SubstitutionGoal::SubstitutionGoal(const Path & storePath, Worker & worker) : Goal(worker) + , hasSubstitute(false) { this->storePath = storePath; state = &SubstitutionGoal::init; @@ -2345,7 +2355,10 @@ void SubstitutionGoal::tryNext() /* None left. Terminate this goal and let someone else deal with it. */ debug(format("path `%1%' is required, but there is no substituter that can build it") % storePath); - amDone(ecFailed); + /* Hack: don't indicate failure if there were no substituters. + In that case the calling derivation should just do a + build. */ + amDone(hasSubstitute ? ecFailed : ecNoSubstituters); return; } @@ -2358,6 +2371,7 @@ void SubstitutionGoal::tryNext() SubstitutablePathInfos::iterator k = infos.find(storePath); if (k == infos.end()) { tryNext(); return; } info = k->second; + hasSubstitute = true; /* To maintain the closure invariant, we first have to realise the paths referenced by this one. 
*/ -- cgit 1.4.1 From 98a423b75aa9061f0164c316f9d2481ce6d5e2f1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Jul 2012 09:59:34 -0400 Subject: prim_import(): prefetch substitute info in parallel using queryMissing() --- src/libexpr/primops.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 7587dccea4aa..354fc39be073 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -51,6 +51,12 @@ static void prim_import(EvalState & state, Value * * args, Value & v) % path % ctx); if (isDerivation(ctx)) try { + /* For performance, prefetch all substitute info. */ + PathSet willBuild, willSubstitute, unknown; + unsigned long long downloadSize, narSize; + queryMissing(*store, singleton(ctx), + willBuild, willSubstitute, unknown, downloadSize, narSize); + /* !!! If using a substitute, we only need to fetch the selected output of this derivation. */ store->buildPaths(singleton(ctx)); -- cgit 1.4.1 From 099125435fc5ada63365a94ca153c711e706e225 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Jul 2012 10:57:28 -0400 Subject: download-from-binary-cache: add nix.conf options --- doc/manual/conf-file.xml | 20 ++++++++++++++++++++ perl/lib/Nix/Config.pm.in | 6 +++--- scripts/download-from-binary-cache.pl.in | 10 +++++++--- 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/doc/manual/conf-file.xml b/doc/manual/conf-file.xml index 25a009de9d45..e2890b1031ed 100644 --- a/doc/manual/conf-file.xml +++ b/doc/manual/conf-file.xml @@ -288,6 +288,26 @@ build-use-chroot = /dev /proc /bin + binary-caches + + A list of URLs of binary caches, separated by + whitespace. It can be overridden through the environment variable + NIX_BINARY_CACHES. The default is + http://nixos.org/binary-cache. + + + + + binary-caches-parallel-connections + + The maximum number of parallel HTTP connections + used by the binary cache substituter to get NAR info files. This + number should be high to minimise latency. 
It defaults to + 150. + + + + system This option specifies the canonical Nix system diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in index 64aaccd7cd31..57751b6b4057 100644 --- a/perl/lib/Nix/Config.pm.in +++ b/perl/lib/Nix/Config.pm.in @@ -14,16 +14,16 @@ $curl = "@curl@"; $useBindings = "@perlbindings@" eq "yes"; +%config = (); + sub readConfig { - my %config; - my $config = "@sysconfdir@/nix/nix.conf"; + my $config = "$confDir/nix.conf"; return unless -f $config; open CONFIG, "<$config" or die "cannot open `$config'"; while () { /^\s*([\w|-]+)\s*=\s*(.*)$/ or next; $config{$1} = $2; - print "|$1| -> |$2|\n"; } close CONFIG; } diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 37f8db0a9905..f062d17e6e94 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -10,9 +10,13 @@ use WWW::Curl::Multi; use strict; -my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, ($ENV{"NIX_BINARY_CACHES"} || "")); +Nix::Config::readConfig; -my $maxParallelRequests = 150; +my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, + ($ENV{"NIX_BINARY_CACHES"} // $Nix::Config::config{"binary-caches"} // "http://nixos.org/binary-cache")); + +my $maxParallelRequests = int($Nix::Config::config{"binary-caches-parallel-connections"} // 150); +$maxParallelRequests = 1 if $maxParallelRequests < 1; my ($dbh, $insertNAR, $queryNAR, $insertNegativeNAR, $queryNegativeNAR); my %cacheIds; @@ -22,7 +26,7 @@ my $activeRequests = 0; my $curlIdCount = 1; my %requests; my %scheduled; -my $caBundle = $ENV{"CURL_CA_BUNDLE"} || $ENV{"OPENSSL_X509_CERT_FILE"}; +my $caBundle = $ENV{"CURL_CA_BUNDLE"} // $ENV{"OPENSSL_X509_CERT_FILE"}; sub addRequest { -- cgit 1.4.1 From ae72be1b8bf65e6b52bc0c9d534e55a79ca6712b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Jul 2012 13:11:37 -0400 Subject: Add WWW::Curl as a dependency --- configure.ac | 11 +++++++++++ release.nix | 3 +++ 2 files 
changed, 14 insertions(+) diff --git a/configure.ac b/configure.ac index 28959198d598..53d653a0eed6 100644 --- a/configure.ac +++ b/configure.ac @@ -261,6 +261,10 @@ AC_ARG_WITH(dbd-sqlite, AC_HELP_STRING([--with-dbd-sqlite=PATH], [prefix of the Perl DBD::SQLite library]), perlFlags="$perlFlags -I$withval") +AC_ARG_WITH(www-curl, AC_HELP_STRING([--with-www-curl=PATH], + [prefix of the Perl WWW::Curl library]), + perlFlags="$perlFlags -I$withval") + AC_MSG_CHECKING([whether DBD::SQLite works]) if ! $perl $perlFlags -e 'use DBI; use DBD::SQLite;' 2>&5; then AC_MSG_RESULT(no) @@ -268,6 +272,13 @@ if ! $perl $perlFlags -e 'use DBI; use DBD::SQLite;' 2>&5; then fi AC_MSG_RESULT(yes) +AC_MSG_CHECKING([whether WWW::Curl works]) +if ! $perl $perlFlags -e 'use WWW::Curl;' 2>&5; then + AC_MSG_RESULT(no) + AC_MSG_FAILURE([The Perl module WWW::Curl is missing.]) +fi +AC_MSG_RESULT(yes) + AC_SUBST(perlFlags) diff --git a/release.nix b/release.nix index 5f20920f3c7b..f2d48691d4fe 100644 --- a/release.nix +++ b/release.nix @@ -29,6 +29,7 @@ let --with-xml-flags=--nonet --with-dbi=${perlPackages.DBI}/${perl.libPrefix} --with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix} + --with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix} ''; postUnpack = '' @@ -77,6 +78,7 @@ let --disable-init-state --with-dbi=${perlPackages.DBI}/${perl.libPrefix} --with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix} + --with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix} --enable-gc ''; @@ -134,6 +136,7 @@ let --disable-init-state --with-dbi=${perlPackages.DBI}/${perl.libPrefix} --with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix} + --with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix} ''; dontInstall = false; -- cgit 1.4.1 From 5ee8944155f21a0ab5a100a184163d7bd0e72679 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 10:13:16 -0400 Subject: Cleanup --- scripts/download-from-binary-cache.pl.in | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) 
diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index f062d17e6e94..2a70e5193390 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -233,7 +233,7 @@ sub getCacheId { } -sub cachedGetInfoFrom { +sub getCachedInfoFrom { my ($storePath, $binaryCacheUrl) = @_; $queryNAR->execute(getCacheId($binaryCacheUrl), basename($storePath)); @@ -279,7 +279,7 @@ sub printInfoParallel { foreach my $storePath (@paths) { my $found = 0; foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $info = cachedGetInfoFrom($storePath, $binaryCacheUrl); + my $info = getCachedInfoFrom($storePath, $binaryCacheUrl); if (defined $info) { printInfo($storePath, $info); $found = 1; @@ -323,7 +323,7 @@ sub downloadBinary { my ($storePath) = @_; foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $info = cachedGetInfoFrom($storePath, $binaryCacheUrl); + my $info = getCachedInfoFrom($storePath, $binaryCacheUrl); unless (defined $info) { next if negativeHit($storePath, $binaryCacheUrl); -- cgit 1.4.1 From 6586414bc70c8373faefd49afc5172881f3aad53 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 10:14:06 -0400 Subject: nix-env: Determine which paths have substitutes in parallel --- src/libexpr/get-drvs.hh | 7 ++++++- src/nix-env/nix-env.cc | 19 ++++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/src/libexpr/get-drvs.hh b/src/libexpr/get-drvs.hh index 2d260c57beee..1e5d0a817cca 100644 --- a/src/libexpr/get-drvs.hh +++ b/src/libexpr/get-drvs.hh @@ -32,6 +32,8 @@ private: bool metaInfoRead; MetaInfo meta; + + bool failed; // set if we get an AssertionError public: string name; @@ -41,7 +43,7 @@ public: /* !!! 
make this private */ Bindings * attrs; - DrvInfo() : metaInfoRead(false), attrs(0) { }; + DrvInfo() : metaInfoRead(false), failed(false), attrs(0) { }; string queryDrvPath(EvalState & state) const; string queryOutPath(EvalState & state) const; @@ -59,6 +61,9 @@ public: } void setMetaInfo(const MetaInfo & meta); + + void setFailed() { failed = true; }; + bool hasFailed() { return failed; }; }; diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 7aa6276e3a0a..2fd4246dd297 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -929,6 +929,21 @@ static void opQuery(Globals & globals, installed.insert(i->queryOutPath(globals.state)); } + + /* Query which paths have substitutes. */ + SubstitutablePathInfos subs; + if (printStatus) { + PathSet paths; + foreach (vector::iterator, i, elems2) + try { + paths.insert(i->queryOutPath(globals.state)); + } catch (AssertionError & e) { + printMsg(lvlTalkative, format("skipping derivation named `%1%' which gives an assertion failure") % i->name); + i->setFailed(); + } + store->querySubstitutablePathInfos(paths, subs); + } + /* Print the desired columns, or XML output. 
*/ Table table; @@ -938,6 +953,8 @@ static void opQuery(Globals & globals, foreach (vector::iterator, i, elems2) { try { + if (i->hasFailed()) continue; + startNest(nest, lvlDebug, format("outputting query result `%1%'") % i->attrPath); if (globals.prebuiltOnly && !isPrebuilt(globals.state, *i)) continue; @@ -949,7 +966,7 @@ static void opQuery(Globals & globals, XMLAttrs attrs; if (printStatus) { - bool hasSubs = store->hasSubstitutes(i->queryOutPath(globals.state)); + bool hasSubs = subs.find(i->queryOutPath(globals.state)) != subs.end(); bool isInstalled = installed.find(i->queryOutPath(globals.state)) != installed.end(); bool isValid = store->isValidPath(i->queryOutPath(globals.state)); if (xmlOutput) { -- cgit 1.4.1 From eb3036da87659fe7cf384c2362e7f7b8b67189a1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 10:43:24 -0400 Subject: Implement querySubstitutablePathInfos() in the daemon Also removed querySubstitutablePathInfo(). --- src/libstore/local-store.cc | 12 --------- src/libstore/local-store.hh | 3 --- src/libstore/remote-store.cc | 60 +++++++++++++++++++++++++---------------- src/libstore/remote-store.hh | 3 --- src/libstore/store-api.hh | 8 +++--- src/libstore/worker-protocol.hh | 3 ++- src/nix-worker/nix-worker.cc | 36 +++++++++++++++++++------ 7 files changed, 70 insertions(+), 55 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index b4ea4b748178..339e507957f6 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -967,18 +967,6 @@ void LocalStore::querySubstitutablePathInfos(const Path & substituter, } -bool LocalStore::querySubstitutablePathInfo(const Path & path, - SubstitutablePathInfo & info) -{ - SubstitutablePathInfos infos; - querySubstitutablePathInfos(singleton(path), infos); - SubstitutablePathInfos::iterator i = infos.find(path); - if (i == infos.end()) return false; - info = i->second; - return true; -} - - void LocalStore::querySubstitutablePathInfos(const 
PathSet & paths, SubstitutablePathInfos & infos) { diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index c4d8be692a36..78217fb71f04 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -125,9 +125,6 @@ public: bool hasSubstitutes(const Path & path); - bool querySubstitutablePathInfo(const Path & path, - SubstitutablePathInfo & info); - void querySubstitutablePathInfos(const Path & substituter, PathSet & paths, SubstitutablePathInfos & infos); diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 1cf67d3731d1..9579481c7eb5 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -237,34 +237,48 @@ bool RemoteStore::hasSubstitutes(const Path & path) } -bool RemoteStore::querySubstitutablePathInfo(const Path & path, - SubstitutablePathInfo & info) -{ - openConnection(); - if (GET_PROTOCOL_MINOR(daemonVersion) < 3) return false; - writeInt(wopQuerySubstitutablePathInfo, to); - writeString(path, to); - processStderr(); - unsigned int reply = readInt(from); - if (reply == 0) return false; - info.deriver = readString(from); - if (info.deriver != "") assertStorePath(info.deriver); - info.references = readStorePaths(from); - info.downloadSize = readLongLong(from); - info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? 
readLongLong(from) : 0; - return true; -} - - void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos) { if (paths.empty()) return; - printMsg(lvlError, format("QUERYING %1% (REMOTE)") % showPaths(paths)); - foreach (PathSet::const_iterator, i, paths) { - SubstitutablePathInfo info; - if (querySubstitutablePathInfo(*i, info)) + + openConnection(); + + if (GET_PROTOCOL_MINOR(daemonVersion) < 3) return; + + if (GET_PROTOCOL_MINOR(daemonVersion) < 12) { + + foreach (PathSet::const_iterator, i, paths) { + SubstitutablePathInfo info; + writeInt(wopQuerySubstitutablePathInfo, to); + writeString(*i, to); + processStderr(); + unsigned int reply = readInt(from); + if (reply == 0) continue; + info.deriver = readString(from); + if (info.deriver != "") assertStorePath(info.deriver); + info.references = readStorePaths(from); + info.downloadSize = readLongLong(from); + info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0; infos[*i] = info; + } + + } else { + + writeInt(wopQuerySubstitutablePathInfos, to); + writeStrings(paths, to); + processStderr(); + unsigned int count = readInt(from); + for (unsigned int n = 0; n < count; n++) { + Path path = readStorePath(from); + SubstitutablePathInfo & info(infos[path]); + info.deriver = readString(from); + if (info.deriver != "") assertStorePath(info.deriver); + info.references = readStorePaths(from); + info.downloadSize = readLongLong(from); + info.narSize = readLongLong(from); + } + } } diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 1056a6115849..c1ac2d05a1f5 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -45,9 +45,6 @@ public: bool hasSubstitutes(const Path & path); - bool querySubstitutablePathInfo(const Path & path, - SubstitutablePathInfo & info); - void querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos); diff --git a/src/libstore/store-api.hh 
b/src/libstore/store-api.hh index 92b2ddb1e730..37b44d4dac7d 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -145,11 +145,9 @@ public: /* Query whether a path has substitutes. */ virtual bool hasSubstitutes(const Path & path) = 0; - /* Query the references, deriver and download size of a - substitutable path. */ - virtual bool querySubstitutablePathInfo(const Path & path, - SubstitutablePathInfo & info) = 0; - + /* Query substitute info (i.e. references, derivers and download + sizes) of a set of paths. If a path does not have substitute + info, it's omitted from the resulting ‘infos’ map. */ virtual void querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos) = 0; diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index 6a5f0ed40d46..76721d1fc3be 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -8,7 +8,7 @@ namespace nix { #define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_2 0x6478696f -#define PROTOCOL_VERSION 0x10b +#define PROTOCOL_VERSION 0x10c #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) @@ -40,6 +40,7 @@ typedef enum { wopQueryPathInfo = 26, wopImportPaths = 27, wopQueryDerivationOutputNames = 28, + wopQuerySubstitutablePathInfos = 29, } WorkerOp; diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index 2f0a2ab209c5..c4a42de12fe1 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -529,16 +529,36 @@ static void performOp(unsigned int clientVersion, case wopQuerySubstitutablePathInfo: { Path path = absPath(readString(from)); startWork(); - SubstitutablePathInfo info; - bool res = store->querySubstitutablePathInfo(path, info); + SubstitutablePathInfos infos; + store->querySubstitutablePathInfos(singleton(path), infos); stopWork(); - writeInt(res ? 
1 : 0, to); - if (res) { - writeString(info.deriver, to); - writeStrings(info.references, to); - writeLongLong(info.downloadSize, to); + SubstitutablePathInfos::iterator i = infos.find(path); + if (i == infos.end()) + writeInt(0, to); + else { + writeInt(1, to); + writeString(i->second.deriver, to); + writeStrings(i->second.references, to); + writeLongLong(i->second.downloadSize, to); if (GET_PROTOCOL_MINOR(clientVersion) >= 7) - writeLongLong(info.narSize, to); + writeLongLong(i->second.narSize, to); + } + break; + } + + case wopQuerySubstitutablePathInfos: { + PathSet paths = readStorePaths(from); + startWork(); + SubstitutablePathInfos infos; + store->querySubstitutablePathInfos(paths, infos); + stopWork(); + writeInt(infos.size(), to); + foreach (SubstitutablePathInfos::iterator, i, infos) { + writeString(i->first, to); + writeString(i->second.deriver, to); + writeStrings(i->second.references, to); + writeLongLong(i->second.downloadSize, to); + writeLongLong(i->second.narSize, to); } break; } -- cgit 1.4.1 From 667d5f1936616dc829f9f92f8e5d5141ba5285a7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 10:49:04 -0400 Subject: Rename queryValidPaths() to queryAllValidPaths() --- src/libstore/local-store.cc | 4 ++-- src/libstore/local-store.hh | 2 +- src/libstore/optimise-store.cc | 2 +- src/libstore/remote-store.cc | 4 ++-- src/libstore/remote-store.hh | 2 +- src/libstore/store-api.hh | 12 ++++++------ src/libstore/worker-protocol.hh | 2 +- src/nix-store/nix-store.cc | 2 +- src/nix-worker/nix-worker.cc | 4 ++-- 9 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 339e507957f6..89c5279b1b6e 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -744,7 +744,7 @@ bool LocalStore::isValidPath(const Path & path) } -PathSet LocalStore::queryValidPaths() +PathSet LocalStore::queryAllValidPaths() { SQLiteStmt stmt; stmt.create(db, "select path from 
ValidPaths"); @@ -1449,7 +1449,7 @@ void LocalStore::verifyStore(bool checkContents) /* Check whether all valid paths actually exist. */ printMsg(lvlInfo, "checking path existence..."); - PathSet validPaths2 = queryValidPaths(), validPaths, done; + PathSet validPaths2 = queryAllValidPaths(), validPaths, done; foreach (PathSet::iterator, i, validPaths2) verifyPath(*i, store, done, validPaths); diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 78217fb71f04..d24c2da0ee17 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -99,7 +99,7 @@ public: bool isValidPath(const Path & path); - PathSet queryValidPaths(); + PathSet queryAllValidPaths(); ValidPathInfo queryPathInfo(const Path & path); diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 2ca98f46ddf4..a486e66ef59e 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -178,7 +178,7 @@ void LocalStore::optimiseStore(bool dryRun, OptimiseStats & stats) { HashToPath hashToPath; - PathSet paths = queryValidPaths(); + PathSet paths = queryAllValidPaths(); foreach (PathSet::iterator, i, paths) { addTempRoot(*i); diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 9579481c7eb5..04a9e28c9007 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -217,10 +217,10 @@ bool RemoteStore::isValidPath(const Path & path) } -PathSet RemoteStore::queryValidPaths() +PathSet RemoteStore::queryAllValidPaths() { openConnection(); - writeInt(wopQueryValidPaths, to); + writeInt(wopQueryAllValidPaths, to); processStderr(); return readStorePaths(from); } diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index c1ac2d05a1f5..6e92498377dd 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -27,7 +27,7 @@ public: bool isValidPath(const Path & path); - PathSet queryValidPaths(); + PathSet queryAllValidPaths(); ValidPathInfo 
queryPathInfo(const Path & path); diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 37b44d4dac7d..802591766af7 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -110,20 +110,20 @@ public: virtual ~StoreAPI() { } - /* Checks whether a path is valid. */ + /* Check whether a path is valid. */ virtual bool isValidPath(const Path & path) = 0; - /* Query the set of valid paths. */ - virtual PathSet queryValidPaths() = 0; + /* Query the set of all valid paths. */ + virtual PathSet queryAllValidPaths() = 0; /* Query information about a valid path. */ virtual ValidPathInfo queryPathInfo(const Path & path) = 0; - /* Queries the hash of a valid path. */ + /* Query the hash of a valid path. */ virtual Hash queryPathHash(const Path & path) = 0; - /* Queries the set of outgoing FS references for a store path. - The result is not cleared. */ + /* Query the set of outgoing FS references for a store path. The + result is not cleared. */ virtual void queryReferences(const Path & path, PathSet & references) = 0; diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index 76721d1fc3be..cacd56f14edd 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -34,7 +34,7 @@ typedef enum { wopCollectGarbage = 20, wopQuerySubstitutablePathInfo = 21, wopQueryDerivationOutputs = 22, - wopQueryValidPaths = 23, + wopQueryAllValidPaths = 23, wopQueryFailedPaths = 24, wopClearFailedPaths = 25, wopQueryPathInfo = 26, diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 23863525fe8a..47c76693f1dc 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -465,7 +465,7 @@ static void opDumpDB(Strings opFlags, Strings opArgs) if (!opFlags.empty()) throw UsageError("unknown flag"); if (!opArgs.empty()) throw UsageError("no arguments expected"); - PathSet validPaths = store->queryValidPaths(); + PathSet validPaths = store->queryAllValidPaths(); foreach 
(PathSet::iterator, i, validPaths) cout << store->makeValidityRegistration(singleton(*i), true, true); } diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index c4a42de12fe1..e786318ad45f 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -563,9 +563,9 @@ static void performOp(unsigned int clientVersion, break; } - case wopQueryValidPaths: { + case wopQueryAllValidPaths: { startWork(); - PathSet paths = store->queryValidPaths(); + PathSet paths = store->queryAllValidPaths(); stopWork(); writeStrings(paths, to); break; -- cgit 1.4.1 From 58ef4d9a95584fb89ebcf6222fbac6e698aa6b0b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 11:08:47 -0400 Subject: Add a function queryValidPaths() queryValidPaths() combines multiple calls to isValidPath() in one. This matters when using the Nix daemon because it reduces latency. For instance, on "nix-env -qas \*" it reduces execution time from 5.7s to 4.7s (which is indistinguishable from the non-daemon case). 
--- src/libstore/local-store.cc | 9 +++++++++ src/libstore/local-store.hh | 2 ++ src/libstore/remote-store.cc | 17 +++++++++++++++++ src/libstore/remote-store.hh | 2 ++ src/libstore/store-api.hh | 3 +++ src/libstore/worker-protocol.hh | 1 + src/nix-env/nix-env.cc | 9 ++++++--- src/nix-worker/nix-worker.cc | 9 +++++++++ 8 files changed, 49 insertions(+), 3 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 89c5279b1b6e..e3d23fdfbcb7 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -744,6 +744,15 @@ bool LocalStore::isValidPath(const Path & path) } +PathSet LocalStore::queryValidPaths(const PathSet & paths) +{ + PathSet res; + foreach (PathSet::const_iterator, i, paths) + if (isValidPath(*i)) res.insert(*i); + return res; +} + + PathSet LocalStore::queryAllValidPaths() { SQLiteStmt stmt; diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index d24c2da0ee17..7398c1b9e5e0 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -99,6 +99,8 @@ public: bool isValidPath(const Path & path); + PathSet queryValidPaths(const PathSet & paths); + PathSet queryAllValidPaths(); ValidPathInfo queryPathInfo(const Path & path); diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 04a9e28c9007..0cd29c5751d2 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -217,6 +217,23 @@ bool RemoteStore::isValidPath(const Path & path) } +PathSet RemoteStore::queryValidPaths(const PathSet & paths) +{ + if (GET_PROTOCOL_MINOR(daemonVersion) < 12) { + PathSet res; + foreach (PathSet::const_iterator, i, paths) + if (isValidPath(*i)) res.insert(*i); + return res; + } else { + openConnection(); + writeInt(wopQueryValidPaths, to); + writeStrings(paths, to); + processStderr(); + return readStorePaths(from); + } +} + + PathSet RemoteStore::queryAllValidPaths() { openConnection(); diff --git a/src/libstore/remote-store.hh 
b/src/libstore/remote-store.hh index 6e92498377dd..2668fe25689f 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -27,6 +27,8 @@ public: bool isValidPath(const Path & path); + PathSet queryValidPaths(const PathSet & paths); + PathSet queryAllValidPaths(); ValidPathInfo queryPathInfo(const Path & path); diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 802591766af7..13dcd9269d3a 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -113,6 +113,9 @@ public: /* Check whether a path is valid. */ virtual bool isValidPath(const Path & path) = 0; + /* Query which of the given paths is valid. */ + virtual PathSet queryValidPaths(const PathSet & paths) = 0; + /* Query the set of all valid paths. */ virtual PathSet queryAllValidPaths() = 0; diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index cacd56f14edd..b34ad484613d 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -41,6 +41,7 @@ typedef enum { wopImportPaths = 27, wopQueryDerivationOutputNames = 28, wopQuerySubstitutablePathInfos = 29, + wopQueryValidPaths = 30, } WorkerOp; diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 2fd4246dd297..91b82c0d09e6 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -932,6 +932,7 @@ static void opQuery(Globals & globals, /* Query which paths have substitutes. 
*/ SubstitutablePathInfos subs; + PathSet validPaths; if (printStatus) { PathSet paths; foreach (vector::iterator, i, elems2) @@ -941,6 +942,7 @@ static void opQuery(Globals & globals, printMsg(lvlTalkative, format("skipping derivation named `%1%' which gives an assertion failure") % i->name); i->setFailed(); } + validPaths = store->queryValidPaths(paths); store->querySubstitutablePathInfos(paths, subs); } @@ -966,9 +968,10 @@ static void opQuery(Globals & globals, XMLAttrs attrs; if (printStatus) { - bool hasSubs = subs.find(i->queryOutPath(globals.state)) != subs.end(); - bool isInstalled = installed.find(i->queryOutPath(globals.state)) != installed.end(); - bool isValid = store->isValidPath(i->queryOutPath(globals.state)); + Path outPath = i->queryOutPath(globals.state); + bool hasSubs = subs.find(outPath) != subs.end(); + bool isInstalled = installed.find(outPath) != installed.end(); + bool isValid = validPaths.find(outPath) != validPaths.end(); if (xmlOutput) { attrs["installed"] = isInstalled ? "1" : "0"; attrs["valid"] = isValid ? "1" : "0"; diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index e786318ad45f..4d22f788595c 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -297,6 +297,15 @@ static void performOp(unsigned int clientVersion, break; } + case wopQueryValidPaths: { + PathSet paths = readStorePaths(from); + startWork(); + PathSet res = store->queryValidPaths(paths); + stopWork(); + writeStrings(res, to); + break; + } + case wopHasSubstitutes: { Path path = readStorePath(from); startWork(); -- cgit 1.4.1 From 09a6321aeb7393cdb4b5af62d2e4106d83124fdf Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 17:52:18 -0400 Subject: Replace hasSubstitutes() with querySubstitutablePaths() querySubstitutablePaths() takes a set of paths, so this greatly reduces daemon <-> client latency. 
--- src/libstore/local-store.cc | 18 +++++++++++++----- src/libstore/local-store.hh | 4 +--- src/libstore/remote-store.cc | 24 +++++++++++++++++------- src/libstore/remote-store.hh | 2 +- src/libstore/store-api.hh | 4 ++-- src/libstore/worker-protocol.hh | 1 + src/nix-env/nix-env.cc | 10 ++++++---- src/nix-worker/nix-worker.cc | 13 +++++++++++-- 8 files changed, 52 insertions(+), 24 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index e3d23fdfbcb7..6e4cd053c859 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -932,16 +932,24 @@ template T getIntLine(int fd) } -bool LocalStore::hasSubstitutes(const Path & path) +PathSet LocalStore::querySubstitutablePaths(const PathSet & paths) { + PathSet res; foreach (Paths::iterator, i, substituters) { + if (res.size() == paths.size()) break; RunningSubstituter & run(runningSubstituters[*i]); startSubstituter(*i, run); - writeLine(run.to, "have\n" + path); - if (getIntLine(run.from)) return true; + string s = "have "; + foreach (PathSet::const_iterator, i, paths) + if (res.find(*i) == res.end()) { s += *i; s += " "; } + writeLine(run.to, s); + while (true) { + Path path = readLine(run.from); + if (path == "") break; + res.insert(path); + } } - - return false; + return res; } diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 7398c1b9e5e0..3281a9106e9c 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -123,9 +123,7 @@ public: StringSet queryDerivationOutputNames(const Path & path); - PathSet querySubstitutablePaths(); - - bool hasSubstitutes(const Path & path); + PathSet querySubstitutablePaths(const PathSet & paths); void querySubstitutablePathInfos(const Path & substituter, PathSet & paths, SubstitutablePathInfos & infos); diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 0cd29c5751d2..2232720c2bfd 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ 
-219,13 +219,13 @@ bool RemoteStore::isValidPath(const Path & path) PathSet RemoteStore::queryValidPaths(const PathSet & paths) { + openConnection(); if (GET_PROTOCOL_MINOR(daemonVersion) < 12) { PathSet res; foreach (PathSet::const_iterator, i, paths) if (isValidPath(*i)) res.insert(*i); return res; } else { - openConnection(); writeInt(wopQueryValidPaths, to); writeStrings(paths, to); processStderr(); @@ -243,14 +243,24 @@ PathSet RemoteStore::queryAllValidPaths() } -bool RemoteStore::hasSubstitutes(const Path & path) +PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths) { openConnection(); - writeInt(wopHasSubstitutes, to); - writeString(path, to); - processStderr(); - unsigned int reply = readInt(from); - return reply != 0; + if (GET_PROTOCOL_MINOR(daemonVersion) < 12) { + PathSet res; + foreach (PathSet::const_iterator, i, paths) { + writeInt(wopHasSubstitutes, to); + writeString(*i, to); + processStderr(); + if (readInt(from)) res.insert(*i); + } + return res; + } else { + writeInt(wopQuerySubstitutablePaths, to); + writeStrings(paths, to); + processStderr(); + return readStorePaths(from); + } } diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 2668fe25689f..5b007be483a2 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -45,7 +45,7 @@ public: StringSet queryDerivationOutputNames(const Path & path); - bool hasSubstitutes(const Path & path); + PathSet querySubstitutablePaths(const PathSet & paths); void querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos); diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 13dcd9269d3a..e7963d053f31 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -145,8 +145,8 @@ public: /* Query the output names of the derivation denoted by `path'. */ virtual StringSet queryDerivationOutputNames(const Path & path) = 0; - /* Query whether a path has substitutes. 
*/ - virtual bool hasSubstitutes(const Path & path) = 0; + /* Query which of the given paths have substitutes. */ + virtual PathSet querySubstitutablePaths(const PathSet & paths) = 0; /* Query substitute info (i.e. references, derivers and download sizes) of a set of paths. If a path does not have substitute diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index b34ad484613d..6011ec211df9 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -42,6 +42,7 @@ typedef enum { wopQueryDerivationOutputNames = 28, wopQuerySubstitutablePathInfos = 29, wopQueryValidPaths = 30, + wopQuerySubstitutablePaths = 31, } WorkerOp; diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 91b82c0d09e6..f06f23dad523 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -211,9 +211,12 @@ static int comparePriorities(EvalState & state, static bool isPrebuilt(EvalState & state, const DrvInfo & elem) { + assert(false); +#if 0 return store->isValidPath(elem.queryOutPath(state)) || store->hasSubstitutes(elem.queryOutPath(state)); +#endif } @@ -931,8 +934,7 @@ static void opQuery(Globals & globals, /* Query which paths have substitutes. 
*/ - SubstitutablePathInfos subs; - PathSet validPaths; + PathSet validPaths, substitutablePaths; if (printStatus) { PathSet paths; foreach (vector::iterator, i, elems2) @@ -943,7 +945,7 @@ static void opQuery(Globals & globals, i->setFailed(); } validPaths = store->queryValidPaths(paths); - store->querySubstitutablePathInfos(paths, subs); + substitutablePaths = store->querySubstitutablePaths(paths); } @@ -969,7 +971,7 @@ static void opQuery(Globals & globals, if (printStatus) { Path outPath = i->queryOutPath(globals.state); - bool hasSubs = subs.find(outPath) != subs.end(); + bool hasSubs = substitutablePaths.find(outPath) != substitutablePaths.end(); bool isInstalled = installed.find(outPath) != installed.end(); bool isValid = validPaths.find(outPath) != validPaths.end(); if (xmlOutput) { diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index 4d22f788595c..d3907fa8f125 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -309,12 +309,21 @@ static void performOp(unsigned int clientVersion, case wopHasSubstitutes: { Path path = readStorePath(from); startWork(); - bool result = store->hasSubstitutes(path); + PathSet res = store->querySubstitutablePaths(singleton(path)); stopWork(); - writeInt(result, to); + writeInt(res.find(path) != res.end(), to); break; } + case wopQuerySubstitutablePaths: { + PathSet paths = readStorePaths(from); + startWork(); + PathSet res = store->querySubstitutablePaths(paths); + stopWork(); + writeStrings(res, to); + break; + } + case wopQueryPathHash: { Path path = readStorePath(from); startWork(); -- cgit 1.4.1 From b74d92755d1ca6a1538f292dcb5a906f66af7b51 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 17:53:20 -0400 Subject: download-from-binary-cache: Use HEAD requests if possible In "nix-env -qas", we don't need the substitute info, we just need to know if it exists. This can be done using a HTTP HEAD request, which saves bandwidth. 
Note however that curl currently has a bug that prevents it from reusing HTTP connections if HEAD requests return a 404: https://sourceforge.net/tracker/?func=detail&aid=3542731&group_id=976&atid=100976 Without the patch attached to the issue, using HEAD is actually quite a bit slower than GET. --- scripts/download-from-binary-cache.pl.in | 91 +++++++++++++++++++++++++++----- 1 file changed, 79 insertions(+), 12 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 2a70e5193390..93155e0b562f 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -18,7 +18,7 @@ my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, my $maxParallelRequests = int($Nix::Config::config{"binary-caches-parallel-connections"} // 150); $maxParallelRequests = 1 if $maxParallelRequests < 1; -my ($dbh, $insertNAR, $queryNAR, $insertNegativeNAR, $queryNegativeNAR); +my ($dbh, $insertNAR, $queryNAR, $insertNARExistence, $queryNARExistence); my %cacheIds; my $curlm = WWW::Curl::Multi->new; @@ -30,7 +30,7 @@ my $caBundle = $ENV{"CURL_CA_BUNDLE"} // $ENV{"OPENSSL_X509_CERT_FILE"}; sub addRequest { - my ($storePath, $url) = @_; + my ($storePath, $url, $head) = @_; my $curl = WWW::Curl::Easy->new; my $curlId = $curlIdCount++; @@ -41,6 +41,7 @@ sub addRequest { $curl->setopt(CURLOPT_WRITEDATA, \$requests{$curlId}->{content}); $curl->setopt(CURLOPT_FOLLOWLOCATION, 1); $curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle; + $curl->setopt(CURLOPT_NOBODY, 1) if $head; if ($activeRequests >= $maxParallelRequests) { $scheduled{$curlId} = 1; @@ -127,9 +128,10 @@ EOF EOF $dbh->do(<prepare("select * from NARs where cache = ? 
and storePath = ?") or die; - $insertNegativeNAR = $dbh->prepare( - "insert or replace into NegativeNARs(cache, storePath, timestamp) values (?, ?, ?)") or die; + $insertNARExistence = $dbh->prepare( + "insert or replace into NARExistence(cache, storePath, exist, timestamp) values (?, ?, ?, ?)") or die; - $queryNegativeNAR = $dbh->prepare("select 1 from NegativeNARs where cache = ? and storePath = ?") or die; + $queryNARExistence = $dbh->prepare("select exist from NARExistence where cache = ? and storePath = ?") or die; } + sub negativeHit { my ($storePath, $binaryCacheUrl) = @_; - $queryNegativeNAR->execute(getCacheId($binaryCacheUrl), basename($storePath)); - return @{$queryNegativeNAR->fetchall_arrayref()} != 0; + $queryNARExistence->execute(getCacheId($binaryCacheUrl), basename($storePath)); + my $res = $queryNARExistence->fetchrow_hashref(); + return defined $res && $res->{exist} == 0; +} + + +sub positiveHit { + my ($storePath, $binaryCacheUrl) = @_; + return 1 if defined getCachedInfoFrom($storePath, $binaryCacheUrl); + $queryNARExistence->execute(getCacheId($binaryCacheUrl), basename($storePath)); + my $res = $queryNARExistence->fetchrow_hashref(); + return defined $res && $res->{exist} == 1; } @@ -166,7 +179,7 @@ sub processNARInfo { print STDERR "could not download ‘$request->{url}’ (" . ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n"; } else { - $insertNegativeNAR->execute($cacheId, basename($storePath), time()); + $insertNARExistence->execute($cacheId, basename($storePath), 0, time()); } return undef; } @@ -319,6 +332,61 @@ sub printInfoParallel { } +sub printSubstitutablePaths { + my @paths = @_; + + # First look for paths that have cached info. 
+ my @left; + foreach my $storePath (@paths) { + my $found = 0; + foreach my $binaryCacheUrl (@binaryCacheUrls) { + if (positiveHit($storePath, $binaryCacheUrl)) { + print "$storePath\n"; + $found = 1; + last; + } + } + push @left, $storePath if !$found; + } + + return if scalar @left == 0; + + # For remaining paths, do HEAD requests. + foreach my $binaryCacheUrl (@binaryCacheUrls) { + my $cacheId = getCacheId($binaryCacheUrl); + + my @left2; + %requests = (); + foreach my $storePath (@left) { + if (negativeHit($storePath, $binaryCacheUrl)) { + push @left2, $storePath; + next; + } + addRequest($storePath, infoUrl($binaryCacheUrl, $storePath), 1); + } + + processRequests; + + foreach my $request (values %requests) { + if ($request->{result} != 0 || $request->{httpStatus} != 200) { + if ($request->{httpStatus} != 404) { + print STDERR "could not check ‘$request->{url}’ (" . + ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n"; + } else { + $insertNARExistence->execute($cacheId, basename($request->{storePath}), 0, time()); + } + push @left2, $request->{storePath}; + } else { + $insertNARExistence->execute($cacheId, basename($request->{storePath}), 1, time()); + print "$request->{storePath}\n"; + } + } + + @left = @left2; + } +} + + sub downloadBinary { my ($storePath) = @_; @@ -371,9 +439,8 @@ if ($ARGV[0] eq "--query") { my ($cmd, @args) = split " ", $_; if ($cmd eq "have") { - my $storePath = ; chomp $storePath; - # FIXME: want to give correct info here, but it's too slow. 
- print "0\n"; + printSubstitutablePaths(@args); + print "\n"; } elsif ($cmd eq "info") { -- cgit 1.4.1 From d287b62b6432ce3155e963c6471edf79ec70439a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 18:05:30 -0400 Subject: Set the User-Agent header to "Nix/" --- perl/lib/Nix/Config.pm.in | 2 ++ scripts/download-from-binary-cache.pl.in | 1 + 2 files changed, 3 insertions(+) diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in index 57751b6b4057..ed197821e89f 100644 --- a/perl/lib/Nix/Config.pm.in +++ b/perl/lib/Nix/Config.pm.in @@ -1,5 +1,7 @@ package Nix::Config; +$version = "@version@"; + $binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@"; $libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@libexecdir@"; $stateDir = $ENV{"NIX_STATE_DIR"} || "@localstatedir@/nix"; diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 93155e0b562f..ca3ff807be84 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -41,6 +41,7 @@ sub addRequest { $curl->setopt(CURLOPT_WRITEDATA, \$requests{$curlId}->{content}); $curl->setopt(CURLOPT_FOLLOWLOCATION, 1); $curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle; + $curl->setopt(CURLOPT_USERAGENT, "Nix/$Nix::Config::version"); $curl->setopt(CURLOPT_NOBODY, 1) if $head; if ($activeRequests >= $maxParallelRequests) { -- cgit 1.4.1 From 15c15da482eb30f95f4dab04b582a45edc10815b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 18:07:41 -0400 Subject: Add some missing --version switches --- scripts/nix-build.in | 5 +++++ scripts/nix-channel.in | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/scripts/nix-build.in b/scripts/nix-build.in index 35b186bb715d..13404f1dcb53 100755 --- a/scripts/nix-build.in +++ b/scripts/nix-build.in @@ -58,6 +58,11 @@ EOF # '` hack } + elsif ($arg eq "--version") { + print "nix-build (Nix) $Nix::Config::version\n"; + exit 0; + } + elsif ($arg eq "--add-drv-link") { 
$drvLink = "./derivation"; } diff --git a/scripts/nix-channel.in b/scripts/nix-channel.in index 6883ffd18db2..e7a4b0900e86 100755 --- a/scripts/nix-channel.in +++ b/scripts/nix-channel.in @@ -194,6 +194,11 @@ while (scalar @ARGV) { usageError; } + elsif ($arg eq "--version") { + print "nix-channel (Nix) $Nix::Config::version\n"; + exit 0; + } + else { die "unknown argument `$arg'; try `--help'"; } -- cgit 1.4.1 From f2bdc87595376efb2d05a8555b0686922a298929 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 18:52:09 -0400 Subject: Update the other substituters --- scripts/copy-from-other-stores.pl.in | 61 +++++++++++++------------- scripts/download-using-manifests.pl.in | 78 +++++++++++++++++----------------- src/libstore/globals.cc | 4 +- 3 files changed, 72 insertions(+), 71 deletions(-) diff --git a/scripts/copy-from-other-stores.pl.in b/scripts/copy-from-other-stores.pl.in index b930b720725e..92869ee7a107 100755 --- a/scripts/copy-from-other-stores.pl.in +++ b/scripts/copy-from-other-stores.pl.in @@ -36,42 +36,45 @@ sub findStorePath { if ($ARGV[0] eq "--query") { while () { - my $cmd = $_; chomp $cmd; + chomp; + my ($cmd, @args) = split " ", $_; if ($cmd eq "have") { - my $storePath = ; chomp $storePath; - print STDOUT (defined findStorePath($storePath) ? "1\n" : "0\n"); + foreach my $storePath (@args) { + print "$storePath\n" if defined findStorePath($storePath); + } + print "\n"; } elsif ($cmd eq "info") { - my $storePath = ; chomp $storePath; - my ($store, $sourcePath) = findStorePath($storePath); - if (!defined $store) { - print "0\n"; - next; # not an error - } - print "1\n"; + foreach my $storePath (@args) { + my ($store, $sourcePath) = findStorePath($storePath); + next unless defined $store; - $ENV{"NIX_DB_DIR"} = "$store/var/nix/db"; + $ENV{"NIX_DB_DIR"} = "$store/var/nix/db"; - my $deriver = `@bindir@/nix-store --query --deriver $storePath`; - die "cannot query deriver of `$storePath'" if $? 
!= 0; - chomp $deriver; - $deriver = "" if $deriver eq "unknown-deriver"; - - my @references = split "\n", - `@bindir@/nix-store --query --references $storePath`; - die "cannot query references of `$storePath'" if $? != 0; - - my $narSize = `@bindir@/nix-store --query --size $storePath`; - die "cannot query size of `$storePath'" if $? != 0; - chomp $narSize; - - print "$deriver\n"; - print scalar @references, "\n"; - print "$_\n" foreach @references; - print "$narSize\n"; - print "$narSize\n"; + my $deriver = `@bindir@/nix-store --query --deriver $storePath`; + die "cannot query deriver of `$storePath'" if $? != 0; + chomp $deriver; + $deriver = "" if $deriver eq "unknown-deriver"; + + my @references = split "\n", + `@bindir@/nix-store --query --references $storePath`; + die "cannot query references of `$storePath'" if $? != 0; + + my $narSize = `@bindir@/nix-store --query --size $storePath`; + die "cannot query size of `$storePath'" if $? != 0; + chomp $narSize; + + print "$storePath\n"; + print "$deriver\n"; + print scalar @references, "\n"; + print "$_\n" foreach @references; + print "$narSize\n"; + print "$narSize\n"; + } + + print "\n"; } else { die "unknown command `$cmd'"; } diff --git a/scripts/download-using-manifests.pl.in b/scripts/download-using-manifests.pl.in index ef663dabb1ef..f00debc68546 100755 --- a/scripts/download-using-manifests.pl.in +++ b/scripts/download-using-manifests.pl.in @@ -173,56 +173,54 @@ sub computeSmallestDownload { if ($ARGV[0] eq "--query") { while () { - my $cmd = $_; chomp $cmd; + chomp; + my ($cmd, @args) = split " ", $_; if ($cmd eq "have") { - my $storePath = ; chomp $storePath; - print STDOUT ( - scalar @{$dbh->selectcol_arrayref("select 1 from NARs where storePath = ?", {}, $storePath)} > 0 - ? 
"1\n" : "0\n"); + foreach my $storePath (@args) { + print "$storePath\n" if scalar @{$dbh->selectcol_arrayref("select 1 from NARs where storePath = ?", {}, $storePath)} > 0; + } + print "\n"; } elsif ($cmd eq "info") { - my $storePath = ; chomp $storePath; + foreach my $storePath (@args) { - my $infos = $dbh->selectall_arrayref( - "select * from NARs where storePath = ?", - { Slice => {} }, $storePath); + my $infos = $dbh->selectall_arrayref( + "select * from NARs where storePath = ?", + { Slice => {} }, $storePath); - my $info; - if (scalar @{$infos} > 0) { - $info = @{$infos}[0]; - } - else { - print "0\n"; - next; # not an error - } - - print "1\n"; - print "$info->{deriver}\n"; - my @references = split " ", $info->{refs}; - print scalar @references, "\n"; - print "$_\n" foreach @references; - - my @path = computeSmallestDownload $storePath; - - my $downloadSize = 0; - while (scalar @path > 0) { - my $edge = pop @path; - my $u = $edge->{start}; - my $v = $edge->{end}; - if ($edge->{type} eq "patch") { - $downloadSize += $edge->{info}->{size} || 0; - } - elsif ($edge->{type} eq "narfile") { - $downloadSize += $edge->{info}->{size} || 0; + next unless scalar @{$infos} > 0; + my $info = @{$infos}[0]; + + print "$storePath\n"; + print "$info->{deriver}\n"; + my @references = split " ", $info->{refs}; + print scalar @references, "\n"; + print "$_\n" foreach @references; + + my @path = computeSmallestDownload $storePath; + + my $downloadSize = 0; + while (scalar @path > 0) { + my $edge = pop @path; + my $u = $edge->{start}; + my $v = $edge->{end}; + if ($edge->{type} eq "patch") { + $downloadSize += $edge->{info}->{size} || 0; + } + elsif ($edge->{type} eq "narfile") { + $downloadSize += $edge->{info}->{size} || 0; + } } - } - print "$downloadSize\n"; + print "$downloadSize\n"; - my $narSize = $info->{narSize} || 0; - print "$narSize\n"; + my $narSize = $info->{narSize} || 0; + print "$narSize\n"; + } + + print "\n"; } else { die "unknown command `$cmd'"; } diff --git 
a/src/libstore/globals.cc b/src/libstore/globals.cc index f660ed68df0d..9636bf49d987 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -155,8 +155,8 @@ void setDefaultsFromEnvironment() string subs = getEnv("NIX_SUBSTITUTERS", "default"); if (subs == "default") { - //substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl"); - //substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl"); + substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl"); + substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl"); substituters.push_back(nixLibexecDir + "/nix/substituters/download-from-binary-cache.pl"); } else substituters = tokenizeString(subs, ":"); -- cgit 1.4.1 From e4d6bcb6cdc34d204ccf49e137dd5070f664c523 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Jul 2012 18:52:23 -0400 Subject: Update release notes --- doc/manual/release-notes.xml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/manual/release-notes.xml b/doc/manual/release-notes.xml index 66ced1c9c429..9a5449def23e 100644 --- a/doc/manual/release-notes.xml +++ b/doc/manual/release-notes.xml @@ -35,6 +35,11 @@ false. + + When using the Nix daemon, the flag in + nix-env -qa is now much faster. 
+ + -- cgit 1.4.1 From a6f348599c94d8a5f7b41c7d8e43658dc6407be7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 17 Jul 2012 16:19:40 -0400 Subject: Print some debug output --- scripts/download-from-binary-cache.pl.in | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index ca3ff807be84..6f4b55766de2 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -18,6 +18,8 @@ my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, my $maxParallelRequests = int($Nix::Config::config{"binary-caches-parallel-connections"} // 150); $maxParallelRequests = 1 if $maxParallelRequests < 1; +my $debug = ($ENV{"NIX_DEBUG_SUBST"} // "") eq 1; + my ($dbh, $insertNAR, $queryNAR, $insertNARExistence, $queryNARExistence); my %cacheIds; @@ -34,7 +36,7 @@ sub addRequest { my $curl = WWW::Curl::Easy->new; my $curlId = $curlIdCount++; - $requests{$curlId} = { storePath => $storePath, url => $url, handle => $curl, content => "" }; + $requests{$curlId} = { storePath => $storePath, url => $url, handle => $curl, content => "", type => $head ? 
"HEAD" : "GET" }; $curl->setopt(CURLOPT_PRIVATE, $curlId); $curl->setopt(CURLOPT_URL, $url); @@ -68,12 +70,15 @@ sub processRequests { if ($curlm->perform() != $activeRequests) { while (my ($id, $result) = $curlm->info_read) { if ($id) { - my $handle = $requests{$id}->{handle}; - $requests{$id}->{result} = $result; - $requests{$id}->{httpStatus} = $handle->getinfo(CURLINFO_HTTP_CODE); - #print STDERR "\nRequest completed ($id, $result, $requests{$id}->{httpStatus})\n"; + my $request = $requests{$id} or die; + my $handle = $request->{handle}; + $request->{result} = $result; + $request->{httpStatus} = $handle->getinfo(CURLINFO_HTTP_CODE); + + print STDERR "$request->{type} on $request->{url} [$request->{result}, $request->{httpStatus}]\n" if $debug; + $activeRequests--; - delete $requests{$id}->{handle}; + delete $request->{handle}; if (scalar(keys %scheduled) > 0) { my $id2 = (keys %scheduled)[0]; @@ -81,7 +86,6 @@ sub processRequests { $activeRequests++; delete $scheduled{$id2}; } - } } } @@ -189,7 +193,10 @@ sub processNARInfo { my $compression = "bzip2"; my @refs; foreach my $line (split "\n", $request->{content}) { - $line =~ /^(.*): (.*)$/ or return undef; + unless ($line =~ /^(.*): (.*)$/) { + print STDERR "bad NAR info file ‘$request->{url}’\n"; + return undef; + } if ($1 eq "StorePath") { $storePath2 = $2; } elsif ($1 eq "URL") { $url = $2; } elsif ($1 eq "Compression") { $compression = $2; } -- cgit 1.4.1 From aa115e22df1c80e8878237a9e704d7d70783a243 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 18 Jul 2012 11:01:17 -0400 Subject: download-from-binary-cache: Print correct URL --- scripts/download-from-binary-cache.pl.in | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 6f4b55766de2..5fb0419f181a 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -213,6 +213,8 @@ sub processNARInfo { 
print STDERR "bad NAR info file ‘$request->{url}’\n"; return undef; } + + # FIXME: validate $url etc. for security. # Cache the result. $insertNAR->execute( @@ -417,8 +419,9 @@ sub downloadBinary { print STDERR "unknown compression method ‘$info->{compression}’\n"; next; } - print STDERR "\n*** Downloading ‘$info->{url}’ into ‘$storePath’...\n"; - if (system("$Nix::Config::curl --fail --location $binaryCacheUrl/$info->{url} | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) { + my $url = "$binaryCacheUrl/$info->{url}"; # FIXME: handle non-relative URLs + print STDERR "\n*** Downloading ‘$url’ into ‘$storePath’...\n"; + if (system("$Nix::Config::curl --fail --location '$url' | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) { die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0; next; } -- cgit 1.4.1 From dbce685e91c513341dedf8c1a916ef4c62f5650a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Jul 2012 17:10:28 -0400 Subject: Add some .gitignore entries --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 9160fe43d1ec..dd8abf2d2254 100644 --- a/.gitignore +++ b/.gitignore @@ -35,8 +35,11 @@ Makefile.in # /doc/manual/ /doc/manual/manual.html +/doc/manual/manual.xmli +/doc/manual/manual.pdf /doc/manual/manual.is-valid /doc/manual/*.1 +/doc/manual/*.5 /doc/manual/*.8 /doc/manual/images /doc/manual/version.txt -- cgit 1.4.1 From 7892ad15ab4b6db0eee619a1fdd14fed129db252 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Jul 2012 17:11:11 -0400 Subject: download-from-binary-cache: Support file:// The file:// URI schema requires checking for errors in a more general way. Also, don't cache file:// lookups. 
--- scripts/download-from-binary-cache.pl.in | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 5fb0419f181a..10d0db9adbcf 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -45,6 +45,7 @@ sub addRequest { $curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle; $curl->setopt(CURLOPT_USERAGENT, "Nix/$Nix::Config::version"); $curl->setopt(CURLOPT_NOBODY, 1) if $head; + $curl->setopt(CURLOPT_FAILONERROR, 1); if ($activeRequests >= $maxParallelRequests) { $scheduled{$curlId} = 1; @@ -73,7 +74,7 @@ sub processRequests { my $request = $requests{$id} or die; my $handle = $request->{handle}; $request->{result} = $result; - $request->{httpStatus} = $handle->getinfo(CURLINFO_HTTP_CODE); + $request->{httpStatus} = $handle->getinfo(CURLINFO_RESPONSE_CODE); print STDERR "$request->{type} on $request->{url} [$request->{result}, $request->{httpStatus}]\n" if $debug; @@ -179,12 +180,13 @@ sub processNARInfo { my $cacheId = getCacheId($binaryCacheUrl); - if ($request->{result} != 0 || $request->{httpStatus} != 200) { - if ($request->{httpStatus} != 404) { + if ($request->{result} != 0) { + if ($request->{result} != 37 && $request->{httpStatus} != 404) { print STDERR "could not download ‘$request->{url}’ (" . ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n"; } else { - $insertNARExistence->execute($cacheId, basename($storePath), 0, time()); + $insertNARExistence->execute($cacheId, basename($storePath), 0, time()) + unless $request->{url} =~ /^file:/; } return undef; } @@ -219,7 +221,8 @@ sub processNARInfo { # Cache the result. 
$insertNAR->execute( $cacheId, basename($storePath), $url, $compression, $fileHash, $fileSize, - $narHash, $narSize, join(" ", @refs), $deriver, $system, time()); + $narHash, $narSize, join(" ", @refs), $deriver, $system, time()) + unless $request->{url} =~ /^file:/; return { url => $url @@ -378,16 +381,18 @@ sub printSubstitutablePaths { processRequests; foreach my $request (values %requests) { - if ($request->{result} != 0 || $request->{httpStatus} != 200) { - if ($request->{httpStatus} != 404) { + if ($request->{result} != 0) { + if ($request->{result} != 37 && $request->{httpStatus} != 404) { print STDERR "could not check ‘$request->{url}’ (" . ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n"; } else { - $insertNARExistence->execute($cacheId, basename($request->{storePath}), 0, time()); + $insertNARExistence->execute($cacheId, basename($request->{storePath}), 0, time()) + unless $request->{url} =~ /^file:/; } push @left2, $request->{storePath}; } else { - $insertNARExistence->execute($cacheId, basename($request->{storePath}), 1, time()); + $insertNARExistence->execute($cacheId, basename($request->{storePath}), 1, time()) + unless $request->{url} =~ /^file:/; print "$request->{storePath}\n"; } } -- cgit 1.4.1 From 7861260a5ff33689b1b8f7a89489f5d5e5e4dfcb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Jul 2012 17:12:42 -0400 Subject: Clear NIX_STORE when running the tests --- tests/common.sh.in | 1 + tests/remote-store.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/common.sh.in b/tests/common.sh.in index 031a0d66d1b8..abefb7e25094 100644 --- a/tests/common.sh.in +++ b/tests/common.sh.in @@ -16,6 +16,7 @@ export NIX_DB_DIR=$TEST_ROOT/db export NIX_CONF_DIR=$TEST_ROOT/etc export NIX_MANIFESTS_DIR=$TEST_ROOT/var/nix/manifests export SHARED=$TEST_ROOT/shared +export NIX_REMOTE=$NIX_REMOTE_ export PATH=@bindir@:$PATH diff --git a/tests/remote-store.sh 
b/tests/remote-store.sh index e27631a006ea..ef289ab79a1a 100644 --- a/tests/remote-store.sh +++ b/tests/remote-store.sh @@ -3,7 +3,7 @@ source common.sh echo '*** testing slave mode ***' clearStore clearManifests -NIX_REMOTE=slave $SHELL ./user-envs.sh +NIX_REMOTE_=slave $SHELL ./user-envs.sh echo '*** testing daemon mode ***' clearStore -- cgit 1.4.1 From 609586a16de90f8964b9c494aad3c1526feb514f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Jul 2012 17:13:14 -0400 Subject: Add a test for the binary cache substituter --- tests/Makefile.am | 3 ++- tests/binary-cache.sh | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 tests/binary-cache.sh diff --git a/tests/Makefile.am b/tests/Makefile.am index a562db52bc57..641e29d7e7a7 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -9,7 +9,8 @@ TESTS = init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \ gc-runtime.sh install-package.sh check-refs.sh filter-source.sh \ remote-store.sh export.sh export-graph.sh negative-caching.sh \ binary-patching.sh timeout.sh secure-drv-outputs.sh nix-channel.sh \ - multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh + multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \ + binary-cache.sh XFAIL_TESTS = diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh new file mode 100644 index 000000000000..b4a3601ece9d --- /dev/null +++ b/tests/binary-cache.sh @@ -0,0 +1,19 @@ +source common.sh + +clearStore + +cacheDir=$TEST_ROOT/binary-cache +rm -rf $cacheDir + +outPath=$(nix-build dependencies.nix --no-out-link) + +nix-push --dest $cacheDir $outPath + +clearStore + +rm -f $NIX_STATE_DIR/binary-cache* + +NIX_BINARY_CACHES="file://$cacheDir" nix-store -r $outPath + +nix-store --check-validity $outPath +nix-store -qR $outPath | grep input-2 -- cgit 1.4.1 From e3ce54cceedb9a3144c4eccfbafd63ed765d8913 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Jul 2012 17:13:38 -0400 Subject: 
nix-push: Remove the upload feature --- scripts/nix-push.in | 75 +++++++++++++++++------------------------------------ 1 file changed, 24 insertions(+), 51 deletions(-) diff --git a/scripts/nix-push.in b/scripts/nix-push.in index 35ad43d283bb..45a0695a64d7 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -24,22 +24,15 @@ $curl = "$curl $extraCurlFlags" if defined $extraCurlFlags; # Parse the command line. my $compressionType = "xz"; my $force = 0; - -my $localCopy; -my $localArchivesDir; - -my $archivesPutURL; -my $archivesGetURL; - +my $destDir; my @roots; sub showSyntax { print STDERR <) { close READ or die "nix-build failed: $?"; -# Upload the archives and the corresponding info files. -print STDERR "uploading/copying archives...\n"; +# Copy the archives and the corresponding info files. +print STDERR "copying archives...\n"; my $totalNarSize = 0; my $totalCompressedSize = 0; @@ -183,24 +169,15 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { printf STDERR "%s [%.2f MiB, %.1f%%]\n", $storePath, $compressedSize / (1024 * 1024), $compressedSize / $narSize * 100; - # Upload the compressed NAR. - if ($localCopy) { - my $dst = "$localArchivesDir/$narName"; - if (! -f $dst) { - my $tmp = "$localArchivesDir/.tmp.$$.$narName"; - copy($narFile, $tmp) or die "cannot copy $narFile to $tmp: $!\n"; - rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n"; - } - } else { - die "unimplemented"; - #if (!archiveExists("$basename")) { - # system("$curl --show-error --upload-file " . - # "'$narArchive' '$archivesPutURL/$basename' > /dev/null") == 0 or - # die "curl failed on $narArchive: $?"; - #} + # Copy the compressed NAR. + my $dst = "$destDir/$narName"; + if (! -f $dst) { + my $tmp = "$destDir/.tmp.$$.$narName"; + copy($narFile, $tmp) or die "cannot copy $narFile to $tmp: $!\n"; + rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n"; } - # Upload the info file. + # Write the info file. 
my $info; $info .= "StorePath: $storePath\n"; $info .= "URL: $narName\n"; @@ -220,17 +197,13 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { my $pathHash = substr(basename($storePath), 0, 32); - if ($localCopy) { - my $dst = "$localArchivesDir/$pathHash.narinfo"; - if ($force || ! -f $dst) { - my $tmp = "$localArchivesDir/.tmp.$$.$pathHash.narinfo"; - open INFO, ">$tmp" or die; - print INFO "$info" or die; - close INFO or die; - rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n"; - } - } else { - die "unimplemented"; + $dst = "$destDir/$pathHash.narinfo"; + if ($force || ! -f $dst) { + my $tmp = "$destDir/.tmp.$$.$pathHash.narinfo"; + open INFO, ">$tmp" or die; + print INFO "$info" or die; + close INFO or die; + rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n"; } } -- cgit 1.4.1 From 50395b71a90314abfcc39d8343dbaa8e9aa199a6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Jul 2012 17:36:07 -0400 Subject: Fix the substituter tests --- src/libstore/local-store.cc | 3 ++- tests/substituter.sh | 31 +++++++++++++++++-------------- tests/substituter2.sh | 32 +++++++++++++++++--------------- 3 files changed, 36 insertions(+), 30 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index ebfcc946716a..58ce691ebb07 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -999,7 +999,8 @@ void LocalStore::querySubstitutablePathInfos(const Path & substituter, while (true) { Path path = readLine(run.from); if (path == "") break; - assert(paths.find(path) != paths.end()); + if (paths.find(path) == paths.end()) + throw Error(format("got unexpected path `%1%' from substituter") % path); paths.erase(path); SubstitutablePathInfo & info(infos[path]); info.deriver = readLine(run.from); diff --git a/tests/substituter.sh b/tests/substituter.sh index 65c4fa1856d0..a6bdacfd66f7 100755 --- a/tests/substituter.sh +++ b/tests/substituter.sh @@ -2,22 +2,25 @@ echo substituter args: $* >&2 if test $1 = 
"--query"; then - while read cmd; do - echo FOO $cmd >&2 + while read cmd args; do + echo "CMD = $cmd, ARGS = $args" >&2 if test "$cmd" = "have"; then - read path - if grep -q "$path" $TEST_ROOT/sub-paths; then - echo 1 - else - echo 0 - fi + for path in $args; do + read path + if grep -q "$path" $TEST_ROOT/sub-paths; then + echo $path + fi + done + echo elif test "$cmd" = "info"; then - read path - echo 1 - echo "" # deriver - echo 0 # nr of refs - echo $((1 * 1024 * 1024)) # download size - echo $((2 * 1024 * 1024)) # nar size + for path in $args; do + echo $path + echo "" # deriver + echo 0 # nr of refs + echo $((1 * 1024 * 1024)) # download size + echo $((2 * 1024 * 1024)) # nar size + done + echo else echo "bad command $cmd" exit 1 diff --git a/tests/substituter2.sh b/tests/substituter2.sh index c56a1bc47b5c..34b2c0eafaf8 100755 --- a/tests/substituter2.sh +++ b/tests/substituter2.sh @@ -2,21 +2,23 @@ echo substituter2 args: $* >&2 if test $1 = "--query"; then - while read cmd; do - if test "$cmd" = "have"; then - read path - if grep -q "$path" $TEST_ROOT/sub-paths; then - echo 1 - else - echo 0 - fi - elif test "$cmd" = "info"; then - read path - echo 1 - echo "" # deriver - echo 0 # nr of refs - echo 0 # download size - echo 0 # nar size + while read cmd args; do + if test "$cmd" = have; then + for path in $args; do + if grep -q "$path" $TEST_ROOT/sub-paths; then + echo $path + fi + done + echo + elif test "$cmd" = info; then + for path in $args; do + echo $path + echo "" # deriver + echo 0 # nr of refs + echo 0 # download size + echo 0 # nar size + done + echo else echo "bad command $cmd" exit 1 -- cgit 1.4.1 From 67c6f3eded7dcb7c79243ed41f177c960f2b6aad Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Jul 2012 18:28:12 -0400 Subject: nix-push: Support generating a manifest again This makes all the tests succeed. Woohoo! 
--- scripts/nix-push.in | 44 ++++++++++++++++++++++++++++++++++---------- tests/binary-patching.sh | 9 ++++++--- tests/install-package.sh | 2 +- tests/nix-channel.sh | 2 +- tests/nix-pull.sh | 2 +- tests/nix-push.sh | 4 +++- 6 files changed, 46 insertions(+), 17 deletions(-) diff --git a/scripts/nix-push.in b/scripts/nix-push.in index 45a0695a64d7..39fdd6da9e39 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -8,31 +8,28 @@ use File::stat; use File::Copy; use Nix::Config; use Nix::Store; - -my $hashAlgo = "sha256"; +use Nix::Manifest; my $tmpDir = tempdir("nix-push.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die "cannot create a temporary directory"; my $nixExpr = "$tmpDir/create-nars.nix"; -my $curl = "$Nix::Config::curl --fail --silent"; -my $extraCurlFlags = ${ENV{'CURL_FLAGS'}}; -$curl = "$curl $extraCurlFlags" if defined $extraCurlFlags; - # Parse the command line. my $compressionType = "xz"; my $force = 0; my $destDir; +my $writeManifest = 0; +my $archivesURL; my @roots; sub showSyntax { print STDERR < "$archivesURL/$narName" + , hash => "sha256:$compressedHash" + , size => $compressedSize + , narHash => "$narHash" + , narSize => $narSize + , references => join(" ", @{$refs}) + , deriver => $deriver + } + ] if $writeManifest; } printf STDERR "total compressed size %.2f MiB, %.1f%%\n", $totalCompressedSize / (1024 * 1024), $totalCompressedSize / $totalNarSize * 100; + + +# Optionally write a manifest. +if ($writeManifest) { + writeManifest "$destDir/MANIFEST", \%narFiles, \(); +} diff --git a/tests/binary-patching.sh b/tests/binary-patching.sh index 8c52c2f1421b..188be109a0b5 100644 --- a/tests/binary-patching.sh +++ b/tests/binary-patching.sh @@ -7,14 +7,17 @@ mkdir -p $TEST_ROOT/cache2 $TEST_ROOT/patches RESULT=$TEST_ROOT/result # Build version 1 and 2 of the "foo" package. 
-nix-push --copy $TEST_ROOT/cache2 $TEST_ROOT/manifest1 \ +nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 \ $(nix-build -o $RESULT binary-patching.nix --arg version 1) +mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest1 out2=$(nix-build -o $RESULT binary-patching.nix --arg version 2) -nix-push --copy $TEST_ROOT/cache2 $TEST_ROOT/manifest2 $out2 +nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 $out2 +mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest2 out3=$(nix-build -o $RESULT binary-patching.nix --arg version 3) -nix-push --copy $TEST_ROOT/cache2 $TEST_ROOT/manifest3 $out3 +nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 $out3 +mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest3 rm $RESULT diff --git a/tests/install-package.sh b/tests/install-package.sh index b818eda121cf..653dfee4c8d1 100644 --- a/tests/install-package.sh +++ b/tests/install-package.sh @@ -9,7 +9,7 @@ clearStore clearProfiles cat > $TEST_ROOT/foo.nixpkg < Date: Fri, 27 Jul 2012 09:57:42 -0400 Subject: download-from-binary-cache: Only use the default cache for /nix/store --- scripts/download-from-binary-cache.pl.in | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 10d0db9adbcf..93692240051d 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -13,7 +13,9 @@ use strict; Nix::Config::readConfig; my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, - ($ENV{"NIX_BINARY_CACHES"} // $Nix::Config::config{"binary-caches"} // "http://nixos.org/binary-cache")); + ($ENV{"NIX_BINARY_CACHES"} + // $Nix::Config::config{"binary-caches"} + // ($Nix::Config::storeDir eq "/nix/store" ? 
"http://nixos.org/binary-cache" : ""))); my $maxParallelRequests = int($Nix::Config::config{"binary-caches-parallel-connections"} // 150); $maxParallelRequests = 1 if $maxParallelRequests < 1; -- cgit 1.4.1 From b4ea83249b40dd910daa6a8ee32f13e023e9c858 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 27 Jul 2012 09:59:18 -0400 Subject: Remove trailing whitespace / tabs --- scripts/download-from-binary-cache.pl.in | 34 ++-- src/libstore/build.cc | 260 +++++++++++++++---------------- 2 files changed, 147 insertions(+), 147 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 93692240051d..f22fbb4e9dac 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -35,7 +35,7 @@ my $caBundle = $ENV{"CURL_CA_BUNDLE"} // $ENV{"OPENSSL_X509_CERT_FILE"}; sub addRequest { my ($storePath, $url, $head) = @_; - + my $curl = WWW::Curl::Easy->new; my $curlId = $curlIdCount++; $requests{$curlId} = { storePath => $storePath, url => $url, handle => $curl, content => "", type => $head ? 
"HEAD" : "GET" }; @@ -69,7 +69,7 @@ sub processRequests { if (scalar @{$rfds} + scalar @{$wfds} + scalar @{$efds} > 0) { IO::Select->select(IO::Select->new(@{$rfds}), IO::Select->new(@{$wfds}), IO::Select->new(@{$efds}), 0.1); } - + if ($curlm->perform() != $activeRequests) { while (my ($id, $result) = $curlm->info_read) { if ($id) { @@ -77,9 +77,9 @@ sub processRequests { my $handle = $request->{handle}; $request->{result} = $result; $request->{httpStatus} = $handle->getinfo(CURLINFO_RESPONSE_CODE); - + print STDERR "$request->{type} on $request->{url} [$request->{result}, $request->{httpStatus}]\n" if $debug; - + $activeRequests--; delete $request->{handle}; @@ -115,7 +115,7 @@ sub initCache { url text unique not null ); EOF - + $dbh->do(<{result} != 0) { @@ -192,7 +192,7 @@ sub processNARInfo { } return undef; } - + my ($storePath2, $url, $fileHash, $fileSize, $narHash, $narSize, $deriver, $system); my $compression = "bzip2"; my @refs; @@ -219,13 +219,13 @@ sub processNARInfo { } # FIXME: validate $url etc. for security. - + # Cache the result. $insertNAR->execute( $cacheId, basename($storePath), $url, $compression, $fileHash, $fileSize, $narHash, $narSize, join(" ", @refs), $deriver, $system, time()) unless $request->{url} =~ /^file:/; - + return { url => $url , compression => $compression @@ -242,10 +242,10 @@ sub processNARInfo { sub getCacheId { my ($binaryCacheUrl) = @_; - + my $cacheId = $cacheIds{$binaryCacheUrl}; return $cacheId if defined $cacheId; - + # FIXME: not atomic. 
my @res = @{$dbh->selectcol_arrayref("select id from BinaryCaches where url = ?", {}, $binaryCacheUrl)}; if (scalar @res == 1) { @@ -267,8 +267,8 @@ sub getCachedInfoFrom { $queryNAR->execute(getCacheId($binaryCacheUrl), basename($storePath)); my $res = $queryNAR->fetchrow_hashref(); return undef unless defined $res; - - return + + return { url => $res->{url} , compression => $res->{compression} , fileHash => $res->{fileHash} @@ -379,7 +379,7 @@ sub printSubstitutablePaths { } addRequest($storePath, infoUrl($binaryCacheUrl, $storePath), 1); } - + processRequests; foreach my $request (values %requests) { @@ -406,7 +406,7 @@ sub printSubstitutablePaths { sub downloadBinary { my ($storePath) = @_; - + foreach my $binaryCacheUrl (@binaryCacheUrls) { my $info = getCachedInfoFrom($storePath, $binaryCacheUrl); @@ -418,7 +418,7 @@ sub downloadBinary { } next unless defined $info; - + my $decompressor; if ($info->{compression} eq "bzip2") { $decompressor = "$Nix::Config::bzip2 -d"; } elsif ($info->{compression} eq "xz") { $decompressor = "$Nix::Config::xz -d"; } @@ -455,7 +455,7 @@ if ($ARGV[0] eq "--query") { while () { chomp; my ($cmd, @args) = split " ", $_; - + if ($cmd eq "have") { printSubstitutablePaths(@args); print "\n"; diff --git a/src/libstore/build.cc b/src/libstore/build.cc index c57a63db69dd..a5658e2fa7fb 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -64,7 +64,7 @@ namespace nix { using std::map; - + static string pathNullDevice = "/dev/null"; @@ -95,9 +95,9 @@ class Goal : public boost::enable_shared_from_this { public: typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters} ExitCode; - + protected: - + /* Backlink to the worker. */ Worker & worker; @@ -155,7 +155,7 @@ public: { return name; } - + ExitCode getExitCode() { return exitCode; @@ -217,7 +217,7 @@ private: /* Goals waiting for busy paths to be unlocked. */ WeakGoals waitingForAnyGoal; - + /* Goals sleeping for a few seconds (polling a lock). 
*/ WeakGoals waitingForAWhile; @@ -238,7 +238,7 @@ public: LocalStore & store; boost::shared_ptr hook; - + Worker(LocalStore & store); ~Worker(); @@ -275,13 +275,13 @@ public: /* Wait for any goal to finish. Pretty indiscriminate way to wait for some resource that some other goal is holding. */ void waitForAnyGoal(GoalPtr goal); - + /* Wait for a few seconds and then retry this goal. Used when waiting for a lock held by another process. This kind of polling is inefficient, but POSIX doesn't really provide a way to wait for multiple locks in the main select() loop. */ void waitForAWhile(GoalPtr goal); - + /* Loop until the specified top-level goals have finished. */ void run(const Goals & topGoals); @@ -309,11 +309,11 @@ void Goal::waiteeDone(GoalPtr waitee, ExitCode result) trace(format("waitee `%1%' done; %2% left") % waitee->name % waitees.size()); - + if (result == ecFailed || result == ecNoSubstituters) ++nrFailed; if (result == ecNoSubstituters) ++nrNoSubstituters; - + if (waitees.empty() || (result == ecFailed && !keepGoing)) { /* If we failed and keepGoing is not set, we remove all @@ -366,12 +366,12 @@ void commonChildInit(Pipe & logPipe) terminal signals. */ if (setsid() == -1) throw SysError(format("creating a new session")); - + /* Dup the write side of the logger pipe into stderr. */ if (dup2(logPipe.writeSide, STDERR_FILENO) == -1) throw SysError("cannot pipe standard error into log file"); logPipe.readSide.close(); - + /* Dup stderr to stdout. */ if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1) throw SysError("cannot dup stderr into stdout"); @@ -400,7 +400,7 @@ const char * * strings2CharPtrs(const Strings & ss) /* Restore default handling of SIGPIPE, otherwise some programs will randomly say "Broken pipe". 
*/ -static void restoreSIGPIPE() +static void restoreSIGPIPE() { struct sigaction act, oact; act.sa_handler = SIG_DFL; @@ -427,7 +427,7 @@ private: string user; uid_t uid; gid_t gid; - + public: UserLock(); ~UserLock(); @@ -442,7 +442,7 @@ public: uid_t getGID() { return gid; } bool enabled() { return uid != 0; } - + }; @@ -497,13 +497,13 @@ void UserLock::acquire() % *i % buildUsersGroup); createDirs(nixStateDir + "/userpool"); - + fnUserLock = (format("%1%/userpool/%2%") % nixStateDir % pw->pw_uid).str(); if (lockedPaths.find(fnUserLock) != lockedPaths.end()) /* We already have a lock on this one. */ continue; - + AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT, 0600); if (fd == -1) throw SysError(format("opening user lock `%1%'") % fnUserLock); @@ -519,7 +519,7 @@ void UserLock::acquire() if (uid == getuid() || uid == geteuid()) throw Error(format("the Nix user should not be a member of `%1%'") % buildUsersGroup); - + return; } } @@ -546,7 +546,7 @@ static void runSetuidHelper(const string & command, { Path program = getEnv("NIX_SETUID_HELPER", nixLibexecDir + "/nix-setuid-helper"); - + /* Fork. */ Pid pid; pid = fork(); @@ -564,7 +564,7 @@ static void runSetuidHelper(const string & command, args.push_back(0); restoreSIGPIPE(); - + execve(program.c_str(), (char * *) &args[0], 0); throw SysError(format("executing `%1%'") % program); } @@ -650,7 +650,7 @@ struct HookInstance /* Pipe for the builder's standard output/error. */ Pipe builderOut; - + /* The process ID of the hook. */ Pid pid; @@ -663,12 +663,12 @@ struct HookInstance HookInstance::HookInstance() { debug("starting build hook"); - + Path buildHook = absPath(getEnv("NIX_BUILD_HOOK")); - + /* Create a pipe to get the output of the child. */ fromHook.create(); - + /* Create the communication pipes. */ toHook.create(); @@ -678,7 +678,7 @@ HookInstance::HookInstance() /* Fork the hook. 
*/ pid = fork(); switch (pid) { - + case -1: throw SysError("unable to fork"); @@ -688,7 +688,7 @@ HookInstance::HookInstance() commonChildInit(fromHook); if (chdir("/") == -1) throw SysError("changing into `/"); - + /* Dup the communication pipes. */ toHook.writeSide.close(); if (dup2(toHook.readSide, STDIN_FILENO) == -1) @@ -699,20 +699,20 @@ HookInstance::HookInstance() if (dup2(builderOut.writeSide, 4) == -1) throw SysError("dupping builder's stdout/stderr"); - /* XXX: Pass `buildTimeout' to the hook? */ + /* XXX: Pass `buildTimeout' to the hook? */ execl(buildHook.c_str(), buildHook.c_str(), thisSystem.c_str(), (format("%1%") % maxSilentTime).str().c_str(), (format("%1%") % printBuildTrace).str().c_str(), NULL); - + throw SysError(format("executing `%1%'") % buildHook); - + } catch (std::exception & e) { std::cerr << format("build hook error: %1%") % e.what() << std::endl; } quickExit(1); } - + /* parent */ pid.setSeparatePG(true); pid.setKillSignal(SIGTERM); @@ -752,7 +752,7 @@ private: /* The derivation stored at drvPath. */ Derivation drv; - + /* The remainder is state held during the build. */ /* Locks on the output paths. */ @@ -760,7 +760,7 @@ private: /* All input paths (that is, the union of FS closures of the immediate input paths). */ - PathSet inputPaths; + PathSet inputPaths; /* Referenceable paths (i.e., input and output paths). */ PathSet allPaths; @@ -784,10 +784,10 @@ private: /* The build hook. */ boost::shared_ptr hook; - + /* Whether we're currently doing a chroot build. */ bool useChroot; - + Path chrootRootDir; /* RAII object to delete the chroot directory. */ @@ -798,10 +798,10 @@ private: /* Whether this is a fixed-output derivation. */ bool fixedOutput; - + typedef void (DerivationGoal::*GoalState)(); GoalState state; - + /* Stuff we need to pass to initChild(). 
*/ PathSet dirsInChroot; typedef map Environment; @@ -812,7 +812,7 @@ public: ~DerivationGoal(); void cancel(); - + void work(); Path getDrvPath() @@ -917,7 +917,7 @@ void DerivationGoal::killChild() pid.wait(true); } else pid.kill(); - + assert(pid == -1); } @@ -968,7 +968,7 @@ void DerivationGoal::haveDerivation() side: if the user forgot to make it a root, we wouldn't want things being garbage collected while we're busy. */ worker.store.addTempRoot(drvPath); - + assert(worker.store.isValidPath(drvPath)); /* Get the derivation. */ @@ -990,14 +990,14 @@ void DerivationGoal::haveDerivation() don't bother. */ foreach (PathSet::iterator, i, invalidOutputs) if (pathFailed(*i)) return; - + /* We are first going to try to create the invalid output paths through substitutes. If that doesn't work, we'll build them. */ if (queryBoolSetting("build-use-substitutes", true)) foreach (PathSet::iterator, i, invalidOutputs) addWaitee(worker.makeSubstitutionGoal(*i)); - + if (waitees.empty()) /* to prevent hang (no wake-up event) */ outputsSubstituted(); else @@ -1051,7 +1051,7 @@ void DerivationGoal::inputsRealised() /* Gather information necessary for computing the closure and/or running the build hook. */ - + /* The outputs are referenceable paths. */ foreach (DerivationOutputs::iterator, i, drv.outputs) { debug(format("building path `%1%'") % i->second.path); @@ -1088,7 +1088,7 @@ void DerivationGoal::inputsRealised() fixedOutput = true; foreach (DerivationOutputs::iterator, i, drv.outputs) if (i->second.hash == "") fixedOutput = false; - + /* Okay, try to build. Note that here we don't wait for a build slot to become available, since we don't need one if there is a build hook. 
*/ @@ -1108,7 +1108,7 @@ PathSet outputPaths(const DerivationOutputs & outputs) static bool canBuildLocally(const string & platform) { - return platform == thisSystem + return platform == thisSystem #ifdef CAN_DO_LINUX32_BUILDS || (platform == "i686-linux" && thisSystem == "x86_64-linux") #endif @@ -1132,7 +1132,7 @@ void DerivationGoal::tryToBuild() worker.waitForAnyGoal(shared_from_this()); return; } - + /* Obtain locks on all output paths. The locks are automatically released when we exit this function or Nix crashes. If we can't acquire the lock, then continue; hopefully some other @@ -1207,7 +1207,7 @@ void DerivationGoal::tryToBuild() break; } } - + /* Make sure that we are allowed to start a build. If this derivation prefers to be done locally, do it even if maxBuildJobs is 0. */ @@ -1217,7 +1217,7 @@ void DerivationGoal::tryToBuild() outputLocks.unlock(); return; } - + try { /* Okay, we have to build. */ @@ -1265,7 +1265,7 @@ void DerivationGoal::buildDone() /* So the child is gone now. */ worker.childTerminated(savedPid); - + /* Close the read side of the logger pipe. */ if (hook) { hook->builderOut.readSide.close(); @@ -1295,13 +1295,13 @@ void DerivationGoal::buildDone() if (rename((chrootRootDir + path).c_str(), path.c_str()) == -1) throw SysError(format("moving build output `%1%' from the chroot to the Nix store") % path); } - + if (!pathExists(path)) continue; struct stat st; if (lstat(path.c_str(), &st) == -1) throw SysError(format("getting attributes of path `%1%'") % path); - + #ifndef __CYGWIN__ /* Check that the output is not group or world writable, as that means that someone else can have interfered @@ -1319,14 +1319,14 @@ void DerivationGoal::buildDone() if (buildUser.enabled() && !amPrivileged()) getOwnership(path); } - + /* Check the exit status. 
*/ if (!statusOk(status)) { deleteTmpDir(false); throw BuildError(format("builder for `%1%' %2%") % drvPath % statusToString(status)); } - + deleteTmpDir(true); /* Delete the chroot (if we were using one). */ @@ -1336,7 +1336,7 @@ void DerivationGoal::buildDone() hard-linked inputs to be cleared. So set them again. */ foreach (PathSet::iterator, i, regularInputPaths) makeImmutable(*i); - + /* Compute the FS closure of the outputs and register them as being valid. */ computeClosure(); @@ -1358,7 +1358,7 @@ void DerivationGoal::buildDone() problem. */ bool hookError = hook && (!WIFEXITED(status) || WEXITSTATUS(status) != 100); - + if (printBuildTrace) { if (hook && hookError) printMsg(lvlError, format("@ hook-failed %1% %2% %3% %4%") @@ -1378,7 +1378,7 @@ void DerivationGoal::buildDone() if (worker.cacheFailure && !hookError && !fixedOutput) foreach (DerivationOutputs::iterator, i, drv.outputs) worker.store.registerFailedPath(i->second.path); - + worker.permanentFailure = !hookError && !fixedOutput; amDone(ecFailed); return; @@ -1391,7 +1391,7 @@ void DerivationGoal::buildDone() printMsg(lvlError, format("@ build-succeeded %1% %2%") % drvPath % drv.outputs["out"].path); } - + amDone(ecSuccess); } @@ -1439,7 +1439,7 @@ HookReply DerivationGoal::tryBuildHook() hook = worker.hook; worker.hook.reset(); - + /* Tell the hook all the inputs that have to be copied to the remote system. This unfortunately has to contain the entire derivation closure to ensure that the validity invariant holds @@ -1448,18 +1448,18 @@ HookReply DerivationGoal::tryBuildHook() PathSet allInputs; allInputs.insert(inputPaths.begin(), inputPaths.end()); computeFSClosure(worker.store, drvPath, allInputs); - + string s; foreach (PathSet::iterator, i, allInputs) s += *i + " "; writeLine(hook->toHook.writeSide, s); - + /* Tell the hooks the outputs that have to be copied back from the remote system. 
*/ s = ""; foreach (DerivationOutputs::iterator, i, drv.outputs) s += i->second.path + " "; writeLine(hook->toHook.writeSide, s); - + hook->toHook.writeSide.close(); /* Create the log file and pipe. */ @@ -1469,12 +1469,12 @@ HookReply DerivationGoal::tryBuildHook() fds.insert(hook->fromHook.readSide); fds.insert(hook->builderOut.readSide); worker.childStarted(shared_from_this(), hook->pid, fds, false, false); - + if (printBuildTrace) printMsg(lvlError, format("@ build-started %1% %2% %3% %4%") % drvPath % drv.outputs["out"].path % drv.platform % logFile); - - return rpAccept; + + return rpAccept; } @@ -1496,7 +1496,7 @@ void DerivationGoal::startBuilder() { startNest(nest, lvlInfo, format("building path(s) %1%") % showPaths(outputPaths(drv.outputs))) - + /* Right platform? */ if (!canBuildLocally(drv.platform)) throw Error( @@ -1504,7 +1504,7 @@ void DerivationGoal::startBuilder() % drv.platform % thisSystem % drvPath); /* Construct the environment passed to the builder. */ - + /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when PATH is not set. We don't want this, so we fill it in with some dummy value. */ @@ -1614,7 +1614,7 @@ void DerivationGoal::startBuilder() worker.store.makeValidityRegistration(paths, false, false)); } - + /* If `build-users-group' is not empty, then we have to build as one of the members of that group. */ if (haveBuildUsers()) { @@ -1625,7 +1625,7 @@ void DerivationGoal::startBuilder() /* Make sure that no other processes are executing under this uid. */ buildUser.kill(); - + /* Change ownership of the temporary build directory, if we're root. If we're not root, then the setuid helper will do it just before it starts the builder. */ @@ -1674,7 +1674,7 @@ void DerivationGoal::startBuilder() /* Clean up the chroot directory automatically. 
*/ autoDelChroot = boost::shared_ptr(new AutoDelete(chrootRootDir)); - + printMsg(lvlChatty, format("setting up chroot environment in `%1%'") % chrootRootDir); /* Create a writable /tmp in the chroot. Many builders need @@ -1696,8 +1696,8 @@ void DerivationGoal::startBuilder() % (buildUser.enabled() ? buildUser.getUID() : getuid()) % (buildUser.enabled() ? buildUser.getGID() : getgid())).str()); - /* Declare the build user's group so that programs get a consistent - view of the system (e.g., "id -gn"). */ + /* Declare the build user's group so that programs get a consistent + view of the system (e.g., "id -gn"). */ writeFile(chrootRootDir + "/etc/group", (format("nixbld:!:%1%:\n") % (buildUser.enabled() ? buildUser.getGID() : getgid())).str()); @@ -1738,7 +1738,7 @@ void DerivationGoal::startBuilder() /* Creating a hard link to *i is impossible if its immutable bit is set. So clear it first. */ makeMutable(*i); - + Path p = chrootRootDir + *i; if (link(i->c_str(), p.c_str()) == -1) { /* Hard-linking fails if we exceed the maximum @@ -1755,25 +1755,25 @@ void DerivationGoal::startBuilder() StringSource source(sink.s); restorePath(p, source); } - + makeImmutable(*i); regularInputPaths.insert(*i); } } - + #else throw Error("chroot builds are not supported on this platform"); #endif } - - + + /* Run the builder. */ printMsg(lvlChatty, format("executing builder `%1%'") % drv.builder); /* Create the log file. */ Path logFile = openLogFile(); - + /* Create a pipe to get the output of the builder. */ builderOut.create(); @@ -1793,7 +1793,7 @@ void DerivationGoal::startBuilder() - The private mount namespace ensures that all the bind mounts we do will only show up in this process and its children, and will disappear automatically when we're done. - + - The private network namespace ensures that the builder cannot talk to the outside world (or vice versa). It only has a private loopback interface. 
@@ -1845,7 +1845,7 @@ void DerivationGoal::initChild() /* Initialise the loopback interface. */ AutoCloseFD fd(socket(PF_INET, SOCK_DGRAM, IPPROTO_IP)); if (fd == -1) throw SysError("cannot open IP socket"); - + struct ifreq ifr; strcpy(ifr.ifr_name, "lo"); ifr.ifr_flags = IFF_UP | IFF_LOOPBACK | IFF_RUNNING; @@ -1867,9 +1867,9 @@ void DerivationGoal::initChild() Path source = *i; Path target = chrootRootDir + source; debug(format("bind mounting `%1%' to `%2%'") % source % target); - + createDirs(target); - + if (mount(source.c_str(), target.c_str(), "", MS_BIND, 0) == -1) throw SysError(format("bind mount from `%1%' to `%2%' failed") % source % target); } @@ -1884,7 +1884,7 @@ void DerivationGoal::initChild() if (pathExists("/dev/shm")) if (mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0, 0) == -1) throw SysError("mounting /dev/shm"); - + /* Do the chroot(). Below we do a chdir() to the temporary build directory to make sure the current directory is in the chroot. (Actually the order @@ -1894,9 +1894,9 @@ void DerivationGoal::initChild() throw SysError(format("cannot change root directory to `%1%'") % chrootRootDir); } #endif - + commonChildInit(builderOut); - + if (chdir(tmpDir.c_str()) == -1) throw SysError(format("changing into `%1%'") % tmpDir); @@ -1930,7 +1930,7 @@ void DerivationGoal::initChild() Path program = drv.builder.c_str(); std::vector args; /* careful with c_str()! */ string user; /* must be here for its c_str()! */ - + /* If we are running in `build-users' mode, then switch to the user we allocated above. Make sure that we drop all root privileges. 
Note that above we have closed all file @@ -1941,10 +1941,10 @@ void DerivationGoal::initChild() printMsg(lvlChatty, format("switching to user `%1%'") % buildUser.getUser()); if (amPrivileged()) { - + if (setgroups(0, 0) == -1) throw SysError("cannot clear the set of supplementary groups"); - + if (setgid(buildUser.getGID()) == -1 || getgid() != buildUser.getGID() || getegid() != buildUser.getGID()) @@ -1954,7 +1954,7 @@ void DerivationGoal::initChild() getuid() != buildUser.getUID() || geteuid() != buildUser.getUID()) throw SysError("setuid failed"); - + } else { /* Let the setuid helper take care of it. */ program = nixLibexecDir + "/nix-setuid-helper"; @@ -1965,7 +1965,7 @@ void DerivationGoal::initChild() args.push_back(drv.builder.c_str()); } } - + /* Fill in the arguments. */ string builderBasename = baseNameOf(drv.builder); args.push_back(builderBasename.c_str()); @@ -1980,7 +1980,7 @@ void DerivationGoal::initChild() throw SysError(format("executing `%1%'") % drv.builder); - + } catch (std::exception & e) { std::cerr << format("build error: %1%") % e.what() << std::endl; } @@ -2022,7 +2022,7 @@ void DerivationGoal::computeClosure() if (!worker.store.isValidPath(i->second.path)) allValid = false; if (allValid) return; } - + /* Check whether the output paths were created, and grep each output path to determine what other paths it references. Also make all output paths read-only. */ @@ -2037,18 +2037,18 @@ void DerivationGoal::computeClosure() struct stat st; if (lstat(path.c_str(), &st) == -1) throw SysError(format("getting attributes of path `%1%'") % path); - + startNest(nest, lvlTalkative, format("scanning for references inside `%1%'") % path); /* Check that fixed-output derivations produced the right outputs (i.e., the content hash should match the specified - hash). */ + hash). 
*/ if (i->second.hash != "") { bool recursive; HashType ht; Hash h; i->second.parseHashInfo(recursive, ht, h); - + if (!recursive) { /* The output path should be a regular file without execute permission. */ @@ -2067,12 +2067,12 @@ void DerivationGoal::computeClosure() } /* Get rid of all weird permissions. */ - canonicalisePathMetaData(path); + canonicalisePathMetaData(path); - /* For this output path, find the references to other paths - contained in it. Compute the SHA-256 NAR hash at the same - time. The hash is stored in the database so that we can - verify later on whether nobody has messed with the store. */ + /* For this output path, find the references to other paths + contained in it. Compute the SHA-256 NAR hash at the same + time. The hash is stored in the database so that we can + verify later on whether nobody has messed with the store. */ HashResult hash; PathSet references = scanForReferences(path, allPaths, hash); contentHashes[path] = hash; @@ -2126,7 +2126,7 @@ string drvsLogDir = "drvs"; Path DerivationGoal::openLogFile() { if (!queryBoolSetting("build-keep-log", true)) return ""; - + /* Create a log file. 
*/ Path dir = (format("%1%/%2%") % nixLogDir % drvsLogDir).str(); createDirs(dir); @@ -2179,7 +2179,7 @@ void DerivationGoal::deleteTmpDir(bool force) { if (tmpDir != "") { if (keepFailed && !force) { - printMsg(lvlError, + printMsg(lvlError, format("builder for `%1%' failed; keeping build directory `%2%'") % drvPath % tmpDir); if (buildUser.enabled() && !amPrivileged()) @@ -2235,14 +2235,14 @@ PathSet DerivationGoal::checkPathValidity(bool returnValid) bool DerivationGoal::pathFailed(const Path & path) { if (!worker.cacheFailure) return false; - + if (!worker.store.hasPathFailed(path)) return false; printMsg(lvlError, format("builder for `%1%' failed previously (cached)") % path); - + if (printBuildTrace) printMsg(lvlError, format("@ build-failed %1% %2% cached") % drvPath % path); - + worker.permanentFailure = true; amDone(ecFailed); @@ -2256,7 +2256,7 @@ bool DerivationGoal::pathFailed(const Path & path) class SubstitutionGoal : public Goal { friend class Worker; - + private: /* The store path that should be realised through a substitute. */ Path storePath; @@ -2281,7 +2281,7 @@ private: /* Lock on the store path. */ boost::shared_ptr outputLock; - + typedef void (SubstitutionGoal::*GoalState)(); GoalState state; @@ -2290,7 +2290,7 @@ public: ~SubstitutionGoal(); void cancel(); - + void work(); /* The states. */ @@ -2351,7 +2351,7 @@ void SubstitutionGoal::init() trace("init"); worker.store.addTempRoot(storePath); - + /* If the path already exists we're done. */ if (worker.store.isValidPath(storePath)) { amDone(ecSuccess); @@ -2362,7 +2362,7 @@ void SubstitutionGoal::init() throw Error(format("cannot substitute path `%1%' - no write access to the Nix store") % storePath); subs = substituters; - + tryNext(); } @@ -2448,7 +2448,7 @@ void SubstitutionGoal::tryToRun() worker.waitForAnyGoal(shared_from_this()); return; /* restart in the tryToRun() state when another goal finishes */ } - + /* Acquire a lock on the output path. 
*/ outputLock = boost::shared_ptr(new PathLocks); if (!outputLock->lockPaths(singleton(storePath), "", false)) { @@ -2465,7 +2465,7 @@ void SubstitutionGoal::tryToRun() } printMsg(lvlInfo, format("fetching path `%1%'...") % storePath); - + logPipe.create(); /* Remove the (stale) output path if it exists. */ @@ -2475,7 +2475,7 @@ void SubstitutionGoal::tryToRun() /* Fork the substitute program. */ pid = fork(); switch (pid) { - + case -1: throw SysError("unable to fork"); @@ -2494,15 +2494,15 @@ void SubstitutionGoal::tryToRun() const char * * argArr = strings2CharPtrs(args); execv(sub.c_str(), (char * *) argArr); - + throw SysError(format("executing `%1%'") % sub); - + } catch (std::exception & e) { std::cerr << format("substitute error: %1%") % e.what() << std::endl; } quickExit(1); } - + /* parent */ pid.setSeparatePG(true); pid.setKillSignal(SIGTERM); @@ -2538,23 +2538,23 @@ void SubstitutionGoal::finished() /* Check the exit status and the build result. */ try { - + if (!statusOk(status)) throw SubstError(format("fetching path `%1%' %2%") % storePath % statusToString(status)); if (!pathExists(storePath)) throw SubstError(format("substitute did not produce path `%1%'") % storePath); - + } catch (SubstError & e) { printMsg(lvlInfo, e.msg()); - + if (printBuildTrace) { printMsg(lvlError, format("@ substituter-failed %1% %2% %3%") % storePath % status % e.msg()); } - + /* Try the next substitute. 
*/ state = &SubstitutionGoal::tryNext; worker.wakeUp(shared_from_this()); @@ -2564,9 +2564,9 @@ void SubstitutionGoal::finished() canonicalisePathMetaData(storePath); HashResult hash = hashPath(htSHA256, storePath); - + worker.store.optimisePath(storePath); // FIXME: combine with hashPath() - + ValidPathInfo info2; info2.path = storePath; info2.hash = hash.first; @@ -2576,14 +2576,14 @@ void SubstitutionGoal::finished() worker.store.registerValidPath(info2); outputLock->setDeletion(true); - + printMsg(lvlChatty, format("substitution of path `%1%' succeeded") % storePath); if (printBuildTrace) { printMsg(lvlError, format("@ substituter-succeeded %1%") % storePath); } - + amDone(ecSuccess); } @@ -2614,7 +2614,7 @@ static bool working = false; Worker::Worker(LocalStore & store) : store(store) { - /* Debugging: prevent recursive workers. */ + /* Debugging: prevent recursive workers. */ if (working) abort(); working = true; nrLocalBuilds = 0; @@ -2729,7 +2729,7 @@ void Worker::childStarted(GoalPtr goal, void Worker::childTerminated(pid_t pid, bool wakeSleepers) { assert(pid != -1); /* common mistake */ - + Children::iterator i = children.find(pid); assert(i != children.end()); @@ -2741,7 +2741,7 @@ void Worker::childTerminated(pid_t pid, bool wakeSleepers) children.erase(pid); if (wakeSleepers) { - + /* Wake up goals waiting for a build slot. */ foreach (WeakGoals::iterator, i, wantingToBuild) { GoalPtr goal = i->lock(); @@ -2780,7 +2780,7 @@ void Worker::waitForAWhile(GoalPtr goal) void Worker::run(const Goals & _topGoals) { foreach (Goals::iterator, i, _topGoals) topGoals.insert(*i); - + startNest(nest, lvlDebug, format("entered goal loop")); while (1) { @@ -2866,7 +2866,7 @@ void Worker::waitForInput() /* If we are polling goals that are waiting for a lock, then wake up after a few seconds at most. 
*/ int wakeUpInterval = queryIntSetting("build-poll-interval", 5); - + if (!waitingForAWhile.empty()) { useTimeout = true; if (lastWokenUp == 0) @@ -2907,7 +2907,7 @@ void Worker::waitForInput() cancel(). */ set pids; foreach (Children::iterator, i, children) pids.insert(i->first); - + foreach (set::iterator, i, pids) { checkInterrupt(); Children::iterator j = children.find(*i); @@ -3001,7 +3001,7 @@ void LocalStore::buildPaths(const PathSet & drvPaths) if (i2) failed.insert(i2->getDrvPath()); else failed.insert(dynamic_cast(i->get())->getStorePath()); } - + if (!failed.empty()) throw Error(format("build of %1% failed") % showPaths(failed), worker.exitStatus()); } @@ -3022,5 +3022,5 @@ void LocalStore::ensurePath(const Path & path) throw Error(format("path `%1%' does not exist and cannot be created") % path, worker.exitStatus()); } - + } -- cgit 1.4.1 From 3a8f841612f08b9be11cc5346fa3c025413282d6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 27 Jul 2012 10:47:36 -0400 Subject: download-using-manifests: Don't use nix-prefetch-url MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead call curl directly and pipe it into ‘nix-store --restore’. This saves I/O and prevents creating garbage in the Nix store. 
--- scripts/download-from-binary-cache.pl.in | 2 +- scripts/download-using-manifests.pl.in | 70 +++++++++++++------------------- 2 files changed, 30 insertions(+), 42 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index f22fbb4e9dac..9e1c774a5a7b 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -428,7 +428,7 @@ sub downloadBinary { } my $url = "$binaryCacheUrl/$info->{url}"; # FIXME: handle non-relative URLs print STDERR "\n*** Downloading ‘$url’ into ‘$storePath’...\n"; - if (system("$Nix::Config::curl --fail --location '$url' | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) { + if (system("$Nix::Config::curl --fail --location --insecure '$url' | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) { die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0; next; } diff --git a/scripts/download-using-manifests.pl.in b/scripts/download-using-manifests.pl.in index f00debc68546..ed63e792ea80 100755 --- a/scripts/download-using-manifests.pl.in +++ b/scripts/download-using-manifests.pl.in @@ -15,6 +15,9 @@ my $logFile = "$Nix::Config::logDir/downloads"; # estimating the expected download size. my $fast = 1; +# ‘--insecure’ is fine because Nix verifies the hash of the result. +my $curl = "$Nix::Config::curl --fail --location --insecure"; + # Open the manifest cache and update it if necessary. my $dbh = updateManifestDB(); @@ -38,7 +41,7 @@ sub parseHash { # given path. sub computeSmallestDownload { my $targetPath = shift; - + # Build a graph of all store paths that might contribute to the # construction of $targetPath, and the special node "start". 
The # edges are either patch operations, or downloads of full NAR @@ -93,7 +96,7 @@ sub computeSmallestDownload { my $patchList = $dbh->selectall_arrayref( "select * from Patches where storePath = ?", { Slice => {} }, $u); - + foreach my $patch (@{$patchList}) { if (isValidPath($patch->{basePath})) { my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash}; @@ -106,7 +109,7 @@ sub computeSmallestDownload { $hash =~ s/.*://; $hashCache->{$baseHashAlgo}->{$patch->{basePath}} = $hash; } - + next if $hash ne $baseHash; } push @queue, $patch->{basePath}; @@ -117,7 +120,7 @@ sub computeSmallestDownload { my $narFileList = $dbh->selectall_arrayref( "select * from NARs where storePath = ?", { Slice => {} }, $u); - + foreach my $narFile (@{$narFileList}) { # !!! how to handle files whose size is not known in advance? # For now, assume some arbitrary size (1 GB). @@ -189,7 +192,7 @@ if ($ARGV[0] eq "--query") { my $infos = $dbh->selectall_arrayref( "select * from NARs where storePath = ?", { Slice => {} }, $storePath); - + next unless scalar @{$infos} > 0; my $info = @{$infos}[0]; @@ -215,14 +218,14 @@ if ($ARGV[0] eq "--query") { } print "$downloadSize\n"; - + my $narSize = $info->{narSize} || 0; print "$narSize\n"; } print "\n"; } - + else { die "unknown command `$cmd'"; } } @@ -271,16 +274,6 @@ $dbh->disconnect; my $curStep = 1; my $maxStep = scalar @path; -sub downloadFile { - my $url = shift; - $ENV{"PRINT_PATH"} = 1; - $ENV{"QUIET"} = 1; - my ($hash, $path) = `$Nix::Config::binDir/nix-prefetch-url '$url'`; - die "download of `$url' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0; - chomp $path; - return $path; -} - my $finalNarHash; while (scalar @path > 0) { @@ -312,13 +305,15 @@ while (scalar @path > 0) { # Download the patch. 
print STDERR " downloading patch...\n"; - my $patchPath = downloadFile "$patch->{url}"; + my $patchPath = "$tmpDir/patch"; + system("$curl '$patch->{url}' -o $patchPath") == 0 + or die "cannot download patch `$patch->{url}'\n"; # Apply the patch to the NAR archive produced in step 1 (for # the already present path) or a later step (for patch sequences). print STDERR " applying patch...\n"; system("$Nix::Config::libexecDir/bspatch $tmpNar $tmpNar2 $patchPath") == 0 - or die "cannot apply patch `$patchPath' to $tmpNar"; + or die "cannot apply patch `$patchPath' to $tmpNar\n"; if ($curStep < $maxStep) { # The archive will be used as the base of the next patch. @@ -328,7 +323,7 @@ while (scalar @path > 0) { # into the target path. print STDERR " unpacking patched archive...\n"; system("$Nix::Config::binDir/nix-store --restore $v < $tmpNar2") == 0 - or die "cannot unpack $tmpNar2 into `$v'"; + or die "cannot unpack $tmpNar2 into `$v'\n"; } $finalNarHash = $patch->{narHash}; @@ -340,20 +335,15 @@ while (scalar @path > 0) { my $size = $narFile->{size} || -1; print LOGFILE "$$ narfile $narFile->{url} $size $v\n"; - - # Download the archive. - print STDERR " downloading archive...\n"; - my $narFilePath = downloadFile "$narFile->{url}"; if ($curStep < $maxStep) { # The archive will be used a base to a patch. - system("$Nix::Config::bzip2 -d < '$narFilePath' > $tmpNar") == 0 - or die "cannot unpack `$narFilePath' into `$v'"; + system("$curl '$narFile->{url}' | $Nix::Config::bzip2 -d > $tmpNar") == 0 + or die "cannot download and unpack `$narFile->{url}' into `$v'\n"; } else { # Unpack the archive into the target path. 
- print STDERR " unpacking archive...\n"; - system("$Nix::Config::bzip2 -d < '$narFilePath' | $Nix::Config::binDir/nix-store --restore '$v'") == 0 - or die "cannot unpack `$narFilePath' into `$v'"; + system("$curl '$narFile->{url}' | $Nix::Config::bzip2 -d | $Nix::Config::binDir/nix-store --restore '$v'") == 0 + or die "cannot download and unpack `$narFile->{url}' into `$v'\n"; } $finalNarHash = $narFile->{narHash}; @@ -365,19 +355,17 @@ while (scalar @path > 0) { # Make sure that the hash declared in the manifest matches what we # downloaded and unpacked. +die "cannot check integrity of the downloaded path since its hash is not known\n" + unless defined $finalNarHash; -if (defined $finalNarHash) { - my ($hashAlgo, $hash) = parseHash $finalNarHash; - - # The hash in the manifest can be either in base-16 or base-32. - # Handle both. - my $hash2 = hashPath($hashAlgo, $hashAlgo eq "sha256" && length($hash) != 64, $targetPath); - - die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2\n" - if $hash ne $hash2; -} else { - die "cannot check integrity of the downloaded path since its hash is not known\n"; -} +my ($hashAlgo, $hash) = parseHash $finalNarHash; + +# The hash in the manifest can be either in base-16 or base-32. +# Handle both. 
+my $hash2 = hashPath($hashAlgo, $hashAlgo eq "sha256" && length($hash) != 64, $targetPath); + +die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2\n" + if $hash ne $hash2; print STDERR "\n"; -- cgit 1.4.1 From fbf59d95f66012349fdcd2b60f34b9efb32e6319 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 27 Jul 2012 10:56:33 -0400 Subject: Remove more tabs --- src/libstore/build.cc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/libstore/build.cc b/src/libstore/build.cc index a5658e2fa7fb..90dc2b79b532 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -2179,8 +2179,8 @@ void DerivationGoal::deleteTmpDir(bool force) { if (tmpDir != "") { if (keepFailed && !force) { - printMsg(lvlError, - format("builder for `%1%' failed; keeping build directory `%2%'") + printMsg(lvlError, + format("builder for `%1%' failed; keeping build directory `%2%'") % drvPath % tmpDir); if (buildUser.enabled() && !amPrivileged()) getOwnership(tmpDir); @@ -2838,9 +2838,9 @@ void Worker::waitForInput() /* If a global timeout has been set, sleep until it's done. */ if (buildTimeout != 0) { - useTimeout = true; + useTimeout = true; if (lastWait == 0 || lastWait > before) lastWait = before; - timeout.tv_sec = std::max((time_t) 0, lastWait + buildTimeout - before); + timeout.tv_sec = std::max((time_t) 0, lastWait + buildTimeout - before); } /* If we're monitoring for silence on stdout/stderr, sleep until @@ -2854,10 +2854,10 @@ void Worker::waitForInput() } } if (oldest) { - time_t silenceTimeout = std::max((time_t) 0, oldest + maxSilentTime - before); + time_t silenceTimeout = std::max((time_t) 0, oldest + maxSilentTime - before); timeout.tv_sec = useTimeout - ? std::min(silenceTimeout, timeout.tv_sec) - : silenceTimeout; + ? 
std::min(silenceTimeout, timeout.tv_sec) + : silenceTimeout; useTimeout = true; printMsg(lvlVomit, format("sleeping %1% seconds") % timeout.tv_sec); } @@ -2948,8 +2948,8 @@ void Worker::waitForInput() goal->cancel(); } - if (buildTimeout != 0 && - after - before >= (time_t) buildTimeout) + if (buildTimeout != 0 && + after - before >= (time_t) buildTimeout) { printMsg(lvlError, format("%1% timed out after %2% seconds of activity") -- cgit 1.4.1 From 73acb8b836affe5dfade9dd6e3339ad2f9191add Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 27 Jul 2012 12:16:02 -0400 Subject: Let build.cc verify the expected hash of a substituter's output Since SubstitutionGoal::finished() in build.cc computes the hash anyway, we can prevent the inefficiency of computing the hash twice by letting the substituter tell Nix about the expected hash, which can then verify it. --- scripts/copy-from-other-stores.pl.in | 5 ++-- scripts/download-from-binary-cache.pl.in | 11 ++++----- scripts/download-using-manifests.pl.in | 13 ++--------- src/libstore/build.cc | 39 +++++++++++++++++++++++++++----- tests/substituter.sh | 1 + 5 files changed, 43 insertions(+), 26 deletions(-) diff --git a/scripts/copy-from-other-stores.pl.in b/scripts/copy-from-other-stores.pl.in index 92869ee7a107..3ee6f075b27e 100755 --- a/scripts/copy-from-other-stores.pl.in +++ b/scripts/copy-from-other-stores.pl.in @@ -52,7 +52,7 @@ if ($ARGV[0] eq "--query") { next unless defined $store; $ENV{"NIX_DB_DIR"} = "$store/var/nix/db"; - + my $deriver = `@bindir@/nix-store --query --deriver $storePath`; die "cannot query deriver of `$storePath'" if $? 
!= 0; chomp $deriver; @@ -87,9 +87,10 @@ elsif ($ARGV[0] eq "--substitute") { my $storePath = $ARGV[1]; my ($store, $sourcePath) = findStorePath $storePath; die unless $store; - print "\n*** Copying `$storePath' from `$sourcePath'\n\n"; + print STDERR "\n*** Copying `$storePath' from `$sourcePath'\n\n"; system("$binDir/nix-store --dump $sourcePath | $binDir/nix-store --restore $storePath") == 0 or die "cannot copy `$sourcePath' to `$storePath'"; + print "\n"; # no hash to verify } diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 9e1c774a5a7b..823ecd9d9194 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -432,13 +432,10 @@ sub downloadBinary { die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0; next; } - # The hash in the manifest can be either in base-16 or - # base-32. Handle both. - $info->{narHash} =~ /^sha256:(.*)$/ or die "invalid hash"; - my $hash = $1; - my $hash2 = hashPath("sha256", 1, $storePath); - die "hash mismatch in downloaded path ‘$storePath’; expected $hash, got $hash2\n" - if $hash ne $hash2; + + # Tell Nix about the expected hash so it can verify it. + print "$info->{narHash}\n"; + print STDERR "\n"; return 1; } diff --git a/scripts/download-using-manifests.pl.in b/scripts/download-using-manifests.pl.in index ed63e792ea80..04bcce90da38 100755 --- a/scripts/download-using-manifests.pl.in +++ b/scripts/download-using-manifests.pl.in @@ -353,19 +353,10 @@ while (scalar @path > 0) { } -# Make sure that the hash declared in the manifest matches what we -# downloaded and unpacked. +# Tell Nix about the expected hash so it can verify it. die "cannot check integrity of the downloaded path since its hash is not known\n" unless defined $finalNarHash; - -my ($hashAlgo, $hash) = parseHash $finalNarHash; - -# The hash in the manifest can be either in base-16 or base-32. -# Handle both. 
-my $hash2 = hashPath($hashAlgo, $hashAlgo eq "sha256" && length($hash) != 64, $targetPath); - -die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2\n" - if $hash ne $hash2; +print "$finalNarHash\n"; print STDERR "\n"; diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 90dc2b79b532..887858fce30c 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -371,6 +371,7 @@ void commonChildInit(Pipe & logPipe) if (dup2(logPipe.writeSide, STDERR_FILENO) == -1) throw SysError("cannot pipe standard error into log file"); logPipe.readSide.close(); + logPipe.writeSide.close(); /* Dup stderr to stdout. */ if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1) @@ -2273,7 +2274,10 @@ private: /* Path info returned by the substituter's query info operation. */ SubstitutablePathInfo info; - /* Pipe for the substitute's standard output/error. */ + /* Pipe for the substituter's standard output. */ + Pipe outPipe; + + /* Pipe for the substituter's standard error. */ Pipe logPipe; /* The process ID of the builder. */ @@ -2466,6 +2470,7 @@ void SubstitutionGoal::tryToRun() printMsg(lvlInfo, format("fetching path `%1%'...") % storePath); + outPipe.create(); logPipe.create(); /* Remove the (stale) output path if it exists. */ @@ -2482,10 +2487,13 @@ void SubstitutionGoal::tryToRun() case 0: try { /* child */ - logPipe.readSide.close(); - commonChildInit(logPipe); + if (dup2(outPipe.writeSide, STDOUT_FILENO) == -1) + throw SysError("cannot dup output pipe into stdout"); + outPipe.readSide.close(); + outPipe.writeSide.close(); + /* Fill in the arguments. 
*/ Strings args; args.push_back(baseNameOf(sub)); @@ -2506,6 +2514,7 @@ void SubstitutionGoal::tryToRun() /* parent */ pid.setSeparatePG(true); pid.setKillSignal(SIGTERM); + outPipe.writeSide.close(); logPipe.writeSide.close(); worker.childStarted(shared_from_this(), pid, singleton >(logPipe.readSide), true, true); @@ -2534,9 +2543,12 @@ void SubstitutionGoal::finished() /* Close the read side of the logger pipe. */ logPipe.readSide.close(); - debug(format("substitute for `%1%' finished") % storePath); + /* Get the hash info from stdout. */ + string expectedHashStr = statusOk(status) ? readLine(outPipe.readSide) : ""; + outPipe.readSide.close(); /* Check the exit status and the build result. */ + HashResult hash; try { if (!statusOk(status)) @@ -2546,6 +2558,23 @@ void SubstitutionGoal::finished() if (!pathExists(storePath)) throw SubstError(format("substitute did not produce path `%1%'") % storePath); + hash = hashPath(htSHA256, storePath); + + /* Verify the expected hash we got from the substituer. */ + if (expectedHashStr != "") { + size_t n = expectedHashStr.find(':'); + if (n == string::npos) + throw Error(format("bad hash from substituter: %1%") % expectedHashStr); + HashType hashType = parseHashType(string(expectedHashStr, 0, n)); + if (hashType == htUnknown) + throw Error(format("unknown hash algorithm in `%1%'") % expectedHashStr); + Hash expectedHash = parseHash16or32(hashType, string(expectedHashStr, n + 1)); + Hash actualHash = hashType == htSHA256 ? 
hash.first : hashPath(hashType, storePath).first; + if (expectedHash != actualHash) + throw SubstError(format("hash mismatch in downloaded path `%1%': expected %2%, got %3%") + % storePath % printHash(expectedHash) % printHash(actualHash)); + } + } catch (SubstError & e) { printMsg(lvlInfo, e.msg()); @@ -2563,8 +2592,6 @@ void SubstitutionGoal::finished() canonicalisePathMetaData(storePath); - HashResult hash = hashPath(htSHA256, storePath); - worker.store.optimisePath(storePath); // FIXME: combine with hashPath() ValidPathInfo info2; diff --git a/tests/substituter.sh b/tests/substituter.sh index a6bdacfd66f7..885655760e05 100755 --- a/tests/substituter.sh +++ b/tests/substituter.sh @@ -29,6 +29,7 @@ if test $1 = "--query"; then elif test $1 = "--substitute"; then mkdir $2 echo "Hallo Wereld" > $2/hello + echo # no expected hash else echo "unknown substituter operation" exit 1 -- cgit 1.4.1 From e6ab52cdd1df207c7a007a9cba665ee8a031d94a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 27 Jul 2012 14:15:03 -0400 Subject: Test "nix-env -qas" with the binary cache substituter --- tests/binary-cache.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh index b4a3601ece9d..b6951dd1351e 100644 --- a/tests/binary-cache.sh +++ b/tests/binary-cache.sh @@ -2,6 +2,7 @@ source common.sh clearStore +# Create the binary cache. cacheDir=$TEST_ROOT/binary-cache rm -rf $cacheDir @@ -9,10 +10,12 @@ outPath=$(nix-build dependencies.nix --no-out-link) nix-push --dest $cacheDir $outPath +# Check that downloading works. 
clearStore - rm -f $NIX_STATE_DIR/binary-cache* +NIX_BINARY_CACHES="file://$cacheDir" nix-env -f dependencies.nix -qas \* | grep -- "--S" + NIX_BINARY_CACHES="file://$cacheDir" nix-store -r $outPath nix-store --check-validity $outPath -- cgit 1.4.1 From 6ecf4f13f6a71701f77018a852db2bd4bde0bb67 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 27 Jul 2012 14:33:01 -0400 Subject: Use "set -x" in the tests to see where a test fails --- tests/common.sh.in | 2 ++ tests/referrers.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/tests/common.sh.in b/tests/common.sh.in index abefb7e25094..1d473ca0585e 100644 --- a/tests/common.sh.in +++ b/tests/common.sh.in @@ -80,3 +80,5 @@ fail() { echo "$1" exit 1 } + +set -x diff --git a/tests/referrers.sh b/tests/referrers.sh index d4604aec930b..e57b2d8e31ff 100644 --- a/tests/referrers.sh +++ b/tests/referrers.sh @@ -10,6 +10,7 @@ touch $reference echo "making registration..." +set +x for ((n = 0; n < $max; n++)); do storePath=$NIX_STORE_DIR/$n echo -n > $storePath @@ -19,6 +20,7 @@ for ((n = 0; n < $max; n++)); do fi echo $storePath; echo; echo 2; echo $reference; echo $ref2 done > $TEST_ROOT/reg_info +set -x echo "registering..." -- cgit 1.4.1 From 66a3ac6a56cfa70e2ffeb911c1286ba84c2fa048 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 27 Jul 2012 18:16:05 -0400 Subject: Allow a binary cache to declare that it doesn't support "nix-env -qas" Querying all substitutable paths via "nix-env -qas" is potentially hard on a server, since it involves sending thousands of HEAD requests. So a binary cache must now have a meta-info file named "nix-cache-info" that specifies whether the server wants this. It also specifies the store prefix so that we don't send useless queries to a binary cache for a different store prefix. 
--- scripts/download-from-binary-cache.pl.in | 174 ++++++++++++++++++------------- scripts/nix-push.in | 32 +++--- tests/binary-cache.sh | 15 ++- 3 files changed, 135 insertions(+), 86 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 823ecd9d9194..6482b9c18391 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -12,18 +12,15 @@ use strict; Nix::Config::readConfig; -my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, - ($ENV{"NIX_BINARY_CACHES"} - // $Nix::Config::config{"binary-caches"} - // ($Nix::Config::storeDir eq "/nix/store" ? "http://nixos.org/binary-cache" : ""))); +my @caches; +my $gotCaches = 0; my $maxParallelRequests = int($Nix::Config::config{"binary-caches-parallel-connections"} // 150); $maxParallelRequests = 1 if $maxParallelRequests < 1; my $debug = ($ENV{"NIX_DEBUG_SUBST"} // "") eq 1; -my ($dbh, $insertNAR, $queryNAR, $insertNARExistence, $queryNARExistence); -my %cacheIds; +my ($dbh, $queryCache, $insertNAR, $queryNAR, $insertNARExistence, $queryNARExistence); my $curlm = WWW::Curl::Multi->new; my $activeRequests = 0; @@ -112,7 +109,10 @@ sub initCache { $dbh->do(<prepare("select id, storeDir, wantMassQuery from BinaryCaches where url = ?") or die; + $insertNAR = $dbh->prepare( "insert or replace into NARs(cache, storePath, url, compression, fileHash, fileSize, narHash, " . 
"narSize, refs, deriver, system, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die; @@ -159,35 +161,65 @@ EOF } +sub getAvailableCaches { + return if $gotCaches; + $gotCaches = 1; -sub negativeHit { - my ($storePath, $binaryCacheUrl) = @_; - $queryNARExistence->execute(getCacheId($binaryCacheUrl), basename($storePath)); - my $res = $queryNARExistence->fetchrow_hashref(); - return defined $res && $res->{exist} == 0; -} + my @urls = map { s/\/+$//; $_ } split(/ /, + ($ENV{"NIX_BINARY_CACHES"} + // $Nix::Config::config{"binary-caches"} + // ($Nix::Config::storeDir eq "/nix/store" ? "http://nixos.org/binary-cache" : ""))); + foreach my $url (@urls) { -sub positiveHit { - my ($storePath, $binaryCacheUrl) = @_; - return 1 if defined getCachedInfoFrom($storePath, $binaryCacheUrl); - $queryNARExistence->execute(getCacheId($binaryCacheUrl), basename($storePath)); - my $res = $queryNARExistence->fetchrow_hashref(); - return defined $res && $res->{exist} == 1; + # FIXME: not atomic. + $queryCache->execute($url); + my $res = $queryCache->fetchrow_hashref(); + if (defined $res) { + next if $res->{storeDir} ne $Nix::Config::storeDir; + push @caches, { id => $res->{id}, url => $url, wantMassQuery => $res->{wantMassQuery} }; + next; + } + + # Get the cache info file. + my $request = addRequest(undef, $url . "/nix-cache-info"); + processRequests; + + if ($request->{result} != 0) { + print STDERR "could not download ‘$request->{url}’ (" . + ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . 
")\n"; + next; + } + + my $storeDir = "/nix/store"; + my $wantMassQuery = 0; + foreach my $line (split "\n", $request->{content}) { + unless ($line =~ /^(.*): (.*)$/) { + print STDERR "bad cache info file ‘$request->{url}’\n"; + return undef; + } + if ($1 eq "StoreDir") { $storeDir = $2; } + elsif ($1 eq "WantMassQuery") { $wantMassQuery = int($2); } + } + + $dbh->do("insert into BinaryCaches(url, timestamp, storeDir, wantMassQuery) values (?, ?, ?, ?)", + {}, $url, time(), $storeDir, $wantMassQuery); + my $id = $dbh->last_insert_id("", "", "", ""); + next if $storeDir ne $Nix::Config::storeDir; + push @caches, { id => $id, url => $url, wantMassQuery => $wantMassQuery }; + } } sub processNARInfo { - my ($storePath, $binaryCacheUrl, $request) = @_; - - my $cacheId = getCacheId($binaryCacheUrl); + my ($storePath, $cache, $request) = @_; if ($request->{result} != 0) { if ($request->{result} != 37 && $request->{httpStatus} != 404) { print STDERR "could not download ‘$request->{url}’ (" . ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n"; } else { - $insertNARExistence->execute($cacheId, basename($storePath), 0, time()) + $insertNARExistence->execute($cache->{id}, basename($storePath), 0, time()) unless $request->{url} =~ /^file:/; } return undef; @@ -222,7 +254,7 @@ sub processNARInfo { # Cache the result. $insertNAR->execute( - $cacheId, basename($storePath), $url, $compression, $fileHash, $fileSize, + $cache->{id}, basename($storePath), $url, $compression, $fileHash, $fileSize, $narHash, $narSize, join(" ", @refs), $deriver, $system, time()) unless $request->{url} =~ /^file:/; @@ -240,31 +272,10 @@ sub processNARInfo { } -sub getCacheId { - my ($binaryCacheUrl) = @_; - - my $cacheId = $cacheIds{$binaryCacheUrl}; - return $cacheId if defined $cacheId; - - # FIXME: not atomic. 
- my @res = @{$dbh->selectcol_arrayref("select id from BinaryCaches where url = ?", {}, $binaryCacheUrl)}; - if (scalar @res == 1) { - $cacheId = $res[0]; - } else { - $dbh->do("insert into BinaryCaches(url) values (?)", - {}, $binaryCacheUrl); - $cacheId = $dbh->last_insert_id("", "", "", ""); - } - - $cacheIds{$binaryCacheUrl} = $cacheId; - return $cacheId; -} - - sub getCachedInfoFrom { - my ($storePath, $binaryCacheUrl) = @_; + my ($storePath, $cache) = @_; - $queryNAR->execute(getCacheId($binaryCacheUrl), basename($storePath)); + $queryNAR->execute($cache->{id}, basename($storePath)); my $res = $queryNAR->fetchrow_hashref(); return undef unless defined $res; @@ -281,6 +292,23 @@ sub getCachedInfoFrom { } +sub negativeHit { + my ($storePath, $cache) = @_; + $queryNARExistence->execute($cache->{id}, basename($storePath)); + my $res = $queryNARExistence->fetchrow_hashref(); + return defined $res && $res->{exist} == 0; +} + + +sub positiveHit { + my ($storePath, $cache) = @_; + return 1 if defined getCachedInfoFrom($storePath, $cache); + $queryNARExistence->execute($cache->{id}, basename($storePath)); + my $res = $queryNARExistence->fetchrow_hashref(); + return defined $res && $res->{exist} == 1; +} + + sub printInfo { my ($storePath, $info) = @_; print "$storePath\n"; @@ -306,8 +334,8 @@ sub printInfoParallel { my @left; foreach my $storePath (@paths) { my $found = 0; - foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $info = getCachedInfoFrom($storePath, $binaryCacheUrl); + foreach my $cache (@caches) { + my $info = getCachedInfoFrom($storePath, $cache); if (defined $info) { printInfo($storePath, $info); $found = 1; @@ -319,22 +347,22 @@ sub printInfoParallel { return if scalar @left == 0; - foreach my $binaryCacheUrl (@binaryCacheUrls) { + foreach my $cache (@caches) { my @left2; %requests = (); foreach my $storePath (@left) { - if (negativeHit($storePath, $binaryCacheUrl)) { + if (negativeHit($storePath, $cache)) { push @left2, $storePath; next; } - 
addRequest($storePath, infoUrl($binaryCacheUrl, $storePath)); + addRequest($storePath, infoUrl($cache->{url}, $storePath)); } processRequests; foreach my $request (values %requests) { - my $info = processNARInfo($request->{storePath}, $binaryCacheUrl, $request); + my $info = processNARInfo($request->{storePath}, $cache, $request); if (defined $info) { printInfo($request->{storePath}, $info); } else { @@ -354,8 +382,9 @@ sub printSubstitutablePaths { my @left; foreach my $storePath (@paths) { my $found = 0; - foreach my $binaryCacheUrl (@binaryCacheUrls) { - if (positiveHit($storePath, $binaryCacheUrl)) { + foreach my $cache (@caches) { + next unless $cache->{wantMassQuery}; + if (positiveHit($storePath, $cache)) { print "$storePath\n"; $found = 1; last; @@ -367,17 +396,16 @@ sub printSubstitutablePaths { return if scalar @left == 0; # For remaining paths, do HEAD requests. - foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $cacheId = getCacheId($binaryCacheUrl); - + foreach my $cache (@caches) { + next unless $cache->{wantMassQuery}; my @left2; %requests = (); foreach my $storePath (@left) { - if (negativeHit($storePath, $binaryCacheUrl)) { + if (negativeHit($storePath, $cache)) { push @left2, $storePath; next; } - addRequest($storePath, infoUrl($binaryCacheUrl, $storePath), 1); + addRequest($storePath, infoUrl($cache->{url}, $storePath), 1); } processRequests; @@ -388,12 +416,12 @@ sub printSubstitutablePaths { print STDERR "could not check ‘$request->{url}’ (" . ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . 
")\n"; } else { - $insertNARExistence->execute($cacheId, basename($request->{storePath}), 0, time()) + $insertNARExistence->execute($cache->{id}, basename($request->{storePath}), 0, time()) unless $request->{url} =~ /^file:/; } push @left2, $request->{storePath}; } else { - $insertNARExistence->execute($cacheId, basename($request->{storePath}), 1, time()) + $insertNARExistence->execute($cache->{id}, basename($request->{storePath}), 1, time()) unless $request->{url} =~ /^file:/; print "$request->{storePath}\n"; } @@ -407,14 +435,14 @@ sub printSubstitutablePaths { sub downloadBinary { my ($storePath) = @_; - foreach my $binaryCacheUrl (@binaryCacheUrls) { - my $info = getCachedInfoFrom($storePath, $binaryCacheUrl); + foreach my $cache (@caches) { + my $info = getCachedInfoFrom($storePath, $cache); unless (defined $info) { - next if negativeHit($storePath, $binaryCacheUrl); - my $request = addRequest($storePath, infoUrl($binaryCacheUrl, $storePath)); + next if negativeHit($storePath, $cache); + my $request = addRequest($storePath, infoUrl($cache->{url}, $storePath)); processRequests; - $info = processNARInfo($storePath, $binaryCacheUrl, $request); + $info = processNARInfo($storePath, $cache, $request); } next unless defined $info; @@ -426,7 +454,7 @@ sub downloadBinary { print STDERR "unknown compression method ‘$info->{compression}’\n"; next; } - my $url = "$binaryCacheUrl/$info->{url}"; # FIXME: handle non-relative URLs + my $url = "$cache->{url}/$info->{url}"; # FIXME: handle non-relative URLs print STDERR "\n*** Downloading ‘$url’ into ‘$storePath’...\n"; if (system("$Nix::Config::curl --fail --location --insecure '$url' | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) { die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? 
== 0; @@ -437,10 +465,10 @@ sub downloadBinary { print "$info->{narHash}\n"; print STDERR "\n"; - return 1; + return; } - return 0; + print STDERR "could not download ‘$storePath’ from any binary cache\n"; } @@ -450,6 +478,7 @@ initCache(); if ($ARGV[0] eq "--query") { while () { + getAvailableCaches; chomp; my ($cmd, @args) = split " ", $_; @@ -472,9 +501,8 @@ if ($ARGV[0] eq "--query") { elsif ($ARGV[0] eq "--substitute") { my $storePath = $ARGV[1] or die; - if (!downloadBinary($storePath)) { - print STDERR "could not download ‘$storePath’ from any binary cache\n"; - } + getAvailableCaches; + downloadBinary($storePath); } else { diff --git a/scripts/nix-push.in b/scripts/nix-push.in index 39fdd6da9e39..1edd8e77314b 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -61,7 +61,7 @@ for (my $n = 0; $n < scalar @ARGV; $n++) { push @roots, $arg; } } - + showSyntax if !defined $destDir; $archivesURL = "file://$destDir" unless defined $archivesURL; @@ -74,12 +74,12 @@ my %storePaths; foreach my $path (@roots) { die unless $path =~ /^\//; - # Get all paths referenced by the normalisation of the given + # Get all paths referenced by the normalisation of the given # Nix expression. my $pid = open(READ, "$Nix::Config::binDir/nix-store --query --requisites --force-realise " . "--include-outputs '$path'|") or die; - + while () { chomp; die "bad: $_" unless /^\//; @@ -101,10 +101,10 @@ foreach my $storePath (@storePaths) { die unless ($storePath =~ /\/[0-9a-z]{32}[^\"\\\$]*$/); # Construct a Nix expression that creates a Nix archive. - my $nixexpr = + my $nixexpr = "(import " . "{ storePath = builtins.storePath \"$storePath\"; hashAlgo = \"sha256\"; compressionType = \"$compressionType\"; }) "; - + print NIX $nixexpr; } @@ -125,7 +125,17 @@ while () { close READ or die "nix-build failed: $?"; -# Copy the archives and the corresponding info files. +# Write the cache info file. +my $cacheInfoFile = "$destDir/nix-cache-info"; +if (! 
-e $cacheInfoFile) { + open FILE, ">$cacheInfoFile" or die "cannot create $cacheInfoFile: $!"; + print FILE "StoreDir: $Nix::Config::storeDir\n"; + print FILE "WantMassQuery: 0\n"; # by default, don't hit this cache for "nix-env -qas" + close FILE; +} + + +# Copy the archives and the corresponding NAR info files. print STDERR "copying archives...\n"; my $totalNarSize = 0; @@ -157,7 +167,7 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { } $totalNarSize += $narSize; - + # Get info about the compressed NAR. open HASH, "$narDir/nar-compressed-hash" or die "cannot open nar-compressed-hash"; my $compressedHash = ; @@ -170,7 +180,7 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { my $narFile = "$narDir/$narName"; (-f $narFile) or die "NAR file for $storePath not found"; - my $compressedSize = stat($narFile)->size; + my $compressedSize = stat($narFile)->size; $totalCompressedSize += $compressedSize; printf STDERR "%s [%.2f MiB, %.1f%%]\n", $storePath, @@ -203,7 +213,7 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { } my $pathHash = substr(basename($storePath), 0, 32); - + $dst = "$destDir/$pathHash.narinfo"; if ($force || ! -f $dst) { my $tmp = "$destDir/.tmp.$$.$pathHash.narinfo"; @@ -230,6 +240,4 @@ printf STDERR "total compressed size %.2f MiB, %.1f%%\n", # Optionally write a manifest. -if ($writeManifest) { - writeManifest "$destDir/MANIFEST", \%narFiles, \(); -} +writeManifest "$destDir/MANIFEST", \%narFiles, \() if $writeManifest; diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh index b6951dd1351e..8c1138210412 100644 --- a/tests/binary-cache.sh +++ b/tests/binary-cache.sh @@ -10,13 +10,26 @@ outPath=$(nix-build dependencies.nix --no-out-link) nix-push --dest $cacheDir $outPath -# Check that downloading works. + +# By default, a binary cache doesn't support "nix-env -qas", but does +# support installation. 
clearStore rm -f $NIX_STATE_DIR/binary-cache* +NIX_BINARY_CACHES="file://$cacheDir" nix-env -f dependencies.nix -qas \* | grep -- "---" + +NIX_BINARY_CACHES="file://$cacheDir" nix-store -r $outPath + + +# But with the right configuration, "nix-env -qas" should also work. +clearStore +rm -f $NIX_STATE_DIR/binary-cache* +echo "WantMassQuery: 1" >> $cacheDir/nix-cache-info + NIX_BINARY_CACHES="file://$cacheDir" nix-env -f dependencies.nix -qas \* | grep -- "--S" NIX_BINARY_CACHES="file://$cacheDir" nix-store -r $outPath nix-store --check-validity $outPath nix-store -qR $outPath | grep input-2 + -- cgit 1.4.1 From 6183cf2f197edd079a0134ccb8d320bab083a624 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 30 Jul 2012 15:42:18 -0400 Subject: Fix whitespace --- src/libstore/local-store.cc | 142 ++++++++++++++++++++++---------------------- 1 file changed, 71 insertions(+), 71 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 58ce691ebb07..d6bb78f7ab12 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -6,7 +6,7 @@ #include "worker-protocol.hh" #include "derivations.hh" #include "immutable.hh" - + #include #include @@ -147,11 +147,11 @@ struct SQLiteStmtUse }; -struct SQLiteTxn +struct SQLiteTxn { bool active; sqlite3 * db; - + SQLiteTxn(sqlite3 * db) : active(false) { this->db = db; if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK) @@ -159,14 +159,14 @@ struct SQLiteTxn active = true; } - void commit() + void commit() { if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK) throwSQLiteError(db, "committing transaction"); active = false; } - - ~SQLiteTxn() + + ~SQLiteTxn() { try { if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK) @@ -199,9 +199,9 @@ void checkStoreNotSymlink() LocalStore::LocalStore(bool reserveSpace) { substitutablePathsLoaded = false; - + schemaPath = nixDBPath + "/schema"; - + if (readOnlyMode) { openDB(false); return; @@ -220,7 +220,7 @@ 
LocalStore::LocalStore(bool reserveSpace) if (symlink(profilesDir.c_str(), (gcRootsDir + "/profiles").c_str()) == -1) throw SysError(format("creating symlink to `%1%'") % profilesDir); } - + checkStoreNotSymlink(); /* We can't open a SQLite database if the disk is full. Since @@ -252,7 +252,7 @@ LocalStore::LocalStore(bool reserveSpace) openDB(false); return; } - + if (!lockFile(globalLock, ltRead, false)) { printMsg(lvlError, "waiting for the big Nix store lock..."); lockFile(globalLock, ltRead, true); @@ -264,20 +264,20 @@ LocalStore::LocalStore(bool reserveSpace) if (curSchema > nixSchemaVersion) throw Error(format("current Nix store schema is version %1%, but I only support %2%") % curSchema % nixSchemaVersion); - + else if (curSchema == 0) { /* new store */ curSchema = nixSchemaVersion; openDB(true); writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str()); } - + else if (curSchema < nixSchemaVersion) { if (curSchema < 5) throw Error( "Your Nix store has a database in Berkeley DB format,\n" "which is no longer supported. To convert to the new format,\n" "please upgrade Nix to version 0.12 first."); - + if (!lockFile(globalLock, ltWrite, false)) { printMsg(lvlError, "waiting for exclusive access to the Nix store..."); lockFile(globalLock, ltWrite, true); @@ -293,7 +293,7 @@ LocalStore::LocalStore(bool reserveSpace) lockFile(globalLock, ltRead, true); } - + else openDB(false); } @@ -339,7 +339,7 @@ void LocalStore::openDB(bool create) /* !!! check whether sqlite has been built with foreign key support */ - + /* Whether SQLite should fsync(). "Normal" synchronous mode should be safe enough. If the user asks for it, don't sync at all. This can cause database corruption if the system @@ -368,7 +368,7 @@ void LocalStore::openDB(bool create) derivation is done in a single fsync(). 
*/ if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 8192;", 0, 0, 0) != SQLITE_OK) throwSQLiteError(db, "setting autocheckpoint interval"); - + /* Initialise the database schema, if necessary. */ if (create) { #include "schema.sql.hh" @@ -423,7 +423,7 @@ void canonicalisePathMetaData(const Path & path, bool recurse) struct stat st; if (lstat(path.c_str(), &st)) - throw SysError(format("getting attributes of path `%1%'") % path); + throw SysError(format("getting attributes of path `%1%'") % path); /* Really make sure that the path is of a supported type. This has already been checked in dumpPath(). */ @@ -451,7 +451,7 @@ void canonicalisePathMetaData(const Path & path, bool recurse) /* Mask out all type related bits. */ mode_t mode = st.st_mode & ~S_IFMT; - + if (mode != 0444 && mode != 0555) { mode = (st.st_mode & S_IFMT) | 0444 @@ -461,7 +461,7 @@ void canonicalisePathMetaData(const Path & path, bool recurse) } } - + if (st.st_mtime != mtimeStore) { struct timeval times[2]; times[0].tv_sec = st.st_atime; @@ -472,14 +472,14 @@ void canonicalisePathMetaData(const Path & path, bool recurse) if (lutimes(path.c_str(), times) == -1) #else if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1) -#endif +#endif throw SysError(format("changing modification time of `%1%'") % path); } if (recurse && S_ISDIR(st.st_mode)) { Strings names = readDirectory(path); - foreach (Strings::iterator, i, names) - canonicalisePathMetaData(path + "/" + *i, true); + foreach (Strings::iterator, i, names) + canonicalisePathMetaData(path + "/" + *i, true); } makeImmutable(path); @@ -494,7 +494,7 @@ void canonicalisePathMetaData(const Path & path) be a symlink, since we can't change its ownership. 
*/ struct stat st; if (lstat(path.c_str(), &st)) - throw SysError(format("getting attributes of path `%1%'") % path); + throw SysError(format("getting attributes of path `%1%'") % path); if (st.st_uid != geteuid()) { assert(S_ISLNK(st.st_mode)); @@ -508,7 +508,7 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & string drvName = storePathToName(drvPath); assert(isDerivation(drvName)); drvName = string(drvName, 0, drvName.size() - drvExtension.size()); - + if (isFixedOutputDrv(drv)) { DerivationOutputs::const_iterator out = drv.outputs.find("out"); if (out == drv.outputs.end()) @@ -532,7 +532,7 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & } Hash h = hashDerivationModulo(*this, drvCopy); - + foreach (DerivationOutputs::const_iterator, i, drv.outputs) { Path outPath = makeOutputPath(i->first, h, drvName); StringPairs::const_iterator j = drv.env.find(i->first); @@ -568,14 +568,14 @@ unsigned long long LocalStore::addValidPath(const ValidPathInfo & info, bool che derivation. */ if (isDerivation(info.path)) { Derivation drv = parseDerivation(readFile(info.path)); - + /* Verify that the output paths in the derivation are correct (i.e., follow the scheme for computing output paths from derivations). Note that if this throws an error, then the DB transaction is rolled back, so the path validity registration above is undone. 
*/ if (checkOutputs) checkDerivationOutputs(info.path, drv); - + foreach (DerivationOutputs::iterator, i, drv.outputs) { SQLiteStmtUse use(stmtAddDerivationOutput); stmtAddDerivationOutput.bind(id); @@ -681,7 +681,7 @@ ValidPathInfo LocalStore::queryPathInfo(const Path & path) SQLiteStmtUse use1(stmtQueryPathInfo); stmtQueryPathInfo.bind(path); - + int r = sqlite3_step(stmtQueryPathInfo); if (r == SQLITE_DONE) throw Error(format("path `%1%' is not valid") % path); if (r != SQLITE_ROW) throwSQLiteError(db, "querying path in database"); @@ -691,7 +691,7 @@ ValidPathInfo LocalStore::queryPathInfo(const Path & path) const char * s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 1); assert(s); info.hash = parseHashField(path, s); - + info.registrationTime = sqlite3_column_int(stmtQueryPathInfo, 2); s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 3); @@ -769,9 +769,9 @@ PathSet LocalStore::queryAllValidPaths() { SQLiteStmt stmt; stmt.create(db, "select path from ValidPaths"); - + PathSet res; - + int r; while ((r = sqlite3_step(stmt)) == SQLITE_ROW) { const char * s = (const char *) sqlite3_column_text(stmt, 0); @@ -834,10 +834,10 @@ PathSet LocalStore::queryValidDerivers(const Path & path) assert(s); derivers.insert(s); } - + if (r != SQLITE_DONE) throwSQLiteError(db, format("error getting valid derivers of `%1%'") % path); - + return derivers; } @@ -845,10 +845,10 @@ PathSet LocalStore::queryValidDerivers(const Path & path) PathSet LocalStore::queryDerivationOutputs(const Path & path) { SQLiteTxn txn(db); - + SQLiteStmtUse use(stmtQueryDerivationOutputs); stmtQueryDerivationOutputs.bind(queryValidPathId(path)); - + PathSet outputs; int r; while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) { @@ -856,7 +856,7 @@ PathSet LocalStore::queryDerivationOutputs(const Path & path) assert(s); outputs.insert(s); } - + if (r != SQLITE_DONE) throwSQLiteError(db, format("error getting outputs of `%1%'") % path); @@ -867,10 +867,10 @@ PathSet 
LocalStore::queryDerivationOutputs(const Path & path) StringSet LocalStore::queryDerivationOutputNames(const Path & path) { SQLiteTxn txn(db); - + SQLiteStmtUse use(stmtQueryDerivationOutputs); stmtQueryDerivationOutputs.bind(queryValidPathId(path)); - + StringSet outputNames; int r; while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) { @@ -878,7 +878,7 @@ StringSet LocalStore::queryDerivationOutputNames(const Path & path) assert(s); outputNames.insert(s); } - + if (r != SQLITE_DONE) throwSQLiteError(db, format("error getting output names of `%1%'") % path); @@ -889,11 +889,11 @@ StringSet LocalStore::queryDerivationOutputNames(const Path & path) Path LocalStore::queryPathFromHashPart(const string & hashPart) { if (hashPart.size() != 32) throw Error("invalid hash part"); - + SQLiteTxn txn(db); Path prefix = nixStore + "/" + hashPart; - + SQLiteStmtUse use(stmtQueryPathFromHashPart); stmtQueryPathFromHashPart.bind(prefix); @@ -909,16 +909,16 @@ Path LocalStore::queryPathFromHashPart(const string & hashPart) void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & run) { if (run.pid != -1) return; - + debug(format("starting substituter program `%1%'") % substituter); Pipe toPipe, fromPipe; - + toPipe.create(); fromPipe.create(); run.pid = fork(); - + switch (run.pid) { case -1: @@ -932,7 +932,7 @@ void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & library named libutil. As a result, substituters written in Perl (i.e. all of them) fail. */ unsetenv("DYLD_LIBRARY_PATH"); - + fromPipe.readSide.close(); toPipe.writeSide.close(); if (dup2(toPipe.readSide, STDIN_FILENO) == -1) @@ -949,7 +949,7 @@ void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & } /* Parent. 
*/ - + run.to = toPipe.writeSide.borrow(); run.from = fromPipe.readSide.borrow(); } @@ -1054,7 +1054,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos) try { SQLiteTxn txn(db); PathSet paths; - + foreach (ValidPathInfos::const_iterator, i, infos) { assert(i->hash.type == htSHA256); /* !!! Maybe the registration info should be updated if the @@ -1145,7 +1145,7 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name, hash = hashPath(htSHA256, dstPath); optimisePath(dstPath); // FIXME: combine with hashPath() - + ValidPathInfo info; info.path = dstPath; info.hash = hash.first; @@ -1183,7 +1183,7 @@ Path LocalStore::addTextToStore(const string & name, const string & s, const PathSet & references) { Path dstPath = computeStorePathForText(name, s, references); - + addTempRoot(dstPath); if (!isValidPath(dstPath)) { @@ -1201,7 +1201,7 @@ Path LocalStore::addTextToStore(const string & name, const string & s, HashResult hash = hashPath(htSHA256, dstPath); optimisePath(dstPath); - + ValidPathInfo info; info.path = dstPath; info.hash = hash.first; @@ -1259,7 +1259,7 @@ void LocalStore::exportPath(const Path & path, bool sign, throw Error(format("path `%1%' is not valid") % path); HashAndWriteSink hashAndWriteSink(sink); - + dumpPath(path, hashAndWriteSink); /* Refuse to export paths that have changed. 
This prevents @@ -1274,7 +1274,7 @@ void LocalStore::exportPath(const Path & path, bool sign, writeInt(EXPORT_MAGIC, hashAndWriteSink); writeString(path, hashAndWriteSink); - + PathSet references; queryReferences(path, references); writeStrings(references, hashAndWriteSink); @@ -1284,9 +1284,9 @@ void LocalStore::exportPath(const Path & path, bool sign, if (sign) { Hash hash = hashAndWriteSink.currentHash(); - + writeInt(1, hashAndWriteSink); - + Path tmpDir = createTempDir(); AutoDelete delTmp(tmpDir); Path hashFile = tmpDir + "/hash"; @@ -1305,7 +1305,7 @@ void LocalStore::exportPath(const Path & path, bool sign, string signature = runProgram(OPENSSL_PATH, true, args); writeString(signature, hashAndWriteSink); - + } else writeInt(0, hashAndWriteSink); } @@ -1348,7 +1348,7 @@ Path LocalStore::createTempDirInStore() Path LocalStore::importPath(bool requireSignature, Source & source) { HashAndReadSource hashAndReadSource(source); - + /* We don't yet know what store path this archive contains (the store path follows the archive data proper), and besides, we don't know yet whether the signature is valid. */ @@ -1378,7 +1378,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source) if (requireSignature && !haveSignature) throw Error(format("imported archive of `%1%' lacks a signature") % dstPath); - + if (haveSignature) { string signature = readString(hashAndReadSource); @@ -1432,13 +1432,13 @@ Path LocalStore::importPath(bool requireSignature, Source & source) % unpacked % dstPath); canonicalisePathMetaData(dstPath); - + /* !!! if we were clever, we could prevent the hashPath() here. */ HashResult hash = hashPath(htSHA256, dstPath); optimisePath(dstPath); // FIXME: combine with hashPath() - + ValidPathInfo info; info.path = dstPath; info.hash = hash.first; @@ -1447,10 +1447,10 @@ Path LocalStore::importPath(bool requireSignature, Source & source) info.deriver = deriver != "" && isValidPath(deriver) ? 
deriver : ""; registerValidPath(info); } - + outputLock.setDeletion(true); } - + return dstPath; } @@ -1498,7 +1498,7 @@ void LocalStore::verifyStore(bool checkContents) /* Acquire the global GC lock to prevent a garbage collection. */ AutoCloseFD fdGCLock = openGCLock(ltWrite); - + Paths entries = readDirectory(nixStore); PathSet store(entries.begin(), entries.end()); @@ -1527,7 +1527,7 @@ void LocalStore::verifyStore(bool checkContents) /* Check the content hash (optionally - slow). */ printMsg(lvlTalkative, format("checking contents of `%1%'") % *i); HashResult current = hashPath(info.hash.type, *i); - + if (info.hash != nullHash && info.hash != current.first) { printMsg(lvlError, format("path `%1%' was modified! " "expected hash `%2%', got `%3%'") @@ -1542,18 +1542,18 @@ void LocalStore::verifyStore(bool checkContents) info.hash = current.first; update = true; } - + /* Fill in missing narSize fields (from old stores). */ if (info.narSize == 0) { printMsg(lvlError, format("updating size field on `%1%' to %2%") % *i % current.second); info.narSize = current.second; - update = true; + update = true; } if (update) updatePathInfo(info); } - + } catch (Error & e) { /* It's possible that the path got GC'ed, so ignore errors on invalid paths. 
*/ @@ -1569,7 +1569,7 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store, PathSet & done, PathSet & validPaths) { checkInterrupt(); - + if (done.find(path) != done.end()) return; done.insert(path); @@ -1596,10 +1596,10 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store, invalidatePath(path); } else printMsg(lvlError, format("path `%1%' disappeared, but it still has valid referrers!") % path); - + return; } - + validPaths.insert(path); } @@ -1665,14 +1665,14 @@ void LocalStore::upgradeStore6() PathSet validPaths = queryValidPathsOld(); SQLiteTxn txn(db); - + foreach (PathSet::iterator, i, validPaths) { addValidPath(queryPathInfoOld(*i), false); std::cerr << "."; } std::cerr << "|"; - + foreach (PathSet::iterator, i, validPaths) { ValidPathInfo info = queryPathInfoOld(*i); unsigned long long referrer = queryValidPathId(*i); -- cgit 1.4.1 From f9613da18033d0a9835bc57ac2142aca754983cf Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 30 Jul 2012 15:43:37 -0400 Subject: Remove unused variables --- src/libstore/local-store.cc | 2 -- src/libstore/local-store.hh | 3 --- 2 files changed, 5 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index d6bb78f7ab12..023bf417e59a 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -198,8 +198,6 @@ void checkStoreNotSymlink() LocalStore::LocalStore(bool reserveSpace) { - substitutablePathsLoaded = false; - schemaPath = nixDBPath + "/schema"; if (readOnlyMode) { diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 15dff1d02052..4761658ed830 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -80,9 +80,6 @@ struct SQLiteStmt class LocalStore : public StoreAPI { private: - bool substitutablePathsLoaded; - PathSet substitutablePaths; - typedef std::map RunningSubstituters; RunningSubstituters runningSubstituters; -- cgit 1.4.1 From d059bf48e4bd4d1f50593dbe60953de8b2d395c7 Mon Sep 17 
00:00:00 2001 From: Eelco Dolstra Date: Mon, 30 Jul 2012 16:09:54 -0400 Subject: Pass configuration settings to the substituters Previously substituters could read nix.conf themselves, but this didn't take --option flags into account. --- perl/lib/Nix/Config.pm.in | 10 +++++++++- src/libstore/build.cc | 4 ++++ src/libstore/globals.cc | 23 ++++++++++++++++++----- src/libstore/globals.hh | 2 ++ src/libstore/local-store.cc | 4 ++++ 5 files changed, 37 insertions(+), 6 deletions(-) diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in index ed197821e89f..8c902ab6edc5 100644 --- a/perl/lib/Nix/Config.pm.in +++ b/perl/lib/Nix/Config.pm.in @@ -19,9 +19,17 @@ $useBindings = "@perlbindings@" eq "yes"; %config = (); sub readConfig { + if (defined $ENV{'_NIX_OPTIONS'}) { + foreach my $s (split '\n', $ENV{'_NIX_OPTIONS'}) { + my ($n, $v) = split '=', $s, 2; + $config{$n} = $v; + } + return; + } + my $config = "$confDir/nix.conf"; return unless -f $config; - + open CONFIG, "<$config" or die "cannot open `$config'"; while () { /^\s*([\w|-]+)\s*=\s*(.*)$/ or next; diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 887858fce30c..4a2bc5218b69 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -2494,6 +2494,10 @@ void SubstitutionGoal::tryToRun() outPipe.readSide.close(); outPipe.writeSide.close(); + /* Pass configuration options (including those overriden + with --option) to the substituter. */ + setenv("_NIX_OPTIONS", packSettings().c_str(), 1); + /* Fill in the arguments. */ Strings args; args.push_back(baseNameOf(sub)); diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 9636bf49d987..a28e08427da5 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -36,10 +36,12 @@ bool printBuildTrace = false; static bool settingsRead = false; -static std::map settings; +typedef std::map Settings; + +static Settings settings; /* Overriden settings. 
*/ -std::map settingsCmdline; +Settings settingsCmdline; string & at(Strings & ss, unsigned int n) @@ -82,7 +84,7 @@ static void readSettings() }; settings.insert(settingsCmdline.begin(), settingsCmdline.end()); - + settingsRead = true; } @@ -90,7 +92,7 @@ static void readSettings() Strings querySetting(const string & name, const Strings & def) { if (!settingsRead) readSettings(); - std::map::iterator i = settings.find(name); + Settings::iterator i = settings.find(name); return i == settings.end() ? def : i->second; } @@ -169,5 +171,16 @@ void setDefaultsFromEnvironment() buildTimeout = queryIntSetting("build-timeout", 0); } - + +string packSettings() +{ + string s; + if (!settingsRead) readSettings(); + foreach (Settings::iterator, i, settings) { + s += i->first; s += '='; s += concatStringsSep(" ", i->second); s += '\n'; + } + return s; +} + + } diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 1c0877a5e1e9..30acf59ef54f 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -115,5 +115,7 @@ void reloadSettings(); void setDefaultsFromEnvironment(); +string packSettings(); + } diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 023bf417e59a..aaa1abb56921 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -931,6 +931,10 @@ void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & written in Perl (i.e. all of them) fail. */ unsetenv("DYLD_LIBRARY_PATH"); + /* Pass configuration options (including those overriden + with --option) to the substituter. 
*/ + setenv("_NIX_OPTIONS", packSettings().c_str(), 1); + fromPipe.readSide.close(); toPipe.writeSide.close(); if (dup2(toPipe.readSide, STDIN_FILENO) == -1) -- cgit 1.4.1 From ab42bf1dab026d10b74e857a76feff475ae8a162 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 30 Jul 2012 16:11:02 -0400 Subject: nix-install-package: Support binary caches The .nixpkg file format is extended to optionally include the URL of a binary cache, which will be used in preference to the manifest URL (which can be set to a non-existent value). --- scripts/nix-install-package.in | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/scripts/nix-install-package.in b/scripts/nix-install-package.in index 951c2918fa70..35d5f9d5306f 100755 --- a/scripts/nix-install-package.in +++ b/scripts/nix-install-package.in @@ -101,7 +101,7 @@ my $pathRE = "(?: \/ [\/A-Za-z0-9\+\-\.\_\?\=]* )"; # store path. We'll let nix-env do that. $contents =~ - / ^ \s* (\S+) \s+ ($urlRE) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) /x + / ^ \s* (\S+) \s+ ($urlRE) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) ( \s+ ($urlRE) )? /x or barf "invalid package contents"; my $version = $1; my $manifestURL = $2; @@ -109,6 +109,7 @@ my $drvName = $3; my $system = $4; my $drvPath = $5; my $outPath = $6; +my $binaryCacheURL = $8; barf "invalid package version `$version'" unless $version eq "NIXPKG1"; @@ -122,17 +123,25 @@ if ($interactive) { } -# Store the manifest in the temporary directory so that we don't -# pollute /nix/var/nix/manifests. This also requires that we don't -# use the Nix daemon (because otherwise download-using-manifests won't -# see our NIX_MANIFESTS_DIRS environment variable). 
-$ENV{NIX_MANIFESTS_DIR} = $tmpDir; -$ENV{NIX_REMOTE} = ""; +if (defined $binaryCacheURL) { + push @extraNixEnvArgs, "--option", "binary-caches", $binaryCacheURL; -print "\nPulling manifests...\n"; -system("$Nix::Config::binDir/nix-pull", $manifestURL) == 0 - or barf "nix-pull failed: $?"; +} else { + + # Store the manifest in the temporary directory so that we don't + # pollute /nix/var/nix/manifests. This also requires that we + # don't use the Nix daemon (because otherwise + # download-using-manifests won't see our NIX_MANIFESTS_DIRS + # environment variable). + $ENV{NIX_MANIFESTS_DIR} = $tmpDir; + $ENV{NIX_REMOTE} = ""; + + print "\nPulling manifests...\n"; + system("$Nix::Config::binDir/nix-pull", $manifestURL) == 0 + or barf "nix-pull failed: $?"; + +} print "\nInstalling package...\n"; -- cgit 1.4.1 From 9de6d10d112665ba1c6d807dd3950ed4c43a4404 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 30 Jul 2012 16:39:31 -0400 Subject: Get rid of $NIX_BINARY_CACHES MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit You can use ‘--option binary-caches URLs’ instead. --- doc/manual/conf-file.xml | 6 ++++-- scripts/download-from-binary-cache.pl.in | 5 ++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/manual/conf-file.xml b/doc/manual/conf-file.xml index cccee8d46202..c73466ef5cf6 100644 --- a/doc/manual/conf-file.xml +++ b/doc/manual/conf-file.xml @@ -30,6 +30,9 @@ gc-keep-derivations = true # Idem env-keep-derivations = false +You can override settings using the +flag, e.g. --option gc-keep-outputs false. + The following settings are currently available: @@ -310,8 +313,7 @@ build-use-chroot = /dev /proc /bin binary-caches A list of URLs of binary caches, separated by - whitespace. It can be overriden through the environment variable - NIX_BINARY_CACHES. The default is + whitespace. The default is http://nixos.org/binary-cache. 
diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 6482b9c18391..7e203ec9d057 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -166,9 +166,8 @@ sub getAvailableCaches { $gotCaches = 1; my @urls = map { s/\/+$//; $_ } split(/ /, - ($ENV{"NIX_BINARY_CACHES"} - // $Nix::Config::config{"binary-caches"} - // ($Nix::Config::storeDir eq "/nix/store" ? "http://nixos.org/binary-cache" : ""))); + $Nix::Config::config{"binary-caches"} + // ($Nix::Config::storeDir eq "/nix/store" ? "http://nixos.org/binary-cache" : "")); foreach my $url (@urls) { -- cgit 1.4.1 From f3eb29c6530e990b18e9f04390f6fa7bfbc58078 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 30 Jul 2012 17:09:13 -0400 Subject: Fix the test --- tests/binary-cache.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh index 8c1138210412..0704a2bb3b8f 100644 --- a/tests/binary-cache.sh +++ b/tests/binary-cache.sh @@ -16,9 +16,9 @@ nix-push --dest $cacheDir $outPath clearStore rm -f $NIX_STATE_DIR/binary-cache* -NIX_BINARY_CACHES="file://$cacheDir" nix-env -f dependencies.nix -qas \* | grep -- "---" +nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "---" -NIX_BINARY_CACHES="file://$cacheDir" nix-store -r $outPath +nix-store --option binary-caches "file://$cacheDir" -r $outPath # But with the right configuration, "nix-env -qas" should also work. 
@@ -26,9 +26,9 @@ clearStore rm -f $NIX_STATE_DIR/binary-cache* echo "WantMassQuery: 1" >> $cacheDir/nix-cache-info -NIX_BINARY_CACHES="file://$cacheDir" nix-env -f dependencies.nix -qas \* | grep -- "--S" +nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "--S" -NIX_BINARY_CACHES="file://$cacheDir" nix-store -r $outPath +nix-store --option binary-caches "file://$cacheDir" -r $outPath nix-store --check-validity $outPath nix-store -qR $outPath | grep input-2 -- cgit 1.4.1 From 9cd63d224468af87baf74228acc162873c649493 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 30 Jul 2012 17:09:36 -0400 Subject: Do some validation of URLs --- perl/Makefile.am | 2 +- scripts/download-from-binary-cache.pl.in | 4 ++-- scripts/download-using-manifests.pl.in | 3 +++ scripts/nix-install-package.in | 6 +++--- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/perl/Makefile.am b/perl/Makefile.am index d1f9b1c74162..e42894353088 100644 --- a/perl/Makefile.am +++ b/perl/Makefile.am @@ -1,4 +1,4 @@ -PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/CopyClosure.pm lib/Nix/Config.pm.in +PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/CopyClosure.pm lib/Nix/Config.pm.in lib/Nix/Utils.pm all: $(PERL_MODULES:.in=) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 7e203ec9d057..5d65e664e564 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -5,6 +5,7 @@ use File::Basename; use IO::Select; use Nix::Config; use Nix::Store; +use Nix::Utils; use WWW::Curl::Easy; use WWW::Curl::Multi; use strict; @@ -249,8 +250,6 @@ sub processNARInfo { return undef; } - # FIXME: validate $url etc. for security. - # Cache the result. 
$insertNAR->execute( $cache->{id}, basename($storePath), $url, $compression, $fileHash, $fileSize, @@ -455,6 +454,7 @@ sub downloadBinary { } my $url = "$cache->{url}/$info->{url}"; # FIXME: handle non-relative URLs print STDERR "\n*** Downloading ‘$url’ into ‘$storePath’...\n"; + Nix::Utils::checkURL $url; if (system("$Nix::Config::curl --fail --location --insecure '$url' | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) { die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0; next; diff --git a/scripts/download-using-manifests.pl.in b/scripts/download-using-manifests.pl.in index 04bcce90da38..24f7c98e0cb9 100755 --- a/scripts/download-using-manifests.pl.in +++ b/scripts/download-using-manifests.pl.in @@ -4,6 +4,7 @@ use strict; use Nix::Config; use Nix::Manifest; use Nix::Store; +use Nix::Utils; use POSIX qw(strftime); use File::Temp qw(tempdir); @@ -306,6 +307,7 @@ while (scalar @path > 0) { # Download the patch. print STDERR " downloading patch...\n"; my $patchPath = "$tmpDir/patch"; + Nix::Utils::checkURL $patch->{url}; system("$curl '$patch->{url}' -o $patchPath") == 0 or die "cannot download patch `$patch->{url}'\n"; @@ -336,6 +338,7 @@ while (scalar @path > 0) { my $size = $narFile->{size} || -1; print LOGFILE "$$ narfile $narFile->{url} $size $v\n"; + Nix::Utils::checkURL $narFile->{url}; if ($curStep < $maxStep) { # The archive will be used a base to a patch. 
system("$curl '$narFile->{url}' | $Nix::Config::bzip2 -d > $tmpNar") == 0 diff --git a/scripts/nix-install-package.in b/scripts/nix-install-package.in index 35d5f9d5306f..6564529385d6 100755 --- a/scripts/nix-install-package.in +++ b/scripts/nix-install-package.in @@ -3,6 +3,7 @@ use strict; use File::Temp qw(tempdir); use Nix::Config; +use Nix::Utils; sub usageError { @@ -72,7 +73,7 @@ my $tmpDir = tempdir("nix-install-package.XXXXXX", CLEANUP => 1, TMPDIR => 1) sub barf { my $msg = shift; - print "$msg\n"; + print "\nInstallation failed: $msg\n"; if $interactive; exit 1; } @@ -92,7 +93,6 @@ open PKGFILE, "<$pkgFile" or barf "cannot open `$pkgFile': $!"; my $contents = ; close PKGFILE; -my $urlRE = "(?: [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+ )"; my $nameRE = "(?: [A-Za-z0-9\+\-\.\_\?\=]+ )"; # see checkStoreName() my $systemRE = "(?: [A-Za-z0-9\+\-\_]+ )"; my $pathRE = "(?: \/ [\/A-Za-z0-9\+\-\.\_\?\=]* )"; @@ -101,7 +101,7 @@ my $pathRE = "(?: \/ [\/A-Za-z0-9\+\-\.\_\?\=]* )"; # store path. We'll let nix-env do that. $contents =~ - / ^ \s* (\S+) \s+ ($urlRE) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) ( \s+ ($urlRE) )? /x + / ^ \s* (\S+) \s+ ($Nix::Utils::urlRE) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) ( \s+ ($Nix::Utils::urlRE) )? 
/x or barf "invalid package contents"; my $version = $1; my $manifestURL = $2; -- cgit 1.4.1 From d50d7a287416da2086b0b24f9d998eabb24c1734 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 30 Jul 2012 17:13:25 -0400 Subject: Whitespace --- src/libmain/shared.cc | 14 ++++----- src/libstore/remote-store.cc | 42 +++++++++++++-------------- src/libstore/worker-protocol.hh | 2 +- src/nix-worker/nix-worker.cc | 64 ++++++++++++++++++++--------------------- 4 files changed, 61 insertions(+), 61 deletions(-) diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index d39816586404..2118e439189e 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -46,7 +46,7 @@ Path makeRootName(const Path & gcRoot, int & counter) void printGCWarning() { static bool haveWarned = false; - warnOnce(haveWarned, + warnOnce(haveWarned, "you did not specify `--add-root'; " "the result might be removed by the garbage collector"); } @@ -98,7 +98,7 @@ static bool showTrace = false; static void initAndRun(int argc, char * * argv) { setDefaultsFromEnvironment(); - + /* Catch SIGINT. */ struct sigaction act; act.sa_handler = sigintHandler; @@ -137,7 +137,7 @@ static void initAndRun(int argc, char * * argv) Strings args, remaining; while (argc--) args.push_back(*argv++); args.erase(args.begin()); - + /* Expand compound dash options (i.e., `-qlf' -> `-q -l -f'), and ignore options for the ATerm library. */ for (Strings::iterator i = args.begin(); i != args.end(); ++i) { @@ -210,7 +210,7 @@ static void initAndRun(int argc, char * * argv) verbosityDelta += queryIntSetting("verbosity", lvlInfo); verbosity = (Verbosity) (verbosityDelta < 0 ? 0 : verbosityDelta); - + run(remaining); /* Close the Nix database. */ @@ -228,7 +228,7 @@ static void setuidInit() uid_t nixUid = geteuid(); gid_t nixGid = getegid(); - + setuidCleanup(); /* Don't trust the current directory. */ @@ -294,7 +294,7 @@ int main(int argc, char * * argv) right away. 
*/ if (argc == 0) abort(); setuidInit(); - + /* Turn on buffering for cerr. */ #if HAVE_PUBSETBUF std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf)); @@ -323,7 +323,7 @@ int main(int argc, char * * argv) throw; } } catch (UsageError & e) { - printMsg(lvlError, + printMsg(lvlError, format( "error: %1%\n" "Try `%2% --help' for more information.") diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 35530acab1af..5910ffd53094 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -60,7 +60,7 @@ void RemoteStore::openConnection(bool reserveSpace) else throw Error(format("invalid setting for NIX_REMOTE, `%1%'") % remoteMode); - + from.fd = fdSocket; to.fd = fdSocket; @@ -103,15 +103,15 @@ void RemoteStore::forkSlave() worker = nixBinDir + "/nix-worker"; child = fork(); - + switch (child) { - + case -1: throw SysError("unable to fork"); case 0: try { /* child */ - + if (dup2(fdChild, STDOUT_FILENO) == -1) throw SysError("dupping write side"); @@ -124,7 +124,7 @@ void RemoteStore::forkSlave() execlp(worker.c_str(), worker.c_str(), "--slave", NULL); throw SysError(format("executing `%1%'") % worker); - + } catch (std::exception & e) { std::cerr << format("child error: %1%\n") % e.what(); } @@ -150,16 +150,16 @@ void RemoteStore::connectToDaemon() applications... 
*/ AutoCloseFD fdPrevDir = open(".", O_RDONLY); if (fdPrevDir == -1) throw SysError("couldn't open current directory"); - chdir(dirOf(socketPath).c_str()); + chdir(dirOf(socketPath).c_str()); Path socketPathRel = "./" + baseNameOf(socketPath); - + struct sockaddr_un addr; addr.sun_family = AF_UNIX; if (socketPathRel.size() >= sizeof(addr.sun_path)) throw Error(format("socket path `%1%' is too long") % socketPathRel); using namespace std; strcpy(addr.sun_path, socketPathRel.c_str()); - + if (connect(fdSocket, (struct sockaddr *) &addr, sizeof(addr)) == -1) throw SysError(format("cannot connect to daemon at `%1%'") % socketPath); @@ -199,9 +199,9 @@ void RemoteStore::setOptions() } if (GET_PROTOCOL_MINOR(daemonVersion) >= 6) writeInt(buildCores, to); - if (GET_PROTOCOL_MINOR(daemonVersion) >= 10) + if (GET_PROTOCOL_MINOR(daemonVersion) >= 10) writeInt(queryBoolSetting("build-use-substitutes", true), to); - + processStderr(); } @@ -270,11 +270,11 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, if (paths.empty()) return; openConnection(); - + if (GET_PROTOCOL_MINOR(daemonVersion) < 3) return; - + if (GET_PROTOCOL_MINOR(daemonVersion) < 12) { - + foreach (PathSet::const_iterator, i, paths) { SubstitutablePathInfo info; writeInt(wopQuerySubstitutablePathInfo, to); @@ -289,9 +289,9 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? 
readLongLong(from) : 0; infos[*i] = info; } - + } else { - + writeInt(wopQuerySubstitutablePathInfos, to); writeStrings(paths, to); processStderr(); @@ -305,7 +305,7 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, info.downloadSize = readLongLong(from); info.narSize = readLongLong(from); } - + } } @@ -411,7 +411,7 @@ Path RemoteStore::addToStore(const Path & _srcPath, bool recursive, HashType hashAlgo, PathFilter & filter) { openConnection(); - + Path srcPath(absPath(_srcPath)); writeInt(wopAddToStore, to); @@ -434,7 +434,7 @@ Path RemoteStore::addTextToStore(const string & name, const string & s, writeString(name, to); writeString(s, to); writeStrings(references, to); - + processStderr(); return readStorePath(from); } @@ -531,7 +531,7 @@ Roots RemoteStore::findRoots() void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) { openConnection(false); - + writeInt(wopCollectGarbage, to); writeInt(options.action, to); writeStrings(options.pathsToDelete, to); @@ -543,9 +543,9 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) writeInt(0, to); writeInt(0, to); } - + processStderr(); - + results.paths = readStrings(from); results.bytesFreed = readLongLong(from); results.blocksFreed = readLongLong(from); diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index 9677a46c2896..7e4c3ec5fbde 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -63,5 +63,5 @@ typedef enum { Path readStorePath(Source & from); template T readStorePaths(Source & from); - + } diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index f2ca0a89233e..09800c16087a 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -95,7 +95,7 @@ static bool isFarSideClosed(int socket) throw Error("EOF expected (protocol error?)"); else if (rd == -1 && errno != ECONNRESET) throw SysError("expected connection reset or EOF"); - + 
return true; } @@ -185,7 +185,7 @@ static void stopWork(bool success = true, const string & msg = "", unsigned int we're either sending or receiving from the client, so we'll be notified of client death anyway. */ setSigPollAction(false); - + canSendStderr = false; if (success) @@ -220,7 +220,7 @@ struct TunnelSource : BufferedSource so we have to disable the SIGPOLL handler. */ setSigPollAction(false); canSendStderr = false; - + writeInt(STDERR_READ, to); writeInt(len, to); to.flush(); @@ -279,7 +279,7 @@ static void performOp(unsigned int clientVersion, { switch (op) { -#if 0 +#if 0 case wopQuit: { /* Close the database. */ store.reset((StoreAPI *) 0); @@ -323,7 +323,7 @@ static void performOp(unsigned int clientVersion, writeStrings(res, to); break; } - + case wopQueryPathHash: { Path path = readStorePath(from); startWork(); @@ -391,7 +391,7 @@ static void performOp(unsigned int clientVersion, SavingSourceAdapter savedNAR(from); RetrieveRegularNARSink savedRegular; - + if (recursive) { /* Get the entire NAR dump from the client and save it to a string so that we can pass it to @@ -400,13 +400,13 @@ static void performOp(unsigned int clientVersion, parseDump(sink, savedNAR); } else parseDump(savedRegular, from); - + startWork(); if (!savedRegular.regular) throw Error("regular file expected"); Path path = dynamic_cast(store.get()) ->addToStoreFromDump(recursive ? 
savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo); stopWork(); - + writeString(path, to); break; } @@ -512,17 +512,17 @@ static void performOp(unsigned int clientVersion, } GCResults results; - + startWork(); if (options.ignoreLiveness) throw Error("you are not allowed to ignore liveness"); store->collectGarbage(options, results); stopWork(); - + writeStrings(results.paths, to); writeLongLong(results.bytesFreed, to); writeLongLong(results.blocksFreed, to); - + break; } @@ -572,7 +572,7 @@ static void performOp(unsigned int clientVersion, } break; } - + case wopQuerySubstitutablePathInfos: { PathSet paths = readStorePaths(from); startWork(); @@ -589,7 +589,7 @@ static void performOp(unsigned int clientVersion, } break; } - + case wopQueryAllValidPaths: { startWork(); PathSet paths = store->queryAllValidPaths(); @@ -637,7 +637,7 @@ static void performOp(unsigned int clientVersion, static void processConnection() { canSendStderr = false; - myPid = getpid(); + myPid = getpid(); writeToStderr = tunnelStderr; #ifdef HAVE_HUP_NOTIFICATION @@ -681,7 +681,7 @@ static void processConnection() stopWork(); to.flush(); - + } catch (Error & e) { stopWork(false, e.msg()); to.flush(); @@ -690,7 +690,7 @@ static void processConnection() /* Process client requests. */ unsigned int opCount = 0; - + while (true) { WorkerOp op; try { @@ -762,7 +762,7 @@ static void daemonLoop() /* Otherwise, create and bind to a Unix domain socket. */ else { - + /* Create and bind to a Unix domain socket. */ fdSocket = socket(PF_UNIX, SOCK_STREAM, 0); if (fdSocket == -1) @@ -777,7 +777,7 @@ static void daemonLoop() relative path name. */ chdir(dirOf(socketPath).c_str()); Path socketPathRel = "./" + baseNameOf(socketPath); - + struct sockaddr_un addr; addr.sun_family = AF_UNIX; if (socketPathRel.size() >= sizeof(addr.sun_path)) @@ -802,7 +802,7 @@ static void daemonLoop() } closeOnExec(fdSocket); - + /* Loop accepting connections. 
*/ while (1) { @@ -810,7 +810,7 @@ static void daemonLoop() /* Important: the server process *cannot* open the SQLite database, because it doesn't like forks very much. */ assert(!store); - + /* Accept a connection. */ struct sockaddr_un remoteAddr; socklen_t remoteAddrLen = sizeof(remoteAddr); @@ -819,14 +819,14 @@ static void daemonLoop() (struct sockaddr *) &remoteAddr, &remoteAddrLen); checkInterrupt(); if (remote == -1) { - if (errno == EINTR) - continue; - else - throw SysError("accepting connection"); + if (errno == EINTR) + continue; + else + throw SysError("accepting connection"); } closeOnExec(remote); - + /* Get the identity of the caller, if possible. */ uid_t clientUid = -1; pid_t clientPid = -1; @@ -841,13 +841,13 @@ static void daemonLoop() #endif printMsg(lvlInfo, format("accepted connection from pid %1%, uid %2%") % clientPid % clientUid); - + /* Fork a child to handle the connection. */ pid_t child; child = fork(); - + switch (child) { - + case -1: throw SysError("unable to fork"); @@ -866,16 +866,16 @@ static void daemonLoop() string processName = int2String(clientPid); strncpy(argvSaved[1], processName.c_str(), strlen(argvSaved[1])); } - + /* Since the daemon can be long-running, the settings may have changed. So force a reload. */ reloadSettings(); - + /* Handle the connection. */ from.fd = remote; to.fd = remote; processConnection(); - + } catch (std::exception & e) { std::cerr << format("child error: %1%\n") % e.what(); } @@ -895,7 +895,7 @@ void run(Strings args) { bool slave = false; bool daemon = false; - + for (Strings::iterator i = args.begin(); i != args.end(); ) { string arg = *i++; if (arg == "--slave") slave = true; -- cgit 1.4.1 From 97421eb5ecde86b75441094fda017b12b5eca2a6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 30 Jul 2012 19:55:41 -0400 Subject: Refactor settings processing Put all Nix configuration flags in a Settings object. 
--- perl/lib/Nix/Store.xs | 2 +- src/libexpr/eval.cc | 4 +- src/libexpr/primops.cc | 6 +- src/libmain/shared.cc | 47 ++++--- src/libstore/build.cc | 176 ++++++++++++-------------- src/libstore/derivations.cc | 22 ++-- src/libstore/gc.cc | 78 ++++++------ src/libstore/globals.cc | 201 ++++++++++++++++-------------- src/libstore/globals.hh | 221 +++++++++++++++++++++------------ src/libstore/local-store.cc | 64 +++++----- src/libstore/local-store.hh | 49 ++++---- src/libstore/misc.cc | 24 ++-- src/libstore/optimise-store.cc | 28 ++--- src/libstore/remote-store.cc | 24 ++-- src/libstore/store-api.cc | 16 +-- src/nix-env/nix-env.cc | 20 ++- src/nix-instantiate/nix-instantiate.cc | 4 +- src/nix-store/nix-store.cc | 70 +++++------ src/nix-worker/nix-worker.cc | 32 ++--- 19 files changed, 566 insertions(+), 522 deletions(-) diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index 76de674e6d5b..00311aa8f3bf 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -19,7 +19,7 @@ void doInit() { if (!store) { try { - setDefaultsFromEnvironment(); + settings.processEnvironment(); store = openStore(); } catch (Error & e) { croak(e.what()); diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index cf7c62ad20cf..74f7560fe0ea 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -179,7 +179,7 @@ EvalState::EvalState() searchPathInsertionPoint = searchPath.end(); Strings paths = tokenizeString(getEnv("NIX_PATH", ""), ":"); foreach (Strings::iterator, i, paths) addToSearchPath(*i); - addToSearchPath("nix=" + nixDataDir + "/nix/corepkgs"); + addToSearchPath("nix=" + settings.nixDataDir + "/nix/corepkgs"); searchPathInsertionPoint = searchPath.begin(); createBaseEnv(); @@ -1058,7 +1058,7 @@ string EvalState::coerceToString(Value & v, PathSet & context, if (srcToStore[path] != "") dstPath = srcToStore[path]; else { - dstPath = readOnlyMode + dstPath = settings.readOnlyMode ? 
computeStorePathForPath(path).first : store->addToStore(path); srcToStore[path] = dstPath; diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 5c011c43e31c..7258c4cc0fa9 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -623,7 +623,7 @@ static void prim_toFile(EvalState & state, Value * * args, Value & v) refs.insert(path); } - Path storePath = readOnlyMode + Path storePath = settings.readOnlyMode ? computeStorePathForText(name, contents, refs) : store->addTextToStore(name, contents, refs); @@ -687,7 +687,7 @@ static void prim_filterSource(EvalState & state, Value * * args, Value & v) FilterFromExpr filter(state, *args[0]); - Path dstPath = readOnlyMode + Path dstPath = settings.readOnlyMode ? computeStorePathForPath(path, true, htSHA256, filter).first : store->addToStore(path, true, htSHA256, filter); @@ -1079,7 +1079,7 @@ void EvalState::createBaseEnv() mkInt(v, time(0)); addConstant("__currentTime", v); - mkString(v, thisSystem.c_str()); + mkString(v, settings.thisSystem.c_str()); addConstant("__currentSystem", v); // Miscellaneous diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 2118e439189e..9c62e320f66e 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -74,7 +74,7 @@ void printMissing(StoreAPI & store, const PathSet & paths) if (!unknown.empty()) { printMsg(lvlInfo, format("don't know how to build these paths%1%:") - % (readOnlyMode ? " (may be caused by read-only store access)" : "")); + % (settings.readOnlyMode ? 
" (may be caused by read-only store access)" : "")); foreach (PathSet::iterator, i, unknown) printMsg(lvlInfo, format(" %1%") % *i); } @@ -93,11 +93,20 @@ static void setLogType(string lt) static bool showTrace = false; +string getArg(const string & opt, + Strings::iterator & i, const Strings::iterator & end) +{ + ++i; + if (i == end) throw UsageError(format("`%1%' requires an argument") % opt); + return *i; +} + /* Initialize and reorder arguments, then call the actual argument processor. */ static void initAndRun(int argc, char * * argv) { - setDefaultsFromEnvironment(); + settings.processEnvironment(); + settings.loadConfFile(); /* Catch SIGINT. */ struct sigaction act; @@ -156,20 +165,19 @@ static void initAndRun(int argc, char * * argv) remaining.clear(); /* Process default options. */ - int verbosityDelta = 0; + int verbosityDelta = lvlInfo; for (Strings::iterator i = args.begin(); i != args.end(); ++i) { string arg = *i; if (arg == "--verbose" || arg == "-v") verbosityDelta++; else if (arg == "--quiet") verbosityDelta--; else if (arg == "--log-type") { - ++i; - if (i == args.end()) throw UsageError("`--log-type' requires an argument"); - setLogType(*i); + string s = getArg(arg, i, args.end()); + setLogType(s); } else if (arg == "--no-build-output" || arg == "-Q") - buildVerbosity = lvlVomit; + settings.buildVerbosity = lvlVomit; else if (arg == "--print-build-trace") - printBuildTrace = true; + settings.printBuildTrace = true; else if (arg == "--help") { printHelp(); return; @@ -179,23 +187,23 @@ static void initAndRun(int argc, char * * argv) return; } else if (arg == "--keep-failed" || arg == "-K") - keepFailed = true; + settings.keepFailed = true; else if (arg == "--keep-going" || arg == "-k") - keepGoing = true; + settings.keepGoing = true; else if (arg == "--fallback") - tryFallback = true; + settings.tryFallback = true; else if (arg == "--max-jobs" || arg == "-j") - maxBuildJobs = getIntArg(arg, i, args.end()); + settings.set("build-max-jobs", 
getArg(arg, i, args.end())); else if (arg == "--cores") - buildCores = getIntArg(arg, i, args.end()); + settings.set("build-cores", getArg(arg, i, args.end())); else if (arg == "--readonly-mode") - readOnlyMode = true; + settings.readOnlyMode = true; else if (arg == "--max-silent-time") - maxSilentTime = getIntArg(arg, i, args.end()); + settings.set("build-max-silent-time", getArg(arg, i, args.end())); else if (arg == "--timeout") - buildTimeout = getIntArg(arg, i, args.end()); + settings.set("build-timeout", getArg(arg, i, args.end())); else if (arg == "--no-build-hook") - useBuildHook = false; + settings.useBuildHook = false; else if (arg == "--show-trace") showTrace = true; else if (arg == "--option") { @@ -203,14 +211,15 @@ static void initAndRun(int argc, char * * argv) string name = *i; ++i; if (i == args.end()) throw UsageError("`--option' requires two arguments"); string value = *i; - overrideSetting(name, tokenizeString(value)); + settings.set(name, value); } else remaining.push_back(arg); } - verbosityDelta += queryIntSetting("verbosity", lvlInfo); verbosity = (Verbosity) (verbosityDelta < 0 ? 0 : verbosityDelta); + settings.update(); + run(remaining); /* Close the Nix database. */ diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 4a2bc5218b69..0972d6e19364 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -229,8 +229,6 @@ private: public: - bool cacheFailure; - /* Set if at least one derivation had a BuildError (i.e. permanent failure). */ bool permanentFailure; @@ -314,7 +312,7 @@ void Goal::waiteeDone(GoalPtr waitee, ExitCode result) if (result == ecNoSubstituters) ++nrNoSubstituters; - if (waitees.empty() || (result == ecFailed && !keepGoing)) { + if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) { /* If we failed and keepGoing is not set, we remove all remaining waitees. 
*/ @@ -466,14 +464,13 @@ void UserLock::acquire() { assert(uid == 0); - string buildUsersGroup = querySetting("build-users-group", ""); - assert(buildUsersGroup != ""); + assert(settings.buildUsersGroup != ""); /* Get the members of the build-users-group. */ - struct group * gr = getgrnam(buildUsersGroup.c_str()); + struct group * gr = getgrnam(settings.buildUsersGroup.c_str()); if (!gr) throw Error(format("the group `%1%' specified in `build-users-group' does not exist") - % buildUsersGroup); + % settings.buildUsersGroup); gid = gr->gr_gid; /* Copy the result of getgrnam. */ @@ -485,7 +482,7 @@ void UserLock::acquire() if (users.empty()) throw Error(format("the build users group `%1%' has no members") - % buildUsersGroup); + % settings.buildUsersGroup); /* Find a user account that isn't currently in use for another build. */ @@ -495,11 +492,11 @@ void UserLock::acquire() struct passwd * pw = getpwnam(i->c_str()); if (!pw) throw Error(format("the user `%1%' in the group `%2%' does not exist") - % *i % buildUsersGroup); + % *i % settings.buildUsersGroup); - createDirs(nixStateDir + "/userpool"); + createDirs(settings.nixStateDir + "/userpool"); - fnUserLock = (format("%1%/userpool/%2%") % nixStateDir % pw->pw_uid).str(); + fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str(); if (lockedPaths.find(fnUserLock) != lockedPaths.end()) /* We already have a lock on this one. */ @@ -519,7 +516,7 @@ void UserLock::acquire() /* Sanity check... 
*/ if (uid == getuid() || uid == geteuid()) throw Error(format("the Nix user should not be a member of `%1%'") - % buildUsersGroup); + % settings.buildUsersGroup); return; } @@ -527,7 +524,7 @@ void UserLock::acquire() throw Error(format("all build users are currently in use; " "consider creating additional users and adding them to the `%1%' group") - % buildUsersGroup); + % settings.buildUsersGroup); } @@ -546,7 +543,7 @@ static void runSetuidHelper(const string & command, const string & arg) { Path program = getEnv("NIX_SETUID_HELPER", - nixLibexecDir + "/nix-setuid-helper"); + settings.nixLibexecDir + "/nix-setuid-helper"); /* Fork. */ Pid pid; @@ -601,12 +598,6 @@ bool amPrivileged() } -bool haveBuildUsers() -{ - return querySetting("build-users-group", "") != ""; -} - - void getOwnership(const Path & path) { runSetuidHelper("get-ownership", path); @@ -622,7 +613,7 @@ void deletePathWrapped(const Path & path, } catch (SysError & e) { /* If this failed due to a permission error, then try it with the setuid helper. */ - if (haveBuildUsers() && !amPrivileged()) { + if (settings.buildUsersGroup != "" && !amPrivileged()) { getOwnership(path); deletePath(path, bytesFreed, blocksFreed); } else @@ -701,9 +692,9 @@ HookInstance::HookInstance() throw SysError("dupping builder's stdout/stderr"); /* XXX: Pass `buildTimeout' to the hook? 
*/ - execl(buildHook.c_str(), buildHook.c_str(), thisSystem.c_str(), - (format("%1%") % maxSilentTime).str().c_str(), - (format("%1%") % printBuildTrace).str().c_str(), + execl(buildHook.c_str(), buildHook.c_str(), settings.thisSystem.c_str(), + (format("%1%") % settings.maxSilentTime).str().c_str(), + (format("%1%") % settings.printBuildTrace).str().c_str(), NULL); throw SysError(format("executing `%1%'") % buildHook); @@ -943,7 +934,7 @@ void DerivationGoal::init() { trace("init"); - if (readOnlyMode) + if (settings.readOnlyMode) throw Error(format("cannot build derivation `%1%' - no write access to the Nix store") % drvPath); /* The first thing to do is to make sure that the derivation @@ -995,7 +986,7 @@ void DerivationGoal::haveDerivation() /* We are first going to try to create the invalid output paths through substitutes. If that doesn't work, we'll build them. */ - if (queryBoolSetting("build-use-substitutes", true)) + if (settings.useSubstitutes) foreach (PathSet::iterator, i, invalidOutputs) addWaitee(worker.makeSubstitutionGoal(*i)); @@ -1010,7 +1001,7 @@ void DerivationGoal::outputsSubstituted() { trace("all outputs substituted (maybe)"); - if (nrFailed > 0 && nrFailed > nrNoSubstituters && !tryFallback) + if (nrFailed > 0 && nrFailed > nrNoSubstituters && !settings.tryFallback) throw Error(format("some substitutes for the outputs of derivation `%1%' failed; try `--fallback'") % drvPath); nrFailed = nrNoSubstituters = 0; @@ -1109,9 +1100,9 @@ PathSet outputPaths(const DerivationOutputs & outputs) static bool canBuildLocally(const string & platform) { - return platform == thisSystem + return platform == settings.thisSystem #ifdef CAN_DO_LINUX32_BUILDS - || (platform == "i686-linux" && thisSystem == "x86_64-linux") + || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux") #endif ; } @@ -1213,7 +1204,7 @@ void DerivationGoal::tryToBuild() derivation prefers to be done locally, do it even if maxBuildJobs is 0. 
*/ unsigned int curBuilds = worker.getNrLocalBuilds(); - if (curBuilds >= maxBuildJobs && !(preferLocalBuild && curBuilds == 0)) { + if (curBuilds >= settings.maxBuildJobs && !(preferLocalBuild && curBuilds == 0)) { worker.waitForBuildSlot(shared_from_this()); outputLocks.unlock(); return; @@ -1228,7 +1219,7 @@ void DerivationGoal::tryToBuild() printMsg(lvlError, e.msg()); outputLocks.unlock(); buildUser.release(); - if (printBuildTrace) + if (settings.printBuildTrace) printMsg(lvlError, format("@ build-failed %1% %2% %3% %4%") % drvPath % drv.outputs["out"].path % 0 % e.msg()); worker.permanentFailure = true; @@ -1360,7 +1351,7 @@ void DerivationGoal::buildDone() bool hookError = hook && (!WIFEXITED(status) || WEXITSTATUS(status) != 100); - if (printBuildTrace) { + if (settings.printBuildTrace) { if (hook && hookError) printMsg(lvlError, format("@ hook-failed %1% %2% %3% %4%") % drvPath % drv.outputs["out"].path % status % e.msg()); @@ -1376,7 +1367,7 @@ void DerivationGoal::buildDone() able to access the network). Hook errors (like communication problems with the remote machine) shouldn't be cached either. */ - if (worker.cacheFailure && !hookError && !fixedOutput) + if (settings.cacheFailure && !hookError && !fixedOutput) foreach (DerivationOutputs::iterator, i, drv.outputs) worker.store.registerFailedPath(i->second.path); @@ -1388,7 +1379,7 @@ void DerivationGoal::buildDone() /* Release the build user, if applicable. 
*/ buildUser.release(); - if (printBuildTrace) { + if (settings.printBuildTrace) { printMsg(lvlError, format("@ build-succeeded %1% %2%") % drvPath % drv.outputs["out"].path); } @@ -1399,7 +1390,7 @@ void DerivationGoal::buildDone() HookReply DerivationGoal::tryBuildHook() { - if (!useBuildHook || getEnv("NIX_BUILD_HOOK") == "") return rpDecline; + if (!settings.useBuildHook || getEnv("NIX_BUILD_HOOK") == "") return rpDecline; if (!worker.hook) worker.hook = boost::shared_ptr(new HookInstance); @@ -1412,7 +1403,7 @@ HookReply DerivationGoal::tryBuildHook() /* Send the request to the hook. */ writeLine(worker.hook->toHook.writeSide, (format("%1% %2% %3% %4%") - % (worker.getNrLocalBuilds() < maxBuildJobs ? "1" : "0") + % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0") % drv.platform % drvPath % concatStringsSep(",", features)).str()); /* Read the first line of input, which should be a word indicating @@ -1471,7 +1462,7 @@ HookReply DerivationGoal::tryBuildHook() fds.insert(hook->builderOut.readSide); worker.childStarted(shared_from_this(), hook->pid, fds, false, false); - if (printBuildTrace) + if (settings.printBuildTrace) printMsg(lvlError, format("@ build-started %1% %2% %3% %4%") % drvPath % drv.outputs["out"].path % drv.platform % logFile); @@ -1502,7 +1493,7 @@ void DerivationGoal::startBuilder() if (!canBuildLocally(drv.platform)) throw Error( format("a `%1%' is required to build `%3%', but I am a `%2%'") - % drv.platform % thisSystem % drvPath); + % drv.platform % settings.thisSystem % drvPath); /* Construct the environment passed to the builder. */ @@ -1523,10 +1514,10 @@ void DerivationGoal::startBuilder() shouldn't care, but this is useful for purity checking (e.g., the compiler or linker might only want to accept paths to files in the store or in the build directory). */ - env["NIX_STORE"] = nixStore; + env["NIX_STORE"] = settings.nixStore; /* The maximum number of cores to utilize for parallel building. 
*/ - env["NIX_BUILD_CORES"] = (format("%d") % buildCores).str(); + env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str(); /* Add all bindings specified in the derivation. */ foreach (StringPairs::iterator, i, drv.env) @@ -1618,7 +1609,7 @@ void DerivationGoal::startBuilder() /* If `build-users-group' is not empty, then we have to build as one of the members of that group. */ - if (haveBuildUsers()) { + if (settings.buildUsersGroup != "") { buildUser.acquire(); assert(buildUser.getUID() != 0); assert(buildUser.getGID() != 0); @@ -1640,15 +1631,15 @@ void DerivationGoal::startBuilder() the builder can create its output but not mess with the outputs of other processes). */ struct stat st; - if (stat(nixStore.c_str(), &st) == -1) - throw SysError(format("cannot stat `%1%'") % nixStore); + if (stat(settings.nixStore.c_str(), &st) == -1) + throw SysError(format("cannot stat `%1%'") % settings.nixStore); if (!(st.st_mode & S_ISVTX) || ((st.st_mode & S_IRWXG) != S_IRWXG) || (st.st_gid != buildUser.getGID())) throw Error(format( "builder does not have write permission to `%2%'; " "try `chgrp %1% %2%; chmod 1775 %2%'") - % buildUser.getGID() % nixStore); + % buildUser.getGID() % settings.nixStore); } @@ -1657,7 +1648,7 @@ void DerivationGoal::startBuilder() functions like fetchurl (which needs a proper /etc/resolv.conf) work properly. Purity checking for fixed-output derivations is somewhat pointless anyway. */ - useChroot = queryBoolSetting("build-use-chroot", false); + useChroot = settings.useChroot; if (fixedOutput) useChroot = false; @@ -1707,16 +1698,8 @@ void DerivationGoal::startBuilder() writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n"); /* Bind-mount a user-configurable set of directories from the - host file system. The `/dev/pts' directory must be mounted - separately so that newly-created pseudo-terminals show - up. 
*/ - Paths defaultDirs; - defaultDirs.push_back("/dev"); - defaultDirs.push_back("/dev/pts"); - - Paths dirsInChroot_ = querySetting("build-chroot-dirs", defaultDirs); - dirsInChroot.insert(dirsInChroot_.begin(), dirsInChroot_.end()); - + host file system. */ + dirsInChroot = settings.dirsInChroot; dirsInChroot.insert(tmpDir); /* Make the closure of the inputs available in the chroot, @@ -1726,8 +1709,8 @@ void DerivationGoal::startBuilder() can be bind-mounted). !!! As an extra security precaution, make the fake Nix store only writable by the build user. */ - createDirs(chrootRootDir + nixStore); - chmod(chrootRootDir + nixStore, 01777); + createDirs(chrootRootDir + settings.nixStore); + chmod(chrootRootDir + settings.nixStore, 01777); foreach (PathSet::iterator, i, inputPaths) { struct stat st; @@ -1827,7 +1810,7 @@ void DerivationGoal::startBuilder() worker.childStarted(shared_from_this(), pid, singleton >(builderOut.readSide), true, true); - if (printBuildTrace) { + if (settings.printBuildTrace) { printMsg(lvlError, format("@ build-started %1% %2% %3% %4%") % drvPath % drv.outputs["out"].path % drv.platform % logFile); } @@ -1907,16 +1890,14 @@ void DerivationGoal::initChild() #ifdef CAN_DO_LINUX32_BUILDS /* Change the personality to 32-bit if we're doing an i686-linux build on an x86_64-linux machine. */ - if (drv.platform == "i686-linux" && thisSystem == "x86_64-linux") { + if (drv.platform == "i686-linux" && settings.thisSystem == "x86_64-linux") { if (personality(0x0008 | 0x8000000 /* == PER_LINUX32_3GB */) == -1) throw SysError("cannot set i686-linux personality"); } /* Impersonate a Linux 2.6 machine to get some determinism in builds that depend on the kernel version. 
*/ - if ((drv.platform == "i686-linux" || drv.platform == "x86_64-linux") && - queryBoolSetting("build-impersonate-linux-26", true)) - { + if ((drv.platform == "i686-linux" || drv.platform == "x86_64-linux") && settings.impersonateLinux26) { int cur = personality(0xffffffff); if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */); } @@ -1958,7 +1939,7 @@ void DerivationGoal::initChild() } else { /* Let the setuid helper take care of it. */ - program = nixLibexecDir + "/nix-setuid-helper"; + program = settings.nixLibexecDir + "/nix-setuid-helper"; args.push_back(program.c_str()); args.push_back("run-builder"); user = buildUser.getUser().c_str(); @@ -2126,13 +2107,13 @@ string drvsLogDir = "drvs"; Path DerivationGoal::openLogFile() { - if (!queryBoolSetting("build-keep-log", true)) return ""; + if (!settings.keepLog) return ""; /* Create a log file. */ - Path dir = (format("%1%/%2%") % nixLogDir % drvsLogDir).str(); + Path dir = (format("%1%/%2%") % settings.nixLogDir % drvsLogDir).str(); createDirs(dir); - if (queryBoolSetting("build-compress-log", true)) { + if (settings.compressLog) { Path logFileName = (format("%1%/%2%.bz2") % dir % baseNameOf(drvPath)).str(); AutoCloseFD fd = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC, 0666); @@ -2179,7 +2160,7 @@ void DerivationGoal::closeLogFile() void DerivationGoal::deleteTmpDir(bool force) { if (tmpDir != "") { - if (keepFailed && !force) { + if (settings.keepFailed && !force) { printMsg(lvlError, format("builder for `%1%' failed; keeping build directory `%2%'") % drvPath % tmpDir); @@ -2199,7 +2180,7 @@ void DerivationGoal::handleChildOutput(int fd, const string & data) if ((hook && fd == hook->builderOut.readSide) || (!hook && fd == builderOut.readSide)) { - if (verbosity >= buildVerbosity) + if (verbosity >= settings.buildVerbosity) writeToStderr((unsigned char *) data.data(), data.size()); if (bzLogFile) { int err; @@ -2235,13 +2216,13 @@ PathSet DerivationGoal::checkPathValidity(bool returnValid) bool 
DerivationGoal::pathFailed(const Path & path) { - if (!worker.cacheFailure) return false; + if (!settings.cacheFailure) return false; if (!worker.store.hasPathFailed(path)) return false; printMsg(lvlError, format("builder for `%1%' failed previously (cached)") % path); - if (printBuildTrace) + if (settings.printBuildTrace) printMsg(lvlError, format("@ build-failed %1% %2% cached") % drvPath % path); worker.permanentFailure = true; @@ -2362,10 +2343,10 @@ void SubstitutionGoal::init() return; } - if (readOnlyMode) + if (settings.readOnlyMode) throw Error(format("cannot substitute path `%1%' - no write access to the Nix store") % storePath); - subs = substituters; + subs = settings.substituters; tryNext(); } @@ -2437,7 +2418,7 @@ void SubstitutionGoal::tryToRun() is maxBuildJobs == 0 (no local builds allowed), we still allow a substituter to run. This is because substitutions cannot be distributed to another machine via the build hook. */ - if (worker.getNrLocalBuilds() >= (maxBuildJobs == 0 ? 1 : maxBuildJobs)) { + if (worker.getNrLocalBuilds() >= (settings.maxBuildJobs == 0 ? 1 : settings.maxBuildJobs)) { worker.waitForBuildSlot(shared_from_this()); return; } @@ -2496,7 +2477,7 @@ void SubstitutionGoal::tryToRun() /* Pass configuration options (including those overriden with --option) to the substituter. */ - setenv("_NIX_OPTIONS", packSettings().c_str(), 1); + setenv("_NIX_OPTIONS", settings.pack().c_str(), 1); /* Fill in the arguments. 
*/ Strings args; @@ -2525,7 +2506,7 @@ void SubstitutionGoal::tryToRun() state = &SubstitutionGoal::finished; - if (printBuildTrace) { + if (settings.printBuildTrace) { printMsg(lvlError, format("@ substituter-started %1% %2%") % storePath % sub); } @@ -2583,7 +2564,7 @@ void SubstitutionGoal::finished() printMsg(lvlInfo, e.msg()); - if (printBuildTrace) { + if (settings.printBuildTrace) { printMsg(lvlError, format("@ substituter-failed %1% %2% %3%") % storePath % status % e.msg()); } @@ -2611,7 +2592,7 @@ void SubstitutionGoal::finished() printMsg(lvlChatty, format("substitution of path `%1%' succeeded") % storePath); - if (printBuildTrace) { + if (settings.printBuildTrace) { printMsg(lvlError, format("@ substituter-succeeded %1%") % storePath); } @@ -2622,7 +2603,7 @@ void SubstitutionGoal::finished() void SubstitutionGoal::handleChildOutput(int fd, const string & data) { assert(fd == logPipe.readSide); - if (verbosity >= buildVerbosity) + if (verbosity >= settings.buildVerbosity) writeToStderr((unsigned char *) data.data(), data.size()); /* Don't write substitution output to a log file for now. We probably should, though. */ @@ -2650,7 +2631,6 @@ Worker::Worker(LocalStore & store) working = true; nrLocalBuilds = 0; lastWokenUp = 0; - cacheFailure = queryBoolSetting("build-cache-failure", false); permanentFailure = false; } @@ -2715,7 +2695,7 @@ void Worker::removeGoal(GoalPtr goal) topGoals.erase(goal); /* If a top-level goal failed, then kill all other goals (unless keepGoing was set). 
*/ - if (goal->getExitCode() == Goal::ecFailed && !keepGoing) + if (goal->getExitCode() == Goal::ecFailed && !settings.keepGoing) topGoals.clear(); } @@ -2787,7 +2767,7 @@ void Worker::childTerminated(pid_t pid, bool wakeSleepers) void Worker::waitForBuildSlot(GoalPtr goal) { debug("wait for build slot"); - if (getNrLocalBuilds() < maxBuildJobs) + if (getNrLocalBuilds() < settings.maxBuildJobs) wakeUp(goal); /* we can do it right away */ else wantingToBuild.insert(goal); @@ -2836,7 +2816,7 @@ void Worker::run(const Goals & _topGoals) if (!children.empty() || !waitingForAWhile.empty()) waitForInput(); else { - if (awake.empty() && maxBuildJobs == 0) throw Error( + if (awake.empty() && settings.maxBuildJobs == 0) throw Error( "unable to start any build; either increase `--max-jobs' " "or enable distributed builds"); assert(!awake.empty()); @@ -2846,9 +2826,9 @@ void Worker::run(const Goals & _topGoals) /* If --keep-going is not set, it's possible that the main goal exited while some of its subgoals were still active. But if --keep-going *is* set, then they must all be finished now. */ - assert(!keepGoing || awake.empty()); - assert(!keepGoing || wantingToBuild.empty()); - assert(!keepGoing || children.empty()); + assert(!settings.keepGoing || awake.empty()); + assert(!settings.keepGoing || wantingToBuild.empty()); + assert(!settings.keepGoing || children.empty()); } @@ -2868,15 +2848,15 @@ void Worker::waitForInput() time_t before = time(0); /* If a global timeout has been set, sleep until it's done. */ - if (buildTimeout != 0) { + if (settings.buildTimeout != 0) { useTimeout = true; if (lastWait == 0 || lastWait > before) lastWait = before; - timeout.tv_sec = std::max((time_t) 0, lastWait + buildTimeout - before); + timeout.tv_sec = std::max((time_t) 0, lastWait + settings.buildTimeout - before); } /* If we're monitoring for silence on stdout/stderr, sleep until the first deadline for any child. 
*/ - if (maxSilentTime != 0) { + if (settings.maxSilentTime != 0) { time_t oldest = 0; foreach (Children::iterator, i, children) { if (i->second.monitorForSilence) { @@ -2885,7 +2865,7 @@ void Worker::waitForInput() } } if (oldest) { - time_t silenceTimeout = std::max((time_t) 0, oldest + maxSilentTime - before); + time_t silenceTimeout = std::max((time_t) 0, oldest + settings.maxSilentTime - before); timeout.tv_sec = useTimeout ? std::min(silenceTimeout, timeout.tv_sec) : silenceTimeout; @@ -2896,14 +2876,12 @@ void Worker::waitForInput() /* If we are polling goals that are waiting for a lock, then wake up after a few seconds at most. */ - int wakeUpInterval = queryIntSetting("build-poll-interval", 5); - if (!waitingForAWhile.empty()) { useTimeout = true; if (lastWokenUp == 0) printMsg(lvlError, "waiting for locks or build slots..."); if (lastWokenUp == 0 || lastWokenUp > before) lastWokenUp = before; - timeout.tv_sec = std::max((time_t) 0, lastWokenUp + wakeUpInterval - before); + timeout.tv_sec = std::max((time_t) 0, lastWokenUp + settings.pollInterval - before); } else lastWokenUp = 0; using namespace std; @@ -2969,27 +2947,27 @@ void Worker::waitForInput() } } - if (maxSilentTime != 0 && + if (settings.maxSilentTime != 0 && j->second.monitorForSilence && - after - j->second.lastOutput >= (time_t) maxSilentTime) + after - j->second.lastOutput >= (time_t) settings.maxSilentTime) { printMsg(lvlError, format("%1% timed out after %2% seconds of silence") - % goal->getName() % maxSilentTime); + % goal->getName() % settings.maxSilentTime); goal->cancel(); } - if (buildTimeout != 0 && - after - before >= (time_t) buildTimeout) + if (settings.buildTimeout != 0 && + after - before >= (time_t) settings.buildTimeout) { printMsg(lvlError, format("%1% timed out after %2% seconds of activity") - % goal->getName() % buildTimeout); + % goal->getName() % settings.buildTimeout); goal->cancel(); } } - if (!waitingForAWhile.empty() && lastWokenUp + wakeUpInterval <= after) { + if 
(!waitingForAWhile.empty() && lastWokenUp + settings.pollInterval <= after) { lastWokenUp = after; foreach (WeakGoals::iterator, i, waitingForAWhile) { GoalPtr goal = i->lock(); diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 97343d57d478..73047c7538a0 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -12,7 +12,7 @@ void DerivationOutput::parseHashInfo(bool & recursive, HashType & hashType, Hash { recursive = false; string algo = hashAlgo; - + if (string(algo, 0, 2) == "r:") { recursive = true; algo = string(algo, 2); @@ -21,7 +21,7 @@ void DerivationOutput::parseHashInfo(bool & recursive, HashType & hashType, Hash hashType = parseHashType(algo); if (hashType == htUnknown) throw Error(format("unknown hash algorithm `%1%'") % algo); - + hash = parseHash(hashType, this->hash); } @@ -38,7 +38,7 @@ Path writeDerivation(StoreAPI & store, held during a garbage collection). */ string suffix = name + drvExtension; string contents = unparseDerivation(drv); - return readOnlyMode + return settings.readOnlyMode ? computeStorePathForText(suffix, contents, references) : store.addTextToStore(suffix, contents, references); } @@ -51,7 +51,7 @@ static Path parsePath(std::istream & str) throw Error(format("bad path `%1%' in derivation") % s); return s; } - + static StringSet parseStrings(std::istream & str, bool arePaths) { @@ -60,7 +60,7 @@ static StringSet parseStrings(std::istream & str, bool arePaths) res.insert(arePaths ? 
parsePath(str) : parseString(str)); return res; } - + Derivation parseDerivation(const string & s) { @@ -106,7 +106,7 @@ Derivation parseDerivation(const string & s) expect(str, ")"); drv.env[name] = value; } - + expect(str, ")"); return drv; } @@ -165,7 +165,7 @@ string unparseDerivation(const Derivation & drv) s += "],"; printStrings(s, drv.inputSrcs.begin(), drv.inputSrcs.end()); - + s += ','; printString(s, drv.platform); s += ','; printString(s, drv.builder); s += ','; printStrings(s, drv.args.begin(), drv.args.end()); @@ -178,9 +178,9 @@ string unparseDerivation(const Derivation & drv) s += ','; printString(s, i->second); s += ')'; } - + s += "])"; - + return s; } @@ -190,7 +190,7 @@ bool isDerivation(const string & fileName) return hasSuffix(fileName, drvExtension); } - + bool isFixedOutputDrv(const Derivation & drv) { return drv.outputs.size() == 1 && @@ -247,7 +247,7 @@ Hash hashDerivationModulo(StoreAPI & store, Derivation drv) inputs2[printHash(h)] = i->second; } drv.inputDrvs = inputs2; - + return hashString(htSHA256, unparseDerivation(drv)); } diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 874efe4d32d9..1355702f8701 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -34,10 +34,10 @@ static const int defaultGcLevel = 1000; int LocalStore::openGCLock(LockType lockType) { Path fnGCLock = (format("%1%/%2%") - % nixStateDir % gcLockName).str(); - + % settings.nixStateDir % gcLockName).str(); + debug(format("acquiring global GC lock `%1%'") % fnGCLock); - + AutoCloseFD fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT, 0600); if (fdGCLock == -1) throw SysError(format("opening global GC lock `%1%'") % fnGCLock); @@ -51,7 +51,7 @@ int LocalStore::openGCLock(LockType lockType) /* !!! Restrict read permission on the GC root. Otherwise any process that can open the file for reading can DoS the collector. 
*/ - + return fdGCLock.borrow(); } @@ -85,7 +85,7 @@ void LocalStore::addIndirectRoot(const Path & path) { string hash = printHash32(hashString(htSHA1, path)); Path realRoot = canonPath((format("%1%/%2%/auto/%3%") - % nixStateDir % gcRootsDir % hash).str()); + % settings.nixStateDir % gcRootsDir % hash).str()); createSymlink(realRoot, path); } @@ -113,15 +113,15 @@ Path addPermRoot(StoreAPI & store, const Path & _storePath, else { if (!allowOutsideRootsDir) { - Path rootsDir = canonPath((format("%1%/%2%") % nixStateDir % gcRootsDir).str()); - + Path rootsDir = canonPath((format("%1%/%2%") % settings.nixStateDir % gcRootsDir).str()); + if (string(gcRoot, 0, rootsDir.size() + 1) != rootsDir + "/") throw Error(format( "path `%1%' is not a valid garbage collector root; " "it's not in the directory `%2%'") % gcRoot % rootsDir); } - + createSymlink(gcRoot, storePath); } @@ -130,10 +130,10 @@ Path addPermRoot(StoreAPI & store, const Path & _storePath, Instead of reading all the roots, it would be more efficient to check if the root is in a directory in or linked from the gcroots directory. */ - if (queryBoolSetting("gc-check-reachability", false)) { + if (settings.checkRootReachability) { Roots roots = store.findRoots(); if (roots.find(gcRoot) == roots.end()) - printMsg(lvlError, + printMsg(lvlError, format( "warning: `%1%' is not in a directory where the garbage collector looks for roots; " "therefore, `%2%' might be removed by the garbage collector") @@ -144,7 +144,7 @@ Path addPermRoot(StoreAPI & store, const Path & _storePath, progress. This prevents the set of permanent roots from increasing while a GC is in progress. 
*/ store.syncWithGC(); - + return gcRoot; } @@ -160,23 +160,23 @@ void LocalStore::addTempRoot(const Path & path) if (fdTempRoots == -1) { while (1) { - Path dir = (format("%1%/%2%") % nixStateDir % tempRootsDir).str(); + Path dir = (format("%1%/%2%") % settings.nixStateDir % tempRootsDir).str(); createDirs(dir); - + fnTempRoots = (format("%1%/%2%") % dir % getpid()).str(); AutoCloseFD fdGCLock = openGCLock(ltRead); - + if (pathExists(fnTempRoots)) /* It *must* be stale, since there can be no two processes with the same pid. */ unlink(fnTempRoots.c_str()); - fdTempRoots = openLockFile(fnTempRoots, true); + fdTempRoots = openLockFile(fnTempRoots, true); fdGCLock.close(); - + debug(format("acquiring read lock on `%1%'") % fnTempRoots); lockFile(fdTempRoots, ltRead, true); @@ -186,7 +186,7 @@ void LocalStore::addTempRoot(const Path & path) if (fstat(fdTempRoots, &st) == -1) throw SysError(format("statting `%1%'") % fnTempRoots); if (st.st_size == 0) break; - + /* The garbage collector deleted this file before we could get a lock. (It won't delete the file after we get a lock.) Try again. */ @@ -218,7 +218,7 @@ void removeTempRoots() /* Automatically clean up the temporary roots file when we exit. */ -struct RemoveTempRoots +struct RemoveTempRoots { ~RemoveTempRoots() { @@ -238,10 +238,10 @@ static void readTempRoots(PathSet & tempRoots, FDs & fds) /* Read the `temproots' directory for per-process temporary root files. 
*/ Strings tempRootFiles = readDirectory( - (format("%1%/%2%") % nixStateDir % tempRootsDir).str()); + (format("%1%/%2%") % settings.nixStateDir % tempRootsDir).str()); foreach (Strings::iterator, i, tempRootFiles) { - Path path = (format("%1%/%2%/%3%") % nixStateDir % tempRootsDir % *i).str(); + Path path = (format("%1%/%2%/%3%") % settings.nixStateDir % tempRootsDir % *i).str(); debug(format("reading temporary root file `%1%'") % path); FDPtr fd(new AutoCloseFD(open(path.c_str(), O_RDWR, 0666))); @@ -295,7 +295,7 @@ static void findRoots(StoreAPI & store, const Path & path, bool recurseSymlinks, bool deleteStale, Roots & roots) { try { - + struct stat st; if (lstat(path.c_str(), &st) == -1) throw SysError(format("statting `%1%'") % path); @@ -315,7 +315,7 @@ static void findRoots(StoreAPI & store, const Path & path, debug(format("found root `%1%' in `%2%'") % target % path); Path storePath = toStorePath(target); - if (store.isValidPath(storePath)) + if (store.isValidPath(storePath)) roots[path] = storePath; else printMsg(lvlInfo, format("skipping invalid root from `%1%' to `%2%'") @@ -350,7 +350,7 @@ static void findRoots(StoreAPI & store, const Path & path, static Roots findRoots(StoreAPI & store, bool deleteStale) { Roots roots; - Path rootsDir = canonPath((format("%1%/%2%") % nixStateDir % gcRootsDir).str()); + Path rootsDir = canonPath((format("%1%/%2%") % settings.nixStateDir % gcRootsDir).str()); findRoots(store, rootsDir, true, deleteStale, roots); return roots; } @@ -365,16 +365,16 @@ Roots LocalStore::findRoots() static void addAdditionalRoots(StoreAPI & store, PathSet & roots) { Path rootFinder = getEnv("NIX_ROOT_FINDER", - nixLibexecDir + "/nix/find-runtime-roots.pl"); + settings.nixLibexecDir + "/nix/find-runtime-roots.pl"); if (rootFinder.empty()) return; - + debug(format("executing `%1%' to find additional roots") % rootFinder); string result = runProgram(rootFinder); Strings paths = tokenizeString(result, "\n"); - + foreach (Strings::iterator, i, 
paths) { if (isInStore(*i)) { Path path = toStorePath(*i); @@ -557,7 +557,7 @@ bool LocalStore::tryToDelete(GCState & state, const Path & path) } else printMsg(lvlTalkative, format("would delete `%1%'") % path); - + state.deleted.insert(path); if (state.options.action != GCOptions::gcReturnLive) state.results.paths.insert(path); @@ -605,10 +605,10 @@ void LocalStore::removeUnusedLinks() void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) { GCState state(results); - state.options = options; - - state.gcKeepOutputs = queryBoolSetting("gc-keep-outputs", false); - state.gcKeepDerivations = queryBoolSetting("gc-keep-derivations", true); + state.options = options; + + state.gcKeepOutputs = settings.gcKeepOutputs; + state.gcKeepDerivations = settings.gcKeepDerivations; /* Using `--ignore-liveness' with `--delete' can have unintended consequences if `gc-keep-outputs' or `gc-keep-derivations' are @@ -618,7 +618,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) state.gcKeepOutputs = false; state.gcKeepDerivations = false; } - + /* Acquire the global GC root. This prevents a) New roots from being added. b) Processes from creating new temporary root files. */ @@ -659,18 +659,18 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) if (!tryToDelete(state, *i)) throw Error(format("cannot delete path `%1%' since it is still alive") % *i); } - + } else { - + if (shouldDelete(state.options.action)) printMsg(lvlError, format("deleting garbage...")); else printMsg(lvlError, format("determining live/dead paths...")); - + try { - AutoCloseDir dir = opendir(nixStore.c_str()); - if (!dir) throw SysError(format("opening directory `%1%'") % nixStore); + AutoCloseDir dir = opendir(settings.nixStore.c_str()); + if (!dir) throw SysError(format("opening directory `%1%'") % settings.nixStore); /* Read the store and immediately delete all paths that aren't valid. 
When using --max-freed etc., deleting @@ -684,14 +684,14 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) checkInterrupt(); string name = dirent->d_name; if (name == "." || name == "..") continue; - Path path = nixStore + "/" + name; + Path path = settings.nixStore + "/" + name; if (isValidPath(path)) entries.push_back(path); else tryToDelete(state, path); } - dir.close(); + dir.close(); /* Now delete the unreachable valid paths. Randomise the order in which we delete entries to make the collector diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index a28e08427da5..7dc2e714bc9d 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -10,38 +10,63 @@ namespace nix { -string nixStore = "/UNINIT"; -string nixDataDir = "/UNINIT"; -string nixLogDir = "/UNINIT"; -string nixStateDir = "/UNINIT"; -string nixDBPath = "/UNINIT"; -string nixConfDir = "/UNINIT"; -string nixLibexecDir = "/UNINIT"; -string nixBinDir = "/UNINIT"; +Settings settings; -bool keepFailed = false; -bool keepGoing = false; -bool tryFallback = false; -Verbosity buildVerbosity = lvlError; -unsigned int maxBuildJobs = 1; -unsigned int buildCores = 1; -bool readOnlyMode = false; -string thisSystem = "unset"; -time_t maxSilentTime = 0; -time_t buildTimeout = 0; -Paths substituters; -bool useBuildHook = true; -bool printBuildTrace = false; +Settings::Settings() +{ + keepFailed = false; + keepGoing = false; + tryFallback = false; + buildVerbosity = lvlError; + maxBuildJobs = 1; + buildCores = 1; + readOnlyMode = false; + thisSystem = SYSTEM; + maxSilentTime = 0; + buildTimeout = 0; + useBuildHook = true; + printBuildTrace = false; + reservedSize = 1024 * 1024; + fsyncMetadata = true; + useSQLiteWAL = true; + syncBeforeRegistering = false; + useSubstitutes = true; + useChroot = false; + dirsInChroot.insert("/dev"); + dirsInChroot.insert("/dev/pts"); + impersonateLinux26 = false; + keepLog = true; + compressLog = true; + cacheFailure = false; + 
pollInterval = 5; + checkRootReachability = false; + gcKeepOutputs = false; + gcKeepDerivations = true; + autoOptimiseStore = true; + envKeepDerivations = false; +} -static bool settingsRead = false; - -typedef std::map Settings; -static Settings settings; +void Settings::processEnvironment() +{ + nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))); + nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)); + nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)); + nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR)); + nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db"); + nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR)); + nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)); + nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)); -/* Overriden settings. */ -Settings settingsCmdline; + string subs = getEnv("NIX_SUBSTITUTERS", "default"); + if (subs == "default") { + substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl"); + substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl"); + substituters.push_back(nixLibexecDir + "/nix/substituters/download-from-binary-cache.pl"); + } else + substituters = tokenizeString(subs, ":"); +} string & at(Strings & ss, unsigned int n) @@ -52,7 +77,7 @@ string & at(Strings & ss, unsigned int n) } -static void readSettings() +void Settings::loadConfFile() { Path settingsFile = (format("%1%/%2%") % nixConfDir % "nix.conf").str(); if (!pathExists(settingsFile)) return; @@ -80,104 +105,88 @@ static void readSettings() Strings::iterator i = tokens.begin(); advance(i, 2); - settings[name] = Strings(i, tokens.end()); + settings[name] = concatStringsSep(" ", Strings(i, tokens.end())); // FIXME: slow }; - - settings.insert(settingsCmdline.begin(), settingsCmdline.end()); - - settingsRead = true; } -Strings querySetting(const string & name, const Strings & def) +void Settings::set(const string & 
name, const string & value) { - if (!settingsRead) readSettings(); - Settings::iterator i = settings.find(name); - return i == settings.end() ? def : i->second; + settings[name] = value; } -string querySetting(const string & name, const string & def) +void Settings::update() { - Strings defs; - defs.push_back(def); - - Strings value = querySetting(name, defs); - if (value.size() != 1) - throw Error(format("configuration option `%1%' should not be a list") % name); - - return value.front(); + get(thisSystem, "system"); + get(maxBuildJobs, "build-max-jobs"); + get(buildCores, "build-cores"); + get(maxSilentTime, "build-max-silent-time"); + get(buildTimeout, "build-timeout"); + get(reservedSize, "gc-reserved-space"); + get(fsyncMetadata, "fsync-metadata"); + get(useSQLiteWAL, "use-sqlite-wal"); + get(syncBeforeRegistering, "sync-before-registering"); + get(useSubstitutes, "build-use-substitutes"); + get(buildUsersGroup, "build-users-group"); + get(useChroot, "build-use-chroot"); + get(dirsInChroot, "build-chroot-dirs"); + get(impersonateLinux26, "build-impersonate-linux-26"); + get(keepLog, "build-keep-log"); + get(compressLog, "build-compress-log"); + get(cacheFailure, "build-cache-failure"); + get(pollInterval, "build-poll-interval"); + get(checkRootReachability, "gc-check-reachability"); + get(gcKeepOutputs, "gc-keep-outputs"); + get(gcKeepDerivations, "gc-keep-derivations"); + get(autoOptimiseStore, "auto-optimise-store"); + get(envKeepDerivations, "env-keep-derivations"); } -bool queryBoolSetting(const string & name, bool def) +void Settings::get(string & res, const string & name) { - string v = querySetting(name, def ? 
"true" : "false"); - if (v == "true") return true; - else if (v == "false") return false; - else throw Error(format("configuration option `%1%' should be either `true' or `false', not `%2%'") - % name % v); + SettingsMap::iterator i = settings.find(name); + if (i == settings.end()) return; + res = i->second; } -unsigned int queryIntSetting(const string & name, unsigned int def) +void Settings::get(bool & res, const string & name) { - int n; - if (!string2Int(querySetting(name, int2String(def)), n) || n < 0) - throw Error(format("configuration setting `%1%' should have an integer value") % name); - return n; -} - - -void overrideSetting(const string & name, const Strings & value) -{ - if (settingsRead) settings[name] = value; - settingsCmdline[name] = value; + SettingsMap::iterator i = settings.find(name); + if (i == settings.end()) return; + if (i->second == "true") res = true; + else if (i->second == "false") res = false; + else throw Error(format("configuration option `%1%' should be either `true' or `false', not `%2%'") + % name % i->second); } -void reloadSettings() +void Settings::get(PathSet & res, const string & name) { - settingsRead = false; - settings.clear(); + SettingsMap::iterator i = settings.find(name); + if (i == settings.end()) return; + res.clear(); + Strings ss = tokenizeString(i->second); + res.insert(ss.begin(), ss.end()); } -void setDefaultsFromEnvironment() +template void Settings::get(N & res, const string & name) { - /* Setup Nix paths. 
*/ - nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))); - nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)); - nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)); - nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR)); - nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db"); - nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR)); - nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)); - nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)); - - string subs = getEnv("NIX_SUBSTITUTERS", "default"); - if (subs == "default") { - substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl"); - substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl"); - substituters.push_back(nixLibexecDir + "/nix/substituters/download-from-binary-cache.pl"); - } else - substituters = tokenizeString(subs, ":"); - - /* Get some settings from the configuration file. */ - thisSystem = querySetting("system", SYSTEM); - maxBuildJobs = queryIntSetting("build-max-jobs", 1); - buildCores = queryIntSetting("build-cores", 1); - maxSilentTime = queryIntSetting("build-max-silent-time", 0); - buildTimeout = queryIntSetting("build-timeout", 0); + SettingsMap::iterator i = settings.find(name); + if (i == settings.end()) return; + if (!string2Int(i->second, res)) + throw Error(format("configuration setting `%1%' should have an integer value") % name); } -string packSettings() +string Settings::pack() { string s; - if (!settingsRead) readSettings(); - foreach (Settings::iterator, i, settings) { - s += i->first; s += '='; s += concatStringsSep(" ", i->second); s += '\n'; + foreach (SettingsMap::iterator, i, settings) { + s += i->first; s += '='; s += i->second; s += '\n'; } return s; } diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 30acf59ef54f..5783d9bf37ba 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -2,120 +2,189 
@@ #include "types.hh" +#include + namespace nix { -/* Path names. */ +struct Settings { + + Settings(); + + void processEnvironment(); + + void loadConfFile(); + + void set(const string & name, const string & value); + + void update(); + + string pack(); + + /* The directory where we store sources and derived files. */ + Path nixStore; + + Path nixDataDir; /* !!! fix */ + + /* The directory where we log various operations. */ + Path nixLogDir; + + /* The directory where state is stored. */ + Path nixStateDir; + + /* The directory where we keep the SQLite database. */ + Path nixDBPath; + + /* The directory where configuration files are stored. */ + Path nixConfDir; + + /* The directory where internal helper programs are stored. */ + Path nixLibexecDir; + + /* The directory where the main programs are stored. */ + Path nixBinDir; + + /* Whether to keep temporary directories of failed builds. */ + bool keepFailed; + + /* Whether to keep building subgoals when a sibling (another + subgoal of the same goal) fails. */ + bool keepGoing; + + /* Whether, if we cannot realise the known closure corresponding + to a derivation, we should try to normalise the derivation + instead. */ + bool tryFallback; + + /* Verbosity level for build output. */ + Verbosity buildVerbosity; -/* nixStore is the directory where we generally store atomic and - derived files. */ -extern string nixStore; + /* Maximum number of parallel build jobs. 0 means unlimited. */ + unsigned int maxBuildJobs; -extern string nixDataDir; /* !!! fix */ + /* Number of CPU cores to utilize in parallel within a build, + i.e. by passing this number to Make via '-j'. 0 means that the + number of actual CPU cores on the local host ought to be + auto-detected. */ + unsigned int buildCores; -/* nixLogDir is the directory where we log various operations. */ -extern string nixLogDir; + /* Read-only mode. Don't copy stuff to the store, don't change + the database. 
*/ + bool readOnlyMode; -/* nixStateDir is the directory where state is stored. */ -extern string nixStateDir; + /* The canonical system name, as returned by config.guess. */ + string thisSystem; -/* nixDBPath is the path name of our Berkeley DB environment. */ -extern string nixDBPath; + /* The maximum time in seconds that a builer can go without + producing any output on stdout/stderr before it is killed. 0 + means infinity. */ + time_t maxSilentTime; -/* nixConfDir is the directory where configuration files are - stored. */ -extern string nixConfDir; + /* The maximum duration in seconds that a builder can run. 0 + means infinity. */ + time_t buildTimeout; -/* nixLibexecDir is the directory where internal helper programs are - stored. */ -extern string nixLibexecDir; + /* The substituters. There are programs that can somehow realise + a store path without building, e.g., by downloading it or + copying it from a CD. */ + Paths substituters; -/* nixBinDir is the directory where the main programs are stored. */ -extern string nixBinDir; + /* Whether to use build hooks (for distributed builds). Sometimes + users want to disable this from the command-line. */ + bool useBuildHook; + /* Whether buildDerivations() should print out lines on stderr in + a fixed format to allow its progress to be monitored. Each + line starts with a "@". The following are defined: -/* Misc. global flags. */ + @ build-started + @ build-failed + @ build-succeeded + @ substituter-started + @ substituter-failed + @ substituter-succeeded -/* Whether to keep temporary directories of failed builds. */ -extern bool keepFailed; + Best combined with --no-build-output, otherwise stderr might + conceivably contain lines in this format printed by the + builders. */ + bool printBuildTrace; -/* Whether to keep building subgoals when a sibling (another subgoal - of the same goal) fails. */ -extern bool keepGoing; + /* Amount of reserved space for the garbage collector + (/nix/var/nix/db/reserved). 
*/ + off_t reservedSize; -/* Whether, if we cannot realise the known closure corresponding to a - derivation, we should try to normalise the derivation instead. */ -extern bool tryFallback; + /* Whether SQLite should use fsync. */ + bool fsyncMetadata; -/* Verbosity level for build output. */ -extern Verbosity buildVerbosity; + /* Whether SQLite should use WAL mode. */ + bool useSQLiteWAL; -/* Maximum number of parallel build jobs. 0 means unlimited. */ -extern unsigned int maxBuildJobs; + /* Whether to call sync() before registering a path as valid. */ + bool syncBeforeRegistering; -/* Number of CPU cores to utilize in parallel within a build, i.e. by passing - this number to Make via '-j'. 0 means that the number of actual CPU cores on - the local host ought to be auto-detected. */ -extern unsigned int buildCores; + /* Whether to use substitutes. */ + bool useSubstitutes; -/* Read-only mode. Don't copy stuff to the store, don't change the - database. */ -extern bool readOnlyMode; + /* The Unix group that contains the build users. */ + string buildUsersGroup; -/* The canonical system name, as returned by config.guess. */ -extern string thisSystem; + /* Whether to build in chroot. */ + bool useChroot; -/* The maximum time in seconds that a builer can go without producing - any output on stdout/stderr before it is killed. 0 means - infinity. */ -extern time_t maxSilentTime; + /* The directories from the host filesystem to be included in the + chroot. */ + PathSet dirsInChroot; -/* The maximum duration in seconds that a builder can run. 0 means - infinity. */ -extern time_t buildTimeout; + /* Whether to impersonate a Linux 2.6 machine on newer kernels. */ + bool impersonateLinux26; -/* The substituters. There are programs that can somehow realise a - store path without building, e.g., by downloading it or copying it - from a CD. */ -extern Paths substituters; + /* Whether to store build logs. */ + bool keepLog; -/* Whether to use build hooks (for distributed builds). 
Sometimes - users want to disable this from the command-line. */ -extern bool useBuildHook; + /* Whether to compress logs. */ + bool compressLog; -/* Whether buildDerivations() should print out lines on stderr in a - fixed format to allow its progress to be monitored. Each line - starts with a "@". The following are defined: + /* Whether to cache build failures. */ + bool cacheFailure; - @ build-started - @ build-failed - @ build-succeeded - @ substituter-started - @ substituter-failed - @ substituter-succeeded + /* How often (in seconds) to poll for locks. */ + unsigned int pollInterval; - Best combined with --no-build-output, otherwise stderr might - conceivably contain lines in this format printed by the builders. -*/ -extern bool printBuildTrace; + /* Whether to check if new GC roots can in fact be found by the + garbage collector. */ + bool checkRootReachability; + /* Whether the garbage collector should keep outputs of live + derivations. */ + bool gcKeepOutputs; -Strings querySetting(const string & name, const Strings & def); + /* Whether the garbage collector should keep derivers of live + paths. */ + bool gcKeepDerivations; -string querySetting(const string & name, const string & def); + /* Whether to automatically replace files with identical contents + with hard links. */ + bool autoOptimiseStore; -bool queryBoolSetting(const string & name, bool def); + /* Whether to add derivations as a dependency of user environments + (to prevent them from being GCed). 
*/ + bool envKeepDerivations; -unsigned int queryIntSetting(const string & name, unsigned int def); +private: + typedef std::map SettingsMap; -void overrideSetting(const string & name, const Strings & value); + SettingsMap settings; -void reloadSettings(); + void get(string & res, const string & name); + void get(bool & res, const string & name); + void get(PathSet & res, const string & name); + template void get(N & res, const string & name); +}; -void setDefaultsFromEnvironment(); -string packSettings(); +// FIXME: don't use a global variable. +extern Settings settings; } diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index aaa1abb56921..f20324d4e3d0 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -181,7 +181,7 @@ struct SQLiteTxn void checkStoreNotSymlink() { if (getEnv("NIX_IGNORE_SYMLINK_STORE") == "1") return; - Path path = nixStore; + Path path = settings.nixStore; struct stat st; while (path != "/") { if (lstat(path.c_str(), &st)) @@ -198,21 +198,21 @@ void checkStoreNotSymlink() LocalStore::LocalStore(bool reserveSpace) { - schemaPath = nixDBPath + "/schema"; + schemaPath = settings.nixDBPath + "/schema"; - if (readOnlyMode) { + if (settings.readOnlyMode) { openDB(false); return; } /* Create missing state directories if they don't already exist. 
*/ - createDirs(nixStore); - createDirs(linksDir = nixStore + "/.links"); - Path profilesDir = nixStateDir + "/profiles"; - createDirs(nixStateDir + "/profiles"); - createDirs(nixStateDir + "/temproots"); - createDirs(nixDBPath); - Path gcRootsDir = nixStateDir + "/gcroots"; + createDirs(settings.nixStore); + createDirs(linksDir = settings.nixStore + "/.links"); + Path profilesDir = settings.nixStateDir + "/profiles"; + createDirs(settings.nixStateDir + "/profiles"); + createDirs(settings.nixStateDir + "/temproots"); + createDirs(settings.nixDBPath); + Path gcRootsDir = settings.nixStateDir + "/gcroots"; if (!pathExists(gcRootsDir)) { createDirs(gcRootsDir); if (symlink(profilesDir.c_str(), (gcRootsDir + "/profiles").c_str()) == -1) @@ -226,13 +226,12 @@ LocalStore::LocalStore(bool reserveSpace) needed, we reserve some dummy space that we can free just before doing a garbage collection. */ try { - Path reservedPath = nixDBPath + "/reserved"; + Path reservedPath = settings.nixDBPath + "/reserved"; if (reserveSpace) { - int reservedSize = queryIntSetting("gc-reserved-space", 1024 * 1024); struct stat st; if (stat(reservedPath.c_str(), &st) == -1 || - st.st_size != reservedSize) - writeFile(reservedPath, string(reservedSize, 'X')); + st.st_size != settings.reservedSize) + writeFile(reservedPath, string(settings.reservedSize, 'X')); } else deletePath(reservedPath); @@ -242,11 +241,11 @@ LocalStore::LocalStore(bool reserveSpace) /* Acquire the big fat lock in shared mode to make sure that no schema upgrade is in progress. */ try { - Path globalLockPath = nixDBPath + "/big-lock"; + Path globalLockPath = settings.nixDBPath + "/big-lock"; globalLock = openLockFile(globalLockPath.c_str(), true); } catch (SysError & e) { if (e.errNo != EACCES) throw; - readOnlyMode = true; + settings.readOnlyMode = true; openDB(false); return; } @@ -325,7 +324,7 @@ int LocalStore::getSchema() void LocalStore::openDB(bool create) { /* Open the Nix database. 
*/ - if (sqlite3_open_v2((nixDBPath + "/db.sqlite").c_str(), &db.db, + if (sqlite3_open_v2((settings.nixDBPath + "/db.sqlite").c_str(), &db.db, SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK) throw Error("cannot open SQLite database"); @@ -342,13 +341,13 @@ void LocalStore::openDB(bool create) should be safe enough. If the user asks for it, don't sync at all. This can cause database corruption if the system crashes. */ - string syncMode = queryBoolSetting("fsync-metadata", true) ? "normal" : "off"; + string syncMode = settings.fsyncMetadata ? "normal" : "off"; if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK) throwSQLiteError(db, "setting synchronous mode"); /* Set the SQLite journal mode. WAL mode is fastest, so it's the default. */ - string mode = queryBoolSetting("use-sqlite-wal", true) ? "wal" : "truncate"; + string mode = settings.useSQLiteWAL ? "wal" : "truncate"; string prevMode; { SQLiteStmt stmt; @@ -890,7 +889,7 @@ Path LocalStore::queryPathFromHashPart(const string & hashPart) SQLiteTxn txn(db); - Path prefix = nixStore + "/" + hashPart; + Path prefix = settings.nixStore + "/" + hashPart; SQLiteStmtUse use(stmtQueryPathFromHashPart); stmtQueryPathFromHashPart.bind(prefix); @@ -933,7 +932,7 @@ void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & /* Pass configuration options (including those overriden with --option) to the substituter. 
*/ - setenv("_NIX_OPTIONS", packSettings().c_str(), 1); + setenv("_NIX_OPTIONS", settings.pack().c_str(), 1); fromPipe.readSide.close(); toPipe.writeSide.close(); @@ -969,7 +968,7 @@ template T getIntLine(int fd) PathSet LocalStore::querySubstitutablePaths(const PathSet & paths) { PathSet res; - foreach (Paths::iterator, i, substituters) { + foreach (Paths::iterator, i, settings.substituters) { if (res.size() == paths.size()) break; RunningSubstituter & run(runningSubstituters[*i]); startSubstituter(*i, run); @@ -1023,7 +1022,7 @@ void LocalStore::querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos) { PathSet todo = paths; - foreach (Paths::iterator, i, substituters) { + foreach (Paths::iterator, i, settings.substituters) { if (todo.empty()) break; querySubstitutablePathInfos(*i, todo, infos); } @@ -1046,11 +1045,10 @@ void LocalStore::registerValidPath(const ValidPathInfo & info) void LocalStore::registerValidPaths(const ValidPathInfos & infos) { - /* sqlite will fsync by default, but the new valid paths may not be fsync-ed. + /* SQLite will fsync by default, but the new valid paths may not be fsync-ed. * So some may want to fsync them before registering the validity, at the * expense of some speed of the path registering operation. */ - if (queryBoolSetting("sync-before-registering", false)) - sync(); + if (settings.syncBeforeRegistering) sync(); while (1) { try { @@ -1294,7 +1292,7 @@ void LocalStore::exportPath(const Path & path, bool sign, Path hashFile = tmpDir + "/hash"; writeFile(hashFile, printHash(hash)); - Path secretKey = nixConfDir + "/signing-key.sec"; + Path secretKey = settings.nixConfDir + "/signing-key.sec"; checkSecrecy(secretKey); Strings args; @@ -1340,7 +1338,7 @@ Path LocalStore::createTempDirInStore() /* There is a slight possibility that `tmpDir' gets deleted by the GC between createTempDir() and addTempRoot(), so repeat until `tmpDir' exists. 
*/ - tmpDir = createTempDir(nixStore); + tmpDir = createTempDir(settings.nixStore); addTempRoot(tmpDir); } while (!pathExists(tmpDir)); return tmpDir; @@ -1392,7 +1390,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source) args.push_back("rsautl"); args.push_back("-verify"); args.push_back("-inkey"); - args.push_back(nixConfDir + "/signing-key.pub"); + args.push_back(settings.nixConfDir + "/signing-key.pub"); args.push_back("-pubin"); args.push_back("-in"); args.push_back(sigFile); @@ -1501,7 +1499,7 @@ void LocalStore::verifyStore(bool checkContents) /* Acquire the global GC lock to prevent a garbage collection. */ AutoCloseFD fdGCLock = openGCLock(ltWrite); - Paths entries = readDirectory(nixStore); + Paths entries = readDirectory(settings.nixStore); PathSet store(entries.begin(), entries.end()); /* Check whether all valid paths actually exist. */ @@ -1611,9 +1609,9 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store, PathSet LocalStore::queryValidPathsOld() { PathSet paths; - Strings entries = readDirectory(nixDBPath + "/info"); + Strings entries = readDirectory(settings.nixDBPath + "/info"); foreach (Strings::iterator, i, entries) - if (i->at(0) != '.') paths.insert(nixStore + "/" + *i); + if (i->at(0) != '.') paths.insert(settings.nixStore + "/" + *i); return paths; } @@ -1625,7 +1623,7 @@ ValidPathInfo LocalStore::queryPathInfoOld(const Path & path) /* Read the info file. 
*/ string baseName = baseNameOf(path); - Path infoFile = (format("%1%/info/%2%") % nixDBPath % baseName).str(); + Path infoFile = (format("%1%/info/%2%") % settings.nixDBPath % baseName).str(); if (!pathExists(infoFile)) throw Error(format("path `%1%' is not valid") % path); string info = readFile(infoFile); diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 4761658ed830..7cf9fc18d207 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -75,7 +75,7 @@ struct SQLiteStmt void bind64(long long value); void bind(); }; - + class LocalStore : public StoreAPI { @@ -84,7 +84,7 @@ private: RunningSubstituters runningSubstituters; Path linksDir; - + public: /* Initialise the local store, upgrading the schema if @@ -92,15 +92,15 @@ public: LocalStore(bool reserveSpace = true); ~LocalStore(); - + /* Implementations of abstract store API methods. */ - + bool isValidPath(const Path & path); PathSet queryValidPaths(const PathSet & paths); - + PathSet queryAllValidPaths(); - + ValidPathInfo queryPathInfo(const Path & path); Hash queryPathHash(const Path & path); @@ -120,17 +120,17 @@ public: PathSet queryDerivationOutputs(const Path & path); StringSet queryDerivationOutputNames(const Path & path); - + Path queryPathFromHashPart(const string & hashPart); - + PathSet querySubstitutablePaths(const PathSet & paths); void querySubstitutablePathInfos(const Path & substituter, PathSet & paths, SubstitutablePathInfos & infos); - + void querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos); - + Path addToStore(const Path & srcPath, bool recursive = true, HashType hashAlgo = htSHA256, PathFilter & filter = defaultPathFilter); @@ -149,7 +149,7 @@ public: Sink & sink); Paths importPaths(bool requireSignature, Source & source); - + void buildPaths(const PathSet & paths); void ensurePath(const Path & path); @@ -157,7 +157,7 @@ public: void addTempRoot(const Path & path); void addIndirectRoot(const Path & path); - + 
void syncWithGC(); Roots findRoots(); @@ -170,7 +170,7 @@ public: /* Optimise a single store path. */ void optimisePath(const Path & path); - + /* Check the integrity of the Nix store. */ void verifyStore(bool checkContents); @@ -229,18 +229,18 @@ private: unsigned long long queryValidPathId(const Path & path); unsigned long long addValidPath(const ValidPathInfo & info, bool checkOutputs = true); - + void addReference(unsigned long long referrer, unsigned long long reference); - + void appendReferrer(const Path & from, const Path & to, bool lock); - + void rewriteReferrers(const Path & path, bool purge, PathSet referrers); void invalidatePath(const Path & path); /* Delete a path from the Nix store. */ void invalidatePathChecked(const Path & path); - + void verifyPath(const Path & path, const PathSet & store, PathSet & done, PathSet & validPaths); @@ -253,14 +253,14 @@ private: struct GCState; void deleteGarbage(GCState & state, const Path & path); - + bool tryToDelete(GCState & state, const Path & path); - + bool isActiveTempFile(const GCState & state, const Path & path, const string & suffix); - + int openGCLock(LockType lockType); - + void removeUnusedLinks(); void startSubstituter(const Path & substituter, @@ -269,7 +269,7 @@ private: Path createTempDirInStore(); Path importPath(bool requireSignature, Source & source); - + void checkDerivationOutputs(const Path & drvPath, const Derivation & drv); void optimisePath_(OptimiseStats & stats, const Path & path); @@ -290,9 +290,6 @@ void canonicalisePathMetaData(const Path & path, bool recurse); MakeError(PathInUse, Error); -/* Whether we are in build users mode. */ -bool haveBuildUsers(); - /* Whether we are root. 
*/ bool amPrivileged(); @@ -305,5 +302,5 @@ void deletePathWrapped(const Path & path, unsigned long long & bytesFreed, unsigned long long & blocksFreed); void deletePathWrapped(const Path & path); - + } diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index aa5f6ff727c9..3ce300e30678 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -52,10 +52,8 @@ void queryMissing(StoreAPI & store, const PathSet & targets, unsigned long long & downloadSize, unsigned long long & narSize) { downloadSize = narSize = 0; - - PathSet todo(targets.begin(), targets.end()), done; - bool useSubstitutes = queryBoolSetting("build-use-substitutes", true); + PathSet todo(targets.begin(), targets.end()), done; /* Getting substitute info has high latency when using the binary cache substituter. Thus it's essential to do substitute @@ -77,7 +75,7 @@ void queryMissing(StoreAPI & store, const PathSet & targets, */ while (!todo.empty()) { - + PathSet query, todoDrv, todoNonDrv; foreach (PathSet::iterator, i, todo) { @@ -96,9 +94,9 @@ void queryMissing(StoreAPI & store, const PathSet & targets, foreach (DerivationOutputs::iterator, j, drv.outputs) if (!store.isValidPath(j->second.path)) invalid.insert(j->second.path); if (invalid.empty()) continue; - + todoDrv.insert(*i); - if (useSubstitutes) query.insert(invalid.begin(), invalid.end()); + if (settings.useSubstitutes) query.insert(invalid.begin(), invalid.end()); } else { @@ -109,7 +107,7 @@ void queryMissing(StoreAPI & store, const PathSet & targets, } todo.clear(); - + SubstitutablePathInfos infos; store.querySubstitutablePathInfos(query, infos); @@ -118,7 +116,7 @@ void queryMissing(StoreAPI & store, const PathSet & targets, Derivation drv = derivationFromPath(store, *i); bool mustBuild = false; - if (useSubstitutes) { + if (settings.useSubstitutes) { foreach (DerivationOutputs::iterator, j, drv.outputs) if (!store.isValidPath(j->second.path) && infos.find(j->second.path) == infos.end()) @@ -135,7 +133,7 @@ void 
queryMissing(StoreAPI & store, const PathSet & targets, foreach (DerivationOutputs::iterator, i, drv.outputs) todoNonDrv.insert(i->second.path); } - + foreach (PathSet::iterator, i, todoNonDrv) { done.insert(*i); SubstitutablePathInfos::iterator info = infos.find(*i); @@ -150,22 +148,22 @@ void queryMissing(StoreAPI & store, const PathSet & targets, } } - + static void dfsVisit(StoreAPI & store, const PathSet & paths, const Path & path, PathSet & visited, Paths & sorted, PathSet & parents) { if (parents.find(path) != parents.end()) throw BuildError(format("cycle detected in the references of `%1%'") % path); - + if (visited.find(path) != visited.end()) return; visited.insert(path); parents.insert(path); - + PathSet references; if (store.isValidPath(path)) store.queryReferences(path, references); - + foreach (PathSet::iterator, i, references) /* Don't traverse into paths that don't exist. That can happen due to substitutes for non-existent paths. */ diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 84a72604bba2..e08ee1784510 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -17,7 +17,7 @@ static void makeWritable(const Path & path) { struct stat st; if (lstat(path.c_str(), &st)) - throw SysError(format("getting attributes of path `%1%'") % path); + throw SysError(format("getting attributes of path `%1%'") % path); if (S_ISDIR(st.st_mode) || S_ISREG(st.st_mode)) makeMutable(path); if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1) throw SysError(format("changing writability of `%1%'") % path); @@ -53,15 +53,15 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path) { struct stat st; if (lstat(path.c_str(), &st)) - throw SysError(format("getting attributes of path `%1%'") % path); + throw SysError(format("getting attributes of path `%1%'") % path); if (S_ISDIR(st.st_mode)) { Strings names = readDirectory(path); - foreach (Strings::iterator, i, names) - optimisePath_(stats, path + 
"/" + *i); + foreach (Strings::iterator, i, names) + optimisePath_(stats, path + "/" + *i); return; } - + /* We can hard link regular files and maybe symlinks. */ if (!S_ISREG(st.st_mode) #if CAN_LINK_SYMLINK @@ -69,7 +69,7 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path) && !S_ISLNK(st.st_mode) #endif ) return; - + /* Sometimes SNAFUs can cause files in the Nix store to be modified, in particular when running programs as root under NixOS (example: $fontconfig/var/cache being modified). Skip @@ -110,25 +110,25 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path) current file with a hard link to that file. */ struct stat stLink; if (lstat(linkPath.c_str(), &stLink)) - throw SysError(format("getting attributes of path `%1%'") % linkPath); - + throw SysError(format("getting attributes of path `%1%'") % linkPath); + stats.sameContents++; if (st.st_ino == stLink.st_ino) { printMsg(lvlDebug, format("`%1%' is already linked to `%2%'") % path % linkPath); return; } - + printMsg(lvlTalkative, format("linking `%1%' to `%2%'") % path % linkPath); Path tempLink = (format("%1%/.tmp-link-%2%-%3%") - % nixStore % getpid() % rand()).str(); + % settings.nixStore % getpid() % rand()).str(); /* Make the containing directory writable, but only if it's not the store itself (we don't want or need to mess with its permissions). */ bool mustToggle = !isStorePath(path); if (mustToggle) makeWritable(dirOf(path)); - + /* When we're done, make the directory read-only again and reset its timestamp back to 0. */ MakeReadOnly makeReadOnly(mustToggle ? 
dirOf(path) : ""); @@ -192,10 +192,8 @@ void LocalStore::optimiseStore(OptimiseStats & stats) void LocalStore::optimisePath(const Path & path) { - if (queryBoolSetting("auto-optimise-store", true)) { - OptimiseStats stats; - optimisePath_(stats, path); - } + OptimiseStats stats; + if (settings.autoOptimiseStore) optimisePath_(stats, path); } diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 5910ffd53094..c67e53bfb87e 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -100,7 +100,7 @@ void RemoteStore::forkSlave() /* Start the worker. */ Path worker = getEnv("NIX_WORKER"); if (worker == "") - worker = nixBinDir + "/nix-worker"; + worker = settings.nixBinDir + "/nix-worker"; child = fork(); @@ -142,7 +142,7 @@ void RemoteStore::connectToDaemon() if (fdSocket == -1) throw SysError("cannot create Unix domain socket"); - string socketPath = nixStateDir + DEFAULT_SOCKET_PATH; + string socketPath = settings.nixStateDir + DEFAULT_SOCKET_PATH; /* Urgh, sockaddr_un allows path names of only 108 characters. 
So chdir to the socket directory so that we can pass a relative @@ -184,23 +184,23 @@ RemoteStore::~RemoteStore() void RemoteStore::setOptions() { writeInt(wopSetOptions, to); - writeInt(keepFailed, to); - writeInt(keepGoing, to); - writeInt(tryFallback, to); + writeInt(settings.keepFailed, to); + writeInt(settings.keepGoing, to); + writeInt(settings.tryFallback, to); writeInt(verbosity, to); - writeInt(maxBuildJobs, to); - writeInt(maxSilentTime, to); + writeInt(settings.maxBuildJobs, to); + writeInt(settings.maxSilentTime, to); if (GET_PROTOCOL_MINOR(daemonVersion) >= 2) - writeInt(useBuildHook, to); + writeInt(settings.useBuildHook, to); if (GET_PROTOCOL_MINOR(daemonVersion) >= 4) { - writeInt(buildVerbosity, to); + writeInt(settings.buildVerbosity, to); writeInt(logType, to); - writeInt(printBuildTrace, to); + writeInt(settings.printBuildTrace, to); } if (GET_PROTOCOL_MINOR(daemonVersion) >= 6) - writeInt(buildCores, to); + writeInt(settings.buildCores, to); if (GET_PROTOCOL_MINOR(daemonVersion) >= 10) - writeInt(queryBoolSetting("build-use-substitutes", true), to); + writeInt(settings.useSubstitutes, to); processStderr(); } diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index b64988268cdb..6f81a9aab072 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -19,16 +19,16 @@ GCOptions::GCOptions() bool isInStore(const Path & path) { return path[0] == '/' - && string(path, 0, nixStore.size()) == nixStore - && path.size() >= nixStore.size() + 2 - && path[nixStore.size()] == '/'; + && string(path, 0, settings.nixStore.size()) == settings.nixStore + && path.size() >= settings.nixStore.size() + 2 + && path[settings.nixStore.size()] == '/'; } bool isStorePath(const Path & path) { return isInStore(path) - && path.find('/', nixStore.size() + 1) == Path::npos; + && path.find('/', settings.nixStore.size() + 1) == Path::npos; } @@ -43,7 +43,7 @@ Path toStorePath(const Path & path) { if (!isInStore(path)) throw Error(format("path `%1%' 
is not in the Nix store") % path); - Path::size_type slash = path.find('/', nixStore.size() + 1); + Path::size_type slash = path.find('/', settings.nixStore.size() + 1); if (slash == Path::npos) return path; else @@ -74,7 +74,7 @@ Path followLinksToStorePath(const Path & path) string storePathToName(const Path & path) { assertStorePath(path); - return string(path, nixStore.size() + 34); + return string(path, settings.nixStore.size() + 34); } @@ -173,11 +173,11 @@ Path makeStorePath(const string & type, { /* e.g., "source:sha256:1abc...:/nix/store:foo.tar.gz" */ string s = type + ":sha256:" + printHash(hash) + ":" - + nixStore + ":" + name; + + settings.nixStore + ":" + name; checkStoreName(name); - return nixStore + "/" + return settings.nixStore + "/" + printHash32(compressHash(hashString(htSHA256, s), 20)) + "-" + name; } diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index f06f23dad523..475bdd3669b0 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -55,7 +55,6 @@ struct Globals EvalState state; bool dryRun; bool preserveInstalled; - bool keepDerivations; string forceName; bool prebuiltOnly; }; @@ -266,8 +265,8 @@ static DrvInfos filterBySelector(EvalState & state, const DrvInfos & allElems, if (k != newest.end()) { d = j->first.system == k->second.first.system ? 0 : - j->first.system == thisSystem ? 1 : - k->second.first.system == thisSystem ? -1 : 0; + j->first.system == settings.thisSystem ? 1 : + k->second.first.system == settings.thisSystem ? 
-1 : 0; if (d == 0) d = comparePriorities(state, j->first, k->second.first); if (d == 0) @@ -498,7 +497,7 @@ static void installDerivations(Globals & globals, if (globals.dryRun) return; if (createUserEnv(globals.state, allElems, - profile, globals.keepDerivations, lockToken)) break; + profile, settings.envKeepDerivations, lockToken)) break; } } @@ -605,7 +604,7 @@ static void upgradeDerivations(Globals & globals, if (globals.dryRun) return; if (createUserEnv(globals.state, newElems, - globals.profile, globals.keepDerivations, lockToken)) break; + globals.profile, settings.envKeepDerivations, lockToken)) break; } } @@ -672,7 +671,7 @@ static void opSetFlag(Globals & globals, /* Write the new user environment. */ if (createUserEnv(globals.state, installedElems, - globals.profile, globals.keepDerivations, lockToken)) break; + globals.profile, settings.envKeepDerivations, lockToken)) break; } } @@ -740,7 +739,7 @@ static void uninstallDerivations(Globals & globals, Strings & selectors, if (globals.dryRun) return; if (createUserEnv(globals.state, newElems, - profile, globals.keepDerivations, lockToken)) break; + profile, settings.envKeepDerivations, lockToken)) break; } } @@ -869,7 +868,7 @@ static void opQuery(Globals & globals, enum { sInstalled, sAvailable } source = sInstalled; - readOnlyMode = true; /* makes evaluation a bit faster */ + settings.readOnlyMode = true; /* makes evaluation a bit faster */ for (Strings::iterator i = args.begin(); i != args.end(); ) { string arg = *i++; @@ -1262,9 +1261,6 @@ void run(Strings args) globals.preserveInstalled = false; globals.prebuiltOnly = false; - globals.keepDerivations = - queryBoolSetting("env-keep-derivations", false); - for (Strings::iterator i = args.begin(); i != args.end(); ) { string arg = *i++; @@ -1331,7 +1327,7 @@ void run(Strings args) Path profileLink = getHomeDir() + "/.nix-profile"; globals.profile = pathExists(profileLink) ? 
absPath(readLink(profileLink), dirOf(profileLink)) - : canonPath(nixStateDir + "/profiles/default"); + : canonPath(settings.nixStateDir + "/profiles/default"); } store = openStore(); diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index d86c9fc84572..270b4ddc2e90 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -94,11 +94,11 @@ void run(Strings args) if (arg == "-") readStdin = true; else if (arg == "--eval-only") { - readOnlyMode = true; + settings.readOnlyMode = true; evalOnly = true; } else if (arg == "--parse-only") { - readOnlyMode = true; + settings.readOnlyMode = true; parseOnly = evalOnly = true; } else if (arg == "--find-file") diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 941301d2e7a1..5ada79713636 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -47,7 +47,7 @@ LocalStore & ensureLocalStore() static Path useDeriver(Path path) -{ +{ if (!isDerivation(path)) { path = store->queryDeriver(path); if (path == "") @@ -89,18 +89,18 @@ static PathSet realisePath(const Path & path) static void opRealise(Strings opFlags, Strings opArgs) { bool dryRun = false; - + foreach (Strings::iterator, i, opFlags) if (*i == "--dry-run") dryRun = true; else throw UsageError(format("unknown flag `%1%'") % *i); foreach (Strings::iterator, i, opArgs) *i = followLinksToStorePath(*i); - + printMissing(*store, PathSet(opArgs.begin(), opArgs.end())); - + if (dryRun) return; - + /* Build all paths at the same time to exploit parallelism. 
*/ PathSet paths(opArgs.begin(), opArgs.end()); store->buildPaths(paths); @@ -128,7 +128,7 @@ static void opAdd(Strings opFlags, Strings opArgs) static void opAddFixed(Strings opFlags, Strings opArgs) { bool recursive = false; - + for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ++i) if (*i == "--recursive") recursive = true; @@ -136,7 +136,7 @@ static void opAddFixed(Strings opFlags, Strings opArgs) if (opArgs.empty()) throw UsageError("first argument must be hash algorithm"); - + HashType hashAlgo = parseHashType(opArgs.front()); opArgs.pop_front(); @@ -149,7 +149,7 @@ static void opAddFixed(Strings opFlags, Strings opArgs) static void opPrintFixedPath(Strings opFlags, Strings opArgs) { bool recursive = false; - + for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ++i) if (*i == "--recursive") recursive = true; @@ -157,7 +157,7 @@ static void opPrintFixedPath(Strings opFlags, Strings opArgs) if (opArgs.size() != 3) throw UsageError(format("`--print-fixed-path' requires three arguments")); - + Strings::iterator i = opArgs.begin(); HashType hashAlgo = parseHashType(*i++); string hash = *i++; @@ -205,12 +205,12 @@ static void printTree(const Path & path, PathSet references; store->queryReferences(path, references); - -#if 0 + +#if 0 for (PathSet::iterator i = drv.inputSrcs.begin(); i != drv.inputSrcs.end(); ++i) cout << format("%1%%2%\n") % (tailPad + treeConn) % *i; -#endif +#endif /* Topologically sort under the relation A < B iff A \in closure(B). 
That is, if derivation A is an (possibly indirect) @@ -266,7 +266,7 @@ static void opQuery(Strings opFlags, Strings opArgs) else throw UsageError(format("unknown flag `%1%'") % *i); switch (query) { - + case qOutputs: { foreach (Strings::iterator, i, opArgs) { *i = followLinksToStorePath(*i); @@ -293,7 +293,7 @@ static void opQuery(Strings opFlags, Strings opArgs) } } Paths sorted = topoSortPaths(*store, paths); - for (Paths::reverse_iterator i = sorted.rbegin(); + for (Paths::reverse_iterator i = sorted.rbegin(); i != sorted.rend(); ++i) cout << format("%s\n") % *i; break; @@ -328,7 +328,7 @@ static void opQuery(Strings opFlags, Strings opArgs) if (query == qHash) { assert(info.hash.type == htSHA256); cout << format("sha256:%1%\n") % printHash32(info.hash); - } else if (query == qSize) + } else if (query == qSize) cout << format("%1%\n") % info.narSize; } } @@ -340,7 +340,7 @@ static void opQuery(Strings opFlags, Strings opArgs) printTree(followLinksToStorePath(*i), "", "", done); break; } - + case qGraph: { PathSet roots; foreach (Strings::iterator, i, opArgs) { @@ -366,7 +366,7 @@ static void opQuery(Strings opFlags, Strings opArgs) cout << format("%1%\n") % followLinksToStorePath(*i); break; } - + case qRoots: { PathSet referrers; foreach (Strings::iterator, i, opArgs) { @@ -380,7 +380,7 @@ static void opQuery(Strings opFlags, Strings opArgs) cout << format("%1%\n") % i->first; break; } - + default: abort(); } @@ -426,9 +426,9 @@ static void opReadLog(Strings opFlags, Strings opArgs) foreach (Strings::iterator, i, opArgs) { Path path = useDeriver(followLinksToStorePath(*i)); - + Path logPath = (format("%1%/%2%/%3%") % - nixLogDir % drvsLogDir % baseNameOf(path)).str(); + settings.nixLogDir % drvsLogDir % baseNameOf(path)).str(); Path logBz2Path = logPath + ".bz2"; if (pathExists(logPath)) { @@ -454,7 +454,7 @@ static void opReadLog(Strings opFlags, Strings opArgs) } while (err != BZ_STREAM_END); BZ2_bzReadClose(&err, bz); } - + else throw Error(format("build 
log of derivation `%1%' is not available") % path); } } @@ -474,7 +474,7 @@ static void opDumpDB(Strings opFlags, Strings opArgs) static void registerValidity(bool reregister, bool hashGiven, bool canonicalise) { ValidPathInfos infos; - + while (1) { ValidPathInfo info = decodeValidPathInfo(cin, hashGiven); if (info.path == "") break; @@ -508,7 +508,7 @@ static void opRegisterValidity(Strings opFlags, Strings opArgs) { bool reregister = false; // !!! maybe this should be the default bool hashGiven = false; - + for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ++i) if (*i == "--reregister") reregister = true; @@ -524,7 +524,7 @@ static void opRegisterValidity(Strings opFlags, Strings opArgs) static void opCheckValidity(Strings opFlags, Strings opArgs) { bool printInvalid = false; - + for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ++i) if (*i == "--print-invalid") printInvalid = true; @@ -551,13 +551,13 @@ static string showBytes(unsigned long long bytes, unsigned long long blocks) } -struct PrintFreed +struct PrintFreed { bool show; const GCResults & results; PrintFreed(bool show, const GCResults & results) : show(show), results(results) { } - ~PrintFreed() + ~PrintFreed() { if (show) cout << format("%1% store paths deleted, %2% freed\n") @@ -572,9 +572,9 @@ static void opGC(Strings opFlags, Strings opArgs) bool printRoots = false; GCOptions options; options.action = GCOptions::gcDeleteDead; - + GCResults results; - + /* Do what? 
*/ foreach (Strings::iterator, i, opFlags) if (*i == "--print-roots") printRoots = true; @@ -613,14 +613,14 @@ static void opDelete(Strings opFlags, Strings opArgs) { GCOptions options; options.action = GCOptions::gcDeleteSpecific; - + foreach (Strings::iterator, i, opFlags) if (*i == "--ignore-liveness") options.ignoreLiveness = true; else throw UsageError(format("unknown flag `%1%'") % *i); foreach (Strings::iterator, i, opArgs) options.pathsToDelete.insert(followLinksToStorePath(*i)); - + GCResults results; PrintFreed freed(true, results); store->collectGarbage(options, results); @@ -671,9 +671,9 @@ static void opImport(Strings opFlags, Strings opArgs) foreach (Strings::iterator, i, opFlags) if (*i == "--require-signature") requireSignature = true; else throw UsageError(format("unknown flag `%1%'") % *i); - + if (!opArgs.empty()) throw UsageError("no arguments expected"); - + FdSource source(STDIN_FILENO); Paths paths = store->importPaths(requireSignature, source); @@ -700,12 +700,12 @@ static void opVerify(Strings opFlags, Strings opArgs) throw UsageError("no arguments expected"); bool checkContents = false; - + for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ++i) if (*i == "--check-contents") checkContents = true; else throw UsageError(format("unknown flag `%1%'") % *i); - + ensureLocalStore().verifyStore(checkContents); } @@ -844,7 +844,7 @@ void run(Strings args) } else if (arg == "--indirect") indirectRoot = true; - else if (arg[0] == '-') { + else if (arg[0] == '-') { opFlags.push_back(arg); if (arg == "--max-freed" || arg == "--max-links" || arg == "--max-atime") { /* !!! 
hack */ if (i != args.end()) opFlags.push_back(*i++); diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index 09800c16087a..84ad689048a0 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -527,27 +527,23 @@ static void performOp(unsigned int clientVersion, } case wopSetOptions: { - keepFailed = readInt(from) != 0; - keepGoing = readInt(from) != 0; - tryFallback = readInt(from) != 0; + settings.keepFailed = readInt(from) != 0; + settings.keepGoing = readInt(from) != 0; + settings.tryFallback = readInt(from) != 0; verbosity = (Verbosity) readInt(from); - maxBuildJobs = readInt(from); - maxSilentTime = readInt(from); + settings.maxBuildJobs = readInt(from); + settings.maxSilentTime = readInt(from); if (GET_PROTOCOL_MINOR(clientVersion) >= 2) - useBuildHook = readInt(from) != 0; + settings.useBuildHook = readInt(from) != 0; if (GET_PROTOCOL_MINOR(clientVersion) >= 4) { - buildVerbosity = (Verbosity) readInt(from); + settings.buildVerbosity = (Verbosity) readInt(from); logType = (LogType) readInt(from); - printBuildTrace = readInt(from) != 0; + settings.printBuildTrace = readInt(from) != 0; } if (GET_PROTOCOL_MINOR(clientVersion) >= 6) - buildCores = readInt(from); - if (GET_PROTOCOL_MINOR(clientVersion) >= 10) { - int x = readInt(from); - Strings ss; - ss.push_back(x == 0 ? 
"false" : "true"); - overrideSetting("build-use-substitutes", ss); - } + settings.buildCores = readInt(from); + if (GET_PROTOCOL_MINOR(clientVersion) >= 10) + settings.useSubstitutes = readInt(from) != 0; startWork(); stopWork(); break; @@ -768,7 +764,7 @@ static void daemonLoop() if (fdSocket == -1) throw SysError("cannot create Unix domain socket"); - string socketPath = nixStateDir + DEFAULT_SOCKET_PATH; + string socketPath = settings.nixStateDir + DEFAULT_SOCKET_PATH; createDirs(dirOf(socketPath)); @@ -867,10 +863,6 @@ static void daemonLoop() strncpy(argvSaved[1], processName.c_str(), strlen(argvSaved[1])); } - /* Since the daemon can be long-running, the - settings may have changed. So force a reload. */ - reloadSettings(); - /* Handle the connection. */ from.fd = remote; to.fd = remote; -- cgit 1.4.1 From 89a8207029e7f6d5cfe3ab972c49ea46f5b9a784 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 31 Jul 2012 17:56:02 -0400 Subject: Add an option ‘build-fallback’ (equivalent to the --fallback flag) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/manual/conf-file.xml | 10 ++++++++++ src/libmain/shared.cc | 2 +- src/libstore/globals.cc | 3 ++- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/doc/manual/conf-file.xml b/doc/manual/conf-file.xml index c73466ef5cf6..c19e44ebf608 100644 --- a/doc/manual/conf-file.xml +++ b/doc/manual/conf-file.xml @@ -246,6 +246,16 @@ flag, e.g. --option gc-keep-outputs false. + build-fallback + + If set to true, Nix will fall + back to building from source if a binary substitute fails. This + is equivalent to the flag. The + default is false. 
+ + + + build-chroot-dirs When builds are performed in a chroot environment, diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 9c62e320f66e..e1280911b399 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -191,7 +191,7 @@ static void initAndRun(int argc, char * * argv) else if (arg == "--keep-going" || arg == "-k") settings.keepGoing = true; else if (arg == "--fallback") - settings.tryFallback = true; + settings.set("build-fallback", "true"); else if (arg == "--max-jobs" || arg == "-j") settings.set("build-max-jobs", getArg(arg, i, args.end())); else if (arg == "--cores") diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 7dc2e714bc9d..f5f4f15f011f 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -118,9 +118,10 @@ void Settings::set(const string & name, const string & value) void Settings::update() { - get(thisSystem, "system"); + get(tryFallback, "build-fallback"); get(maxBuildJobs, "build-max-jobs"); get(buildCores, "build-cores"); + get(thisSystem, "system"); get(maxSilentTime, "build-max-silent-time"); get(buildTimeout, "build-timeout"); get(reservedSize, "gc-reserved-space"); -- cgit 1.4.1 From 90d9c58d4dabb370849cd523fb9ee471e8140b76 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 31 Jul 2012 18:19:44 -0400 Subject: Pass all --option flags to the daemon --- src/libstore/globals.cc | 7 +++++++ src/libstore/globals.hh | 8 +++++--- src/libstore/remote-store.cc | 10 ++++++++++ src/nix-worker/nix-worker.cc | 8 ++++++++ 4 files changed, 30 insertions(+), 3 deletions(-) diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index f5f4f15f011f..bfb40a07a237 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -113,6 +113,7 @@ void Settings::loadConfFile() void Settings::set(const string & name, const string & value) { settings[name] = value; + overrides[name] = value; } @@ -193,4 +194,10 @@ string Settings::pack() } +Settings::SettingsMap Settings::getOverrides() +{ 
+ return overrides; +} + + } diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 5783d9bf37ba..1fb196db2924 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -10,6 +10,8 @@ namespace nix { struct Settings { + typedef std::map SettingsMap; + Settings(); void processEnvironment(); @@ -22,6 +24,8 @@ struct Settings { string pack(); + SettingsMap getOverrides(); + /* The directory where we store sources and derived files. */ Path nixStore; @@ -172,9 +176,7 @@ struct Settings { bool envKeepDerivations; private: - typedef std::map SettingsMap; - - SettingsMap settings; + SettingsMap settings, overrides; void get(string & res, const string & name); void get(bool & res, const string & name); diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index c67e53bfb87e..56396541adec 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -184,6 +184,7 @@ RemoteStore::~RemoteStore() void RemoteStore::setOptions() { writeInt(wopSetOptions, to); + writeInt(settings.keepFailed, to); writeInt(settings.keepGoing, to); writeInt(settings.tryFallback, to); @@ -202,6 +203,15 @@ void RemoteStore::setOptions() if (GET_PROTOCOL_MINOR(daemonVersion) >= 10) writeInt(settings.useSubstitutes, to); + if (GET_PROTOCOL_MINOR(daemonVersion) >= 12) { + Settings::SettingsMap overrides = settings.getOverrides(); + writeInt(overrides.size(), to); + foreach (Settings::SettingsMap::iterator, i, overrides) { + writeString(i->first, to); + writeString(i->second, to); + } + } + processStderr(); } diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index 84ad689048a0..8ccafca29d0c 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -544,6 +544,14 @@ static void performOp(unsigned int clientVersion, settings.buildCores = readInt(from); if (GET_PROTOCOL_MINOR(clientVersion) >= 10) settings.useSubstitutes = readInt(from) != 0; + if (GET_PROTOCOL_MINOR(clientVersion) >= 12) { + unsigned 
int n = readInt(from); + for (unsigned int i = 0; i < n; i++) { + string name = readString(from); + string value = readString(from); + settings.set("untrusted-" + name, value); + } + } startWork(); stopWork(); break; -- cgit 1.4.1 From eb7849e3a281511a59abf72ae5c3133f903bbaab Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 31 Jul 2012 18:50:32 -0400 Subject: Prevent an injection attack in passing untrusted options to substituters --- src/libstore/globals.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index bfb40a07a237..c75ebdd0e36b 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -188,6 +188,10 @@ string Settings::pack() { string s; foreach (SettingsMap::iterator, i, settings) { + if (i->first.find('\n') != string::npos || + i->first.find('=') != string::npos || + i->second.find('\n') != string::npos) + throw Error("illegal option name/value"); s += i->first; s += '='; s += i->second; s += '\n'; } return s; -- cgit 1.4.1 From 4d1b64f118cf6ebcbf530bea4a3c531704d7d6ba Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 31 Jul 2012 18:56:22 -0400 Subject: Allow daemon users to override ‘binary-caches’ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For security reasons, daemon users can only specify caches that appear in the ‘binary-caches’ and ‘trusted-binary-caches’ options in nix.conf. --- doc/manual/conf-file.xml | 13 +++++++++++++ scripts/download-from-binary-cache.pl.in | 30 +++++++++++++++++++++++++++--- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/doc/manual/conf-file.xml b/doc/manual/conf-file.xml index c19e44ebf608..c09d46206a23 100644 --- a/doc/manual/conf-file.xml +++ b/doc/manual/conf-file.xml @@ -329,6 +329,19 @@ build-use-chroot = /dev /proc /bin + trusted-binary-caches + + A list of URLs of binary caches, separated by + whitespace. 
These are not used by default, but can be enabled by + users of the Nix daemon by specifying --option + binary-caches urls on the + command line. Daemon users are only allowed to pass a subset of + the URLs listed in binary-caches and + trusted-binary-caches. + + + + binary-caches-parallel-connections The maximum number of parallel HTTP connections diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 5d65e664e564..3f7d3ef45f4a 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -8,6 +8,7 @@ use Nix::Store; use Nix::Utils; use WWW::Curl::Easy; use WWW::Curl::Multi; +use List::MoreUtils qw(any); use strict; @@ -166,9 +167,32 @@ sub getAvailableCaches { return if $gotCaches; $gotCaches = 1; - my @urls = map { s/\/+$//; $_ } split(/ /, - $Nix::Config::config{"binary-caches"} - // ($Nix::Config::storeDir eq "/nix/store" ? "http://nixos.org/binary-cache" : "")); + sub strToList { + my ($s) = @_; + return map { s/\/+$//; $_ } split(/ /, $s); + } + + my @urls = strToList + ($Nix::Config::config{"binary-caches"} + // ($Nix::Config::storeDir eq "/nix/store" ? "http://nixos.org/binary-cache" : "")); + + # Allow Nix daemon users to override the binary caches to a subset + # of those listed in the config file. Note that ‘untrusted-*’ + # denotes options passed by the client. + if (defined $Nix::Config::config{"untrusted-binary-caches"}) { + my @untrustedUrls = strToList $Nix::Config::config{"untrusted-binary-caches"}; + my @trustedUrls = (@urls, strToList($Nix::Config::config{"trusted-binary-caches"} // "")); + @urls = (); + foreach my $url (@untrustedUrls) { + if (any { $url eq $_ } @trustedUrls) { + push @urls, $url; + } else { + # FIXME: should die here, but we currently can't + # deliver error messages to clients. 
+ warn "warning: binary cache ‘$url’ is not trusted (please add it to ‘trusted-binary-caches’ in $Nix::Config::confDir/nix.conf)\n"; + } + } + } foreach my $url (@urls) { -- cgit 1.4.1 From c770a2422a47526d5eb336af6af4292df68dad2b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 Aug 2012 11:19:24 -0400 Subject: Report substituter errors to clients of the Nix daemon --- scripts/download-from-binary-cache.pl.in | 10 ++---- src/libstore/local-store.cc | 58 ++++++++++++++++++++------------ src/libstore/local-store.hh | 2 +- src/libutil/util.cc | 9 ++++- src/libutil/util.hh | 4 +++ 5 files changed, 53 insertions(+), 30 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 3f7d3ef45f4a..94c446e37a4c 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -184,13 +184,9 @@ sub getAvailableCaches { my @trustedUrls = (@urls, strToList($Nix::Config::config{"trusted-binary-caches"} // "")); @urls = (); foreach my $url (@untrustedUrls) { - if (any { $url eq $_ } @trustedUrls) { - push @urls, $url; - } else { - # FIXME: should die here, but we currently can't - # deliver error messages to clients. 
- warn "warning: binary cache ‘$url’ is not trusted (please add it to ‘trusted-binary-caches’ in $Nix::Config::confDir/nix.conf)\n"; - } + die "binary cache ‘$url’ is not trusted (please add it to ‘trusted-binary-caches’ in $Nix::Config::confDir/nix.conf)\n" + unless any { $url eq $_ } @trustedUrls; + push @urls, $url; } } diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index f20324d4e3d0..bd63ce55d095 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -909,10 +909,11 @@ void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & debug(format("starting substituter program `%1%'") % substituter); - Pipe toPipe, fromPipe; + Pipe toPipe, fromPipe, errorPipe; toPipe.create(); fromPipe.create(); + errorPipe.create(); run.pid = fork(); @@ -940,6 +941,8 @@ void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & throw SysError("dupping stdin"); if (dup2(fromPipe.writeSide, STDOUT_FILENO) == -1) throw SysError("dupping stdout"); + if (dup2(errorPipe.writeSide, STDERR_FILENO) == -1) + throw SysError("dupping stderr"); closeMostFDs(set()); execl(substituter.c_str(), substituter.c_str(), "--query", NULL); throw SysError(format("executing `%1%'") % substituter); @@ -953,6 +956,7 @@ void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & run.to = toPipe.writeSide.borrow(); run.from = fromPipe.readSide.borrow(); + run.error = errorPipe.readSide.borrow(); } @@ -973,13 +977,21 @@ PathSet LocalStore::querySubstitutablePaths(const PathSet & paths) RunningSubstituter & run(runningSubstituters[*i]); startSubstituter(*i, run); string s = "have "; - foreach (PathSet::const_iterator, i, paths) - if (res.find(*i) == res.end()) { s += *i; s += " "; } + foreach (PathSet::const_iterator, j, paths) + if (res.find(*j) == res.end()) { s += *j; s += " "; } writeLine(run.to, s); while (true) { - Path path = readLine(run.from); - if (path == "") break; - res.insert(path); + /* 
FIXME: we only read stderr when an error occurs, so + substituters should only write (short) messages to + stderr when they fail. I.e. they shouldn't write debug + output. */ + try { + Path path = readLine(run.from); + if (path == "") break; + res.insert(path); + } catch (EndOfFile e) { + throw Error(format("substituter `%1%' failed: %2%") % *i % chomp(drainFD(run.error))); + } } } return res; @@ -998,22 +1010,26 @@ void LocalStore::querySubstitutablePathInfos(const Path & substituter, writeLine(run.to, s); while (true) { - Path path = readLine(run.from); - if (path == "") break; - if (paths.find(path) == paths.end()) - throw Error(format("got unexpected path `%1%' from substituter") % path); - paths.erase(path); - SubstitutablePathInfo & info(infos[path]); - info.deriver = readLine(run.from); - if (info.deriver != "") assertStorePath(info.deriver); - int nrRefs = getIntLine(run.from); - while (nrRefs--) { - Path p = readLine(run.from); - assertStorePath(p); - info.references.insert(p); + try { + Path path = readLine(run.from); + if (path == "") break; + if (paths.find(path) == paths.end()) + throw Error(format("got unexpected path `%1%' from substituter") % path); + paths.erase(path); + SubstitutablePathInfo & info(infos[path]); + info.deriver = readLine(run.from); + if (info.deriver != "") assertStorePath(info.deriver); + int nrRefs = getIntLine(run.from); + while (nrRefs--) { + Path p = readLine(run.from); + assertStorePath(p); + info.references.insert(p); + } + info.downloadSize = getIntLine(run.from); + info.narSize = getIntLine(run.from); + } catch (EndOfFile e) { + throw Error(format("substituter `%1%' failed: %2%") % substituter % chomp(drainFD(run.error))); } - info.downloadSize = getIntLine(run.from); - info.narSize = getIntLine(run.from); } } diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 7cf9fc18d207..3cb016e9cafd 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -45,7 +45,7 @@ struct 
OptimiseStats struct RunningSubstituter { Pid pid; - AutoCloseFD to, from; + AutoCloseFD to, from, error; }; diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 689fc543af31..086574058aff 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -253,7 +253,7 @@ string readLine(int fd) if (errno != EINTR) throw SysError("reading a line"); } else if (rd == 0) - throw Error("unexpected EOF reading a line"); + throw EndOfFile("unexpected EOF reading a line"); else { if (ch == '\n') return s; s += ch; @@ -1015,6 +1015,13 @@ string concatStringsSep(const string & sep, const Strings & ss) } +string chomp(const string & s) +{ + size_t i = s.find_last_not_of(" \n\r\t"); + return i == string::npos ? "" : string(s, 0, i); +} + + string statusToString(int status) { if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 9b8656f70485..16633a0835a3 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -292,6 +292,10 @@ Strings tokenizeString(const string & s, const string & separators = " \t\n\r"); string concatStringsSep(const string & sep, const Strings & ss); +/* Remove trailing whitespace from a string. */ +string chomp(const string & s); + + /* Convert the exit status of a child as returned by wait() into an error string. 
*/ string statusToString(int status); -- cgit 1.4.1 From 8a25d787d7f05d612521bd489510aa23d4ef2177 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 Aug 2012 11:33:15 -0400 Subject: download-from-binary-cache: Remove duplicate URLs --- scripts/download-from-binary-cache.pl.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 94c446e37a4c..f7f1f73460da 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -8,7 +8,7 @@ use Nix::Store; use Nix::Utils; use WWW::Curl::Easy; use WWW::Curl::Multi; -use List::MoreUtils qw(any); +use List::MoreUtils qw(any uniq); use strict; @@ -190,7 +190,7 @@ sub getAvailableCaches { } } - foreach my $url (@urls) { + foreach my $url (uniq @urls) { # FIXME: not atomic. $queryCache->execute($url); -- cgit 1.4.1 From afa7e0187815d89c8af93fa9c1081bf67ab0f10e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 Aug 2012 16:34:17 -0400 Subject: Inline unpack-channel.sh --- corepkgs/Makefile.am | 2 +- corepkgs/unpack-channel.nix | 19 ++++++++++++++++--- corepkgs/unpack-channel.sh | 4 ---- 3 files changed, 17 insertions(+), 8 deletions(-) delete mode 100644 corepkgs/unpack-channel.sh diff --git a/corepkgs/Makefile.am b/corepkgs/Makefile.am index 4b0b8860be68..111b667d0b58 100644 --- a/corepkgs/Makefile.am +++ b/corepkgs/Makefile.am @@ -1,6 +1,6 @@ all-local: config.nix -files = nar.nix buildenv.nix buildenv.pl unpack-channel.nix unpack-channel.sh derivation.nix fetchurl.nix \ +files = nar.nix buildenv.nix buildenv.pl unpack-channel.nix derivation.nix fetchurl.nix \ imported-drv-to-derivation.nix install-exec-local: diff --git a/corepkgs/unpack-channel.nix b/corepkgs/unpack-channel.nix index 245430ab0feb..b26bece30eae 100644 --- a/corepkgs/unpack-channel.nix +++ b/corepkgs/unpack-channel.nix @@ -1,14 +1,27 @@ with import ; +let + + builder = builtins.toFile "unpack-channel.sh" + 
'' + mkdir $out + cd $out + ${bzip2} -d < $src | ${tar} xf - + mv * $out/$channelName + ''; + +in + { name, channelName, src }: derivation { system = builtins.currentSystem; builder = shell; - args = [ "-e" ./unpack-channel.sh ]; - inherit name channelName src bzip2 tar tr; + args = [ "-e" builder ]; + inherit name channelName src; + PATH = "${nixBinDir}:${coreutils}"; - + # No point in doing this remotely. preferLocalBuild = true; diff --git a/corepkgs/unpack-channel.sh b/corepkgs/unpack-channel.sh deleted file mode 100644 index f42b0870ae18..000000000000 --- a/corepkgs/unpack-channel.sh +++ /dev/null @@ -1,4 +0,0 @@ -mkdir $out -cd $out -$bzip2 -d < $src | $tar xf - -mv * $out/$channelName -- cgit 1.4.1 From 46f852cda013b818f113c7905f020131a44f2340 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 Aug 2012 16:42:51 -0400 Subject: Use a GNU tar flag to shut up warnings about implausibly old timestamp --- configure.ac | 12 ++++++++++++ corepkgs/config.nix.in | 1 + corepkgs/unpack-channel.nix | 2 +- substitute.mk | 1 + 4 files changed, 15 insertions(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 21a87194f477..bbee7b061250 100644 --- a/configure.ac +++ b/configure.ac @@ -339,6 +339,18 @@ eval dynlib_suffix=$shrext_cmds AC_SUBST(dynlib_suffix) +# Do we have GNU tar? 
+AC_MSG_CHECKING([if you have GNU tar]) +if $tar --version 2> /dev/null | grep -q GNU; then + AC_MSG_RESULT(yes) + tarFlags="--warning=no-timestamp" +else + AC_MSG_RESULT(no) +fi +AC_SUBST(tarFlags) + + + AM_CONFIG_HEADER([config.h]) AC_CONFIG_FILES([Makefile src/Makefile diff --git a/corepkgs/config.nix.in b/corepkgs/config.nix.in index 26e821d0e2d7..6f6ec6de1a30 100644 --- a/corepkgs/config.nix.in +++ b/corepkgs/config.nix.in @@ -9,6 +9,7 @@ in { bzip2 = "@bzip2@"; xz = "@xz@"; tar = "@tar@"; + tarFlags = "@tarFlags@"; tr = "@tr@"; curl = "@curl@"; nixBinDir = fromEnv "NIX_BIN_DIR" "@bindir@"; diff --git a/corepkgs/unpack-channel.nix b/corepkgs/unpack-channel.nix index b26bece30eae..bbc54c7d1e0d 100644 --- a/corepkgs/unpack-channel.nix +++ b/corepkgs/unpack-channel.nix @@ -6,7 +6,7 @@ let '' mkdir $out cd $out - ${bzip2} -d < $src | ${tar} xf - + ${bzip2} -d < $src | ${tar} xf - --warning=no-timestamp mv * $out/$channelName ''; diff --git a/substitute.mk b/substitute.mk index 77c5afc28117..378751943a7e 100644 --- a/substitute.mk +++ b/substitute.mk @@ -22,6 +22,7 @@ -e "s^@coreutils\@^$(coreutils)^g" \ -e "s^@sed\@^$(sed)^g" \ -e "s^@tar\@^$(tar)^g" \ + -e "s^@tarFlags\@^$(tarFlags)^g" \ -e "s^@gzip\@^$(gzip)^g" \ -e "s^@pv\@^$(pv)^g" \ -e "s^@tr\@^$(tr)^g" \ -- cgit 1.4.1 From ca94b383718f2dc5f4f14ed6eddd8d04ac9d3fc2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 Aug 2012 16:43:36 -0400 Subject: nix-env: Ignore manifest.nix when recursing into ~/.nix-defexpr Channels are implemented using a profile now, and profiles contain a manifest.nix file. This should be ignored to prevent bogus packages from showing up in nix-env. 
--- src/nix-env/nix-env.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 475bdd3669b0..5174daf90d86 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -112,6 +112,11 @@ static void getAllExprs(EvalState & state, StringSet namesSorted(names.begin(), names.end()); foreach (StringSet::iterator, i, namesSorted) { + /* Ignore the manifest.nix used by profiles. This is + necessary to prevent it from showing up in channels (which + are implemented using profiles). */ + if (*i == "manifest.nix") continue; + Path path2 = path + "/" + *i; struct stat st; -- cgit 1.4.1 From 79bba3782c275f03954cc9fc03f92aff487db953 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 Aug 2012 17:21:47 -0400 Subject: Doh --- src/libutil/util.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 086574058aff..3790e2fa3bbd 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -1018,7 +1018,7 @@ string concatStringsSep(const string & sep, const Strings & ss) string chomp(const string & s) { size_t i = s.find_last_not_of(" \n\r\t"); - return i == string::npos ? "" : string(s, 0, i); + return i == string::npos ? "" : string(s, 0, i + 1); } -- cgit 1.4.1 From 5170c5691aac1bd6abc69be65cf880316e11fe24 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 Aug 2012 17:56:11 -0400 Subject: nix-channel: Use binary caches advertised by channels Channels can now advertise a binary cache by creating a file /binary-cache-url. The channel unpacker puts these in its "binary-caches" subdirectory. Thus, the URLS of the binary caches for the channels added by root appear in /nix/var/nix/profiles/per-user/eelco/channels/binary-caches/*. The binary cache substituter reads these and adds them to the list of binary caches. 
--- corepkgs/unpack-channel.nix | 10 ++++++--- doc/manual/conf-file.xml | 12 +++++++++++ scripts/download-from-binary-cache.pl.in | 10 +++++++++ scripts/nix-channel.in | 37 +++++++++++++++++--------------- 4 files changed, 49 insertions(+), 20 deletions(-) diff --git a/corepkgs/unpack-channel.nix b/corepkgs/unpack-channel.nix index bbc54c7d1e0d..57d5a09a076a 100644 --- a/corepkgs/unpack-channel.nix +++ b/corepkgs/unpack-channel.nix @@ -6,19 +6,23 @@ let '' mkdir $out cd $out - ${bzip2} -d < $src | ${tar} xf - --warning=no-timestamp + ${bzip2} -d < $src | ${tar} xf - --warning=no-timestamp mv * $out/$channelName + if [ -n "$binaryCacheURL" ]; then + mkdir $out/binary-caches + echo -n "$binaryCacheURL" > $out/binary-caches/$channelName + fi ''; in -{ name, channelName, src }: +{ name, channelName, src, binaryCacheURL ? "" }: derivation { system = builtins.currentSystem; builder = shell; args = [ "-e" builder ]; - inherit name channelName src; + inherit name channelName src binaryCacheURL; PATH = "${nixBinDir}:${coreutils}"; diff --git a/doc/manual/conf-file.xml b/doc/manual/conf-file.xml index c09d46206a23..ae167fb787c7 100644 --- a/doc/manual/conf-file.xml +++ b/doc/manual/conf-file.xml @@ -329,6 +329,18 @@ build-use-chroot = /dev /proc /bin + binary-caches-files + + A list of names of files that will be read to + obtain additional binary cache URLs. The default is + /nix/var/nix/profiles/per-user/root/channels/binary-caches/*, + which ensures that Nix will use the binary caches corresponding to + the channels installed by root. Do not set this option to read + files created by untrusted users! 
+ + + + trusted-binary-caches A list of URLs of binary caches, separated by diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index f7f1f73460da..76306405cc05 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -176,6 +176,16 @@ sub getAvailableCaches { ($Nix::Config::config{"binary-caches"} // ($Nix::Config::storeDir eq "/nix/store" ? "http://nixos.org/binary-cache" : "")); + my $urlsFiles = $Nix::Config::config{"binary-cache-files"} + // "/nix/var/nix/profiles/per-user/root/channels/binary-caches/*"; + foreach my $urlFile (glob $urlsFiles) { + next unless -f $urlFile; + open FILE, "<$urlFile" or die "cannot open ‘$urlFile’\n"; + my $url = ; chomp $url; + close FILE; + push @urls, strToList($url); + } + # Allow Nix daemon users to override the binary caches to a subset # of those listed in the config file. Note that ‘untrusted-*’ # denotes options passed by the client. diff --git a/scripts/nix-channel.in b/scripts/nix-channel.in index e7a4b0900e86..e057cc91671a 100755 --- a/scripts/nix-channel.in +++ b/scripts/nix-channel.in @@ -22,7 +22,7 @@ my $nixDefExpr = "$home/.nix-defexpr"; my $userName = getpwuid($<) or die "cannot figure out user name"; my $profile = "$Nix::Config::stateDir/profiles/per-user/$userName/channels"; mkpath(dirname $profile, 0, 0755); - + my %channels; @@ -77,20 +77,14 @@ sub removeChannel { # channels. sub update { my @channelNames = @_; - - readChannels; - - # Create the manifests directory if it doesn't exist. - mkdir $manifestDir, 0755 unless -e $manifestDir; - # Do we have write permission to the manifests directory? - die "$0: you do not have write permission to `$manifestDir'!\n" unless -W $manifestDir; + readChannels; # Download each channel. my $exprs = ""; foreach my $name (keys %channels) { next if scalar @channelNames > 0 && ! 
grep { $_ eq $name } @{channelNames}; - + my $url = $channels{$name}; my $origUrl = "$url/MANIFEST"; @@ -101,11 +95,20 @@ sub update { die "$0: unable to check `$url'\n" if $? != 0; $headers =~ s/\r//g; $url = $1 if $headers =~ /^Location:\s*(.*)\s*$/m; - - # Pull the channel manifest. - $ENV{'NIX_ORIG_URL'} = $origUrl; - system("$Nix::Config::binDir/nix-pull", "--skip-wrong-store", "$url/MANIFEST") == 0 - or die "cannot pull manifest from `$url'\n"; + + # Check if the channel advertises a binary cache. + my $binaryCacheURL = `$Nix::Config::curl --silent '$url'/binary-cache-url`; + my $extraAttrs = ""; + if ($? == 0 && $binaryCacheURL ne "") { + $extraAttrs .= "binaryCacheURL = \"$binaryCacheURL\"; "; + } else { + # No binary cache, so pull the channel manifest. + mkdir $manifestDir, 0755 unless -e $manifestDir; + die "$0: you do not have write permission to `$manifestDir'!\n" unless -W $manifestDir; + $ENV{'NIX_ORIG_URL'} = $origUrl; + system("$Nix::Config::binDir/nix-pull", "--skip-wrong-store", "$url/MANIFEST") == 0 + or die "cannot pull manifest from `$url'\n"; + } # Download the channel tarball. 
my $fullURL = "$url/nixexprs.tar.bz2"; @@ -120,7 +123,7 @@ sub update { my $cname = $name; $cname .= $1 if basename($url) =~ /(-\d.*)$/; - $exprs .= "'f: f { name = \"$cname\"; channelName = \"$name\"; src = builtins.storePath \"$path\"; }' "; + $exprs .= "'f: f { name = \"$cname\"; channelName = \"$name\"; src = builtins.storePath \"$path\"; $extraAttrs }' "; } # Unpack the channel tarballs into the Nix store and install them @@ -189,7 +192,7 @@ while (scalar @ARGV) { update(@ARGV); last; } - + elsif ($arg eq "--help") { usageError; } @@ -198,7 +201,7 @@ while (scalar @ARGV) { print "nix-channel (Nix) $Nix::Config::version\n"; exit 0; } - + else { die "unknown argument `$arg'; try `--help'"; } -- cgit 1.4.1 From babe54bf97091441353f2219e7846afd0e0d9f16 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 27 Aug 2012 11:11:30 -0400 Subject: Add missing file --- perl/lib/Nix/Utils.pm | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 perl/lib/Nix/Utils.pm diff --git a/perl/lib/Nix/Utils.pm b/perl/lib/Nix/Utils.pm new file mode 100644 index 000000000000..943b8dd4abfa --- /dev/null +++ b/perl/lib/Nix/Utils.pm @@ -0,0 +1,8 @@ +package Nix::Utils; + +$urlRE = "(?: [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*]+ )"; + +sub checkURL { + my ($url) = @_; + die "invalid URL ‘$url’\n" unless $url =~ /^ $urlRE $ /x; +} -- cgit 1.4.1 From 8b8fe6139e05f990b9d2a35652fd9bdb79189f90 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 27 Aug 2012 11:28:34 -0400 Subject: Drop dependency on List::MoreUtils --- perl/lib/Nix/Utils.pm | 11 +++++++++++ scripts/download-from-binary-cache.pl.in | 5 ++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/perl/lib/Nix/Utils.pm b/perl/lib/Nix/Utils.pm index 943b8dd4abfa..1e7e0b5afb2f 100644 --- a/perl/lib/Nix/Utils.pm +++ b/perl/lib/Nix/Utils.pm @@ -6,3 +6,14 @@ sub checkURL { my ($url) = @_; die "invalid URL ‘$url’\n" unless $url =~ /^ $urlRE $ /x; } + +sub uniq { + my %seen; + my 
@res; + foreach my $name (@_) { + next if $seen{$name}; + $seen{$name} = 1; + push @res, $name; + } + return @res; +} diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index 76306405cc05..e65e2d5b36d2 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -8,7 +8,6 @@ use Nix::Store; use Nix::Utils; use WWW::Curl::Easy; use WWW::Curl::Multi; -use List::MoreUtils qw(any uniq); use strict; @@ -195,12 +194,12 @@ sub getAvailableCaches { @urls = (); foreach my $url (@untrustedUrls) { die "binary cache ‘$url’ is not trusted (please add it to ‘trusted-binary-caches’ in $Nix::Config::confDir/nix.conf)\n" - unless any { $url eq $_ } @trustedUrls; + unless grep { $url eq $_ } @trustedUrls > 0; push @urls, $url; } } - foreach my $url (uniq @urls) { + foreach my $url (Nix::Utils::uniq @urls) { # FIXME: not atomic. $queryCache->execute($url); -- cgit 1.4.1 From 8207359227740bfb2fe77cf843a81aa878fd39aa Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 27 Aug 2012 11:28:48 -0400 Subject: Whitespace --- configure.ac | 14 +++++++------- release.nix | 16 ++++++++-------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/configure.ac b/configure.ac index bbee7b061250..17169c885f5b 100644 --- a/configure.ac +++ b/configure.ac @@ -25,12 +25,12 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], case "$host_os" in linux-gnu*) - # For backward compatibility, strip the `-gnu' part. - system="$machine_name-linux";; + # For backward compatibility, strip the `-gnu' part. + system="$machine_name-linux";; *) # Strip the version number from names such as `gnu0.3', # `darwin10.2.0', etc. 
- system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";; + system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";; esac]) sys_name=$(uname -s | tr 'A-Z ' 'a-z_') @@ -40,7 +40,7 @@ case $sys_name in sys_name=cygwin ;; esac - + AC_MSG_RESULT($system) AC_SUBST(system) AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')]) @@ -267,7 +267,7 @@ if test "$gc" = yes; then fi -# Check for the required Perl dependencies (DBI and DBD::SQLite). +# Check for the required Perl dependencies (DBI, DBD::SQLite and WWW::Curl). perlFlags="-I$perllibdir" AC_ARG_WITH(dbi, AC_HELP_STRING([--with-dbi=PATH], @@ -288,14 +288,14 @@ if ! $perl $perlFlags -e 'use DBI; use DBD::SQLite;' 2>&5; then AC_MSG_FAILURE([The Perl modules DBI and/or DBD::SQLite are missing.]) fi AC_MSG_RESULT(yes) - + AC_MSG_CHECKING([whether WWW::Curl works]) if ! $perl $perlFlags -e 'use WWW::Curl;' 2>&5; then AC_MSG_RESULT(no) AC_MSG_FAILURE([The Perl module WWW::Curl is missing.]) fi AC_MSG_RESULT(yes) - + AC_SUBST(perlFlags) diff --git a/release.nix b/release.nix index 0b382fff31b2..02e1e447eefe 100644 --- a/release.nix +++ b/release.nix @@ -44,7 +44,7 @@ let preDist = '' make -C doc/manual install prefix=$out - + make -C doc/manual manual.pdf prefix=$out cp doc/manual/manual.pdf $out/manual.pdf @@ -55,7 +55,7 @@ let # to Windows and Macs, so there should be no Linux binaries # in the closure). 
nuke-refs $out/manual.pdf - + echo "doc manual $out/share/doc/nix/manual" >> $out/nix-support/hydra-build-products echo "doc-pdf manual $out/manual.pdf" >> $out/nix-support/hydra-build-products echo "doc release-notes $out/share/doc/nix/release-notes" >> $out/nix-support/hydra-build-products @@ -142,7 +142,7 @@ let dontInstall = false; doInstallCheck = true; - + lcovFilter = [ "*/boost/*" "*-tab.*" ]; # We call `dot', and even though we just use it to @@ -151,16 +151,16 @@ let FONTCONFIG_FILE = texFunctions.fontsConf; }; - + rpm_fedora13i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora13i386) 50; rpm_fedora13x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora13x86_64) 50; rpm_fedora16i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora16i386) 50; rpm_fedora16x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora16x86_64) 50; - + deb_debian60i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian60i386) 50; deb_debian60x86_64 = makeDeb_x86_64 (diskImageFunsFun: diskImageFunsFun.debian60x86_64) 50; - + deb_ubuntu1004i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1004i386) 50; deb_ubuntu1004x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1004x86_64) 50; deb_ubuntu1010i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1010i386) 50; @@ -186,7 +186,7 @@ let makeRPM_i686 = makeRPM "i686-linux"; makeRPM_x86_64 = makeRPM "x86_64-linux"; - makeRPM = + makeRPM = system: diskImageFun: prio: with import nixpkgs { inherit system; }; @@ -204,7 +204,7 @@ let makeDeb_i686 = makeDeb "i686-linux"; makeDeb_x86_64 = makeDeb "x86_64-linux"; - + makeDeb = system: diskImageFun: prio: -- cgit 1.4.1 From 2688fb73f1e0bd96003a82c89ac8de12eca2b49f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 27 Aug 2012 11:47:04 -0400 Subject: Add perl-WWW-Curl to the RPM image --- release.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release.nix b/release.nix index 02e1e447eefe..95c112811f50 100644 --- 
a/release.nix +++ b/release.nix @@ -195,7 +195,7 @@ let name = "nix-rpm-${diskImage.name}"; src = jobs.tarball; diskImage = (diskImageFun vmTools.diskImageFuns) - { extraPackages = [ "perl-DBD-SQLite" "perl-devel" "sqlite" "sqlite-devel" "bzip2-devel" "emacs" ]; }; + { extraPackages = [ "perl-DBD-SQLite" "perl-devel" "sqlite" "sqlite-devel" "bzip2-devel" "emacs" "perl-WWW-Curl" ]; }; memSize = 1024; meta.schedulingPriority = prio; postRPMInstall = "cd /tmp/rpmout/BUILD/nix-* && make installcheck"; -- cgit 1.4.1 From cc8641815b018315ee444c58dd4bc6bfc38c7d0f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 27 Aug 2012 11:47:49 -0400 Subject: Add WWW::Curl to the RPM dependencies --- nix.spec.in | 1 + 1 file changed, 1 insertion(+) diff --git a/nix.spec.in b/nix.spec.in index 8b1ea8a543e9..9d93c654d05e 100644 --- a/nix.spec.in +++ b/nix.spec.in @@ -16,6 +16,7 @@ BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) %endif BuildRequires: perl(DBD::SQLite) BuildRequires: perl(DBI) +BuildRequires: perl(WWW::Curl) BuildRequires: perl(ExtUtils::ParseXS) Requires: /usr/bin/perl Requires: curl -- cgit 1.4.1 From f3077fd88d6ec8f05a5471687f23589e34dfeaeb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 27 Aug 2012 13:45:53 -0400 Subject: Add libwww-curl-perl to the Debian/Ubuntu images --- release.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release.nix b/release.nix index 95c112811f50..2aadd320fa88 100644 --- a/release.nix +++ b/release.nix @@ -214,7 +214,7 @@ let name = "nix-deb"; src = jobs.tarball; diskImage = (diskImageFun vmTools.diskImageFuns) - { extraPackages = [ "libdbd-sqlite3-perl" "libsqlite3-dev" "libbz2-dev" ]; }; + { extraPackages = [ "libdbd-sqlite3-perl" "libsqlite3-dev" "libbz2-dev" "libwww-curl-perl" ]; }; memSize = 1024; meta.schedulingPriority = prio; configureFlags = "--sysconfdir=/etc"; -- cgit 1.4.1 From cfd968dd94f35c5ef781be9bda883d8818fc1d6e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: 
Mon, 27 Aug 2012 14:17:13 -0400 Subject: Fix stupid type error in calling std::max --- src/libstore/build.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 1840fb7b21bc..f93dd9b84a82 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -2898,7 +2898,7 @@ void Worker::waitForInput() if (lastWokenUp == 0) printMsg(lvlError, "waiting for locks or build slots..."); if (lastWokenUp == 0 || lastWokenUp > before) lastWokenUp = before; - timeout.tv_sec = std::max((time_t) 0, lastWokenUp + settings.pollInterval - before); + timeout.tv_sec = std::max((time_t) 0, (time_t) (lastWokenUp + settings.pollInterval - before)); } else lastWokenUp = 0; using namespace std; -- cgit 1.4.1 From 9e2fc6951ca049b15149a2c4b75d2f5bff7f07e1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 27 Aug 2012 14:20:14 -0400 Subject: Disable the binary cache substituter by default for now --- doc/manual/conf-file.xml | 4 ++-- scripts/download-from-binary-cache.pl.in | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/doc/manual/conf-file.xml b/doc/manual/conf-file.xml index ae167fb787c7..82bc025bcd74 100644 --- a/doc/manual/conf-file.xml +++ b/doc/manual/conf-file.xml @@ -323,8 +323,8 @@ build-use-chroot = /dev /proc /bin binary-caches A list of URLs of binary caches, separated by - whitespace. The default is - http://nixos.org/binary-cache. + whitespace. The default is empty. diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in index e65e2d5b36d2..335d7d9706fd 100644 --- a/scripts/download-from-binary-cache.pl.in +++ b/scripts/download-from-binary-cache.pl.in @@ -171,9 +171,8 @@ sub getAvailableCaches { return map { s/\/+$//; $_ } split(/ /, $s); } - my @urls = strToList - ($Nix::Config::config{"binary-caches"} - // ($Nix::Config::storeDir eq "/nix/store" ? 
"http://nixos.org/binary-cache" : "")); + my @urls = strToList ($Nix::Config::config{"binary-caches"} // ""); + # // ($Nix::Config::storeDir eq "/nix/store" ? "http://nixos.org/binary-cache" : "")); my $urlsFiles = $Nix::Config::config{"binary-cache-files"} // "/nix/var/nix/profiles/per-user/root/channels/binary-caches/*"; -- cgit 1.4.1