Diffstat (limited to 'scripts')
-rwxr-xr-x | scripts/build-remote.pl.in | 302
-rwxr-xr-x | scripts/copy-from-other-stores.pl.in | 101
-rwxr-xr-x | scripts/copying-collector.pl | 111
-rw-r--r-- | scripts/download-from-binary-cache.pl.in | 601
-rwxr-xr-x | scripts/download-using-manifests.pl.in | 377
-rwxr-xr-x | scripts/find-runtime-roots.pl.in | 79
-rw-r--r-- | scripts/install-nix-from-closure.sh | 34
-rw-r--r-- | scripts/local.mk | 37
-rwxr-xr-x | scripts/nix-build.in | 261
-rwxr-xr-x | scripts/nix-channel.in | 209
-rwxr-xr-x | scripts/nix-collect-garbage.in | 57
-rwxr-xr-x | scripts/nix-copy-closure.in | 131
-rwxr-xr-x | scripts/nix-generate-patches.in | 52
-rwxr-xr-x | scripts/nix-http-export.cgi.in | 51
-rwxr-xr-x | scripts/nix-install-package.in | 138
-rwxr-xr-x | scripts/nix-prefetch-url.in | 130
-rw-r--r-- | scripts/nix-profile.sh.in | 22
-rwxr-xr-x | scripts/nix-pull.in | 103
-rwxr-xr-x | scripts/nix-push.in | 293
-rwxr-xr-x | scripts/nix-reduce-build.in | 171
-rwxr-xr-x | scripts/remove-patches.pl | 16
-rwxr-xr-x | scripts/show-duplication.pl | 73
22 files changed, 3349 insertions, 0 deletions
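build-remote.pl.in (the first file below) reads the machines file named by $NIX_REMOTE_SYSTEMS, one machine per line, with whitespace-separated fields: host name, comma-separated system types, SSH identity file, maximum number of jobs, speed factor, and optional comma-separated supported and mandatory features. A hypothetical entry matching that parser (the host name and key path are invented for illustration):

    nix@mars.example.org  x86_64-linux,i686-linux  /root/.ssh/id_mars  4  2  kvm,big-parallel  benchmark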
diff --git a/scripts/build-remote.pl.in b/scripts/build-remote.pl.in new file mode 100755 index 000000000000..6dfa16d5cbda --- /dev/null +++ b/scripts/build-remote.pl.in @@ -0,0 +1,302 @@ +#! @perl@ -w @perlFlags@ + +use Fcntl qw(:DEFAULT :flock); +use English '-no_match_vars'; +use IO::Handle; +use Nix::Config; +use Nix::SSH qw/sshOpts openSSHConnection/; +use Nix::CopyClosure; +use Nix::Store; +no warnings('once'); + + +# General operation: +# +# Try to find a free machine of type $neededSystem. We do this as +# follows: +# - We acquire an exclusive lock on $currentLoad/main-lock. +# - For each machine $machine of type $neededSystem and for each $slot +# less than the maximum load for that machine, we try to get an +# exclusive lock on $currentLoad/$machine-$slot (without blocking). +# If we get such a lock, we send "accept" to the caller. Otherwise, +# we send "postpone" and exit. +# - We release the exclusive lock on $currentLoad/main-lock. +# - We perform the build on $neededSystem. +# - We release the exclusive lock on $currentLoad/$machine-$slot. +# +# The nice thing about this scheme is that if we die prematurely, the +# locks are released automatically. + + +# Make sure that we don't get any SSH passphrase or host key popups - +# if there is any problem it should fail, not do something +# interactive. +$ENV{"DISPLAY"} = ""; +$ENV{"SSH_ASKPASS"} = ""; + + +sub sendReply { + my $reply = shift; + print STDERR "# $reply\n"; +} + +sub all { $_ || return 0 for @_; 1 } + + +# Initialisation. +my $loadIncreased = 0; + +my ($localSystem, $maxSilentTime, $printBuildTrace, $buildTimeout) = @ARGV; + +my $currentLoad = $ENV{"NIX_CURRENT_LOAD"}; +my $conf = $ENV{"NIX_REMOTE_SYSTEMS"}; + + +sub openSlotLock { + my ($machine, $slot) = @_; + my $slotLockFn = "$currentLoad/" . (join '+', @{$machine->{systemTypes}}) . "-" . $machine->{hostName} . "-$slot"; + my $slotLock = new IO::Handle; + sysopen $slotLock, "$slotLockFn", O_RDWR|O_CREAT, 0600 or die; + return $slotLock; +} + + +# Read the list of machines. +my @machines; +if (defined $conf && -e $conf) { + open CONF, "<$conf" or die; + while (<CONF>) { + chomp; + s/\#.*$//g; + next if /^\s*$/; + my @tokens = split /\s/, $_; + my @supportedFeatures = split(/,/, $tokens[5] || ""); + my @mandatoryFeatures = split(/,/, $tokens[6] || ""); + push @machines, + { hostName => $tokens[0] + , systemTypes => [ split(/,/, $tokens[1]) ] + , sshKeys => $tokens[2] + , maxJobs => int($tokens[3]) + , speedFactor => 1.0 * (defined $tokens[4] ? int($tokens[4]) : 1) + , supportedFeatures => [ @supportedFeatures, @mandatoryFeatures ] + , mandatoryFeatures => [ @mandatoryFeatures ] + , enabled => 1 + }; + } + close CONF; +} + + + +# Wait for the calling process to ask us whether we can build some derivation. +my ($drvPath, $hostName, $slotLock); + +REQ: while (1) { + $_ = <STDIN> || exit 0; + (my $amWilling, my $neededSystem, $drvPath, my $requiredFeatures) = split; + my @requiredFeatures = split /,/, $requiredFeatures; + + my $canBuildLocally = $amWilling && ($localSystem eq $neededSystem); + + if (!defined $currentLoad) { + sendReply "decline"; + next; + } + + # Acquire the exclusive lock on $currentLoad/main-lock. 
+ mkdir $currentLoad, 0777 or die unless -d $currentLoad; + my $mainLock = "$currentLoad/main-lock"; + sysopen MAINLOCK, "$mainLock", O_RDWR|O_CREAT, 0600 or die; + flock(MAINLOCK, LOCK_EX) or die; + + + while (1) { + # Find all machine that can execute this build, i.e., that + # support builds for the given platform and features, and are + # not at their job limit. + my $rightType = 0; + my @available = (); + LOOP: foreach my $cur (@machines) { + if ($cur->{enabled} + && (grep { $neededSystem eq $_ } @{$cur->{systemTypes}}) + && all(map { my $f = $_; 0 != grep { $f eq $_ } @{$cur->{supportedFeatures}} } (@requiredFeatures, @mandatoryFeatures)) + && all(map { my $f = $_; 0 != grep { $f eq $_ } @requiredFeatures } @{$cur->{mandatoryFeatures}}) + ) + { + $rightType = 1; + + # We have a machine of the right type. Determine the load on + # the machine. + my $slot = 0; + my $load = 0; + my $free; + while ($slot < $cur->{maxJobs}) { + my $slotLock = openSlotLock($cur, $slot); + if (flock($slotLock, LOCK_EX | LOCK_NB)) { + $free = $slot unless defined $free; + flock($slotLock, LOCK_UN) or die; + } else { + $load++; + } + close $slotLock; + $slot++; + } + + push @available, { machine => $cur, load => $load, free => $free } + if $load < $cur->{maxJobs}; + } + } + + if (defined $ENV{NIX_DEBUG_HOOK}) { + print STDERR "load on " . $_->{machine}->{hostName} . " = " . $_->{load} . "\n" + foreach @available; + } + + + # Didn't find any available machine? Then decline or postpone. + if (scalar @available == 0) { + # Postpone if we have a machine of the right type, except + # if the local system can and wants to do the build. + if ($rightType && !$canBuildLocally) { + sendReply "postpone"; + } else { + sendReply "decline"; + } + close MAINLOCK; + next REQ; + } + + + # Prioritise the available machines as follows: + # - First by load divided by speed factor, rounded to the nearest + # integer. This causes fast machines to be preferred over slow + # machines with similar loads. + # - Then by speed factor. + # - Finally by load. + sub lf { my $x = shift; return int($x->{load} / $x->{machine}->{speedFactor} + 0.4999); } + @available = sort + { lf($a) <=> lf($b) + || $b->{machine}->{speedFactor} <=> $a->{machine}->{speedFactor} + || $a->{load} <=> $b->{load} + } @available; + + + # Select the best available machine and lock a free slot. + my $selected = $available[0]; + my $machine = $selected->{machine}; + + $slotLock = openSlotLock($machine, $selected->{free}); + flock($slotLock, LOCK_EX | LOCK_NB) or die; + utime undef, undef, $slotLock; + + close MAINLOCK; + + + # Connect to the selected machine. + @sshOpts = ("-i", $machine->{sshKeys}, "-x"); + $hostName = $machine->{hostName}; + if (openSSHConnection($hostName)) { + last REQ if system("ssh $hostName @sshOpts nix-builds-inhibited < /dev/null > /dev/null 2>&1") != 0; + warn "machine `$hostName' is refusing builds, trying other available machines...\n"; + closeSSHConnection; + } else { + warn "unable to open SSH connection to `$hostName', trying other available machines...\n"; + } + $machine->{enabled} = 0; + } +} + + +# Tell Nix we've accepted the build. +sendReply "accept"; +my @inputs = split /\s/, readline(STDIN); +my @outputs = split /\s/, readline(STDIN); + + +print STDERR "@ build-remote $drvPath $hostName\n" if $printBuildTrace; + + +my $maybeSign = ""; +$maybeSign = "--sign" if -e "$Nix::Config::confDir/signing-key.sec"; + + +# Register the derivation as a temporary GC root. 
Note that $PPID is +# the PID of the remote SSH process, which, due to the use of a +# persistant SSH connection, should be the same across all remote +# command invocations for this session. +my $rootsDir = "@localstatedir@/nix/gcroots/tmp"; +system("ssh $hostName @sshOpts 'mkdir -m 1777 -p $rootsDir; ln -sfn $drvPath $rootsDir/\$PPID.drv'"); + +sub removeRoots { + system("ssh $hostName @sshOpts 'rm -f $rootsDir/\$PPID.drv $rootsDir/\$PPID.out'"); +} + + +# Copy the derivation and its dependencies to the build machine. This +# is guarded by an exclusive lock per machine to prevent multiple +# build-remote instances from copying to a machine simultaneously. +# That's undesirable because we may end up with N instances uploading +# the same missing path simultaneously, causing the effective network +# bandwidth and target disk speed to be divided by N. +my $uploadLock = "$currentLoad/$hostName.upload-lock"; +sysopen UPLOADLOCK, "$uploadLock", O_RDWR|O_CREAT, 0600 or die; +eval { + local $SIG{ALRM} = sub { die "alarm\n" }; + # Don't wait forever, so that a process that gets stuck while + # holding the lock doesn't block everybody else indefinitely. + # It's safe to continue after a timeout, just (potentially) + # inefficient. + alarm 15 * 60; + flock(UPLOADLOCK, LOCK_EX); + alarm 0; +}; +if ($@) { + die unless $@ eq "alarm\n"; + print STDERR "somebody is hogging $uploadLock, continuing...\n"; + unlink $uploadLock; +} +Nix::CopyClosure::copyTo($hostName, [ @sshOpts ], [ $drvPath, @inputs ], "", "", 0, 0, $maybeSign ne "", ""); +close UPLOADLOCK; + + +# Perform the build. +my $buildFlags = + "--max-silent-time $maxSilentTime --option build-timeout $buildTimeout" + . " --fallback --add-root $rootsDir/\$PPID.out --quiet" + . " --option build-keep-log false --option build-use-substitutes false"; + +# We let the remote side kill its process group when the connection is +# closed unexpectedly. This is necessary to ensure that no processes +# are left running on the remote system if the local Nix process is +# killed. (SSH itself doesn't kill child processes if the connection +# is interrupted unless the `-tt' flag is used to force a pseudo-tty, +# in which case every child receives SIGHUP; however, `-tt' doesn't +# work on some platforms when connection sharing is used.) +print STDERR "building `$drvPath' on `$hostName'\n"; +pipe STDIN, DUMMY; # make sure we have a readable STDIN +if (system("exec ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & exec nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) { + # Note that if we get exit code 100 from `nix-store -r', it + # denotes a permanent build failure (as opposed to an SSH problem + # or a temporary Nix problem). We propagate this to the caller to + # allow it to distinguish between transient and permanent + # failures. + my $res = $? >> 8; + print STDERR "build of `$drvPath' on `$hostName' failed with exit code $res\n"; + removeRoots; + exit $res; +} + +#print "build of `$drvPath' on `$hostName' succeeded\n"; + + +# Copy the output from the build machine. +my @outputs2 = grep { !isValidPath($_) } @outputs; +if (scalar @outputs2 > 0) { + system("exec ssh $hostName @sshOpts 'nix-store --export @outputs2'" . + "| NIX_HELD_LOCKS='@outputs2' @bindir@/nix-store --import > /dev/null") == 0 + or die("cannot copy paths " . join(", ", @outputs) . " from `$hostName': $?"); +} + + +# Get rid of the temporary GC roots. 
+removeRoots; diff --git a/scripts/copy-from-other-stores.pl.in b/scripts/copy-from-other-stores.pl.in new file mode 100755 index 000000000000..bfd38c9ba3f8 --- /dev/null +++ b/scripts/copy-from-other-stores.pl.in @@ -0,0 +1,101 @@ +#! @perl@ -w @perlFlags@ + +use strict; +use File::Basename; +use IO::Handle; + +my $binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@"; + + +STDOUT->autoflush(1); + +my @remoteStoresAll = split ':', ($ENV{"NIX_OTHER_STORES"} or ""); + +my @remoteStores; +foreach my $dir (@remoteStoresAll) { + push @remoteStores, glob($dir); +} + +exit if scalar @remoteStores == 0; +print "\n"; + + +$ENV{"NIX_REMOTE"} = ""; + + +sub findStorePath { + my $storePath = shift; + foreach my $store (@remoteStores) { + my $sourcePath = "$store/store/" . basename $storePath; + next unless -e $sourcePath || -l $sourcePath; + $ENV{"NIX_DB_DIR"} = "$store/var/nix/db"; + return ($store, $sourcePath) if + system("$binDir/nix-store --check-validity $storePath") == 0; + } + return undef; +} + + +if ($ARGV[0] eq "--query") { + + while (<STDIN>) { + chomp; + my ($cmd, @args) = split " ", $_; + + if ($cmd eq "have") { + foreach my $storePath (@args) { + print "$storePath\n" if defined findStorePath($storePath); + } + print "\n"; + } + + elsif ($cmd eq "info") { + foreach my $storePath (@args) { + my ($store, $sourcePath) = findStorePath($storePath); + next unless defined $store; + + $ENV{"NIX_DB_DIR"} = "$store/var/nix/db"; + + my $deriver = `$binDir/nix-store --query --deriver $storePath`; + die "cannot query deriver of `$storePath'" if $? != 0; + chomp $deriver; + $deriver = "" if $deriver eq "unknown-deriver"; + + my @references = split "\n", + `$binDir/nix-store --query --references $storePath`; + die "cannot query references of `$storePath'" if $? != 0; + + my $narSize = `$binDir/nix-store --query --size $storePath`; + die "cannot query size of `$storePath'" if $? != 0; + chomp $narSize; + + print "$storePath\n"; + print "$deriver\n"; + print scalar @references, "\n"; + print "$_\n" foreach @references; + print "0\n"; + print "$narSize\n"; + } + + print "\n"; + } + + else { die "unknown command `$cmd'"; } + } +} + + +elsif ($ARGV[0] eq "--substitute") { + die unless scalar @ARGV == 3; + my $storePath = $ARGV[1]; + my $destPath = $ARGV[2]; + my ($store, $sourcePath) = findStorePath $storePath; + die unless $store; + print STDERR "\n*** Copying `$storePath' from `$sourcePath'\n\n"; + system("$binDir/nix-store --dump $sourcePath | $binDir/nix-store --restore $destPath") == 0 + or die "cannot copy `$sourcePath' to `$storePath'"; + print "\n"; # no hash to verify +} + + +else { die; } diff --git a/scripts/copying-collector.pl b/scripts/copying-collector.pl new file mode 100755 index 000000000000..2e8be2d41f32 --- /dev/null +++ b/scripts/copying-collector.pl @@ -0,0 +1,111 @@ +#! /usr/bin/perl -w + +use strict; + +my @paths = `nix-store -qR /home/eelco/.nix-profile/bin/firefox`; + +my %copyMap; +my %rewriteMap; + + +my $counter = 0; + +foreach my $path (@paths) { + chomp $path; + + $path =~ /^(.*)\/([^-]+)-(.*)$/ or die "invalid store path `$path'"; + my $hash = $2; + +# my $newHash = "deadbeef" . (sprintf "%024d", $counter++); + my $newHash = "deadbeef" . 
substr($hash, 0, 24); + my $newPath = "/home/eelco/chroot/$1/$newHash-$3"; + + die unless length $newHash == length $hash; + + $copyMap{$path} = $newPath; + $rewriteMap{$hash} = $newHash; +} + + +my %rewriteMap2; + + +sub rewrite; +sub rewrite { + my $src = shift; + my $dst = shift; + + if (-l $dst) { + + my $target = readlink $dst or die; + + foreach my $srcHash (keys %rewriteMap2) { + my $dstHash = $rewriteMap{$srcHash}; + print " $srcHash -> $dstHash\n"; + $target =~ s/$srcHash/$dstHash/g; + } + + unlink $dst or die; + + symlink $target, $dst; + + } + + elsif (-f $dst) { + + print "$dst\n"; + + foreach my $srcHash (keys %rewriteMap2) { + my $dstHash = $rewriteMap{$srcHash}; + print " $srcHash -> $dstHash\n"; + + my @stats = lstat $dst or die; + + system "sed s/$srcHash/$dstHash/g < '$dst' > '$dst.tmp'"; + die if $? != 0; + rename "$dst.tmp", $dst or die; + + chmod $stats[2], $dst or die; + } + + } + + elsif (-d $dst) { + + chmod 0755, $dst; + + opendir(DIR, "$dst") or die "cannot open `$dst': $!"; + my @files = readdir DIR; + closedir DIR; + + foreach my $file (@files) { + next if $file eq "." || $file eq ".."; + rewrite "$src/$file", "$dst/$file"; + } + } +} + + +foreach my $src (keys %copyMap) { + my $dst = $copyMap{$src}; + print "$src -> $dst\n"; + + if (!-e $dst) { + system "cp -prd $src $dst"; + die if $? != 0; + + my @refs = `nix-store -q --references $src`; + + %rewriteMap2 = (); + foreach my $ref (@refs) { + chomp $ref; + + $ref =~ /^(.*)\/([^-]+)-(.*)$/ or die "invalid store path `$ref'"; + my $hash = $2; + + $rewriteMap2{$hash} = $rewriteMap{$hash}; + } + + rewrite $src, $dst; + } +} diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in new file mode 100644 index 000000000000..e6925d7316d0 --- /dev/null +++ b/scripts/download-from-binary-cache.pl.in @@ -0,0 +1,601 @@ +#! @perl@ -w @perlFlags@ + +use DBI; +use DBD::SQLite; +use File::Basename; +use IO::Select; +use Nix::Config; +use Nix::Store; +use Nix::Utils; +use Nix::Manifest; +use WWW::Curl::Easy; +use WWW::Curl::Multi; +use strict; + + +Nix::Config::readConfig; + +my @caches; +my $gotCaches = 0; + +my $maxParallelRequests = int($Nix::Config::config{"binary-caches-parallel-connections"} // 150); +$maxParallelRequests = 1 if $maxParallelRequests < 1; + +my $ttlNegative = 24 * 3600; # when to purge negative lookups from the database +my $ttlNegativeUse = 3600; # how long negative lookups are valid for non-"have" lookups +my $didExpiration = 0; + +my $showAfter = 5; # show that we're waiting for a request after this many seconds + +my $debug = ($Nix::Config::config{"debug-subst"} // "") eq 1 || ($Nix::Config::config{"untrusted-debug-subst"} // "") eq 1; + +my $cacheFileURLs = ($ENV{"_NIX_CACHE_FILE_URLS"} // "") eq 1; # for testing + +my ($dbh, $queryCache, $insertNAR, $queryNAR, $insertNARExistence, $queryNARExistence, $expireNARExistence); + +my $curlm = WWW::Curl::Multi->new; +my $activeRequests = 0; +my $curlIdCount = 1; +my %requests; +my %scheduled; +my $caBundle = $ENV{"CURL_CA_BUNDLE"} // $ENV{"OPENSSL_X509_CERT_FILE"}; + +my $userName = getpwuid($<) or die "cannot figure out user name"; + +my $requireSignedBinaryCaches = ($Nix::Config::config{"signed-binary-caches"} // "0") ne "0"; + + +sub addRequest { + my ($storePath, $url, $head) = @_; + + my $curl = WWW::Curl::Easy->new; + my $curlId = $curlIdCount++; + $requests{$curlId} = { storePath => $storePath, url => $url, handle => $curl, content => "", type => $head ? 
"HEAD" : "GET" + , shown => 0, started => time() }; + + $curl->setopt(CURLOPT_PRIVATE, $curlId); + $curl->setopt(CURLOPT_URL, $url); + open (my $fh, ">", \$requests{$curlId}->{content}); + $curl->setopt(CURLOPT_WRITEDATA, $fh); + $curl->setopt(CURLOPT_FOLLOWLOCATION, 1); + $curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle; + $curl->setopt(CURLOPT_USERAGENT, "Nix/$Nix::Config::version"); + $curl->setopt(CURLOPT_NOBODY, 1) if $head; + $curl->setopt(CURLOPT_FAILONERROR, 1); + $curl->setopt(CURLOPT_TIMEOUT, int($ENV{"NIX_CONNECT_TIMEOUT"} // 0)); + + if ($activeRequests >= $maxParallelRequests) { + $scheduled{$curlId} = 1; + } else { + $curlm->add_handle($curl); + $activeRequests++; + } + + return $requests{$curlId}; +} + + +sub processRequests { + while ($activeRequests) { + my ($rfds, $wfds, $efds) = $curlm->fdset(); + #print STDERR "R = @{$rfds}, W = @{$wfds}, E = @{$efds}\n"; + + # Sleep until we can read or write some data. + if (scalar @{$rfds} + scalar @{$wfds} + scalar @{$efds} > 0) { + IO::Select->select(IO::Select->new(@{$rfds}), IO::Select->new(@{$wfds}), IO::Select->new(@{$efds}), 1.0); + } + + if ($curlm->perform() != $activeRequests) { + while (my ($id, $result) = $curlm->info_read) { + if ($id) { + my $request = $requests{$id} or die; + my $handle = $request->{handle}; + $request->{result} = $result; + $request->{httpStatus} = $handle->getinfo(CURLINFO_RESPONSE_CODE); + + print STDERR "$request->{type} on $request->{url} [$request->{result}, $request->{httpStatus}]\n" if $debug; + + $activeRequests--; + delete $request->{handle}; + + if (scalar(keys %scheduled) > 0) { + my $id2 = (keys %scheduled)[0]; + $curlm->add_handle($requests{$id2}->{handle}); + $activeRequests++; + delete $scheduled{$id2}; + } + } + } + } + + my $time = time(); + while (my ($key, $request) = each %requests) { + next unless defined $request->{handle}; + next if $request->{shown}; + if ($time > $request->{started} + $showAfter) { + print STDERR "still waiting for ‘$request->{url}’ after $showAfter seconds...\n"; + $request->{shown} = 1; + } + } + } +} + + +sub initCache { + my $dbPath = "$Nix::Config::stateDir/binary-cache-v3.sqlite"; + + unlink "$Nix::Config::stateDir/binary-cache-v1.sqlite"; + unlink "$Nix::Config::stateDir/binary-cache-v2.sqlite"; + + # Open/create the database. + $dbh = DBI->connect("dbi:SQLite:dbname=$dbPath", "", "") + or die "cannot open database `$dbPath'"; + $dbh->{RaiseError} = 1; + $dbh->{PrintError} = 0; + + $dbh->sqlite_busy_timeout(60 * 60 * 1000); + + $dbh->do("pragma synchronous = off"); # we can always reproduce the cache + $dbh->do("pragma journal_mode = truncate"); + + # Initialise the database schema, if necessary. 
+ $dbh->do(<<EOF); + create table if not exists BinaryCaches ( + id integer primary key autoincrement not null, + url text unique not null, + timestamp integer not null, + storeDir text not null, + wantMassQuery integer not null, + priority integer not null + ); +EOF + + $dbh->do(<<EOF); + create table if not exists NARs ( + cache integer not null, + storePath text not null, + url text not null, + compression text not null, + fileHash text, + fileSize integer, + narHash text, + narSize integer, + refs text, + deriver text, + signedBy text, + timestamp integer not null, + primary key (cache, storePath), + foreign key (cache) references BinaryCaches(id) on delete cascade + ); +EOF + + $dbh->do(<<EOF); + create table if not exists NARExistence ( + cache integer not null, + storePath text not null, + exist integer not null, + timestamp integer not null, + primary key (cache, storePath), + foreign key (cache) references BinaryCaches(id) on delete cascade + ); +EOF + + $dbh->do("create index if not exists NARExistenceByExistTimestamp on NARExistence (exist, timestamp)"); + + $queryCache = $dbh->prepare("select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?") or die; + + $insertNAR = $dbh->prepare( + "insert or replace into NARs(cache, storePath, url, compression, fileHash, fileSize, narHash, " . + "narSize, refs, deriver, signedBy, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die; + + $queryNAR = $dbh->prepare("select * from NARs where cache = ? and storePath = ?") or die; + + $insertNARExistence = $dbh->prepare( + "insert or replace into NARExistence(cache, storePath, exist, timestamp) values (?, ?, ?, ?)") or die; + + $queryNARExistence = $dbh->prepare("select exist, timestamp from NARExistence where cache = ? and storePath = ?") or die; + + $expireNARExistence = $dbh->prepare("delete from NARExistence where exist = ? and timestamp < ?") or die; +} + + +sub getAvailableCaches { + return if $gotCaches; + $gotCaches = 1; + + sub strToList { + my ($s) = @_; + return map { s/\/+$//; $_ } split(/ /, $s); + } + + my @urls = strToList($Nix::Config::config{"binary-caches"} // + ($Nix::Config::storeDir eq "/nix/store" ? "http://cache.nixos.org" : "")); + + my $urlsFiles = $Nix::Config::config{"binary-cache-files"} + // "$Nix::Config::stateDir/profiles/per-user/$userName/channels/binary-caches/*"; + foreach my $urlFile (glob $urlsFiles) { + next unless -f $urlFile; + open FILE, "<$urlFile" or die "cannot open ‘$urlFile’\n"; + my $url = <FILE>; chomp $url; + close FILE; + push @urls, strToList($url); + } + + push @urls, strToList($Nix::Config::config{"extra-binary-caches"} // ""); + + # Allow Nix daemon users to override the binary caches to a subset + # of those listed in the config file. Note that ‘untrusted-*’ + # denotes options passed by the client. 
+ my @trustedUrls = uniq(@urls, strToList($Nix::Config::config{"trusted-binary-caches"} // "")); + + if (defined $Nix::Config::config{"untrusted-binary-caches"}) { + my @untrustedUrls = strToList $Nix::Config::config{"untrusted-binary-caches"}; + @urls = (); + foreach my $url (@untrustedUrls) { + die "binary cache ‘$url’ is not trusted (please add it to ‘trusted-binary-caches’ [@trustedUrls] in $Nix::Config::confDir/nix.conf)\n" + unless scalar(grep { $url eq $_ } @trustedUrls) > 0; + push @urls, $url; + } + } + + my @untrustedUrls = strToList $Nix::Config::config{"untrusted-extra-binary-caches"} // ""; + foreach my $url (@untrustedUrls) { + next unless scalar(grep { $url eq $_ } @trustedUrls) > 0; + push @urls, $url; + } + + foreach my $url (uniq @urls) { + + # FIXME: not atomic. + $queryCache->execute($url); + my $res = $queryCache->fetchrow_hashref(); + if (defined $res) { + next if $res->{storeDir} ne $Nix::Config::storeDir; + push @caches, { id => $res->{id}, url => $url, wantMassQuery => $res->{wantMassQuery}, priority => $res->{priority} }; + next; + } + + # Get the cache info file. + my $request = addRequest(undef, $url . "/nix-cache-info"); + processRequests; + + if ($request->{result} != 0) { + print STDERR "could not download ‘$request->{url}’ (" . + ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n"; + next; + } + + my $storeDir = "/nix/store"; + my $wantMassQuery = 0; + my $priority = 50; + foreach my $line (split "\n", $request->{content}) { + unless ($line =~ /^(.*): (.*)$/) { + print STDERR "bad cache info file ‘$request->{url}’\n"; + return undef; + } + if ($1 eq "StoreDir") { $storeDir = $2; } + elsif ($1 eq "WantMassQuery") { $wantMassQuery = int($2); } + elsif ($1 eq "Priority") { $priority = int($2); } + } + + $dbh->do("insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)", + {}, $url, time(), $storeDir, $wantMassQuery, $priority); + $queryCache->execute($url); + $res = $queryCache->fetchrow_hashref() or die; + next if $storeDir ne $Nix::Config::storeDir; + push @caches, { id => $res->{id}, url => $url, wantMassQuery => $wantMassQuery, priority => $priority }; + } + + @caches = sort { $a->{priority} <=> $b->{priority} } @caches; + + expireNegative(); +} + + +sub shouldCache { + my ($url) = @_; + return $cacheFileURLs || $url !~ /^file:/; +} + + +sub processNARInfo { + my ($storePath, $cache, $request) = @_; + + if ($request->{result} != 0) { + if ($request->{result} != 37 && $request->{httpStatus} != 404 && $request->{httpStatus} != 403) { + print STDERR "could not download ‘$request->{url}’ (" . + ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n"; + } else { + $insertNARExistence->execute($cache->{id}, basename($storePath), 0, time()) + if shouldCache $request->{url}; + } + return undef; + } + + my $narInfo = parseNARInfo($storePath, $request->{content}, $requireSignedBinaryCaches, $request->{url}); + return undef unless defined $narInfo; + + die if $requireSignedBinaryCaches && !defined $narInfo->{signedBy}; + + # Cache the result. 
+ $insertNAR->execute( + $cache->{id}, basename($storePath), $narInfo->{url}, $narInfo->{compression}, + $narInfo->{fileHash}, $narInfo->{fileSize}, $narInfo->{narHash}, $narInfo->{narSize}, + join(" ", @{$narInfo->{refs}}), $narInfo->{deriver}, $narInfo->{signedBy}, time()) + if shouldCache $request->{url}; + + return $narInfo; +} + + +sub getCachedInfoFrom { + my ($storePath, $cache) = @_; + + $queryNAR->execute($cache->{id}, basename($storePath)); + my $res = $queryNAR->fetchrow_hashref(); + return undef unless defined $res; + + # We may previously have cached this info when signature checking + # was disabled. In that case, ignore the cached info. + return undef if $requireSignedBinaryCaches && !defined $res->{signedBy}; + + return + { url => $res->{url} + , compression => $res->{compression} + , fileHash => $res->{fileHash} + , fileSize => $res->{fileSize} + , narHash => $res->{narHash} + , narSize => $res->{narSize} + , refs => [ split " ", $res->{refs} ] + , deriver => $res->{deriver} + , signedBy => $res->{signedBy} + } if defined $res; +} + + +sub negativeHit { + my ($storePath, $cache) = @_; + $queryNARExistence->execute($cache->{id}, basename($storePath)); + my $res = $queryNARExistence->fetchrow_hashref(); + return defined $res && $res->{exist} == 0 && time() - $res->{timestamp} < $ttlNegativeUse; +} + + +sub positiveHit { + my ($storePath, $cache) = @_; + return 1 if defined getCachedInfoFrom($storePath, $cache); + $queryNARExistence->execute($cache->{id}, basename($storePath)); + my $res = $queryNARExistence->fetchrow_hashref(); + return defined $res && $res->{exist} == 1; +} + + +sub expireNegative { + return if $didExpiration; + $didExpiration = 1; + my $time = time(); + # Round up to the next multiple of the TTL to ensure that we do + # expiration only once per time interval. E.g. if $ttlNegative == + # 3600, we expire entries at most once per hour. This is + # presumably faster than expiring a few entries per request (and + # thus doing a transaction). + my $limit = (int($time / $ttlNegative) - 1) * $ttlNegative; + $expireNARExistence->execute($limit, 0); + print STDERR "expired ", $expireNARExistence->rows, " negative entries\n" if $debug; +} + + +sub printInfo { + my ($storePath, $info) = @_; + print "$storePath\n"; + print $info->{deriver} ? "$Nix::Config::storeDir/$info->{deriver}" : "", "\n"; + print scalar @{$info->{refs}}, "\n"; + print "$Nix::Config::storeDir/$_\n" foreach @{$info->{refs}}; + print $info->{fileSize} || 0, "\n"; + print $info->{narSize} || 0, "\n"; +} + + +sub infoUrl { + my ($binaryCacheUrl, $storePath) = @_; + my $pathHash = substr(basename($storePath), 0, 32); + my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo"; +} + + +sub printInfoParallel { + my @paths = @_; + + # First print all paths for which we have cached info. 
+ my @left; + foreach my $storePath (@paths) { + my $found = 0; + foreach my $cache (@caches) { + my $info = getCachedInfoFrom($storePath, $cache); + if (defined $info) { + printInfo($storePath, $info); + $found = 1; + last; + } + } + push @left, $storePath if !$found; + } + + return if scalar @left == 0; + + foreach my $cache (@caches) { + + my @left2; + %requests = (); + foreach my $storePath (@left) { + if (negativeHit($storePath, $cache)) { + push @left2, $storePath; + next; + } + addRequest($storePath, infoUrl($cache->{url}, $storePath)); + } + + processRequests; + + foreach my $request (values %requests) { + my $info = processNARInfo($request->{storePath}, $cache, $request); + if (defined $info) { + printInfo($request->{storePath}, $info); + } else { + push @left2, $request->{storePath}; + } + } + + @left = @left2; + } +} + + +sub printSubstitutablePaths { + my @paths = @_; + + # First look for paths that have cached info. + my @left; + foreach my $storePath (@paths) { + my $found = 0; + foreach my $cache (@caches) { + next unless $cache->{wantMassQuery}; + if (positiveHit($storePath, $cache)) { + print "$storePath\n"; + $found = 1; + last; + } + } + push @left, $storePath if !$found; + } + + return if scalar @left == 0; + + # For remaining paths, do HEAD requests. + foreach my $cache (@caches) { + next unless $cache->{wantMassQuery}; + my @left2; + %requests = (); + foreach my $storePath (@left) { + if (negativeHit($storePath, $cache)) { + push @left2, $storePath; + next; + } + addRequest($storePath, infoUrl($cache->{url}, $storePath), 1); + } + + processRequests; + + foreach my $request (values %requests) { + if ($request->{result} != 0) { + if ($request->{result} != 37 && $request->{httpStatus} != 404 && $request->{httpStatus} != 403) { + print STDERR "could not check ‘$request->{url}’ (" . + ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n"; + } else { + $insertNARExistence->execute($cache->{id}, basename($request->{storePath}), 0, time()) + if shouldCache $request->{url}; + } + push @left2, $request->{storePath}; + } else { + $insertNARExistence->execute($cache->{id}, basename($request->{storePath}), 1, time()) + if shouldCache $request->{url}; + print "$request->{storePath}\n"; + } + } + + @left = @left2; + } +} + + +sub downloadBinary { + my ($storePath, $destPath) = @_; + + foreach my $cache (@caches) { + my $info = getCachedInfoFrom($storePath, $cache); + + unless (defined $info) { + next if negativeHit($storePath, $cache); + my $request = addRequest($storePath, infoUrl($cache->{url}, $storePath)); + processRequests; + $info = processNARInfo($storePath, $cache, $request); + } + + next unless defined $info; + + my $decompressor; + if ($info->{compression} eq "bzip2") { $decompressor = "| $Nix::Config::bzip2 -d"; } + elsif ($info->{compression} eq "xz") { $decompressor = "| $Nix::Config::xz -d"; } + elsif ($info->{compression} eq "none") { $decompressor = ""; } + else { + print STDERR "unknown compression method ‘$info->{compression}’\n"; + next; + } + my $url = "$cache->{url}/$info->{url}"; # FIXME: handle non-relative URLs + die if $requireSignedBinaryCaches && !defined $info->{signedBy}; + print STDERR "\n*** Downloading ‘$url’ ", ($requireSignedBinaryCaches ? 
"(signed by ‘$info->{signedBy}’) " : ""), "to ‘$storePath’...\n"; + checkURL $url; + if (system("$Nix::Config::curl --fail --location --insecure '$url' $decompressor | $Nix::Config::binDir/nix-store --restore $destPath") != 0) { + warn "download of `$url' failed" . ($! ? ": $!" : "") . "\n"; + next; + } + + # Tell Nix about the expected hash so it can verify it. + die unless defined $info->{narHash} && $info->{narHash} ne ""; + print "$info->{narHash}\n"; + + print STDERR "\n"; + return; + } + + print STDERR "could not download ‘$storePath’ from any binary cache\n"; + exit 1; +} + + +# Bail out right away if binary caches are disabled. +exit 0 if + ($Nix::Config::config{"use-binary-caches"} // "true") eq "false" || + ($Nix::Config::config{"untrusted-use-binary-caches"} // "true") eq "false"; +print "\n"; +flush STDOUT; + +initCache(); + + +if ($ARGV[0] eq "--query") { + + while (<STDIN>) { + getAvailableCaches; + chomp; + my ($cmd, @args) = split " ", $_; + + if ($cmd eq "have") { + print STDERR "checking binary caches for existence of @args\n" if $debug; + printSubstitutablePaths(@args); + print "\n"; + } + + elsif ($cmd eq "info") { + print STDERR "checking binary caches for info on @args\n" if $debug; + printInfoParallel(@args); + print "\n"; + } + + else { die "unknown command `$cmd'"; } + + flush STDOUT; + } + +} + +elsif ($ARGV[0] eq "--substitute") { + my $storePath = $ARGV[1] or die; + my $destPath = $ARGV[2] or die; + getAvailableCaches; + downloadBinary($storePath, $destPath); +} + +else { + die; +} diff --git a/scripts/download-using-manifests.pl.in b/scripts/download-using-manifests.pl.in new file mode 100755 index 000000000000..b670163038e1 --- /dev/null +++ b/scripts/download-using-manifests.pl.in @@ -0,0 +1,377 @@ +#! @perl@ -w @perlFlags@ + +use strict; +use Nix::Config; +use Nix::Manifest; +use Nix::Store; +use Nix::Utils; +use POSIX qw(strftime); +use File::Temp qw(tempdir); + +STDOUT->autoflush(1); + +my $logFile = "$Nix::Config::logDir/downloads"; + +# For queries, skip expensive calls to nix-hash etc. We're just +# estimating the expected download size. +my $fast = 1; + +# ‘--insecure’ is fine because Nix verifies the hash of the result. +my $curl = "$Nix::Config::curl --fail --location --insecure"; + + +# Open the manifest cache and update it if necessary. +my $dbh = updateManifestDB(); +exit 0 unless defined $dbh; # exit if there are no manifests +print "\n"; + + +# $hashCache->{$algo}->{$path} yields the $algo-hash of $path. +my $hashCache; + + +sub parseHash { + my $hash = shift; + if ($hash =~ /^(.+):(.+)$/) { + return ($1, $2); + } else { + return ("md5", $hash); + } +} + + +# Compute the most efficient sequence of downloads to produce the +# given path. +sub computeSmallestDownload { + my $targetPath = shift; + + # Build a graph of all store paths that might contribute to the + # construction of $targetPath, and the special node "start". The + # edges are either patch operations, or downloads of full NAR + # files. The latter edges only occur between "start" and a store + # path. 
+ my %graph; + + $graph{"start"} = {d => 0, pred => undef, edges => []}; + + my @queue = (); + my $queueFront = 0; + my %done; + + sub addNode { + my $graph = shift; + my $u = shift; + $$graph{$u} = {d => 999999999999, pred => undef, edges => []} + unless defined $$graph{$u}; + } + + sub addEdge { + my $graph = shift; + my $u = shift; + my $v = shift; + my $w = shift; + my $type = shift; + my $info = shift; + addNode $graph, $u; + push @{$$graph{$u}->{edges}}, + {weight => $w, start => $u, end => $v, type => $type, info => $info}; + my $n = scalar @{$$graph{$u}->{edges}}; + } + + push @queue, $targetPath; + + while ($queueFront < scalar @queue) { + my $u = $queue[$queueFront++]; + next if defined $done{$u}; + $done{$u} = 1; + + addNode \%graph, $u; + + # If the path already exists, it has distance 0 from the + # "start" node. + if (isValidPath($u)) { + addEdge \%graph, "start", $u, 0, "present", undef; + } + + else { + + # Add patch edges. + my $patchList = $dbh->selectall_arrayref( + "select * from Patches where storePath = ?", + { Slice => {} }, $u); + + foreach my $patch (@{$patchList}) { + if (isValidPath($patch->{basePath})) { + my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash}; + + my $hash = $hashCache->{$baseHashAlgo}->{$patch->{basePath}}; + if (!defined $hash) { + $hash = $fast && $baseHashAlgo eq "sha256" + ? queryPathHash($patch->{basePath}) + : hashPath($baseHashAlgo, $baseHashAlgo ne "md5", $patch->{basePath}); + $hash =~ s/.*://; + $hashCache->{$baseHashAlgo}->{$patch->{basePath}} = $hash; + } + + next if $hash ne $baseHash; + } + push @queue, $patch->{basePath}; + addEdge \%graph, $patch->{basePath}, $u, $patch->{size}, "patch", $patch; + } + + # Add NAR file edges to the start node. + my $narFileList = $dbh->selectall_arrayref( + "select * from NARs where storePath = ?", + { Slice => {} }, $u); + + foreach my $narFile (@{$narFileList}) { + # !!! how to handle files whose size is not known in advance? + # For now, assume some arbitrary size (1 GB). + # This has the side-effect of preferring non-Hydra downloads. + addEdge \%graph, "start", $u, ($narFile->{size} || 1000000000), "narfile", $narFile; + } + } + } + + + # Run Dijkstra's shortest path algorithm to determine the shortest + # sequence of download and/or patch actions that will produce + # $targetPath. + + my @todo = keys %graph; + + while (scalar @todo > 0) { + + # Remove the closest element from the todo list. + # !!! inefficient, use a priority queue + @todo = sort { -($graph{$a}->{d} <=> $graph{$b}->{d}) } @todo; + my $u = pop @todo; + + my $u_ = $graph{$u}; + + foreach my $edge (@{$u_->{edges}}) { + my $v_ = $graph{$edge->{end}}; + if ($v_->{d} > $u_->{d} + $edge->{weight}) { + $v_->{d} = $u_->{d} + $edge->{weight}; + # Store the edge; to edge->start is actually the + # predecessor. + $v_->{pred} = $edge; + } + } + } + + + # Retrieve the shortest path from "start" to $targetPath. + my @path = (); + my $cur = $targetPath; + return () unless defined $graph{$targetPath}->{pred}; + while ($cur ne "start") { + push @path, $graph{$cur}->{pred}; + $cur = $graph{$cur}->{pred}->{start}; + } + + return @path; +} + + +# Parse the arguments. 
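# (The protocol spoken on stdin/stdout in the --query branch below: the
# caller sends lines of the form "have <path>..." or "info <path>...".
# For "have" we reply with the subset of the given paths that can be
# substituted, one per line; for "info" we reply, per path: the path,
# its deriver, the number of references followed by the references
# themselves, the expected download size and the NAR size.  Either
# reply is terminated by an empty line.)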
+ +if ($ARGV[0] eq "--query") { + + while (<STDIN>) { + chomp; + my ($cmd, @args) = split " ", $_; + + if ($cmd eq "have") { + foreach my $storePath (@args) { + print "$storePath\n" if scalar @{$dbh->selectcol_arrayref("select 1 from NARs where storePath = ?", {}, $storePath)} > 0; + } + print "\n"; + } + + elsif ($cmd eq "info") { + foreach my $storePath (@args) { + + my $infos = $dbh->selectall_arrayref( + "select * from NARs where storePath = ?", + { Slice => {} }, $storePath); + + next unless scalar @{$infos} > 0; + my $info = @{$infos}[0]; + + print "$storePath\n"; + print "$info->{deriver}\n"; + my @references = split " ", $info->{refs}; + print scalar @references, "\n"; + print "$_\n" foreach @references; + + my @path = computeSmallestDownload $storePath; + + my $downloadSize = 0; + while (scalar @path > 0) { + my $edge = pop @path; + my $u = $edge->{start}; + my $v = $edge->{end}; + if ($edge->{type} eq "patch") { + $downloadSize += $edge->{info}->{size} || 0; + } + elsif ($edge->{type} eq "narfile") { + $downloadSize += $edge->{info}->{size} || 0; + } + } + + print "$downloadSize\n"; + + my $narSize = $info->{narSize} || 0; + print "$narSize\n"; + } + + print "\n"; + } + + else { die "unknown command `$cmd'"; } + } + + exit 0; +} + +elsif ($ARGV[0] ne "--substitute") { + die; +} + + +die unless scalar @ARGV == 3; +my $targetPath = $ARGV[1]; +my $destPath = $ARGV[2]; +$fast = 0; + + +# Create a temporary directory. +my $tmpDir = tempdir("nix-download.XXXXXX", CLEANUP => 1, TMPDIR => 1) + or die "cannot create a temporary directory"; + +my $tmpNar = "$tmpDir/nar"; +my $tmpNar2 = "$tmpDir/nar2"; + + +open LOGFILE, ">>$logFile" or die "cannot open log file $logFile"; + +my $date = strftime ("%F %H:%M:%S UTC", gmtime (time)); +print LOGFILE "$$ get $targetPath $date\n"; + +print STDERR "\n*** Trying to download/patch `$targetPath'\n"; + + +# Compute the shortest path. +my @path = computeSmallestDownload $targetPath; +die "don't know how to produce $targetPath\n" if scalar @path == 0; + + +# We don't need the manifest anymore, so close it as an optimisation: +# if we still have SQLite locks blocking other processes (we +# shouldn't), this gets rid of them. +$dbh->disconnect; + + +# Traverse the shortest path, perform the actions described by the +# edges. +my $curStep = 1; +my $maxStep = scalar @path; + +my $finalNarHash; + +while (scalar @path > 0) { + my $edge = pop @path; + my $u = $edge->{start}; + my $v = $edge->{end}; + + print STDERR "\n*** Step $curStep/$maxStep: "; + + if ($edge->{type} eq "present") { + print STDERR "using already present path `$v'\n"; + print LOGFILE "$$ present $v\n"; + + if ($curStep < $maxStep) { + # Since this is not the last step, the path will be used + # as a base to one or more patches. So turn the base path + # into a NAR archive, to which we can apply the patch. + print STDERR " packing base path...\n"; + system("$Nix::Config::binDir/nix-store --dump $v > $tmpNar") == 0 + or die "cannot dump `$v'"; + } + } + + elsif ($edge->{type} eq "patch") { + my $patch = $edge->{info}; + print STDERR "applying patch `$patch->{url}' to `$u' to create `$v'\n"; + + print LOGFILE "$$ patch $patch->{url} $patch->{size} $patch->{baseHash} $u $v\n"; + + # Download the patch. 
+ print STDERR " downloading patch...\n"; + my $patchPath = "$tmpDir/patch"; + checkURL $patch->{url}; + system("$curl '$patch->{url}' -o $patchPath") == 0 + or die "cannot download patch `$patch->{url}'\n"; + + # Apply the patch to the NAR archive produced in step 1 (for + # the already present path) or a later step (for patch sequences). + print STDERR " applying patch...\n"; + system("$Nix::Config::libexecDir/nix/bspatch $tmpNar $tmpNar2 $patchPath") == 0 + or die "cannot apply patch `$patchPath' to $tmpNar\n"; + + if ($curStep < $maxStep) { + # The archive will be used as the base of the next patch. + rename "$tmpNar2", "$tmpNar" or die "cannot rename NAR archive: $!"; + } else { + # This was the last patch. Unpack the final NAR archive + # into the target path. + print STDERR " unpacking patched archive...\n"; + system("$Nix::Config::binDir/nix-store --restore $destPath < $tmpNar2") == 0 + or die "cannot unpack $tmpNar2 to `$v'\n"; + } + + $finalNarHash = $patch->{narHash}; + } + + elsif ($edge->{type} eq "narfile") { + my $narFile = $edge->{info}; + print STDERR "downloading `$narFile->{url}' to `$v'\n"; + + my $size = $narFile->{size} || -1; + print LOGFILE "$$ narfile $narFile->{url} $size $v\n"; + + checkURL $narFile->{url}; + + my $decompressor = + $narFile->{compressionType} eq "bzip2" ? "| $Nix::Config::bzip2 -d" : + $narFile->{compressionType} eq "xz" ? "| $Nix::Config::xz -d" : + $narFile->{compressionType} eq "none" ? "" : + die "unknown compression type `$narFile->{compressionType}'"; + + if ($curStep < $maxStep) { + # The archive will be used a base to a patch. + system("$curl '$narFile->{url}' $decompressor > $tmpNar") == 0 + or die "cannot download and unpack `$narFile->{url}' to `$v'\n"; + } else { + # Unpack the archive to the target path. + system("$curl '$narFile->{url}' $decompressor | $Nix::Config::binDir/nix-store --restore '$destPath'") == 0 + or die "cannot download and unpack `$narFile->{url}' to `$v'\n"; + } + + $finalNarHash = $narFile->{narHash}; + } + + $curStep++; +} + + +# Tell Nix about the expected hash so it can verify it. +die "cannot check integrity of the downloaded path since its hash is not known\n" + unless defined $finalNarHash; +print "$finalNarHash\n"; + + +print STDERR "\n"; +print LOGFILE "$$ success\n"; +close LOGFILE; diff --git a/scripts/find-runtime-roots.pl.in b/scripts/find-runtime-roots.pl.in new file mode 100755 index 000000000000..e1a2dde556b6 --- /dev/null +++ b/scripts/find-runtime-roots.pl.in @@ -0,0 +1,79 @@ +#! @perl@ -w @perlFlags@ + +use strict; +use Nix::Utils; +use Nix::Config; + + +sub readProc { + return unless -d "/proc"; + + opendir DIR, "/proc" or return; + + foreach my $name (readdir DIR) { + next unless $name =~ /^\d+$/; + + my $process = "/proc/$name"; + + #print STDERR "=== $process\n"; + + my $target; + print "$target\n" if $target = readlink "$process/exe"; + print "$target\n" if $target = readlink "$process/cwd"; + + if (opendir FDS, "$process/fd") { + foreach my $name (readdir FDS) { + $target = readlink "$process/fd/$name"; + print "$target\n" if $target && substr($target, 0, 1) eq "/"; + } + closedir FDS; + } + + if (open MAP, "<$process/maps") { + while (<MAP>) { + next unless /^ \s* \S+ \s+ \S+ \s+ \S+ \s+ \S+ \s+ \S+ \s+ (\/\S+) \s* $/x; + print "$1\n"; + } + close MAP; + } + + # Get all store paths that appear in the environment of this process. 
+ eval { + my $env = Nix::Utils::readFile "$process/environ"; + my @matches = $env =~ /\Q$Nix::Config::storeDir\E\/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*/g; + print "$_\n" foreach @matches; + } + } + + closedir DIR; +} + + +sub lsof { + return unless open LSOF, "lsof -n -w -F n 2> /dev/null |"; + + while (<LSOF>) { + next unless /^n (\/ .*)$/x; + print $1, "\n"; + } + + close LSOF; +} + + +readProc; +lsof; + + +sub printFile { + my ($fn) = @_; + if (-e $fn) { + print Nix::Utils::readFile($fn), "\n"; + } +} + + +# This is rather NixOS-specific, so it probably shouldn't be here. +printFile "/proc/sys/kernel/modprobe"; +printFile "/proc/sys/kernel/fbsplash"; +printFile "/proc/sys/kernel/poweroff_cmd"; diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh new file mode 100644 index 000000000000..41e596e5fffd --- /dev/null +++ b/scripts/install-nix-from-closure.sh @@ -0,0 +1,34 @@ +#! /bin/sh -e + +nix=@nix@ +regInfo=@regInfo@ + +if ! $nix/bin/nix-store --load-db < $regInfo; then + echo "$0: unable to register valid paths" + exit 1 +fi + +. @nix@/etc/profile.d/nix.sh + +if ! $nix/bin/nix-env -i @nix@; then + echo "$0: unable to install Nix into your default profile" + exit 1 +fi + +# Add nix.sh to the shell's profile.d directory. +p=$NIX_LINK/etc/profile.d/nix.sh + +if [ -w /etc/profile.d ]; then + ln -s $p /etc/profile.d/ +elif [ -w /usr/local/etc/profile.d ]; then + ln -s $p /usr/local/etc/profile.d/ +else + cat <<EOF +Installation finished. To ensure that the necessary environment +variables are set, please add the line + + source $p + +to your shell profile (e.g. ~/.profile). +EOF +fi \ No newline at end of file diff --git a/scripts/local.mk b/scripts/local.mk new file mode 100644 index 000000000000..f4c5e8097de4 --- /dev/null +++ b/scripts/local.mk @@ -0,0 +1,37 @@ +nix_bin_scripts := \ + $(d)/nix-build \ + $(d)/nix-channel \ + $(d)/nix-collect-garbage \ + $(d)/nix-copy-closure \ + $(d)/nix-generate-patches \ + $(d)/nix-install-package \ + $(d)/nix-prefetch-url \ + $(d)/nix-pull \ + $(d)/nix-push + +bin-scripts += $(nix_bin_scripts) + +nix_substituters := \ + $(d)/copy-from-other-stores.pl \ + $(d)/download-from-binary-cache.pl \ + $(d)/download-using-manifests.pl + +nix_noinst_scripts := \ + $(d)/build-remote.pl \ + $(d)/find-runtime-roots.pl \ + $(d)/nix-http-export.cgi \ + $(d)/nix-profile.sh \ + $(d)/nix-reduce-build \ + $(nix_substituters) + +noinst-scripts += $(nix_noinst_scripts) + +profiledir = $(sysconfdir)/profile.d + +$(eval $(call install-file-as, $(d)/nix-profile.sh, $(profiledir)/nix.sh, 0644)) +$(eval $(call install-program-in, $(d)/find-runtime-roots.pl, $(libexecdir)/nix)) +$(eval $(call install-program-in, $(d)/build-remote.pl, $(libexecdir)/nix)) +$(foreach prog, $(nix_substituters), $(eval $(call install-program-in, $(prog), $(libexecdir)/nix/substituters))) +$(eval $(call install-symlink, nix-build, $(bindir)/nix-shell)) + +clean-files += $(nix_bin_scripts) $(nix_noinst_scripts) diff --git a/scripts/nix-build.in b/scripts/nix-build.in new file mode 100755 index 000000000000..168730bbc92d --- /dev/null +++ b/scripts/nix-build.in @@ -0,0 +1,261 @@ +#! @perl@ -w @perlFlags@ + +use strict; +use Nix::Config; +use Nix::Store; +use Nix::Utils; +use File::Temp qw(tempdir); + + +my $dryRun = 0; +my $verbose = 0; +my $runEnv = $0 =~ /nix-shell$/; +my $pure = 0; + +my @instArgs = (); +my @buildArgs = (); +my @exprs = (); + +my $shell = $ENV{SHELL} || "/bin/sh"; +my $envCommand = ""; # interactive shell +my @envExclude = (); + +my $myName = $runEnv ? 
"nix-shell" : "nix-build"; + + +my $tmpDir = tempdir("$myName.XXXXXX", CLEANUP => 1, TMPDIR => 1) + or die "cannot create a temporary directory"; + +my $outLink = "./result"; +my $drvLink = "$tmpDir/derivation"; + +# Ensure that the $tmpDir is deleted. +$SIG{'INT'} = sub { exit 1 }; + + +for (my $n = 0; $n < scalar @ARGV; $n++) { + my $arg = $ARGV[$n]; + + if ($arg eq "--help") { + exec "man $myName" or die; + } + + elsif ($arg eq "--version") { + print "$myName (Nix) $Nix::Config::version\n"; + exit 0; + } + + elsif ($arg eq "--add-drv-link") { + $drvLink = "./derivation"; + } + + elsif ($arg eq "--no-out-link" or $arg eq "--no-link") { + $outLink = "$tmpDir/result"; + } + + elsif ($arg eq "--drv-link") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + $drvLink = $ARGV[$n]; + } + + elsif ($arg eq "--out-link" or $arg eq "-o") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + $outLink = $ARGV[$n]; + } + + elsif ($arg eq "--attr" or $arg eq "-A" or $arg eq "-I") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + push @instArgs, ($arg, $ARGV[$n]); + } + + elsif ($arg eq "--arg" || $arg eq "--argstr") { + die "$0: `$arg' requires two arguments\n" unless $n + 2 < scalar @ARGV; + push @instArgs, ($arg, $ARGV[$n + 1], $ARGV[$n + 2]); + $n += 2; + } + + elsif ($arg eq "--log-type") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + push @instArgs, ($arg, $ARGV[$n]); + push @buildArgs, ($arg, $ARGV[$n]); + } + + elsif ($arg eq "--option") { + die "$0: `$arg' requires two arguments\n" unless $n + 2 < scalar @ARGV; + push @instArgs, ($arg, $ARGV[$n + 1], $ARGV[$n + 2]); + push @buildArgs, ($arg, $ARGV[$n + 1], $ARGV[$n + 2]); + $n += 2; + } + + elsif ($arg eq "--max-jobs" or $arg eq "-j" or $arg eq "--max-silent-time" or $arg eq "--log-type" or $arg eq "--cores" or $arg eq "--timeout") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + push @buildArgs, ($arg, $ARGV[$n]); + } + + elsif ($arg eq "--dry-run") { + push @buildArgs, "--dry-run"; + $dryRun = 1; + } + + elsif ($arg eq "--show-trace") { + push @instArgs, $arg; + } + + elsif ($arg eq "-") { + @exprs = ("-"); + } + + elsif ($arg eq "--verbose" or substr($arg, 0, 2) eq "-v") { + push @buildArgs, $arg; + push @instArgs, $arg; + $verbose = 1; + } + + elsif ($arg eq "--quiet" || $arg eq "--repair") { + push @buildArgs, $arg; + push @instArgs, $arg; + } + + elsif ($arg eq "--run-env") { # obsolete + $runEnv = 1; + } + + elsif ($arg eq "--command") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + $envCommand = "$ARGV[$n]\nexit $!"; + } + + elsif ($arg eq "--exclude") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + push @envExclude, $ARGV[$n]; + } + + elsif ($arg eq "--pure") { $pure = 1; } + elsif ($arg eq "--impure") { $pure = 0; } + + elsif (substr($arg, 0, 1) eq "-") { + push @buildArgs, $arg; + } + + else { + push @exprs, $arg; + } +} + +@exprs = ("./default.nix") if scalar @exprs == 0; + +$ENV{'IN_NIX_SHELL'} = 1 if $runEnv; + + +foreach my $expr (@exprs) { + + # Instantiate. + my @drvPaths; + if ($expr !~ /^\/.*\.drv$/) { + # !!! would prefer the perl 5.8.0 pipe open feature here. + my $pid = open(DRVPATHS, "-|") || exec "$Nix::Config::binDir/nix-instantiate", "--add-root", $drvLink, "--indirect", @instArgs, $expr; + while (<DRVPATHS>) {chomp; push @drvPaths, $_;} + if (!close DRVPATHS) { + die "nix-instantiate killed by signal " . ($? 
& 127) . "\n" if ($? & 127); + exit 1; + } + } else { + push @drvPaths, $expr; + } + + if ($runEnv) { + die "$0: a single derivation is required\n" if scalar @drvPaths != 1; + my $drvPath = $drvPaths[0]; + $drvPath = readlink $drvPath or die "cannot read symlink `$drvPath'" if -l $drvPath; + my $drv = derivationFromPath($drvPath); + + # Build or fetch all dependencies of the derivation. + my @inputDrvs = grep { my $x = $_; (grep { $x =~ $_ } @envExclude) == 0 } @{$drv->{inputDrvs}}; + system("$Nix::Config::binDir/nix-store", "-r", "--no-output", "--no-gc-warning", @buildArgs, @inputDrvs, @{$drv->{inputSrcs}}) == 0 + or die "$0: failed to build all dependencies\n"; + + # Set the environment. + if ($pure) { + foreach my $name (keys %ENV) { + next if grep { $_ eq $name } ("HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ"); + delete $ENV{$name}; + } + # NixOS hack: prevent /etc/bashrc from sourcing /etc/profile. + $ENV{'__ETC_PROFILE_SOURCED'} = 1; + } + $ENV{'NIX_BUILD_TOP'} = $ENV{'TMPDIR'} = $ENV{'TEMPDIR'} = $ENV{'TMP'} = $ENV{'TEMP'} = $ENV{'TMPDIR'} // "/tmp"; + $ENV{'NIX_STORE'} = $Nix::Config::storeDir; + $ENV{$_} = $drv->{env}->{$_} foreach keys %{$drv->{env}}; + + # Run a shell using the derivation's environment. For + # convenience, source $stdenv/setup to setup additional + # environment variables and shell functions. Also don't lose + # the current $PATH directories. + my $rcfile = "$tmpDir/rc"; + writeFile( + $rcfile, + 'unset BASH_ENV; ' . + '[ -n "$PS1" ] && [ -e ~/.bashrc ] && source ~/.bashrc; ' . + ($pure ? '' : 'p=$PATH; ' ) . + 'dontAddDisableDepTrack=1; ' . + '[ -e $stdenv/setup ] && source $stdenv/setup; ' . + ($pure ? '' : 'PATH=$PATH:$p; unset p; ') . + 'set +e; ' . + '[ -n "$PS1" ] && PS1="\n\[\033[1;32m\][nix-shell:\w]$\[\033[0m\] "; ' . + 'unset NIX_ENFORCE_PURITY; ' . + 'unset NIX_INDENT_MAKE; ' . + 'shopt -u nullglob; ' . + 'unset TZ; ' . (defined $ENV{'TZ'} ? "export TZ='${ENV{'TZ'}}'; " : '') . + $envCommand); + $ENV{BASH_ENV} = $rcfile; + exec($ENV{NIX_BUILD_SHELL} // "bash", "--rcfile", $rcfile); + die; + } + + # Ugly hackery to make "nix-build -A foo.all" produce symlinks + # ./result, ./result-dev, and so on, rather than ./result, + # ./result-2-dev, and so on. This combines multiple derivation + # paths into one "/nix/store/drv-path!out1,out2,..." argument. + my $prevDrvPath = ""; + my @drvPaths2; + foreach my $drvPath (@drvPaths) { + my $p = $drvPath; my $output = "out"; + if ($drvPath =~ /(.*)!(.*)/) { + $p = $1; $output = $2; + } else { + $p = $drvPath; + } + my $target = readlink $p or die "cannot read symlink `$p'"; + print STDERR "derivation is $target\n" if $verbose; + if ($target eq $prevDrvPath) { + push @drvPaths2, (pop @drvPaths2) . "," . $output; + } else { + push @drvPaths2, $target . "!" . $output; + $prevDrvPath = $target; + } + } + + # Build. + my @outPaths; + my $pid = open(OUTPATHS, "-|") || exec "$Nix::Config::binDir/nix-store", "--add-root", $outLink, "--indirect", "-r", + @buildArgs, @drvPaths2; + while (<OUTPATHS>) {chomp; push @outPaths, $_;} + if (!close OUTPATHS) { + die "nix-store killed by signal " . ($? & 127) . "\n" if ($? & 127); + exit 1; + } + + next if $dryRun; + + foreach my $outPath (@outPaths) { + my $target = readlink $outPath or die "cannot read symlink `$outPath'"; + print "$target\n"; + } +} diff --git a/scripts/nix-channel.in b/scripts/nix-channel.in new file mode 100755 index 000000000000..4a480ae28b04 --- /dev/null +++ b/scripts/nix-channel.in @@ -0,0 +1,209 @@ +#! 
@perl@ -w @perlFlags@ + +use strict; +use File::Basename; +use File::Path qw(mkpath); +use Nix::Config; +use Nix::Manifest; + +Nix::Config::readConfig; + +my $manifestDir = $Nix::Config::manifestDir; + + +# Turn on caching in nix-prefetch-url. +my $channelCache = "$Nix::Config::stateDir/channel-cache"; +mkdir $channelCache, 0755 unless -e $channelCache; +$ENV{'NIX_DOWNLOAD_CACHE'} = $channelCache if -W $channelCache; + +# Figure out the name of the `.nix-channels' file to use. +my $home = $ENV{"HOME"} or die '$HOME not set\n'; +my $channelsList = "$home/.nix-channels"; +my $nixDefExpr = "$home/.nix-defexpr"; + +# Figure out the name of the channels profile. +my $userName = getpwuid($<) or die "cannot figure out user name"; +my $profile = "$Nix::Config::stateDir/profiles/per-user/$userName/channels"; +mkpath(dirname $profile, 0, 0755); + +my %channels; + + +# Reads the list of channels. +sub readChannels { + return if (!-f $channelsList); + open CHANNELS, "<$channelsList" or die "cannot open `$channelsList': $!"; + while (<CHANNELS>) { + chomp; + next if /^\s*\#/; + my ($url, $name) = split ' ', $_; + $url =~ s/\/*$//; # remove trailing slashes + $name = basename $url unless defined $name; + $channels{$name} = $url; + } + close CHANNELS; +} + + +# Writes the list of channels. +sub writeChannels { + open CHANNELS, ">$channelsList" or die "cannot open `$channelsList': $!"; + foreach my $name (keys %channels) { + print CHANNELS "$channels{$name} $name\n"; + } + close CHANNELS; +} + + +# Adds a channel. +sub addChannel { + my ($url, $name) = @_; + readChannels; + $channels{$name} = $url; + writeChannels; +} + + +# Remove a channel. +sub removeChannel { + my ($name) = @_; + readChannels; + my $url = $channels{$name}; + deleteOldManifests($url . "/MANIFEST", undef) if defined $url; + delete $channels{$name}; + writeChannels; + + system("$Nix::Config::binDir/nix-env --profile '$profile' -e '$name'") == 0 + or die "cannot remove channel `$name'\n"; +} + + +# Fetch Nix expressions and pull manifests from the subscribed +# channels. +sub update { + my @channelNames = @_; + + readChannels; + + # Download each channel. + my $exprs = ""; + foreach my $name (keys %channels) { + next if scalar @channelNames > 0 && ! grep { $_ eq $name } @{channelNames}; + + my $url = $channels{$name}; + my $origUrl = "$url/MANIFEST"; + + # Check if $url is a redirect. If so, follow it now to ensure + # consistency if the redirection is changed between + # downloading the manifest and the tarball. + my $headers = `$Nix::Config::curl --silent --head '$url'`; + die "$0: unable to check `$url'\n" if $? != 0; + $headers =~ s/\r//g; + $url = $1 if $headers =~ /^Location:\s*(.*)\s*$/m; + + # Check if the channel advertises a binary cache. + my $binaryCacheURL = `$Nix::Config::curl --silent '$url'/binary-cache-url`; + my $extraAttrs = ""; + my $getManifest = ($Nix::Config::config{"force-manifest"} // "false") eq "true"; + if ($? == 0 && $binaryCacheURL ne "") { + $extraAttrs .= "binaryCacheURL = \"$binaryCacheURL\"; "; + deleteOldManifests($origUrl, undef); + } else { + $getManifest = 1; + } + + if ($getManifest) { + # No binary cache, so pull the channel manifest. + mkdir $manifestDir, 0755 unless -e $manifestDir; + die "$0: you do not have write permission to `$manifestDir'!\n" unless -W $manifestDir; + $ENV{'NIX_ORIG_URL'} = $origUrl; + system("$Nix::Config::binDir/nix-pull", "--skip-wrong-store", "$url/MANIFEST") == 0 + or die "cannot pull manifest from `$url'\n"; + } + + # Download the channel tarball. 
+        my $fullURL = "$url/nixexprs.tar.xz";
+        system("$Nix::Config::curl --fail --silent --head '$fullURL' > /dev/null") == 0 or
+            $fullURL = "$url/nixexprs.tar.bz2";
+        print STDERR "downloading Nix expressions from `$fullURL'...\n";
+        my ($hash, $path) = `PRINT_PATH=1 QUIET=1 $Nix::Config::binDir/nix-prefetch-url '$fullURL'`;
+        die "cannot fetch `$fullURL'\n" if $? != 0;
+        chomp $path;
+
+        # If the URL contains a version number, append it to the name
+        # attribute (so that "nix-env -q" on the channels profile
+        # shows something useful).
+        my $cname = $name;
+        $cname .= $1 if basename($url) =~ /(-\d.*)$/;
+
+        $exprs .= "'f: f { name = \"$cname\"; channelName = \"$name\"; src = builtins.storePath \"$path\"; $extraAttrs }' ";
+    }
+
+    # Unpack the channel tarballs into the Nix store and install them
+    # into the channels profile.
+    print STDERR "unpacking channels...\n";
+    system("$Nix::Config::binDir/nix-env --profile '$profile' " .
+           "-f '<nix/unpack-channel.nix>' -i -E $exprs --quiet") == 0
+        or die "cannot unpack the channels\n";
+
+    # Make the channels appear in nix-env.
+    unlink $nixDefExpr if -l $nixDefExpr; # old-skool ~/.nix-defexpr
+    mkdir $nixDefExpr or die "cannot create directory `$nixDefExpr'" if !-e $nixDefExpr;
+    my $channelLink = "$nixDefExpr/channels";
+    unlink $channelLink; # !!! not atomic
+    symlink($profile, $channelLink) or die "cannot symlink `$channelLink' to `$profile'";
+}
+
+
+die "$0: argument expected\n" if scalar @ARGV == 0;
+
+
+while (scalar @ARGV) {
+    my $arg = shift @ARGV;
+
+    if ($arg eq "--add") {
+        die "$0: `--add' requires one or two arguments\n" if scalar @ARGV < 1 || scalar @ARGV > 2;
+        my $url = shift @ARGV;
+        my $name = shift @ARGV;
+        unless (defined $name) {
+            $name = basename $url;
+            $name =~ s/-unstable//;
+            $name =~ s/-stable//;
+        }
+        addChannel($url, $name);
+        last;
+    }
+
+    elsif ($arg eq "--remove") {
+        die "$0: `--remove' requires one argument\n" if scalar @ARGV != 1;
+        removeChannel(shift @ARGV);
+        last;
+    }
+
+    elsif ($arg eq "--list") {
+        die "$0: `--list' takes no arguments\n" if scalar @ARGV != 0;
+        readChannels;
+        foreach my $name (keys %channels) {
+            print "$name $channels{$name}\n";
+        }
+        last;
+    }
+
+    elsif ($arg eq "--update") {
+        update(@ARGV);
+        last;
+    }
+
+    elsif ($arg eq "--help") {
+        exec "man nix-channel" or die;
+    }
+
+    elsif ($arg eq "--version") {
+        print "nix-channel (Nix) $Nix::Config::version\n";
+        exit 0;
+    }
+
+    else {
+        die "unknown argument `$arg'; try `--help'\n";
+    }
+}
diff --git a/scripts/nix-collect-garbage.in b/scripts/nix-collect-garbage.in
new file mode 100755
index 000000000000..28b0c749f125
--- /dev/null
+++ b/scripts/nix-collect-garbage.in
@@ -0,0 +1,57 @@
+#! @perl@ -w @perlFlags@
+
+use strict;
+use Nix::Config;
+
+my $profilesDir = "@localstatedir@/nix/profiles";
+
+
+# Process the command line arguments.
+my @args = ();
+my $removeOld = 0;
+my $dryRun = 0;
+
+for my $arg (@ARGV) {
+    if ($arg eq "--delete-old" || $arg eq "-d") {
+        $removeOld = 1;
+    } elsif ($arg eq "--dry-run") {
+        $dryRun = 1;
+    } elsif ($arg eq "--help") {
+        exec "man nix-collect-garbage" or die;
+    } else {
+        push @args, $arg;
+    }
+}
+
+
+# If `-d' was specified, remove all old generations of all profiles.
+# Of course, this makes rollbacks to before this point in time
+# impossible.
+
+sub removeOldGenerations;
+sub removeOldGenerations {
+    my $dir = shift;
+
+    my $dh;
+    opendir $dh, $dir or die "cannot open directory `$dir'";
+
+    foreach my $name (sort (readdir $dh)) {
+        next if $name eq "." || $name eq "..";
+        $name = $dir . "/" . 
$name; + if (-l $name && (readlink($name) =~ /link/)) { + print STDERR "removing old generations of profile $name\n"; + system("$Nix::Config::binDir/nix-env", "-p", $name, "--delete-generations", "old", $dryRun ? "--dry-run" : ()); + } + elsif (! -l $name && -d $name) { + removeOldGenerations $name; + } + } + + closedir $dh or die; +} + +removeOldGenerations $profilesDir if $removeOld; + + +# Run the actual garbage collector. +exec "$Nix::Config::binDir/nix-store", "--gc", @args unless $dryRun; diff --git a/scripts/nix-copy-closure.in b/scripts/nix-copy-closure.in new file mode 100755 index 000000000000..23d5619519a4 --- /dev/null +++ b/scripts/nix-copy-closure.in @@ -0,0 +1,131 @@ +#! @perl@ -w @perlFlags@ + +use Nix::SSH; +use Nix::Config; +use Nix::Store; +use Nix::CopyClosure; +use List::Util qw(sum); + + +if (scalar @ARGV < 1) { + print STDERR <<EOF +Usage: nix-copy-closure [--from | --to] HOSTNAME [--sign] [--gzip] [--bzip2] [--xz] PATHS... +EOF + ; + exit 1; +} + + +# Get the target host. +my $sshHost; +my $sign = 0; +my $compressor = ""; +my $decompressor = ""; +my $progressViewer = ""; +my $toMode = 1; +my $includeOutputs = 0; +my $dryRun = 0; +my $useSubstitutes = 0; + + +# !!! Copied from nix-pack-closure, should put this in a module. +my @storePaths = (); + +while (@ARGV) { + my $arg = shift @ARGV; + + if ($arg eq "--help") { + exec "man nix-copy-closure" or die; + } + elsif ($arg eq "--sign") { + $sign = 1; + } + elsif ($arg eq "--gzip") { + $compressor = "gzip"; + $decompressor = "gunzip"; + } + elsif ($arg eq "--bzip2") { + $compressor = "bzip2"; + $decompressor = "bunzip2"; + } + elsif ($arg eq "--xz") { + $compressor = "xz"; + $decompressor = "xz -d"; + } + elsif ($arg eq "--from") { + $toMode = 0; + } + elsif ($arg eq "--to") { + $toMode = 1; + } + elsif ($arg eq "--include-outputs") { + $includeOutputs = 1; + } + elsif ($arg eq "--show-progress") { + $progressViewer = "@pv@"; + } + elsif ($arg eq "--dry-run") { + $dryRun = 1; + } + elsif ($arg eq "--use-substitutes" || $arg eq "-s") { + $useSubstitutes = 1; + } + elsif (!defined $sshHost) { + $sshHost = $arg; + } + else { + push @storePaths, $arg; + } +} + +die "$0: you did not specify a host name\n" unless defined $sshHost; + + +openSSHConnection $sshHost or die "$0: unable to start SSH\n"; + + +if ($toMode) { # Copy TO the remote machine. + Nix::CopyClosure::copyTo( + $sshHost, [ @sshOpts ], [ @storePaths ], $compressor, $decompressor, + $includeOutputs, $dryRun, $sign, $progressViewer, $useSubstitutes); +} + +else { # Copy FROM the remote machine. + + # Query the closure of the given store paths on the remote + # machine. Paths are assumed to be store paths; there is no + # resolution (following of symlinks). + my $extraOpts = $includeOutputs ? "--include-outputs" : ""; + my $pid = open(READ, + "set -f; ssh @sshOpts $sshHost nix-store --query --requisites $extraOpts @storePaths|") or die; + + while (<READ>) { + chomp; + die "bad: $_" unless /^\//; + push @missing, $_ unless isValidPath($_); + } + + close READ or die "nix-store on remote machine `$sshHost' failed: $?"; + + my $missingSize = 0; + if ($progressViewer ne "") { + $missingSize = sum (split ' ', `set -f; ssh @sshOpts $sshHost nix-store -q --size @missing`) or die; + } + + # Export the store paths on the remote machine and import them locally. 
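+    # The copy is a single pipeline of the shape
+    #
+    #   ssh HOST 'nix-store --export PATHS... | COMPRESSOR'
+    #     | DECOMPRESSOR | PROGRESS-VIEWER | nix-store --import
+    #
+    # where the compressor, decompressor and progress viewer stages
+    # are spliced in only if the corresponding flags were given.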
+ if (scalar @missing > 0) { + print STDERR "copying ", scalar @missing, " missing paths from ‘$sshHost’...\n"; + unless ($dryRun) { + if ($useSubstitutes) { + system "$Nix::Config::binDir/nix-store -r --ignore-unknown @missing"; + } + $compressor = "| $compressor" if $compressor ne ""; + $decompressor = "$decompressor |" if $decompressor ne ""; + $progressViewer = "$progressViewer -s $missingSize |" if $progressViewer ne ""; + my $extraOpts = $sign ? "--sign" : ""; + system("set -f; ssh $sshHost @sshOpts 'nix-store --export $extraOpts @missing $compressor' | $decompressor $progressViewer $Nix::Config::binDir/nix-store --import > /dev/null") == 0 + or die "copying store paths from remote machine `$sshHost' failed: $?"; + } + } + +} diff --git a/scripts/nix-generate-patches.in b/scripts/nix-generate-patches.in new file mode 100755 index 000000000000..969af916d8e6 --- /dev/null +++ b/scripts/nix-generate-patches.in @@ -0,0 +1,52 @@ +#! @perl@ -w @perlFlags@ + +use strict; +use File::Temp qw(tempdir); +use Nix::Manifest; +use Nix::GeneratePatches; + +if (scalar @ARGV != 5) { + print STDERR <<EOF; +Usage: nix-generate-patches NAR-DIR PATCH-DIR PATCH-URI OLD-MANIFEST NEW-MANIFEST + +This command generates binary patches between NAR files listed in +OLD-MANIFEST and NEW-MANIFEST. The patches are written to the +directory PATCH-DIR, and the prefix PATCH-URI is used to generate URIs +for the patches. The patches are added to NEW-MANIFEST. All NARs are +required to exist in NAR-DIR. Patches are generated between +succeeding versions of packages with the same name. +EOF + exit 1; +} + +my $narPath = $ARGV[0]; +my $patchesPath = $ARGV[1]; +my $patchesURL = $ARGV[2]; +my $srcManifest = $ARGV[3]; +my $dstManifest = $ARGV[4]; + +my (%srcNarFiles, %srcLocalPaths, %srcPatches); +readManifest $srcManifest, \%srcNarFiles, \%srcPatches; + +my (%dstNarFiles, %dstLocalPaths, %dstPatches); +readManifest $dstManifest, \%dstNarFiles, \%dstPatches; + +my $tmpDir = tempdir("nix-generate-patches.XXXXXX", CLEANUP => 1, TMPDIR => 1) + or die "cannot create a temporary directory"; + +generatePatches \%srcNarFiles, \%dstNarFiles, \%srcPatches, \%dstPatches, + $narPath, $patchesPath, $patchesURL, $tmpDir; + +propagatePatches \%srcPatches, \%dstNarFiles, \%dstPatches; + +# Optionally add all new patches to the manifest in $NIX_ALL_PATCHES. +my $allPatchesFile = $ENV{"NIX_ALL_PATCHES"}; +if (defined $allPatchesFile) { + my (%dummy, %allPatches); + readManifest("$patchesPath/all-patches", \%dummy, \%allPatches) + if -f $allPatchesFile; + copyPatches \%dstPatches, \%allPatches; + writeManifest($allPatchesFile, {}, \%allPatches, 0); +} + +writeManifest $dstManifest, \%dstNarFiles, \%dstPatches; diff --git a/scripts/nix-http-export.cgi.in b/scripts/nix-http-export.cgi.in new file mode 100755 index 000000000000..19a505af1c50 --- /dev/null +++ b/scripts/nix-http-export.cgi.in @@ -0,0 +1,51 @@ +#! 
/bin/sh + +export HOME=/tmp +export NIX_REMOTE=daemon + +TMP_DIR="${TMP_DIR:-/tmp/nix-export}" + +@coreutils@/mkdir -p "$TMP_DIR" || true +@coreutils@/chmod a+r "$TMP_DIR" + +needed_path="?$QUERY_STRING" +needed_path="${needed_path#*[?&]needed_path=}" +needed_path="${needed_path%%&*}" +#needed_path="$(echo $needed_path | ./unhttp)" +needed_path="${needed_path//%2B/+}" +needed_path="${needed_path//%3D/=}" + +echo needed_path: "$needed_path" >&2 + +NIX_STORE="${NIX_STORE_DIR:-/nix/store}" + +echo NIX_STORE: "${NIX_STORE}" >&2 + +full_path="${NIX_STORE}"/"$needed_path" + +if [ "$needed_path" != "${needed_path%.drv}" ]; then + echo "Status: 403 You should create the derivation file yourself" + echo "Content-Type: text/plain" + echo + echo "Refusing to disclose derivation contents" + exit +fi + +if @bindir@/nix-store --check-validity "$full_path"; then + if ! [ -e nix-export/"$needed_path".nar.gz ]; then + @bindir@/nix-store --export "$full_path" | @gzip@ > "$TMP_DIR"/"$needed_path".nar.gz + @coreutils@/ln -fs "$TMP_DIR"/"$needed_path".nar.gz nix-export/"$needed_path".nar.gz + fi; + echo "Status: 301 Moved" + echo "Location: nix-export/"$needed_path".nar.gz" + echo +else + echo "Status: 404 No such path found" + echo "Content-Type: text/plain" + echo + echo "Path not found:" + echo "$needed_path" + echo "checked:" + echo "$full_path" +fi + diff --git a/scripts/nix-install-package.in b/scripts/nix-install-package.in new file mode 100755 index 000000000000..e45337bcc9e5 --- /dev/null +++ b/scripts/nix-install-package.in @@ -0,0 +1,138 @@ +#! @perl@ -w @perlFlags@ + +use strict; +use File::Temp qw(tempdir); +use Nix::Config; +use Nix::Utils; + + +# Parse the command line arguments. +my @args = @ARGV; + +my $source; +my $fromURL = 0; +my @extraNixEnvArgs = (); +my $interactive = 1; + +while (scalar @args) { + my $arg = shift @args; + if ($arg eq "--help") { + exec "man nix-install-package" or die; + } + elsif ($arg eq "--url") { + $fromURL = 1; + } + elsif ($arg eq "--profile" || $arg eq "-p") { + my $profile = shift @args; + die "$0: `--profile' requires an argument\n" if !defined $profile; + push @extraNixEnvArgs, "-p", $profile; + } + elsif ($arg eq "--non-interactive") { + $interactive = 0; + } + else { + $source = $arg; + } +} + +die "$0: please specify a .nixpkg file or URL\n" unless defined $source; + + +# Re-execute in a terminal, if necessary, so that if we're executed +# from a web browser, the user gets to see us. +if ($interactive && !defined $ENV{"NIX_HAVE_TERMINAL"}) { + $ENV{"NIX_HAVE_TERMINAL"} = "1"; + $ENV{"LD_LIBRARY_PATH"} = ""; + foreach my $term ("xterm", "konsole", "gnome-terminal", "xterm") { + exec($term, "-e", "$Nix::Config::binDir/nix-install-package", @ARGV); + } + die "cannot execute `xterm'"; +} + + +my $tmpDir = tempdir("nix-install-package.XXXXXX", CLEANUP => 1, TMPDIR => 1) + or die "cannot create a temporary directory"; + + +sub barf { + my $msg = shift; + print "\nInstallation failed: $msg\n"; + <STDIN> if $interactive; + exit 1; +} + + +# Download the package description, if necessary. +my $pkgFile = $source; +if ($fromURL) { + $pkgFile = "$tmpDir/tmp.nixpkg"; + system("@curl@", "--silent", $source, "-o", $pkgFile) == 0 + or barf "curl failed: $?"; +} + + +# Read and parse the package file. 
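+#
+# A `.nixpkg' file is a single line of whitespace-separated fields
+# (the last one optional), matched by the regexes below:
+#
+#   NIXPKG1 MANIFEST-URL DRV-NAME SYSTEM DRV-PATH OUT-PATH [BINARY-CACHE-URL]
+#
+# For example (a made-up package):
+#
+#   NIXPKG1 http://example.org/MANIFEST hello-2.9 x86_64-linux /nix/store/...-hello-2.9.drv /nix/store/...-hello-2.9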
+open PKGFILE, "<$pkgFile" or barf "cannot open `$pkgFile': $!"; +my $contents = <PKGFILE>; +close PKGFILE; + +my $nameRE = "(?: [A-Za-z0-9\+\-\.\_\?\=]+ )"; # see checkStoreName() +my $systemRE = "(?: [A-Za-z0-9\+\-\_]+ )"; +my $pathRE = "(?: \/ [\/A-Za-z0-9\+\-\.\_\?\=]* )"; + +# Note: $pathRE doesn't check that whether we're looking at a valid +# store path. We'll let nix-env do that. + +$contents =~ + / ^ \s* (\S+) \s+ ($Nix::Utils::urlRE) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) ( \s+ ($Nix::Utils::urlRE) )? /x + or barf "invalid package contents"; +my $version = $1; +my $manifestURL = $2; +my $drvName = $3; +my $system = $4; +my $drvPath = $5; +my $outPath = $6; +my $binaryCacheURL = $8; + +barf "invalid package version `$version'" unless $version eq "NIXPKG1"; + + +if ($interactive) { + # Ask confirmation. + print "Do you want to install `$drvName' (Y/N)? "; + my $reply = <STDIN>; + chomp $reply; + exit if $reply ne "y" && $reply ne "Y"; +} + + +if (defined $binaryCacheURL) { + + push @extraNixEnvArgs, "--option", "binary-caches", $binaryCacheURL; + +} else { + + # Store the manifest in the temporary directory so that we don't + # pollute /nix/var/nix/manifests. This also requires that we + # don't use the Nix daemon (because otherwise + # download-using-manifests won't see our NIX_MANIFESTS_DIRS + # environment variable). + $ENV{NIX_MANIFESTS_DIR} = $tmpDir; + $ENV{NIX_REMOTE} = ""; + + print "\nPulling manifests...\n"; + system("$Nix::Config::binDir/nix-pull", $manifestURL) == 0 + or barf "nix-pull failed: $?"; + +} + + +print "\nInstalling package...\n"; +system("$Nix::Config::binDir/nix-env", "--install", $outPath, "--force-name", $drvName, @extraNixEnvArgs) == 0 + or barf "nix-env failed: $?"; + + +if ($interactive) { + print "\nInstallation succeeded! Press Enter to continue.\n"; + <STDIN>; +} diff --git a/scripts/nix-prefetch-url.in b/scripts/nix-prefetch-url.in new file mode 100755 index 000000000000..bcd9197bcff1 --- /dev/null +++ b/scripts/nix-prefetch-url.in @@ -0,0 +1,130 @@ +#! @perl@ -w @perlFlags@ + +use strict; +use File::Basename; +use File::Temp qw(tempdir); +use File::stat; +use Nix::Store; +use Nix::Config; +use Nix::Utils; + +my $hashType = $ENV{'NIX_HASH_ALGO'} || "sha256"; # obsolete +my $cacheDir = $ENV{'NIX_DOWNLOAD_CACHE'}; + +my @args; +my $arg; +while ($arg = shift) { + if ($arg eq "--help") { + exec "man nix-prefetch-url" or die; + } elsif ($arg eq "--type") { + $hashType = shift; + die "$0: `$arg' requires an argument\n" unless defined $hashType; + } elsif (substr($arg, 0, 1) eq "-") { + die "$0: unknown flag `$arg'\n"; + } else { + push @args, $arg; + } +} + +my $url = $args[0]; +my $expHash = $args[1]; + + +if (!defined $url || $url eq "") { + print STDERR <<EOF +Usage: nix-prefetch-url URL [EXPECTED-HASH] +EOF + ; + exit 1; +} + +my $tmpDir = tempdir("nix-prefetch-url.XXXXXX", CLEANUP => 1, TMPDIR => 1) + or die "cannot create a temporary directory"; + +# Hack to support the mirror:// scheme from Nixpkgs. +if ($url =~ /^mirror:\/\//) { + system("$Nix::Config::binDir/nix-build '<nixpkgs>' -A resolveMirrorURLs --argstr url '$url' -o $tmpDir/urls > /dev/null") == 0 + or die "$0: nix-build failed; maybe \$NIX_PATH is not set properly\n"; + my @expanded = split ' ', readFile("$tmpDir/urls"); + die "$0: cannot resolve ‘$url’" unless scalar @expanded > 0; + print STDERR "$url expands to $expanded[0]\n"; + $url = $expanded[0]; +} + +# Handle escaped characters in the URI. `+', `=' and `?' 
are the only +# characters that are valid in Nix store path names but have a special +# meaning in URIs. +my $name = basename $url; +die "cannot figure out file name for ‘$url’\n" if $name eq ""; +$name =~ s/%2b/+/g; +$name =~ s/%3d/=/g; +$name =~ s/%3f/?/g; + +my $finalPath; +my $hash; + +# If the hash was given, a file with that hash may already be in the +# store. +if (defined $expHash) { + $finalPath = makeFixedOutputPath(0, $hashType, $expHash, $name); + if (isValidPath($finalPath)) { $hash = $expHash; } else { $finalPath = undef; } +} + +# If we don't know the hash or a file with that hash doesn't exist, +# download the file and add it to the store. +if (!defined $finalPath) { + + my $tmpFile = "$tmpDir/$name"; + + # Optionally do timestamp-based caching of the download. + # Actually, the only thing that we cache in $NIX_DOWNLOAD_CACHE is + # the hash and the timestamp of the file at $url. The caching of + # the file *contents* is done in Nix store, where it can be + # garbage-collected independently. + my ($cachedTimestampFN, $cachedHashFN, @cacheFlags); + if (defined $cacheDir) { + my $urlHash = hashString("sha256", 1, $url); + writeFile "$cacheDir/$urlHash.url", $url; + $cachedHashFN = "$cacheDir/$urlHash.$hashType"; + $cachedTimestampFN = "$cacheDir/$urlHash.stamp"; + @cacheFlags = ("--time-cond", $cachedTimestampFN) if -f $cachedHashFN && -f $cachedTimestampFN; + } + + # Perform the download. + my @curlFlags = ("curl", $url, "-o", $tmpFile, "--fail", "--location", "--max-redirs", "20", "--disable-epsv", "--cookie-jar", "$tmpDir/cookies", "--remote-time", (split " ", ($ENV{NIX_CURL_FLAGS} || ""))); + (system $Nix::Config::curl @curlFlags, @cacheFlags) == 0 or die "$0: download of ‘$url’ failed\n"; + + if (defined $cacheDir && ! -e $tmpFile) { + # Curl didn't create $tmpFile, so apparently there's no newer + # file on the server. + $hash = readFile $cachedHashFN or die; + $finalPath = makeFixedOutputPath(0, $hashType, $hash, $name); + unless (isValidPath $finalPath) { + print STDERR "cached contents of ‘$url’ disappeared, redownloading...\n"; + $finalPath = undef; + (system $Nix::Config::curl @curlFlags) == 0 or die "$0: download of ‘$url’ failed\n"; + } + } + + if (!defined $finalPath) { + + # Compute the hash. + $hash = hashFile($hashType, $hashType ne "md5", $tmpFile); + + if (defined $cacheDir) { + writeFile $cachedHashFN, $hash; + my $st = stat($tmpFile) or die; + open STAMP, ">$cachedTimestampFN" or die; close STAMP; + utime($st->atime, $st->mtime, $cachedTimestampFN) or die; + } + + # Add the downloaded file to the Nix store. + $finalPath = addToStore($tmpFile, 0, $hashType); + } + + die "$0: hash mismatch for ‘$url’\n" if defined $expHash && $expHash ne $hash; +} + +print STDERR "path is ‘$finalPath’\n" unless $ENV{'QUIET'}; +print "$hash\n"; +print "$finalPath\n" if $ENV{'PRINT_PATH'}; diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in new file mode 100644 index 000000000000..06e7bdb7b808 --- /dev/null +++ b/scripts/nix-profile.sh.in @@ -0,0 +1,22 @@ +if test -n "$HOME"; then + NIX_LINK="$HOME/.nix-profile" + + # Set the default profile. + if ! [ -L "$NIX_LINK" ]; then + echo "creating $NIX_LINK" >&2 + _NIX_DEF_LINK=@localstatedir@/nix/profiles/default + @coreutils@/ln -s "$_NIX_DEF_LINK" "$NIX_LINK" + fi + + export PATH=$NIX_LINK/bin:$PATH + + # Subscribe the root user to the Nixpkgs channel by default. + if [ ! 
-e $HOME/.nix-channels ]; then + echo "http://nixos.org/channels/nixpkgs-unstable nixpkgs" > $HOME/.nix-channels + fi + + # Append ~/.nix-defexpr/channels/nixpkgs to $NIX_PATH so that + # <nixpkgs> paths work when the user has fetched the Nixpkgs + # channel. + export NIX_PATH=${NIX_PATH:+$NIX_PATH:}nixpkgs=$HOME/.nix-defexpr/channels/nixpkgs +fi diff --git a/scripts/nix-pull.in b/scripts/nix-pull.in new file mode 100755 index 000000000000..58dd2cf27eee --- /dev/null +++ b/scripts/nix-pull.in @@ -0,0 +1,103 @@ +#! @perl@ -w @perlFlags@ + +use strict; +use File::Temp qw(tempdir); +use Nix::Config; +use Nix::Manifest; + +my $tmpDir = tempdir("nix-pull.XXXXXX", CLEANUP => 1, TMPDIR => 1) + or die "cannot create a temporary directory"; + +my $manifestDir = $Nix::Config::manifestDir; + + +# Prevent access problems in shared-stored installations. +umask 0022; + + +# Create the manifests directory if it doesn't exist. +if (! -e $manifestDir) { + mkdir $manifestDir, 0755 or die "cannot create directory `$manifestDir'"; +} + + +# Make sure that the manifests directory is scanned for GC roots. +my $gcRootsDir = "$Nix::Config::stateDir/gcroots"; +my $manifestDirLink = "$gcRootsDir/manifests"; +if (! -l $manifestDirLink) { + symlink($manifestDir, $manifestDirLink) or die "cannot create symlink `$manifestDirLink'"; +} + + +# Process the URLs specified on the command line. + +sub downloadFile { + my $url = shift; + $ENV{"PRINT_PATH"} = 1; + $ENV{"QUIET"} = 1; + my ($dummy, $path) = `$Nix::Config::binDir/nix-prefetch-url '$url'`; + die "cannot fetch `$url'" if $? != 0; + die "nix-prefetch-url did not return a path" unless defined $path; + chomp $path; + return $path; +} + +sub processURL { + my $url = shift; + + $url =~ s/\/$//; + + my $manifest; + + my $origUrl = $ENV{'NIX_ORIG_URL'} || $url; + + # First see if a bzipped manifest is available. + if (system("$Nix::Config::curl --fail --silent --location --head '$url'.bz2 > /dev/null") == 0) { + print "fetching list of Nix archives at `$url.bz2'...\n"; + $manifest = downloadFile "$url.bz2"; + } + + # Otherwise, just get the uncompressed manifest. + else { + print "fetching list of Nix archives at `$url'...\n"; + $manifest = downloadFile $url; + } + + my $baseName = "unnamed"; + if ($url =~ /\/([^\/]+)\/[^\/]+$/) { # get the forelast component + $baseName = $1; + } + + my $hash = `$Nix::Config::binDir/nix-hash --flat '$manifest'` + or die "cannot hash `$manifest'"; + chomp $hash; + + my $urlFile = "$manifestDir/$baseName-$hash.url"; + open URL, ">$urlFile" or die "cannot create `$urlFile'"; + print URL $origUrl; + close URL; + + my $finalPath = "$manifestDir/$baseName-$hash.nixmanifest"; + + unlink $finalPath if -e $finalPath; + + symlink("$manifest", "$finalPath") + or die "cannot link `$finalPath to `$manifest'"; + + deleteOldManifests($origUrl, $urlFile); +} + +while (@ARGV) { + my $url = shift @ARGV; + if ($url eq "--help") { + exec "man nix-pull" or die; + } elsif ($url eq "--skip-wrong-store") { + # No-op, no longer supported. + } else { + processURL $url; + } +} + + +# Update the cache. +updateManifestDB(); diff --git a/scripts/nix-push.in b/scripts/nix-push.in new file mode 100755 index 000000000000..bdd128a6f5c2 --- /dev/null +++ b/scripts/nix-push.in @@ -0,0 +1,293 @@ +#! 
@perl@ -w @perlFlags@ + +use strict; +use File::Basename; +use File::Temp qw(tempdir); +use File::Path qw(mkpath); +use File::stat; +use File::Copy; +use Nix::Config; +use Nix::Store; +use Nix::Manifest; +use Nix::Utils; +use Nix::Crypto; + +my $tmpDir = tempdir("nix-push.XXXXXX", CLEANUP => 1, TMPDIR => 1) + or die "cannot create a temporary directory"; + +my $nixExpr = "$tmpDir/create-nars.nix"; + + +# Parse the command line. +my $compressionType = "xz"; +my $force = 0; +my $destDir; +my $writeManifest = 0; +my $manifestPath; +my $archivesURL; +my $link = 0; +my $privateKeyFile; +my $keyName; +my @roots; + +for (my $n = 0; $n < scalar @ARGV; $n++) { + my $arg = $ARGV[$n]; + + if ($arg eq "--help") { + exec "man nix-push" or die; + } elsif ($arg eq "--bzip2") { + $compressionType = "bzip2"; + } elsif ($arg eq "--none") { + $compressionType = "none"; + } elsif ($arg eq "--force") { + $force = 1; + } elsif ($arg eq "--dest") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + $destDir = $ARGV[$n]; + mkpath($destDir, 0, 0755); + } elsif ($arg eq "--manifest") { + $writeManifest = 1; + } elsif ($arg eq "--manifest-path") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + $manifestPath = $ARGV[$n]; + $writeManifest = 1; + mkpath(dirname($manifestPath), 0, 0755); + } elsif ($arg eq "--url-prefix") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + $archivesURL = $ARGV[$n]; + } elsif ($arg eq "--link") { + $link = 1; + } elsif ($arg eq "--key") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + $privateKeyFile = $ARGV[$n]; + } elsif ($arg eq "--key-name") { + $n++; + die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV; + $keyName = $ARGV[$n]; + } elsif (substr($arg, 0, 1) eq "-") { + die "$0: unknown flag `$arg'\n"; + } else { + push @roots, $arg; + } +} + +die "$0: please specify a destination directory\n" if !defined $destDir; + +$archivesURL = "file://$destDir" unless defined $archivesURL; + + +# From the given store paths, determine the set of requisite store +# paths, i.e, the paths required to realise them. +my %storePaths; + +foreach my $path (@roots) { + # Get all paths referenced by the normalisation of the given + # Nix expression. + my $pid = open(READ, + "$Nix::Config::binDir/nix-store --query --requisites --force-realise " . + "--include-outputs '$path'|") or die; + + while (<READ>) { + chomp; + die "bad: $_" unless /^\//; + $storePaths{$_} = ""; + } + + close READ or die "nix-store failed: $?"; +} + +my @storePaths = keys %storePaths; + + +# Don't create archives for files that are already in the binary cache. +my @storePaths2; +my %narFiles; +foreach my $storePath (@storePaths) { + my $pathHash = substr(basename($storePath), 0, 32); + my $narInfoFile = "$destDir/$pathHash.narinfo"; + if (-e $narInfoFile) { + my $narInfo = parseNARInfo($storePath, readFile($narInfoFile), 0, $narInfoFile) or die "cannot read `$narInfoFile'\n"; + my $narFile = "$destDir/$narInfo->{url}"; + if (-e $narFile) { + print STDERR "skipping existing $storePath\n"; + # Add the NAR info to $narFiles if we're writing a + # manifest. + $narFiles{$storePath} = [ + { url => ("$archivesURL/" . 
basename $narInfo->{url}) + , hash => $narInfo->{fileHash} + , size => $narInfo->{fileSize} + , compressionType => $narInfo->{compression} + , narHash => $narInfo->{narHash} + , narSize => $narInfo->{narSize} + , references => join(" ", map { "$Nix::Config::storeDir/$_" } @{$narInfo->{refs}}) + , deriver => $narInfo->{deriver} ? "$Nix::Config::storeDir/$narInfo->{deriver}" : undef + } + ] if $writeManifest; + next; + } + } + push @storePaths2, $storePath; +} + + +# Create a list of Nix derivations that turn each path into a Nix +# archive. +open NIX, ">$nixExpr"; +print NIX "["; + +foreach my $storePath (@storePaths2) { + die unless ($storePath =~ /\/[0-9a-z]{32}[^\"\\\$]*$/); + + # Construct a Nix expression that creates a Nix archive. + my $nixexpr = + "(import <nix/nar.nix> " . + "{ storePath = builtins.storePath \"$storePath\"; hashAlgo = \"sha256\"; compressionType = \"$compressionType\"; }) "; + + print NIX $nixexpr; +} + +print NIX "]"; +close NIX; + + +# Build the Nix expression. +print STDERR "building compressed archives...\n"; +my @narPaths; +my $pid = open(READ, "$Nix::Config::binDir/nix-build $nixExpr -o $tmpDir/result |") + or die "cannot run nix-build"; +while (<READ>) { + chomp; + die unless /^\//; + push @narPaths, $_; +} +close READ or die "nix-build failed: $?"; + + +# Write the cache info file. +my $cacheInfoFile = "$destDir/nix-cache-info"; +if (! -e $cacheInfoFile) { + open FILE, ">$cacheInfoFile" or die "cannot create $cacheInfoFile: $!"; + print FILE "StoreDir: $Nix::Config::storeDir\n"; + print FILE "WantMassQuery: 0\n"; # by default, don't hit this cache for "nix-env -qas" + close FILE; +} + + +# Copy the archives and the corresponding NAR info files. +print STDERR "copying archives...\n"; + +my $totalNarSize = 0; +my $totalCompressedSize = 0; + +for (my $n = 0; $n < scalar @storePaths2; $n++) { + my $storePath = $storePaths2[$n]; + my $narDir = $narPaths[$n]; + my $baseName = basename $storePath; + + # Get info about the store path. + my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1); + + # In some exceptional cases (such as VM tests that use the Nix + # store of the host), the database doesn't contain the hash. So + # compute it. + if ($narHash =~ /^sha256:0*$/) { + my $nar = "$tmpDir/nar"; + system("$Nix::Config::binDir/nix-store --dump $storePath > $nar") == 0 + or die "cannot dump $storePath\n"; + $narHash = `$Nix::Config::binDir/nix-hash --type sha256 --base32 --flat $nar`; + die "cannot hash `$nar'" if $? != 0; + chomp $narHash; + $narHash = "sha256:$narHash"; + $narSize = stat("$nar")->size; + unlink $nar or die; + } + + $totalNarSize += $narSize; + + # Get info about the compressed NAR. + open HASH, "$narDir/nar-compressed-hash" or die "cannot open nar-compressed-hash"; + my $compressedHash = <HASH>; + chomp $compressedHash; + $compressedHash =~ /^[0-9a-z]+$/ or die "invalid hash"; + close HASH; + + my $narName = "$compressedHash.nar" . ($compressionType eq "xz" ? ".xz" : $compressionType eq "bzip2" ? ".bz2" : ""); + + my $narFile = "$narDir/$narName"; + (-f $narFile) or die "NAR file for $storePath not found"; + + my $compressedSize = stat($narFile)->size; + $totalCompressedSize += $compressedSize; + + printf STDERR "%s [%.2f MiB, %.1f%%]\n", $storePath, + $compressedSize / (1024 * 1024), $compressedSize / $narSize * 100; + + # Copy the compressed NAR. + my $dst = "$destDir/$narName"; + if (! 
-f $dst) { + my $tmp = "$destDir/.tmp.$$.$narName"; + if ($link) { + link($narFile, $tmp) or die "cannot link $tmp to $narFile: $!\n"; + } else { + copy($narFile, $tmp) or die "cannot copy $narFile to $tmp: $!\n"; + } + rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n"; + } + + # Write the info file. + my $info; + $info .= "StorePath: $storePath\n"; + $info .= "URL: $narName\n"; + $info .= "Compression: $compressionType\n"; + $info .= "FileHash: sha256:$compressedHash\n"; + $info .= "FileSize: $compressedSize\n"; + $info .= "NarHash: $narHash\n"; + $info .= "NarSize: $narSize\n"; + $info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n"; + if (defined $deriver) { + $info .= "Deriver: " . basename $deriver . "\n"; + if (isValidPath($deriver)) { + my $drv = derivationFromPath($deriver); + $info .= "System: $drv->{platform}\n"; + } + } + + if (defined $privateKeyFile && defined $keyName) { + my $sig = signString($privateKeyFile, $info); + $info .= "Signature: 1;$keyName;$sig\n"; + } + + my $pathHash = substr(basename($storePath), 0, 32); + + $dst = "$destDir/$pathHash.narinfo"; + if ($force || ! -f $dst) { + my $tmp = "$destDir/.tmp.$$.$pathHash.narinfo"; + open INFO, ">$tmp" or die; + print INFO "$info" or die; + close INFO or die; + rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n"; + } + + $narFiles{$storePath} = [ + { url => "$archivesURL/$narName" + , hash => "sha256:$compressedHash" + , size => $compressedSize + , compressionType => $compressionType + , narHash => "$narHash" + , narSize => $narSize + , references => join(" ", @{$refs}) + , deriver => $deriver + } + ] if $writeManifest; +} + +printf STDERR "total compressed size %.2f MiB, %.1f%%\n", + $totalCompressedSize / (1024 * 1024), $totalCompressedSize / ($totalNarSize || 1) * 100; + + +# Optionally write a manifest. +writeManifest($manifestPath // "$destDir/MANIFEST", \%narFiles, \()) if $writeManifest; diff --git a/scripts/nix-reduce-build.in b/scripts/nix-reduce-build.in new file mode 100755 index 000000000000..50beb9d10b16 --- /dev/null +++ b/scripts/nix-reduce-build.in @@ -0,0 +1,171 @@ +#! @bash@ + +WORKING_DIRECTORY=$(mktemp -d "${TMPDIR:-/tmp}"/nix-reduce-build-XXXXXX); +cd "$WORKING_DIRECTORY"; + +if test -z "$1" || test "a--help" = "a$1" ; then + echo 'nix-reduce-build (paths or Nix expressions) -- (package sources)' >&2 + echo As in: >&2 + echo nix-reduce-build /etc/nixos/nixos -- ssh://user@somewhere.nowhere.example.org >&2 + echo nix-reduce-build /etc/nixos/nixos -- \\ + echo " " \''http://somewhere.nowhere.example.org/nix/nix-http-export.cgi?needed_path='\' >&2 + echo " store path name will be added into the end of the URL" >&2 + echo nix-reduce-build /etc/nixos/nixos -- file://home/user/nar/ >&2 + echo " that should be a directory where gzipped 'nix-store --export' ">&2 + echo " files are located (they should have .nar.gz extension)" >&2 + echo " Or all together: " >&2 + echo -e nix-reduce-build /expr.nix /e2.nix -- \\\\\\\n\ + " ssh://a@b.example.com http://n.example.com/get-nar?q= file://nar/" >&2 + echo " Also supports best-effort local builds of failing expression set:" >&2 + echo "nix-reduce-build /e.nix -- nix-daemon:// nix-self://" >&2 + echo " nix-daemon:// builds using daemon" + echo " nix-self:// builds directly using nix-store from current installation" >&2 + echo " nix-daemon-fixed:// and nix-self-fixed:// do the same, but only for" >&2; + echo "derivations with specified output hash (sha256, sha1 or md5)." 
>&2 + echo " nix-daemon-substitute:// and nix-self-substitute:// try to substitute" >&2; + echo "maximum amount of paths" >&2; + echo " nix-daemon-build:// and nix-self-build:// try to build (not substitute)" >&2; + echo "maximum amount of paths" >&2; + echo " If no package sources are specified, required paths are listed." >&2; + exit; +fi; + +while ! test "$1" = "--" || test "$1" = "" ; do + echo "$1" >> initial; >&2 + shift; +done +shift; +echo Will work on $(cat initial | wc -l) targets. >&2 + +while read ; do + case "$REPLY" in + ${NIX_STORE_DIR:-/nix/store}/*) + echo "$REPLY" >> paths; >&2 + ;; + *) + ( + IFS=: ; + nix-instantiate $REPLY >> paths; + ); + ;; + esac; +done < initial; +echo Proceeding $(cat paths | wc -l) paths. >&2 + +while read; do + case "$REPLY" in + *.drv) + echo "$REPLY" >> derivers; >&2 + ;; + *) + nix-store --query --deriver "$REPLY" >>derivers; + ;; + esac; +done < paths; +echo Found $(cat derivers | wc -l) derivers. >&2 + +cat derivers | xargs nix-store --query -R > derivers-closure; +echo Proceeding at most $(cat derivers-closure | wc -l) derivers. >&2 + +cat derivers-closure | egrep '[.]drv$' | xargs nix-store --query --outputs > wanted-paths; +cat derivers-closure | egrep -v '[.]drv$' >> wanted-paths; +echo Prepared $(cat wanted-paths | wc -l) paths to get. >&2 + +cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths; +echo We need $(cat needed-paths | wc -l) paths. >&2 + +egrep '[.]drv$' derivers-closure > critical-derivers; + +if test -z "$1" ; then + cat needed-paths; +fi; + +refresh_critical_derivers() { + echo "Finding needed derivers..." >&2; + cat critical-derivers | while read; do + if ! (nix-store --query --outputs "$REPLY" | xargs nix-store --check-validity &> /dev/null;); then + echo "$REPLY"; + fi; + done > new-critical-derivers; + mv new-critical-derivers critical-derivers; + echo The needed paths are realized by $(cat critical-derivers | wc -l) derivers. 
>&2 +} + +build_here() { + cat critical-derivers | while read; do + echo "Realising $REPLY using nix-daemon" >&2 + @bindir@/nix-store -r "${REPLY}" + done; +} + +try_to_substitute(){ + cat needed-paths | while read ; do + echo "Building $REPLY using nix-daemon" >&2 + @bindir@/nix-store -r "${NIX_STORE_DIR:-/nix/store}/${REPLY##*/}" + done; +} + +for i in "$@"; do + sshHost="${i#ssh://}"; + httpHost="${i#http://}"; + httpsHost="${i#https://}"; + filePath="${i#file:/}"; + if [ "$i" != "$sshHost" ]; then + cat needed-paths | while read; do + echo "Getting $REPLY and its closure over ssh" >&2 + nix-copy-closure --from "$sshHost" --gzip "$REPLY" </dev/null || true; + done; + elif [ "$i" != "$httpHost" ] || [ "$i" != "$httpsHost" ]; then + cat needed-paths | while read; do + echo "Getting $REPLY over http/https" >&2 + curl ${BAD_CERTIFICATE:+-k} -L "$i${REPLY##*/}" | gunzip | nix-store --import; + done; + elif [ "$i" != "$filePath" ] ; then + cat needed-paths | while read; do + echo "Installing $REPLY from file" >&2 + gunzip < "$filePath/${REPLY##*/}".nar.gz | nix-store --import; + done; + elif [ "$i" = "nix-daemon://" ] ; then + NIX_REMOTE=daemon try_to_substitute; + refresh_critical_derivers; + NIX_REMOTE=daemon build_here; + elif [ "$i" = "nix-self://" ] ; then + NIX_REMOTE= try_to_substitute; + refresh_critical_derivers; + NIX_REMOTE= build_here; + elif [ "$i" = "nix-daemon-fixed://" ] ; then + refresh_critical_derivers; + + cat critical-derivers | while read; do + if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then + echo "Realising $REPLY using nix-daemon" >&2 + NIX_REMOTE=daemon @bindir@/nix-store -r "${REPLY}" + fi; + done; + elif [ "$i" = "nix-self-fixed://" ] ; then + refresh_critical_derivers; + + cat critical-derivers | while read; do + if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then + echo "Realising $REPLY using direct Nix build" >&2 + NIX_REMOTE= @bindir@/nix-store -r "${REPLY}" + fi; + done; + elif [ "$i" = "nix-daemon-substitute://" ] ; then + NIX_REMOTE=daemon try_to_substitute; + elif [ "$i" = "nix-self-substitute://" ] ; then + NIX_REMOTE= try_to_substitute; + elif [ "$i" = "nix-daemon-build://" ] ; then + refresh_critical_derivers; + NIX_REMOTE=daemon build_here; + elif [ "$i" = "nix-self-build://" ] ; then + refresh_critical_derivers; + NIX_REMOTE= build_here; + fi; + mv needed-paths wanted-paths; + cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths; + echo We still need $(cat needed-paths | wc -l) paths. >&2 +done; + +cd / +rm -r "$WORKING_DIRECTORY" diff --git a/scripts/remove-patches.pl b/scripts/remove-patches.pl new file mode 100755 index 000000000000..401771a27d76 --- /dev/null +++ b/scripts/remove-patches.pl @@ -0,0 +1,16 @@ +#! /usr/bin/perl -w -I/home/eelco/nix/scripts + +use strict; +use readmanifest; + +for my $p (@ARGV) { + + my %narFiles; + my %patches; + + readManifest $p, \%narFiles, \%patches; + + %patches = (); + + writeManifest $p, \%narFiles, \%patches; +} diff --git a/scripts/show-duplication.pl b/scripts/show-duplication.pl new file mode 100755 index 000000000000..0604c6696c7a --- /dev/null +++ b/scripts/show-duplication.pl @@ -0,0 +1,73 @@ +#! 
/usr/bin/perl -w
+
+use strict;
+
+if (scalar @ARGV != 1) {
+    print "syntax: show-duplication.pl PATH\n";
+    exit 1;
+}
+
+my $root = $ARGV[0];
+
+
+my $nameRE = "(?:(?:[A-Za-z0-9\+\_]|(?:-[^0-9]))+)";
+my $versionRE = "(?:[A-Za-z0-9\.\-]+)";
+
+
+my %pkgInstances;
+
+
+# Group all store paths in the closure of $root by package name.
+my $pid = open(PATHS, "-|") || exec "nix-store", "-qR", $root;
+while (<PATHS>) {
+    chomp;
+    next unless /^.*\/[0-9a-z]*-(.*)$/;
+    my $nameVersion = $1;
+    next unless $nameVersion =~ /^($nameRE)(-($versionRE))?$/;
+    my $name = $1;
+    my $version = $3;
+    $version = "(unnumbered)" unless defined $version;
+    push @{$pkgInstances{$name}}, {version => $version, path => $_};
+}
+close PATHS or exit 1;
+
+die "no packages found in the closure of `$root'\n" unless %pkgInstances;
+
+
+# Compute the size in bytes of a path, recursing into directories.
+sub pathSize {
+    my $path = shift;
+    my @st = lstat $path or die "cannot stat `$path'";
+
+    my $size = $st[7];
+
+    if (-d $path) {
+        opendir DIR, $path or die "cannot open directory `$path'";
+        foreach my $name (readdir DIR) {
+            next if $name eq "." || $name eq "..";
+            $size += pathSize("$path/$name");
+        }
+        closedir DIR;
+    }
+
+    return $size;
+}
+
+
+my $totalPaths = 0;
+my ($totalSize, $totalWaste) = (0, 0);
+
+foreach my $name (sort {scalar @{$pkgInstances{$b}} <=> scalar @{$pkgInstances{$a}}} (keys %pkgInstances)) {
+    print "$name ", scalar @{$pkgInstances{$name}}, "\n";
+    my $allSize = 0;
+    foreach my $x (sort {$a->{version} cmp $b->{version}} @{$pkgInstances{$name}}) {
+        $totalPaths++;
+        my $size = pathSize $x->{path};
+        $allSize += $size;
+        print "    $x->{version} $size\n";
+    }
+    my $avgSize = int($allSize / scalar @{$pkgInstances{$name}});
+    my $waste = $allSize - $avgSize;
+    $totalSize += $allSize;
+    $totalWaste += $waste;
+    print "    average $avgSize, waste $waste\n";
+}
+
+
+my $avgDupl = $totalPaths / scalar (keys %pkgInstances);
+my $wasteFactor = ($totalWaste / $totalSize) * 100;
+print "average package duplication $avgDupl, total size $totalSize, total waste $totalWaste, $wasteFactor% wasted\n";
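+# A worked example of the waste metric above (illustrative numbers):
+# a package with two instances in the closure, of 10 MiB and 14 MiB,
+# has allSize = 24 MiB, avgSize = int(24 / 2) = 12 MiB, and therefore
+# waste = allSize - avgSize = 12 MiB, roughly the space that keeping a
+# single (average-sized) instance would save.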