-rw-r--r--  .gitignore | 13
-rw-r--r--  Makefile | 4
-rw-r--r--  Makefile.config.in | 1
-rw-r--r--  configure.ac | 24
-rw-r--r--  corepkgs/buildenv.nix | 3
-rw-r--r--  corepkgs/buildenv.pl | 168
-rw-r--r--  corepkgs/config.nix.in | 2
-rw-r--r--  corepkgs/fetchurl.nix | 5
-rw-r--r--  corepkgs/local.mk | 2
-rw-r--r--  corepkgs/nar.nix | 48
-rw-r--r--  doc/manual/advanced-topics/distributed-builds.xml | 8
-rw-r--r--  doc/manual/command-ref/conf-file.xml | 26
-rw-r--r--  doc/manual/command-ref/env-common.xml | 9
-rw-r--r--  doc/manual/command-ref/nix-channel.xml | 4
-rw-r--r--  doc/manual/command-ref/nix-install-package.xml | 208
-rw-r--r--  doc/manual/command-ref/nix-push.xml | 449
-rw-r--r--  doc/manual/command-ref/nix-store.xml | 3
-rw-r--r--  doc/manual/command-ref/utilities.xml | 2
-rw-r--r--  doc/manual/expressions/builtins.xml | 15
-rw-r--r--  doc/manual/expressions/derivations.xml | 2
-rw-r--r--  doc/manual/expressions/language-constructs.xml | 18
-rw-r--r--  doc/manual/expressions/language-values.xml | 17
-rw-r--r--  doc/manual/introduction/quick-start.xml | 12
-rw-r--r--  doc/manual/local.mk | 13
-rw-r--r--  doc/manual/packages/one-click.xml | 37
-rw-r--r--  doc/manual/packages/package-management.xml | 1
-rwxr-xr-x  maintainers/upload-release.pl | 148
-rw-r--r--  misc/docker/Dockerfile | 4
-rw-r--r--  misc/launchd/org.nixos.nix-daemon.plist.in | 5
-rw-r--r--  misc/systemd/nix-daemon.service.in | 1
-rw-r--r--  nix.spec.in | 2
-rw-r--r--  perl/lib/Nix/Store.xs | 4
-rw-r--r--  release.nix | 13
-rw-r--r--  scripts/download-from-binary-cache.pl.in | 632
-rw-r--r--  scripts/local.mk | 12
-rwxr-xr-x  scripts/nix-build.in | 359
-rwxr-xr-x  scripts/nix-channel.in | 228
-rwxr-xr-x  scripts/nix-install-package.in | 127
-rw-r--r--  scripts/nix-profile.sh.in | 4
-rwxr-xr-x  scripts/nix-push.in | 296
-rwxr-xr-x  scripts/resolve-system-dependencies.pl.in | 122
-rwxr-xr-x  scripts/show-duplication.pl | 73
-rw-r--r--  src/buildenv/buildenv.cc | 186
-rw-r--r--  src/buildenv/local.mk | 9
-rw-r--r--  src/download-via-ssh/download-via-ssh.cc | 2
-rw-r--r--  src/libexpr/common-opts.cc | 2
-rw-r--r--  src/libexpr/eval.cc | 28
-rw-r--r--  src/libexpr/eval.hh | 15
-rw-r--r--  src/libexpr/get-drvs.cc | 2
-rw-r--r--  src/libexpr/json-to-value.cc | 9
-rw-r--r--  src/libexpr/parser.y | 6
-rw-r--r--  src/libexpr/primops.cc | 113
-rw-r--r--  src/libexpr/value-to-json.cc | 55
-rw-r--r--  src/libexpr/value-to-json.hh | 71
-rw-r--r--  src/libexpr/value.hh | 17
-rw-r--r--  src/libmain/shared.cc | 23
-rw-r--r--  src/libstore/binary-cache-store.cc | 36
-rw-r--r--  src/libstore/binary-cache-store.hh | 10
-rw-r--r--  src/libstore/build.cc | 168
-rw-r--r--  src/libstore/builtins.cc | 10
-rw-r--r--  src/libstore/derivations.cc | 58
-rw-r--r--  src/libstore/derivations.hh | 4
-rw-r--r--  src/libstore/download.cc | 604
-rw-r--r--  src/libstore/download.hh | 46
-rw-r--r--  src/libstore/gc.cc | 36
-rw-r--r--  src/libstore/globals.cc | 3
-rw-r--r--  src/libstore/globals.hh | 3
-rw-r--r--  src/libstore/http-binary-cache-store.cc | 48
-rw-r--r--  src/libstore/local-binary-cache-store.cc | 24
-rw-r--r--  src/libstore/local-fs-store.cc | 8
-rw-r--r--  src/libstore/local-store.cc | 153
-rw-r--r--  src/libstore/local-store.hh | 8
-rw-r--r--  src/libstore/misc.cc | 104
-rw-r--r--  src/libstore/nar-accessor.cc | 2
-rw-r--r--  src/libstore/nar-info-disk-cache.cc | 13
-rw-r--r--  src/libstore/nar-info.cc | 7
-rw-r--r--  src/libstore/optimise-store.cc | 22
-rw-r--r--  src/libstore/pathlocks.cc | 4
-rw-r--r--  src/libstore/profiles.cc | 4
-rw-r--r--  src/libstore/remote-store.cc | 99
-rw-r--r--  src/libstore/remote-store.hh | 4
-rw-r--r--  src/libstore/s3-binary-cache-store.cc | 58
-rw-r--r--  src/libstore/schema.sql | 3
-rw-r--r--  src/libstore/sqlite.cc | 17
-rw-r--r--  src/libstore/sqlite.hh | 10
-rw-r--r--  src/libstore/store-api.cc | 169
-rw-r--r--  src/libstore/store-api.hh | 86
-rw-r--r--  src/libutil/affinity.cc | 6
-rw-r--r--  src/libutil/archive.cc | 4
-rw-r--r--  src/libutil/args.hh | 2
-rw-r--r--  src/libutil/json.cc | 176
-rw-r--r--  src/libutil/json.hh | 184
-rw-r--r--  src/libutil/logging.cc | 14
-rw-r--r--  src/libutil/logging.hh | 11
-rw-r--r--  src/libutil/serialise.cc | 2
-rw-r--r--  src/libutil/thread-pool.cc | 2
-rw-r--r--  src/libutil/types.hh | 69
-rw-r--r--  src/libutil/util.cc | 108
-rw-r--r--  src/libutil/util.hh | 74
-rw-r--r--  src/nix-build/local.mk | 9
-rwxr-xr-x  src/nix-build/nix-build.cc | 510
-rw-r--r--  src/nix-channel/local.mk | 7
-rwxr-xr-x  src/nix-channel/nix-channel.cc | 270
-rw-r--r--  src/nix-collect-garbage/nix-collect-garbage.cc | 2
-rw-r--r--  src/nix-daemon/nix-daemon.cc | 52
-rw-r--r--  src/nix-env/nix-env.cc | 40
-rw-r--r--  src/nix-env/user-env.cc | 2
-rw-r--r--  src/nix-prefetch-url/nix-prefetch-url.cc | 12
-rw-r--r--  src/nix-store/nix-store.cc | 17
-rw-r--r--  src/nix/installables.cc | 42
-rw-r--r--  src/nix/installables.hh | 12
-rw-r--r--  src/nix/path-info.cc | 107
-rw-r--r--  src/nix/sigs.cc | 4
-rw-r--r--  src/nix/verify.cc | 16
-rw-r--r--  src/resolve-system-dependencies/local.mk | 11
-rw-r--r--  src/resolve-system-dependencies/resolve-system-dependencies.cc | 194
-rw-r--r--  tests/binary-cache.sh | 6
-rw-r--r--  tests/common.sh.in | 7
-rw-r--r--  tests/config.nix | 2
-rw-r--r--  tests/dump-db.sh | 3
-rw-r--r--  tests/gc-runtime.sh | 2
-rw-r--r--  tests/init.sh | 5
-rw-r--r--  tests/install-package.sh | 20
-rw-r--r--  tests/lang/eval-okay-partition.exp | 1
-rw-r--r--  tests/lang/eval-okay-partition.nix | 5
-rw-r--r--  tests/local.mk | 7
-rw-r--r--  tests/nix-channel.sh | 2
-rw-r--r--  tests/nix-push.sh | 12
-rw-r--r--  tests/placeholders.sh | 22
-rw-r--r--  tests/referrers.sh | 2
-rw-r--r--  tests/remote-store.sh | 2
-rw-r--r--  tests/repair.sh | 2
-rw-r--r--  tests/tarball.sh | 6
-rw-r--r--  tests/user-envs.sh | 5
134 files changed, 3655 insertions, 3898 deletions
diff --git a/.gitignore b/.gitignore
index a524e9b6a29b..04dd791964f2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -34,13 +34,9 @@ Makefile.config
 
 # /scripts/
 /scripts/nix-profile.sh
-/scripts/nix-push
 /scripts/nix-switch
 /scripts/nix-collect-garbage
 /scripts/nix-prefetch-url
-/scripts/nix-install-package
-/scripts/nix-channel
-/scripts/nix-build
 /scripts/nix-copy-closure
 /scripts/NixConfig.pm
 /scripts/NixManifest.pm
@@ -74,9 +70,18 @@ Makefile.config
 # /src/nix-daemon/
 /src/nix-daemon/nix-daemon
 
+# /src/nix-channel/
+/src/nix-channel/nix-channel
+
 # /src/download-via-ssh/
 /src/download-via-ssh/download-via-ssh
 
+# /src/buildenv/
+/src/buildenv/buildenv
+
+# /src/nix-build/
+/src/nix-build/nix-build
+
 # /tests/
 /tests/test-tmp
 /tests/common.sh
diff --git a/Makefile b/Makefile
index 90dca473f390..2ee40b56b1d3 100644
--- a/Makefile
+++ b/Makefile
@@ -12,6 +12,10 @@ makefiles = \
   src/nix-daemon/local.mk \
   src/nix-collect-garbage/local.mk \
   src/nix-prefetch-url/local.mk \
+  src/buildenv/local.mk \
+  src/resolve-system-dependencies/local.mk \
+  src/nix-channel/local.mk \
+  src/nix-build/local.mk \
   perl/local.mk \
   scripts/local.mk \
   corepkgs/local.mk \
diff --git a/Makefile.config.in b/Makefile.config.in
index 3e7847e365c7..2db7172b15c9 100644
--- a/Makefile.config.in
+++ b/Makefile.config.in
@@ -33,5 +33,6 @@ pkglibdir = $(libdir)/$(PACKAGE_NAME)
 prefix = @prefix@
 storedir = @storedir@
 sysconfdir = @sysconfdir@
+doc_generate = @doc_generate@
 xmllint = @xmllint@
 xsltproc = @xsltproc@
diff --git a/configure.ac b/configure.ac
index a9e6b4313644..91ed9947abdd 100644
--- a/configure.ac
+++ b/configure.ac
@@ -246,23 +246,6 @@ AC_MSG_RESULT(yes)
 AC_SUBST(perlFlags)
 
 
-# Check for otool, an optional dependency on Darwin.
-AC_PATH_PROG(otool, otool)
-AC_MSG_CHECKING([that otool works])
-case $host_os in
-  darwin*)
-    if test -z "$otool" || ! $otool --version 2>/dev/null; then
-      AC_MSG_RESULT(no)
-      AC_MSG_ERROR([Can't get version from otool; do you need to install developer tools?])
-    fi
-    AC_MSG_RESULT(yes)
-    ;;
-  *)
-    AC_MSG_RESULT(not needed)
-    ;;
-esac
-
-
 # Whether to build the Perl bindings
 AC_MSG_CHECKING([whether to build the Perl bindings])
 AC_ARG_ENABLE(perl-bindings, AC_HELP_STRING([--enable-perl-bindings],
@@ -282,6 +265,13 @@ AC_ARG_ENABLE(init-state, AC_HELP_STRING([--disable-init-state],
 #AM_CONDITIONAL(INIT_STATE, test "$init_state" = "yes")
 
 
+# documentation generation switch
+AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen],
+  [disable documentation generation]),
+  doc_generate=$enableval, doc_generate=yes)
+AC_SUBST(doc_generate)
+
+
 # Setuid installations.
 AC_CHECK_FUNCS([setresuid setreuid lchown])
 
diff --git a/corepkgs/buildenv.nix b/corepkgs/buildenv.nix
index 70981a752c3c..5e7b40eaa0cb 100644
--- a/corepkgs/buildenv.nix
+++ b/corepkgs/buildenv.nix
@@ -5,8 +5,7 @@ with import <nix/config.nix>;
 derivation {
   name = "user-environment";
   system = builtins.currentSystem;
-  builder = perl;
-  args = [ "-w" ./buildenv.pl ];
+  builder = nixLibexecDir + "/nix/buildenv";
 
   inherit manifest;
 
diff --git a/corepkgs/buildenv.pl b/corepkgs/buildenv.pl
deleted file mode 100644
index dacc53701a01..000000000000
--- a/corepkgs/buildenv.pl
+++ /dev/null
@@ -1,168 +0,0 @@
-use strict;
-use Cwd;
-use IO::Handle;
-use utf8;
-
-STDOUT->autoflush(1);
-
-my $out = $ENV{"out"};
-mkdir "$out", 0755 || die "error creating $out";
-
-
-my $symlinks = 0;
-
-my %priorities;
-
-
-# For each activated package, create symlinks.
-
-sub createLinks {
-    my $srcDir = shift;
-    my $dstDir = shift;
-    my $priority = shift;
-
-    my @srcFiles = glob("$srcDir/*");
-
-    foreach my $srcFile (@srcFiles) {
-        my $baseName = $srcFile;
-        $baseName =~ s/^.*\///g; # strip directory
-        my $dstFile = "$dstDir/$baseName";
-
-        # The files below are special-cased so that they don't show up
-        # in user profiles, either because they are useless, or
-        # because they would cause pointless collisions (e.g., each
-        # Python package brings its own
-        # `$out/lib/pythonX.Y/site-packages/easy-install.pth'.)
-        # Urgh, hacky...
-        if ($srcFile =~ /\/propagated-build-inputs$/ ||
-            $srcFile =~ /\/nix-support$/ ||
-            $srcFile =~ /\/perllocal.pod$/ ||
-            $srcFile =~ /\/info\/dir$/ ||
-            $srcFile =~ /\/log$/)
-        {
-            # Do nothing.
-        }
-
-        elsif (-d $srcFile) {
-
-            lstat $dstFile;
-
-            if (-d _) {
-                createLinks($srcFile, $dstFile, $priority);
-            }
-
-            elsif (-l _) {
-                my $target = readlink $dstFile or die;
-                if (!-d $target) {
-                    die "collision between directory ‘$srcFile’ and non-directory ‘$target’";
-                }
-                unlink $dstFile or die "error unlinking ‘$dstFile’: $!";
-                mkdir $dstFile, 0755 ||
-                    die "error creating directory ‘$dstFile’: $!";
-                createLinks($target, $dstFile, $priorities{$dstFile});
-                createLinks($srcFile, $dstFile, $priority);
-            }
-
-            else {
-                symlink($srcFile, $dstFile) ||
-                    die "error creating link ‘$dstFile’: $!";
-                $priorities{$dstFile} = $priority;
-                $symlinks++;
-            }
-        }
-
-        else {
-
-            if (-l $dstFile) {
-                my $target = readlink $dstFile;
-                my $prevPriority = $priorities{$dstFile};
-                die("collision between ‘$srcFile’ and ‘$target’; " .
-                    "use ‘nix-env --set-flag priority NUMBER PKGNAME’ " .
-                    "to change the priority of one of the conflicting packages\n")
-                    if $prevPriority == $priority;
-                next if $prevPriority < $priority;
-                unlink $dstFile or die;
-            }
-
-            symlink($srcFile, $dstFile) ||
-                die "error creating link ‘$dstFile’: $!";
-            $priorities{$dstFile} = $priority;
-            $symlinks++;
-        }
-    }
-}
-
-
-my %done;
-my %postponed;
-
-sub addPkg;
-sub addPkg {
-    my $pkgDir = shift;
-    my $priority = shift;
-
-    return if (defined $done{$pkgDir});
-    $done{$pkgDir} = 1;
-
-#    print "symlinking $pkgDir\n";
-    createLinks("$pkgDir", "$out", $priority);
-
-    my $propagatedFN = "$pkgDir/nix-support/propagated-user-env-packages";
-    if (-e $propagatedFN) {
-        open PROP, "<$propagatedFN" or die;
-        my $propagated = <PROP>;
-        close PROP;
-        my @propagated = split ' ', $propagated;
-        foreach my $p (@propagated) {
-            $postponed{$p} = 1 unless defined $done{$p};
-        }
-    }
-}
-
-
-# Convert the stuff we get from the environment back into a coherent
-# data type.
-my @pkgs;
-my @derivations = split ' ', $ENV{"derivations"};
-while (scalar @derivations) {
-    my $active = shift @derivations;
-    my $priority = shift @derivations;
-    my $outputs = shift @derivations;
-    for (my $n = 0; $n < $outputs; $n++) {
-        my $path = shift @derivations;
-        push @pkgs,
-            { path => $path
-            , active => $active ne "false"
-            , priority => int($priority) };
-    }
-}
-
-
-# Symlink to the packages that have been installed explicitly by the
-# user.  Process in priority order to reduce unnecessary
-# symlink/unlink steps.
-@pkgs = sort { $a->{priority} <=> $b->{priority} || $a->{path} cmp $b->{path} } @pkgs;
-foreach my $pkg (@pkgs) {
-    #print $pkg, " ", $pkgs{$pkg}->{priority}, "\n";
-    addPkg($pkg->{path}, $pkg->{priority}) if $pkg->{active};
-}
-
-
-# Symlink to the packages that have been "propagated" by packages
-# installed by the user (i.e., package X declares that it wants Y
-# installed as well).  We do these later because they have a lower
-# priority in case of collisions.
-my $priorityCounter = 1000; # don't care about collisions
-while (scalar(keys %postponed) > 0) {
-    my @pkgDirs = keys %postponed;
-    %postponed = ();
-    foreach my $pkgDir (sort @pkgDirs) {
-        addPkg($pkgDir, $priorityCounter++);
-    }
-}
-
-
-print STDERR "created $symlinks symlinks in user environment\n";
-
-
-symlink($ENV{"manifest"}, "$out/manifest.nix") or die "cannot create manifest";
diff --git a/corepkgs/config.nix.in b/corepkgs/config.nix.in
index 90e8edbea833..f0f4890a32fd 100644
--- a/corepkgs/config.nix.in
+++ b/corepkgs/config.nix.in
@@ -3,7 +3,6 @@ let
     let val = builtins.getEnv var; in
     if val != "" then val else def;
 in rec {
-  perl = "@perl@";
   shell = "@bash@";
   coreutils = "@coreutils@";
   bzip2 = "@bzip2@";
@@ -14,6 +13,7 @@ in rec {
   tr = "@tr@";
   nixBinDir = fromEnv "NIX_BIN_DIR" "@bindir@";
   nixPrefix = "@prefix@";
+  nixLibexecDir = fromEnv "NIX_LIBEXEC_DIR" "@libexecdir@";
 
   # If Nix is installed in the Nix store, then automatically add it as
   # a dependency to the core packages. This ensures that they work
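
Like nixBinDir, the new nixLibexecDir attribute reads an environment variable first and falls back to the configured path, so it can be overridden at evaluation time. A hypothetical override (the path and package attribute are illustrative only):

    NIX_LIBEXEC_DIR=/opt/nix/libexec nix-env -iA nixpkgs.hello

corepkgs/buildenv.nix, earlier in this patch, uses this attribute to locate its builder as nixLibexecDir + "/nix/buildenv".
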
diff --git a/corepkgs/fetchurl.nix b/corepkgs/fetchurl.nix
index 5e0ad9da3c68..042705b1abb0 100644
--- a/corepkgs/fetchurl.nix
+++ b/corepkgs/fetchurl.nix
@@ -1,5 +1,3 @@
-with import <nix/config.nix>;
-
 { system ? builtins.currentSystem
 , url
 , outputHash ? ""
@@ -35,4 +33,7 @@ derivation {
     # by definition pure.
     "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
   ];
+
+  # To make "nix-prefetch-url" work.
+  urls = [ url ];
 }
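
For context, a sketch of how <nix/fetchurl.nix> is typically invoked; the URL and hash are placeholders, and only the parameters visible in the header above (url, outputHash) are set, with everything else left at its default:

    import <nix/fetchurl.nix> {
      url = "http://example.org/hello-1.0.tar.gz";
      outputHash = "0000000000000000000000000000000000000000000000000000";
    }

The added urls attribute simply mirrors url as a single-element list, which is the form nix-prefetch-url looks for when it inspects such a derivation.
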
diff --git a/corepkgs/local.mk b/corepkgs/local.mk
index 19c1d06962c0..362c8eb612eb 100644
--- a/corepkgs/local.mk
+++ b/corepkgs/local.mk
@@ -1,4 +1,4 @@
-corepkgs_FILES = nar.nix buildenv.nix buildenv.pl unpack-channel.nix derivation.nix fetchurl.nix imported-drv-to-derivation.nix
+corepkgs_FILES = buildenv.nix unpack-channel.nix derivation.nix fetchurl.nix imported-drv-to-derivation.nix
 
 $(foreach file,config.nix $(corepkgs_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/corepkgs)))
 
diff --git a/corepkgs/nar.nix b/corepkgs/nar.nix
deleted file mode 100644
index 61b3fc6772c4..000000000000
--- a/corepkgs/nar.nix
+++ /dev/null
@@ -1,48 +0,0 @@
-with import <nix/config.nix>;
-
-let
-
-  builder = builtins.toFile "nar.sh"
-    ''
-      export PATH=${nixBinDir}:${coreutils}
-
-      if [ $compressionType = xz ]; then
-        ext=.xz
-        compressor="| ${xz} -7"
-      elif [ $compressionType = bzip2 ]; then
-        ext=.bz2
-        compressor="| ${bzip2}"
-      else
-        ext=
-        compressor=
-      fi
-
-      echo "packing ‘$storePath’..."
-      mkdir $out
-      dst=$out/tmp.nar$ext
-
-      set -o pipefail
-      eval "nix-store --dump \"$storePath\" $compressor > $dst"
-
-      hash=$(nix-hash --flat --type $hashAlgo --base32 $dst)
-      echo -n $hash > $out/nar-compressed-hash
-
-      mv $dst $out/$hash.nar$ext
-    '';
-
-in
-
-{ storePath, hashAlgo, compressionType }:
-
-derivation {
-  name = "nar";
-  system = builtins.currentSystem;
-  builder = shell;
-  args = [ "-e" builder ];
-  inherit storePath hashAlgo compressionType;
-
-  # Remote machines may not have ${nixBinDir} or ${coreutils} in the same prefixes
-  preferLocalBuild = true;
-
-  inherit chrootDeps;
-}
diff --git a/doc/manual/advanced-topics/distributed-builds.xml b/doc/manual/advanced-topics/distributed-builds.xml
index 1f8d98f5d8fe..f8583700393c 100644
--- a/doc/manual/advanced-topics/distributed-builds.xml
+++ b/doc/manual/advanced-topics/distributed-builds.xml
@@ -11,9 +11,9 @@ forward Nix builds to other machines over the network.  This allows
 multiple builds to be performed in parallel (thus improving
 performance) and allows Nix to perform multi-platform builds in a
 semi-transparent way.  For instance, if you perform a build for a
-<literal>powerpc-darwin</literal> on an <literal>i686-linux</literal>
+<literal>x86_64-darwin</literal> on an <literal>i686-linux</literal>
 machine, Nix can automatically forward the build to a
-<literal>powerpc-darwin</literal> machine, if available.</para>
+<literal>x86_64-darwin</literal> machine, if available.</para>
 
 <para>You can enable distributed builds by setting the environment
 variable <envar>NIX_BUILD_HOOK</envar> to point to a program that Nix
@@ -30,7 +30,7 @@ variable</link>.</para>
 <example xml:id='ex-remote-systems'><title>Remote machine configuration:
 <filename>remote-systems.conf</filename></title>
 <programlisting>
-nix@mcflurry.labs.cs.uu.nl  powerpc-darwin  /home/nix/.ssh/id_quarterpounder_auto  2
+nix@mcflurry.labs.cs.uu.nl  x86_64-darwin   /home/nix/.ssh/id_quarterpounder_auto  2
 nix@scratchy.labs.cs.uu.nl  i686-linux      /home/nix/.ssh/id_scratchy_auto        8 1 kvm
 nix@itchy.labs.cs.uu.nl     i686-linux      /home/nix/.ssh/id_scratchy_auto        8 2
 nix@poochie.labs.cs.uu.nl   i686-linux      /home/nix/.ssh/id_scratchy_auto        8 2 kvm perf
@@ -59,7 +59,7 @@ bits of information:
   <filename>~/.ssh/config</filename>.</para></listitem>
 
   <listitem><para>A comma-separated list of Nix platform type
-  identifiers, such as <literal>powerpc-darwin</literal>.  It is
+  identifiers, such as <literal>x86_64-darwin</literal>.  It is
   possible for a machine to support multiple platform types, e.g.,
   <literal>i686-linux,x86_64-linux</literal>.</para></listitem>
 
diff --git a/doc/manual/command-ref/conf-file.xml b/doc/manual/command-ref/conf-file.xml
index 4c8f3d9d3809..69295fafce84 100644
--- a/doc/manual/command-ref/conf-file.xml
+++ b/doc/manual/command-ref/conf-file.xml
@@ -408,10 +408,9 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
 
   <varlistentry><term><literal>binary-caches-parallel-connections</literal></term>
 
-    <listitem><para>The maximum number of parallel HTTP connections
-    used by the binary cache substituter to get NAR info files.  This
-    number should be high to minimise latency.  It defaults to
-    25.</para></listitem>
+    <listitem><para>The maximum number of parallel TCP connections
+    used to fetch files from binary caches and by other downloads. It
+    defaults to 25. 0 means no limit.</para></listitem>
 
   </varlistentry>
 
@@ -430,15 +429,15 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
     <listitem><para>This option specifies the canonical Nix system
     name of the current installation, such as
     <literal>i686-linux</literal> or
-    <literal>powerpc-darwin</literal>.  Nix can only build derivations
+    <literal>x86_64-darwin</literal>.  Nix can only build derivations
     whose <literal>system</literal> attribute equals the value
     specified here.  In general, it never makes sense to modify this
     value from its default, since you can use it to ‘lie’ about the
     platform you are building on (e.g., perform a Mac OS build on a
     Linux machine; the result would obviously be wrong).  It only
     makes sense if the Nix binaries can run on multiple platforms,
-    e.g., ‘universal binaries’ that run on <literal>powerpc-darwin</literal> and
-    <literal>i686-darwin</literal>.</para>
+    e.g., ‘universal binaries’ that run on <literal>x86_64-linux</literal> and
+    <literal>i686-linux</literal>.</para>
 
     <para>It defaults to the canonical Nix system name detected by
     <filename>configure</filename> at build time.</para></listitem>
@@ -603,6 +602,19 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
   </varlistentry>
 
 
+  <varlistentry xml:id="conf-sandbox-dev-shm-size"><term><literal>sandbox-dev-shm-size</literal></term>
+
+    <listitem><para>This option determines the maximum size of the
+    <literal>tmpfs</literal> filesystem mounted on
+    <filename>/dev/shm</filename> in Linux sandboxes. For the format,
+    see the description of the <option>size</option> option of
+    <literal>tmpfs</literal> in
+    <citerefentry><refentrytitle>mount</refentrytitle><manvolnum>8</manvolnum></citerefentry>. The
+    default is <literal>50%</literal>.</para></listitem>
+
+  </varlistentry>
+
+
 </variablelist>
 
 </para>
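
The two options documented above could appear in nix.conf as follows; the values are illustrative, not defaults:

    # Allow up to 50 concurrent download connections (0 would mean unlimited)
    binary-caches-parallel-connections = 50
    # Give Linux build sandboxes a 1g /dev/shm instead of the default 50%
    sandbox-dev-shm-size = 1g

Both can also be set per invocation with --option, e.g. --option sandbox-dev-shm-size 1g.
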
diff --git a/doc/manual/command-ref/env-common.xml b/doc/manual/command-ref/env-common.xml
index 27efef945f15..c757cb17bd10 100644
--- a/doc/manual/command-ref/env-common.xml
+++ b/doc/manual/command-ref/env-common.xml
@@ -129,15 +129,6 @@ $ mount -o bind /mnt/otherdisk/nix /nix</screen>
 </varlistentry>
 
 
-<varlistentry><term><envar>NIX_DB_DIR</envar></term>
-
-  <listitem><para>Overrides the location of the Nix database (default
-  <filename><replaceable>$NIX_STATE_DIR</replaceable>/db</filename>, i.e.,
-  <filename><replaceable>prefix</replaceable>/var/nix/db</filename>).</para></listitem>
-
-</varlistentry>
-
-
 <varlistentry><term><envar>NIX_CONF_DIR</envar></term>
 
   <listitem><para>Overrides the location of the Nix configuration
diff --git a/doc/manual/command-ref/nix-channel.xml b/doc/manual/command-ref/nix-channel.xml
index 0a1f2a8b722d..9acf44e52984 100644
--- a/doc/manual/command-ref/nix-channel.xml
+++ b/doc/manual/command-ref/nix-channel.xml
@@ -180,9 +180,7 @@ following files:</para>
     sufficient rights to add binary caches. For instance, in a
     multi-user Nix setup, the binary caches provided by the channels
     of the root user are used automatically, but caches corresponding
-    to the channels of non-root users are ignored. Binary caches can
-    be created and maintained using
-    <command>nix-push</command>.</para></listitem>
+    to the channels of non-root users are ignored.</para></listitem>
 
   </varlistentry>
 
diff --git a/doc/manual/command-ref/nix-install-package.xml b/doc/manual/command-ref/nix-install-package.xml
deleted file mode 100644
index e17166caaaf3..000000000000
--- a/doc/manual/command-ref/nix-install-package.xml
+++ /dev/null
@@ -1,208 +0,0 @@
-<refentry xmlns="http://docbook.org/ns/docbook"
-      xmlns:xlink="http://www.w3.org/1999/xlink"
-      xmlns:xi="http://www.w3.org/2001/XInclude"
-      version="5.0"
-      xml:id="sec-nix-install-package">
-
-<refmeta>
-  <refentrytitle>nix-install-package</refentrytitle>
-  <manvolnum>1</manvolnum>
-  <refmiscinfo class="source">Nix</refmiscinfo>
-  <refmiscinfo class="version"><xi:include href="../version.txt" parse="text"/></refmiscinfo>
-</refmeta>
-
-<refnamediv>
-  <refname>nix-install-package</refname>
-  <refpurpose>install a Nix Package file</refpurpose>
-</refnamediv>
-
-<refsynopsisdiv>
-  <cmdsynopsis>
-    <command>nix-install-package</command>
-    <arg><option>--non-interactive</option></arg>
-    <arg>
-      <group choice='req'>
-        <arg choice='plain'><option>--profile</option></arg>
-        <arg choice='plain'><option>-p</option></arg>
-      </group>
-      <replaceable>path</replaceable>
-    </arg>
-    <arg><option>--set</option></arg>
-    <sbr />
-    <group choice='req'>
-      <arg choice='req'>
-        <option>--url</option>
-        <arg choice='plain'><replaceable>url</replaceable></arg>
-      </arg>
-      <arg choice='req'>
-        <arg choice='plain'><replaceable>file</replaceable></arg>
-      </arg>
-    </group>
-  </cmdsynopsis>
-</refsynopsisdiv>
-
-
-<refsection><title>Description</title>
-
-<para>The command <command>nix-install-package</command> interactively
-installs a Nix Package file (<filename>*.nixpkg</filename>), which is
-a small file that contains a store path to be installed along with the
-URL of a binary cache.  The Nix Package file is either
-<replaceable>file</replaceable>, or automatically downloaded from
-<replaceable>url</replaceable> if the <option>--url</option> switch is
-used.</para>
-
-<para><command>nix-install-package</command> is used in <link
-linkend="sec-one-click">one-click installs</link> to download and
-install pre-built binary packages with all necessary dependencies.
-<command>nix-install-package</command> is intended to be associated
-with the MIME type <literal>application/nix-package</literal> in a web
-browser so that it is invoked automatically when you click on
-<filename>*.nixpkg</filename> files.  When invoked, it restarts itself
-in a terminal window (since otherwise it would be invisible when run
-from a browser), asks the user to confirm whether to install the
-package, and if so downloads and installs the package into the user’s
-current profile.</para>
-
-<para>To obtain a window, <command>nix-install-package</command> tries
-to restart itself with <command>xterm</command>,
-<command>konsole</command> and
-<command>gnome-terminal</command>.</para>
-
-</refsection>
-
-
-<refsection><title>Options</title>
-
-<variablelist>
-
-  <varlistentry><term><option>--non-interactive</option></term>
-
-    <listitem><para>Do not open a new terminal window and do not ask
-    for confirmation.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><option>--profile</option></term>
-    <term><option>-p</option></term>
-
-    <listitem><para>Install the package into the specified profile
-    rather than the user’s current profile.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><option>--set</option></term>
-
-    <listitem><para>Install the package as the profile so that the
-    profile contains exactly the contents of the package.</para></listitem>
-
-  </varlistentry>
-
-</variablelist>
-
-</refsection>
-
-
-<refsection><title>Examples</title>
-
-<para>To install <filename>subversion-1.4.0.nixpkg</filename> into the
-user’s current profile, without any prompting:
-
-<screen>
-$ nix-install-package --non-interactive subversion-1.4.0.nixpkg</screen>
-
-</para>
-
-<para>To install the same package from some URL into a different
-profile:
-
-<screen>
-$ nix-install-package --non-interactive -p /nix/var/nix/profiles/eelco \
-    --url http://nix.cs.uu.nl/dist/nix/nixpkgs-0.10pre6622/pkgs/subversion-1.4.0-i686-linux.nixpkg</screen>
-
-</para>
-
-</refsection>
-
-
-<refsection><title>Format of <literal>nixpkg</literal> files</title>
-
-<para>A Nix Package file consists of a single line with the following
-format:
-
-<screen>
-NIXPKG1 <replaceable>manifestURL</replaceable> <replaceable>name</replaceable> <replaceable>system</replaceable> <replaceable>drvPath</replaceable> <replaceable>outPath</replaceable></screen>
-
-The elements are as follows:
-
-<variablelist>
-
-  <varlistentry><term><literal>NIXPKG1</literal></term>
-
-    <listitem><para>The version of the Nix Package
-    file.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><replaceable>manifestURL</replaceable></term>
-
-    <listitem><para>Obsolete.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><replaceable>name</replaceable></term>
-
-    <listitem><para>The symbolic name and version of the
-    package.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><replaceable>system</replaceable></term>
-
-    <listitem><para>The platform identifier of the platform for which
-    this binary package is intended.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><replaceable>drvPath</replaceable></term>
-
-    <listitem><para>The path in the Nix store of the derivation from
-    which <replaceable>outPath</replaceable> was built.  Not currently
-    used.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><replaceable>outPath</replaceable></term>
-
-    <listitem><para>The path in the Nix store of the
-    package.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><replaceable>binaryCacheURL</replaceable></term>
-
-    <listitem><para>The URL of a binary cache containing the closure
-    of <replaceable>outPath</replaceable>.</para></listitem>
-
-  </varlistentry>
-
-</variablelist>
-
-</para>
-
-<para>An example follows:
-
-<screen>
-NIXPKG1 http://.../nixpkgs-0.10pre6622/MANIFEST subversion-1.4.0 i686-darwin \
-  /nix/store/4kh60jkp...-subversion-1.4.0.drv \
-  /nix/store/nkw7wpgb...-subversion-1.4.0</screen>
-
-(The line breaks (<literal>\</literal>) are for presentation purposes
-and not part of the actual file.)
-
-</para>
-
-</refsection>
-
-
-</refentry>
diff --git a/doc/manual/command-ref/nix-push.xml b/doc/manual/command-ref/nix-push.xml
deleted file mode 100644
index 0749824a0ad4..000000000000
--- a/doc/manual/command-ref/nix-push.xml
+++ /dev/null
@@ -1,449 +0,0 @@
-<refentry xmlns="http://docbook.org/ns/docbook"
-      xmlns:xlink="http://www.w3.org/1999/xlink"
-      xmlns:xi="http://www.w3.org/2001/XInclude"
-      version="5.0"
-      xml:id="sec-nix-push">
-
-<refmeta>
-  <refentrytitle>nix-push</refentrytitle>
-  <manvolnum>1</manvolnum>
-  <refmiscinfo class="source">Nix</refmiscinfo>
-  <refmiscinfo class="version"><xi:include href="../version.txt" parse="text"/></refmiscinfo>
-</refmeta>
-
-<refnamediv>
-  <refname>nix-push</refname>
-  <refpurpose>generate a binary cache</refpurpose>
-</refnamediv>
-
-<refsynopsisdiv>
-  <cmdsynopsis>
-    <command>nix-push</command>
-    <arg choice='plain'><option>--dest</option> <replaceable>dest-dir</replaceable></arg>
-    <arg><option>--bzip2</option></arg>
-    <arg><option>--none</option></arg>
-    <arg><option>--force</option></arg>
-    <arg><option>--link</option></arg>
-    <arg><option>--manifest</option></arg>
-    <arg><option>--manifest-path</option> <replaceable>filename</replaceable></arg>
-    <arg><option>--url-prefix</option> <replaceable>url</replaceable></arg>
-    <arg><option>--key-file</option> <replaceable>path</replaceable></arg>
-    <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
-  </cmdsynopsis>
-</refsynopsisdiv>
-
-
-<refsection><title>Description</title>
-
-<para>The command <command>nix-push</command> produces a
-<emphasis>binary cache</emphasis>, a directory containing compressed
-Nix archives (NARs) plus some metadata of the closure of the specified
-store paths.  This directory can then be made available through a web
-server to other Nix installations, allowing them to skip building from
-source and instead download binaries from the cache
-automatically.</para>
-
-<para><command>nix-push</command> performs the following actions.
-
-<orderedlist>
-
-  <listitem><para>Each path in <replaceable>paths</replaceable> is
-  built (using <link
-  linkend='rsec-nix-store-realise'><command>nix-store
-  --realise</command></link>).</para></listitem>
-
-  <listitem><para>All paths in the closure of
-  <replaceable>paths</replaceable> are determined (using
-  <command>nix-store --query --requisites
-  --include-outputs</command>).  Note that since the
-  <option>--include-outputs</option> flag is used, if
-  <replaceable>paths</replaceable> includes a store derivation, you
-  get a combined source/binary distribution (e.g., source tarballs
-  will be included).</para></listitem>
-
-  <listitem><para>All store paths determined in the previous step are
-  packaged into a NAR (using <command>nix-store --dump</command>) and
-  compressed using <command>xz</command> or <command>bzip2</command>.
-  The resulting files have the extension <filename>.nar.xz</filename>
-  or <filename>.nar.bz2</filename>.  Also for each store path, Nix
-  generates a file with extension <filename>.narinfo</filename>
-  containing metadata such as the references, cryptographic hash and
-  size of each path.</para></listitem>
-
-  <listitem><para>Optionally, a single <emphasis>manifest</emphasis>
-  file is created that contains the same metadata as the
-  <filename>.narinfo</filename> files.  This is for compatibility with
-  Nix versions prior to 1.2.</para></listitem>
-
-  <listitem><para>A file named <option>nix-cache-info</option> is
-  placed in the destination directory.  The existence of this file
-  marks the directory as a binary cache.</para></listitem>
-
-</orderedlist>
-
-</para>
-
-</refsection>
-
-
-<refsection><title>Options</title>
-
-<variablelist>
-
-  <varlistentry><term><option>--dest</option> <replaceable>dest-dir</replaceable></term>
-
-    <listitem><para>Set the destination directory to
-    <replaceable>dir</replaceable>, which is created if it does not
-    exist.  This flag is required.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><option>--bzip2</option></term>
-
-    <listitem><para>Compress NARs using <command>bzip2</command>
-    instead of <command>xz</command>.  The latter compresses about 30%
-    better on typical archives, decompresses about twice as fast, but
-    compresses a lot slower and is not supported by Nix prior to
-    version 1.2.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><option>--none</option></term>
-
-    <listitem><para>Do not compress NARs.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><option>--force</option></term>
-
-    <listitem><para>Overwrite <filename>.narinfo</filename> files if
-    they already exist.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><option>--link</option></term>
-
-    <listitem><para>By default, NARs are generated in the Nix store
-    and then copied to <replaceable>dest-dir</replaceable>.  If this
-    option is given, hard links are used instead.  This only works if
-    <replaceable>dest-dir</replaceable> is on the same filesystem as
-    the Nix store.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><option>--manifest</option></term>
-
-    <listitem><para>Force the generation of a manifest suitable for
-    use by old versions of Nix.  The manifest is stored as
-    <filename><replaceable>dest-dir</replaceable>/MANIFEST</filename>.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><option>--manifest-path</option> <replaceable>filename</replaceable></term>
-
-    <listitem><para>Like <option>--manifest</option>, but store the
-    manifest in <replaceable>filename</replaceable>.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><option>--url-prefix</option> <replaceable>url</replaceable></term>
-
-    <listitem><para>Manifests are expected to contain the absolute
-    URLs of NARs.  For generating these URLs, the prefix
-    <replaceable>url</replaceable> is used.  It defaults to
-    <uri>file://<replaceable>dest-dir</replaceable></uri>.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><option>--key-file</option> <replaceable>path</replaceable></term>
-
-    <listitem><para>Sign the binary cache using the secret key stored
-    in <replaceable>path</replaceable>. This secret key must have been
-    created using <command
-    linkend="rsec-nix-store-generate-binary-cache-key">nix-store
-    --generate-binary-cache-key</command>. Users of this binary cache
-    should add the corresponding public key to the option
-    <option>binary-cache-public-keys</option> in
-    <filename>nix.conf</filename>.</para></listitem>
-
-  </varlistentry>
-
-</variablelist>
-
-</refsection>
-
-
-<refsection><title>Examples</title>
-
-<para>To add the closure of Thunderbird to a binary cache:
-
-<screen>
-$ nix-push --dest /tmp/cache $(nix-build -A thunderbird)
-</screen>
-
-Assuming that <filename>/tmp/cache</filename> is exported by a web
-server as <uri>http://example.org/cache</uri>, you can then use this
-cache on another machine to speed up the installation of Thunderbird:
-
-<screen>
-$ nix-build -A thunderbird --option binary-caches http://example.org/cache
-</screen>
-
-Alternatively, you could add <literal>binary-caches =
-http://example.org/cache</literal> to
-<filename>nix.conf</filename>.</para>
-
-<para>To also include build-time dependencies (such as source
-tarballs):
-
-<screen>
-$ nix-push --dest /tmp/cache $(nix-instantiate -A thunderbird)
-</screen>
-
-</para>
-
-<para>To generate a signed binary cache, you must first generate a key
-pair, in this example called <literal>cache.example.org-1</literal>,
-storing the secret key in <filename>./sk</filename> and the public key
-in <filename>./pk</filename>:
-
-<screen>
-$ nix-store --generate-binary-cache-key cache.example.org-1 sk pk
-
-$ cat sk
-cache.example.org-1:jcMRQYFo8pQKzTtimpQLIPeHkMYZjfhB24hGfwF+u9PuX8H8FO7q564+X3G/JDlqqIqGar3OXRRwS9N3Wh3vbw==
-
-$ cat pk
-cache.example.org-1:7l/B/BTu6ueuPl9xvyQ5aqiKhmq9zl0UcEvTd1od728=
-</screen>
-
-You can then generate a binary cache signed with the secret key:
-
-<screen>
-$ nix-push --dest /tmp/cache --key-file ./sk $(type -p firefox)
-</screen>
-
-Users who wish to verify the integrity of binaries downloaded from
-your cache would add the following to their
-<filename>nix.conf</filename>:
-
-<programlisting>
-binary-caches = http://cache.example.org
-signed-binary-caches = *
-binary-cache-public-keys = cache.example.org-1:7l/B/BTu6ueuPl9xvyQ5aqiKhmq9zl0UcEvTd1od728=
-</programlisting>
-
-Nix will then ignore any binary that has a missing, incorrect or
-unrecognised signature.</para>
-
-</refsection>
-
-
-<refsection><title>Binary cache format and operation</title>
-
-<para>A binary cache with URL <replaceable>url</replaceable> only
-denotes a valid binary cache if the file
-<uri><replaceable>url</replaceable>/nix-cache-info</uri> exists.  If
-this file does not exist (or cannot be downloaded), the cache is
-ignored.  If it does exist, it must be a text file containing cache
-properties.  Here’s an example:
-
-<screen>
-StoreDir: /nix/store
-WantMassQuery: 1
-Priority: 10
-</screen>
-
-The properties that are currently supported are:
-
-<variablelist>
-
-  <varlistentry><term><literal>StoreDir</literal></term>
-
-    <listitem><para>The path of the Nix store to which this binary
-    cache applies.  Binaries are not relocatable — a binary built for
-    <filename>/nix/store</filename> won’t generally work in
-    <filename>/home/alice/store</filename> — so to prevent binaries
-    from being used in a wrong store, a binary cache is only used if
-    its <literal>StoreDir</literal> matches the local Nix
-    configuration.  The default is
-    <filename>/nix/store</filename>.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>WantMassQuery</literal></term>
-
-    <listitem><para>Query operations such as <command>nix-env
-    -qas</command> can cause thousands of cache queries, and thus
-    thousands of HTTP requests, to determine which packages are
-    available in binary form.  While these requests are small, not
-    every server may appreciate a potential onslaught of queries.  If
-    <literal>WantMassQuery</literal> is set to <literal>0</literal>
-    (default), “mass queries” such as <command>nix-env -qas</command>
-    will skip this cache.  Thus a package may appear not to have a
-    binary substitute.  However, the binary will still be used when
-    you actually install the package.  If
-    <literal>WantMassQuery</literal> is set to <literal>1</literal>,
-    mass queries will use this cache.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>Priority</literal></term>
-
-    <listitem><para>Each binary cache has a priority (defaulting to
-    50).  Binary caches are checked for binaries in order of ascending
-    priority; thus a higher number denotes a lower priority.  The
-    binary cache <uri>https://cache.nixos.org</uri> has priority
-    40.</para></listitem>
-
-  </varlistentry>
-
-</variablelist>
-
-</para>
-
-<para>Every time Nix needs to build some store path
-<replaceable>p</replaceable>, it will check each configured binary
-cache to see if it has a NAR file for <replaceable>p</replaceable>,
-until it finds one.  If no cache has a NAR, Nix will fall back to
-building the path from source (if applicable).  To see if a cache with
-URL <replaceable>url</replaceable> has a binary for
-<replaceable>p</replaceable>, Nix fetches
-<replaceable>url/h</replaceable>, where <replaceable>h</replaceable>
-is the hash part of <replaceable>p</replaceable>.  Thus, if we have a
-cache <uri>https://cache.nixos.org</uri> and we want to obtain the
-store path
-<screen>
-/nix/store/a8922c0h87iilxzzvwn2hmv8x210aqb9-glibc-2.7
-</screen>
-then Nix will attempt to fetch
-<screen>
-https://cache.nixos.org/a8922c0h87iilxzzvwn2hmv8x210aqb9.narinfo
-</screen>
-(Commands such as <command>nix-env -qas</command> will issue an HTTP
-HEAD request, since it only needs to know if the
-<filename>.narinfo</filename> file exists.)  The
-<filename>.narinfo</filename> file is a simple text file that looks
-like this:
-
-<screen>
-StorePath: /nix/store/a8922c0h87iilxzzvwn2hmv8x210aqb9-glibc-2.7
-URL: nar/0zzjpdz46mdn74v09m053yczlz4am038g8r74iy8w43gx8801h70.nar.bz2
-Compression: bzip2
-FileHash: sha256:0zzjpdz46mdn74v09m053yczlz4am038g8r74iy8w43gx8801h70
-FileSize: 24473768
-NarHash: sha256:0s491y1h9hxj5ghiizlxk7ax6jwbha00zwn7lpyd5xg5bhf60vzg
-NarSize: 109521136
-References: 2ma2k0ys8knh4an48n28vigcmc2z8773-linux-headers-2.6.23.16 ...
-Deriver: 7akyyc87ka32xwmqza9dvyg5pwx3j212-glibc-2.7.drv
-Sig: cache.example.org-1:WepnSp2UT0odDpR3NRjPVhJBHmdBgSBSTbHpdh4SCz92nGXwFY82bkPEmISoC0hGqBXDXEmB6y3Ohgna3mMgDg==
-</screen>
-
-The fields are as follows:
-
-<variablelist>
-
-  <varlistentry><term><literal>StorePath</literal></term>
-
-    <listitem><para>The full store path, including the name part
-    (e.g., <literal>glibc-2.7</literal>).  It must match the
-    requested store path.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>URL</literal></term>
-
-    <listitem><para>The URL of the NAR, relative to the binary cache
-    URL.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>Compression</literal></term>
-
-    <listitem><para>The compression method; either
-    <literal>xz</literal> or
-    <literal>bzip2</literal>.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>FileHash</literal></term>
-
-    <listitem><para>The SHA-256 hash of the compressed
-    NAR.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>FileSize</literal></term>
-
-    <listitem><para>The size of the compressed NAR.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>NarHash</literal></term>
-
-    <listitem><para>The SHA-256 hash of the uncompressed NAR.  This is
-    equal to the hash of the store path as returned by
-    <command>nix-store -q --hash
-    <replaceable>p</replaceable></command>.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>NarSize</literal></term>
-
-    <listitem><para>The size of the uncompressed NAR.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>References</literal></term>
-
-    <listitem><para>The references of the store path, without the Nix
-    store prefix.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>Deriver</literal></term>
-
-    <listitem><para>The deriver of the store path, without the Nix
-    store prefix.  This field is optional.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>System</literal></term>
-
-    <listitem><para>The Nix platform type of this binary, if known.
-    This field is optional.</para></listitem>
-
-  </varlistentry>
-
-  <varlistentry><term><literal>Sig</literal></term>
-
-    <listitem><para>A signature of the the form
-    <literal><replaceable>key-name</replaceable>:<replaceable>sig</replaceable></literal>,
-    where <replaceable>key-name</replaceable> is the symbolic name of
-    the key pair used to sign and verify the cache
-    (e.g. <literal>cache.example.org-1</literal>), and
-    <replaceable>sig</replaceable> is the actual signature, computed
-    over the <varname>StorePath</varname>, <varname>NarHash</varname>,
-    <varname>NarSize</varname> and <varname>References</varname>
-    fields using the <link
-    xlink:href="http://ed25519.cr.yp.to/">Ed25519 public-key signature
-    system</link>.</para></listitem>
-
-  </varlistentry>
-
-</variablelist>
-
-</para>
-
-<para>Thus, in our example, after recursively ensuring that the
-references exist (e.g.,
-<filename>/nix/store/2ma2k0ys8knh4an48n28vigcmc2z8773-linux-headers-2.6.23.16</filename>),
-Nix will fetch <screen>
-https://cache.nixos.org/nar/0zzjpdz46mdn74v09m053yczlz4am038g8r74iy8w43gx8801h70.nar.bz2
-</screen> and decompress and unpack it to
-<filename>/nix/store/a8922c0h87iilxzzvwn2hmv8x210aqb9-glibc-2.7</filename>.</para>
-
-</refsection>
-
-
-</refentry>
diff --git a/doc/manual/command-ref/nix-store.xml b/doc/manual/command-ref/nix-store.xml
index 346718588b77..0f6172defb38 100644
--- a/doc/manual/command-ref/nix-store.xml
+++ b/doc/manual/command-ref/nix-store.xml
@@ -1390,8 +1390,7 @@ parameters:
 
 </orderedlist>
 
-For an example, see the manual page for <command
-linkend="sec-nix-push">nix-push</command>.</para>
+</para>
 
 </refsection>
 
diff --git a/doc/manual/command-ref/utilities.xml b/doc/manual/command-ref/utilities.xml
index 25e457e4e554..893f5b5b5260 100644
--- a/doc/manual/command-ref/utilities.xml
+++ b/doc/manual/command-ref/utilities.xml
@@ -14,9 +14,7 @@ work with Nix.</para>
 <xi:include href="nix-copy-closure.xml" />
 <xi:include href="nix-daemon.xml" />
 <xi:include href="nix-hash.xml" />
-<xi:include href="nix-install-package.xml" />
 <xi:include href="nix-instantiate.xml" />
 <xi:include href="nix-prefetch-url.xml" />
-<xi:include href="nix-push.xml" />
 
 </chapter>
diff --git a/doc/manual/expressions/builtins.xml b/doc/manual/expressions/builtins.xml
index eae5f5a029bf..9517f20106ef 100644
--- a/doc/manual/expressions/builtins.xml
+++ b/doc/manual/expressions/builtins.xml
@@ -142,7 +142,7 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
     evaluates to the Nix platform identifier for the Nix installation
     on which the expression is being evaluated, such as
     <literal>"i686-linux"</literal> or
-    <literal>"powerpc-darwin"</literal>.</para></listitem>
+    <literal>"x86_64-darwin"</literal>.</para></listitem>
 
   </varlistentry>
 
@@ -853,7 +853,14 @@ builtins.sort builtins.lessThan [ 483 249 526 147 42 77 ]
     len</replaceable> lies beyond the end of the string, only the
     substring up to the end of the string is returned.
     <replaceable>start</replaceable> must be
-    non-negative.</para></listitem>
+    non-negative. For example,
+
+<programlisting>
+builtins.substring 0 3 "nixos"
+</programlisting>
+
+   evaluates to <literal>"nix"</literal>.
+   </para></listitem>
 
   </varlistentry>
 
@@ -988,9 +995,9 @@ in foo</programlisting>
     <listitem><para>Convert the expression
     <replaceable>e</replaceable> to a string.
     <replaceable>e</replaceable> can be a string (in which case
-    <function>toString</function> is a no-op) or a path (e.g.,
+    <function>toString</function> is a no-op), a path (e.g.,
     <literal>toString /foo/bar</literal> yields
-    <literal>"/foo/bar"</literal>.</para></listitem>
+    <literal>"/foo/bar"</literal> or a set containing <literal>{ __toString = self: ...; }</literal>.</para></listitem>
 
   </varlistentry>
 
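A small sketch of the __toString form mentioned in the toString entry above; the attribute names other than __toString are made up for illustration:

    let pkg = { name = "hello"; version = "1.0";
                __toString = self: "${self.name}-${self.version}"; };
    in "installing ${toString pkg}"

evaluates to "installing hello-1.0".
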
diff --git a/doc/manual/expressions/derivations.xml b/doc/manual/expressions/derivations.xml
index f2a73dccfe18..5efe2213e370 100644
--- a/doc/manual/expressions/derivations.xml
+++ b/doc/manual/expressions/derivations.xml
@@ -16,7 +16,7 @@ of which specify the inputs of the build.</para>
   <listitem xml:id="attr-system"><para>There must be an attribute named
   <varname>system</varname> whose value must be a string specifying a
   Nix platform identifier, such as <literal>"i686-linux"</literal> or
-  <literal>"powerpc-darwin"</literal><footnote><para>To figure out
+  <literal>"x86_64-darwin"</literal><footnote><para>To figure out
   your platform identifier, look at the line <quote>Checking for the
   canonical Nix system name</quote> in the output of Nix's
   <filename>configure</filename> script.</para></footnote> The build
diff --git a/doc/manual/expressions/language-constructs.xml b/doc/manual/expressions/language-constructs.xml
index 7535e64ea9a5..fe69dba837a1 100644
--- a/doc/manual/expressions/language-constructs.xml
+++ b/doc/manual/expressions/language-constructs.xml
@@ -202,24 +202,6 @@ in concat { x = "foo"; y = "bar"; }</programlisting>
 
 </para>
 
-<para>A set that has a <literal>__functor</literal> attribute whose value
-is callable (i.e. is itself a function or a set with a
-<literal>__functor</literal> attribute whose value is callable) can be
-applied as if it were a function, with the set itself passed in first
-, e.g.,
-
-<programlisting>
-let add = { __functor = self: x: x + self.x; };
-    inc = add // { x = 1; };
-in inc 1
-</programlisting>
-
-evaluates to <literal>2</literal>. This can be used to attach metadata to a
-function without the caller needing to treat it specially, or to implement
-a form of object-oriented programming, for example.
-
-</para>
-
 </simplesect>
 
 
diff --git a/doc/manual/expressions/language-values.xml b/doc/manual/expressions/language-values.xml
index f1174ecb5d8d..b90baac5054c 100644
--- a/doc/manual/expressions/language-values.xml
+++ b/doc/manual/expressions/language-values.xml
@@ -276,6 +276,23 @@ added to the set:
 This will evaluate to <literal>{}</literal> if <literal>foo</literal>
 evaluates to <literal>false</literal>.</para>
 
+<para>A set that has a <literal>__functor</literal> attribute whose value
+is callable (i.e. is itself a function or a set with a
+<literal>__functor</literal> attribute whose value is callable) can be
+applied as if it were a function, with the set itself passed in first
+, e.g.,
+
+<programlisting>
+let add = { __functor = self: x: x + self.x; };
+    inc = add // { x = 1; };
+in inc 1
+</programlisting>
+
+evaluates to <literal>2</literal>. This can be used to attach metadata to a
+function without the caller needing to treat it specially, or to implement
+a form of object-oriented programming, for example.
+
+</para>
 
 </simplesect>
 
diff --git a/doc/manual/introduction/quick-start.xml b/doc/manual/introduction/quick-start.xml
index 0d13651e0ab3..5ae9f6ad543b 100644
--- a/doc/manual/introduction/quick-start.xml
+++ b/doc/manual/introduction/quick-start.xml
@@ -95,18 +95,6 @@ The latter command will upgrade each installed package for which there
 is a “newer” version (as determined by comparing the version
 numbers).</para></step>
 
-<!--
-<step><para>You can also install specific packages directly from
-your web browser.  For instance, you can go to <link
-xlink:href="http://hydra.nixos.org/jobset/nixpkgs/trunk/channel/latest"
-/> and click on any link for the individual packages for your
-platform.  Associate <literal>application/nix-package</literal> with
-the program <command>nix-install-package</command>.  A window should
-appear asking you whether it’s okay to install the package.  Say
-<literal>Y</literal>.  The package and all its dependencies will be
-installed.</para></step>
--->
-
 <step><para>If you're unhappy with the result of a
 <command>nix-env</command> action (e.g., an upgraded package turned
 out not to work properly), you can go back:
diff --git a/doc/manual/local.mk b/doc/manual/local.mk
index 3c4fc52dfd67..4376c3644d38 100644
--- a/doc/manual/local.mk
+++ b/doc/manual/local.mk
@@ -1,3 +1,6 @@
+
+ifeq ($(doc_generate),yes)
+
 XSLTPROC = $(xsltproc) --nonet $(xmlflags) \
   --param section.autolabel 1 \
   --param section.label.includes.component.label 1 \
@@ -39,9 +42,9 @@ dist-files += $(d)/manual.xmli $(d)/version.txt $(d)/manual.is-valid
 # Generate man pages.
 man-pages := $(foreach n, \
   nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 \
-  nix-collect-garbage.1 nix-push.1 \
+  nix-collect-garbage.1 \
   nix-prefetch-url.1 nix-channel.1 \
-  nix-install-package.1 nix-hash.1 nix-copy-closure.1 \
+  nix-hash.1 nix-copy-closure.1 \
   nix.conf.5 nix-daemon.8, \
   $(d)/$(n))
 
@@ -71,8 +74,14 @@ $(foreach file, $(wildcard $(d)/images/callouts/*.gif), $(eval $(call install-da
 
 $(eval $(call install-symlink, manual.html, $(docdir)/manual/index.html))
 
+
 all: $(d)/manual.html
 
+
+
 clean-files += $(d)/manual.html
 
 dist-files += $(d)/manual.html
+
+
+endif
diff --git a/doc/manual/packages/one-click.xml b/doc/manual/packages/one-click.xml
deleted file mode 100644
index cef9a2bbff4e..000000000000
--- a/doc/manual/packages/one-click.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<chapter xmlns="http://docbook.org/ns/docbook"
-      xmlns:xlink="http://www.w3.org/1999/xlink"
-      xmlns:xi="http://www.w3.org/2001/XInclude"
-      version="5.0"
-      xml:id="sec-one-click">
-
-<title>One-Click Installation</title>
-
-<para>Often, when you want to install a specific package (e.g., from
-the <link
-xlink:href="http://nixos.org/nixpkgs/">Nix
-Packages collection</link>), subscribing to a channel is a bit
-cumbersome.  And channels don’t help you at all if you want to install
-an older version of a package than the one provided by the current
-contents of the channel, or a package that has been removed from the
-channel.  That’s when <emphasis>one-click installs</emphasis> come in
-handy: you can just go to the web page that contains the package,
-click on it, and it will be installed with all the necessary
-dependencies.</para>
-
-<para>For instance, you can go to <link
-xlink:href="http://hydra.nixos.org/jobset/nixpkgs/trunk/channel/latest"
-/> and click on any link for the individual packages for your
-platform.  The first time you do this, your browser will ask what to
-do with <literal>application/nix-package</literal> files.  You should
-open them with <filename>/nix/bin/nix-install-package</filename>.
-This will open a window that asks you to confirm that you want to
-install the package.  When you answer <literal>Y</literal>, the
-package and all its dependencies will be installed.  This is a binary
-deployment mechanism — you get packages pre-compiled for the selected
-platform type.</para>
-
-<para>You can also install <literal>application/nix-package</literal>
-files from the command line directly.  See <xref
-linkend='sec-nix-install-package' /> for details.</para>
-
-</chapter>
\ No newline at end of file
diff --git a/doc/manual/packages/package-management.xml b/doc/manual/packages/package-management.xml
index 5cc5c381bb43..61e55faeb311 100644
--- a/doc/manual/packages/package-management.xml
+++ b/doc/manual/packages/package-management.xml
@@ -18,7 +18,6 @@ who want to <emphasis>create</emphasis> packages should consult
 <xi:include href="profiles.xml" />
 <xi:include href="garbage-collection.xml" />
 <xi:include href="channels.xml" />
-<xi:include href="one-click.xml" />
 <xi:include href="sharing-packages.xml" />
 
 </part>
diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl
new file mode 100755
index 000000000000..6c9a724dd6a8
--- /dev/null
+++ b/maintainers/upload-release.pl
@@ -0,0 +1,148 @@
+#! /usr/bin/env nix-shell
+#! nix-shell -i perl -p perl perlPackages.LWPUserAgent perlPackages.LWPProtocolHttps perlPackages.FileSlurp gnupg1
+
+use strict;
+use Data::Dumper;
+use File::Basename;
+use File::Path;
+use File::Slurp;
+use JSON::PP;
+use LWP::UserAgent;
+
+my $evalId = $ARGV[0] or die "Usage: $0 EVAL-ID\n";
+
+my $releasesDir = "/home/eelco/mnt/releases";
+my $nixpkgsDir = "/home/eelco/Dev/nixpkgs-pristine";
+
+# FIXME: cut&paste from nixos-channel-scripts.
+sub fetch {
+    my ($url, $type) = @_;
+
+    my $ua = LWP::UserAgent->new;
+    $ua->default_header('Accept', $type) if defined $type;
+
+    my $response = $ua->get($url);
+    die "could not download $url: ", $response->status_line, "\n" unless $response->is_success;
+
+    return $response->decoded_content;
+}
+
+my $evalUrl = "https://hydra.nixos.org/eval/$evalId";
+my $evalInfo = decode_json(fetch($evalUrl, 'application/json'));
+#print Dumper($evalInfo);
+
+my $nixRev = $evalInfo->{jobsetevalinputs}->{nix}->{revision} or die;
+
+my $tarballInfo = decode_json(fetch("$evalUrl/job/tarball", 'application/json'));
+
+my $releaseName = $tarballInfo->{releasename};
+$releaseName =~ /nix-(.*)$/ or die;
+my $version = $1;
+
+print STDERR "Nix revision is $nixRev, version is $version\n";
+
+File::Path::make_path($releasesDir);
+if (system("mountpoint -q $releasesDir") != 0) {
+    system("sshfs hydra-mirror:/releases $releasesDir") == 0 or die;
+}
+
+my $releaseDir = "$releasesDir/nix/$releaseName";
+File::Path::make_path($releaseDir);
+
+sub downloadFile {
+    my ($jobName, $productNr, $dstName) = @_;
+
+    my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json'));
+
+    my $srcFile = $buildInfo->{buildproducts}->{$productNr}->{path} or die;
+    $dstName //= basename($srcFile);
+    my $dstFile = "$releaseDir/" . $dstName;
+
+    if (! -e $dstFile) {
+        print STDERR "downloading $srcFile to $dstFile...\n";
+        system("NIX_REMOTE=https://cache.nixos.org/ nix cat-store '$srcFile' > '$dstFile.tmp'") == 0
+            or die "unable to fetch $srcFile\n";
+        rename("$dstFile.tmp", $dstFile) or die;
+    }
+
+    my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash} or die;
+    my $sha256_actual = `nix hash-file --type sha256 '$dstFile'`;
+    chomp $sha256_actual;
+    if ($sha256_expected ne $sha256_actual) {
+        print STDERR "file $dstFile is corrupt\n";
+        exit 1;
+    }
+
+    write_file("$dstFile.sha256", $sha256_expected);
+
+    return ($dstFile, $sha256_expected);
+}
+
+downloadFile("tarball", "2"); # PDF
+downloadFile("tarball", "3"); # .tar.bz2
+my ($tarball, $tarballHash) = downloadFile("tarball", "4"); # .tar.xz
+my ($tarball_i686_linux, $tarball_i686_linux_hash) = downloadFile("binaryTarball.i686-linux", "1");
+my ($tarball_x86_64_linux, $tarball_x86_64_linux_hash) = downloadFile("binaryTarball.x86_64-linux", "1");
+my ($tarball_x86_64_darwin, $tarball_x86_64_darwin_hash) = downloadFile("binaryTarball.x86_64-darwin", "1");
+
+# Update Nixpkgs in a very hacky way.
+my $oldName = `nix-instantiate --eval $nixpkgsDir -A nix.name`; chomp $oldName;
+my $oldHash = `nix-instantiate --eval $nixpkgsDir -A nix.src.outputHash`; chomp $oldHash;
+print STDERR "old stable version in Nixpkgs = $oldName / $oldHash\n";
+
+my $fn = "$nixpkgsDir/pkgs/tools/package-management/nix/default.nix";
+my $oldFile = read_file($fn);
+$oldFile =~ s/$oldName/"$releaseName"/g;
+$oldFile =~ s/$oldHash/"$tarballHash"/g;
+write_file($fn, $oldFile);
+
+$oldName =~ s/nix-//g;
+$oldName =~ s/"//g;
+
+sub getStorePath {
+    my ($jobName) = @_;
+    my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json'));
+    die unless $buildInfo->{buildproducts}->{1}->{type} eq "nix-build";
+    return $buildInfo->{buildproducts}->{1}->{path};
+}
+
+write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
+           "{\n" .
+           "  x86_64-linux = \"" . getStorePath("build.x86_64-linux") . "\";\n" .
+           "  i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
+           "  x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
+           "}\n");
+
+system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die;
+
+# Extract the HTML manual.
+File::Path::make_path("$releaseDir/manual");
+
+system("tar xvf $tarball --strip-components=3 -C $releaseDir/manual --wildcards '*/doc/manual/*.html' '*/doc/manual/*.css' '*/doc/manual/*.gif' '*/doc/manual/*.png'") == 0 or die;
+
+if (! -e "$releaseDir/manual/index.html") {
+    symlink("manual.html", "$releaseDir/manual/index.html") or die;
+}
+
+# Update the "latest" symlink.
+symlink("$releaseName", "$releasesDir/nix/latest-tmp") or die;
+rename("$releasesDir/nix/latest-tmp", "$releasesDir/nix/latest") or die;
+
+# Tag the release in Git.
+chdir("/home/eelco/Dev/nix-pristine") or die;
+system("git remote update origin") == 0 or die;
+system("git tag --force --sign $version $nixRev -m 'Tagging release $version'") == 0 or die;
+
+# Update the website.
+my $siteDir = "/home/eelco/Dev/nixos-homepage-pristine";
+write_file("$siteDir/nix-release.tt",
+           "[%-\n" .
+           "latestNixVersion = \"$version\"\n" .
+           "nix_hash_i686_linux = \"$tarball_i686_linux_hash\"\n" .
+           "nix_hash_x86_64_linux = \"$tarball_x86_64_linux_hash\"\n" .
+           "nix_hash_x86_64_darwin = \"$tarball_x86_64_darwin_hash\"\n" .
+           "-%]\n");
+
+system("cd $siteDir && nix-shell --run 'make nix/install nix/install.sig'") == 0 or die;
+
+system("cd $siteDir && git commit -a -m 'Nix $version released'") == 0 or die;
diff --git a/misc/docker/Dockerfile b/misc/docker/Dockerfile
index 098633fc3c86..20171d250207 100644
--- a/misc/docker/Dockerfile
+++ b/misc/docker/Dockerfile
@@ -5,7 +5,9 @@ RUN wget -O- http://nixos.org/releases/nix/nix-1.11.2/nix-1.11.2-x86_64-linux.ta
     && for i in $(seq 1 30); do echo "nixbld$i:x:$((30000 + $i)):30000:::" >> /etc/passwd; done \
     && mkdir -m 0755 /nix && USER=root sh nix-*-x86_64-linux/install \
     && echo ". /root/.nix-profile/etc/profile.d/nix.sh" >> /etc/profile \
-    && rm -r /nix-*-x86_64-linux
+    && rm -r /nix-*-x86_64-linux \
+    && apk --update add bash tar \
+    && rm -rf /var/cache/apk/*
 
 ONBUILD ENV \
     ENV=/etc/profile \
diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in
index 66fcd155ee9b..0dd665db635f 100644
--- a/misc/launchd/org.nixos.nix-daemon.plist.in
+++ b/misc/launchd/org.nixos.nix-daemon.plist.in
@@ -12,5 +12,10 @@
     <string>/var/log/nix-daemon.log</string>
     <key>StandardOutPath</key>
     <string>/dev/null</string>
+    <key>EnvironmentVariables</key>
+    <dict>
+      <key>SSL_CERT_FILE</key>
+      <string>/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt</string>
+    </dict>
   </dict>
 </plist>
diff --git a/misc/systemd/nix-daemon.service.in b/misc/systemd/nix-daemon.service.in
index 5fc04a3f5713..fcd799e177d0 100644
--- a/misc/systemd/nix-daemon.service.in
+++ b/misc/systemd/nix-daemon.service.in
@@ -7,3 +7,4 @@ ConditionPathIsReadWrite=@localstatedir@/nix/daemon-socket
 [Service]
 ExecStart=@@bindir@/nix-daemon nix-daemon --daemon
 KillMode=process
+Environment=XDG_CACHE_HOME=/root/.cache
diff --git a/nix.spec.in b/nix.spec.in
index 61da404a3844..edbc12d8f05c 100644
--- a/nix.spec.in
+++ b/nix.spec.in
@@ -10,7 +10,7 @@ License: LGPLv2+
 Group: Applications/System
 %endif
 URL: http://nixos.org/
-Source0: %{name}-%{version}.tar.xz
+Source0: %{name}-%{version}.tar.bz2
 %if 0%{?el5}
 BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
 %endif
diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs
index 697fd79f9b09..6b137a13c414 100644
--- a/perl/lib/Nix/Store.xs
+++ b/perl/lib/Nix/Store.xs
@@ -288,8 +288,8 @@ SV * makeFixedOutputPath(int recursive, char * algo, char * hash, char * name)
     PPCODE:
         try {
             HashType ht = parseHashType(algo);
-            Path path = store()->makeFixedOutputPath(recursive, ht,
-                parseHash16or32(ht, hash), name);
+            Hash h = parseHash16or32(ht, hash);
+            Path path = store()->makeFixedOutputPath(recursive, h, name);
             XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0)));
         } catch (Error & e) {
             croak("%s", e.what());
diff --git a/release.nix b/release.nix
index 1da34443dd4f..d236cdae9bf0 100644
--- a/release.nix
+++ b/release.nix
@@ -172,10 +172,6 @@ let
       };
 
 
-    rpm_fedora19i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora19i386) [];
-    rpm_fedora19x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora19x86_64) [];
-    rpm_fedora20i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora20i386) [];
-    rpm_fedora20x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora20x86_64) [];
     rpm_fedora21i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora21i386) [ "libsodium-devel" ];
     rpm_fedora21x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora21x86_64) [ "libsodium-devel" ];
 
@@ -183,10 +179,6 @@ let
     deb_debian8i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian8i386) [ "libsodium-dev" ] [ "libsodium13" ];
     deb_debian8x86_64 = makeDeb_x86_64 (diskImageFunsFun: diskImageFunsFun.debian8x86_64) [ "libsodium-dev" ] [ "libsodium13" ];
 
-    deb_ubuntu1310i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1310i386) [] [];
-    deb_ubuntu1310x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1310x86_64) [] [];
-    deb_ubuntu1404i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1404i386) [] [];
-    deb_ubuntu1404x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1404x86_64) [] [];
     deb_ubuntu1410i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1410i386) [] [];
     deb_ubuntu1410x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1410x86_64) [] [];
     deb_ubuntu1504i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1504i386) [ "libsodium-dev" ] [ "libsodium13" ];
@@ -236,7 +228,6 @@ let
     tests.evalNixOS =
       pkgs.runCommand "eval-nixos" { buildInputs = [ build.x86_64-linux ]; }
         ''
-          export NIX_DB_DIR=$TMPDIR
           export NIX_STATE_DIR=$TMPDIR
           nix-store --init
 
@@ -264,12 +255,8 @@ let
           binaryTarball.x86_64-linux
           deb_debian8i386
           deb_debian8x86_64
-          deb_ubuntu1404i386 # LTS
-          deb_ubuntu1404x86_64 # LTS
           deb_ubuntu1504i386
           deb_ubuntu1504x86_64
-          rpm_fedora20i386
-          rpm_fedora20x86_64
           rpm_fedora21i386
           rpm_fedora21x86_64
           tests.remoteBuilds
diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in
deleted file mode 100644
index a4f858610aca..000000000000
--- a/scripts/download-from-binary-cache.pl.in
+++ /dev/null
@@ -1,632 +0,0 @@
-#! @perl@ -w @perlFlags@
-
-use utf8;
-use DBI;
-use DBD::SQLite;
-use File::Basename;
-use IO::Select;
-use Nix::Config;
-use Nix::Store;
-use Nix::Utils;
-use Nix::Manifest;
-use WWW::Curl::Easy;
-use WWW::Curl::Multi;
-use strict;
-
-STDERR->autoflush(1);
-binmode STDERR, ":encoding(utf8)";
-
-Nix::Config::readConfig;
-
-my @caches;
-my $gotCaches = 0;
-
-my $maxParallelRequests = int($Nix::Config::config{"binary-caches-parallel-connections"} // 25);
-$maxParallelRequests = 1 if $maxParallelRequests < 1;
-
-my $ttlNegative = 24 * 3600; # when to purge negative lookups from the database
-my $ttlNegativeUse = 3600; # how long negative lookups are valid for non-"have" lookups
-my $didExpiration = 0;
-
-my $showAfter = 5; # show that we're waiting for a request after this many seconds
-
-my $debug = ($Nix::Config::config{"debug-subst"} // "") eq 1 || ($Nix::Config::config{"untrusted-debug-subst"} // "") eq 1;
-
-my $cacheFileURLs = ($ENV{"_NIX_CACHE_FILE_URLS"} // "") eq 1; # for testing
-
-my ($dbh, $queryCache, $insertNAR, $queryNAR, $insertNARExistence, $queryNARExistence, $expireNARExistence);
-
-my $curlm = WWW::Curl::Multi->new;
-my $activeRequests = 0;
-my $curlIdCount = 1;
-my %requests;
-my %scheduled;
-my $caBundle = $ENV{"SSL_CERT_FILE"} // $ENV{"CURL_CA_BUNDLE"} // $ENV{"OPENSSL_X509_CERT_FILE"};
-$caBundle = "/etc/ssl/certs/ca-bundle.crt" if !$caBundle && -f "/etc/ssl/certs/ca-bundle.crt";
-$caBundle = "/etc/ssl/certs/ca-certificates.crt" if !$caBundle && -f "/etc/ssl/certs/ca-certificates.crt";
-
-my $userName = getpwuid($<) || $ENV{"USER"} or die "cannot figure out user name";
-
-my $userAgent = "Nix/$Nix::Config::version";
-
-sub isTrue {
-    my ($x) = @_;
-    return $x eq "true" || $x eq "1";
-}
-
-# FIXME: this should be cache URLs required to have valid signatures,
-# or "*" to require signatures on all binary caches.
-# FIXME: should binary caches using a key in
-# ‘binary-cache-public-keys’ be trusted by default?
-my $requireSignedBinaryCaches = ($Nix::Config::config{"signed-binary-caches"} // "0") ne "0";
-
-my $curlConnectTimeout = int(
-    $Nix::Config::config{"untrusted-connect-timeout"} //
-    $Nix::Config::config{"connect-timeout"} //
-    $ENV{"NIX_CONNECT_TIMEOUT"} // 0);
-
-
-sub addRequest {
-    my ($storePath, $url, $head) = @_;
-
-    my $curl = WWW::Curl::Easy->new;
-    my $curlId = $curlIdCount++;
-    $requests{$curlId} = { storePath => $storePath, url => $url, handle => $curl, content => "", type => $head ? "HEAD" : "GET"
-                         , shown => 0, started => time() };
-
-    $curl->setopt(CURLOPT_PRIVATE, $curlId);
-    $curl->setopt(CURLOPT_URL, $url);
-    open (my $fh, ">", \$requests{$curlId}->{content});
-    $curl->setopt(CURLOPT_WRITEDATA, $fh);
-    $curl->setopt(CURLOPT_FOLLOWLOCATION, 1);
-    $curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle;
-
-    unless (isTrue($Nix::Config::config{"verify-https-binary-caches"} // "1")) {
-        $curl->setopt(CURLOPT_SSL_VERIFYPEER, 0);
-        $curl->setopt(CURLOPT_SSL_VERIFYHOST, 0);
-    }
-
-    $curl->setopt(CURLOPT_USERAGENT, $userAgent);
-    $curl->setopt(CURLOPT_NOBODY, 1) if $head;
-    $curl->setopt(CURLOPT_FAILONERROR, 1);
-    $curl->setopt(CURLOPT_CONNECTTIMEOUT, $curlConnectTimeout);
-    $curl->setopt(CURLOPT_TIMEOUT, 20 * 60);
-
-    if ($activeRequests >= $maxParallelRequests) {
-        $scheduled{$curlId} = 1;
-    } else {
-        $curlm->add_handle($curl);
-        $activeRequests++;
-    }
-
-    return $requests{$curlId};
-}
-
-
-sub processRequests {
-    while ($activeRequests) {
-        my ($rfds, $wfds, $efds) = $curlm->fdset();
-        #print STDERR "R = @{$rfds}, W = @{$wfds}, E = @{$efds}\n";
-
-        # Sleep until we can read or write some data.
-        if (scalar @{$rfds} + scalar @{$wfds} + scalar @{$efds} > 0) {
-            IO::Select->select(IO::Select->new(@{$rfds}), IO::Select->new(@{$wfds}), IO::Select->new(@{$efds}), 1.0);
-        }
-
-        if ($curlm->perform() != $activeRequests) {
-            while (my ($id, $result) = $curlm->info_read) {
-                if ($id) {
-                    my $request = $requests{$id} or die;
-                    my $handle = $request->{handle};
-                    $request->{result} = $result;
-                    $request->{httpStatus} = $handle->getinfo(CURLINFO_RESPONSE_CODE);
-
-                    print STDERR "$request->{type} on $request->{url} [$request->{result}, $request->{httpStatus}]\n" if $debug;
-
-                    $activeRequests--;
-                    delete $request->{handle};
-
-                    if (scalar(keys %scheduled) > 0) {
-                        my $id2 = (keys %scheduled)[0];
-                        $curlm->add_handle($requests{$id2}->{handle});
-                        $activeRequests++;
-                        delete $scheduled{$id2};
-                    }
-                }
-            }
-        }
-
-        my $time = time();
-        while (my ($key, $request) = each %requests) {
-            next unless defined $request->{handle};
-            next if $request->{shown};
-            if ($time > $request->{started} + $showAfter) {
-                print STDERR "still waiting for ‘$request->{url}’ after $showAfter seconds...\n";
-                $request->{shown} = 1;
-            }
-        }
-    }
-}
-
-
-sub initCache {
-    my $dbPath = "$Nix::Config::stateDir/binary-cache-v3.sqlite";
-
-    unlink "$Nix::Config::stateDir/binary-cache-v1.sqlite";
-    unlink "$Nix::Config::stateDir/binary-cache-v2.sqlite";
-
-    # Open/create the database.
-    $dbh = DBI->connect("dbi:SQLite:dbname=$dbPath", "", "")
-        or die "cannot open database ‘$dbPath’";
-    $dbh->{RaiseError} = 1;
-    $dbh->{PrintError} = 0;
-
-    $dbh->sqlite_busy_timeout(60 * 60 * 1000);
-
-    $dbh->do("pragma synchronous = off"); # we can always reproduce the cache
-    $dbh->do("pragma journal_mode = truncate");
-
-    # Initialise the database schema, if necessary.
-    $dbh->do(<<EOF);
-        create table if not exists BinaryCaches (
-            id        integer primary key autoincrement not null,
-            url       text unique not null,
-            timestamp integer not null,
-            storeDir  text not null,
-            wantMassQuery integer not null,
-            priority  integer not null
-        );
-EOF
-
-    $dbh->do(<<EOF);
-        create table if not exists NARs (
-            cache            integer not null,
-            storePath        text not null,
-            url              text not null,
-            compression      text not null,
-            fileHash         text,
-            fileSize         integer,
-            narHash          text,
-            narSize          integer,
-            refs             text,
-            deriver          text,
-            signedBy         text,
-            timestamp        integer not null,
-            primary key (cache, storePath),
-            foreign key (cache) references BinaryCaches(id) on delete cascade
-        );
-EOF
-
-    $dbh->do(<<EOF);
-        create table if not exists NARExistence (
-            cache            integer not null,
-            storePath        text not null,
-            exist            integer not null,
-            timestamp        integer not null,
-            primary key (cache, storePath),
-            foreign key (cache) references BinaryCaches(id) on delete cascade
-        );
-EOF
-
-    $dbh->do("create index if not exists NARExistenceByExistTimestamp on NARExistence (exist, timestamp)");
-
-    $queryCache = $dbh->prepare("select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?") or die;
-
-    $insertNAR = $dbh->prepare(
-        "insert or replace into NARs(cache, storePath, url, compression, fileHash, fileSize, narHash, " .
-        "narSize, refs, deriver, signedBy, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die;
-
-    $queryNAR = $dbh->prepare("select * from NARs where cache = ? and storePath = ?") or die;
-
-    $insertNARExistence = $dbh->prepare(
-        "insert or replace into NARExistence(cache, storePath, exist, timestamp) values (?, ?, ?, ?)") or die;
-
-    $queryNARExistence = $dbh->prepare("select exist, timestamp from NARExistence where cache = ? and storePath = ?") or die;
-
-    $expireNARExistence = $dbh->prepare("delete from NARExistence where exist = ? and timestamp < ?") or die;
-}
-
-
-sub getAvailableCaches {
-    return if $gotCaches;
-    $gotCaches = 1;
-
-    sub strToList {
-        my ($s) = @_;
-        return map { s/\/+$//; $_ } split(/ /, $s);
-    }
-
-    my @urls = strToList($Nix::Config::config{"binary-caches"} //
-        ($Nix::Config::storeDir eq "/nix/store" ? "https://cache.nixos.org" : ""));
-
-    my $urlsFiles = $Nix::Config::config{"binary-cache-files"}
-        // "$Nix::Config::stateDir/profiles/per-user/$userName/channels/binary-caches/*";
-    foreach my $urlFile (glob $urlsFiles) {
-        next unless -f $urlFile;
-        open FILE, "<$urlFile" or die "cannot open ‘$urlFile’\n";
-        my $url = <FILE>; chomp $url;
-        close FILE;
-        push @urls, strToList($url);
-    }
-
-    push @urls, strToList($Nix::Config::config{"extra-binary-caches"} // "");
-
-    # Allow Nix daemon users to override the binary caches to a subset
-    # of those listed in the config file.  Note that ‘untrusted-*’
-    # denotes options passed by the client.
-    my @trustedUrls = uniq(@urls, strToList($Nix::Config::config{"trusted-binary-caches"} // ""));
-
-    if (defined $Nix::Config::config{"untrusted-binary-caches"}) {
-        my @untrustedUrls = strToList $Nix::Config::config{"untrusted-binary-caches"};
-        @urls = ();
-        foreach my $url (@untrustedUrls) {
-            die "binary cache ‘$url’ is not trusted (please add it to ‘trusted-binary-caches’ in $Nix::Config::confDir/nix.conf)\n"
-                unless scalar(grep { $url eq $_ } @trustedUrls) > 0;
-            push @urls, $url;
-        }
-    }
-
-    my @untrustedUrls = strToList $Nix::Config::config{"untrusted-extra-binary-caches"} // "";
-    foreach my $url (@untrustedUrls) {
-        unless (scalar(grep { $url eq $_ } @trustedUrls) > 0) {
-            warn "binary cache ‘$url’ is not trusted (please add it to ‘trusted-binary-caches’ in $Nix::Config::confDir/nix.conf)\n";
-            next;
-        }
-        push @urls, $url;
-    }
-
-    foreach my $url (uniq @urls) {
-
-        # FIXME: not atomic.
-        $queryCache->execute($url);
-        my $res = $queryCache->fetchrow_hashref();
-        if (defined $res) {
-            next if $res->{storeDir} ne $Nix::Config::storeDir;
-            push @caches, { id => $res->{id}, url => $url, wantMassQuery => $res->{wantMassQuery}, priority => $res->{priority} };
-            next;
-        }
-
-        # Get the cache info file.
-        my $request = addRequest(undef, $url . "/nix-cache-info");
-        processRequests;
-
-        if ($request->{result} != 0) {
-            print STDERR "could not download ‘$request->{url}’ (" .
-                ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n";
-            next;
-        }
-
-        my $storeDir = "/nix/store";
-        my $wantMassQuery = 0;
-        my $priority = 50;
-        foreach my $line (split "\n", $request->{content}) {
-            unless ($line =~ /^(.*): (.*)$/) {
-                print STDERR "bad cache info file ‘$request->{url}’\n";
-                return undef;
-            }
-            if ($1 eq "StoreDir") { $storeDir = $2; }
-            elsif ($1 eq "WantMassQuery") { $wantMassQuery = int($2); }
-            elsif ($1 eq "Priority") { $priority = int($2); }
-        }
-
-        $dbh->do("insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)",
-                 {}, $url, time(), $storeDir, $wantMassQuery, $priority);
-        $queryCache->execute($url);
-        $res = $queryCache->fetchrow_hashref() or die;
-        next if $storeDir ne $Nix::Config::storeDir;
-        push @caches, { id => $res->{id}, url => $url, wantMassQuery => $wantMassQuery, priority => $priority };
-    }
-
-    @caches = sort { $a->{priority} <=> $b->{priority} } @caches;
-
-    expireNegative();
-}
-
-
-sub shouldCache {
-    my ($url) = @_;
-    return $cacheFileURLs || $url !~ /^file:/;
-}
-
-
-sub processNARInfo {
-    my ($storePath, $cache, $request) = @_;
-
-    if ($request->{result} != 0) {
-        if ($request->{result} != 37 && $request->{httpStatus} != 404 && $request->{httpStatus} != 403) {
-            print STDERR "could not download ‘$request->{url}’ (" .
-                ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n";
-        } else {
-            $insertNARExistence->execute($cache->{id}, basename($storePath), 0, time())
-                if shouldCache $request->{url};
-        }
-        return undef;
-    }
-
-    my $narInfo = parseNARInfo($storePath, $request->{content}, $requireSignedBinaryCaches, $request->{url});
-    return undef unless defined $narInfo;
-
-    die if $requireSignedBinaryCaches && !defined $narInfo->{signedBy};
-
-    # Cache the result.
-    $insertNAR->execute(
-        $cache->{id}, basename($storePath), $narInfo->{url}, $narInfo->{compression},
-        $narInfo->{fileHash}, $narInfo->{fileSize}, $narInfo->{narHash}, $narInfo->{narSize},
-        join(" ", @{$narInfo->{refs}}), $narInfo->{deriver}, $narInfo->{signedBy}, time())
-        if shouldCache $request->{url};
-
-    return $narInfo;
-}
-
-
-sub getCachedInfoFrom {
-    my ($storePath, $cache) = @_;
-
-    $queryNAR->execute($cache->{id}, basename($storePath));
-    my $res = $queryNAR->fetchrow_hashref();
-    return undef unless defined $res;
-
-    # We may previously have cached this info when signature checking
-    # was disabled.  In that case, ignore the cached info.
-    return undef if $requireSignedBinaryCaches && !defined $res->{signedBy};
-
-    return
-        { url => $res->{url}
-        , compression => $res->{compression}
-        , fileHash => $res->{fileHash}
-        , fileSize => $res->{fileSize}
-        , narHash => $res->{narHash}
-        , narSize => $res->{narSize}
-        , refs => [ split " ", $res->{refs} ]
-        , deriver => $res->{deriver}
-        , signedBy => $res->{signedBy}
-        } if defined $res;
-}
-
-
-sub negativeHit {
-    my ($storePath, $cache) = @_;
-    $queryNARExistence->execute($cache->{id}, basename($storePath));
-    my $res = $queryNARExistence->fetchrow_hashref();
-    return defined $res && $res->{exist} == 0 && time() - $res->{timestamp} < $ttlNegativeUse;
-}
-
-
-sub positiveHit {
-    my ($storePath, $cache) = @_;
-    return 1 if defined getCachedInfoFrom($storePath, $cache);
-    $queryNARExistence->execute($cache->{id}, basename($storePath));
-    my $res = $queryNARExistence->fetchrow_hashref();
-    return defined $res && $res->{exist} == 1;
-}
-
-
-sub expireNegative {
-    return if $didExpiration;
-    $didExpiration = 1;
-    my $time = time();
-    # Round up to the next multiple of the TTL to ensure that we do
-    # expiration only once per time interval.  E.g. if $ttlNegative ==
-    # 3600, we expire entries at most once per hour.  This is
-    # presumably faster than expiring a few entries per request (and
-    # thus doing a transaction).
-    my $limit = (int($time / $ttlNegative) - 1) * $ttlNegative;
-    $expireNARExistence->execute($limit, 0);
-    print STDERR "expired ", $expireNARExistence->rows, " negative entries\n" if $debug;
-}
-
-
-sub printInfo {
-    my ($storePath, $info) = @_;
-    print "$storePath\n";
-    print $info->{deriver} ? "$Nix::Config::storeDir/$info->{deriver}" : "", "\n";
-    print scalar @{$info->{refs}}, "\n";
-    print "$Nix::Config::storeDir/$_\n" foreach @{$info->{refs}};
-    print $info->{fileSize} || 0, "\n";
-    print $info->{narSize} || 0, "\n";
-}
-
-
-sub infoUrl {
-    my ($binaryCacheUrl, $storePath) = @_;
-    my $pathHash = substr(basename($storePath), 0, 32);
-    my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo";
-}
-
-
-sub printInfoParallel {
-    my @paths = @_;
-
-    # First print all paths for which we have cached info.
-    my @left;
-    foreach my $storePath (@paths) {
-        my $found = 0;
-        foreach my $cache (@caches) {
-            my $info = getCachedInfoFrom($storePath, $cache);
-            if (defined $info) {
-                printInfo($storePath, $info);
-                $found = 1;
-                last;
-            }
-        }
-        push @left, $storePath if !$found;
-    }
-
-    return if scalar @left == 0;
-
-    foreach my $cache (@caches) {
-
-        my @left2;
-        %requests = ();
-        foreach my $storePath (@left) {
-            if (negativeHit($storePath, $cache)) {
-                push @left2, $storePath;
-                next;
-            }
-            addRequest($storePath, infoUrl($cache->{url}, $storePath));
-        }
-
-        processRequests;
-
-        foreach my $request (values %requests) {
-            my $info = processNARInfo($request->{storePath}, $cache, $request);
-            if (defined $info) {
-                printInfo($request->{storePath}, $info);
-            } else {
-                push @left2, $request->{storePath};
-            }
-        }
-
-        @left = @left2;
-    }
-}
-
-
-sub printSubstitutablePaths {
-    my @paths = @_;
-
-    # First look for paths that have cached info.
-    my @left;
-    foreach my $storePath (@paths) {
-        my $found = 0;
-        foreach my $cache (@caches) {
-            next unless $cache->{wantMassQuery};
-            if (positiveHit($storePath, $cache)) {
-                print "$storePath\n";
-                $found = 1;
-                last;
-            }
-        }
-        push @left, $storePath if !$found;
-    }
-
-    return if scalar @left == 0;
-
-    # For remaining paths, do HEAD requests.
-    foreach my $cache (@caches) {
-        next unless $cache->{wantMassQuery};
-        my @left2;
-        %requests = ();
-        foreach my $storePath (@left) {
-            if (negativeHit($storePath, $cache)) {
-                push @left2, $storePath;
-                next;
-            }
-            addRequest($storePath, infoUrl($cache->{url}, $storePath), 1);
-        }
-
-        processRequests;
-
-        foreach my $request (values %requests) {
-            if ($request->{result} != 0) {
-                if ($request->{result} != 37 && $request->{httpStatus} != 404 && $request->{httpStatus} != 403) {
-                    print STDERR "could not check ‘$request->{url}’ (" .
-                        ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n";
-                } else {
-                    $insertNARExistence->execute($cache->{id}, basename($request->{storePath}), 0, time())
-                        if shouldCache $request->{url};
-                }
-                push @left2, $request->{storePath};
-            } else {
-                $insertNARExistence->execute($cache->{id}, basename($request->{storePath}), 1, time())
-                    if shouldCache $request->{url};
-                print "$request->{storePath}\n";
-            }
-        }
-
-        @left = @left2;
-    }
-}
-
-
-sub downloadBinary {
-    my ($storePath, $destPath) = @_;
-
-    foreach my $cache (@caches) {
-        my $info = getCachedInfoFrom($storePath, $cache);
-
-        unless (defined $info) {
-            next if negativeHit($storePath, $cache);
-            my $request = addRequest($storePath, infoUrl($cache->{url}, $storePath));
-            processRequests;
-            $info = processNARInfo($storePath, $cache, $request);
-        }
-
-        next unless defined $info;
-
-        my $decompressor;
-        if ($info->{compression} eq "bzip2") { $decompressor = "| $Nix::Config::bzip2 -d"; }
-        elsif ($info->{compression} eq "xz") { $decompressor = "| $Nix::Config::xz -d"; }
-        elsif ($info->{compression} eq "none") { $decompressor = ""; }
-        else {
-            print STDERR "unknown compression method ‘$info->{compression}’\n";
-            next;
-        }
-        my $url = "$cache->{url}/$info->{url}"; # FIXME: handle non-relative URLs
-        die if $requireSignedBinaryCaches && !defined $info->{signedBy};
-        print STDERR "\n*** Downloading ‘$url’ ", ($requireSignedBinaryCaches ? "(signed by ‘$info->{signedBy}’) " : ""), "to ‘$storePath’...\n";
-        checkURL $url;
-        if (system("$Nix::Config::curl --fail --location --connect-timeout $curlConnectTimeout -A '$userAgent' '$url' $decompressor | $Nix::Config::binDir/nix-store --restore $destPath") != 0) {
-            warn "download of ‘$url’ failed" . ($! ? ": $!" : "") . "\n";
-            next;
-        }
-
-        # Tell Nix about the expected hash so it can verify it.
-        die unless defined $info->{narHash} && $info->{narHash} ne "";
-        print "$info->{narHash}\n";
-
-        print STDERR "\n";
-        return;
-    }
-
-    print STDERR "could not download ‘$storePath’ from any binary cache\n";
-    exit 1;
-}
-
-
-# Bail out right away if binary caches are disabled.
-exit 0 if
-    ($Nix::Config::config{"use-binary-caches"} // "true") eq "false" ||
-    ($Nix::Config::config{"untrusted-use-binary-caches"} // "true") eq "false";
-print "\n";
-flush STDOUT;
-
-initCache();
-
-
-if ($ARGV[0] eq "--query") {
-
-    while (<STDIN>) {
-        getAvailableCaches;
-        chomp;
-        my ($cmd, @args) = split " ", $_;
-
-        if ($cmd eq "have") {
-            print STDERR "checking binary caches for existence of @args\n" if $debug;
-            printSubstitutablePaths(@args);
-            print "\n";
-        }
-
-        elsif ($cmd eq "info") {
-            print STDERR "checking binary caches for info on @args\n" if $debug;
-            printInfoParallel(@args);
-            print "\n";
-        }
-
-        else { die "unknown command ‘$cmd’"; }
-
-        flush STDOUT;
-    }
-
-}
-
-elsif ($ARGV[0] eq "--substitute") {
-    my $storePath = $ARGV[1] or die;
-    my $destPath = $ARGV[2] or die;
-    getAvailableCaches;
-    downloadBinary($storePath, $destPath);
-}
-
-else {
-    die;
-}
diff --git a/scripts/local.mk b/scripts/local.mk
index edaf44cc492d..ee8ae6845dc1 100644
--- a/scripts/local.mk
+++ b/scripts/local.mk
@@ -1,9 +1,5 @@
 nix_bin_scripts := \
-  $(d)/nix-build \
-  $(d)/nix-channel \
   $(d)/nix-copy-closure \
-  $(d)/nix-install-package \
-  $(d)/nix-push
 
 bin-scripts += $(nix_bin_scripts)
 
@@ -13,19 +9,11 @@ nix_noinst_scripts := \
   $(d)/nix-profile.sh \
   $(d)/nix-reduce-build
 
-ifeq ($(OS), Darwin)
-  nix_noinst_scripts += $(d)/resolve-system-dependencies.pl
-endif
-
 noinst-scripts += $(nix_noinst_scripts)
 
 profiledir = $(sysconfdir)/profile.d
 
 $(eval $(call install-file-as, $(d)/nix-profile.sh, $(profiledir)/nix.sh, 0644))
 $(eval $(call install-program-in, $(d)/build-remote.pl, $(libexecdir)/nix))
-ifeq ($(OS), Darwin)
-  $(eval $(call install-program-in, $(d)/resolve-system-dependencies.pl, $(libexecdir)/nix))
-endif
-$(eval $(call install-symlink, nix-build, $(bindir)/nix-shell))
 
 clean-files += $(nix_bin_scripts) $(nix_noinst_scripts)
diff --git a/scripts/nix-build.in b/scripts/nix-build.in
deleted file mode 100755
index 2d45e37c52d6..000000000000
--- a/scripts/nix-build.in
+++ /dev/null
@@ -1,359 +0,0 @@
-#! @perl@ -w @perlFlags@
-
-use utf8;
-use strict;
-use Nix::Config;
-use Nix::Store;
-use Nix::Utils;
-use File::Basename;
-use Text::ParseWords;
-use Cwd;
-
-binmode STDERR, ":encoding(utf8)";
-
-my $dryRun = 0;
-my $verbose = 0;
-my $runEnv = $0 =~ /nix-shell$/;
-my $pure = 0;
-my $fromArgs = 0;
-my $packages = 0;
-my $interactive = 1;
-
-my @instArgs = ();
-my @buildArgs = ();
-my @exprs = ();
-
-my $shell = $ENV{SHELL} || "/bin/sh";
-my $envCommand = ""; # interactive shell
-my @envExclude = ();
-
-my $myName = $runEnv ? "nix-shell" : "nix-build";
-
-my $inShebang = 0;
-my $script;
-my @savedArgs;
-
-my $tmpDir = mkTempDir($myName);
-
-my $outLink = "./result";
-my $drvLink = "$tmpDir/derivation";
-
-# Ensure that the $tmpDir is deleted.
-$SIG{'INT'} = sub { exit 1 };
-
-
-# Heuristic to see if we're invoked as a shebang script, namely, if we
-# have a single argument, it's the name of an executable file, and it
-# starts with "#!".
-if ($runEnv && defined $ARGV[0] && $ARGV[0] !~ /nix-shell/) {
-    $script = $ARGV[0];
-    if (-f $script && -x $script) {
-        open SCRIPT, "<$script" or die "$0: cannot open ‘$script’: $!\n";
-        my $first = <SCRIPT>;
-        if ($first =~ /^\#\!/) {
-            $inShebang = 1;
-            @savedArgs = @ARGV; shift @savedArgs;
-            @ARGV = ();
-            while (<SCRIPT>) {
-                chomp;
-                if (/^\#\!\s*nix-shell (.*)$/) {
-                    push @ARGV, shellwords($1);
-                }
-            }
-        }
-        close SCRIPT;
-    }
-}
-
-
-for (my $n = 0; $n < scalar @ARGV; $n++) {
-    my $arg = $ARGV[$n];
-
-    if ($arg eq "--help") {
-        exec "man $myName" or die;
-    }
-
-    elsif ($arg eq "--version") {
-        print "$myName (Nix) $Nix::Config::version\n";
-        exit 0;
-    }
-
-    elsif ($arg eq "--add-drv-link") {
-        $drvLink = "./derivation";
-    }
-
-    elsif ($arg eq "--no-out-link" || $arg eq "--no-link") {
-        $outLink = "$tmpDir/result";
-    }
-
-    elsif ($arg eq "--drv-link") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        $drvLink = $ARGV[$n];
-    }
-
-    elsif ($arg eq "--out-link" || $arg eq "-o") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        $outLink = $ARGV[$n];
-    }
-
-    elsif ($arg eq "--attr" || $arg eq "-A" || $arg eq "-I") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        push @instArgs, ($arg, $ARGV[$n]);
-    }
-
-    elsif ($arg eq "--arg" || $arg eq "--argstr") {
-        die "$0: ‘$arg’ requires two arguments\n" unless $n + 2 < scalar @ARGV;
-        push @instArgs, ($arg, $ARGV[$n + 1], $ARGV[$n + 2]);
-        $n += 2;
-    }
-
-    elsif ($arg eq "--option") {
-        die "$0: ‘$arg’ requires two arguments\n" unless $n + 2 < scalar @ARGV;
-        push @instArgs, ($arg, $ARGV[$n + 1], $ARGV[$n + 2]);
-        push @buildArgs, ($arg, $ARGV[$n + 1], $ARGV[$n + 2]);
-        $n += 2;
-    }
-
-    elsif ($arg eq "--max-jobs" || $arg eq "-j" || $arg eq "--max-silent-time" || $arg eq "--cores" || $arg eq "--timeout" || $arg eq '--add-root') {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        push @buildArgs, ($arg, $ARGV[$n]);
-    }
-
-    elsif ($arg eq "--dry-run") {
-        push @buildArgs, "--dry-run";
-        $dryRun = 1;
-    }
-
-    elsif ($arg eq "--show-trace") {
-        push @instArgs, $arg;
-    }
-
-    elsif ($arg eq "-") {
-        @exprs = ("-");
-    }
-
-    elsif ($arg eq "--verbose" || substr($arg, 0, 2) eq "-v") {
-        push @buildArgs, $arg;
-        push @instArgs, $arg;
-        $verbose = 1;
-    }
-
-    elsif ($arg eq "--quiet" || $arg eq "--repair") {
-        push @buildArgs, $arg;
-        push @instArgs, $arg;
-    }
-
-    elsif ($arg eq "--check") {
-        push @buildArgs, $arg;
-    }
-
-    elsif ($arg eq "--run-env") { # obsolete
-        $runEnv = 1;
-    }
-
-    elsif ($arg eq "--command" || $arg eq "--run") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        $envCommand = "$ARGV[$n]\nexit";
-        $interactive = 0 if $arg eq "--run";
-    }
-
-    elsif ($arg eq "--exclude") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        push @envExclude, $ARGV[$n];
-    }
-
-    elsif ($arg eq "--pure") { $pure = 1; }
-    elsif ($arg eq "--impure") { $pure = 0; }
-
-    elsif ($arg eq "--expr" || $arg eq "-E") {
-        $fromArgs = 1;
-        push @instArgs, "--expr";
-    }
-
-    elsif ($arg eq "--packages" || $arg eq "-p") {
-        $packages = 1;
-    }
-
-    elsif ($inShebang && $arg eq "-i") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        my $interpreter = $ARGV[$n];
-        my $execArgs = "";
-
-        sub shellEscape {
-            my $s = $_;
-            $s =~ s/'/'\\''/g;
-            return "'" . $s . "'";
-        }
-
-        # Überhack to support Perl. Perl examines the shebang and
-        # executes it unless it contains the string "perl" or "indir",
-        # or (undocumented) argv[0] does not contain "perl". Exploit
-        # the latter by doing "exec -a".
-        if ($interpreter =~ /perl/) {
-            $execArgs = "-a PERL";
-        }
-
-        if ($interpreter =~ /ruby/) {
-            # Hack for Ruby. Ruby also examines the shebang. It tries to
-            # read the shebang to understand which packages to read from. Since
-            # this is handled via nix-shell -p, we wrap our ruby script execution
-            # in ruby -e 'load' which ignores the shebangs.
-            $envCommand = "exec $execArgs $interpreter -e 'load(\"$script\")' -- ${\(join ' ', (map shellEscape, @savedArgs))}";
-        } else {
-            $envCommand = "exec $execArgs $interpreter $script ${\(join ' ', (map shellEscape, @savedArgs))}";
-        }
-    }
-
-    elsif (substr($arg, 0, 1) eq "-") {
-        push @buildArgs, $arg;
-    }
-
-    elsif ($arg eq "-Q" || $arg eq "--no-build-output") {
-        push @buildArgs, $arg;
-        push @instArgs, $arg;
-    }
-
-    else {
-        push @exprs, $arg;
-    }
-}
-
-die "$0: ‘-p’ and ‘-E’ are mutually exclusive\n" if $packages && $fromArgs;
-
-if ($packages) {
-    push @instArgs, "--expr";
-    @exprs = (
-        'with import <nixpkgs> { }; runCommand "shell" { buildInputs = [ '
-        . (join " ", map { "($_)" } @exprs) . ']; } ""');
-} elsif (!$fromArgs) {
-    @exprs = ("shell.nix") if scalar @exprs == 0 && $runEnv && -e "shell.nix";
-    @exprs = ("default.nix") if scalar @exprs == 0;
-}
-
-$ENV{'IN_NIX_SHELL'} = 1 if $runEnv;
-
-
-foreach my $expr (@exprs) {
-
-    # Instantiate.
-    my @drvPaths;
-    if ($expr !~ /^\/.*\.drv$/) {
-        # If we're in a #! script, interpret filenames relative to the
-        # script.
-        $expr = dirname(Cwd::abs_path($script)) . "/" . $expr
-            if $inShebang && !$packages && $expr !~ /^\//;
-
-        # !!! would prefer the perl 5.8.0 pipe open feature here.
-        my $pid = open(DRVPATHS, "-|") || exec "$Nix::Config::binDir/nix-instantiate", "--add-root", $drvLink, "--indirect", @instArgs, $expr;
-        while (<DRVPATHS>) {chomp; push @drvPaths, $_;}
-        if (!close DRVPATHS) {
-            die "nix-instantiate killed by signal " . ($? & 127) . "\n" if ($? & 127);
-            exit 1;
-        }
-    } else {
-        push @drvPaths, $expr;
-    }
-
-    if ($runEnv) {
-        die "$0: a single derivation is required\n" if scalar @drvPaths != 1;
-        my $drvPath = $drvPaths[0];
-        $drvPath = (split '!',$drvPath)[0];
-        $drvPath = readlink $drvPath or die "cannot read symlink ‘$drvPath’" if -l $drvPath;
-        my $drv = derivationFromPath($drvPath);
-
-        # Build or fetch all dependencies of the derivation.
-        my @inputDrvs = grep { my $x = $_; (grep { $x =~ $_ } @envExclude) == 0 } @{$drv->{inputDrvs}};
-        system("$Nix::Config::binDir/nix-store", "-r", "--no-output", "--no-gc-warning", @buildArgs, @inputDrvs, @{$drv->{inputSrcs}}) == 0
-            or die "$0: failed to build all dependencies\n";
-
-        # Set the environment.
-        my $tmp = $ENV{"TMPDIR"} // $ENV{"XDG_RUNTIME_DIR"} // "/tmp";
-        if ($pure) {
-            foreach my $name (keys %ENV) {
-                next if grep { $_ eq $name } ("HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL");
-                delete $ENV{$name};
-            }
-            # NixOS hack: prevent /etc/bashrc from sourcing /etc/profile.
-            $ENV{'__ETC_PROFILE_SOURCED'} = 1;
-        }
-        $ENV{'NIX_BUILD_TOP'} = $ENV{'TMPDIR'} = $ENV{'TEMPDIR'} = $ENV{'TMP'} = $ENV{'TEMP'} = $tmp;
-        $ENV{'NIX_STORE'} = $Nix::Config::storeDir;
-        $ENV{$_} = $drv->{env}->{$_} foreach keys %{$drv->{env}};
-
-        # Run a shell using the derivation's environment.  For
-        # convenience, source $stdenv/setup to setup additional
-        # environment variables and shell functions.  Also don't lose
-        # the current $PATH directories.
-        my $rcfile = "$tmpDir/rc";
-        writeFile(
-            $rcfile,
-            "rm -rf '$tmpDir'; " .
-            'unset BASH_ENV; ' .
-            '[ -n "$PS1" ] && [ -e ~/.bashrc ] && source ~/.bashrc; ' .
-            ($pure ? '' : 'p=$PATH; ' ) .
-            'dontAddDisableDepTrack=1; ' .
-            '[ -e $stdenv/setup ] && source $stdenv/setup; ' .
-            ($pure ? '' : 'PATH=$PATH:$p; unset p; ') .
-            'set +e; ' .
-            '[ -n "$PS1" ] && PS1="\n\[\033[1;32m\][nix-shell:\w]$\[\033[0m\] "; ' .
-            'if [ "$(type -t runHook)" = function ]; then runHook shellHook; fi; ' .
-            'unset NIX_ENFORCE_PURITY; ' .
-            'unset NIX_INDENT_MAKE; ' .
-            'shopt -u nullglob; ' .
-            'unset TZ; ' . (defined $ENV{'TZ'} ? "export TZ='${ENV{'TZ'}}'; " : '') .
-            $envCommand);
-        $ENV{BASH_ENV} = $rcfile;
-        my @args = ($ENV{NIX_BUILD_SHELL} // "bash");
-        push @args, "--rcfile" if $interactive;
-        push @args, $rcfile;
-        exec @args;
-        die;
-    }
-
-    # Ugly hackery to make "nix-build -A foo.all" produce symlinks
-    # ./result, ./result-dev, and so on, rather than ./result,
-    # ./result-2-dev, and so on.  This combines multiple derivation
-    # paths into one "/nix/store/drv-path!out1,out2,..." argument.
-    my $prevDrvPath = "";
-    my @drvPaths2;
-    foreach my $drvPath (@drvPaths) {
-        my $p = $drvPath; my $output = "out";
-        if ($drvPath =~ /(.*)!(.*)/) {
-            $p = $1; $output = $2;
-        } else {
-            $p = $drvPath;
-        }
-        my $target = readlink $p or die "cannot read symlink ‘$p’";
-        print STDERR "derivation is $target\n" if $verbose;
-        if ($target eq $prevDrvPath) {
-            push @drvPaths2, (pop @drvPaths2) . "," . $output;
-        } else {
-            push @drvPaths2, $target . "!" . $output;
-            $prevDrvPath = $target;
-        }
-    }
-
-    # Build.
-    my @outPaths;
-    my $pid = open(OUTPATHS, "-|") || exec "$Nix::Config::binDir/nix-store", "--add-root", $outLink, "--indirect", "-r",
-        @buildArgs, @drvPaths2;
-    while (<OUTPATHS>) {chomp; push @outPaths, $_;}
-    if (!close OUTPATHS) {
-        die "nix-store killed by signal " . ($? & 127) . "\n" if ($? & 127);
-        exit ($? >> 8 || 1);
-    }
-
-    next if $dryRun;
-
-    foreach my $outPath (@outPaths) {
-        my $target = readlink $outPath or die "cannot read symlink ‘$outPath’";
-        print "$target\n";
-    }
-}
diff --git a/scripts/nix-channel.in b/scripts/nix-channel.in
deleted file mode 100755
index 65084ff1f34a..000000000000
--- a/scripts/nix-channel.in
+++ /dev/null
@@ -1,228 +0,0 @@
-#! @perl@ -w @perlFlags@
-
-use utf8;
-use strict;
-use File::Basename;
-use File::Path qw(mkpath);
-use Nix::Config;
-use Nix::Manifest;
-use File::Temp qw(tempdir);
-
-binmode STDERR, ":encoding(utf8)";
-
-Nix::Config::readConfig;
-
-
-# Turn on caching in nix-prefetch-url.
-my $channelCache = "$Nix::Config::stateDir/channel-cache";
-mkdir $channelCache, 0755 unless -e $channelCache;
-$ENV{'NIX_DOWNLOAD_CACHE'} = $channelCache if -W $channelCache;
-
-# Figure out the name of the `.nix-channels' file to use.
-my $home = $ENV{"HOME"} or die '$HOME not set\n';
-my $channelsList = "$home/.nix-channels";
-my $nixDefExpr = "$home/.nix-defexpr";
-
-# Figure out the name of the channels profile.
-my $userName = getpwuid($<) || $ENV{"USER"} or die "cannot figure out user name";
-my $profile = "$Nix::Config::stateDir/profiles/per-user/$userName/channels";
-mkpath(dirname $profile, 0, 0755);
-
-my %channels;
-
-
-# Reads the list of channels.
-sub readChannels {
-    return if (!-f $channelsList);
-    open CHANNELS, "<$channelsList" or die "cannot open ‘$channelsList’: $!";
-    while (<CHANNELS>) {
-        chomp;
-        next if /^\s*\#/;
-        my ($url, $name) = split ' ', $_;
-        $url =~ s/\/*$//; # remove trailing slashes
-        $name = basename $url unless defined $name;
-        $channels{$name} = $url;
-    }
-    close CHANNELS;
-}
-
-
-# Writes the list of channels.
-sub writeChannels {
-    open CHANNELS, ">$channelsList" or die "cannot open ‘$channelsList’: $!";
-    foreach my $name (keys %channels) {
-        print CHANNELS "$channels{$name} $name\n";
-    }
-    close CHANNELS;
-}
-
-
-# Adds a channel.
-sub addChannel {
-    my ($url, $name) = @_;
-    die "invalid channel URL ‘$url’" unless $url =~ /^(file|http|https):\/\//;
-    die "invalid channel identifier ‘$name’" unless $name =~ /^[a-zA-Z0-9_][a-zA-Z0-9_\-\.]*$/;
-    readChannels;
-    $channels{$name} = $url;
-    writeChannels;
-}
-
-
-# Remove a channel.
-sub removeChannel {
-    my ($name) = @_;
-    readChannels;
-    my $url = $channels{$name};
-    delete $channels{$name};
-    writeChannels;
-
-    system("$Nix::Config::binDir/nix-env --profile '$profile' -e '$name'") == 0
-        or die "cannot remove channel ‘$name’\n";
-}
-
-
-# Fetch Nix expressions and binary cache URLs from the subscribed channels.
-sub update {
-    my @channelNames = @_;
-
-    readChannels;
-
-    # Download each channel.
-    my $exprs = "";
-    foreach my $name (keys %channels) {
-        next if scalar @channelNames > 0 && ! grep { $_ eq $name } @{channelNames};
-
-        my $url = $channels{$name};
-
-        # We want to download the url to a file to see if it's a tarball while also checking if we
-        # got redirected in the process, so that we can grab the various parts of a nix channel
-        # definition from a consistent location if the redirect changes mid-download.
-        my $tmpdir = tempdir( CLEANUP => 1 );
-        my $filename;
-        ($url, $filename) = `cd $tmpdir && $Nix::Config::curl --silent --write-out '%{url_effective}\n%{filename_effective}' -L '$url' -O`;
-        chomp $url;
-        die "$0: unable to check ‘$url’\n" if $? != 0;
-
-        # If the URL contains a version number, append it to the name
-        # attribute (so that "nix-env -q" on the channels profile
-        # shows something useful).
-        my $cname = $name;
-        $cname .= $1 if basename($url) =~ /(-\d.*)$/;
-
-        my $path;
-        my $ret = -1;
-        if (-e "$tmpdir/$filename" && $filename =~ /\.tar\.(gz|bz2|xz)$/) {
-            # Get our temporary download into the store.
-            (my $hash, $path) = `PRINT_PATH=1 QUIET=1 $Nix::Config::binDir/nix-prefetch-url 'file://$tmpdir/$filename'`;
-            chomp $path;
-
-            # Try unpacking the expressions to see if they'll be valid for us to process later.
-            # Like anything in nix, this will cache the result so we don't do it again outside of the loop below.
-            $ret = system("$Nix::Config::binDir/nix-build --no-out-link -E 'import <nix/unpack-channel.nix> " .
-                          "{ name = \"$cname\"; channelName = \"$name\"; src = builtins.storePath \"$path\"; }'");
-        }
-
-        # The URL doesn't unpack directly, so let's try treating it like a full channel folder with files in it
-        my $extraAttrs = "";
-        if ($ret != 0) {
-            # Check if the channel advertises a binary cache.
-            my $binaryCacheURL = `$Nix::Config::curl --silent '$url'/binary-cache-url`;
-            $extraAttrs .= "binaryCacheURL = \"$binaryCacheURL\"; "
-                if $? == 0 && $binaryCacheURL ne "";
-
-            # Download the channel tarball.
-            my $fullURL = "$url/nixexprs.tar.xz";
-            system("$Nix::Config::curl --fail --silent --head '$fullURL' > /dev/null") == 0 or
-                $fullURL = "$url/nixexprs.tar.bz2";
-            print STDERR "downloading Nix expressions from ‘$fullURL’...\n";
-            (my $hash, $path) = `PRINT_PATH=1 QUIET=1 $Nix::Config::binDir/nix-prefetch-url '$fullURL'`;
-            die "cannot fetch ‘$fullURL’\n" if $? != 0;
-            chomp $path;
-        }
-
-        # Regardless of where it came from, add the expression representing this channel to accumulated expression
-        $exprs .= "'f: f { name = \"$cname\"; channelName = \"$name\"; src = builtins.storePath \"$path\"; $extraAttrs }' ";
-    }
-
-    # Unpack the channel tarballs into the Nix store and install them
-    # into the channels profile.
-    print STDERR "unpacking channels...\n";
-    system("$Nix::Config::binDir/nix-env --profile '$profile' " .
-           "-f '<nix/unpack-channel.nix>' -i -E $exprs --quiet") == 0
-           or die "cannot unpack the channels";
-
-    # Make the channels appear in nix-env.
-    unlink $nixDefExpr if -l $nixDefExpr; # old-skool ~/.nix-defexpr
-    mkdir $nixDefExpr or die "cannot create directory ‘$nixDefExpr’" if !-e $nixDefExpr;
-    my $channelLink = "$nixDefExpr/channels";
-    unlink $channelLink; # !!! not atomic
-    symlink($profile, $channelLink) or die "cannot symlink ‘$channelLink’ to ‘$profile’";
-}
-
-
-die "$0: argument expected\n" if scalar @ARGV == 0;
-
-
-while (scalar @ARGV) {
-    my $arg = shift @ARGV;
-
-    if ($arg eq "--add") {
-        die "$0: ‘--add’ requires one or two arguments\n" if scalar @ARGV < 1 || scalar @ARGV > 2;
-        my $url = shift @ARGV;
-        my $name = shift @ARGV;
-        unless (defined $name) {
-            $name = basename $url;
-            $name =~ s/-unstable//;
-            $name =~ s/-stable//;
-        }
-        addChannel($url, $name);
-        last;
-    }
-
-    if ($arg eq "--remove") {
-        die "$0: ‘--remove’ requires one argument\n" if scalar @ARGV != 1;
-        removeChannel(shift @ARGV);
-        last;
-    }
-
-    if ($arg eq "--list") {
-        die "$0: ‘--list’ requires one argument\n" if scalar @ARGV != 0;
-        readChannels;
-        foreach my $name (keys %channels) {
-            print "$name $channels{$name}\n";
-        }
-        last;
-    }
-
-    elsif ($arg eq "--update") {
-        update(@ARGV);
-        last;
-    }
-
-    elsif ($arg eq "--rollback") {
-        die "$0: ‘--rollback’ has at most one argument\n" if scalar @ARGV > 1;
-        my $generation = shift @ARGV;
-        my @args = ("$Nix::Config::binDir/nix-env", "--profile", $profile);
-        if (defined $generation) {
-            die "invalid channel generation number ‘$generation’" unless $generation =~ /^[0-9]+$/;
-            push @args, "--switch-generation", $generation;
-        } else {
-            push @args, "--rollback";
-        }
-        system(@args) == 0 or exit 1;
-        last;
-    }
-
-    elsif ($arg eq "--help") {
-        exec "man nix-channel" or die;
-    }
-
-    elsif ($arg eq "--version") {
-        print "nix-channel (Nix) $Nix::Config::version\n";
-        exit 0;
-    }
-
-    else {
-        die "unknown argument ‘$arg’; try ‘--help’\n";
-    }
-}
diff --git a/scripts/nix-install-package.in b/scripts/nix-install-package.in
deleted file mode 100755
index ba349774af54..000000000000
--- a/scripts/nix-install-package.in
+++ /dev/null
@@ -1,127 +0,0 @@
-#! @perl@ -w @perlFlags@
-
-use utf8;
-use strict;
-use Nix::Config;
-use Nix::Utils;
-
-binmode STDERR, ":encoding(utf8)";
-
-
-# Parse the command line arguments.
-my @args = @ARGV;
-
-my $source;
-my $fromURL = 0;
-my @extraNixEnvArgs = ();
-my $interactive = 1;
-my $op = "--install";
-
-while (scalar @args) {
-    my $arg = shift @args;
-    if ($arg eq "--help") {
-        exec "man nix-install-package" or die;
-    }
-    elsif ($arg eq "--url") {
-        $fromURL = 1;
-    }
-    elsif ($arg eq "--profile" || $arg eq "-p") {
-        my $profile = shift @args;
-        die "$0: ‘--profile’ requires an argument\n" if !defined $profile;
-        push @extraNixEnvArgs, "-p", $profile;
-    }
-    elsif ($arg eq "--set") {
-        $op = "--set";
-    }
-    elsif ($arg eq "--non-interactive") {
-        $interactive = 0;
-    }
-    else {
-        $source = $arg;
-    }
-}
-
-die "$0: please specify a .nixpkg file or URL\n" unless defined $source;
-
-
-# Re-execute in a terminal, if necessary, so that if we're executed
-# from a web browser, the user gets to see us.
-if ($interactive && !defined $ENV{"NIX_HAVE_TERMINAL"}) {
-    $ENV{"NIX_HAVE_TERMINAL"} = "1";
-    $ENV{"LD_LIBRARY_PATH"} = "";
-    foreach my $term ("xterm", "konsole", "gnome-terminal", "xterm") {
-        exec($term, "-e", "$Nix::Config::binDir/nix-install-package", @ARGV);
-    }
-    die "cannot execute ‘xterm’";
-}
-
-
-my $tmpDir = mkTempDir("nix-install-package");
-
-
-sub barf {
-    my $msg = shift;
-    print "\nInstallation failed: $msg\n";
-    <STDIN> if $interactive;
-    exit 1;
-}
-
-
-# Download the package description, if necessary.
-my $pkgFile = $source;
-if ($fromURL) {
-    $pkgFile = "$tmpDir/tmp.nixpkg";
-    system("@curl@", "-L", "--silent", $source, "-o", $pkgFile) == 0
-        or barf "curl failed: $?";
-}
-
-
-# Read and parse the package file.
-open PKGFILE, "<$pkgFile" or barf "cannot open ‘$pkgFile’: $!";
-my $contents = <PKGFILE>;
-close PKGFILE;
-
-my $nameRE = "(?: [A-Za-z0-9\+\-\.\_\?\=]+ )"; # see checkStoreName()
-my $systemRE = "(?: [A-Za-z0-9\+\-\_]+ )";
-my $pathRE = "(?: \/ [\/A-Za-z0-9\+\-\.\_\?\=]* )";
-
-# Note: $pathRE doesn't check that whether we're looking at a valid
-# store path.  We'll let nix-env do that.
-
-$contents =~
-    / ^ \s* (\S+) \s+ (\S+) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) ( \s+ ($Nix::Utils::urlRE) )?  /x
-    or barf "invalid package contents";
-my $version = $1;
-my $manifestURL = $2;
-my $drvName = $3;
-my $system = $4;
-my $drvPath = $5;
-my $outPath = $6;
-my $binaryCacheURL = $8;
-
-barf "invalid package version ‘$version’" unless $version eq "NIXPKG1";
-
-
-if ($interactive) {
-    # Ask confirmation.
-    print "Do you want to install ‘$drvName’ (Y/N)? ";
-    my $reply = <STDIN>;
-    chomp $reply;
-    exit if $reply ne "y" && $reply ne "Y";
-}
-
-
-die "$0: package does not supply a binary cache\n" unless defined $binaryCacheURL;
-
-push @extraNixEnvArgs, "--option", "extra-binary-caches", $binaryCacheURL;
-
-
-print "\nInstalling package...\n";
-system("$Nix::Config::binDir/nix-env", $op, $outPath, "--force-name", $drvName, @extraNixEnvArgs) == 0
-    or barf "nix-env failed: $?";
-
-
-if ($interactive) {
-    print "\nInstallation succeeded! Press Enter to continue.\n";
-    <STDIN>;
-}
diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in
index cfe05c7166e7..41111848b2f3 100644
--- a/scripts/nix-profile.sh.in
+++ b/scripts/nix-profile.sh.in
@@ -81,6 +81,10 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then
         export SSL_CERT_FILE="$NIX_LINK/etc/ca-bundle.crt"
     fi
 
+    if [ -n "${MANPATH}" ]; then
+        export MANPATH="$NIX_LINK/share/man:$MANPATH"
+    fi
+
     export PATH="$NIX_LINK/bin:$NIX_LINK/sbin:$__savedpath"
     unset __savedpath NIX_LINK NIX_USER_PROFILE_DIR NIX_PROFILES
 fi
diff --git a/scripts/nix-push.in b/scripts/nix-push.in
deleted file mode 100755
index 54456ac9512e..000000000000
--- a/scripts/nix-push.in
+++ /dev/null
@@ -1,296 +0,0 @@
-#! @perl@ -w @perlFlags@
-
-use utf8;
-use strict;
-use File::Basename;
-use File::Path qw(mkpath);
-use File::stat;
-use File::Copy;
-use MIME::Base64;
-use Nix::Config;
-use Nix::Store;
-use Nix::Manifest;
-use Nix::Utils;
-
-binmode STDERR, ":encoding(utf8)";
-
-my $tmpDir = mkTempDir("nix-push");
-
-my $nixExpr = "$tmpDir/create-nars.nix";
-
-
-# Parse the command line.
-my $compressionType = "xz";
-my $force = 0;
-my $destDir;
-my $writeManifest = 0;
-my $manifestPath;
-my $archivesURL;
-my $link = 0;
-my $secretKeyFile;
-my @roots;
-my @buildArgs;
-
-for (my $n = 0; $n < scalar @ARGV; $n++) {
-    my $arg = $ARGV[$n];
-
-    if ($arg eq "--help") {
-        exec "man nix-push" or die;
-    } elsif ($arg eq "--bzip2") {
-        $compressionType = "bzip2";
-    } elsif ($arg eq "--none") {
-        $compressionType = "none";
-    } elsif ($arg eq "--force") {
-        $force = 1;
-    } elsif ($arg eq "--dest") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        $destDir = $ARGV[$n];
-        mkpath($destDir, 0, 0755);
-    } elsif ($arg eq "--manifest") {
-        $writeManifest = 1;
-    } elsif ($arg eq "--manifest-path") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        $manifestPath = $ARGV[$n];
-        $writeManifest = 1;
-        mkpath(dirname($manifestPath), 0, 0755);
-    } elsif ($arg eq "--url-prefix") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        $archivesURL = $ARGV[$n];
-    } elsif ($arg eq "--link") {
-        $link = 1;
-    } elsif ($arg eq "--key-file") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        $secretKeyFile = $ARGV[$n];
-    } elsif ($arg eq "--max-jobs" || $arg eq "-j") {
-        $n++;
-        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
-        push @buildArgs, ($arg, $ARGV[$n]);
-    } elsif (substr($arg, 0, 1) eq "-") {
-        die "$0: unknown flag ‘$arg’\n";
-    } else {
-        push @roots, $arg;
-    }
-}
-
-die "$0: please specify a destination directory\n" if !defined $destDir;
-
-$archivesURL = "file://$destDir" unless defined $archivesURL;
-
-
-# From the given store paths, determine the set of requisite store
-# paths, i.e., the paths required to realise them.
-my %storePaths;
-
-foreach my $path (@roots) {
-    # Get all paths referenced by the normalisation of the given
-    # Nix expression.
-    my $pid = open(READ,
-        "$Nix::Config::binDir/nix-store --query --requisites --force-realise " .
-        "--include-outputs '$path'|") or die;
-
-    while (<READ>) {
-        chomp;
-        die "bad: $_" unless /^\//;
-        $storePaths{$_} = "";
-    }
-
-    close READ or die "nix-store failed: $?";
-}
-
-my @storePaths = keys %storePaths;
-
-
-# Don't create archives for files that are already in the binary cache.
-my @storePaths2;
-my %narFiles;
-foreach my $storePath (@storePaths) {
-    my $pathHash = substr(basename($storePath), 0, 32);
-    my $narInfoFile = "$destDir/$pathHash.narinfo";
-    if (!$force && -e $narInfoFile) {
-        my $narInfo = parseNARInfo($storePath, readFile($narInfoFile), 0, $narInfoFile) or die "cannot read ‘$narInfoFile’\n";
-        my $narFile = "$destDir/$narInfo->{url}";
-        if (-e $narFile) {
-            print STDERR "skipping existing $storePath\n";
-            # Add the NAR info to $narFiles if we're writing a
-            # manifest.
-            $narFiles{$storePath} = [
-                { url => ("$archivesURL/" . basename $narInfo->{url})
-                  , hash => $narInfo->{fileHash}
-                  , size => $narInfo->{fileSize}
-                  , compressionType => $narInfo->{compression}
-                  , narHash => $narInfo->{narHash}
-                  , narSize => $narInfo->{narSize}
-                  , references => join(" ", map { "$Nix::Config::storeDir/$_" } @{$narInfo->{refs}})
-                  , deriver => $narInfo->{deriver} ? "$Nix::Config::storeDir/$narInfo->{deriver}" : undef
-                  }
-            ] if $writeManifest;
-            next;
-        }
-    }
-    push @storePaths2, $storePath;
-}
-
-
-# Create a list of Nix derivations that turn each path into a Nix
-# archive.
-open NIX, ">$nixExpr";
-print NIX "[";
-
-foreach my $storePath (@storePaths2) {
-    die unless ($storePath =~ /\/[0-9a-z]{32}[^\"\\\$]*$/);
-
-    # Construct a Nix expression that creates a Nix archive.
-    my $nixexpr =
-        "(import <nix/nar.nix> " .
-        "{ storePath = builtins.storePath \"$storePath\"; hashAlgo = \"sha256\"; compressionType = \"$compressionType\"; }) ";
-
-    print NIX $nixexpr;
-}
-
-print NIX "]";
-close NIX;
-
-
-# Build the Nix expression.
-print STDERR "building compressed archives...\n";
-my @narPaths;
-my $pid = open(READ, "-|", "$Nix::Config::binDir/nix-build", $nixExpr, "-o", "$tmpDir/result", @buildArgs)
-    or die "cannot run nix-build";
-while (<READ>) {
-    chomp;
-    die unless /^\//;
-    push @narPaths, $_;
-}
-close READ or die "nix-build failed: $?";
-
-
-# Write the cache info file.
-my $cacheInfoFile = "$destDir/nix-cache-info";
-if (! -e $cacheInfoFile) {
-    open FILE, ">$cacheInfoFile" or die "cannot create $cacheInfoFile: $!";
-    print FILE "StoreDir: $Nix::Config::storeDir\n";
-    print FILE "WantMassQuery: 0\n"; # by default, don't hit this cache for "nix-env -qas"
-    close FILE;
-}
-
-
-# Copy the archives and the corresponding NAR info files.
-print STDERR "copying archives...\n";
-
-my $totalNarSize = 0;
-my $totalCompressedSize = 0;
-
-for (my $n = 0; $n < scalar @storePaths2; $n++) {
-    my $storePath = $storePaths2[$n];
-    my $narDir = $narPaths[$n];
-    my $baseName = basename $storePath;
-
-    # Get info about the store path.
-    my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1);
-
-    # In some exceptional cases (such as VM tests that use the Nix
-    # store of the host), the database doesn't contain the hash.  So
-    # compute it.
-    if ($narHash =~ /^sha256:0*$/) {
-        my $nar = "$tmpDir/nar";
-        system("$Nix::Config::binDir/nix-store --dump $storePath > $nar") == 0
-            or die "cannot dump $storePath\n";
-        $narHash = `$Nix::Config::binDir/nix-hash --type sha256 --base32 --flat $nar`;
-        die "cannot hash ‘$nar’" if $? != 0;
-        chomp $narHash;
-        $narHash = "sha256:$narHash";
-        $narSize = stat("$nar")->size;
-        unlink $nar or die;
-    }
-
-    $totalNarSize += $narSize;
-
-    # Get info about the compressed NAR.
-    open HASH, "$narDir/nar-compressed-hash" or die "cannot open nar-compressed-hash";
-    my $compressedHash = <HASH>;
-    chomp $compressedHash;
-    $compressedHash =~ /^[0-9a-z]+$/ or die "invalid hash";
-    close HASH;
-
-    my $narName = "$compressedHash.nar" . ($compressionType eq "xz" ? ".xz" : $compressionType eq "bzip2" ? ".bz2" : "");
-
-    my $narFile = "$narDir/$narName";
-    (-f $narFile) or die "NAR file for $storePath not found";
-
-    my $compressedSize = stat($narFile)->size;
-    $totalCompressedSize += $compressedSize;
-
-    printf STDERR "%s [%.2f MiB, %.1f%%]\n", $storePath,
-        $compressedSize / (1024 * 1024), $compressedSize / $narSize * 100;
-
-    # Copy the compressed NAR.
-    my $dst = "$destDir/$narName";
-    if (! -f $dst) {
-        my $tmp = "$destDir/.tmp.$$.$narName";
-        if ($link) {
-            link($narFile, $tmp) or die "cannot link $tmp to $narFile: $!\n";
-        } else {
-            copy($narFile, $tmp) or die "cannot copy $narFile to $tmp: $!\n";
-        }
-        rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n";
-    }
-
-    # Write the info file.
-    my $info;
-    $info .= "StorePath: $storePath\n";
-    $info .= "URL: $narName\n";
-    $info .= "Compression: $compressionType\n";
-    $info .= "FileHash: sha256:$compressedHash\n";
-    $info .= "FileSize: $compressedSize\n";
-    $info .= "NarHash: $narHash\n";
-    $info .= "NarSize: $narSize\n";
-    $info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n";
-    if (defined $deriver) {
-        $info .= "Deriver: " . basename $deriver . "\n";
-        if (isValidPath($deriver)) {
-            my $drv = derivationFromPath($deriver);
-            $info .= "System: $drv->{platform}\n";
-        }
-    }
-
-    if (defined $secretKeyFile) {
-        my $secretKey = readFile $secretKeyFile;
-        my $fingerprint = fingerprintPath($storePath, $narHash, $narSize, $refs);
-        my $sig = signString($secretKey, $fingerprint);
-        $info .= "Sig: $sig\n";
-    }
-
-    my $pathHash = substr(basename($storePath), 0, 32);
-
-    $dst = "$destDir/$pathHash.narinfo";
-    if ($force || ! -f $dst) {
-        my $tmp = "$destDir/.tmp.$$.$pathHash.narinfo";
-        open INFO, ">$tmp" or die;
-        print INFO "$info" or die;
-        close INFO or die;
-        rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n";
-    }
-
-    $narFiles{$storePath} = [
-        { url => "$archivesURL/$narName"
-        , hash => "sha256:$compressedHash"
-        , size => $compressedSize
-        , compressionType => $compressionType
-        , narHash => "$narHash"
-        , narSize => $narSize
-        , references => join(" ", @{$refs})
-        , deriver => $deriver
-        }
-    ] if $writeManifest;
-}
-
-printf STDERR "total compressed size %.2f MiB, %.1f%%\n",
-    $totalCompressedSize / (1024 * 1024), $totalCompressedSize / ($totalNarSize || 1) * 100;
-
-
-# Optionally write a manifest.
-writeManifest($manifestPath // "$destDir/MANIFEST", \%narFiles, \()) if $writeManifest;
diff --git a/scripts/resolve-system-dependencies.pl.in b/scripts/resolve-system-dependencies.pl.in
deleted file mode 100755
index a20f0dc020fe..000000000000
--- a/scripts/resolve-system-dependencies.pl.in
+++ /dev/null
@@ -1,122 +0,0 @@
-#! @perl@ -w @perlFlags@
-
-use utf8;
-use strict;
-use warnings;
-use Cwd qw(realpath);
-use Errno;
-use File::Basename qw(dirname);
-use File::Path qw(make_path);
-use File::Spec::Functions qw(catfile);
-use List::Util qw(reduce);
-use IPC::Open3;
-use Nix::Config;
-use Nix::Store qw(derivationFromPath);
-use POSIX qw(uname);
-use Storable qw(lock_retrieve lock_store);
-
-my ($sysname, undef, $version, undef, $machine) = uname;
-$sysname =~ /Darwin/ or die "This tool is only meant to be used on Darwin systems.";
-
-my $cache = "$Nix::Config::stateDir/dependency-maps/$machine-$sysname-$version.map";
-
-make_path dirname($cache);
-
-our $DEPS;
-eval {
-  $DEPS = lock_retrieve($cache);
-};
-
-if($!{ENOENT}) {
-  lock_store {}, $cache;
-  $DEPS = {};
-} elsif($@) {
-  die "Unable to obtain a lock on dependency-map file $cache: $@";
-}
-
-sub mkset(@) {
-  my %set;
-  @set{@_} = ();
-  \%set
-}
-
-sub union($$) {
-  my ($set1, $set2) = @_;
-  my %new = (%$set1, %$set2);
-  \%new
-}
-
-sub cache_filepath($) {
-  my $fp = shift;
-  $fp =~ s/-/--/g;
-  $fp =~ s/\//-/g;
-  $fp =~ s/^-//g;
-  catfile $cache, $fp
-}
-
-sub resolve_tree {
-  sub resolve_tree_inner {
-    my ($lib, $TREE) = @_;
-    return if (defined $TREE->{$lib});
-    $TREE->{$lib} = mkset(@{cache_get($lib)});
-    foreach my $dep (keys %{$TREE->{$lib}}) {
-      resolve_tree_inner($dep, $TREE);
-    }
-    values %$TREE
-  }
-
-  reduce { union($a, $b) } {}, resolve_tree_inner(@_)
-}
-
-sub cache_get {
-  my $key = shift;
-  if (defined $DEPS->{$key}) {
-    $DEPS->{$key}
-  } else {
-    cache_insert($key);
-    cache_get($key)
-  }
-}
-
-sub cache_insert($) {
-  my $key = shift;
-  print STDERR "Finding dependencies for $key...\n";
-  my @deps = find_deps($key);
-  $DEPS->{$key} = \@deps;
-}
-
-sub find_deps($) {
-  my $lib = shift;
-  my($chld_in, $chld_out, $chld_err);
-  my $pid = open3($chld_in, $chld_out, $chld_err, "@otool@", "-L", "-arch", "x86_64", $lib);
-  waitpid($pid, 0);
-  my $line = readline $chld_out;
-  if($? == 0 and $line !~ /not an object file/) {
-    my @libs;
-    while(<$chld_out>) {
-      my $dep = (split /\s+/)[1];
-      push @libs, $dep unless $dep eq $lib or $dep =~ /\@rpath/;
-    }
-    @libs
-  } elsif (-l $lib) {
-    (realpath($lib))
-  } else {
-    ()
-  }
-}
-
-if (defined $ARGV[0]) {
-  my $deps = derivationFromPath($ARGV[0])->{"env"}->{"__impureHostDeps"};
-  if (defined $deps) {
-    my @files = split(/\s+/, $deps);
-    my $depcache = {};
-    my $depset = reduce { union($a, $b) } (map { resolve_tree($_, $depcache) } @files);
-    print "extra-chroot-dirs\n";
-    print join("\n", keys %$depset);
-    print "\n";
-  }
-  lock_store($DEPS, $cache);
-} else {
-  print STDERR "Usage: $0 path/to/derivation.drv\n";
-  exit 1
-}
diff --git a/scripts/show-duplication.pl b/scripts/show-duplication.pl
deleted file mode 100755
index 0604c6696c7a..000000000000
--- a/scripts/show-duplication.pl
+++ /dev/null
@@ -1,73 +0,0 @@
-#! /usr/bin/perl -w
-
-if (scalar @ARGV != 1) {
-    print "syntax: show-duplication.pl PATH\n";
-    exit 1;
-}
-
-my $root = $ARGV[0];
-
-
-my $nameRE = "(?:(?:[A-Za-z0-9\+\_]|(?:-[^0-9]))+)";
-my $versionRE = "(?:[A-Za-z0-9\.\-]+)";
-
-
-my %pkgInstances;
-
-
-my $pid = open(PATHS, "-|") || exec "nix-store", "-qR", $root;
-while (<PATHS>) {
-    chomp;
-    /^.*\/[0-9a-z]*-(.*)$/;
-    my $nameVersion = $1;
-    $nameVersion =~ /^($nameRE)(-($versionRE))?$/;
-    $name = $1;
-    $version = $3;
-    $version = "(unnumbered)" unless defined $version;
-#    print "$nameVersion $name $version\n";
-    push @{$pkgInstances{$name}}, {version => $version, path => $_};
-}
-close PATHS or exit 1;
-
-
-sub pathSize {
-    my $path = shift;
-    my @st = lstat $path or die;
-
-    my $size = $st[7];
-
-    if (-d $path) {
-        opendir DIR, $path or die;
-        foreach my $name (readdir DIR) {
-            next if $name eq "." || $name eq "..";
-            $size += pathSize("$path/$name");
-        }
-    }
-    
-    return $size;
-}
-
-
-my $totalPaths = 0;
-my $totalSize = 0, $totalWaste = 0;
-
-foreach my $name (sort {scalar @{$pkgInstances{$b}} <=> scalar @{$pkgInstances{$a}}} (keys %pkgInstances)) {
-    print "$name ", scalar @{$pkgInstances{$name}}, "\n";
-    my $allSize = 0;
-    foreach my $x (sort {$a->{version} cmp $b->{version}} @{$pkgInstances{$name}}) {
-        $totalPaths++;
-        my $size = pathSize $x->{path};
-        $allSize += $size;
-        print "    $x->{version} $size\n";
-    }
-    my $avgSize = int($allSize / scalar @{$pkgInstances{$name}});
-    my $waste = $allSize - $avgSize;
-    $totalSize += $allSize;
-    $totalWaste += $waste;
-    print "    average $avgSize, waste $waste\n";
-}
-
-
-my $avgDupl = $totalPaths / scalar (keys %pkgInstances);
-my $wasteFactor = ($totalWaste / $totalSize) * 100;
-print "average package duplication $avgDupl, total size $totalSize, total waste $totalWaste, $wasteFactor% wasted\n";
diff --git a/src/buildenv/buildenv.cc b/src/buildenv/buildenv.cc
new file mode 100644
index 000000000000..f997096eddbb
--- /dev/null
+++ b/src/buildenv/buildenv.cc
@@ -0,0 +1,186 @@
+#include "shared.hh"
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <algorithm>
+
+using namespace nix;
+
+typedef std::map<Path,int> Priorities;
+
+static bool isDirectory (const Path & path)
+{
+    struct stat st;
+    if (stat(path.c_str(), &st) == -1)
+        throw SysError(format("getting status of ‘%1%’") % path);
+    return S_ISDIR(st.st_mode);
+}
+
+static auto priorities = Priorities{};
+
+static auto symlinks = 0;
+
+/* For each activated package, create symlinks */
+static void createLinks(const Path & srcDir, const Path & dstDir, int priority)
+{
+    auto srcFiles = readDirectory(srcDir);
+    for (const auto & ent : srcFiles) {
+        if (ent.name[0] == '.')
+            /* not matched by glob */
+            continue;
+        const auto & srcFile = srcDir + "/" + ent.name;
+        auto dstFile = dstDir + "/" + ent.name;
+
+        /* The files below are special-cased so that they don't show up
+         * in user profiles, either because they are useless, or
+         * because they would cause pointless collisions (e.g., each
+         * Python package brings its own
+         * `$out/lib/pythonX.Y/site-packages/easy-install.pth'.)
+         */
+        if (hasSuffix(srcFile, "/propagated-build-inputs") ||
+            hasSuffix(srcFile, "/nix-support") ||
+            hasSuffix(srcFile, "/perllocal.pod") ||
+            hasSuffix(srcFile, "/info/dir") ||
+            hasSuffix(srcFile, "/log")) {
+            continue;
+        } else if (isDirectory(srcFile)) {
+            struct stat dstSt;
+            auto res = lstat(dstFile.c_str(), &dstSt);
+            if (res == 0) {
+                if (S_ISDIR(dstSt.st_mode)) {
+                    createLinks(srcFile, dstFile, priority);
+                    continue;
+                } else if (S_ISLNK(dstSt.st_mode)) {
+                    auto target = readLink(dstFile);
+                    if (!isDirectory(target))
+                        throw Error(format("collision between ‘%1%’ and non-directory ‘%2%’")
+                            % srcFile % target);
+                    if (unlink(dstFile.c_str()) == -1)
+                        throw SysError(format("unlinking ‘%1%’") % dstFile);
+                    if (mkdir(dstFile.c_str(), 0755) == -1)
+                        throw SysError(format("creating directory ‘%1%’"));
+                    createLinks(target, dstFile, priorities[dstFile]);
+                    createLinks(srcFile, dstFile, priority);
+                    continue;
+                }
+            } else if (errno != ENOENT)
+                throw SysError(format("getting status of ‘%1%’") % dstFile);
+        } else {
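+            /* The source is not a directory: if the target already
+             * exists as a symlink, resolve the collision by priority
+             * (equal priorities are an error; the link with the lower
+             * priority number wins), otherwise replace it below. */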
+            struct stat dstSt;
+            auto res = lstat(dstFile.c_str(), &dstSt);
+            if (res == 0) {
+                if (S_ISLNK(dstSt.st_mode)) {
+                    auto target = readLink(dstFile);
+                    auto prevPriority = priorities[dstFile];
+                    if (prevPriority == priority)
+                        throw Error(format(
+                                "collision between ‘%1%’ and ‘%2%’; "
+                                "use ‘nix-env --set-flag priority NUMBER PKGNAME’ "
+                                "to change the priority of one of the conflicting packages"
+                                ) % srcFile % target);
+                    if (prevPriority < priority)
+                        continue;
+                    if (unlink(dstFile.c_str()) == -1)
+                        throw SysError(format("unlinking ‘%1%’") % dstFile);
+                }
+            } else if (errno != ENOENT)
+                throw SysError(format("getting status of ‘%1%’") % dstFile);
+        }
+        createSymlink(srcFile, dstFile);
+        priorities[dstFile] = priority;
+        symlinks++;
+    }
+}
+
+typedef std::set<Path> FileProp;
+
+static auto done = FileProp{};
+static auto postponed = FileProp{};
+
+static auto out = string{};
+
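+/* Symlink a single package into the user environment and queue any
+   packages it propagates (nix-support/propagated-user-env-packages)
+   for later processing. */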
+static void addPkg(const Path & pkgDir, int priority)
+{
+    if (done.find(pkgDir) != done.end())
+        return;
+    done.insert(pkgDir);
+    createLinks(pkgDir, out, priority);
+    auto propagatedFN = pkgDir + "/nix-support/propagated-user-env-packages";
+    auto propagated = string{};
+    {
+        AutoCloseFD fd = open(propagatedFN.c_str(), O_RDONLY | O_CLOEXEC);
+        if (!fd) {
+            if (errno == ENOENT)
+                return;
+            throw SysError(format("opening ‘%1%’") % propagatedFN);
+        }
+        propagated = readLine(fd.get());
+    }
+    for (const auto & p : tokenizeString<std::vector<string>>(propagated, " "))
+        if (done.find(p) == done.end())
+            postponed.insert(p);
+}
+
+struct Package {
+    Path path;
+    bool active;
+    int priority;
+    Package(Path path, bool active, int priority) : path{std::move(path)}, active{active}, priority{priority} {}
+};
+
+typedef std::vector<Package> Packages;
+
+int main(int argc, char ** argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+        out = getEnv("out");
+        if (mkdir(out.c_str(), 0755) == -1)
+            throw SysError(format("creating %1%") % out);
+
+        /* Convert the stuff we get from the environment back into a coherent
+         * data type.
+         */
+        auto pkgs = Packages{};
+        auto derivations = tokenizeString<Strings>(getEnv("derivations"));
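+        /* ‘derivations’ is a flat list of groups: an ‘active’ flag,
+         * a priority, the number of outputs, and then that many
+         * store paths. */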
+        while (!derivations.empty()) {
+            /* !!! We're trusting the caller to structure derivations env var correctly */
+            auto active = derivations.front(); derivations.pop_front();
+            auto priority = stoi(derivations.front()); derivations.pop_front();
+            auto outputs = stoi(derivations.front()); derivations.pop_front();
+            for (auto n = 0; n < outputs; n++) {
+                auto path = derivations.front(); derivations.pop_front();
+                pkgs.emplace_back(path, active != "false", priority);
+            }
+        }
+
+        /* Symlink to the packages that have been installed explicitly by the
+         * user. Process in priority order to reduce unnecessary
+         * symlink/unlink steps.
+         */
+        std::sort(pkgs.begin(), pkgs.end(), [](const Package & a, const Package & b) {
+            return a.priority < b.priority || (a.priority == b.priority && a.path < b.path);
+        });
+        for (const auto & pkg : pkgs)
+            if (pkg.active)
+                addPkg(pkg.path, pkg.priority);
+
+        /* Symlink to the packages that have been "propagated" by packages
+         * installed by the user (i.e., package X declares that it wants Y
+         * installed as well). We do these later because they have a lower
+         * priority in case of collisions.
+         */
+        auto priorityCounter = 1000;
+        while (!postponed.empty()) {
+            auto pkgDirs = postponed;
+            postponed = FileProp{};
+            for (const auto & pkgDir : pkgDirs)
+                addPkg(pkgDir, priorityCounter++);
+        }
+
+        std::cerr << "created " << symlinks << " symlinks in user environment\n";
+
+        createSymlink(getEnv("manifest"), out + "/manifest.nix");
+    });
+}
+
diff --git a/src/buildenv/local.mk b/src/buildenv/local.mk
new file mode 100644
index 000000000000..17ec13b235f4
--- /dev/null
+++ b/src/buildenv/local.mk
@@ -0,0 +1,9 @@
+programs += buildenv
+
+buildenv_DIR := $(d)
+
+buildenv_INSTALL_DIR := $(libexecdir)/nix
+
+buildenv_LIBS = libmain libstore libutil libformat
+
+buildenv_SOURCES := $(d)/buildenv.cc
diff --git a/src/download-via-ssh/download-via-ssh.cc b/src/download-via-ssh/download-via-ssh.cc
index ed551ac461fb..ff28a60ff4b0 100644
--- a/src/download-via-ssh/download-via-ssh.cc
+++ b/src/download-via-ssh/download-via-ssh.cc
@@ -132,7 +132,7 @@ int main(int argc, char * * argv)
                 throw UsageError("download-via-ssh: --substitute takes exactly two arguments");
             Path storePath = argv[2];
             Path destPath = argv[3];
-            printMsg(lvlError, format("downloading ‘%1%’ via SSH from ‘%2%’...") % storePath % host);
+            printError(format("downloading ‘%1%’ via SSH from ‘%2%’...") % storePath % host);
             substitute(pipes, storePath, destPath);
         }
         else
diff --git a/src/libexpr/common-opts.cc b/src/libexpr/common-opts.cc
index 8a7989aac663..06d6ed87df94 100644
--- a/src/libexpr/common-opts.cc
+++ b/src/libexpr/common-opts.cc
@@ -55,7 +55,7 @@ bool parseSearchPathArg(Strings::iterator & i,
 Path lookupFileArg(EvalState & state, string s)
 {
     if (isUri(s))
-        return makeDownloader()->downloadCached(state.store, s, true);
+        return getDownloader()->downloadCached(state.store, s, true);
     else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
         Path p = s.substr(1, s.size() - 2);
         return state.findFile(p);
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 0833603b2a9e..64f3874db614 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -293,6 +293,8 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
     , sColumn(symbols.create("column"))
     , sFunctor(symbols.create("__functor"))
     , sToString(symbols.create("__toString"))
+    , sRight(symbols.create("right"))
+    , sWrong(symbols.create("wrong"))
     , store(store)
     , baseEnv(allocEnv(128))
     , staticBaseEnv(false, 0)
@@ -379,9 +381,9 @@ void EvalState::addPrimOp(const string & name,
 }
 
 
-void EvalState::getBuiltin(const string & name, Value & v)
+Value & EvalState::getBuiltin(const string & name)
 {
-    v = *baseEnv.values[0]->attrs->find(symbols.create(name))->value;
+    return *baseEnv.values[0]->attrs->find(symbols.create(name))->value;
 }
 
 
@@ -462,7 +464,7 @@ void mkString(Value & v, const char * s)
 }
 
 
-void mkString(Value & v, const string & s, const PathSet & context)
+Value & mkString(Value & v, const string & s, const PathSet & context)
 {
     mkString(v, s.c_str());
     if (!context.empty()) {
@@ -473,6 +475,7 @@ void mkString(Value & v, const string & s, const PathSet & context)
             v.string.context[n++] = dupString(i.c_str());
         v.string.context[n] = 0;
     }
+    return v;
 }
 
 
@@ -993,11 +996,18 @@ void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & po
     if (fun.type == tAttrs) {
       auto found = fun.attrs->find(sFunctor);
       if (found != fun.attrs->end()) {
+        /* fun may be allocated on the stack of the calling function,
+         * but for functors we may keep a reference, so heap-allocate
+         * a copy and use that instead.
+         */
+        auto & fun2 = *allocValue();
+        fun2 = fun;
+        /* !!! Should we use the attr pos here? */
         forceValue(*found->value, pos);
-        Value * v2 = allocValue();
-        callFunction(*found->value, fun, *v2, pos);
-        forceValue(*v2, pos);
-        return callFunction(*v2, arg, v, pos);
+        Value v2;
+        callFunction(*found->value, fun2, v2, pos);
+        forceValue(v2, pos);
+        return callFunction(v2, arg, v, pos);
       }
     }
 
@@ -1368,11 +1378,11 @@ NixFloat EvalState::forceFloat(Value & v, const Pos & pos)
 }
 
 
-bool EvalState::forceBool(Value & v)
+bool EvalState::forceBool(Value & v, const Pos & pos)
 {
     forceValue(v);
     if (v.type != tBool)
-        throwTypeError("value is %1% while a Boolean was expected", v);
+        throwTypeError("value is %1% while a Boolean was expected, at %2%", v, pos);
     return v.boolean;
 }
 
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
index 80e369f2d68f..195cb0db3acc 100644
--- a/src/libexpr/eval.hh
+++ b/src/libexpr/eval.hh
@@ -8,10 +8,6 @@
 
 #include <map>
 
-#if HAVE_BOEHMGC
-#include <gc/gc_allocator.h>
-#endif
-
 
 namespace nix {
 
@@ -43,7 +39,7 @@ struct Env
 };
 
 
-void mkString(Value & v, const string & s, const PathSet & context = PathSet());
+Value & mkString(Value & v, const string & s, const PathSet & context = PathSet());
 
 void copyContext(const Value & v, PathSet & context);
 
@@ -71,7 +67,8 @@ public:
 
     const Symbol sWith, sOutPath, sDrvPath, sType, sMeta, sName, sValue,
         sSystem, sOverrides, sOutputs, sOutputName, sIgnoreNulls,
-        sFile, sLine, sColumn, sFunctor, sToString;
+        sFile, sLine, sColumn, sFunctor, sToString,
+        sRight, sWrong;
     Symbol sDerivationNix;
 
     /* If set, force copying files to the Nix store even if they
@@ -108,6 +105,8 @@ public:
 
     void addToSearchPath(const string & s);
 
+    SearchPath getSearchPath() { return searchPath; }
+
     Path checkSourcePath(const Path & path);
 
     /* Parse a Nix expression from the specified file. */
@@ -154,7 +153,7 @@ public:
     /* Force `v', and then verify that it has the expected type. */
     NixInt forceInt(Value & v, const Pos & pos);
     NixFloat forceFloat(Value & v, const Pos & pos);
-    bool forceBool(Value & v);
+    bool forceBool(Value & v, const Pos & pos);
     inline void forceAttrs(Value & v);
     inline void forceAttrs(Value & v, const Pos & pos);
     inline void forceList(Value & v);
@@ -204,7 +203,7 @@ private:
 
 public:
 
-    void getBuiltin(const string & name, Value & v);
+    Value & getBuiltin(const string & name);
 
 private:
 
diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc
index b06c539de0fb..dc5def911ca0 100644
--- a/src/libexpr/get-drvs.cc
+++ b/src/libexpr/get-drvs.cc
@@ -301,7 +301,7 @@ static void getDerivations(EvalState & state, Value & vIn,
                    `recurseForDerivations = true' attribute. */
                 if (v2.type == tAttrs) {
                     Bindings::iterator j = v2.attrs->find(state.symbols.create("recurseForDerivations"));
-                    if (j != v2.attrs->end() && state.forceBool(*j->value))
+                    if (j != v2.attrs->end() && state.forceBool(*j->value, *j->pos))
                         getDerivations(state, v2, pathPrefix2, autoArgs, drvs, done, ignoreAssertionFailures);
                 }
             }
diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc
index 1daf84600dca..f671802bcc24 100644
--- a/src/libexpr/json-to-value.cc
+++ b/src/libexpr/json-to-value.cc
@@ -12,15 +12,6 @@ static void skipWhitespace(const char * & s)
 }
 
 
-#if HAVE_BOEHMGC
-typedef std::vector<Value *, gc_allocator<Value *> > ValueVector;
-typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<Value *> > ValueMap;
-#else
-typedef std::vector<Value *> ValueVector;
-typedef std::map<Symbol, Value *> ValueMap;
-#endif
-
-
 static string parseJSONString(const char * & s)
 {
     string res;
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
index 776e5cb39b81..d07eedddaf6b 100644
--- a/src/libexpr/parser.y
+++ b/src/libexpr/parser.y
@@ -662,9 +662,9 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
                 // FIXME: support specifying revision/branch
                 res = { true, exportGit(store, elem.second, "master") };
             else
-                res = { true, makeDownloader()->downloadCached(store, elem.second, true) };
+                res = { true, getDownloader()->downloadCached(store, elem.second, true) };
         } catch (DownloadError & e) {
-            printMsg(lvlError, format("warning: Nix search path entry ‘%1%’ cannot be downloaded, ignoring") % elem.second);
+            printError(format("warning: Nix search path entry ‘%1%’ cannot be downloaded, ignoring") % elem.second);
             res = { false, "" };
         }
     } else {
@@ -672,7 +672,7 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
         if (pathExists(path))
             res = { true, path };
         else {
-            printMsg(lvlError, format("warning: Nix search path entry ‘%1%’ does not exist, ignoring") % elem.second);
+            printError(format("warning: Nix search path entry ‘%1%’ does not exist, ignoring") % elem.second);
             res = { false, "" };
         }
     }
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index 565ed69ae77b..377fb8c75eb4 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -427,9 +427,9 @@ static void prim_trace(EvalState & state, const Pos & pos, Value * * args, Value
 {
     state.forceValue(*args[0]);
     if (args[0]->type == tString)
-        printMsg(lvlError, format("trace: %1%") % args[0]->string.s);
+        printError(format("trace: %1%") % args[0]->string.s);
     else
-        printMsg(lvlError, format("trace: %1%") % *args[0]);
+        printError(format("trace: %1%") % *args[0]);
     state.forceValue(*args[1]);
     v = *args[1];
 }
@@ -477,7 +477,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
     bool ignoreNulls = false;
     attr = args[0]->attrs->find(state.sIgnoreNulls);
     if (attr != args[0]->attrs->end())
-        ignoreNulls = state.forceBool(*attr->value);
+        ignoreNulls = state.forceBool(*attr->value, pos);
 
     /* Build the derivation expression by processing the attributes. */
     Derivation drv;
@@ -624,7 +624,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
         outputHash = printHash(h);
         if (outputHashRecursive) outputHashAlgo = "r:" + outputHashAlgo;
 
-        Path outPath = state.store->makeFixedOutputPath(outputHashRecursive, ht, h, drvName);
+        Path outPath = state.store->makeFixedOutputPath(outputHashRecursive, h, drvName);
         drv.env["out"] = outPath;
         drv.outputs["out"] = DerivationOutput(outPath, outputHashAlgo, outputHash);
     }
@@ -673,6 +673,19 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
 }
 
 
+/* Return a placeholder string for the specified output that will be
+   substituted by the corresponding output path at build time. For
+   example, ‘placeholder "out"’ returns the string
+   /1rz4g4znpzjwh1xymhjpm42vipw92pr73vdgl6xs1hycac8kf2n9. At build
+   time, any occurrence of this string in a derivation attribute will
+   be replaced with the concrete path in the Nix store of the output
+   ‘out’. */
+static void prim_placeholder(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    mkString(v, hashPlaceholder(state.forceStringNoCtx(*args[0], pos)));
+}
+
+
 /*************************************************************
  * Paths
  *************************************************************/
@@ -912,9 +925,10 @@ struct FilterFromExpr : PathFilter
 {
     EvalState & state;
     Value & filter;
+    Pos pos;
 
-    FilterFromExpr(EvalState & state, Value & filter)
-        : state(state), filter(filter)
+    FilterFromExpr(EvalState & state, Value & filter, const Pos & pos)
+        : state(state), filter(filter), pos(pos)
     {
     }
 
@@ -942,7 +956,7 @@ struct FilterFromExpr : PathFilter
         Value res;
         state.callFunction(fun2, arg2, res, noPos);
 
-        return state.forceBool(res);
+        return state.forceBool(res, pos);
     }
 };
 
@@ -958,7 +972,7 @@ static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args
     if (args[0]->type != tLambda)
         throw TypeError(format("first argument in call to ‘filterSource’ is not a function but %1%, at %2%") % showType(*args[0]) % pos);
 
-    FilterFromExpr filter(state, *args[0]);
+    FilterFromExpr filter(state, *args[0], pos);
 
     path = state.checkSourcePath(path);
 
@@ -1278,7 +1292,7 @@ static void prim_filter(EvalState & state, const Pos & pos, Value * * args, Valu
     for (unsigned int n = 0; n < args[1]->listSize(); ++n) {
         Value res;
         state.callFunction(*args[0], *args[1]->listElems()[n], res, noPos);
-        if (state.forceBool(res))
+        if (state.forceBool(res, pos))
             vs[k++] = args[1]->listElems()[n];
         else
             same = false;
@@ -1354,7 +1368,7 @@ static void anyOrAll(bool any, EvalState & state, const Pos & pos, Value * * arg
     Value vTmp;
     for (unsigned int n = 0; n < args[1]->listSize(); ++n) {
         state.callFunction(*args[0], *args[1]->listElems()[n], vTmp, pos);
-        bool res = state.forceBool(vTmp);
+        bool res = state.forceBool(vTmp, pos);
         if (res == any) {
             mkBool(v, any);
             return;
@@ -1420,7 +1434,7 @@ static void prim_sort(EvalState & state, const Pos & pos, Value * * args, Value
         Value vTmp1, vTmp2;
         state.callFunction(*args[0], *a, vTmp1, pos);
         state.callFunction(vTmp1, *b, vTmp2, pos);
-        return state.forceBool(vTmp2);
+        return state.forceBool(vTmp2, pos);
     };
 
     /* FIXME: std::sort can segfault if the comparator is not a strict
@@ -1430,6 +1444,40 @@ static void prim_sort(EvalState & state, const Pos & pos, Value * * args, Value
 }
 
 
+static void prim_partition(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceFunction(*args[0], pos);
+    state.forceList(*args[1], pos);
+
+    auto len = args[1]->listSize();
+
+    ValueVector right, wrong;
+
+    for (unsigned int n = 0; n < len; ++n) {
+        auto vElem = args[1]->listElems()[n];
+        state.forceValue(*vElem);
+        Value res;
+        state.callFunction(*args[0], *vElem, res, pos);
+        if (state.forceBool(res, pos))
+            right.push_back(vElem);
+        else
+            wrong.push_back(vElem);
+    }
+
+    state.mkAttrs(v, 2);
+
+    Value * vRight = state.allocAttr(v, state.sRight);
+    state.mkList(*vRight, right.size());
+    memcpy(vRight->listElems(), right.data(), sizeof(Value *) * right.size());
+
+    Value * vWrong = state.allocAttr(v, state.sWrong);
+    state.mkList(*vWrong, wrong.size());
+    memcpy(vWrong->listElems(), wrong.data(), sizeof(Value *) * wrong.size());
+
+    v.attrs->sort();
+}
+
+
 /*************************************************************
  * Integer arithmetic
  *************************************************************/
@@ -1620,13 +1668,18 @@ static void prim_replaceStrings(EvalState & state, const Pos & pos, Value * * ar
     if (args[0]->listSize() != args[1]->listSize())
         throw EvalError(format("‘from’ and ‘to’ arguments to ‘replaceStrings’ have different lengths, at %1%") % pos);
 
-    Strings from;
+    vector<string> from;
+    from.reserve(args[0]->listSize());
     for (unsigned int n = 0; n < args[0]->listSize(); ++n)
-        from.push_back(state.forceStringNoCtx(*args[0]->listElems()[n], pos));
+        from.push_back(state.forceString(*args[0]->listElems()[n], pos));
 
-    Strings to;
-    for (unsigned int n = 0; n < args[1]->listSize(); ++n)
-        to.push_back(state.forceStringNoCtx(*args[1]->listElems()[n], pos));
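+    /* Keep the string context of each ‘to’ string so that it can be
+       added to the result's context when that replacement is used. */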
+    vector<std::pair<string, PathSet>> to;
+    to.reserve(args[1]->listSize());
+    for (unsigned int n = 0; n < args[1]->listSize(); ++n) {
+        PathSet ctx;
+        auto s = state.forceString(*args[1]->listElems()[n], ctx, pos);
+        to.push_back(std::make_pair(std::move(s), std::move(ctx)));
+    }
 
     PathSet context;
     auto s = state.forceString(*args[2], context, pos);
@@ -1634,11 +1687,16 @@ static void prim_replaceStrings(EvalState & state, const Pos & pos, Value * * ar
     string res;
     for (size_t p = 0; p < s.size(); ) {
         bool found = false;
-        for (auto i = from.begin(), j = to.begin(); i != from.end(); ++i, ++j)
+        auto i = from.begin();
+        auto j = to.begin();
+        for (; i != from.end(); ++i, ++j)
             if (s.compare(p, i->size(), *i) == 0) {
                 found = true;
                 p += i->size();
-                res += *j;
+                res += j->first;
+                for (auto& path : j->second)
+                    context.insert(path);
+                j->second.clear();
                 break;
             }
         if (!found) res += s[p++];
@@ -1680,9 +1738,9 @@ static void prim_compareVersions(EvalState & state, const Pos & pos, Value * * a
 void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
     const string & who, bool unpack)
 {
-    if (state.restricted) throw Error(format("‘%1%’ is not allowed in restricted mode") % who);
-
     string url;
+    Hash expectedHash;
+    string name;
 
     state.forceValue(*args[0]);
 
@@ -1691,9 +1749,13 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
         state.forceAttrs(*args[0], pos);
 
         for (auto & attr : *args[0]->attrs) {
-            string name(attr.name);
-            if (name == "url")
+            string n(attr.name);
+            if (n == "url")
                 url = state.forceStringNoCtx(*attr.value, *attr.pos);
+            else if (n == "sha256")
+                expectedHash = parseHash16or32(htSHA256, state.forceStringNoCtx(*attr.value, *attr.pos));
+            else if (n == "name")
+                name = state.forceStringNoCtx(*attr.value, *attr.pos);
             else
                 throw EvalError(format("unsupported argument ‘%1%’ to ‘%2%’, at %3%") % attr.name % who % attr.pos);
         }
@@ -1704,7 +1766,10 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
     } else
         url = state.forceStringNoCtx(*args[0], pos);
 
-    Path res = makeDownloader()->downloadCached(state.store, url, unpack);
+    if (state.restricted && !expectedHash)
+        throw Error(format("‘%1%’ is not allowed in restricted mode") % who);
+
+    Path res = getDownloader()->downloadCached(state.store, url, unpack, name, expectedHash);
     mkString(v, res, PathSet({res}));
 }
 
@@ -1851,6 +1916,7 @@ void EvalState::createBaseEnv()
     addPrimOp("__all", 2, prim_all);
     addPrimOp("__genList", 2, prim_genList);
     addPrimOp("__sort", 2, prim_sort);
+    addPrimOp("__partition", 2, prim_partition);
 
     // Integer arithmetic
     addPrimOp("__add", 2, prim_add);
@@ -1876,6 +1942,7 @@ void EvalState::createBaseEnv()
 
     // Derivations
     addPrimOp("derivationStrict", 1, prim_derivationStrict);
+    addPrimOp("placeholder", 1, prim_placeholder);
 
     // Networking
     addPrimOp("__fetchurl", 1, prim_fetchurl);
diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc
index 47ee324a6e4f..72e413e4491e 100644
--- a/src/libexpr/value-to-json.cc
+++ b/src/libexpr/value-to-json.cc
@@ -1,4 +1,5 @@
 #include "value-to-json.hh"
+#include "json.hh"
 #include "eval-inline.hh"
 #include "util.hh"
 
@@ -8,24 +9,8 @@
 
 namespace nix {
 
-
-void escapeJSON(std::ostream & str, const string & s)
-{
-    str << "\"";
-    for (auto & i : s)
-        if (i == '\"' || i == '\\') str << "\\" << i;
-        else if (i == '\n') str << "\\n";
-        else if (i == '\r') str << "\\r";
-        else if (i == '\t') str << "\\t";
-        else if (i >= 0 && i < 32)
-            str << "\\u" << std::setfill('0') << std::setw(4) << std::hex << (uint16_t) i << std::dec;
-        else str << i;
-    str << "\"";
-}
-
-
 void printValueAsJSON(EvalState & state, bool strict,
-    Value & v, std::ostream & str, PathSet & context)
+    Value & v, JSONPlaceholder & out, PathSet & context)
 {
     checkInterrupt();
 
@@ -34,58 +19,58 @@ void printValueAsJSON(EvalState & state, bool strict,
     switch (v.type) {
 
         case tInt:
-            str << v.integer;
+            out.write(v.integer);
             break;
 
         case tBool:
-            str << (v.boolean ? "true" : "false");
+            out.write(v.boolean);
             break;
 
         case tString:
             copyContext(v, context);
-            escapeJSON(str, v.string.s);
+            out.write(v.string.s);
             break;
 
         case tPath:
-            escapeJSON(str, state.copyPathToStore(context, v.path));
+            out.write(state.copyPathToStore(context, v.path));
             break;
 
         case tNull:
-            str << "null";
+            out.write(nullptr);
             break;
 
         case tAttrs: {
             Bindings::iterator i = v.attrs->find(state.sOutPath);
             if (i == v.attrs->end()) {
-                JSONObject json(str);
+                auto obj(out.object());
                 StringSet names;
                 for (auto & j : *v.attrs)
                     names.insert(j.name);
                 for (auto & j : names) {
                     Attr & a(*v.attrs->find(state.symbols.create(j)));
-                    json.attr(j);
-                    printValueAsJSON(state, strict, *a.value, str, context);
+                    auto placeholder(obj.placeholder(j));
+                    printValueAsJSON(state, strict, *a.value, placeholder, context);
                 }
             } else
-                printValueAsJSON(state, strict, *i->value, str, context);
+                printValueAsJSON(state, strict, *i->value, out, context);
             break;
         }
 
         case tList1: case tList2: case tListN: {
-            JSONList json(str);
+            auto list(out.list());
             for (unsigned int n = 0; n < v.listSize(); ++n) {
-                json.elem();
-                printValueAsJSON(state, strict, *v.listElems()[n], str, context);
+                auto placeholder(list.placeholder());
+                printValueAsJSON(state, strict, *v.listElems()[n], placeholder, context);
             }
             break;
         }
 
         case tExternal:
-            v.external->printValueAsJSON(state, strict, str, context);
+            v.external->printValueAsJSON(state, strict, out, context);
             break;
 
         case tFloat:
-            str << v.fpoint;
+            out.write(v.fpoint);
             break;
 
         default:
@@ -93,9 +78,15 @@ void printValueAsJSON(EvalState & state, bool strict,
     }
 }
 
+void printValueAsJSON(EvalState & state, bool strict,
+    Value & v, std::ostream & str, PathSet & context)
+{
+    JSONPlaceholder out(str);
+    printValueAsJSON(state, strict, v, out, context);
+}
 
 void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
-      std::ostream & str, PathSet & context) const
+    JSONPlaceholder & out, PathSet & context) const
 {
     throw TypeError(format("cannot convert %1% to JSON") % showType());
 }
diff --git a/src/libexpr/value-to-json.hh b/src/libexpr/value-to-json.hh
index c59caf5641bc..67fed6487dd9 100644
--- a/src/libexpr/value-to-json.hh
+++ b/src/libexpr/value-to-json.hh
@@ -8,73 +8,12 @@
 
 namespace nix {
 
-void printValueAsJSON(EvalState & state, bool strict,
-    Value & v, std::ostream & out, PathSet & context);
-
-void escapeJSON(std::ostream & str, const string & s);
+class JSONPlaceholder;
 
-struct JSONObject
-{
-    std::ostream & str;
-    bool first;
-    JSONObject(std::ostream & str) : str(str), first(true)
-    {
-        str << "{";
-    }
-    ~JSONObject()
-    {
-        str << "}";
-    }
-    void attr(const string & s)
-    {
-        if (!first) str << ","; else first = false;
-        escapeJSON(str, s);
-        str << ":";
-    }
-    void attr(const string & s, const string & t)
-    {
-        attr(s);
-        escapeJSON(str, t);
-    }
-    void attr(const string & s, const char * t)
-    {
-        attr(s);
-        escapeJSON(str, t);
-    }
-    void attr(const string & s, bool b)
-    {
-        attr(s);
-        str << (b ? "true" : "false");
-    }
-    template<typename T>
-    void attr(const string & s, const T & n)
-    {
-        attr(s);
-        str << n;
-    }
-};
+void printValueAsJSON(EvalState & state, bool strict,
+    Value & v, JSONPlaceholder & out, PathSet & context);
 
-struct JSONList
-{
-    std::ostream & str;
-    bool first;
-    JSONList(std::ostream & str) : str(str), first(true)
-    {
-        str << "[";
-    }
-    ~JSONList()
-    {
-        str << "]";
-    }
-    void elem()
-    {
-        if (!first) str << ","; else first = false;
-    }
-    void elem(const string & s)
-    {
-        elem();
-        escapeJSON(str, s);
-    }
-};
+void printValueAsJSON(EvalState & state, bool strict,
+    Value & v, std::ostream & str, PathSet & context);
 
 }
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
index 62bdd9281f08..271e6a1b24a2 100644
--- a/src/libexpr/value.hh
+++ b/src/libexpr/value.hh
@@ -1,7 +1,12 @@
 #pragma once
 
+#include "config.h"
 #include "symbol-table.hh"
 
+#if HAVE_BOEHMGC
+#include <gc/gc_allocator.h>
+#endif
+
 namespace nix {
 
 
@@ -36,6 +41,7 @@ class Symbol;
 struct Pos;
 class EvalState;
 class XMLWriter;
+class JSONPlaceholder;
 
 
 typedef long NixInt;
@@ -73,7 +79,7 @@ class ExternalValueBase
 
     /* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */
     virtual void printValueAsJSON(EvalState & state, bool strict,
-        std::ostream & str, PathSet & context) const;
+        JSONPlaceholder & out, PathSet & context) const;
 
     /* Print the value as XML. Defaults to unevaluated */
     virtual void printValueAsXML(EvalState & state, bool strict, bool location,
@@ -249,4 +255,13 @@ void mkPath(Value & v, const char * s);
 size_t valueSize(Value & v);
 
 
+#if HAVE_BOEHMGC
+typedef std::vector<Value *, gc_allocator<Value *> > ValueVector;
+typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<Value *> > ValueMap;
+#else
+typedef std::vector<Value *> ValueVector;
+typedef std::map<Symbol, Value *> ValueMap;
+#endif
+
+
 }
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 515d80091de3..0c6e3fb76d64 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -56,26 +56,26 @@ void printMissing(ref<Store> store, const PathSet & willBuild,
     unsigned long long downloadSize, unsigned long long narSize)
 {
     if (!willBuild.empty()) {
-        printMsg(lvlInfo, format("these derivations will be built:"));
+        printInfo(format("these derivations will be built:"));
         Paths sorted = store->topoSortPaths(willBuild);
         reverse(sorted.begin(), sorted.end());
         for (auto & i : sorted)
-            printMsg(lvlInfo, format("  %1%") % i);
+            printInfo(format("  %1%") % i);
     }
 
     if (!willSubstitute.empty()) {
-        printMsg(lvlInfo, format("these paths will be fetched (%.2f MiB download, %.2f MiB unpacked):")
+        printInfo(format("these paths will be fetched (%.2f MiB download, %.2f MiB unpacked):")
             % (downloadSize / (1024.0 * 1024.0))
             % (narSize / (1024.0 * 1024.0)));
         for (auto & i : willSubstitute)
-            printMsg(lvlInfo, format("  %1%") % i);
+            printInfo(format("  %1%") % i);
     }
 
     if (!unknown.empty()) {
-        printMsg(lvlInfo, format("don't know how to build these paths%1%:")
+        printInfo(format("don't know how to build these paths%1%:")
             % (settings.readOnlyMode ? " (may be caused by read-only store access)" : ""));
         for (auto & i : unknown)
-            printMsg(lvlInfo, format("  %1%") % i);
+            printInfo(format("  %1%") % i);
     }
 }
 
@@ -252,7 +252,6 @@ void printVersion(const string & programName)
         std::cout << "Configuration file: " << settings.nixConfDir + "/nix.conf" << "\n";
         std::cout << "Store directory: " << settings.nixStore << "\n";
         std::cout << "State directory: " << settings.nixStateDir << "\n";
-        std::cout << "Database directory: " << settings.nixDBPath << "\n";
     }
     throw Exit();
 }
@@ -283,20 +282,20 @@ int handleExceptions(const string & programName, std::function<void()> fun)
     } catch (Exit & e) {
         return e.status;
     } catch (UsageError & e) {
-        printMsg(lvlError,
+        printError(
             format(error + "%1%\nTry ‘%2% --help’ for more information.")
             % e.what() % programName);
         return 1;
     } catch (BaseError & e) {
-        printMsg(lvlError, format(error + "%1%%2%") % (settings.showTrace ? e.prefix() : "") % e.msg());
+        printError(format(error + "%1%%2%") % (settings.showTrace ? e.prefix() : "") % e.msg());
         if (e.prefix() != "" && !settings.showTrace)
-            printMsg(lvlError, "(use ‘--show-trace’ to show detailed location information)");
+            printError("(use ‘--show-trace’ to show detailed location information)");
         return e.status;
     } catch (std::bad_alloc & e) {
-        printMsg(lvlError, error + "out of memory");
+        printError(error + "out of memory");
         return 1;
     } catch (std::exception & e) {
-        printMsg(lvlError, error + e.what());
+        printError(error + e.what());
         return 1;
     }
 
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 668f1e566c01..0ffbd6e552b7 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -12,6 +12,8 @@
 
 #include <chrono>
 
+#include <future>
+
 namespace nix {
 
 BinaryCacheStore::BinaryCacheStore(const Params & params)
@@ -58,6 +60,19 @@ void BinaryCacheStore::notImpl()
     throw Error("operation not implemented for binary cache stores");
 }
 
+std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
+{
+    std::promise<std::shared_ptr<std::string>> promise;
+    getFile(path,
+        [&](std::shared_ptr<std::string> result) {
+            promise.set_value(result);
+        },
+        [&](std::exception_ptr exc) {
+            promise.set_exception(exc);
+        });
+    return promise.get_future().get();
+}
+
 Path BinaryCacheStore::narInfoFileFor(const Path & storePath)
 {
     assertStorePath(storePath);
@@ -176,17 +191,22 @@ void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
     sink((unsigned char *) nar->c_str(), nar->size());
 }
 
-std::shared_ptr<ValidPathInfo> BinaryCacheStore::queryPathInfoUncached(const Path & storePath)
+void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
+        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+        std::function<void(std::exception_ptr exc)> failure)
 {
     auto narInfoFile = narInfoFileFor(storePath);
-    auto data = getFile(narInfoFile);
-    if (!data) return 0;
 
-    auto narInfo = make_ref<NarInfo>(*this, *data, narInfoFile);
+    getFile(narInfoFile,
+        [=](std::shared_ptr<std::string> data) {
+            if (!data) return success(0);
 
-    stats.narInfoRead++;
+            stats.narInfoRead++;
 
-    return std::shared_ptr<NarInfo>(narInfo);
+            callSuccess(success, failure, (std::shared_ptr<ValidPathInfo>)
+                std::make_shared<NarInfo>(*this, *data, narInfoFile));
+        },
+        failure);
 }
 
 Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
@@ -209,7 +229,7 @@ Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
     }
 
     ValidPathInfo info;
-    info.path = makeFixedOutputPath(recursive, hashAlgo, h, name);
+    info.path = makeFixedOutputPath(recursive, h, name);
 
     addToStore(info, *sink.s, repair);
 
@@ -254,7 +274,7 @@ struct BinaryCacheStoreAccessor : public FSAccessor
         std::string restPath = std::string(path, storePath.size());
 
         if (!store->isValidPath(storePath))
-            throw Error(format("path ‘%1%’ is not a valid store path") % storePath);
+            throw InvalidPath(format("path ‘%1%’ is not a valid store path") % storePath);
 
         auto i = nars.find(storePath);
         if (i != nars.end()) return {i->second, restPath};
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index 2d10179f32ab..41671b7d9ffd 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -31,7 +31,11 @@ protected:
 
     /* Return the contents of the specified file, or null if it
        doesn't exist. */
-    virtual std::shared_ptr<std::string> getFile(const std::string & path) = 0;
+    virtual void getFile(const std::string & path,
+        std::function<void(std::shared_ptr<std::string>)> success,
+        std::function<void(std::exception_ptr exc)> failure) = 0;
+
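+    /* Synchronous variant of getFile() that blocks on a
+       promise/future pair until the asynchronous version delivers
+       its result. */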
+    std::shared_ptr<std::string> getFile(const std::string & path);
 
     bool wantMassQuery_ = false;
     int priority = 50;
@@ -56,7 +60,9 @@ public:
     PathSet queryAllValidPaths() override
     { notImpl(); }
 
-    std::shared_ptr<ValidPathInfo> queryPathInfoUncached(const Path & path) override;
+    void queryPathInfoUncached(const Path & path,
+        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+        std::function<void(std::exception_ptr exc)> failure) override;
 
     void queryReferrers(const Path & path,
         PathSet & referrers) override
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index af2c908c30db..e7054a05a53a 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -193,6 +193,7 @@ bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) {
 struct Child
 {
     WeakGoalPtr goal;
+    Goal * goal2; // ugly hack: raw pointer, needed because childTerminated() may be called from a goal's destructor, when the weak_ptr above has already expired
     set<int> fds;
     bool respectTimeouts;
     bool inBuildSlot;
@@ -284,7 +285,7 @@ public:
        false if there is no sense in waking up goals that are sleeping
        because they can't run yet (e.g., there is no free build slot,
        or the hook would still say `postpone'). */
-    void childTerminated(GoalPtr goal, bool wakeSleepers = true);
+    void childTerminated(Goal * goal, bool wakeSleepers = true);
 
     /* Put `goal' to sleep until a build slot becomes available (which
        might be right away). */
@@ -652,18 +653,15 @@ HookInstance::~HookInstance()
 //////////////////////////////////////////////////////////////////////
 
 
-typedef map<string, string> HashRewrites;
+typedef map<std::string, std::string> StringRewrites;
 
 
-string rewriteHashes(string s, const HashRewrites & rewrites)
+std::string rewriteStrings(std::string s, const StringRewrites & rewrites)
 {
     for (auto & i : rewrites) {
-        assert(i.first.size() == i.second.size());
         size_t j = 0;
-        while ((j = s.find(i.first, j)) != string::npos) {
-            debug(format("rewriting @ %1%") % j);
-            s.replace(j, i.second.size(), i.second);
-        }
+        while ((j = s.find(i.first, j)) != string::npos)
+            s.replace(j, i.first.size(), i.second);
     }
     return s;
 }
@@ -782,7 +780,7 @@ private:
 #endif
 
     /* Hash rewriting. */
-    HashRewrites rewritesToTmp, rewritesFromTmp;
+    StringRewrites inputRewrites, outputRewrites;
     typedef map<Path, Path> RedirectedOutputs;
     RedirectedOutputs redirectedOutputs;
 
@@ -806,6 +804,9 @@ private:
        result. */
     ValidPathInfos prevInfos;
 
+    const uid_t sandboxUid = 1000;
+    const gid_t sandboxGid = 100;
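+    /* Descriptive comment (sketch of intent): these are mapped via uid_map/gid_map
+       to the real build user or calling user in the parent namespace. */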
+
 public:
     DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs,
         Worker & worker, BuildMode buildMode = bmNormal);
@@ -938,7 +939,7 @@ DerivationGoal::~DerivationGoal()
 void DerivationGoal::killChild()
 {
     if (pid != -1) {
-        worker.childTerminated(shared_from_this());
+        worker.childTerminated(this);
 
         if (buildUser.enabled()) {
             /* If we're using a build user, then there is a tricky
@@ -1013,7 +1014,7 @@ void DerivationGoal::loadDerivation()
     trace("loading derivation");
 
     if (nrFailed != 0) {
-        printMsg(lvlError, format("cannot build missing derivation ‘%1%’") % drvPath);
+        printError(format("cannot build missing derivation ‘%1%’") % drvPath);
         done(BuildResult::MiscFailure);
         return;
     }
@@ -1167,7 +1168,7 @@ void DerivationGoal::repairClosure()
     PathSet broken;
     for (auto & i : outputClosure) {
         if (worker.pathContentsGood(i)) continue;
-        printMsg(lvlError, format("found corrupted or missing path ‘%1%’ in the output closure of ‘%2%’") % i % drvPath);
+        printError(format("found corrupted or missing path ‘%1%’ in the output closure of ‘%2%’") % i % drvPath);
         Path drvPath2 = outputsToDrv[i];
         if (drvPath2 == "")
             addWaitee(worker.makeSubstitutionGoal(i, true));
@@ -1200,7 +1201,7 @@ void DerivationGoal::inputsRealised()
     if (nrFailed != 0) {
         if (!useDerivation)
             throw Error(format("some dependencies of ‘%1%’ are missing") % drvPath);
-        printMsg(lvlError,
+        printError(
             format("cannot build derivation ‘%1%’: %2% dependencies couldn't be built")
             % drvPath % nrFailed);
         done(BuildResult::DependencyFailed);
@@ -1365,7 +1366,7 @@ void DerivationGoal::tryToBuild()
         startBuilder();
 
     } catch (BuildError & e) {
-        printMsg(lvlError, e.msg());
+        printError(e.msg());
         outputLocks.unlock();
         buildUser.release();
         worker.permanentFailure = true;
@@ -1412,7 +1413,7 @@ void DerivationGoal::buildDone()
     debug(format("builder process for ‘%1%’ finished") % drvPath);
 
     /* So the child is gone now. */
-    worker.childTerminated(shared_from_this());
+    worker.childTerminated(this);
 
     /* Close the read side of the logger pipe. */
     if (hook) {
@@ -1514,7 +1515,7 @@ void DerivationGoal::buildDone()
 
     } catch (BuildError & e) {
         if (!hook)
-            printMsg(lvlError, e.msg());
+            printError(e.msg());
         outputLocks.unlock();
         buildUser.release();
 
@@ -1643,7 +1644,7 @@ void DerivationGoal::startBuilder()
         nrRounds > 1 ? "building path(s) %1% (round %2%/%3%)" :
         "building path(s) %1%");
     f.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit);
-    printMsg(lvlInfo, f % showPaths(missingPaths) % curRound % nrRounds);
+    printInfo(f % showPaths(missingPaths) % curRound % nrRounds);
 
     /* Right platform? */
     if (!drv->canBuildLocally()) {
@@ -1774,6 +1775,10 @@ void DerivationGoal::startBuilder()
         for (auto & i : varNames) env[i] = getEnv(i);
     }
 
+    /* Substitute output placeholders with the actual output paths. */
+    for (auto & output : drv->outputs)
+        inputRewrites[hashPlaceholder(output.first)] = output.second.path;
+
     /* The `exportReferencesGraph' feature allows the references graph
        to be passed to a builder.  This attribute should be a list of
        pairs [name1 path1 name2 path2 ...].  The references graph of
@@ -1872,8 +1877,12 @@ void DerivationGoal::startBuilder()
         /* Add the closure of store paths to the chroot. */
         PathSet closure;
         for (auto & i : dirsInChroot)
-            if (worker.store.isInStore(i.second))
-                worker.store.computeFSClosure(worker.store.toStorePath(i.second), closure);
+            try {
+                if (worker.store.isInStore(i.second))
+                    worker.store.computeFSClosure(worker.store.toStorePath(i.second), closure);
+            } catch (Error & e) {
+                throw Error(format("while processing ‘build-sandbox-paths’: %s") % e.what());
+            }
         for (auto & i : closure)
             dirsInChroot[i] = i;
 
@@ -1935,14 +1944,18 @@ void DerivationGoal::startBuilder()
         createDirs(chrootRootDir + "/etc");
 
         writeFile(chrootRootDir + "/etc/passwd",
-            "root:x:0:0:Nix build user:/:/noshell\n"
-            "nobody:x:65534:65534:Nobody:/:/noshell\n");
+            (format(
+                "root:x:0:0:Nix build user:/:/noshell\n"
+                "nixbld:x:%1%:%2%:Nix build user:/:/noshell\n"
+                "nobody:x:65534:65534:Nobody:/:/noshell\n") % sandboxUid % sandboxGid).str());
 
         /* Declare the build user's group so that programs get a consistent
            view of the system (e.g., "id -gn"). */
         writeFile(chrootRootDir + "/etc/group",
-            "root:x:0:\n"
-            "nobody:x:65534:\n");
+            (format(
+                "root:x:0:\n"
+                "nixbld:!:%1%:\n"
+                "nogroup:x:65534:\n") % sandboxGid).str());
 
         /* Create /etc/hosts with localhost entry. */
         if (!fixedOutput)
@@ -2124,7 +2137,12 @@ void DerivationGoal::startBuilder()
         Pid helper = startProcess([&]() {
 
             /* Drop additional groups here because we can't do it
-               after we've created the new user namespace. */
+               after we've created the new user namespace.  FIXME:
+               this means that if we're not root in the parent
+               namespace, we can't drop additional groups; they will
+               be mapped to nogroup in the child namespace. There does
+               not seem to be a workaround for this. (But who can tell
+               from reading user_namespaces(7)?) */
             if (getuid() == 0 && setgroups(0, 0) == -1)
                 throw SysError("setgroups failed");
 
@@ -2157,19 +2175,19 @@ void DerivationGoal::startBuilder()
         if (!string2Int<pid_t>(readLine(builderOut.readSide.get()), tmp)) abort();
         pid = tmp;
 
-        /* Set the UID/GID mapping of the builder's user
-           namespace such that root maps to the build user, or to the
-           calling user (if build users are disabled). */
-        uid_t targetUid = buildUser.enabled() ? buildUser.getUID() : getuid();
-        uid_t targetGid = buildUser.enabled() ? buildUser.getGID() : getgid();
+        /* Set the UID/GID mapping of the builder's user namespace
+           such that the sandbox user maps to the build user, or to
+           the calling user (if build users are disabled). */
+        uid_t hostUid = buildUser.enabled() ? buildUser.getUID() : getuid();
+        uid_t hostGid = buildUser.enabled() ? buildUser.getGID() : getgid();
 
         writeFile("/proc/" + std::to_string(pid) + "/uid_map",
-            (format("0 %d 1") % targetUid).str());
+            (format("%d %d 1") % sandboxUid % hostUid).str());
 
         writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
 
         writeFile("/proc/" + std::to_string(pid) + "/gid_map",
-            (format("0 %d 1") % targetGid).str());
+            (format("%d %d 1") % sandboxGid % hostGid).str());
 
         /* Signal the builder that we've updated its user
            namespace. */
@@ -2198,7 +2216,7 @@ void DerivationGoal::startBuilder()
             if (msg.size() == 1) break;
             throw Error(string(msg, 1));
         }
-        printMsg(lvlDebug, msg);
+        debug(msg);
     }
 }
 
@@ -2328,13 +2346,15 @@ void DerivationGoal::runChild()
 
             /* Mount a new tmpfs on /dev/shm to ensure that whatever
                the builder puts in /dev/shm is cleaned up automatically. */
-            if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0, 0) == -1)
+            if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0,
+                    fmt("size=%s", settings.get("sandbox-dev-shm-size", std::string("50%"))).c_str()) == -1)
                 throw SysError("mounting /dev/shm");
 
             /* Mount a new devpts on /dev/pts.  Note that this
                requires the kernel to be compiled with
                CONFIG_DEVPTS_MULTIPLE_INSTANCES=y (which is the case
                if /dev/ptx/ptmx exists). */
+#if 0
             if (pathExists("/dev/pts/ptmx") &&
                 !pathExists(chrootRootDir + "/dev/ptmx")
                 && dirsInChroot.find("/dev/pts") == dirsInChroot.end())
@@ -2347,6 +2367,7 @@ void DerivationGoal::runChild()
                    Linux versions, it is created with permissions 0.  */
                 chmod_(chrootRootDir + "/dev/pts/ptmx", 0666);
             }
+#endif
 
             /* Do the chroot(). */
             if (chdir(chrootRootDir.c_str()) == -1)
@@ -2367,11 +2388,12 @@ void DerivationGoal::runChild()
             if (rmdir("real-root") == -1)
                 throw SysError("cannot remove real-root directory");
 
-            /* Become root in the user namespace, which corresponds to
-               the build user or calling user in the parent namespace. */
-            if (setgid(0) == -1)
+            /* Switch to the sandbox uid/gid in the user namespace,
+               which corresponds to the build user or calling user in
+               the parent namespace. */
+            if (setgid(sandboxGid) == -1)
                 throw SysError("setgid failed");
-            if (setuid(0) == -1)
+            if (setuid(sandboxUid) == -1)
                 throw SysError("setuid failed");
 
             setUser = false;
@@ -2418,7 +2440,7 @@ void DerivationGoal::runChild()
         /* Fill in the environment. */
         Strings envStrs;
         for (auto & i : env)
-            envStrs.push_back(rewriteHashes(i.first + "=" + i.second, rewritesToTmp));
+            envStrs.push_back(rewriteStrings(i.first + "=" + i.second, inputRewrites));
 
         /* If we are running in `build-users' mode, then switch to the
            user we allocated above.  Make sure that we drop all root
@@ -2560,7 +2582,7 @@ void DerivationGoal::runChild()
         }
 
         for (auto & i : drv->args)
-            args.push_back(rewriteHashes(i, rewritesToTmp));
+            args.push_back(rewriteStrings(i, inputRewrites));
 
         restoreSIGPIPE();
 
@@ -2682,8 +2704,8 @@ void DerivationGoal::registerOutputs()
 
         /* Apply hash rewriting if necessary. */
         bool rewritten = false;
-        if (!rewritesFromTmp.empty()) {
-            printMsg(lvlError, format("warning: rewriting hashes in ‘%1%’; cross fingers") % path);
+        if (!outputRewrites.empty()) {
+            printError(format("warning: rewriting hashes in ‘%1%’; cross fingers") % path);
 
             /* Canonicalise first.  This ensures that the path we're
                rewriting doesn't contain a hard link to /etc/shadow or
@@ -2694,7 +2716,7 @@ void DerivationGoal::registerOutputs()
             StringSink sink;
             dumpPath(actualPath, sink);
             deletePath(actualPath);
-            sink.s = make_ref<std::string>(rewriteHashes(*sink.s, rewritesFromTmp));
+            sink.s = make_ref<std::string>(rewriteStrings(*sink.s, outputRewrites));
             StringSource source(*sink.s);
             restorePath(actualPath, source);
 
@@ -2706,8 +2728,8 @@ void DerivationGoal::registerOutputs()
            hash). */
         if (i.second.hash != "") {
 
-            bool recursive; HashType ht; Hash h;
-            i.second.parseHashInfo(recursive, ht, h);
+            bool recursive; Hash h;
+            i.second.parseHashInfo(recursive, h);
 
             if (!recursive) {
                 /* The output path should be a regular file without
@@ -2719,11 +2741,11 @@ void DerivationGoal::registerOutputs()
 
             /* Check the hash. In hash mode, move the path produced by
                the derivation to its content-addressed location. */
-            Hash h2 = recursive ? hashPath(ht, actualPath).first : hashFile(ht, actualPath);
+            Hash h2 = recursive ? hashPath(h.type, actualPath).first : hashFile(h.type, actualPath);
             if (buildMode == bmHash) {
-                Path dest = worker.store.makeFixedOutputPath(recursive, ht, h2, drv->env["name"]);
-                printMsg(lvlError, format("build produced path ‘%1%’ with %2% hash ‘%3%’")
-                    % dest % printHashType(ht) % printHash16or32(h2));
+                Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]);
+                printError(format("build produced path ‘%1%’ with %2% hash ‘%3%’")
+                    % dest % printHashType(h.type) % printHash16or32(h2));
                 if (worker.store.isValidPath(dest))
                     return;
                 Path actualDest = worker.store.toRealPath(dest);
@@ -2910,7 +2932,7 @@ Path DerivationGoal::openLogFile()
     string baseName = baseNameOf(drvPath);
 
     /* Create a log file. */
-    Path dir = (format("%1%/%2%/%3%/") % settings.nixLogDir % drvsLogDir % string(baseName, 0, 2)).str();
+    Path dir = (format("%1%/%2%/%3%/") % worker.store.logDir % drvsLogDir % string(baseName, 0, 2)).str();
     createDirs(dir);
 
     Path logFileName = (format("%1%/%2%%3%")
@@ -2946,7 +2968,7 @@ void DerivationGoal::deleteTmpDir(bool force)
 {
     if (tmpDir != "") {
         if (settings.keepFailed && !force) {
-            printMsg(lvlError,
+            printError(
                 format("note: keeping build directory ‘%2%’")
                 % drvPath % tmpDir);
             chmod(tmpDir.c_str(), 0755);
@@ -2965,7 +2987,7 @@ void DerivationGoal::handleChildOutput(int fd, const string & data)
     {
         logSize += data.size();
         if (settings.maxLogSize && logSize > settings.maxLogSize) {
-            printMsg(lvlError,
+            printError(
                 format("%1% killed after writing more than %2% bytes of log output")
                 % getName() % settings.maxLogSize);
             killChild();
@@ -2988,7 +3010,7 @@ void DerivationGoal::handleChildOutput(int fd, const string & data)
     }
 
     if (hook && fd == hook->fromHook.readSide.get())
-        printMsg(lvlError, data); // FIXME?
+        printError(data); // FIXME?
 }
 
 
@@ -3002,7 +3024,7 @@ void DerivationGoal::handleEOF(int fd)
 void DerivationGoal::flushLine()
 {
     if (settings.verboseBuild)
-        printMsg(lvlInfo, filterANSIEscapes(currentLogLine, true));
+        printInfo(filterANSIEscapes(currentLogLine, true));
     else {
         logTail.push_back(currentLogLine);
         if (logTail.size() > settings.logLines) logTail.pop_front();
@@ -3033,8 +3055,8 @@ Path DerivationGoal::addHashRewrite(const Path & path)
     Path p = worker.store.storeDir + "/" + h2 + string(path, worker.store.storeDir.size() + 33);
     deletePath(p);
     assert(path.size() == p.size());
-    rewritesToTmp[h1] = h2;
-    rewritesFromTmp[h2] = h1;
+    inputRewrites[h1] = h2;
+    outputRewrites[h2] = h1;
     redirectedOutputs[path] = p;
     return p;
 }
@@ -3140,8 +3162,9 @@ SubstitutionGoal::~SubstitutionGoal()
 {
     try {
         if (thr.joinable()) {
+            // FIXME: signal worker thread to quit.
             thr.join();
-            //worker.childTerminated(shared_from_this()); // FIXME
+            worker.childTerminated(this);
         }
     } catch (...) {
         ignoreException();
@@ -3213,8 +3236,8 @@ void SubstitutionGoal::tryNext()
     /* Bail out early if this substituter lacks a valid
        signature. LocalStore::addToStore() also checks for this, but
        only after we've downloaded the path. */
-    if (worker.store.requireSigs && !info->checkSignatures(worker.store.publicKeys)) {
-        printMsg(lvlInfo, format("warning: substituter ‘%s’ does not have a valid signature for path ‘%s’")
+    if (worker.store.requireSigs && !info->checkSignatures(worker.store, worker.store.publicKeys)) {
+        printInfo(format("warning: substituter ‘%s’ does not have a valid signature for path ‘%s’")
             % sub->getUri() % storePath);
         tryNext();
         return;
@@ -3265,7 +3288,7 @@ void SubstitutionGoal::tryToRun()
         return;
     }
 
-    printMsg(lvlInfo, format("fetching path ‘%1%’...") % storePath);
+    printInfo(format("fetching path ‘%1%’...") % storePath);
 
     outPipe.create();
 
@@ -3296,12 +3319,12 @@ void SubstitutionGoal::finished()
     trace("substitute finished");
 
     thr.join();
-    worker.childTerminated(shared_from_this());
+    worker.childTerminated(this);
 
     try {
         promise.get_future().get();
     } catch (Error & e) {
-        printMsg(lvlInfo, e.msg());
+        printInfo(e.msg());
 
         /* Try the next substitute. */
         state = &SubstitutionGoal::tryNext;
@@ -3449,6 +3472,7 @@ void Worker::childStarted(GoalPtr goal, const set<int> & fds,
 {
     Child child;
     child.goal = goal;
+    child.goal2 = goal.get();
     child.fds = fds;
     child.timeStarted = child.lastOutput = time(0);
     child.inBuildSlot = inBuildSlot;
@@ -3458,11 +3482,11 @@ void Worker::childStarted(GoalPtr goal, const set<int> & fds,
 }
 
 
-void Worker::childTerminated(GoalPtr goal, bool wakeSleepers)
+void Worker::childTerminated(Goal * goal, bool wakeSleepers)
 {
     auto i = std::find_if(children.begin(), children.end(),
-        [&](const Child & child) { return child.goal.lock() == goal; });
-    assert(i != children.end());
+        [&](const Child & child) { return child.goal2 == goal; });
+    if (i == children.end()) return;
 
     if (i->inBuildSlot) {
         assert(nrLocalBuilds > 0);
@@ -3594,7 +3618,7 @@ void Worker::waitForInput()
     if (!waitingForAWhile.empty()) {
         useTimeout = true;
         if (lastWokenUp == 0)
-            printMsg(lvlError, "waiting for locks or build slots...");
+            printError("waiting for locks or build slots...");
         if (lastWokenUp == 0 || lastWokenUp > before) lastWokenUp = before;
         timeout.tv_sec = std::max((time_t) 1, (time_t) (lastWokenUp + settings.pollInterval - before));
     } else lastWokenUp = 0;
@@ -3657,7 +3681,7 @@ void Worker::waitForInput()
             j->respectTimeouts &&
             after - j->lastOutput >= (time_t) settings.maxSilentTime)
         {
-            printMsg(lvlError,
+            printError(
                 format("%1% timed out after %2% seconds of silence")
                 % goal->getName() % settings.maxSilentTime);
             goal->timedOut();
@@ -3668,7 +3692,7 @@ void Worker::waitForInput()
             j->respectTimeouts &&
             after - j->timeStarted >= (time_t) settings.buildTimeout)
         {
-            printMsg(lvlError,
+            printError(
                 format("%1% timed out after %2% seconds")
                 % goal->getName() % settings.buildTimeout);
             goal->timedOut();
@@ -3696,7 +3720,7 @@ bool Worker::pathContentsGood(const Path & path)
 {
     std::map<Path, bool>::iterator i = pathContentsGoodCache.find(path);
     if (i != pathContentsGoodCache.end()) return i->second;
-    printMsg(lvlInfo, format("checking path ‘%1%’...") % path);
+    printInfo(format("checking path ‘%1%’...") % path);
     auto info = store.queryPathInfo(path);
     bool res;
     if (!pathExists(path))
@@ -3707,7 +3731,7 @@ bool Worker::pathContentsGood(const Path & path)
         res = info->narHash == nullHash || info->narHash == current.first;
     }
     pathContentsGoodCache[path] = res;
-    if (!res) printMsg(lvlError, format("path ‘%1%’ is corrupted or missing!") % path);
+    if (!res) printError(format("path ‘%1%’ is corrupted or missing!") % path);
     return res;
 }
 
@@ -3745,7 +3769,7 @@ void LocalStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
         }
 
     if (!failed.empty())
-        throw Error(format("build of %1% failed") % showPaths(failed), worker.exitStatus());
+        throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
 }
 
 
@@ -3781,7 +3805,7 @@ void LocalStore::ensurePath(const Path & path)
     worker.run(goals);
 
     if (goal->getExitCode() != Goal::ecSuccess)
-        throw Error(format("path ‘%1%’ does not exist and cannot be created") % path, worker.exitStatus());
+        throw Error(worker.exitStatus(), "path ‘%s’ does not exist and cannot be created", path);
 }
 
 
@@ -3802,7 +3826,7 @@ void LocalStore::repairPath(const Path & path)
             goals.insert(worker.makeDerivationGoal(deriver, StringSet(), bmRepair));
             worker.run(goals);
         } else
-            throw Error(format("cannot repair path ‘%1%’") % path, worker.exitStatus());
+            throw Error(worker.exitStatus(), "cannot repair path ‘%s’", path);
     }
 }
 
diff --git a/src/libstore/builtins.cc b/src/libstore/builtins.cc
index d3194a905733..a30f30906f01 100644
--- a/src/libstore/builtins.cc
+++ b/src/libstore/builtins.cc
@@ -17,13 +17,15 @@ void builtinFetchurl(const BasicDerivation & drv)
     auto fetch = [&](const string & url) {
         /* No need to do TLS verification, because we check the hash of
            the result anyway. */
-        DownloadOptions options;
-        options.verifyTLS = false;
+        DownloadRequest request(url);
+        request.verifyTLS = false;
 
         /* Show a progress indicator, even though stderr is not a tty. */
-        options.showProgress = DownloadOptions::yes;
+        request.showProgress = DownloadRequest::yes;
 
-        auto data = makeDownloader()->download(url, options);
+        /* Note: have to use a fresh downloader here because we're in
+           a forked process. */
+        auto data = makeDownloader()->download(request);
         assert(data.data);
 
         return data.data;
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index 5590b99b36fa..d934bda38225 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -9,7 +9,7 @@
 namespace nix {
 
 
-void DerivationOutput::parseHashInfo(bool & recursive, HashType & hashType, Hash & hash) const
+void DerivationOutput::parseHashInfo(bool & recursive, Hash & hash) const
 {
     recursive = false;
     string algo = hashAlgo;
@@ -19,7 +19,7 @@ void DerivationOutput::parseHashInfo(bool & recursive, HashType & hashType, Hash
         algo = string(algo, 2);
     }
 
-    hashType = parseHashType(algo);
+    HashType hashType = parseHashType(algo);
     if (hashType == htUnknown)
         throw Error(format("unknown hash algorithm ‘%1%’") % algo);
 
@@ -61,6 +61,7 @@ bool BasicDerivation::canBuildLocally() const
 #if __linux__
         || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux")
         || (platform == "armv6l-linux" && settings.thisSystem == "armv7l-linux")
+        || (platform == "armv5tel-linux" && (settings.thisSystem == "armv7l-linux" || settings.thisSystem == "armv6l-linux"))
 #elif __FreeBSD__
         || (platform == "i686-linux" && settings.thisSystem == "x86_64-freebsd")
         || (platform == "i686-linux" && settings.thisSystem == "i686-freebsd")
@@ -87,6 +88,38 @@ Path writeDerivation(ref<Store> store,
 }
 
 
+MakeError(FormatError, Error)
+
+
+/* Read string `s' from stream `str'. */
+static void expect(std::istream & str, const string & s)
+{
+    char s2[s.size()];
+    str.read(s2, s.size());
+    if (string(s2, s.size()) != s)
+        throw FormatError(format("expected string ‘%1%’") % s);
+}
+
+
+/* Read a C-style string from stream `str'. */
+static string parseString(std::istream & str)
+{
+    string res;
+    expect(str, "\"");
+    int c;
+    while ((c = str.get()) != '"')
+        if (c == '\\') {
+            c = str.get();
+            if (c == 'n') res += '\n';
+            else if (c == 'r') res += '\r';
+            else if (c == 't') res += '\t';
+            else res += c;
+        }
+        else res += c;
+    return res;
+}
+
+
 static Path parsePath(std::istream & str)
 {
     string s = parseString(str);
@@ -96,6 +129,20 @@ static Path parsePath(std::istream & str)
 }
 
 
+static bool endOfList(std::istream & str)
+{
+    if (str.peek() == ',') {
+        str.get();
+        return false;
+    }
+    if (str.peek() == ']') {
+        str.get();
+        return true;
+    }
+    return false;
+}
+
+
 static StringSet parseStrings(std::istream & str, bool arePaths)
 {
     StringSet res;
@@ -390,4 +437,11 @@ Sink & operator << (Sink & out, const BasicDerivation & drv)
 }
 
 
+std::string hashPlaceholder(const std::string & outputName)
+{
+    // FIXME: memoize?
+    return "/" + printHash32(hashString(htSHA256, "nix-output:" + outputName));
+}
+
+
 }
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index e27c937132e8..9717a81e469c 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -29,7 +29,7 @@ struct DerivationOutput
         this->hashAlgo = hashAlgo;
         this->hash = hash;
     }
-    void parseHashInfo(bool & recursive, HashType & hashType, Hash & hash) const;
+    void parseHashInfo(bool & recursive, Hash & hash) const;
 };
 
 typedef std::map<string, DerivationOutput> DerivationOutputs;
@@ -117,4 +117,6 @@ struct Sink;
 Source & readDerivation(Source & in, Store & store, BasicDerivation & drv);
 Sink & operator << (Sink & out, const BasicDerivation & drv);
 
+std::string hashPlaceholder(const std::string & outputName);
+
 }
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
index 04a2b325c651..b228cd0b4b37 100644
--- a/src/libstore/download.cc
+++ b/src/libstore/download.cc
@@ -3,10 +3,17 @@
 #include "globals.hh"
 #include "hash.hh"
 #include "store-api.hh"
+#include "archive.hh"
+
+#include <unistd.h>
+#include <fcntl.h>
 
 #include <curl/curl.h>
 
 #include <iostream>
+#include <thread>
+#include <cmath>
+#include <random>
 
 
 namespace nix {
@@ -28,203 +35,473 @@ std::string resolveUri(const std::string & uri)
 
 struct CurlDownloader : public Downloader
 {
-    CURL * curl;
-    ref<std::string> data;
-    string etag, status, expectedETag;
+    CURLM * curlm = 0;
 
-    struct curl_slist * requestHeaders;
+    std::random_device rd;
+    std::mt19937 mt19937;
 
-    bool showProgress;
-    double prevProgressTime{0}, startTime{0};
-    unsigned int moveBack{1};
+    bool enableHttp2;
 
-    size_t writeCallback(void * contents, size_t size, size_t nmemb)
+    struct DownloadItem : public std::enable_shared_from_this<DownloadItem>
     {
-        size_t realSize = size * nmemb;
-        data->append((char *) contents, realSize);
-        return realSize;
-    }
+        CurlDownloader & downloader;
+        DownloadRequest request;
+        DownloadResult result;
+        bool done = false; // whether either the success or failure function has been called
+        std::function<void(const DownloadResult &)> success;
+        std::function<void(std::exception_ptr exc)> failure;
+        CURL * req = 0;
+        bool active = false; // whether the handle has been added to the multi object
+        std::string status;
+
+        bool showProgress = false;
+        double prevProgressTime{0}, startTime{0};
+        unsigned int moveBack{1};
+
+        unsigned int attempt = 0;
+
+        /* Don't start this download until the specified time point
+           has been reached. */
+        std::chrono::steady_clock::time_point embargo;
+
+        struct curl_slist * requestHeaders = 0;
+
+        DownloadItem(CurlDownloader & downloader, const DownloadRequest & request)
+            : downloader(downloader), request(request)
+        {
+            showProgress =
+                request.showProgress == DownloadRequest::yes ||
+                (request.showProgress == DownloadRequest::automatic && isatty(STDERR_FILENO));
+
+            if (!request.expectedETag.empty())
+                requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
+        }
 
-    static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
-    {
-        return ((CurlDownloader *) userp)->writeCallback(contents, size, nmemb);
-    }
+        ~DownloadItem()
+        {
+            if (req) {
+                if (active)
+                    curl_multi_remove_handle(downloader.curlm, req);
+                curl_easy_cleanup(req);
+            }
+            if (requestHeaders) curl_slist_free_all(requestHeaders);
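+            /* Descriptive comment: if the item is destroyed before a result was
+               delivered, report it as interrupted. */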
+            try {
+                if (!done)
+                    fail(DownloadError(Interrupted, format("download of ‘%s’ was interrupted") % request.uri));
+            } catch (...) {
+                ignoreException();
+            }
+        }
 
-    size_t headerCallback(void * contents, size_t size, size_t nmemb)
-    {
-        size_t realSize = size * nmemb;
-        string line = string((char *) contents, realSize);
-        printMsg(lvlVomit, format("got header: %1%") % trim(line));
-        if (line.compare(0, 5, "HTTP/") == 0) { // new response starts
-            etag = "";
-            auto ss = tokenizeString<vector<string>>(line, " ");
-            status = ss.size() >= 2 ? ss[1] : "";
-        } else {
-            auto i = line.find(':');
-            if (i != string::npos) {
-                string name = trim(string(line, 0, i));
-                if (name == "ETag") { // FIXME: case
-                    etag = trim(string(line, i + 1));
-                    /* Hack to work around a GitHub bug: it sends
-                       ETags, but ignores If-None-Match. So if we get
-                       the expected ETag on a 200 response, then shut
-                       down the connection because we already have the
-                       data. */
-                    printMsg(lvlDebug, format("got ETag: %1%") % etag);
-                    if (etag == expectedETag && status == "200") {
-                        printMsg(lvlDebug, format("shutting down on 200 HTTP response with expected ETag"));
-                        return 0;
+        template<class T>
+        void fail(const T & e)
+        {
+            assert(!done);
+            done = true;
+            callFailure(failure, std::make_exception_ptr(e));
+        }
+
+        size_t writeCallback(void * contents, size_t size, size_t nmemb)
+        {
+            size_t realSize = size * nmemb;
+            result.data->append((char *) contents, realSize);
+            return realSize;
+        }
+
+        static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
+        {
+            return ((DownloadItem *) userp)->writeCallback(contents, size, nmemb);
+        }
+
+        size_t headerCallback(void * contents, size_t size, size_t nmemb)
+        {
+            size_t realSize = size * nmemb;
+            std::string line((char *) contents, realSize);
+            printMsg(lvlVomit, format("got header for ‘%s’: %s") % request.uri % trim(line));
+            if (line.compare(0, 5, "HTTP/") == 0) { // new response starts
+                result.etag = "";
+                auto ss = tokenizeString<vector<string>>(line, " ");
+                status = ss.size() >= 2 ? ss[1] : "";
+                result.data = std::make_shared<std::string>();
+            } else {
+                auto i = line.find(':');
+                if (i != string::npos) {
+                    string name = toLower(trim(string(line, 0, i)));
+                    if (name == "etag") {
+                        result.etag = trim(string(line, i + 1));
+                        /* Hack to work around a GitHub bug: it sends
+                           ETags, but ignores If-None-Match. So if we get
+                           the expected ETag on a 200 response, then shut
+                           down the connection because we already have the
+                           data. */
+                        if (result.etag == request.expectedETag && status == "200") {
+                            debug(format("shutting down on 200 HTTP response with expected ETag"));
+                            return 0;
+                        }
                     }
                 }
             }
+            return realSize;
         }
-        return realSize;
-    }
 
-    static size_t headerCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
-    {
-        return ((CurlDownloader *) userp)->headerCallback(contents, size, nmemb);
-    }
+        static size_t headerCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
+        {
+            return ((DownloadItem *) userp)->headerCallback(contents, size, nmemb);
+        }
 
-    int progressCallback(double dltotal, double dlnow)
-    {
-        if (showProgress) {
-            double now = getTime();
-            if (prevProgressTime <= now - 1) {
-                string s = (format(" [%1$.0f/%2$.0f KiB, %3$.1f KiB/s]")
-                    % (dlnow / 1024.0)
-                    % (dltotal / 1024.0)
-                    % (now == startTime ? 0 : dlnow / 1024.0 / (now - startTime))).str();
-                std::cerr << "\e[" << moveBack << "D" << s;
-                moveBack = s.size();
+        int progressCallback(double dltotal, double dlnow)
+        {
+            if (showProgress) {
+                double now = getTime();
+                if (prevProgressTime <= now - 1) {
+                    string s = (format(" [%1$.0f/%2$.0f KiB, %3$.1f KiB/s]")
+                        % (dlnow / 1024.0)
+                        % (dltotal / 1024.0)
+                        % (now == startTime ? 0 : dlnow / 1024.0 / (now - startTime))).str();
+                    std::cerr << "\e[" << moveBack << "D" << s;
+                    moveBack = s.size();
+                    std::cerr.flush();
+                    prevProgressTime = now;
+                }
+            }
+            return _isInterrupted;
+        }
+
+        static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
+        {
+            return ((DownloadItem *) userp)->progressCallback(dltotal, dlnow);
+        }
+
+        void init()
+        {
+            // FIXME: handle parallel downloads.
+            if (showProgress) {
+                std::cerr << (format("downloading ‘%1%’... ") % request.uri);
                 std::cerr.flush();
-                prevProgressTime = now;
+                startTime = getTime();
+            }
+
+            if (!req) req = curl_easy_init();
+
+            curl_easy_reset(req);
+            curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str());
+            curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L);
+            curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
+            curl_easy_setopt(req, CURLOPT_USERAGENT, ("Nix/" + nixVersion).c_str());
+            #ifdef CURLOPT_PIPEWAIT
+            curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1);
+            #endif
+            #ifdef CURL_HTTP_VERSION_2TLS
+            if (downloader.enableHttp2)
+                curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
+            #endif
+            curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, DownloadItem::writeCallbackWrapper);
+            curl_easy_setopt(req, CURLOPT_WRITEDATA, this);
+            curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, DownloadItem::headerCallbackWrapper);
+            curl_easy_setopt(req, CURLOPT_HEADERDATA, this);
+
+            curl_easy_setopt(req, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper);
+            curl_easy_setopt(req, CURLOPT_PROGRESSDATA, this);
+            curl_easy_setopt(req, CURLOPT_NOPROGRESS, 0);
+
+            curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders);
+
+            if (request.head)
+                curl_easy_setopt(req, CURLOPT_NOBODY, 1);
+
+            if (request.verifyTLS)
+                curl_easy_setopt(req, CURLOPT_CAINFO, getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt").c_str());
+            else {
+                curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0);
+                curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
             }
+
+            result.data = std::make_shared<std::string>();
         }
-        return _isInterrupted;
-    }
 
-    static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
-    {
-        return ((CurlDownloader *) userp)->progressCallback(dltotal, dlnow);
-    }
+        void finish(CURLcode code)
+        {
+            if (showProgress)
+                //std::cerr << "\e[" << moveBack << "D\e[K\n";
+                std::cerr << "\n";
 
-    CurlDownloader()
-        : data(make_ref<std::string>())
-    {
-        requestHeaders = 0;
+            long httpStatus = 0;
+            curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);
 
-        curl = curl_easy_init();
-        if (!curl) throw nix::Error("unable to initialize curl");
-    }
+            char * effectiveUrlCStr;
+            curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUrlCStr);
+            if (effectiveUrlCStr)
+                result.effectiveUrl = effectiveUrlCStr;
 
-    ~CurlDownloader()
-    {
-        if (curl) curl_easy_cleanup(curl);
-        if (requestHeaders) curl_slist_free_all(requestHeaders);
-    }
+            debug(format("finished download of ‘%s’; curl status = %d, HTTP status = %d, body = %d bytes")
+                % request.uri % code % httpStatus % (result.data ? result.data->size() : 0));
+
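+            /* The header callback aborts the transfer (yielding CURLE_WRITE_ERROR)
+               when it sees the expected ETag on a 200 response, so treat that
+               combination as "304 Not Modified". */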
+            if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) {
+                code = CURLE_OK;
+                httpStatus = 304;
+            }
+
+            if (code == CURLE_OK &&
+                (httpStatus == 200 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
+            {
+                result.cached = httpStatus == 304;
+                done = true;
+                callSuccess(success, failure, const_cast<const DownloadResult &>(result));
+            } else {
+                Error err =
+                    (httpStatus == 404 || code == CURLE_FILE_COULDNT_READ_FILE) ? NotFound :
+                    httpStatus == 403 ? Forbidden :
+                    (httpStatus == 408 || httpStatus == 500 || httpStatus == 503
+                        || httpStatus == 504  || httpStatus == 522 || httpStatus == 524
+                        || code == CURLE_COULDNT_RESOLVE_HOST) ? Transient :
+                    Misc;
+
+                attempt++;
+
+                auto exc =
+                    code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
+                    ? DownloadError(Interrupted, format("download of ‘%s’ was interrupted") % request.uri)
+                    : httpStatus != 0
+                      ? DownloadError(err, format("unable to download ‘%s’: HTTP error %d") % request.uri % httpStatus)
+                      : DownloadError(err, format("unable to download ‘%s’: %s (%d)") % request.uri % curl_easy_strerror(code) % code);
+
+                /* If this is a transient error, then maybe retry the
+                   download after a while. */
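+                /* Exponential backoff: the delay doubles on every attempt, with
+                   a random jitter factor of up to sqrt(2). */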
+                if (err == Transient && attempt < request.tries) {
+                    int ms = request.baseRetryTimeMs * std::pow(2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(downloader.mt19937));
+                    printError(format("warning: %s; retrying in %d ms") % exc.what() % ms);
+                    embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms);
+                    downloader.enqueueItem(shared_from_this());
+                }
+                else
+                    fail(exc);
+            }
+        }
+    };
 
-    bool fetch(const string & url, const DownloadOptions & options)
+    struct State
     {
-        showProgress =
-            options.showProgress == DownloadOptions::yes ||
-            (options.showProgress == DownloadOptions::automatic && isatty(STDERR_FILENO));
+        bool quit = false;
+        std::vector<std::shared_ptr<DownloadItem>> incoming;
+    };
 
-        curl_easy_reset(curl);
+    Sync<State> state_;
 
-        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
-        curl_easy_setopt(curl, CURLOPT_USERAGENT, ("Nix/" + nixVersion).c_str());
-        curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1);
+    /* We can't use a std::condition_variable to wake up the curl
+       thread, because it only monitors file descriptors. So use a
+       pipe instead. */
+    Pipe wakeupPipe;
 
-        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeCallbackWrapper);
-        curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *) this);
+    std::thread workerThread;
 
-        curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, headerCallbackWrapper);
-        curl_easy_setopt(curl, CURLOPT_HEADERDATA, (void *) this);
+    CurlDownloader()
+        : mt19937(rd())
+    {
+        static std::once_flag globalInit;
+        std::call_once(globalInit, curl_global_init, CURL_GLOBAL_ALL);
 
-        curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper);
-        curl_easy_setopt(curl, CURLOPT_PROGRESSDATA, (void *) this);
-        curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0);
+        curlm = curl_multi_init();
 
-        curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
+        #ifdef CURLPIPE_MULTIPLEX
+        curl_multi_setopt(curlm, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
+        #endif
+        curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS,
+            settings.get("binary-caches-parallel-connections", 25));
 
-        curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
+        enableHttp2 = settings.get("enable-http2", true);
 
-        if (options.verifyTLS)
-            curl_easy_setopt(curl, CURLOPT_CAINFO, getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt").c_str());
-        else {
-            curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0);
-            curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0);
-        }
+        wakeupPipe.create();
+        fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK);
 
-        data = make_ref<std::string>();
+        workerThread = std::thread([&]() { workerThreadEntry(); });
+    }
 
-        if (requestHeaders) {
-            curl_slist_free_all(requestHeaders);
-            requestHeaders = 0;
+    ~CurlDownloader()
+    {
+        /* Signal the worker thread to exit. */
+        {
+            auto state(state_.lock());
+            state->quit = true;
         }
+        writeFull(wakeupPipe.writeSide.get(), " ");
 
-        if (!options.expectedETag.empty()) {
-            this->expectedETag = options.expectedETag;
-            requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + options.expectedETag).c_str());
-        }
+        workerThread.join();
+
+        if (curlm) curl_multi_cleanup(curlm);
+    }
+
+    void workerThreadMain()
+    {
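+        /* Active downloads owned by this thread, keyed by their curl easy handle. */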
+        std::map<CURL *, std::shared_ptr<DownloadItem>> items;
+
+        bool quit = false;
+
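+        /* Earliest embargo time of any item still waiting to be retried. */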
+        std::chrono::steady_clock::time_point nextWakeup;
+
+        while (!quit) {
+            checkInterrupt();
+
+            /* Let curl do its thing. */
+            int running;
+            CURLMcode mc = curl_multi_perform(curlm, &running);
+            if (mc != CURLM_OK)
+                throw nix::Error(format("unexpected error from curl_multi_perform(): %s") % curl_multi_strerror(mc));
+
+            /* Set the promises of any finished requests. */
+            CURLMsg * msg;
+            int left;
+            while ((msg = curl_multi_info_read(curlm, &left))) {
+                if (msg->msg == CURLMSG_DONE) {
+                    auto i = items.find(msg->easy_handle);
+                    assert(i != items.end());
+                    i->second->finish(msg->data.result);
+                    curl_multi_remove_handle(curlm, i->second->req);
+                    i->second->active = false;
+                    items.erase(i);
+                }
+            }
 
-        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, requestHeaders);
+            /* Wait for activity, including wakeup events. */
+            int numfds = 0;
+            struct curl_waitfd extraFDs[1];
+            extraFDs[0].fd = wakeupPipe.readSide.get();
+            extraFDs[0].events = CURL_WAIT_POLLIN;
+            extraFDs[0].revents = 0;
+            auto sleepTimeMs =
+                nextWakeup != std::chrono::steady_clock::time_point()
+                ? std::max(0, (int) std::chrono::duration_cast<std::chrono::milliseconds>(nextWakeup - std::chrono::steady_clock::now()).count())
+                : 1000000000;
+            //printMsg(lvlVomit, format("download thread waiting for %d ms") % sleepTimeMs);
+            mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds);
+            if (mc != CURLM_OK)
+                throw nix::Error(format("unexpected error from curl_multi_wait(): %s") % curl_multi_strerror(mc));
+
+            nextWakeup = std::chrono::steady_clock::time_point();
+
+            /* Add new curl requests from the incoming requests queue,
+               except for requests that are embargoed (waiting for a
+               retry timeout to expire). FIXME: should use a priority
+               queue for the embargoed items to prevent repeated O(n)
+               checks. */
+            if (extraFDs[0].revents & CURL_WAIT_POLLIN) {
+                char buf[1024];
+                auto res = read(extraFDs[0].fd, buf, sizeof(buf));
+                if (res == -1 && errno != EINTR)
+                    throw SysError("reading curl wakeup socket");
+            }
 
-        if (options.head)
-            curl_easy_setopt(curl, CURLOPT_NOBODY, 1);
+            std::vector<std::shared_ptr<DownloadItem>> incoming, embargoed;
+            auto now = std::chrono::steady_clock::now();
+
+            {
+                auto state(state_.lock());
+                for (auto & item: state->incoming) {
+                    if (item->embargo <= now)
+                        incoming.push_back(item);
+                    else {
+                        embargoed.push_back(item);
+                        if (nextWakeup == std::chrono::steady_clock::time_point()
+                            || item->embargo < nextWakeup)
+                            nextWakeup = item->embargo;
+                    }
+                }
+                state->incoming = embargoed;
+                quit = state->quit;
+            }
 
-        if (showProgress) {
-            std::cerr << (format("downloading ‘%1%’... ") % url);
-            std::cerr.flush();
-            startTime = getTime();
+            for (auto & item : incoming) {
+                debug(format("starting download of %s") % item->request.uri);
+                item->init();
+                curl_multi_add_handle(curlm, item->req);
+                item->active = true;
+                items[item->req] = item;
+            }
         }
 
-        CURLcode res = curl_easy_perform(curl);
-        if (showProgress)
-            //std::cerr << "\e[" << moveBack << "D\e[K\n";
-            std::cerr << "\n";
-        checkInterrupt();
-        if (res == CURLE_WRITE_ERROR && etag == options.expectedETag) return false;
-
-        long httpStatus = -1;
-        curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &httpStatus);
-
-        if (res != CURLE_OK) {
-            Error err =
-                httpStatus == 404 ? NotFound :
-                httpStatus == 403 ? Forbidden : Misc;
-            throw DownloadError(err, format("unable to download ‘%1%’: %2% (%3%)")
-                % url % curl_easy_strerror(res) % res);
+        debug("download thread shutting down");
+    }
+
+    void workerThreadEntry()
+    {
+        try {
+            workerThreadMain();
+        } catch (nix::Interrupted & e) {
+        } catch (std::exception & e) {
+            printError(format("unexpected error in download thread: %s") % e.what());
         }
 
-        if (httpStatus == 304) return false;
+        {
+            auto state(state_.lock());
+            state->incoming.clear();
+            state->quit = true;
+        }
+    }
 
-        return true;
+    void enqueueItem(std::shared_ptr<DownloadItem> item)
+    {
+        {
+            auto state(state_.lock());
+            if (state->quit)
+                throw nix::Error("cannot enqueue download request because the download thread is shutting down");
+            state->incoming.push_back(item);
+        }
+        writeFull(wakeupPipe.writeSide.get(), " ");
     }
 
-    DownloadResult download(string url, const DownloadOptions & options) override
+    void enqueueDownload(const DownloadRequest & request,
+        std::function<void(const DownloadResult &)> success,
+        std::function<void(std::exception_ptr exc)> failure) override
     {
-        DownloadResult res;
-        if (fetch(resolveUri(url), options)) {
-            res.cached = false;
-            res.data = data;
-        } else
-            res.cached = true;
-        res.etag = etag;
-        return res;
+        auto item = std::make_shared<DownloadItem>(*this, request);
+        item->success = success;
+        item->failure = failure;
+        enqueueItem(item);
     }
 };
 
+ref<Downloader> getDownloader()
+{
+    static std::shared_ptr<Downloader> downloader;
+    static std::once_flag downloaderCreated;
+    std::call_once(downloaderCreated, [&]() { downloader = makeDownloader(); });
+    return ref<Downloader>(downloader);
+}
+
 ref<Downloader> makeDownloader()
 {
     return make_ref<CurlDownloader>();
 }
 
-Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack)
+std::future<DownloadResult> Downloader::enqueueDownload(const DownloadRequest & request)
+{
+    auto promise = std::make_shared<std::promise<DownloadResult>>();
+    enqueueDownload(request,
+        [promise](const DownloadResult & result) { promise->set_value(result); },
+        [promise](std::exception_ptr exc) { promise->set_exception(exc); });
+    return promise->get_future();
+}
+
+DownloadResult Downloader::download(const DownloadRequest & request)
+{
+    return enqueueDownload(request).get();
+}
+
+Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl)
 {
     auto url = resolveUri(url_);
 
+    if (name == "") {
+        auto p = url.rfind('/');
+        if (p != string::npos) name = string(url, p + 1);
+    }
+
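+    /* If the expected hash is known, the resulting fixed-output store path can
+       be computed up front and returned immediately if it already exists. */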
+    Path expectedStorePath;
+    if (expectedHash) {
+        expectedStorePath = store->makeFixedOutputPath(unpack, expectedHash, name);
+        if (store->isValidPath(expectedStorePath))
+            return expectedStorePath;
+    }
+
     Path cacheDir = getCacheDir() + "/nix/tarballs";
     createDirs(cacheDir);
 
@@ -247,10 +524,12 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa
             auto ss = tokenizeString<vector<string>>(readFile(dataFile), "\n");
             if (ss.size() >= 3 && ss[0] == url) {
                 time_t lastChecked;
-                if (string2Int(ss[2], lastChecked) && lastChecked + ttl >= time(0))
+                if (string2Int(ss[2], lastChecked) && lastChecked + ttl >= time(0)) {
                     skip = true;
-                else if (!ss[1].empty()) {
-                    printMsg(lvlDebug, format("verifying previous ETag ‘%1%’") % ss[1]);
+                    if (effectiveUrl)
+                        *effectiveUrl = url_;
+                } else if (!ss[1].empty()) {
+                    debug(format("verifying previous ETag ‘%1%’") % ss[1]);
                     expectedETag = ss[1];
                 }
             }
@@ -258,19 +537,25 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa
             storePath = "";
     }
 
-    string name;
-    auto p = url.rfind('/');
-    if (p != string::npos) name = string(url, p + 1);
-
     if (!skip) {
 
         try {
-            DownloadOptions options;
-            options.expectedETag = expectedETag;
-            auto res = download(url, options);
-
-            if (!res.cached)
-                storePath = store->addTextToStore(name, *res.data, PathSet(), false);
+            DownloadRequest request(url);
+            request.expectedETag = expectedETag;
+            auto res = download(request);
+            if (effectiveUrl)
+                *effectiveUrl = res.effectiveUrl;
+
+            if (!res.cached) {
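+                /* Add the downloaded data to the store as a flat fixed-output path. */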
+                ValidPathInfo info;
+                StringSink sink;
+                dumpString(*res.data, sink);
+                Hash hash = hashString(expectedHash ? expectedHash.type : htSHA256, *res.data);
+                info.path = store->makeFixedOutputPath(false, hash, name);
+                info.narHash = hashString(htSHA256, *sink.s);
+                store->addToStore(info, *sink.s, false, true);
+                storePath = info.path;
+            }
 
             assert(!storePath.empty());
             replaceSymlink(storePath, fileLink);
@@ -278,7 +563,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa
             writeFile(dataFile, url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n");
         } catch (DownloadError & e) {
             if (storePath.empty()) throw;
-            printMsg(lvlError, format("warning: %1%; using cached result") % e.msg());
+            printError(format("warning: %1%; using cached result") % e.msg());
         }
     }
 
@@ -292,7 +577,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa
                 unpackedStorePath = "";
         }
         if (unpackedStorePath.empty()) {
-            printMsg(lvlInfo, format("unpacking ‘%1%’...") % url);
+            printInfo(format("unpacking ‘%1%’...") % url);
             Path tmpDir = createTempDir();
             AutoDelete autoDelete(tmpDir, true);
             // FIXME: this requires GNU tar for decompression.
@@ -300,9 +585,12 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa
             unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, false);
         }
         replaceSymlink(unpackedStorePath, unpackedLink);
-        return unpackedStorePath;
+        storePath = unpackedStorePath;
     }
 
+    if (expectedStorePath != "" && storePath != expectedStorePath)
+        throw nix::Error(format("hash mismatch in file downloaded from ‘%s’") % url);
+
     return storePath;
 }
 
diff --git a/src/libstore/download.hh b/src/libstore/download.hh
index eb2b76678ac7..82b5d641fde9 100644
--- a/src/libstore/download.hh
+++ b/src/libstore/download.hh
@@ -1,23 +1,31 @@
 #pragma once
 
 #include "types.hh"
+#include "hash.hh"
 
 #include <string>
+#include <future>
 
 namespace nix {
 
-struct DownloadOptions
+struct DownloadRequest
 {
-    string expectedETag;
-    bool verifyTLS{true};
-    enum { yes, no, automatic } showProgress{yes};
-    bool head{false};
+    std::string uri;
+    std::string expectedETag;
+    bool verifyTLS = true;
+    enum { yes, no, automatic } showProgress = yes;
+    bool head = false;
+    size_t tries = 1;
+    unsigned int baseRetryTimeMs = 250;
+
+    DownloadRequest(const std::string & uri) : uri(uri) { }
 };
 
 struct DownloadResult
 {
     bool cached;
-    string etag;
+    std::string etag;
+    std::string effectiveUrl;
     std::shared_ptr<std::string> data;
 };
 
@@ -25,13 +33,33 @@ class Store;
 
 struct Downloader
 {
-    virtual DownloadResult download(string url, const DownloadOptions & options) = 0;
+    /* Enqueue a download request, invoking ‘success’ or ‘failure’ when
+       it finishes or fails. The overload below instead returns a
+       future, which may throw a DownloadError exception. */
+    virtual void enqueueDownload(const DownloadRequest & request,
+        std::function<void(const DownloadResult &)> success,
+        std::function<void(std::exception_ptr exc)> failure) = 0;
+
+    std::future<DownloadResult> enqueueDownload(const DownloadRequest & request);
 
-    Path downloadCached(ref<Store> store, const string & url, bool unpack);
+    /* Synchronously download a file. */
+    DownloadResult download(const DownloadRequest & request);
 
-    enum Error { NotFound, Forbidden, Misc };
+    /* Check if the specified file is already in ~/.cache/nix/tarballs
+       and is less than ‘tarball-ttl’ seconds old. Otherwise, use the
+       recorded ETag to check whether the server has a newer version,
+       and if so, download it to the Nix store. */
+    Path downloadCached(ref<Store> store, const string & uri, bool unpack, string name = "",
+        const Hash & expectedHash = Hash(), string * effectiveUri = nullptr);
+
+    enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
 };
 
+/* Return a shared Downloader object. Using this object is preferred
+   because it enables connection reuse and HTTP/2 multiplexing. */
+ref<Downloader> getDownloader();
+
+/* Return a new Downloader object. */
 ref<Downloader> makeDownloader();
 
 class DownloadError : public Error
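The header now models each transfer as a DownloadRequest and offers both a blocking download() and an asynchronous enqueueDownload(). A short usage sketch against the declarations above; the URL and retry count are illustrative:

    DownloadRequest request("https://cache.example.org/nix-cache-info");
    request.showProgress = DownloadRequest::no;
    request.tries = 5;                 // retry transient failures a few times

    /* Blocking call on the shared downloader (reuses connections). */
    DownloadResult res = getDownloader()->download(request);

    /* Or enqueue it and wait on the returned future; get() may throw
       a DownloadError. */
    std::future<DownloadResult> fut = getDownloader()->enqueueDownload(request);
    DownloadResult res2 = fut.get();
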
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index 2eab7de0d8bf..ae03604faf98 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -39,7 +39,7 @@ int LocalStore::openGCLock(LockType lockType)
         throw SysError(format("opening global GC lock ‘%1%’") % fnGCLock);
 
     if (!lockFile(fdGCLock.get(), lockType, false)) {
-        printMsg(lvlError, format("waiting for the big garbage collector lock..."));
+        printError(format("waiting for the big garbage collector lock..."));
         lockFile(fdGCLock.get(), lockType, true);
     }
 
@@ -129,7 +129,7 @@ Path LocalFSStore::addPermRoot(const Path & _storePath,
     if (settings.checkRootReachability) {
         Roots roots = findRoots();
         if (roots.find(gcRoot) == roots.end())
-            printMsg(lvlError,
+            printError(
                 format(
                     "warning: ‘%1%’ is not in a directory where the garbage collector looks for roots; "
                     "therefore, ‘%2%’ might be removed by the garbage collector")
@@ -226,7 +226,7 @@ void LocalStore::readTempRoots(PathSet & tempRoots, FDs & fds)
            only succeed if the owning process has died.  In that case
            we don't care about its temporary roots. */
         if (lockFile(fd->get(), ltWrite, false)) {
-            printMsg(lvlError, format("removing stale temporary roots file ‘%1%’") % path);
+            printError(format("removing stale temporary roots file ‘%1%’") % path);
             unlink(path.c_str());
             writeFull(fd->get(), "d");
             continue;
@@ -264,7 +264,7 @@ void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
         if (isStorePath(storePath) && isValidPath(storePath))
             roots[path] = storePath;
         else
-            printMsg(lvlInfo, format("skipping invalid root from ‘%1%’ to ‘%2%’") % path % storePath);
+            printInfo(format("skipping invalid root from ‘%1%’ to ‘%2%’") % path % storePath);
     };
 
     try {
@@ -287,7 +287,7 @@ void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
                 target = absPath(target, dirOf(path));
                 if (!pathExists(target)) {
                     if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) {
-                        printMsg(lvlInfo, format("removing stale link from ‘%1%’ to ‘%2%’") % path % target);
+                        printInfo(format("removing stale link from ‘%1%’ to ‘%2%’") % path % target);
                         unlink(path.c_str());
                     }
                 } else {
@@ -310,7 +310,7 @@ void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
     catch (SysError & e) {
         /* We only ignore permanent failures. */
         if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR)
-            printMsg(lvlInfo, format("cannot read potential root ‘%1%’") % path);
+            printInfo(format("cannot read potential root ‘%1%’") % path);
         else
             throw;
     }
@@ -513,7 +513,7 @@ void LocalStore::deletePathRecursive(GCState & state, const Path & path)
         throw SysError(format("getting status of %1%") % realPath);
     }
 
-    printMsg(lvlInfo, format("deleting ‘%1%’") % path);
+    printInfo(format("deleting ‘%1%’") % path);
 
     state.results.paths.insert(path);
 
@@ -535,7 +535,7 @@ void LocalStore::deletePathRecursive(GCState & state, const Path & path)
             state.bytesInvalidated += size;
         } catch (SysError & e) {
             if (e.errNo == ENOSPC) {
-                printMsg(lvlInfo, format("note: can't create move ‘%1%’: %2%") % realPath % e.msg());
+                printInfo(format("note: can't move ‘%1%’ to trash: %2%") % realPath % e.msg());
                 deleteGarbage(state, realPath);
             }
         }
@@ -543,7 +543,7 @@ void LocalStore::deletePathRecursive(GCState & state, const Path & path)
         deleteGarbage(state, realPath);
 
     if (state.results.bytesFreed + state.bytesInvalidated > state.options.maxFreed) {
-        printMsg(lvlInfo, format("deleted or invalidated more than %1% bytes; stopping") % state.options.maxFreed);
+        printInfo(format("deleted or invalidated more than %1% bytes; stopping") % state.options.maxFreed);
         throw GCLimitReached();
     }
 }
@@ -562,7 +562,7 @@ bool LocalStore::canReachRoot(GCState & state, PathSet & visited, const Path & p
     }
 
     if (state.roots.find(path) != state.roots.end()) {
-        printMsg(lvlDebug, format("cannot delete ‘%1%’ because it's a root") % path);
+        debug(format("cannot delete ‘%1%’ because it's a root") % path);
         state.alive.insert(path);
         return true;
     }
@@ -626,7 +626,7 @@ void LocalStore::tryToDelete(GCState & state, const Path & path)
     PathSet visited;
 
     if (canReachRoot(state, visited, path)) {
-        printMsg(lvlDebug, format("cannot delete ‘%1%’ because it's still reachable") % path);
+        debug(format("cannot delete ‘%1%’ because it's still reachable") % path);
     } else {
         /* No path we visited was a root, so everything is garbage.
            But we only delete ‘path’ and its referrers here so that
@@ -682,7 +682,7 @@ void LocalStore::removeUnusedLinks(const GCState & state)
         throw SysError(format("statting ‘%1%’") % linksDir);
     long long overhead = st.st_blocks * 512ULL;
 
-    printMsg(lvlInfo, format("note: currently hard linking saves %.2f MiB")
+    printInfo(format("note: currently hard linking saves %.2f MiB")
         % ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
 }
 
@@ -715,7 +715,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
 
     /* Find the roots.  Since we've grabbed the GC lock, the set of
        permanent roots cannot increase now. */
-    printMsg(lvlError, format("finding garbage collector roots..."));
+    printError(format("finding garbage collector roots..."));
     Roots rootMap = options.ignoreLiveness ? Roots() : findRoots();
 
     for (auto & i : rootMap) state.roots.insert(i.second);
@@ -744,7 +744,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
             createDirs(trashDir);
         } catch (SysError & e) {
             if (e.errNo == ENOSPC) {
-                printMsg(lvlInfo, format("note: can't create trash directory: %1%") % e.msg());
+                printInfo(format("note: can't create trash directory: %1%") % e.msg());
                 state.moveToTrash = false;
             }
         }
@@ -765,9 +765,9 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
     } else if (options.maxFreed > 0) {
 
         if (state.shouldDelete)
-            printMsg(lvlError, format("deleting garbage..."));
+            printError(format("deleting garbage..."));
         else
-            printMsg(lvlError, format("determining live/dead paths..."));
+            printError(format("determining live/dead paths..."));
 
         try {
 
@@ -825,12 +825,12 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
     fds.clear();
 
     /* Delete the trash directory. */
-    printMsg(lvlInfo, format("deleting ‘%1%’") % trashDir);
+    printInfo(format("deleting ‘%1%’") % trashDir);
     deleteGarbage(state, trashDir);
 
     /* Clean up the links directory. */
     if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
-        printMsg(lvlError, format("deleting unused links..."));
+        printError(format("deleting unused links..."));
         removeUnusedLinks(state);
     }
 
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index c12178e4028a..ecf81e8eb38e 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -69,7 +69,6 @@ void Settings::processEnvironment()
     nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
     nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
     nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
-    nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
     nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
     nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
     nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
@@ -77,7 +76,7 @@ void Settings::processEnvironment()
 
     // should be set with the other config options, but depends on nixLibexecDir
 #ifdef __APPLE__
-    preBuildHook = nixLibexecDir + "/nix/resolve-system-dependencies.pl";
+    preBuildHook = nixLibexecDir + "/nix/resolve-system-dependencies";
 #endif
 }
 
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 65f763ace3c7..3194193bc842 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -51,9 +51,6 @@ struct Settings {
     /* The directory where state is stored. */
     Path nixStateDir;
 
-    /* The directory where we keep the SQLite database. */
-    Path nixDBPath;
-
     /* The directory where configuration files are stored. */
     Path nixConfDir;
 
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index da80b636c76c..74ae7a4d198a 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -13,17 +13,12 @@ private:
 
     Path cacheUri;
 
-    Pool<Downloader> downloaders;
-
 public:
 
     HttpBinaryCacheStore(
         const Params & params, const Path & _cacheUri)
         : BinaryCacheStore(params)
         , cacheUri(_cacheUri)
-        , downloaders(
-            std::numeric_limits<size_t>::max(),
-            []() { return makeDownloader(); })
     {
         if (cacheUri.back() == '/')
             cacheUri.pop_back();
@@ -54,11 +49,11 @@ protected:
     bool fileExists(const std::string & path) override
     {
         try {
-            auto downloader(downloaders.get());
-            DownloadOptions options;
-            options.showProgress = DownloadOptions::no;
-            options.head = true;
-            downloader->download(cacheUri + "/" + path, options);
+            DownloadRequest request(cacheUri + "/" + path);
+            request.showProgress = DownloadRequest::no;
+            request.head = true;
+            request.tries = 5;
+            getDownloader()->download(request);
             return true;
         } catch (DownloadError & e) {
             /* S3 buckets return 403 if a file doesn't exist and the
@@ -74,18 +69,29 @@ protected:
         throw UploadToHTTP("uploading to an HTTP binary cache is not supported");
     }
 
-    std::shared_ptr<std::string> getFile(const std::string & path) override
+    void getFile(const std::string & path,
+        std::function<void(std::shared_ptr<std::string>)> success,
+        std::function<void(std::exception_ptr exc)> failure) override
     {
-        auto downloader(downloaders.get());
-        DownloadOptions options;
-        options.showProgress = DownloadOptions::no;
-        try {
-            return downloader->download(cacheUri + "/" + path, options).data;
-        } catch (DownloadError & e) {
-            if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
-                return 0;
-            throw;
-        }
+        DownloadRequest request(cacheUri + "/" + path);
+        request.showProgress = DownloadRequest::no;
+        request.tries = 8;
+
+        getDownloader()->enqueueDownload(request,
+            [success](const DownloadResult & result) {
+                success(result.data);
+            },
+            [success, failure](std::exception_ptr exc) {
+                try {
+                    std::rethrow_exception(exc);
+                } catch (DownloadError & e) {
+                    if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
+                        return success(0);
+                    failure(exc);
+                } catch (...) {
+                    failure(exc);
+                }
+            });
     }
 
 };
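Because getFile() is now continuation-passing, a caller that still wants a blocking read can bridge the two callbacks with a std::promise. This is only a sketch, not part of the patch; ‘cache’ stands for any store derived from BinaryCacheStore:

    std::promise<std::shared_ptr<std::string>> promise;
    cache->getFile("nix-cache-info",
        [&](std::shared_ptr<std::string> data) { promise.set_value(data); },
        [&](std::exception_ptr exc) { promise.set_exception(exc); });
    auto data = promise.get_future().get();   // rethrows any download failure
    if (!data) { /* a null result means the file does not exist */ }
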
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
index 91d2650fe124..0f377989bd89 100644
--- a/src/libstore/local-binary-cache-store.cc
+++ b/src/libstore/local-binary-cache-store.cc
@@ -32,7 +32,19 @@ protected:
 
     void upsertFile(const std::string & path, const std::string & data) override;
 
-    std::shared_ptr<std::string> getFile(const std::string & path) override;
+    void getFile(const std::string & path,
+        std::function<void(std::shared_ptr<std::string>)> success,
+        std::function<void(std::exception_ptr exc)> failure) override
+    {
+        sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
+            try {
+                return std::make_shared<std::string>(readFile(binaryCacheDir + "/" + path));
+            } catch (SysError & e) {
+                if (e.errNo == ENOENT) return std::shared_ptr<std::string>();
+                throw;
+            }
+        });
+    }
 
     PathSet queryAllValidPaths() override
     {
@@ -76,16 +88,6 @@ void LocalBinaryCacheStore::upsertFile(const std::string & path, const std::stri
     atomicWrite(binaryCacheDir + "/" + path, data);
 }
 
-std::shared_ptr<std::string> LocalBinaryCacheStore::getFile(const std::string & path)
-{
-    try {
-        return std::make_shared<std::string>(readFile(binaryCacheDir + "/" + path));
-    } catch (SysError & e) {
-        if (e.errNo == ENOENT) return 0;
-        throw;
-    }
-}
-
 static RegisterStoreImplementation regStore([](
     const std::string & uri, const Store::Params & params)
     -> std::shared_ptr<Store>
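The override above funnels a synchronous body through sync2async(), a small adapter defined elsewhere in this change set but not visible in this excerpt. Its probable shape, shown only for orientation:

    template<typename T>
    void sync2async(
        std::function<void(T)> success,
        std::function<void(std::exception_ptr)> failure,
        std::function<T()> body)
    {
        try {
            /* Run the body synchronously and pass its result to ‘success’;
               any exception goes to ‘failure’ instead. */
            success(body());
        } catch (...) {
            failure(std::current_exception());
        }
    }
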
diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc
index b1b9dc29e40d..4571a2211cd2 100644
--- a/src/libstore/local-fs-store.cc
+++ b/src/libstore/local-fs-store.cc
@@ -7,7 +7,9 @@ namespace nix {
 
 LocalFSStore::LocalFSStore(const Params & params)
     : Store(params)
-    , stateDir(get(params, "state", settings.nixStateDir))
+    , rootDir(get(params, "root"))
+    , stateDir(canonPath(get(params, "state", rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir)))
+    , logDir(canonPath(get(params, "log", rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir)))
 {
 }
 
@@ -21,7 +23,7 @@ struct LocalStoreAccessor : public FSAccessor
     {
         Path storePath = store->toStorePath(path);
         if (!store->isValidPath(storePath))
-            throw Error(format("path ‘%1%’ is not a valid store path") % storePath);
+            throw InvalidPath(format("path ‘%1%’ is not a valid store path") % storePath);
         return store->getRealStoreDir() + std::string(path, store->storeDir.size());
     }
 
@@ -79,7 +81,7 @@ void LocalFSStore::narFromPath(const Path & path, Sink & sink)
 {
     if (!isValidPath(path))
         throw Error(format("path ‘%s’ is not valid") % path);
-    dumpPath(path, sink);
+    dumpPath(getRealStoreDir() + std::string(path, storeDir.size()), sink);
 }
 
 }
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 822d5fce3fb3..7cd84d03d4a8 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -38,13 +38,13 @@ namespace nix {
 
 LocalStore::LocalStore(const Params & params)
     : LocalFSStore(params)
-    , realStoreDir(get(params, "real", storeDir))
-    , dbDir(get(params, "state", "") != "" ? get(params, "state", "") + "/db" : settings.nixDBPath)
+    , realStoreDir(get(params, "real", rootDir != "" ? rootDir + "/nix/store" : storeDir))
+    , dbDir(stateDir + "/db")
     , linksDir(realStoreDir + "/.links")
     , reservedPath(dbDir + "/reserved")
     , schemaPath(dbDir + "/schema")
     , trashDir(realStoreDir + "/trash")
-    , requireSigs(settings.get("signed-binary-caches", std::string("")) != "") // FIXME: rename option
+    , requireSigs(trim(settings.get("signed-binary-caches", std::string(""))) != "") // FIXME: rename option
     , publicKeys(getDefaultPublicKeys())
 {
     auto state(_state.lock());
@@ -76,7 +76,7 @@ LocalStore::LocalStore(const Params & params)
 
         struct group * gr = getgrnam(settings.buildUsersGroup.c_str());
         if (!gr)
-            printMsg(lvlError, format("warning: the group ‘%1%’ specified in ‘build-users-group’ does not exist")
+            printError(format("warning: the group ‘%1%’ specified in ‘build-users-group’ does not exist")
                 % settings.buildUsersGroup);
         else {
             struct stat st;
@@ -136,7 +136,7 @@ LocalStore::LocalStore(const Params & params)
     globalLock = openLockFile(globalLockPath.c_str(), true);
 
     if (!lockFile(globalLock.get(), ltRead, false)) {
-        printMsg(lvlError, "waiting for the big Nix store lock...");
+        printError("waiting for the big Nix store lock...");
         lockFile(globalLock.get(), ltRead, true);
     }
 
@@ -167,7 +167,7 @@ LocalStore::LocalStore(const Params & params)
                 "please upgrade Nix to version 1.11 first.");
 
         if (!lockFile(globalLock.get(), ltWrite, false)) {
-            printMsg(lvlError, "waiting for exclusive access to the Nix store...");
+            printError("waiting for exclusive access to the Nix store...");
             lockFile(globalLock.get(), ltWrite, true);
         }
 
@@ -181,17 +181,20 @@ LocalStore::LocalStore(const Params & params)
 
         if (curSchema < 8) {
             SQLiteTxn txn(state->db);
-            if (sqlite3_exec(state->db, "alter table ValidPaths add column ultimate integer", 0, 0, 0) != SQLITE_OK)
-                throwSQLiteError(state->db, "upgrading database schema");
-            if (sqlite3_exec(state->db, "alter table ValidPaths add column sigs text", 0, 0, 0) != SQLITE_OK)
-                throwSQLiteError(state->db, "upgrading database schema");
+            state->db.exec("alter table ValidPaths add column ultimate integer");
+            state->db.exec("alter table ValidPaths add column sigs text");
             txn.commit();
         }
 
         if (curSchema < 9) {
             SQLiteTxn txn(state->db);
-            if (sqlite3_exec(state->db, "drop table FailedPaths", 0, 0, 0) != SQLITE_OK)
-                throwSQLiteError(state->db, "upgrading database schema");
+            state->db.exec("drop table FailedPaths");
+            txn.commit();
+        }
+
+        if (curSchema < 10) {
+            SQLiteTxn txn(state->db);
+            state->db.exec("alter table ValidPaths add column ca text");
             txn.commit();
         }
 
@@ -204,13 +207,13 @@ LocalStore::LocalStore(const Params & params)
 
     /* Prepare SQL statements. */
     state->stmtRegisterValidPath.create(state->db,
-        "insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs) values (?, ?, ?, ?, ?, ?, ?);");
+        "insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs, ca) values (?, ?, ?, ?, ?, ?, ?, ?);");
     state->stmtUpdatePathInfo.create(state->db,
-        "update ValidPaths set narSize = ?, hash = ?, ultimate = ?, sigs = ? where path = ?;");
+        "update ValidPaths set narSize = ?, hash = ?, ultimate = ?, sigs = ?, ca = ? where path = ?;");
     state->stmtAddReference.create(state->db,
         "insert or replace into Refs (referrer, reference) values (?, ?);");
     state->stmtQueryPathInfo.create(state->db,
-        "select id, hash, registrationTime, deriver, narSize, ultimate, sigs from ValidPaths where path = ?;");
+        "select id, hash, registrationTime, deriver, narSize, ultimate, sigs, ca from ValidPaths where path = ?;");
     state->stmtQueryReferences.create(state->db,
         "select path from Refs join ValidPaths on reference = id where referrer = ?;");
     state->stmtQueryReferrers.create(state->db,
@@ -279,8 +282,7 @@ void LocalStore::openDB(State & state, bool create)
     if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
         throwSQLiteError(db, "setting timeout");
 
-    if (sqlite3_exec(db, "pragma foreign_keys = 1;", 0, 0, 0) != SQLITE_OK)
-        throwSQLiteError(db, "enabling foreign keys");
+    db.exec("pragma foreign_keys = 1");
 
     /* !!! check whether sqlite has been built with foreign key
        support */
@@ -290,8 +292,7 @@ void LocalStore::openDB(State & state, bool create)
        all.  This can cause database corruption if the system
        crashes. */
     string syncMode = settings.fsyncMetadata ? "normal" : "off";
-    if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
-        throwSQLiteError(db, "setting synchronous mode");
+    db.exec("pragma synchronous = " + syncMode);
 
     /* Set the SQLite journal mode.  WAL mode is fastest, so it's the
        default. */
@@ -319,8 +320,7 @@ void LocalStore::openDB(State & state, bool create)
         const char * schema =
 #include "schema.sql.hh"
             ;
-        if (sqlite3_exec(db, (const char *) schema, 0, 0, 0) != SQLITE_OK)
-            throwSQLiteError(db, "initialising database schema");
+        db.exec(schema);
     }
 }
 
@@ -486,9 +486,9 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation &
         if (out == drv.outputs.end())
             throw Error(format("derivation ‘%1%’ does not have an output named ‘out’") % drvPath);
 
-        bool recursive; HashType ht; Hash h;
-        out->second.parseHashInfo(recursive, ht, h);
-        Path outPath = makeFixedOutputPath(recursive, ht, h, drvName);
+        bool recursive; Hash h;
+        out->second.parseHashInfo(recursive, h);
+        Path outPath = makeFixedOutputPath(recursive, h, drvName);
 
         StringPairs::const_iterator j = drv.env.find("out");
         if (out->second.path != outPath || j == drv.env.end() || j->second != outPath)
@@ -527,6 +527,7 @@ uint64_t LocalStore::addValidPath(State & state,
         (info.narSize, info.narSize != 0)
         (info.ultimate ? 1 : 0, info.ultimate)
         (concatStringsSep(" ", info.sigs), !info.sigs.empty())
+        (info.ca, !info.ca.empty())
         .exec();
     uint64_t id = sqlite3_last_insert_rowid(state.db);
 
@@ -576,46 +577,54 @@ Hash parseHashField(const Path & path, const string & s)
 }
 
 
-std::shared_ptr<ValidPathInfo> LocalStore::queryPathInfoUncached(const Path & path)
+void LocalStore::queryPathInfoUncached(const Path & path,
+    std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+    std::function<void(std::exception_ptr exc)> failure)
 {
-    auto info = std::make_shared<ValidPathInfo>();
-    info->path = path;
+    sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {
 
-    assertStorePath(path);
+        auto info = std::make_shared<ValidPathInfo>();
+        info->path = path;
 
-    return retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() {
-        auto state(_state.lock());
+        assertStorePath(path);
+
+        return retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() {
+            auto state(_state.lock());
 
-        /* Get the path info. */
-        auto useQueryPathInfo(state->stmtQueryPathInfo.use()(path));
+            /* Get the path info. */
+            auto useQueryPathInfo(state->stmtQueryPathInfo.use()(path));
 
-        if (!useQueryPathInfo.next())
-            return std::shared_ptr<ValidPathInfo>();
+            if (!useQueryPathInfo.next())
+                return std::shared_ptr<ValidPathInfo>();
 
-        info->id = useQueryPathInfo.getInt(0);
+            info->id = useQueryPathInfo.getInt(0);
 
-        info->narHash = parseHashField(path, useQueryPathInfo.getStr(1));
+            info->narHash = parseHashField(path, useQueryPathInfo.getStr(1));
 
-        info->registrationTime = useQueryPathInfo.getInt(2);
+            info->registrationTime = useQueryPathInfo.getInt(2);
 
-        auto s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 3);
-        if (s) info->deriver = s;
+            auto s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 3);
+            if (s) info->deriver = s;
 
-        /* Note that narSize = NULL yields 0. */
-        info->narSize = useQueryPathInfo.getInt(4);
+            /* Note that narSize = NULL yields 0. */
+            info->narSize = useQueryPathInfo.getInt(4);
 
-        info->ultimate = useQueryPathInfo.getInt(5) == 1;
+            info->ultimate = useQueryPathInfo.getInt(5) == 1;
 
-        s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 6);
-        if (s) info->sigs = tokenizeString<StringSet>(s, " ");
+            s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 6);
+            if (s) info->sigs = tokenizeString<StringSet>(s, " ");
 
-        /* Get the references. */
-        auto useQueryReferences(state->stmtQueryReferences.use()(info->id));
+            s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 7);
+            if (s) info->ca = s;
 
-        while (useQueryReferences.next())
-            info->references.insert(useQueryReferences.getStr(0));
+            /* Get the references. */
+            auto useQueryReferences(state->stmtQueryReferences.use()(info->id));
 
-        return info;
+            while (useQueryReferences.next())
+                info->references.insert(useQueryReferences.getStr(0));
+
+            return info;
+        });
     });
 }
 
@@ -628,6 +637,7 @@ void LocalStore::updatePathInfo(State & state, const ValidPathInfo & info)
         ("sha256:" + printHash(info.narHash))
         (info.ultimate ? 1 : 0, info.ultimate)
         (concatStringsSep(" ", info.sigs), !info.sigs.empty())
+        (info.ca, !info.ca.empty())
         (info.path)
         .exec();
 }
@@ -755,7 +765,7 @@ Path LocalStore::queryPathFromHashPart(const string & hashPart)
 
     Path prefix = storeDir + "/" + hashPart;
 
-    return retrySQLite<Path>([&]() {
+    return retrySQLite<Path>([&]() -> std::string {
         auto state(_state.lock());
 
         auto useQueryPathFromHashPart(state->stmtQueryPathFromHashPart.use()(prefix));
@@ -898,7 +908,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, const std::string & nar,
         throw Error(format("hash mismatch importing path ‘%s’; expected hash ‘%s’, got ‘%s’") %
             info.path % info.narHash.to_string() % h.to_string());
 
-    if (requireSigs && !dontCheckSigs && !info.checkSignatures(publicKeys))
+    if (requireSigs && !dontCheckSigs && !info.checkSignatures(*this, publicKeys))
         throw Error(format("cannot import path ‘%s’ because it lacks a valid signature") % info.path);
 
     addTempRoot(info.path);
@@ -940,7 +950,7 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
 {
     Hash h = hashString(hashAlgo, dump);
 
-    Path dstPath = makeFixedOutputPath(recursive, hashAlgo, h, name);
+    Path dstPath = makeFixedOutputPath(recursive, h, name);
 
     addTempRoot(dstPath);
 
@@ -983,6 +993,7 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
             info.narHash = hash.first;
             info.narSize = hash.second;
             info.ultimate = true;
+            info.ca = "fixed:" + (recursive ? (std::string) "r:" : "") + h.to_string();
             registerValidPath(info);
         }
 
@@ -1014,7 +1025,8 @@ Path LocalStore::addToStore(const string & name, const Path & _srcPath,
 Path LocalStore::addTextToStore(const string & name, const string & s,
     const PathSet & references, bool repair)
 {
-    Path dstPath = computeStorePathForText(name, s, references);
+    auto hash = hashString(htSHA256, s);
+    auto dstPath = makeTextPath(name, hash, references);
 
     addTempRoot(dstPath);
 
@@ -1034,16 +1046,17 @@ Path LocalStore::addTextToStore(const string & name, const string & s,
 
             StringSink sink;
             dumpString(s, sink);
-            auto hash = hashString(htSHA256, *sink.s);
+            auto narHash = hashString(htSHA256, *sink.s);
 
             optimisePath(realPath);
 
             ValidPathInfo info;
             info.path = dstPath;
-            info.narHash = hash;
+            info.narHash = narHash;
             info.narSize = sink.s->size();
             info.references = references;
             info.ultimate = true;
+            info.ca = "text:" + hash.to_string();
             registerValidPath(info);
         }
 
@@ -1095,7 +1108,7 @@ void LocalStore::invalidatePathChecked(const Path & path)
 
 bool LocalStore::verifyStore(bool checkContents, bool repair)
 {
-    printMsg(lvlError, format("reading the Nix store..."));
+    printError(format("reading the Nix store..."));
 
     bool errors = false;
 
@@ -1106,7 +1119,7 @@ bool LocalStore::verifyStore(bool checkContents, bool repair)
     for (auto & i : readDirectory(realStoreDir)) store.insert(i.name);
 
     /* Check whether all valid paths actually exist. */
-    printMsg(lvlInfo, "checking path existence...");
+    printInfo("checking path existence...");
 
     PathSet validPaths2 = queryAllValidPaths(), validPaths, done;
 
@@ -1119,7 +1132,7 @@ bool LocalStore::verifyStore(bool checkContents, bool repair)
 
     /* Optionally, check the content hashes (slow). */
     if (checkContents) {
-        printMsg(lvlInfo, "checking hashes...");
+        printInfo("checking hashes...");
 
         Hash nullHash(htSHA256);
 
@@ -1132,7 +1145,7 @@ bool LocalStore::verifyStore(bool checkContents, bool repair)
                 HashResult current = hashPath(info->narHash.type, i);
 
                 if (info->narHash != nullHash && info->narHash != current.first) {
-                    printMsg(lvlError, format("path ‘%1%’ was modified! "
+                    printError(format("path ‘%1%’ was modified! "
                             "expected hash ‘%2%’, got ‘%3%’")
                         % i % printHash(info->narHash) % printHash(current.first));
                     if (repair) repairPath(i); else errors = true;
@@ -1142,14 +1155,14 @@ bool LocalStore::verifyStore(bool checkContents, bool repair)
 
                     /* Fill in missing hashes. */
                     if (info->narHash == nullHash) {
-                        printMsg(lvlError, format("fixing missing hash on ‘%1%’") % i);
+                        printError(format("fixing missing hash on ‘%1%’") % i);
                         info->narHash = current.first;
                         update = true;
                     }
 
                     /* Fill in missing narSize fields (from old stores). */
                     if (info->narSize == 0) {
-                        printMsg(lvlError, format("updating size field on ‘%1%’ to %2%") % i % current.second);
+                        printError(format("updating size field on ‘%1%’ to %2%") % i % current.second);
                         info->narSize = current.second;
                         update = true;
                     }
@@ -1165,9 +1178,9 @@ bool LocalStore::verifyStore(bool checkContents, bool repair)
                 /* It's possible that the path got GC'ed, so ignore
                    errors on invalid paths. */
                 if (isValidPath(i))
-                    printMsg(lvlError, format("error: %1%") % e.msg());
+                    printError(format("error: %1%") % e.msg());
                 else
-                    printMsg(lvlError, format("warning: %1%") % e.msg());
+                    printError(format("warning: %1%") % e.msg());
                 errors = true;
             }
         }
@@ -1186,7 +1199,7 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store,
     done.insert(path);
 
     if (!isStorePath(path)) {
-        printMsg(lvlError, format("path ‘%1%’ is not in the Nix store") % path);
+        printError(format("path ‘%1%’ is not in the Nix store") % path);
         auto state(_state.lock());
         invalidatePath(*state, path);
         return;
@@ -1205,16 +1218,16 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store,
             }
 
         if (canInvalidate) {
-            printMsg(lvlError, format("path ‘%1%’ disappeared, removing from database...") % path);
+            printError(format("path ‘%1%’ disappeared, removing from database...") % path);
             auto state(_state.lock());
             invalidatePath(*state, path);
         } else {
-            printMsg(lvlError, format("path ‘%1%’ disappeared, but it still has valid referrers!") % path);
+            printError(format("path ‘%1%’ disappeared, but it still has valid referrers!") % path);
             if (repair)
                 try {
                     repairPath(path);
                 } catch (Error & e) {
-                    printMsg(lvlError, format("warning: %1%") % e.msg());
+                    printError(format("warning: %1%") % e.msg());
                     errors = true;
                 }
             else errors = true;
@@ -1266,7 +1279,7 @@ static void makeMutable(const Path & path)
 void LocalStore::upgradeStore7()
 {
     if (getuid() != 0) return;
-    printMsg(lvlError, "removing immutable bits from the Nix store (this may take a while)...");
+    printError("removing immutable bits from the Nix store (this may take a while)...");
     makeMutable(realStoreDir);
 }
 
@@ -1282,9 +1295,7 @@ void LocalStore::upgradeStore7()
 void LocalStore::vacuumDB()
 {
     auto state(_state.lock());
-
-    if (sqlite3_exec(state->db, "vacuum;", 0, 0, 0) != SQLITE_OK)
-        throwSQLiteError(state->db, "vacuuming SQLite database");
+    state->db.exec("vacuum");
 }
 
 
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 7bfc4ad34c3f..24188130db78 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -17,8 +17,8 @@ namespace nix {
 /* Nix store and database schema version.  Version 1 (or 0) was Nix <=
    0.7.  Version 2 was Nix 0.8 and 0.9.  Version 3 is Nix 0.10.
    Version 4 is Nix 0.11.  Version 5 is Nix 0.12-0.16.  Version 6 is
-   Nix 1.0.  Version 7 is Nix 1.3. Version 9 is 1.12. */
-const int nixSchemaVersion = 9;
+   Nix 1.0.  Version 7 is Nix 1.3. Version 10 is 1.12. */
+const int nixSchemaVersion = 10;
 
 
 extern string drvsLogDir;
@@ -106,7 +106,9 @@ public:
 
     PathSet queryAllValidPaths() override;
 
-    std::shared_ptr<ValidPathInfo> queryPathInfoUncached(const Path & path) override;
+    void queryPathInfoUncached(const Path & path,
+        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+        std::function<void(std::exception_ptr exc)> failure) override;
 
     void queryReferrers(const Path & path, PathSet & referrers) override;
 
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
index da654ba0d2c3..0c2c49e5531f 100644
--- a/src/libstore/misc.cc
+++ b/src/libstore/misc.cc
@@ -8,66 +8,90 @@
 namespace nix {
 
 
-void Store::computeFSClosure(const Path & path,
-    PathSet & paths, bool flipDirection, bool includeOutputs, bool includeDerivers)
+void Store::computeFSClosure(const Path & startPath,
+    PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
 {
-    ThreadPool pool;
+    struct State
+    {
+        size_t pending;
+        PathSet & paths;
+        std::exception_ptr exc;
+    };
 
-    Sync<bool> state_;
+    Sync<State> state_(State{0, paths_, 0});
 
-    std::function<void(Path)> doPath;
+    std::function<void(const Path &)> enqueue;
 
-    doPath = [&](const Path & path) {
+    std::condition_variable done;
+
+    enqueue = [&](const Path & path) -> void {
         {
             auto state(state_.lock());
-            if (paths.count(path)) return;
-            paths.insert(path);
+            if (state->exc) return;
+            if (state->paths.count(path)) return;
+            state->paths.insert(path);
+            state->pending++;
         }
 
-        auto info = queryPathInfo(path);
+        queryPathInfo(path,
+            [&, path](ref<ValidPathInfo> info) {
+                // FIXME: calls to isValidPath() should be async
 
-        if (flipDirection) {
+                if (flipDirection) {
 
-            PathSet referrers;
-            queryReferrers(path, referrers);
-            for (auto & ref : referrers)
-                if (ref != path)
-                    pool.enqueue(std::bind(doPath, ref));
+                    PathSet referrers;
+                    queryReferrers(path, referrers);
+                    for (auto & ref : referrers)
+                        if (ref != path)
+                            enqueue(ref);
 
-            if (includeOutputs) {
-                PathSet derivers = queryValidDerivers(path);
-                for (auto & i : derivers)
-                    pool.enqueue(std::bind(doPath, i));
-            }
+                    if (includeOutputs)
+                        for (auto & i : queryValidDerivers(path))
+                            enqueue(i);
 
-            if (includeDerivers && isDerivation(path)) {
-                PathSet outputs = queryDerivationOutputs(path);
-                for (auto & i : outputs)
-                    if (isValidPath(i) && queryPathInfo(i)->deriver == path)
-                        pool.enqueue(std::bind(doPath, i));
-            }
+                    if (includeDerivers && isDerivation(path))
+                        for (auto & i : queryDerivationOutputs(path))
+                            if (isValidPath(i) && queryPathInfo(i)->deriver == path)
+                                enqueue(i);
 
-        } else {
+                } else {
 
-            for (auto & ref : info->references)
-                if (ref != path)
-                    pool.enqueue(std::bind(doPath, ref));
+                    for (auto & ref : info->references)
+                        if (ref != path)
+                            enqueue(ref);
 
-            if (includeOutputs && isDerivation(path)) {
-                PathSet outputs = queryDerivationOutputs(path);
-                for (auto & i : outputs)
-                    if (isValidPath(i)) pool.enqueue(std::bind(doPath, i));
-            }
+                    if (includeOutputs && isDerivation(path))
+                        for (auto & i : queryDerivationOutputs(path))
+                            if (isValidPath(i)) enqueue(i);
 
-            if (includeDerivers && isValidPath(info->deriver))
-                pool.enqueue(std::bind(doPath, info->deriver));
+                    if (includeDerivers && isValidPath(info->deriver))
+                        enqueue(info->deriver);
 
-        }
+                }
+
+                {
+                    auto state(state_.lock());
+                    assert(state->pending);
+                    if (!--state->pending) done.notify_one();
+                }
+
+            },
+
+            [&, path](std::exception_ptr exc) {
+                auto state(state_.lock());
+                if (!state->exc) state->exc = exc;
+                assert(state->pending);
+                if (!--state->pending) done.notify_one();
+            });
     };
 
-    pool.enqueue(std::bind(doPath, path));
+    enqueue(startPath);
 
-    pool.process();
+    {
+        auto state(state_.lock());
+        while (state->pending) state.wait(done);
+        if (state->exc) std::rethrow_exception(state->exc);
+    }
 }
 
 
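The rewritten computeFSClosure() above drops the thread pool in favour of callback-driven recursion: each enqueue() increments a ‘pending’ counter, every queryPathInfo() callback decrements it, and the caller sleeps on a condition variable until the counter hits zero (any recorded exception is rethrown afterwards). The same wait-for-all-callbacks idiom in isolation, as a self-contained sketch with illustrative names:

    #include <condition_variable>
    #include <mutex>

    struct PendingWork
    {
        std::mutex mutex;
        std::condition_variable done;
        size_t pending = 0;

        void add()    { std::lock_guard<std::mutex> lock(mutex); ++pending; }
        void finish() { std::lock_guard<std::mutex> lock(mutex); if (!--pending) done.notify_one(); }
        void wait()   { std::unique_lock<std::mutex> lock(mutex); done.wait(lock, [&] { return pending == 0; }); }
    };
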
diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc
index 8896862be149..ded19c05d2cd 100644
--- a/src/libstore/nar-accessor.cc
+++ b/src/libstore/nar-accessor.cc
@@ -27,7 +27,7 @@ struct NarIndexer : ParseSink, StringSource
 
     Path currentPath;
     std::string currentStart;
-    bool isExec;
+    bool isExec = false;
 
     NarIndexer(const std::string & nar) : StringSource(nar)
     {
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
index 172a918ff453..d28ff42c7f23 100644
--- a/src/libstore/nar-info-disk-cache.cc
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -78,21 +78,16 @@ public:
         Path dbPath = getCacheDir() + "/nix/binary-cache-v5.sqlite";
         createDirs(dirOf(dbPath));
 
-        if (sqlite3_open_v2(dbPath.c_str(), &state->db.db,
-                SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0) != SQLITE_OK)
-            throw Error(format("cannot open store cache ‘%s’") % dbPath);
+        state->db = SQLite(dbPath);
 
         if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK)
             throwSQLiteError(state->db, "setting timeout");
 
         // We can always reproduce the cache.
-        if (sqlite3_exec(state->db, "pragma synchronous = off", 0, 0, 0) != SQLITE_OK)
-            throwSQLiteError(state->db, "making database asynchronous");
-        if (sqlite3_exec(state->db, "pragma main.journal_mode = truncate", 0, 0, 0) != SQLITE_OK)
-            throwSQLiteError(state->db, "setting journal mode");
+        state->db.exec("pragma synchronous = off");
+        state->db.exec("pragma main.journal_mode = truncate");
 
-        if (sqlite3_exec(state->db, schema, 0, 0, 0) != SQLITE_OK)
-            throwSQLiteError(state->db, "initialising database schema");
+        state->db.exec(schema);
 
         state->insertCache.create(state->db,
             "insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc
index b0a8d77c2fba..201cac671a55 100644
--- a/src/libstore/nar-info.cc
+++ b/src/libstore/nar-info.cc
@@ -67,6 +67,10 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
             system = value;
         else if (name == "Sig")
             sigs.insert(value);
+        else if (name == "CA") {
+            if (!ca.empty()) corrupt();
+            ca = value;
+        }
 
         pos = eol + 1;
     }
@@ -101,6 +105,9 @@ std::string NarInfo::to_string() const
     for (auto sig : sigs)
         res += "Sig: " + sig + "\n";
 
+    if (!ca.empty())
+        res += "CA: " + ca + "\n";
+
     return res;
 }
 
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index 927478121244..1bf8b7d83bbc 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -43,7 +43,7 @@ struct MakeReadOnly
 
 LocalStore::InodeHash LocalStore::loadInodeHash()
 {
-    printMsg(lvlDebug, "loading hash inodes in memory");
+    debug("loading hash inodes in memory");
     InodeHash inodeHash;
 
     AutoCloseDir dir = opendir(linksDir.c_str());
@@ -75,7 +75,7 @@ Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHa
         checkInterrupt();
 
         if (inodeHash.count(dirent->d_ino)) {
-            printMsg(lvlDebug, format("‘%1%’ is already linked") % dirent->d_name);
+            debug(format("‘%1%’ is already linked") % dirent->d_name);
             continue;
         }
 
@@ -116,13 +116,13 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa
        NixOS (example: $fontconfig/var/cache being modified).  Skip
        those files.  FIXME: check the modification time. */
     if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
-        printMsg(lvlError, format("skipping suspicious writable file ‘%1%’") % path);
+        printError(format("skipping suspicious writable file ‘%1%’") % path);
         return;
     }
 
     /* This can still happen on top-level files. */
     if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
-        printMsg(lvlDebug, format("‘%1%’ is already linked, with %2% other file(s)") % path % (st.st_nlink - 2));
+        debug(format("‘%1%’ is already linked, with %2% other file(s)") % path % (st.st_nlink - 2));
         return;
     }
 
@@ -136,7 +136,7 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa
        contents of the symlink (i.e. the result of readlink()), not
        the contents of the target (which may not even exist). */
     Hash hash = hashPath(htSHA256, path).first;
-    printMsg(lvlDebug, format("‘%1%’ has hash ‘%2%’") % path % printHash(hash));
+    debug(format("‘%1%’ has hash ‘%2%’") % path % printHash(hash));
 
     /* Check if this is a known hash. */
     Path linkPath = linksDir + "/" + printHash32(hash);
@@ -161,12 +161,12 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa
         throw SysError(format("getting attributes of path ‘%1%’") % linkPath);
 
     if (st.st_ino == stLink.st_ino) {
-        printMsg(lvlDebug, format("‘%1%’ is already linked to ‘%2%’") % path % linkPath);
+        debug(format("‘%1%’ is already linked to ‘%2%’") % path % linkPath);
         return;
     }
 
     if (st.st_size != stLink.st_size) {
-        printMsg(lvlError, format("removing corrupted link ‘%1%’") % linkPath);
+        printError(format("removing corrupted link ‘%1%’") % linkPath);
         unlink(linkPath.c_str());
         goto retry;
     }
@@ -192,7 +192,7 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa
                systems).  This is likely to happen with empty files.
                Just shrug and ignore. */
             if (st.st_size)
-                printMsg(lvlInfo, format("‘%1%’ has maximum number of links") % linkPath);
+                printInfo(format("‘%1%’ has maximum number of links") % linkPath);
             return;
         }
         throw SysError(format("cannot link ‘%1%’ to ‘%2%’") % tempLink % linkPath);
@@ -201,14 +201,14 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa
     /* Atomically replace the old file with the new hard link. */
     if (rename(tempLink.c_str(), path.c_str()) == -1) {
         if (unlink(tempLink.c_str()) == -1)
-            printMsg(lvlError, format("unable to unlink ‘%1%’") % tempLink);
+            printError(format("unable to unlink ‘%1%’") % tempLink);
         if (errno == EMLINK) {
             /* Some filesystems generate too many links on the rename,
                rather than on the original link.  (Probably it
                temporarily increases the st_nlink field before
                decreasing it again.) */
             if (st.st_size)
-                printMsg(lvlInfo, format("‘%1%’ has maximum number of links") % linkPath);
+                printInfo(format("‘%1%’ has maximum number of links") % linkPath);
             return;
         }
         throw SysError(format("cannot rename ‘%1%’ to ‘%2%’") % tempLink % path);
@@ -244,7 +244,7 @@ void LocalStore::optimiseStore()
 
     optimiseStore(stats);
 
-    printMsg(lvlError,
+    printError(
         format("%1% freed by hard-linking %2% files")
         % showBytes(stats.bytesFreed)
         % stats.filesLinked);
diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc
index b9e178d61f3c..8788ee1649fb 100644
--- a/src/libstore/pathlocks.cc
+++ b/src/libstore/pathlocks.cc
@@ -121,7 +121,7 @@ bool PathLocks::lockPaths(const PathSet & _paths,
             /* Acquire an exclusive lock. */
             if (!lockFile(fd.get(), ltWrite, false)) {
                 if (wait) {
-                    if (waitMsg != "") printMsg(lvlError, waitMsg);
+                    if (waitMsg != "") printError(waitMsg);
                     lockFile(fd.get(), ltWrite, true);
                 } else {
                     /* Failed to lock this path; release all other
@@ -174,7 +174,7 @@ void PathLocks::unlock()
 
         lockedPaths.erase(i.second);
         if (close(i.first) == -1)
-            printMsg(lvlError,
+            printError(
                 format("error (ignored): cannot close lock file on ‘%1%’") % i.second);
 
         debug(format("lock released on ‘%1%’") % i.second);
diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc
index 449c88b576b6..f24daa8862a1 100644
--- a/src/libstore/profiles.cc
+++ b/src/libstore/profiles.cc
@@ -132,9 +132,9 @@ void deleteGeneration(const Path & profile, unsigned int gen)
 static void deleteGeneration2(const Path & profile, unsigned int gen, bool dryRun)
 {
     if (dryRun)
-        printMsg(lvlInfo, format("would remove generation %1%") % gen);
+        printInfo(format("would remove generation %1%") % gen);
     else {
-        printMsg(lvlInfo, format("removing generation %1%") % gen);
+        printInfo(format("removing generation %1%") % gen);
         deleteGeneration(profile, gen);
     }
 }
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index ab05c3844289..38af145f9856 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -94,6 +94,8 @@ ref<RemoteStore::Connection> RemoteStore::openConnection()
         conn->daemonVersion = readInt(conn->from);
         if (GET_PROTOCOL_MAJOR(conn->daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
             throw Error("Nix daemon protocol version not supported");
+        if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 10)
+            throw Error("the Nix daemon version is too old");
         conn->to << PROTOCOL_VERSION;
 
         if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 14) {
@@ -127,17 +129,13 @@ void RemoteStore::setOptions(ref<Connection> conn)
        << settings.tryFallback
        << verbosity
        << settings.maxBuildJobs
-       << settings.maxSilentTime;
-    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 2)
-        conn->to << settings.useBuildHook;
-    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 4)
-        conn->to << (settings.verboseBuild ? lvlError : lvlVomit)
-                 << 0 // obsolete log type
-                 << 0 /* obsolete print build trace */;
-    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 6)
-        conn->to << settings.buildCores;
-    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 10)
-        conn->to << settings.useSubstitutes;
+       << settings.maxSilentTime
+       << settings.useBuildHook
+       << (settings.verboseBuild ? lvlError : lvlVomit)
+       << 0 // obsolete log type
+       << 0 /* obsolete print build trace */
+       << settings.buildCores
+       << settings.useSubstitutes;
 
     if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 12) {
         Settings::SettingsMap overrides = settings.getOverrides();
@@ -213,8 +211,6 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
 
     auto conn(connections->get());
 
-    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 3) return;
-
     if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
 
         for (auto & i : paths) {
@@ -227,7 +223,7 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
             if (info.deriver != "") assertStorePath(info.deriver);
             info.references = readStorePaths<PathSet>(*this, conn->from);
             info.downloadSize = readLongLong(conn->from);
-            info.narSize = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 7 ? readLongLong(conn->from) : 0;
+            info.narSize = readLongLong(conn->from);
             infos[i] = info;
         }
 
@@ -250,35 +246,40 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
 }
 
 
-std::shared_ptr<ValidPathInfo> RemoteStore::queryPathInfoUncached(const Path & path)
+void RemoteStore::queryPathInfoUncached(const Path & path,
+    std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+    std::function<void(std::exception_ptr exc)> failure)
 {
-    auto conn(connections->get());
-    conn->to << wopQueryPathInfo << path;
-    try {
-        conn->processStderr();
-    } catch (Error & e) {
-        // Ugly backwards compatibility hack.
-        if (e.msg().find("is not valid") != std::string::npos)
-            throw InvalidPath(e.what());
-        throw;
-    }
-    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
-        bool valid = readInt(conn->from) != 0;
-        if (!valid) throw InvalidPath(format("path ‘%s’ is not valid") % path);
-    }
-    auto info = std::make_shared<ValidPathInfo>();
-    info->path = path;
-    info->deriver = readString(conn->from);
-    if (info->deriver != "") assertStorePath(info->deriver);
-    info->narHash = parseHash(htSHA256, readString(conn->from));
-    info->references = readStorePaths<PathSet>(*this, conn->from);
-    info->registrationTime = readInt(conn->from);
-    info->narSize = readLongLong(conn->from);
-    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
-        info->ultimate = readInt(conn->from) != 0;
-        info->sigs = readStrings<StringSet>(conn->from);
-    }
-    return info;
+    sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {
+        auto conn(connections->get());
+        conn->to << wopQueryPathInfo << path;
+        try {
+            conn->processStderr();
+        } catch (Error & e) {
+            // Ugly backwards compatibility hack.
+            if (e.msg().find("is not valid") != std::string::npos)
+                throw InvalidPath(e.what());
+            throw;
+        }
+        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
+            bool valid = readInt(conn->from) != 0;
+            if (!valid) throw InvalidPath(format("path ‘%s’ is not valid") % path);
+        }
+        auto info = std::make_shared<ValidPathInfo>();
+        info->path = path;
+        info->deriver = readString(conn->from);
+        if (info->deriver != "") assertStorePath(info->deriver);
+        info->narHash = parseHash(htSHA256, readString(conn->from));
+        info->references = readStorePaths<PathSet>(*this, conn->from);
+        info->registrationTime = readInt(conn->from);
+        info->narSize = readLongLong(conn->from);
+        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
+            info->ultimate = readInt(conn->from) != 0;
+            info->sigs = readStrings<StringSet>(conn->from);
+            info->ca = readString(conn->from);
+        }
+        return info;
+    });
 }
 
 
@@ -481,11 +482,11 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
 {
     auto conn(connections->get());
 
-    conn->to << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness
-       << options.maxFreed << 0;
-    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 5)
+    conn->to
+        << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness
+        << options.maxFreed
         /* removed options */
-        conn->to << 0 << 0;
+        << 0 << 0 << 0;
 
     conn->processStderr();
 
@@ -558,12 +559,12 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
             to.flush();
         }
         else
-            printMsg(lvlError, chomp(readString(from)));
+            printError(chomp(readString(from)));
     }
     if (msg == STDERR_ERROR) {
         string error = readString(from);
-        unsigned int status = GET_PROTOCOL_MINOR(daemonVersion) >= 8 ? readInt(from) : 1;
-        throw Error(format("%1%") % error, status);
+        unsigned int status = readInt(from);
+        throw Error(status, error);
     }
     else if (msg != STDERR_LAST)
         throw Error("protocol error processing standard error");
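
For context, a sketch of what the new callback-based queryPathInfoUncached() contract (declared in remote-store.hh below) asks of a store backend. This is a free-function stand-in, not code from this change; it simply wraps synchronous work with the sync2async() helper added to util.hh further down in this diff:

    #include "store-api.hh"
    #include "util.hh"

    // Hypothetical stand-in for a backend's override of queryPathInfoUncached().
    static void queryPathInfoUncachedSketch(const nix::Path & path,
        std::function<void(std::shared_ptr<nix::ValidPathInfo>)> success,
        std::function<void(std::exception_ptr exc)> failure)
    {
        nix::sync2async<std::shared_ptr<nix::ValidPathInfo>>(success, failure, [&]() {
            auto info = std::make_shared<nix::ValidPathInfo>();
            info->path = path;
            // ... fill in narHash, narSize, references, etc. ...
            return info;   // handed to ‘success’; any exception is routed to ‘failure’
        });
    }
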
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index e756805ea05b..9879337d65a6 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -34,7 +34,9 @@ public:
 
     PathSet queryAllValidPaths() override;
 
-    std::shared_ptr<ValidPathInfo> queryPathInfoUncached(const Path & path) override;
+    void queryPathInfoUncached(const Path & path,
+        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+        std::function<void(std::exception_ptr exc)> failure) override;
 
     void queryReferrers(const Path & path, PathSet & referrers) override;
 
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index ed95620bbd7c..91ec3643b90e 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -161,52 +161,56 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
 
         auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
 
-        printMsg(lvlInfo, format("uploaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
+        printInfo(format("uploaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
             % bucketName % path % data.size() % duration);
 
         stats.putTimeMs += duration;
     }
 
-    std::shared_ptr<std::string> getFile(const std::string & path) override
+    void getFile(const std::string & path,
+        std::function<void(std::shared_ptr<std::string>)> success,
+        std::function<void(std::exception_ptr exc)> failure) override
     {
-        debug(format("fetching ‘s3://%1%/%2%’...") % bucketName % path);
+        sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
+            debug(format("fetching ‘s3://%1%/%2%’...") % bucketName % path);
 
-        auto request =
-            Aws::S3::Model::GetObjectRequest()
-            .WithBucket(bucketName)
-            .WithKey(path);
+            auto request =
+                Aws::S3::Model::GetObjectRequest()
+                .WithBucket(bucketName)
+                .WithKey(path);
 
-        request.SetResponseStreamFactory([&]() {
-            return Aws::New<std::stringstream>("STRINGSTREAM");
-        });
+            request.SetResponseStreamFactory([&]() {
+                return Aws::New<std::stringstream>("STRINGSTREAM");
+            });
 
-        stats.get++;
+            stats.get++;
 
-        try {
+            try {
 
-            auto now1 = std::chrono::steady_clock::now();
+                auto now1 = std::chrono::steady_clock::now();
 
-            auto result = checkAws(format("AWS error fetching ‘%s’") % path,
-                client->GetObject(request));
+                auto result = checkAws(format("AWS error fetching ‘%s’") % path,
+                    client->GetObject(request));
 
-            auto now2 = std::chrono::steady_clock::now();
+                auto now2 = std::chrono::steady_clock::now();
 
-            auto res = dynamic_cast<std::stringstream &>(result.GetBody()).str();
+                auto res = dynamic_cast<std::stringstream &>(result.GetBody()).str();
 
-            auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+                auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
 
-            printMsg(lvlTalkative, format("downloaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
-                % bucketName % path % res.size() % duration);
+                printMsg(lvlTalkative, format("downloaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
+                    % bucketName % path % res.size() % duration);
 
-            stats.getBytes += res.size();
-            stats.getTimeMs += duration;
+                stats.getBytes += res.size();
+                stats.getTimeMs += duration;
 
-            return std::make_shared<std::string>(res);
+                return std::make_shared<std::string>(res);
 
-        } catch (S3Error & e) {
-            if (e.err == Aws::S3::S3Errors::NO_SUCH_KEY) return 0;
-            throw;
-        }
+            } catch (S3Error & e) {
+                if (e.err == Aws::S3::S3Errors::NO_SUCH_KEY) return std::shared_ptr<std::string>();
+                throw;
+            }
+        });
     }
 
     PathSet queryAllValidPaths() override
diff --git a/src/libstore/schema.sql b/src/libstore/schema.sql
index 91878af1580d..09c71a2b8dd7 100644
--- a/src/libstore/schema.sql
+++ b/src/libstore/schema.sql
@@ -6,7 +6,8 @@ create table if not exists ValidPaths (
     deriver          text,
     narSize          integer,
     ultimate         integer, -- null implies "false"
-    sigs             text -- space-separated
+    sigs             text, -- space-separated
+    ca               text -- if not null, an assertion that the path is content-addressed; see ValidPathInfo
 );
 
 create table if not exists Refs (
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 816f9984d6eb..0197b091cd12 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -10,11 +10,11 @@ namespace nix {
     int err = sqlite3_errcode(db);
     if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
         if (err == SQLITE_PROTOCOL)
-            printMsg(lvlError, "warning: SQLite database is busy (SQLITE_PROTOCOL)");
+            printError("warning: SQLite database is busy (SQLITE_PROTOCOL)");
         else {
             static bool warned = false;
             if (!warned) {
-                printMsg(lvlError, "warning: SQLite database is busy");
+                printError("warning: SQLite database is busy");
                 warned = true;
             }
         }
@@ -35,6 +35,13 @@ namespace nix {
         throw SQLiteError(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
 }
 
+SQLite::SQLite(const Path & path)
+{
+    if (sqlite3_open_v2(path.c_str(), &db,
+            SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0) != SQLITE_OK)
+        throw Error(format("cannot open SQLite database ‘%s’") % path);
+}
+
 SQLite::~SQLite()
 {
     try {
@@ -45,6 +52,12 @@ SQLite::~SQLite()
     }
 }
 
+void SQLite::exec(const std::string & stmt)
+{
+    if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK)
+        throwSQLiteError(db, format("executing SQLite statement ‘%s’") % stmt);
+}
+
 void SQLiteStmt::create(sqlite3 * db, const string & s)
 {
     checkInterrupt();
diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh
index d6b4a8d9117b..7c1ed538215c 100644
--- a/src/libstore/sqlite.hh
+++ b/src/libstore/sqlite.hh
@@ -13,10 +13,16 @@ namespace nix {
 /* RAII wrapper to close a SQLite database automatically. */
 struct SQLite
 {
-    sqlite3 * db;
-    SQLite() { db = 0; }
+    sqlite3 * db = 0;
+    SQLite() { }
+    SQLite(const Path & path);
+    SQLite(const SQLite & from) = delete;
+    SQLite& operator = (const SQLite & from) = delete;
+    SQLite& operator = (SQLite && from) { db = from.db; from.db = 0; return *this; }
     ~SQLite();
     operator sqlite3 * () { return db; }
+
+    void exec(const std::string & stmt);
 };
 
 /* RAII wrapper to create and destroy SQLite prepared statements. */
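
A minimal usage sketch for the new SQLite wrapper (the database path and statement are hypothetical): the path constructor opens or creates the database, exec() runs a statement directly, and the destructor closes the handle:

    #include "sqlite.hh"

    void sqliteExample()
    {
        nix::SQLite db("/tmp/example.sqlite");                       // opens or creates the file
        db.exec("create table if not exists Example (x integer);");  // throws on failure
    }   // ~SQLite() closes the handle
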
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 2a062b9b2cd3..90e1747157d4 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -4,6 +4,8 @@
 #include "util.hh"
 #include "nar-info-disk-cache.hh"
 
+#include <future>
+
 
 namespace nix {
 
@@ -191,32 +193,21 @@ Path Store::makeOutputPath(const string & id,
 
 
 Path Store::makeFixedOutputPath(bool recursive,
-    HashType hashAlgo, Hash hash, string name) const
+    const Hash & hash, const string & name) const
 {
-    return hashAlgo == htSHA256 && recursive
+    return hash.type == htSHA256 && recursive
         ? makeStorePath("source", hash, name)
         : makeStorePath("output:out", hashString(htSHA256,
                 "fixed:out:" + (recursive ? (string) "r:" : "") +
-                printHashType(hashAlgo) + ":" + printHash(hash) + ":"),
+                printHashType(hash.type) + ":" + printHash(hash) + ":"),
             name);
 }
 
 
-std::pair<Path, Hash> Store::computeStorePathForPath(const Path & srcPath,
-    bool recursive, HashType hashAlgo, PathFilter & filter) const
-{
-    HashType ht(hashAlgo);
-    Hash h = recursive ? hashPath(ht, srcPath, filter).first : hashFile(ht, srcPath);
-    string name = baseNameOf(srcPath);
-    Path dstPath = makeFixedOutputPath(recursive, hashAlgo, h, name);
-    return std::pair<Path, Hash>(dstPath, h);
-}
-
-
-Path Store::computeStorePathForText(const string & name, const string & s,
+Path Store::makeTextPath(const string & name, const Hash & hash,
     const PathSet & references) const
 {
-    Hash hash = hashString(htSHA256, s);
+    assert(hash.type == htSHA256);
     /* Stuff the references (if any) into the type.  This is a bit
        hacky, but we can't put them in `s' since that would be
        ambiguous. */
@@ -229,6 +220,23 @@ Path Store::computeStorePathForText(const string & name, const string & s,
 }
 
 
+std::pair<Path, Hash> Store::computeStorePathForPath(const Path & srcPath,
+    bool recursive, HashType hashAlgo, PathFilter & filter) const
+{
+    Hash h = recursive ? hashPath(hashAlgo, srcPath, filter).first : hashFile(hashAlgo, srcPath);
+    string name = baseNameOf(srcPath);
+    Path dstPath = makeFixedOutputPath(recursive, h, name);
+    return std::pair<Path, Hash>(dstPath, h);
+}
+
+
+Path Store::computeStorePathForText(const string & name, const string & s,
+    const PathSet & references) const
+{
+    return makeTextPath(name, hashString(htSHA256, s), references);
+}
+
+
 Store::Store(const Params & params)
     : storeDir(get(params, "store", settings.nixStore))
 {
@@ -277,51 +285,79 @@ bool Store::isValidPath(const Path & storePath)
 
 ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
 {
+    std::promise<ref<ValidPathInfo>> promise;
+
+    queryPathInfo(storePath,
+        [&](ref<ValidPathInfo> info) {
+            promise.set_value(info);
+        },
+        [&](std::exception_ptr exc) {
+            promise.set_exception(exc);
+        });
+
+    return promise.get_future().get();
+}
+
+
+void Store::queryPathInfo(const Path & storePath,
+    std::function<void(ref<ValidPathInfo>)> success,
+    std::function<void(std::exception_ptr exc)> failure)
+{
     auto hashPart = storePathToHash(storePath);
 
-    {
-        auto state_(state.lock());
-        auto res = state_->pathInfoCache.get(hashPart);
-        if (res) {
-            stats.narInfoReadAverted++;
-            if (!*res)
-                throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
-            return ref<ValidPathInfo>(*res);
+    try {
+
+        {
+            auto res = state.lock()->pathInfoCache.get(hashPart);
+            if (res) {
+                stats.narInfoReadAverted++;
+                if (!*res)
+                    throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
+                return success(ref<ValidPathInfo>(*res));
+            }
         }
-    }
 
-    if (diskCache) {
-        auto res = diskCache->lookupNarInfo(getUri(), hashPart);
-        if (res.first != NarInfoDiskCache::oUnknown) {
-            stats.narInfoReadAverted++;
-            auto state_(state.lock());
-            state_->pathInfoCache.upsert(hashPart,
-                res.first == NarInfoDiskCache::oInvalid ? 0 : res.second);
-            if (res.first == NarInfoDiskCache::oInvalid ||
-                (res.second->path != storePath && storePathToName(storePath) != ""))
-                throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
-            return ref<ValidPathInfo>(res.second);
+        if (diskCache) {
+            auto res = diskCache->lookupNarInfo(getUri(), hashPart);
+            if (res.first != NarInfoDiskCache::oUnknown) {
+                stats.narInfoReadAverted++;
+                {
+                    auto state_(state.lock());
+                    state_->pathInfoCache.upsert(hashPart,
+                        res.first == NarInfoDiskCache::oInvalid ? 0 : res.second);
+                    if (res.first == NarInfoDiskCache::oInvalid ||
+                        (res.second->path != storePath && storePathToName(storePath) != ""))
+                        throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
+                }
+                return success(ref<ValidPathInfo>(res.second));
+            }
         }
+
+    } catch (std::exception & e) {
+        return callFailure(failure);
     }
 
-    auto info = queryPathInfoUncached(storePath);
+    queryPathInfoUncached(storePath,
+        [this, storePath, hashPart, success, failure](std::shared_ptr<ValidPathInfo> info) {
 
-    if (diskCache)
-        diskCache->upsertNarInfo(getUri(), hashPart, info);
+            if (diskCache)
+                diskCache->upsertNarInfo(getUri(), hashPart, info);
 
-    {
-        auto state_(state.lock());
-        state_->pathInfoCache.upsert(hashPart, info);
-    }
+            {
+                auto state_(state.lock());
+                state_->pathInfoCache.upsert(hashPart, info);
+            }
 
-    if (!info
-        || (info->path != storePath && storePathToName(storePath) != ""))
-    {
-        stats.narInfoMissing++;
-        throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
-    }
+            if (!info
+                || (info->path != storePath && storePathToName(storePath) != ""))
+            {
+                stats.narInfoMissing++;
+                return failure(std::make_exception_ptr(InvalidPath(format("path ‘%s’ is not valid") % storePath)));
+            }
 
-    return ref<ValidPathInfo>(info);
+            callSuccess(success, failure, ref<ValidPathInfo>(info));
+
+        }, failure);
 }
 
 
@@ -433,9 +469,38 @@ void ValidPathInfo::sign(const SecretKey & secretKey)
 }
 
 
-unsigned int ValidPathInfo::checkSignatures(const PublicKeys & publicKeys) const
+bool ValidPathInfo::isContentAddressed(const Store & store) const
+{
+    auto warn = [&]() {
+        printError(format("warning: path ‘%s’ claims to be content-addressed but isn't") % path);
+    };
+
+    if (hasPrefix(ca, "text:")) {
+        auto hash = parseHash(std::string(ca, 5));
+        if (store.makeTextPath(storePathToName(path), hash, references) == path)
+            return true;
+        else
+            warn();
+    }
+
+    else if (hasPrefix(ca, "fixed:")) {
+        bool recursive = ca.compare(6, 2, "r:") == 0;
+        auto hash = parseHash(std::string(ca, recursive ? 8 : 6));
+        if (store.makeFixedOutputPath(recursive, hash, storePathToName(path)) == path)
+            return true;
+        else
+            warn();
+    }
+
+    return false;
+}
+
+
+size_t ValidPathInfo::checkSignatures(const Store & store, const PublicKeys & publicKeys) const
 {
-    unsigned int good = 0;
+    if (isContentAddressed(store)) return maxSigs;
+
+    size_t good = 0;
     for (auto & sig : sigs)
         if (checkSignature(publicKeys, sig))
             good++;
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 75caab7ea388..cba4deaad5d7 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -16,6 +16,13 @@
 namespace nix {
 
 
+struct BasicDerivation;
+struct Derivation;
+class FSAccessor;
+class NarInfoDiskCache;
+class Store;
+
+
 /* Size of the hash part of store paths, in base-32 characters. */
 const size_t storePathHashLen = 32; // i.e. 160 bits
 
@@ -109,6 +116,34 @@ struct ValidPathInfo
 
     StringSet sigs; // note: not necessarily verified
 
+    /* If non-empty, an assertion that the path is content-addressed,
+       i.e., that the store path is computed from a cryptographic hash
+       of the contents of the path, plus some other bits of data like
+       the "name" part of the path. Such a path doesn't need
+       signatures, since we don't have to trust anybody's claim that
+       the path is the output of a particular derivation. (In the
+       extensional store model, we have to trust that the *contents*
+       of an output path of a derivation were actually produced by
+       that derivation. In the intensional model, we have to trust
+       that a particular output path was produced by a derivation; the
+       path name then implies the contents.)
+
+       Ideally, the content-addressability assertion would just be a
+       Boolean, and the store path would be computed from
+       ‘storePathToName(path)’, ‘narHash’ and ‘references’. However,
+       1) we've accumulated several types of content-addressed paths
+       over the years; and 2) fixed-output derivations support
+       multiple hash algorithms and serialisation methods (flat file
+       vs NAR). Thus, ‘ca’ has one of the following forms:
+
+       * ‘text:sha256:<sha256 hash of file contents>’: For paths
+         computed by makeTextPath() / addTextToStore().
+
+       * ‘fixed:<r?>:<ht>:<h>’: For paths computed by
+         makeFixedOutputPath() / addToStore().
+    */
+    std::string ca;
+
     bool operator == (const ValidPathInfo & i) const
     {
         return
@@ -117,19 +152,25 @@ struct ValidPathInfo
             && references == i.references;
     }
 
-    /*  Return a fingerprint of the store path to be used in binary
-        cache signatures. It contains the store path, the base-32
-        SHA-256 hash of the NAR serialisation of the path, the size of
-        the NAR, and the sorted references. The size field is strictly
-        speaking superfluous, but might prevent endless/excessive data
-        attacks. */
+    /* Return a fingerprint of the store path to be used in binary
+       cache signatures. It contains the store path, the base-32
+       SHA-256 hash of the NAR serialisation of the path, the size of
+       the NAR, and the sorted references. The size field is strictly
+       speaking superfluous, but might prevent endless/excessive data
+       attacks. */
     std::string fingerprint() const;
 
     void sign(const SecretKey & secretKey);
 
+    /* Return true iff the path is verifiably content-addressed. */
+    bool isContentAddressed(const Store & store) const;
+
+    static const size_t maxSigs = std::numeric_limits<size_t>::max();
+
     /* Return the number of signatures on this .narinfo that were
-       produced by one of the specified keys. */
-    unsigned int checkSignatures(const PublicKeys & publicKeys) const;
+       produced by one of the specified keys, or maxSigs if the path
+       is content-addressed. */
+    size_t checkSignatures(const Store & store, const PublicKeys & publicKeys) const;
 
     /* Verify a single signature. */
     bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const;
@@ -169,12 +210,6 @@ struct BuildResult
 };
 
 
-struct BasicDerivation;
-struct Derivation;
-class FSAccessor;
-class NarInfoDiskCache;
-
-
 class Store : public std::enable_shared_from_this<Store>
 {
 public:
@@ -232,12 +267,14 @@ public:
         const Hash & hash, const string & name) const;
 
     Path makeFixedOutputPath(bool recursive,
-        HashType hashAlgo, Hash hash, string name) const;
+        const Hash & hash, const string & name) const;
 
-    /* This is the preparatory part of addToStore() and
-       addToStoreFixed(); it computes the store path to which srcPath
-       is to be copied.  Returns the store path and the cryptographic
-       hash of the contents of srcPath. */
+    Path makeTextPath(const string & name, const Hash & hash,
+        const PathSet & references) const;
+
+    /* This is the preparatory part of addToStore(); it computes the
+       store path to which srcPath is to be copied.  Returns the store
+       path and the cryptographic hash of the contents of srcPath. */
     std::pair<Path, Hash> computeStorePathForPath(const Path & srcPath,
         bool recursive = true, HashType hashAlgo = htSHA256,
         PathFilter & filter = defaultPathFilter) const;
@@ -282,9 +319,16 @@ public:
        the name part of the store path. */
     ref<const ValidPathInfo> queryPathInfo(const Path & path);
 
+    /* Asynchronous version of queryPathInfo(). */
+    void queryPathInfo(const Path & path,
+        std::function<void(ref<ValidPathInfo>)> success,
+        std::function<void(std::exception_ptr exc)> failure);
+
 protected:
 
-    virtual std::shared_ptr<ValidPathInfo> queryPathInfoUncached(const Path & path) = 0;
+    virtual void queryPathInfoUncached(const Path & path,
+        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+        std::function<void(std::exception_ptr exc)> failure) = 0;
 
 public:
 
@@ -491,7 +535,9 @@ protected:
 class LocalFSStore : public Store
 {
 public:
+    const Path rootDir;
     const Path stateDir;
+    const Path logDir;
 
     LocalFSStore(const Params & params);
 
diff --git a/src/libutil/affinity.cc b/src/libutil/affinity.cc
index 3cbdf878617a..98f8287ada67 100644
--- a/src/libutil/affinity.cc
+++ b/src/libutil/affinity.cc
@@ -20,12 +20,12 @@ void setAffinityTo(int cpu)
 #if __linux__
     if (sched_getaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1) return;
     didSaveAffinity = true;
-    printMsg(lvlDebug, format("locking this thread to CPU %1%") % cpu);
+    debug(format("locking this thread to CPU %1%") % cpu);
     cpu_set_t newAffinity;
     CPU_ZERO(&newAffinity);
     CPU_SET(cpu, &newAffinity);
     if (sched_setaffinity(0, sizeof(cpu_set_t), &newAffinity) == -1)
-        printMsg(lvlError, format("failed to lock thread to CPU %1%") % cpu);
+        printError(format("failed to lock thread to CPU %1%") % cpu);
 #endif
 }
 
@@ -47,7 +47,7 @@ void restoreAffinity()
 #if __linux__
     if (!didSaveAffinity) return;
     if (sched_setaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1)
-        printMsg(lvlError, "failed to restore affinity %1%");
+        printError("failed to restore affinity %1%");
 #endif
 }
 
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index edd4a881b485..b9b26c5f5f98 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -84,7 +84,7 @@ static void dump(const Path & path, Sink & sink, PathFilter & filter)
                 string name(i.name);
                 size_t pos = i.name.find(caseHackSuffix);
                 if (pos != string::npos) {
-                    printMsg(lvlDebug, format("removing case hack suffix from ‘%1%’") % (path + "/" + i.name));
+                    debug(format("removing case hack suffix from ‘%1%’") % (path + "/" + i.name));
                     name.erase(pos);
                 }
                 if (unhacked.find(name) != unhacked.end())
@@ -248,7 +248,7 @@ static void parse(ParseSink & sink, Source & source, const Path & path)
                     if (useCaseHack) {
                         auto i = names.find(name);
                         if (i != names.end()) {
-                            printMsg(lvlDebug, format("case collision between ‘%1%’ and ‘%2%’") % i->first % name);
+                            debug(format("case collision between ‘%1%’ and ‘%2%’") % i->first % name);
                             name += caseHackSuffix;
                             name += std::to_string(++i->second);
                         } else
diff --git a/src/libutil/args.hh b/src/libutil/args.hh
index 6aa08aacac9e..ac12f8be633a 100644
--- a/src/libutil/args.hh
+++ b/src/libutil/args.hh
@@ -8,7 +8,7 @@
 
 namespace nix {
 
-MakeError(UsageError, nix::Error);
+MakeError(UsageError, Error);
 
 enum HashType : char;
 
diff --git a/src/libutil/json.cc b/src/libutil/json.cc
new file mode 100644
index 000000000000..ecc3fdfe514e
--- /dev/null
+++ b/src/libutil/json.cc
@@ -0,0 +1,176 @@
+#include "json.hh"
+
+#include <iomanip>
+#include <cstring>
+
+namespace nix {
+
+void toJSON(std::ostream & str, const char * start, const char * end)
+{
+    str << '"';
+    for (auto i = start; i != end; i++)
+        if (*i == '\"' || *i == '\\') str << '\\' << *i;
+        else if (*i == '\n') str << "\\n";
+        else if (*i == '\r') str << "\\r";
+        else if (*i == '\t') str << "\\t";
+        else if (*i >= 0 && *i < 32)
+            str << "\\u" << std::setfill('0') << std::setw(4) << std::hex << (uint16_t) *i << std::dec;
+        else str << *i;
+    str << '"';
+}
+
+void toJSON(std::ostream & str, const std::string & s)
+{
+    toJSON(str, s.c_str(), s.c_str() + s.size());
+}
+
+void toJSON(std::ostream & str, const char * s)
+{
+    if (!s) str << "null"; else toJSON(str, s, s + strlen(s));
+}
+
+void toJSON(std::ostream & str, unsigned long long n)
+{
+    str << n;
+}
+
+void toJSON(std::ostream & str, unsigned long n)
+{
+    str << n;
+}
+
+void toJSON(std::ostream & str, long n)
+{
+    str << n;
+}
+
+void toJSON(std::ostream & str, double f)
+{
+    str << f;
+}
+
+void toJSON(std::ostream & str, bool b)
+{
+    str << (b ? "true" : "false");
+}
+
+JSONWriter::JSONWriter(std::ostream & str, bool indent)
+    : state(new JSONState(str, indent))
+{
+    state->stack.push_back(this);
+}
+
+JSONWriter::JSONWriter(JSONState * state)
+    : state(state)
+{
+    state->stack.push_back(this);
+}
+
+JSONWriter::~JSONWriter()
+{
+    assertActive();
+    state->stack.pop_back();
+    if (state->stack.empty()) delete state;
+}
+
+void JSONWriter::comma()
+{
+    assertActive();
+    if (first) {
+        first = false;
+    } else {
+        state->str << ',';
+    }
+    if (state->indent) indent();
+}
+
+void JSONWriter::indent()
+{
+    state->str << '\n' << std::string(state->depth * 2, ' ');
+}
+
+void JSONList::open()
+{
+    state->depth++;
+    state->str << '[';
+}
+
+JSONList::~JSONList()
+{
+    state->depth--;
+    if (state->indent && !first) indent();
+    state->str << "]";
+}
+
+JSONList JSONList::list()
+{
+    comma();
+    return JSONList(state);
+}
+
+JSONObject JSONList::object()
+{
+    comma();
+    return JSONObject(state);
+}
+
+JSONPlaceholder JSONList::placeholder()
+{
+    comma();
+    return JSONPlaceholder(state);
+}
+
+void JSONObject::open()
+{
+    state->depth++;
+    state->str << '{';
+}
+
+JSONObject::~JSONObject()
+{
+    state->depth--;
+    if (state->indent && !first) indent();
+    state->str << "}";
+}
+
+void JSONObject::attr(const std::string & s)
+{
+    comma();
+    toJSON(state->str, s);
+    state->str << ':';
+    if (state->indent) state->str << ' ';
+}
+
+JSONList JSONObject::list(const std::string & name)
+{
+    attr(name);
+    return JSONList(state);
+}
+
+JSONObject JSONObject::object(const std::string & name)
+{
+    attr(name);
+    return JSONObject(state);
+}
+
+JSONPlaceholder JSONObject::placeholder(const std::string & name)
+{
+    attr(name);
+    return JSONPlaceholder(state);
+}
+
+JSONList JSONPlaceholder::list()
+{
+    assertValid();
+    first = false;
+    return JSONList(state);
+}
+
+JSONObject JSONPlaceholder::object()
+{
+    assertValid();
+    first = false;
+    return JSONObject(state);
+}
+
+}
diff --git a/src/libutil/json.hh b/src/libutil/json.hh
new file mode 100644
index 000000000000..aec456845056
--- /dev/null
+++ b/src/libutil/json.hh
@@ -0,0 +1,184 @@
+#pragma once
+
+#include <iostream>
+#include <vector>
+#include <cassert>
+
+namespace nix {
+
+void toJSON(std::ostream & str, const char * start, const char * end);
+void toJSON(std::ostream & str, const std::string & s);
+void toJSON(std::ostream & str, const char * s);
+void toJSON(std::ostream & str, unsigned long long n);
+void toJSON(std::ostream & str, unsigned long n);
+void toJSON(std::ostream & str, long n);
+void toJSON(std::ostream & str, double f);
+void toJSON(std::ostream & str, bool b);
+
+class JSONWriter
+{
+protected:
+
+    struct JSONState
+    {
+        std::ostream & str;
+        bool indent;
+        size_t depth = 0;
+        std::vector<JSONWriter *> stack;
+        JSONState(std::ostream & str, bool indent) : str(str), indent(indent) { }
+        ~JSONState()
+        {
+            assert(stack.empty());
+        }
+    };
+
+    JSONState * state;
+
+    bool first = true;
+
+    JSONWriter(std::ostream & str, bool indent);
+
+    JSONWriter(JSONState * state);
+
+    ~JSONWriter();
+
+    void assertActive()
+    {
+        assert(!state->stack.empty() && state->stack.back() == this);
+    }
+
+    void comma();
+
+    void indent();
+};
+
+class JSONObject;
+class JSONPlaceholder;
+
+class JSONList : JSONWriter
+{
+private:
+
+    friend class JSONObject;
+    friend class JSONPlaceholder;
+
+    void open();
+
+    JSONList(JSONState * state)
+        : JSONWriter(state)
+    {
+        open();
+    }
+
+public:
+
+    JSONList(std::ostream & str, bool indent = false)
+        : JSONWriter(str, indent)
+    {
+        open();
+    }
+
+    ~JSONList();
+
+    template<typename T>
+    JSONList & elem(const T & v)
+    {
+        comma();
+        toJSON(state->str, v);
+        return *this;
+    }
+
+    JSONList list();
+
+    JSONObject object();
+
+    JSONPlaceholder placeholder();
+};
+
+class JSONObject : JSONWriter
+{
+private:
+
+    friend class JSONList;
+    friend class JSONPlaceholder;
+
+    void open();
+
+    JSONObject(JSONState * state)
+        : JSONWriter(state)
+    {
+        open();
+    }
+
+    void attr(const std::string & s);
+
+public:
+
+    JSONObject(std::ostream & str, bool indent = false)
+        : JSONWriter(str, indent)
+    {
+        open();
+    }
+
+    ~JSONObject();
+
+    template<typename T>
+    JSONObject & attr(const std::string & name, const T & v)
+    {
+        attr(name);
+        toJSON(state->str, v);
+        return *this;
+    }
+
+    JSONList list(const std::string & name);
+
+    JSONObject object(const std::string & name);
+
+    JSONPlaceholder placeholder(const std::string & name);
+};
+
+class JSONPlaceholder : JSONWriter
+{
+
+private:
+
+    friend class JSONList;
+    friend class JSONObject;
+
+    JSONPlaceholder(JSONState * state)
+        : JSONWriter(state)
+    {
+    }
+
+    void assertValid()
+    {
+        assertActive();
+        assert(first);
+    }
+
+public:
+
+    JSONPlaceholder(std::ostream & str, bool indent = false)
+        : JSONWriter(str, indent)
+    {
+    }
+
+    ~JSONPlaceholder()
+    {
+        assert(!first || std::uncaught_exception());
+    }
+
+    template<typename T>
+    void write(const T & v)
+    {
+        assertValid();
+        first = false;
+        toJSON(state->str, v);
+    }
+
+    JSONList list();
+
+    JSONObject object();
+};
+
+}
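
A brief usage sketch for the new JSON writer classes (values are arbitrary); nested writers must be finished before writing to an outer one again, and the destructors emit the closing ‘]’ and ‘}’:

    #include "json.hh"
    #include <iostream>

    void jsonExample()
    {
        nix::JSONObject obj(std::cout, /* indent = */ true);
        obj.attr("name", "example");
        obj.attr("size", 123UL);
        auto outputs = obj.list("outputs");
        outputs.elem("out");
        outputs.elem("dev");
    }   // prints roughly: { "name": "example", "size": 123, "outputs": [ "out", "dev" ] }
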
diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc
index 15bb1e175da6..d9e8d22d7685 100644
--- a/src/libutil/logging.cc
+++ b/src/libutil/logging.cc
@@ -52,7 +52,7 @@ Verbosity verbosity = lvlInfo;
 void warnOnce(bool & haveWarned, const FormatOrString & fs)
 {
     if (!haveWarned) {
-        printMsg(lvlError, format("warning: %1%") % fs.s);
+        printError(format("warning: %1%") % fs.s);
         haveWarned = true;
     }
 }
@@ -60,14 +60,12 @@ void warnOnce(bool & haveWarned, const FormatOrString & fs)
 void writeToStderr(const string & s)
 {
     try {
-        writeFull(STDERR_FILENO, s);
+        writeFull(STDERR_FILENO, s, false);
     } catch (SysError & e) {
-        /* Ignore failing writes to stderr if we're in an exception
-           handler, otherwise throw an exception.  We need to ignore
-           write errors in exception handlers to ensure that cleanup
-           code runs to completion if the other side of stderr has
-           been closed unexpectedly. */
-        if (!std::uncaught_exception()) throw;
+        /* Ignore failing writes to stderr.  We need to ignore write
+           errors to ensure that cleanup code that logs to stderr runs
+           to completion if the other side of stderr has been closed
+           unexpectedly. */
     }
 }
 
diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh
index 277dff280053..ba99a81c3826 100644
--- a/src/libutil/logging.hh
+++ b/src/libutil/logging.hh
@@ -66,14 +66,19 @@ Logger * makeDefaultLogger();
 
 extern Verbosity verbosity; /* suppress msgs > this */
 
-#define printMsg(level, f) \
+/* Print a message if the current log level is at least the specified
+   level. Note that this has to be implemented as a macro to ensure
+   that the arguments are evaluated lazily. */
+#define printMsg(level, args...) \
     do { \
         if (level <= nix::verbosity) { \
-            logger->log(level, (f)); \
+            logger->log(level, fmt(args)); \
         } \
     } while (0)
 
-#define debug(f) printMsg(lvlDebug, f)
+#define printError(args...) printMsg(lvlError, args)
+#define printInfo(args...) printMsg(lvlInfo, args)
+#define debug(args...) printMsg(lvlDebug, args)
 
 void warnOnce(bool & haveWarned, const FormatOrString & fs);
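
As a quick illustration of the new variadic macros (the variables are hypothetical): fmt() accepts a plain string, a boost::format-style pattern followed by arguments, or a ready-made format object, and the message is only rendered if the verbosity check passes:

    #include "logging.hh"
    #include <string>

    void loggingExample()
    {
        using namespace nix;
        std::string path = "/nix/store/example";
        size_t bytes = 42;
        debug("inspecting ‘%1%’", path);                 // rendered only if verbosity >= lvlDebug
        printInfo(format("fetched %1% bytes") % bytes);  // format objects still work
        printError("warning: something looks wrong");
    }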
 
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index 776308cdf321..24c6d107359e 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -49,7 +49,7 @@ size_t threshold = 256 * 1024 * 1024;
 
 static void warnLargeDump()
 {
-    printMsg(lvlError, "warning: dumping very large path (> 256 MiB); this may run out of memory");
+    printError("warning: dumping very large path (> 256 MiB); this may run out of memory");
 }
 
 
diff --git a/src/libutil/thread-pool.cc b/src/libutil/thread-pool.cc
index 696ecd6c38c8..0a3a407240f7 100644
--- a/src/libutil/thread-pool.cc
+++ b/src/libutil/thread-pool.cc
@@ -87,7 +87,7 @@ void ThreadPool::workerEntry()
             if (state->exception) {
                 if (!dynamic_cast<Interrupted*>(&e) &&
                     !dynamic_cast<ThreadPoolShutDown*>(&e))
-                    printMsg(lvlError, format("error: %s") % e.what());
+                    printError(format("error: %s") % e.what());
             } else {
                 state->exception = std::current_exception();
                 work.notify_all();
diff --git a/src/libutil/types.hh b/src/libutil/types.hh
index bd192b8506b2..b9a93d27d2ad 100644
--- a/src/libutil/types.hh
+++ b/src/libutil/types.hh
@@ -41,6 +41,45 @@ struct FormatOrString
 };
 
 
+/* A helper for formatting strings. ‘fmt(format, a_0, ..., a_n)’ is
+   equivalent to ‘boost::format(format) % a_0 % ... % a_n’.
+   However, ‘fmt(s)’ is equivalent to ‘s’ (so no %-expansion
+   takes place). */
+
+inline void formatHelper(boost::format & f)
+{
+}
+
+template<typename T, typename... Args>
+inline void formatHelper(boost::format & f, T x, Args... args)
+{
+    formatHelper(f % x, args...);
+}
+
+inline std::string fmt(const std::string & s)
+{
+    return s;
+}
+
+inline std::string fmt(const char * s)
+{
+    return s;
+}
+
+inline std::string fmt(const FormatOrString & fs)
+{
+    return fs.s;
+}
+
+template<typename... Args>
+inline std::string fmt(const std::string & fs, Args... args)
+{
+    boost::format f(fs);
+    formatHelper(f, args...);
+    return f.str();
+}
+
+
 /* BaseError should generally not be caught, as it has Interrupted as
    a subclass. Catch Error instead. */
 class BaseError : public std::exception
@@ -49,14 +88,28 @@ protected:
     string prefix_; // used for location traces etc.
     string err;
 public:
-    unsigned int status; // exit status
-    BaseError(const FormatOrString & fs, unsigned int status = 1);
+    unsigned int status = 1; // exit status
+
+    template<typename... Args>
+    BaseError(unsigned int status, Args... args)
+        : err(fmt(args...))
+        , status(status)
+    {
+    }
+
+    template<typename... Args>
+    BaseError(Args... args)
+        : err(fmt(args...))
+    {
+    }
+
 #ifdef EXCEPTION_NEEDS_THROW_SPEC
     ~BaseError() throw () { };
     const char * what() const throw () { return err.c_str(); }
 #else
     const char * what() const noexcept { return err.c_str(); }
 #endif
+
     const string & msg() const { return err; }
     const string & prefix() const { return prefix_; }
     BaseError & addPrefix(const FormatOrString & fs);
@@ -66,7 +119,7 @@ public:
     class newClass : public superClass                  \
     {                                                   \
     public:                                             \
-        newClass(const FormatOrString & fs, unsigned int status = 1) : superClass(fs, status) { }; \
+        using superClass::superClass;                   \
     };
 
 MakeError(Error, BaseError)
@@ -75,7 +128,15 @@ class SysError : public Error
 {
 public:
     int errNo;
-    SysError(const FormatOrString & fs);
+
+    template<typename... Args>
+    SysError(Args... args)
+        : Error(addErrno(fmt(args...)))
+    { }
+
+private:
+
+    std::string addErrno(const std::string & s);
 };
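
The reworked error hierarchy can now be constructed from an fmt()-style argument list, optionally preceded by an exit status; a small sketch (paths and status values are hypothetical):

    #include "types.hh"

    void errorExamples(const nix::Path & drvPath, unsigned int builderStatus)
    {
        using namespace nix;
        if (builderStatus != 0)
            throw Error(builderStatus, "builder for ‘%1%’ failed", drvPath);  // explicit exit status
        throw SysError("opening ‘%1%’", drvPath);  // appends ": " + strerror(errno), status defaults to 1
    }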
 
 
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 95103c4e5d12..1f923fe6bc0f 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -9,6 +9,7 @@
 #include <cstdlib>
 #include <sstream>
 #include <cstring>
+#include <cctype>
 
 #include <sys/wait.h>
 #include <unistd.h>
@@ -30,13 +31,6 @@ extern char * * environ;
 namespace nix {
 
 
-BaseError::BaseError(const FormatOrString & fs, unsigned int status)
-    : status(status)
-{
-    err = fs.s;
-}
-
-
 BaseError & BaseError::addPrefix(const FormatOrString & fs)
 {
     prefix_ = fs.s + prefix_;
@@ -44,10 +38,10 @@ BaseError & BaseError::addPrefix(const FormatOrString & fs)
 }
 
 
-SysError::SysError(const FormatOrString & fs)
-    : Error(format("%1%: %2%") % fs.s % strerror(errno))
-    , errNo(errno)
+std::string SysError::addErrno(const std::string & s)
 {
+    errNo = errno;
+    return s + ": " + strerror(errNo);
 }
 
 
@@ -58,6 +52,21 @@ string getEnv(const string & key, const string & def)
 }
 
 
+std::map<std::string, std::string> getEnv()
+{
+    std::map<std::string, std::string> env;
+    for (size_t i = 0; environ[i]; ++i) {
+        auto s = environ[i];
+        auto eq = strchr(s, '=');
+        if (!eq)
+            // invalid env, just keep going
+            continue;
+        env.emplace(std::string(s, eq), std::string(eq + 1));
+    }
+    return env;
+}
+
+
 Path absPath(Path path, Path dir)
 {
     if (path[0] != '/') {
@@ -474,24 +483,24 @@ void readFull(int fd, unsigned char * buf, size_t count)
 }
 
 
-void writeFull(int fd, const unsigned char * buf, size_t count)
+void writeFull(int fd, const unsigned char * buf, size_t count, bool allowInterrupts)
 {
     while (count) {
-        checkInterrupt();
         ssize_t res = write(fd, (char *) buf, count);
-        if (res == -1) {
-            if (errno == EINTR) continue;
+        if (res == -1 && errno != EINTR)
             throw SysError("writing to file");
+        if (res > 0) {
+            count -= res;
+            buf += res;
         }
-        count -= res;
-        buf += res;
+        if (allowInterrupts) checkInterrupt();
     }
 }
 
 
-void writeFull(int fd, const string & s)
+void writeFull(int fd, const string & s, bool allowInterrupts)
 {
-    writeFull(fd, (const unsigned char *) s.data(), s.size());
+    writeFull(fd, (const unsigned char *) s.data(), s.size(), allowInterrupts);
 }
 
 
@@ -716,20 +725,20 @@ void Pid::kill(bool quiet)
     if (pid == -1 || pid == 0) return;
 
     if (!quiet)
-        printMsg(lvlError, format("killing process %1%") % pid);
+        printError(format("killing process %1%") % pid);
 
     /* Send the requested signal to the child.  If it has its own
        process group, send the signal to every process in the child
        process group (which hopefully includes *all* its children). */
     if (::kill(separatePG ? -pid : pid, killSignal) != 0)
-        printMsg(lvlError, (SysError(format("killing process %1%") % pid).msg()));
+        printError((SysError(format("killing process %1%") % pid).msg()));
 
     /* Wait until the child dies, disregarding the exit status. */
     int status;
     while (waitpid(pid, &status, 0) == -1) {
         checkInterrupt();
         if (errno != EINTR) {
-            printMsg(lvlError,
+            printError(
                 (SysError(format("waiting for process %1%") % pid).msg()));
             break;
         }
@@ -919,7 +928,7 @@ string runProgram(Path program, bool searchPath, const Strings & args,
     /* Wait for the child to finish. */
     int status = pid.wait(true);
     if (!statusOk(status))
-        throw ExecError(format("program ‘%1%’ %2%")
+        throw ExecError(status, format("program ‘%1%’ %2%")
             % program % statusToString(status));
 
     return result;
@@ -1088,44 +1097,12 @@ bool hasSuffix(const string & s, const string & suffix)
 }
 
 
-void expect(std::istream & str, const string & s)
-{
-    char s2[s.size()];
-    str.read(s2, s.size());
-    if (string(s2, s.size()) != s)
-        throw FormatError(format("expected string ‘%1%’") % s);
-}
-
-
-string parseString(std::istream & str)
+std::string toLower(const std::string & s)
 {
-    string res;
-    expect(str, "\"");
-    int c;
-    while ((c = str.get()) != '"')
-        if (c == '\\') {
-            c = str.get();
-            if (c == 'n') res += '\n';
-            else if (c == 'r') res += '\r';
-            else if (c == 't') res += '\t';
-            else res += c;
-        }
-        else res += c;
-    return res;
-}
-
-
-bool endOfList(std::istream & str)
-{
-    if (str.peek() == ',') {
-        str.get();
-        return false;
-    }
-    if (str.peek() == ']') {
-        str.get();
-        return true;
-    }
-    return false;
+    std::string r(s);
+    for (auto & c : r)
+        c = std::tolower(c);
+    return r;
 }
 
 
@@ -1149,7 +1126,7 @@ void ignoreException()
     try {
         throw;
     } catch (std::exception & e) {
-        printMsg(lvlError, format("error (ignored): %1%") % e.what());
+        printError(format("error (ignored): %1%") % e.what());
     }
 }
 
@@ -1247,4 +1224,15 @@ string base64Decode(const string & s)
 }
 
 
+void callFailure(const std::function<void(std::exception_ptr exc)> & failure, std::exception_ptr exc)
+{
+    try {
+        failure(exc);
+    } catch (std::exception & e) {
+        printError(format("uncaught exception: %s") % e.what());
+        abort();
+    }
+}
+
+
 }
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index 819921dfff1e..259c73260da3 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -8,9 +8,11 @@
 #include <dirent.h>
 #include <unistd.h>
 #include <signal.h>
+
 #include <functional>
 #include <limits>
 #include <cstdio>
+#include <map>
 
 #ifndef HAVE_STRUCT_DIRENT_D_TYPE
 #define DT_UNKNOWN 0
@@ -25,6 +27,9 @@ namespace nix {
 /* Return an environment variable. */
 string getEnv(const string & key, const string & def = "");
 
+/* Get the entire environment. */
+std::map<std::string, std::string> getEnv();
+
 /* Return an absolutized path, resolving paths relative to the
    specified directory, or the current directory otherwise.  The path
    is also canonicalised. */
@@ -120,8 +125,8 @@ void replaceSymlink(const Path & target, const Path & link);
 /* Wrappers around read()/write() that read/write exactly the
    requested number of bytes. */
 void readFull(int fd, unsigned char * buf, size_t count);
-void writeFull(int fd, const unsigned char * buf, size_t count);
-void writeFull(int fd, const string & s);
+void writeFull(int fd, const unsigned char * buf, size_t count, bool allowInterrupts = true);
+void writeFull(int fd, const string & s, bool allowInterrupts = true);
 
 MakeError(EndOfFile, Error)
 
@@ -242,7 +247,16 @@ pid_t startProcess(std::function<void()> fun, const ProcessOptions & options = P
 string runProgram(Path program, bool searchPath = false,
     const Strings & args = Strings(), const string & input = "");
 
-MakeError(ExecError, Error)
+class ExecError : public Error
+{
+public:
+    int status;
+
+    template<typename... Args>
+    ExecError(int status, Args... args)
+        : Error(args...), status(status)
+    { }
+};
 
 /* Convert a list of strings to a null-terminated vector of char
    *'s. The result must not be accessed beyond the lifetime of the
@@ -334,18 +348,8 @@ bool hasPrefix(const string & s, const string & prefix);
 bool hasSuffix(const string & s, const string & suffix);
 
 
-/* Read string `s' from stream `str'. */
-void expect(std::istream & str, const string & s);
-
-MakeError(FormatError, Error)
-
-
-/* Read a C-style string from stream `str'. */
-string parseString(std::istream & str);
-
-
-/* Utility function used to parse legacy ATerms. */
-bool endOfList(std::istream & str);
+/* Convert a string to lower case. */
+std::string toLower(const std::string & s);
 
 
 /* Escape a string that contains octal-encoded escape codes such as
@@ -386,4 +390,44 @@ string get(const T & map, const string & key, const string & def = "")
 }
 
 
+/* Call ‘failure’ with the current exception as argument. If ‘failure’
+   throws an exception, abort the program. */
+void callFailure(const std::function<void(std::exception_ptr exc)> & failure,
+    std::exception_ptr exc = std::current_exception());
+
+
+/* Evaluate the function ‘f’. If it returns a value, call ‘success’
+   with that value as its argument. If it or ‘success’ throws an
+   exception, call ‘failure’. If ‘failure’ throws an exception, abort
+   the program. */
+template<class T>
+void sync2async(
+    const std::function<void(T)> & success,
+    const std::function<void(std::exception_ptr exc)> & failure,
+    const std::function<T()> & f)
+{
+    try {
+        success(f());
+    } catch (...) {
+        callFailure(failure);
+    }
+}
+
+
+/* Call the function ‘success’. If it throws an exception, call
+   ‘failure’. If that throws an exception, abort the program. */
+template<class T>
+void callSuccess(
+    const std::function<void(T)> & success,
+    const std::function<void(std::exception_ptr exc)> & failure,
+    T && arg)
+{
+    try {
+        success(arg);
+    } catch (...) {
+        callFailure(failure);
+    }
+}
+
+
 }
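
A self-contained sketch of the sync2async() helper in isolation (types and values are arbitrary): the third argument runs synchronously, its return value is handed to ‘success’, and any exception from either is routed to ‘failure’ via callFailure():

    #include "util.hh"
    #include <iostream>

    void sync2asyncExample()
    {
        nix::sync2async<int>(
            [](int n) { std::cout << "got " << n << "\n"; },      // success callback
            [](std::exception_ptr) { std::cout << "failed\n"; },  // failure callback
            []() { return 6 * 7; });                               // synchronous body
    }
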
diff --git a/src/nix-build/local.mk b/src/nix-build/local.mk
new file mode 100644
index 000000000000..91532411a505
--- /dev/null
+++ b/src/nix-build/local.mk
@@ -0,0 +1,9 @@
+programs += nix-build
+
+nix-build_DIR := $(d)
+
+nix-build_SOURCES := $(d)/nix-build.cc
+
+nix-build_LIBS = libmain libstore libutil libformat
+
+$(eval $(call install-symlink, nix-build, $(bindir)/nix-shell))
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
new file mode 100755
index 000000000000..b209464b8279
--- /dev/null
+++ b/src/nix-build/nix-build.cc
@@ -0,0 +1,510 @@
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <regex>
+#include <sstream>
+#include <vector>
+
+#include <unistd.h>
+
+#include "store-api.hh"
+#include "globals.hh"
+#include "derivations.hh"
+#include "affinity.hh"
+#include "util.hh"
+#include "shared.hh"
+
+using namespace nix;
+
+/* Recreate the effect of the Perl shellwords function, breaking up a
+ * string into arguments the way a shell would, including escapes.
+ */
+std::vector<string> shellwords(const string & s)
+{
+    std::regex whitespace("^(\\s+).*");
+    auto begin = s.cbegin();
+    std::vector<string> res;
+    std::string cur;
+    enum state {
+        sBegin,
+        sQuote
+    };
+    state st = sBegin;
+    auto it = begin;
+    for (; it != s.cend(); ++it) {
+        if (st == sBegin) {
+            std::smatch match;
+            if (regex_search(it, s.cend(), match, whitespace)) {
+                cur.append(begin, it);
+                res.push_back(cur);
+                cur.clear();
+                it = match[1].second;
+                begin = it;
+            }
+        }
+        switch (*it) {
+            case '"':
+                cur.append(begin, it);
+                begin = it + 1;
+                st = st == sBegin ? sQuote : sBegin;
+                break;
+            case '\\':
+                /* perl shellwords mostly just treats the next char as part of the string with no special processing */
+                cur.append(begin, it);
+                begin = ++it;
+                break;
+        }
+    }
+    cur.append(begin, it);
+    if (!cur.empty()) res.push_back(cur);
+    return res;
+}
+
+static void maybePrintExecError(ExecError & e)
+{
+    if (WIFEXITED(e.status))
+        throw Exit(WEXITSTATUS(e.status));
+    else
+        throw e;
+}
+
+int main(int argc, char ** argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+        auto store = openStore();
+        auto dryRun = false;
+        auto verbose = false;
+        auto runEnv = std::regex_search(argv[0], std::regex("nix-shell$"));
+        auto pure = false;
+        auto fromArgs = false;
+        auto packages = false;
+        auto interactive = true;
+
+        Strings instArgs;
+        Strings buildArgs;
+        Strings exprs;
+
+        auto shell = getEnv("SHELL", "/bin/sh");
+        std::string envCommand; // interactive shell
+        Strings envExclude;
+
+        auto myName = runEnv ? "nix-shell" : "nix-build";
+
+        auto inShebang = false;
+        std::string script;
+        std::vector<string> savedArgs;
+
+        AutoDelete tmpDir(createTempDir("", myName));
+
+        std::string outLink = "./result";
+        auto drvLink = (Path) tmpDir + "/derivation";
+
+        std::vector<string> args;
+        for (int i = 1; i < argc; ++i)
+            args.push_back(argv[i]);
+        // Heuristic to see if we're invoked as a shebang script, namely, if we
+        // have a single argument, it's the name of an executable file, and it
+        // starts with "#!".
+        if (runEnv && argc > 1 && !std::regex_search(argv[1], std::regex("nix-shell"))) {
+            script = argv[1];
+            if (access(script.c_str(), F_OK) == 0 && access(script.c_str(), X_OK) == 0) {
+                auto lines = tokenizeString<Strings>(readFile(script), "\n");
+                if (std::regex_search(lines.front(), std::regex("^#!"))) {
+                    lines.pop_front();
+                    inShebang = true;
+                    for (int i = 2; i < argc; ++i)
+                        savedArgs.push_back(argv[i]);
+                    std::vector<string> args;
+                    for (auto line : lines) {
+                        line = chomp(line);
+                        std::smatch match;
+                        if (std::regex_match(line, match, std::regex("^#!\\s*nix-shell (.*)$")))
+                            for (const auto & word : shellwords(match[1].str()))
+                                args.push_back(word);
+                    }
+                }
+            }
+        }
+
+        for (size_t n = 0; n < args.size(); ++n) {
+            auto arg = args[n];
+
+            if (arg == "--help") {
+                deletePath(tmpDir);
+                tmpDir.cancel();
+                execlp("man", "man", myName, NULL);
+                throw SysError("executing man");
+            }
+
+            else if (arg == "--version") {
+                std::cout << myName << " (Nix) " << nixVersion << '\n';
+                return;
+            }
+
+            else if (arg == "--add-drv-link") {
+                drvLink = "./derivation";
+            }
+
+            else if (arg == "--no-out-link" || arg == "--no-link") {
+                outLink = (Path) tmpDir + "/result";
+            }
+
+            else if (arg == "--drv-link") {
+                n++;
+                if (n >= args.size()) {
+                    throw UsageError("--drv-link requires an argument");
+                }
+                drvLink = args[n];
+            }
+
+            else if (arg == "--out-link" || arg == "-o") {
+                n++;
+                if (n >= args.size()) {
+                    throw UsageError(format("%1% requires an argument") % arg);
+                }
+                outLink = args[n];
+            }
+
+            else if (arg == "--attr" || arg == "-A" || arg == "-I") {
+                n++;
+                if (n >= args.size()) {
+                    throw UsageError(format("%1% requires an argument") % arg);
+                }
+                instArgs.push_back(arg);
+                instArgs.push_back(args[n]);
+            }
+
+            else if (arg == "--arg" || arg == "--argstr") {
+                if (n + 2 >= args.size()) {
+                    throw UsageError(format("%1% requires two arguments") % arg);
+                }
+                instArgs.push_back(arg);
+                instArgs.push_back(args[n + 1]);
+                instArgs.push_back(args[n + 2]);
+                n += 2;
+            }
+
+            else if (arg == "--option") {
+                if (n + 2 >= args.size()) {
+                    throw UsageError(format("%1% requires two arguments") % arg);
+                }
+                instArgs.push_back(arg);
+                instArgs.push_back(args[n + 1]);
+                instArgs.push_back(args[n + 2]);
+                buildArgs.push_back(arg);
+                buildArgs.push_back(args[n + 1]);
+                buildArgs.push_back(args[n + 2]);
+                n += 2;
+            }
+
+            else if (arg == "--max-jobs" || arg == "-j" || arg == "--max-silent-time" || arg == "--cores" || arg == "--timeout" || arg == "--add-root") {
+                n++;
+                if (n >= args.size()) {
+                    throw UsageError(format("%1% requires an argument") % arg);
+                }
+                buildArgs.push_back(arg);
+                buildArgs.push_back(args[n]);
+            }
+
+            else if (arg == "--dry-run") {
+                buildArgs.push_back("--dry-run");
+                dryRun = true;
+            }
+
+            else if (arg == "--show-trace") {
+                instArgs.push_back(arg);
+            }
+
+            else if (arg == "-") {
+                exprs = Strings{"-"};
+            }
+
+            else if (arg == "--verbose" || (arg.size() >= 2 && arg.substr(0, 2) == "-v")) {
+                buildArgs.push_back(arg);
+                instArgs.push_back(arg);
+                verbose = true;
+            }
+
+            else if (arg == "--quiet" || arg == "--repair") {
+                buildArgs.push_back(arg);
+                instArgs.push_back(arg);
+            }
+
+            else if (arg == "--check") {
+                buildArgs.push_back(arg);
+            }
+
+            else if (arg == "--run-env") { // obsolete
+                runEnv = true;
+            }
+
+            else if (arg == "--command" || arg == "--run") {
+                n++;
+                if (n >= args.size()) {
+                    throw UsageError(format("%1% requires an argument") % arg);
+                }
+                envCommand = args[n] + "\nexit";
+                if (arg == "--run")
+                    interactive = false;
+            }
+
+            else if (arg == "--exclude") {
+                n++;
+                if (n >= args.size()) {
+                    throw UsageError(format("%1% requires an argument") % arg);
+                }
+                envExclude.push_back(args[n]);
+            }
+
+            else if (arg == "--pure") { pure = true; }
+            else if (arg == "--impure") { pure = false; }
+
+            else if (arg == "--expr" || arg == "-E") {
+                fromArgs = true;
+                instArgs.push_back("--expr");
+            }
+
+            else if (arg == "--packages" || arg == "-p") {
+                packages = true;
+            }
+
+            else if (inShebang && arg == "-i") {
+                n++;
+                if (n >= args.size()) {
+                    throw UsageError(format("%1% requires an argument") % arg);
+                }
+                auto interpreter = args[n];
+                auto execArgs = "";
+
+                auto shellEscape = [](const string & s) {
+                    return "'" + std::regex_replace(s, std::regex("'"), "'\\''") + "'";
+                };
+
+                // Überhack to support Perl. Perl examines the shebang and
+                // executes it unless it contains the string "perl" or "indir",
+                // or (undocumented) argv[0] does not contain "perl". Exploit
+                // the latter by doing "exec -a".
+                if (std::regex_search(interpreter, std::regex("perl"))) {
+                        execArgs = "-a PERL";
+                }
+
+                std::ostringstream joined;
+                for (const auto & i : savedArgs)
+                    joined << shellEscape(i) << ' ';
+
+                if (std::regex_search(interpreter, std::regex("ruby"))) {
+                    // Hack for Ruby. Ruby also examines the shebang. It tries to
+                    // read the shebang to understand which packages to read from. Since
+                    // this is handled via nix-shell -p, we wrap our ruby script execution
+                    // in ruby -e 'load' which ignores the shebangs.
+
+                    envCommand = (format("exec %1% %2% -e 'load(\"%3%\")' -- %4%") % execArgs % interpreter % script % joined.str()).str();
+                } else {
+                    envCommand = (format("exec %1% %2% %3% %4%") % execArgs % interpreter % script % joined.str()).str();
+                }
+            }
+
+            else if (arg == "-Q" || arg == "--no-build-output") {
+                buildArgs.push_back(arg);
+                instArgs.push_back(arg);
+            }
+
+            else if (!arg.empty() && arg[0] == '-') {
+                buildArgs.push_back(arg);
+            }
+
+            else {
+                exprs.push_back(arg);
+            }
+        }
+
+        if (packages && fromArgs) {
+            throw UsageError("‘-p’ and ‘-E’ are mutually exclusive");
+        }
+
+        if (packages) {
+            instArgs.push_back("--expr");
+            std::ostringstream joined;
+            joined << "with import <nixpkgs> { }; runCommand \"shell\" { buildInputs = [ ";
+            for (const auto & i : exprs)
+                joined << '(' << i << ") ";
+            joined << "]; } \"\"";
+            exprs = Strings{joined.str()};
+        } else if (!fromArgs) {
+            if (exprs.empty() && runEnv && access("shell.nix", F_OK) == 0)
+                exprs.push_back("shell.nix");
+            if (exprs.empty())
+                exprs.push_back("default.nix");
+        }
+
+        if (runEnv)
+            setenv("IN_NIX_SHELL", pure ? "pure" : "impure", 1);
+
+        for (auto & expr : exprs) {
+            // Instantiate.
+            std::vector<string> drvPaths;
+            if (!std::regex_match(expr, std::regex("^/.*\\.drv$"))) {
+                // If we're in a #! script, interpret filenames relative to the
+                // script.
+                if (inShebang && !packages)
+                    expr = absPath(expr, dirOf(script));
+
+                Strings instantiateArgs{"--add-root", drvLink, "--indirect"};
+                for (const auto & arg : instArgs)
+                    instantiateArgs.push_back(arg);
+                instantiateArgs.push_back(expr);
+                try {
+                    auto instOutput = runProgram(settings.nixBinDir + "/nix-instantiate", false, instantiateArgs);
+                    drvPaths = tokenizeString<std::vector<string>>(instOutput);
+                } catch (ExecError & e) {
+                    maybePrintExecError(e);
+                }
+            } else {
+                drvPaths.push_back(expr);
+            }
+
+            if (runEnv) {
+                if (drvPaths.size() != 1)
+                    throw UsageError("a single derivation is required");
+                auto drvPath = drvPaths[0];
+                drvPath = drvPath.substr(0, drvPath.find_first_of('!'));
+                if (isLink(drvPath))
+                    drvPath = readLink(drvPath);
+                auto drv = store->derivationFromPath(drvPath);
+
+                // Build or fetch all dependencies of the derivation.
+                Strings nixStoreArgs{"-r", "--no-output", "--no-gc-warning"};
+                for (const auto & arg : buildArgs)
+                    nixStoreArgs.push_back(arg);
+                for (const auto & input : drv.inputDrvs)
+                    if (std::all_of(envExclude.cbegin(), envExclude.cend(), [&](const string & exclude) { return !std::regex_search(input.first, std::regex(exclude)); }))
+                        nixStoreArgs.push_back(input.first);
+                for (const auto & src : drv.inputSrcs)
+                    nixStoreArgs.push_back(src);
+
+                try {
+                    runProgram(settings.nixBinDir + "/nix-store", false, nixStoreArgs);
+                } catch (ExecError & e) {
+                    maybePrintExecError(e);
+                }
+
+                // Set the environment.
+                auto env = getEnv();
+
+                auto tmp = getEnv("TMPDIR", getEnv("XDG_RUNTIME_DIR", "/tmp"));
+
+                if (pure) {
+                    std::set<string> keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL"};
+                    decltype(env) newEnv;
+                    for (auto & i : env)
+                        if (keepVars.count(i.first))
+                            newEnv.emplace(i);
+                    env = newEnv;
+                    // NixOS hack: prevent /etc/bashrc from sourcing /etc/profile.
+                    env["__ETC_PROFILE_SOURCED"] = "1";
+                }
+
+                env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmp;
+                env["NIX_STORE"] = store->storeDir;
+
+                for (auto & var : drv.env)
+                    env.emplace(var);
+
+                restoreAffinity();
+
+                // Run a shell using the derivation's environment.  For
+                // convenience, source $stdenv/setup to setup additional
+                // environment variables and shell functions.  Also don't lose
+                // the current $PATH directories.
+                auto rcfile = (Path) tmpDir + "/rc";
+                writeFile(rcfile, (format(
+                        "rm -rf '%1%'; "
+                        "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc; "
+                        "%2%"
+                        "dontAddDisableDepTrack=1; "
+                        "[ -e $stdenv/setup ] && source $stdenv/setup; "
+                        "%3%"
+                        "set +e; "
+                        "[ -n \"$PS1\" ] && PS1=\"\\n\\[\\033[1;32m\\][nix-shell:\\w]$\\[\\033[0m\\] \"; "
+                        "if [ \"$(type -t runHook)\" = function ]; then runHook shellHook; fi; "
+                        "unset NIX_ENFORCE_PURITY; "
+                        "unset NIX_INDENT_MAKE; "
+                        "shopt -u nullglob; "
+                        "unset TZ; %4%"
+                        "%5%"
+                        )
+                        % (Path) tmpDir
+                        % (pure ? "" : "p=$PATH; ")
+                        % (pure ? "" : "PATH=$PATH:$p; unset p; ")
+                        % (getenv("TZ") ? (string("export TZ='") + getenv("TZ") + "'; ") : "")
+                        % envCommand).str());
+
+                Strings envStrs;
+                for (auto & i : env)
+                    envStrs.push_back(i.first + "=" + i.second);
+
+                auto args = interactive
+                    ? Strings{"bash", "--rcfile", rcfile}
+                    : Strings{"bash", rcfile};
+
+                execvpe(getEnv("NIX_BUILD_SHELL", "bash").c_str(),
+                        stringsToCharPtrs(args).data(),
+                        stringsToCharPtrs(envStrs).data());
+
+                throw SysError("executing shell");
+            }
+
+            // Ugly hackery to make "nix-build -A foo.all" produce symlinks
+            // ./result, ./result-dev, and so on, rather than ./result,
+            // ./result-2-dev, and so on.  This combines multiple derivation
+            // paths into one "/nix/store/drv-path!out1,out2,..." argument.
+            std::string prevDrvPath;
+            Strings drvPaths2;
+            for (const auto & drvPath : drvPaths) {
+                auto p = drvPath;
+                std::string output = "out";
+                std::smatch match;
+                if (std::regex_match(drvPath, match, std::regex("(.*)!(.*)"))) {
+                    p = match[1].str();
+                    output = match[2].str();
+                }
+                auto target = readLink(p);
+                if (verbose)
+                    std::cerr << "derivation is " << target << '\n';
+                if (target == prevDrvPath) {
+                    auto last = drvPaths2.back();
+                    drvPaths2.pop_back();
+                    drvPaths2.push_back(last + "," + output);
+                } else {
+                    drvPaths2.push_back(target + "!" + output);
+                    prevDrvPath = target;
+                }
+            }
+            // Build.
+            Strings outPaths;
+            Strings nixStoreArgs{"--add-root", outLink, "--indirect", "-r"};
+            for (const auto & arg : buildArgs)
+                nixStoreArgs.push_back(arg);
+            for (const auto & path : drvPaths2)
+                nixStoreArgs.push_back(path);
+
+            std::string nixStoreRes;
+            try {
+                nixStoreRes = runProgram(settings.nixBinDir + "/nix-store", false, nixStoreArgs);
+            } catch (ExecError & e) {
+                maybePrintExecError(e);
+            }
+
+            for (const auto & outpath : tokenizeString<std::vector<string>>(nixStoreRes))
+                outPaths.push_back(chomp(outpath));
+
+            if (dryRun)
+                continue;
+
+            for (const auto & outPath : outPaths)
+                std::cout << readLink(outPath) << '\n';
+        }
+    });
+}
+
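To illustrate the multi-output trick described in the "Ugly hackery" comment above: consecutive instantiation results that resolve to the same .drv are merged into a single "drv!out1,out2" argument, so that nix-store produces ./result, ./result-dev and so on. A minimal standalone sketch of that merging step, using invented store paths and assuming the root links have already been resolved to their .drv targets:

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    int main()
    {
        // Hypothetical (drvPath, output) pairs as they would come out of
        // nix-instantiate, already resolved through readLink().
        std::vector<std::pair<std::string, std::string>> resolved = {
            {"/nix/store/abc-foo.drv", "out"},
            {"/nix/store/abc-foo.drv", "dev"},
            {"/nix/store/def-bar.drv", "out"},
        };

        std::string prevDrvPath;
        std::vector<std::string> combined;
        for (auto & p : resolved) {
            if (p.first == prevDrvPath)
                combined.back() += "," + p.second;            // same .drv: append the output name
            else {
                combined.push_back(p.first + "!" + p.second); // new .drv: start a fresh argument
                prevDrvPath = p.first;
            }
        }

        for (auto & s : combined)
            std::cout << s << '\n';
        // Prints:
        //   /nix/store/abc-foo.drv!out,dev
        //   /nix/store/def-bar.drv!out
    }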
diff --git a/src/nix-channel/local.mk b/src/nix-channel/local.mk
new file mode 100644
index 000000000000..49fc105c6f79
--- /dev/null
+++ b/src/nix-channel/local.mk
@@ -0,0 +1,7 @@
+programs += nix-channel
+
+nix-channel_DIR := $(d)
+
+nix-channel_LIBS = libmain libutil libformat libstore
+
+nix-channel_SOURCES := $(d)/nix-channel.cc
diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc
new file mode 100755
index 000000000000..5b4c2181996c
--- /dev/null
+++ b/src/nix-channel/nix-channel.cc
@@ -0,0 +1,270 @@
+#include "shared.hh"
+#include "globals.hh"
+#include "download.hh"
+#include <fcntl.h>
+#include <regex>
+#include "store-api.hh"
+#include <pwd.h>
+
+using namespace nix;
+
+typedef std::map<string,string> Channels;
+
+static auto channels = Channels{};
+static auto channelsList = Path{};
+
+// Reads the list of channels.
+static void readChannels()
+{
+    if (!pathExists(channelsList)) return;
+    auto channelsFile = readFile(channelsList);
+
+    for (const auto & line : tokenizeString<std::vector<string>>(channelsFile, "\n")) {
+        chomp(line);
+        if (std::regex_search(line, std::regex("^\\s*\\#")))
+            continue;
+        auto split = tokenizeString<std::vector<string>>(line, " ");
+        auto url = std::regex_replace(split[0], std::regex("/*$"), "");
+        auto name = split.size() > 1 ? split[1] : baseNameOf(url);
+        channels[name] = url;
+    }
+}
+
+// Writes the list of channels.
+static void writeChannels()
+{
+    auto channelsFD = AutoCloseFD{open(channelsList.c_str(), O_WRONLY | O_CLOEXEC | O_CREAT | O_TRUNC, 0644)};
+    if (!channelsFD)
+        throw SysError(format("opening ‘%1%’ for writing") % channelsList);
+    for (const auto & channel : channels)
+        writeFull(channelsFD.get(), channel.second + " " + channel.first + "\n");
+}
+
+// Adds a channel.
+static void addChannel(const string & url, const string & name)
+{
+    if (!regex_search(url, std::regex("^(file|http|https)://")))
+        throw Error(format("invalid channel URL ‘%1%’") % url);
+    if (!regex_search(name, std::regex("^[a-zA-Z0-9_][a-zA-Z0-9_\\.-]*$")))
+        throw Error(format("invalid channel identifier ‘%1%’") % name);
+    readChannels();
+    channels[name] = url;
+    writeChannels();
+}
+
+static auto profile = Path{};
+
+// Remove a channel.
+static void removeChannel(const string & name)
+{
+    readChannels();
+    channels.erase(name);
+    writeChannels();
+
+    runProgram(settings.nixBinDir + "/nix-env", true, { "--profile", profile, "--uninstall", name });
+}
+
+static auto nixDefExpr = Path{};
+
+// Fetch Nix expressions and binary cache URLs from the subscribed channels.
+static void update(const StringSet & channelNames)
+{
+    readChannels();
+
+    auto store = openStore();
+
+    // Download each channel.
+    auto exprs = Strings{};
+    for (const auto & channel : channels) {
+        if (!channelNames.empty() && channelNames.find(channel.first) == channelNames.end())
+            continue;
+        auto name = channel.first;
+        auto url = channel.second;
+
+        // We want to download the url to a file to see if it's a tarball while also checking if we
+        // got redirected in the process, so that we can grab the various parts of a nix channel
+        // definition from a consistent location if the redirect changes mid-download.
+        auto effectiveUrl = string{};
+        auto dl = getDownloader();
+        auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl);
+        url = chomp(std::move(effectiveUrl));
+
+        // If the URL contains a version number, append it to the name
+        // attribute (so that "nix-env -q" on the channels profile
+        // shows something useful).
+        auto cname = name;
+        std::smatch match;
+        auto urlBase = baseNameOf(url);
+        if (std::regex_search(urlBase, match, std::regex("(-\\d.*)$"))) {
+            cname = cname + (string) match[1];
+        }
+
+        auto extraAttrs = string{};
+
+        auto unpacked = false;
+        if (std::regex_search(filename, std::regex("\\.tar\\.(gz|bz2|xz)$"))) {
+            try {
+                runProgram(settings.nixBinDir + "/nix-build", false, { "--no-out-link", "--expr", "import <nix/unpack-channel.nix> "
+                            "{ name = \"" + cname + "\"; channelName = \"" + name + "\"; src = builtins.storePath \"" + filename + "\"; }" });
+                unpacked = true;
+            } catch (ExecError & e) {
+            }
+        }
+
+        if (!unpacked) {
+            // The URL doesn't unpack directly, so let's try treating it like a full channel folder with files in it
+            // Check if the channel advertises a binary cache.
+            DownloadRequest request(url + "/binary-cache-url");
+            request.showProgress = DownloadRequest::no;
+            try {
+                auto dlRes = dl->download(request);
+                extraAttrs = "binaryCacheURL = \"" + *dlRes.data + "\";";
+            } catch (DownloadError & e) {
+            }
+
+            // Download the channel tarball.
+            auto fullURL = url + "/nixexprs.tar.xz";
+            try {
+                filename = dl->downloadCached(store, fullURL, false);
+            } catch (DownloadError & e) {
+                fullURL = url + "/nixexprs.tar.bz2";
+                filename = dl->downloadCached(store, fullURL, false);
+            }
+            chomp(filename);
+        }
+
+        // Regardless of where it came from, add the expression representing this channel to the list of accumulated expressions
+        exprs.push_back("f: f { name = \"" + cname + "\"; channelName = \"" + name + "\"; src = builtins.storePath \"" + filename + "\"; " + extraAttrs + " }");
+    }
+
+    // Unpack the channel tarballs into the Nix store and install them
+    // into the channels profile.
+    std::cerr << "unpacking channels...\n";
+    auto envArgs = Strings{ "--profile", profile, "--file", "<nix/unpack-channel.nix>", "--install", "--from-expression" };
+    for (auto & expr : exprs)
+        envArgs.push_back(std::move(expr));
+    envArgs.push_back("--quiet");
+    runProgram(settings.nixBinDir + "/nix-env", false, envArgs);
+
+    // Make the channels appear in nix-env.
+    struct stat st;
+    if (lstat(nixDefExpr.c_str(), &st) == 0) {
+        if (S_ISLNK(st.st_mode))
+            // old-skool ~/.nix-defexpr
+            if (unlink(nixDefExpr.c_str()) == -1)
+                throw SysError(format("unlinking %1%") % nixDefExpr);
+    } else if (errno != ENOENT) {
+        throw SysError(format("getting status of %1%") % nixDefExpr);
+    }
+    createDirs(nixDefExpr);
+    auto channelLink = nixDefExpr + "/channels";
+    replaceSymlink(profile, channelLink);
+}
+
+int main(int argc, char ** argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+
+        // Turn on caching in nix-prefetch-url.
+        auto channelCache = settings.nixStateDir + "/channel-cache";
+        createDirs(channelCache);
+        setenv("NIX_DOWNLOAD_CACHE", channelCache.c_str(), 1);
+
+        // Figure out the name of the `.nix-channels' file to use
+        auto home = getEnv("HOME");
+        if (home.empty())
+            throw Error("$HOME not set");
+        channelsList = home + "/.nix-channels";
+        nixDefExpr = home + "/.nix-defexpr";
+
+        // Figure out the name of the channels profile.
+        auto name = string{};
+        auto pw = getpwuid(getuid());
+        if (!pw)
+            name = getEnv("USER", "");
+        else
+            name = pw->pw_name;
+        if (name.empty())
+            throw Error("cannot figure out user name");
+        profile = settings.nixStateDir + "/profiles/per-user/" + name + "/channels";
+        createDirs(dirOf(profile));
+
+        enum {
+            cNone,
+            cAdd,
+            cRemove,
+            cList,
+            cUpdate,
+            cRollback
+        } cmd = cNone;
+        auto args = std::vector<string>{};
+        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+            if (*arg == "--help") {
+                showManPage("nix-channel");
+            } else if (*arg == "--version") {
+                printVersion("nix-channel");
+            } else if (*arg == "--add") {
+                cmd = cAdd;
+            } else if (*arg == "--remove") {
+                cmd = cRemove;
+            } else if (*arg == "--list") {
+                cmd = cList;
+            } else if (*arg == "--update") {
+                cmd = cUpdate;
+            } else if (*arg == "--rollback") {
+                cmd = cRollback;
+            } else {
+                args.push_back(std::move(*arg));
+            }
+            return true;
+        });
+        switch (cmd) {
+            case cNone:
+                throw UsageError("no command specified");
+            case cAdd:
+                if (args.size() < 1 || args.size() > 2)
+                    throw UsageError("‘--add’ requires one or two arguments");
+                {
+                auto url = args[0];
+                auto name = string{};
+                if (args.size() == 2) {
+                    name = args[1];
+                } else {
+                    name = baseNameOf(url);
+                    name = std::regex_replace(name, std::regex("-unstable$"), "");
+                    name = std::regex_replace(name, std::regex("-stable$"), "");
+                }
+                addChannel(url, name);
+                }
+                break;
+            case cRemove:
+                if (args.size() != 1)
+                    throw UsageError("‘--remove’ requires one argument");
+                removeChannel(args[0]);
+                break;
+            case cList:
+                if (!args.empty())
+                    throw UsageError("‘--list’ expects no arguments");
+                readChannels();
+                for (const auto & channel : channels)
+                    std::cout << channel.first << ' ' << channel.second << '\n';
+                break;
+            case cUpdate:
+                update(StringSet(args.begin(), args.end()));
+                break;
+            case cRollback:
+                if (args.size() > 1)
+                    throw UsageError("‘--rollback’ has at most one argument");
+                auto envArgs = Strings{"--profile", profile};
+                if (args.size() == 1) {
+                    envArgs.push_back("--switch-generation");
+                    envArgs.push_back(args[0]);
+                } else {
+                    envArgs.push_back("--rollback");
+                }
+                runProgram(settings.nixBinDir + "/nix-env", false, envArgs);
+                break;
+        }
+    });
+}
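For reference, readChannels() and writeChannels() above treat ~/.nix-channels as a plain-text file with one "<url> <name>" pair per line; lines starting with '#' are skipped and trailing slashes on the URL are stripped. A hypothetical file might look like:

    # channels subscribed by this user
    https://nixos.org/channels/nixpkgs-unstable nixpkgs
    https://example.org/my-channel custom

If the name column is missing, readChannels() falls back to the base name of the URL; ‘--add’ likewise derives the name from the URL, stripping a trailing -stable or -unstable.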
diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc
index 3aa348581b19..cc663a96924d 100644
--- a/src/nix-collect-garbage/nix-collect-garbage.cc
+++ b/src/nix-collect-garbage/nix-collect-garbage.cc
@@ -36,7 +36,7 @@ void removeOldGenerations(std::string dir)
                 if (e.errNo == ENOENT) continue;
             }
             if (link.find("link") != string::npos) {
-                printMsg(lvlInfo, format("removing old generations of profile %1%") % path);
+                printInfo(format("removing old generations of profile %1%") % path);
                 if (deleteOlderThan != "")
                     deleteGenerationsOlderThan(path, deleteOlderThan, dryRun);
                 else
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
index 6e0d869f4c87..5ed38871f774 100644
--- a/src/nix-daemon/nix-daemon.cc
+++ b/src/nix-daemon/nix-daemon.cc
@@ -413,12 +413,10 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
         options.pathsToDelete = readStorePaths<PathSet>(*store, from);
         options.ignoreLiveness = readInt(from);
         options.maxFreed = readLongLong(from);
-        readInt(from); // obsolete field
-        if (GET_PROTOCOL_MINOR(clientVersion) >= 5) {
-            /* removed options */
-            readInt(from);
-            readInt(from);
-        }
+        // obsolete fields
+        readInt(from);
+        readInt(from);
+        readInt(from);
 
         GCResults results;
 
@@ -440,17 +438,12 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
         verbosity = (Verbosity) readInt(from);
         settings.set("build-max-jobs", std::to_string(readInt(from)));
         settings.set("build-max-silent-time", std::to_string(readInt(from)));
-        if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
-            settings.useBuildHook = readInt(from) != 0;
-        if (GET_PROTOCOL_MINOR(clientVersion) >= 4) {
-            settings.verboseBuild = lvlError == (Verbosity) readInt(from);
-            readInt(from); // obsolete logType
-            readInt(from); // obsolete printBuildTrace
-        }
-        if (GET_PROTOCOL_MINOR(clientVersion) >= 6)
-            settings.set("build-cores", std::to_string(readInt(from)));
-        if (GET_PROTOCOL_MINOR(clientVersion) >= 10)
-            settings.set("build-use-substitutes", readInt(from) ? "true" : "false");
+        settings.useBuildHook = readInt(from) != 0;
+        settings.verboseBuild = lvlError == (Verbosity) readInt(from);
+        readInt(from); // obsolete logType
+        readInt(from); // obsolete printBuildTrace
+        settings.set("build-cores", std::to_string(readInt(from)));
+        settings.set("build-use-substitutes", readInt(from) ? "true" : "false");
         if (GET_PROTOCOL_MINOR(clientVersion) >= 12) {
             unsigned int n = readInt(from);
             for (unsigned int i = 0; i < n; i++) {
@@ -478,9 +471,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
         if (i == infos.end())
             to << 0;
         else {
-            to << 1 << i->second.deriver << i->second.references << i->second.downloadSize;
-            if (GET_PROTOCOL_MINOR(clientVersion) >= 7)
-                to << i->second.narSize;
+            to << 1 << i->second.deriver << i->second.references << i->second.downloadSize << i->second.narSize;
         }
         break;
     }
@@ -524,7 +515,8 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
                << info->registrationTime << info->narSize;
             if (GET_PROTOCOL_MINOR(clientVersion) >= 16) {
                 to << info->ultimate
-                   << info->sigs;
+                   << info->sigs
+                   << info->ca;
             }
         } else {
             assert(GET_PROTOCOL_MINOR(clientVersion) >= 17);
@@ -585,11 +577,13 @@ static void processConnection(bool trusted)
     to.flush();
     unsigned int clientVersion = readInt(from);
 
+    if (clientVersion < 0x10a)
+        throw Error("the Nix client version is too old");
+
     if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from))
         setAffinityTo(readInt(from));
 
-    if (GET_PROTOCOL_MINOR(clientVersion) >= 11)
-        readInt(from); // obsolete reserveSpace
+    readInt(from); // obsolete reserveSpace
 
     /* Send startup error messages to the client. */
     startWork();
@@ -636,10 +630,10 @@ static void processConnection(bool trusted)
                    during addTextToStore() / importPath().  If that
                    happens, just send the error message and exit. */
                 bool errorAllowed = canSendStderr;
-                stopWork(false, e.msg(), GET_PROTOCOL_MINOR(clientVersion) >= 8 ? e.status : 0);
+                stopWork(false, e.msg(), e.status);
                 if (!errorAllowed) throw;
             } catch (std::bad_alloc & e) {
-                stopWork(false, "Nix daemon out of memory", GET_PROTOCOL_MINOR(clientVersion) >= 8 ? 1 : 0);
+                stopWork(false, "Nix daemon out of memory", 1);
                 throw;
             }
 
@@ -650,10 +644,10 @@ static void processConnection(bool trusted)
 
         canSendStderr = false;
         _isInterrupted = false;
-        printMsg(lvlDebug, format("%1% operations") % opCount);
+        debug(format("%1% operations") % opCount);
 
     } catch (Error & e) {
-        stopWork(false, e.msg(), GET_PROTOCOL_MINOR(clientVersion) >= 8 ? 1 : 0);
+        stopWork(false, e.msg(), 1);
         to.flush();
         return;
     }
@@ -843,7 +837,7 @@ static void daemonLoop(char * * argv)
             if (!trusted && !matchUser(user, group, allowedUsers))
                 throw Error(format("user ‘%1%’ is not allowed to connect to the Nix daemon") % user);
 
-            printMsg(lvlInfo, format((string) "accepted connection from pid %1%, user %2%" + (trusted ? " (trusted)" : ""))
+            printInfo(format((string) "accepted connection from pid %1%, user %2%" + (trusted ? " (trusted)" : ""))
                 % (peer.pidKnown ? std::to_string(peer.pid) : "<unknown>")
                 % (peer.uidKnown ? user : "<unknown>"));
 
@@ -880,7 +874,7 @@ static void daemonLoop(char * * argv)
         } catch (Interrupted & e) {
             throw;
         } catch (Error & e) {
-            printMsg(lvlError, format("error processing connection: %1%") % e.msg());
+            printError(format("error processing connection: %1%") % e.msg());
         }
     }
 }
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index 3f0486bb6541..908c09bc8c8a 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -10,6 +10,7 @@
 #include "store-api.hh"
 #include "user-env.hh"
 #include "util.hh"
+#include "json.hh"
 #include "value-to-json.hh"
 #include "xml-writer.hh"
 
@@ -123,14 +124,13 @@ static void getAllExprs(EvalState & state,
             if (hasSuffix(attrName, ".nix"))
                 attrName = string(attrName, 0, attrName.size() - 4);
             if (attrs.find(attrName) != attrs.end()) {
-                printMsg(lvlError, format("warning: name collision in input Nix expressions, skipping ‘%1%’") % path2);
+                printError(format("warning: name collision in input Nix expressions, skipping ‘%1%’") % path2);
                 continue;
             }
             attrs.insert(attrName);
             /* Load the expression on demand. */
-            Value & vFun(*state.allocValue());
+            Value & vFun = state.getBuiltin("import");
             Value & vArg(*state.allocValue());
-            state.getBuiltin("import", vFun);
             mkString(vArg, path2);
             if (v.attrs->size() == v.attrs->capacity())
                 throw Error(format("too many Nix expressions in directory ‘%1%’") % path);
@@ -304,7 +304,7 @@ static DrvInfos filterBySelector(EvalState & state, const DrvInfos & allElems,
             matches.clear();
             for (auto & j : newest) {
                 if (multiple.find(j.second.first.name) != multiple.end())
-                    printMsg(lvlInfo,
+                    printInfo(
                         format("warning: there are multiple derivations named ‘%1%’; using the first one")
                         % j.second.first.name);
                 matches.push_back(j.second);
@@ -496,13 +496,13 @@ static void installDerivations(Globals & globals,
                 if (!globals.preserveInstalled &&
                     newNames.find(drvName.name) != newNames.end() &&
                     !keep(i))
-                    printMsg(lvlInfo, format("replacing old ‘%1%’") % i.name);
+                    printInfo(format("replacing old ‘%1%’") % i.name);
                 else
                     allElems.push_back(i);
             }
 
             for (auto & i : newElems)
-                printMsg(lvlInfo, format("installing ‘%1%’") % i.name);
+                printInfo(format("installing ‘%1%’") % i.name);
         }
 
         printMissing(*globals.state, newElems);
@@ -604,7 +604,7 @@ static void upgradeDerivations(Globals & globals,
                 {
                     const char * action = compareVersions(drvName.version, bestVersion) <= 0
                         ? "upgrading" : "downgrading";
-                    printMsg(lvlInfo,
+                    printInfo(
                         format("%1% ‘%2%’ to ‘%3%’")
                         % action % i.name % bestElem->name);
                     newElems.push_back(*bestElem);
@@ -674,7 +674,7 @@ static void opSetFlag(Globals & globals, Strings opFlags, Strings opArgs)
             DrvName drvName(i.name);
             for (auto & j : selectors)
                 if (j.matches(drvName)) {
-                    printMsg(lvlInfo, format("setting flag on ‘%1%’") % i.name);
+                    printInfo(format("setting flag on ‘%1%’") % i.name);
                     j.hits++;
                     setMetaFlag(*globals.state, i, flagName, flagValue);
                     break;
@@ -748,7 +748,7 @@ static void uninstallDerivations(Globals & globals, Strings & selectors,
                 if ((isPath(j) && i.queryOutPath() == globals.state->store->followLinksToStorePath(j))
                     || DrvName(j).matches(drvName))
                 {
-                    printMsg(lvlInfo, format("uninstalling ‘%1%’") % i.name);
+                    printInfo(format("uninstalling ‘%1%’") % i.name);
                     found = true;
                     break;
                 }
@@ -861,26 +861,24 @@ static VersionDiff compareVersionAgainstSet(
 
 static void queryJSON(Globals & globals, vector<DrvInfo> & elems)
 {
-    JSONObject topObj(cout);
+    JSONObject topObj(cout, true);
     for (auto & i : elems) {
-        topObj.attr(i.attrPath);
-        JSONObject pkgObj(cout);
+        JSONObject pkgObj = topObj.object(i.attrPath);
 
         pkgObj.attr("name", i.name);
         pkgObj.attr("system", i.system);
 
-        pkgObj.attr("meta");
-        JSONObject metaObj(cout);
+        JSONObject metaObj = pkgObj.object("meta");
         StringSet metaNames = i.queryMetaNames();
         for (auto & j : metaNames) {
-            metaObj.attr(j);
+            auto placeholder = metaObj.placeholder(j);
             Value * v = i.queryMeta(j);
             if (!v) {
-                printMsg(lvlError, format("derivation ‘%1%’ has invalid meta attribute ‘%2%’") % i.name % j);
-                cout << "null";
+                printError(format("derivation ‘%1%’ has invalid meta attribute ‘%2%’") % i.name % j);
+                placeholder.write(nullptr);
             } else {
                 PathSet context;
-                printValueAsJSON(*globals.state, true, *v, cout, context);
+                printValueAsJSON(*globals.state, true, *v, placeholder, context);
             }
         }
     }
@@ -1120,7 +1118,7 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs)
                             attrs2["name"] = j;
                             Value * v = i.queryMeta(j);
                             if (!v)
-                                printMsg(lvlError, format("derivation ‘%1%’ has invalid meta attribute ‘%2%’") % i.name % j);
+                                printError(format("derivation ‘%1%’ has invalid meta attribute ‘%2%’") % i.name % j);
                             else {
                                 if (v->type == tString) {
                                     attrs2["type"] = "string";
@@ -1221,7 +1219,7 @@ static void switchGeneration(Globals & globals, int dstGen)
             throw Error(format("generation %1% does not exist") % dstGen);
     }
 
-    printMsg(lvlInfo, format("switching from generation %1% to %2%")
+    printInfo(format("switching from generation %1% to %2%")
         % curGen % dst.number);
 
     if (globals.dryRun) return;
@@ -1374,7 +1372,7 @@ int main(int argc, char * * argv)
             else if (*arg == "--delete-generations")
                 op = opDeleteGenerations;
             else if (*arg == "--dry-run") {
-                printMsg(lvlInfo, "(dry run; not doing anything)");
+                printInfo("(dry run; not doing anything)");
                 globals.dryRun = true;
             }
             else if (*arg == "--system-filter")
diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc
index f239f63776c2..e9997fae57ba 100644
--- a/src/nix-env/user-env.cc
+++ b/src/nix-env/user-env.cc
@@ -139,7 +139,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
 
         Path lockTokenCur = optimisticLockProfile(profile);
         if (lockToken != lockTokenCur) {
-            printMsg(lvlError, format("profile ‘%1%’ changed while we were busy; restarting") % profile);
+            printError(format("profile ‘%1%’ changed while we were busy; restarting") % profile);
             return false;
         }
 
diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc
index 44a16743d886..acf603025690 100644
--- a/src/nix-prefetch-url/nix-prefetch-url.cc
+++ b/src/nix-prefetch-url/nix-prefetch-url.cc
@@ -122,7 +122,7 @@ int main(int argc, char * * argv)
             /* Extract the hash mode. */
             attr = v.attrs->find(state.symbols.create("outputHashMode"));
             if (attr == v.attrs->end())
-                printMsg(lvlInfo, "warning: this does not look like a fetchurl call");
+                printInfo("warning: this does not look like a fetchurl call");
             else
                 unpack = state.forceString(*attr->value) == "recursive";
 
@@ -146,7 +146,7 @@ int main(int argc, char * * argv)
         Path storePath;
         if (args.size() == 2) {
             expectedHash = parseHash16or32(ht, args[1]);
-            storePath = store->makeFixedOutputPath(unpack, ht, expectedHash, name);
+            storePath = store->makeFixedOutputPath(unpack, expectedHash, name);
             if (store->isValidPath(storePath))
                 hash = expectedHash;
             else
@@ -158,7 +158,7 @@ int main(int argc, char * * argv)
             auto actualUri = resolveMirrorUri(state, uri);
 
             /* Download the file. */
-            auto result = makeDownloader()->download(actualUri, DownloadOptions());
+            auto result = getDownloader()->download(DownloadRequest(actualUri));
 
             AutoDelete tmpDir(createTempDir(), true);
             Path tmpFile = (Path) tmpDir + "/tmp";
@@ -166,7 +166,7 @@ int main(int argc, char * * argv)
 
             /* Optionally unpack the file. */
             if (unpack) {
-                printMsg(lvlInfo, "unpacking...");
+                printInfo("unpacking...");
                 Path unpacked = (Path) tmpDir + "/unpacked";
                 createDirs(unpacked);
                 if (hasSuffix(baseNameOf(uri), ".zip"))
@@ -197,11 +197,11 @@ int main(int argc, char * * argv)
                into the Nix store. */
             storePath = store->addToStore(name, tmpFile, unpack, ht);
 
-            assert(storePath == store->makeFixedOutputPath(unpack, ht, hash, name));
+            assert(storePath == store->makeFixedOutputPath(unpack, hash, name));
         }
 
         if (!printPath)
-            printMsg(lvlInfo, format("path is ‘%1%’") % storePath);
+            printInfo(format("path is ‘%1%’") % storePath);
 
         std::cout << printHash16or32(hash) << std::endl;
         if (printPath)
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index 78b2aa102714..a8cb46319abc 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -213,8 +213,7 @@ static void opPrintFixedPath(Strings opFlags, Strings opArgs)
     string name = *i++;
 
     cout << format("%1%\n") %
-        store->makeFixedOutputPath(recursive, hashAlgo,
-            parseHash16or32(hashAlgo, hash), name);
+        store->makeFixedOutputPath(recursive, parseHash16or32(hashAlgo, hash), name);
 }
 
 
@@ -484,6 +483,10 @@ static void opReadLog(Strings opFlags, Strings opArgs)
 
     RunPager pager;
 
+    // FIXME: move getting logs into Store.
+    auto store2 = std::dynamic_pointer_cast<LocalFSStore>(store);
+    if (!store2) throw Error(format("store ‘%s’ does not support reading logs") % store->getUri());
+
     for (auto & i : opArgs) {
         Path path = useDeriver(store->followLinksToStorePath(i));
 
@@ -494,8 +497,8 @@ static void opReadLog(Strings opFlags, Strings opArgs)
 
             Path logPath =
                 j == 0
-                ? (format("%1%/%2%/%3%/%4%") % settings.nixLogDir % drvsLogDir % string(baseName, 0, 2) % string(baseName, 2)).str()
-                : (format("%1%/%2%/%3%") % settings.nixLogDir % drvsLogDir % baseName).str();
+                ? (format("%1%/%2%/%3%/%4%") % store2->logDir % drvsLogDir % string(baseName, 0, 2) % string(baseName, 2)).str()
+                : (format("%1%/%2%/%3%") % store2->logDir % drvsLogDir % baseName).str();
             Path logBz2Path = logPath + ".bz2";
 
             if (pathExists(logPath)) {
@@ -752,7 +755,7 @@ static void opVerify(Strings opFlags, Strings opArgs)
         else throw UsageError(format("unknown flag ‘%1%’") % i);
 
     if (store->verifyStore(checkContents, repair)) {
-        printMsg(lvlError, "warning: not all errors were fixed");
+        printError("warning: not all errors were fixed");
         throw Exit(1);
     }
 }
@@ -774,7 +777,7 @@ static void opVerifyPath(Strings opFlags, Strings opArgs)
         store->narFromPath(path, sink);
         auto current = sink.finish();
         if (current.first != info->narHash) {
-            printMsg(lvlError,
+            printError(
                 format("path ‘%1%’ was modified! expected hash ‘%2%’, got ‘%3%’")
                 % path % printHash(info->narHash) % printHash(current.first));
             status = 1;
@@ -876,7 +879,7 @@ static void opServe(Strings opFlags, Strings opArgs)
                         try {
                             store->buildPaths(willSubstitute);
                         } catch (Error & e) {
-                            printMsg(lvlError, format("warning: %1%") % e.msg());
+                            printError(format("warning: %1%") % e.msg());
                         }
                 }
 
diff --git a/src/nix/installables.cc b/src/nix/installables.cc
index 6257c7679af9..8341bbc5a3a4 100644
--- a/src/nix/installables.cc
+++ b/src/nix/installables.cc
@@ -9,6 +9,41 @@
 
 namespace nix {
 
+Value * MixInstallables::buildSourceExpr(EvalState & state)
+{
+    Value * vRoot = state.allocValue();
+
+    if (file != "") {
+        Expr * e = state.parseExprFromFile(resolveExprPath(lookupFileArg(state, file)));
+        state.eval(e, *vRoot);
+    }
+
+    else {
+
+        /* Construct the installation source from $NIX_PATH. */
+
+        auto searchPath = state.getSearchPath();
+
+        state.mkAttrs(*vRoot, searchPath.size());
+
+        std::unordered_set<std::string> seen;
+
+        for (auto & i : searchPath) {
+            if (i.first == "") continue;
+            if (seen.count(i.first)) continue;
+            seen.insert(i.first);
+            if (!pathExists(i.second)) continue;
+            mkApp(*state.allocAttr(*vRoot, state.symbols.create(i.first)),
+                state.getBuiltin("import"),
+                mkString(*state.allocValue(), i.second));
+        }
+
+        vRoot->attrs->sort();
+    }
+
+    return vRoot;
+}
+
 UserEnvElems MixInstallables::evalInstallables(ref<Store> store)
 {
     UserEnvElems res;
@@ -46,15 +81,12 @@ UserEnvElems MixInstallables::evalInstallables(ref<Store> store)
 
             EvalState state({}, store);
 
-            Expr * e = state.parseExprFromFile(resolveExprPath(lookupFileArg(state, file)));
-
-            Value vRoot;
-            state.eval(e, vRoot);
+            auto vRoot = buildSourceExpr(state);
 
             std::map<string, string> autoArgs_;
             Bindings & autoArgs(*evalAutoArgs(state, autoArgs_));
 
-            Value & v(*findAlongAttrPath(state, installable, autoArgs, vRoot));
+            Value & v(*findAlongAttrPath(state, installable, autoArgs, *vRoot));
             state.forceValue(v);
 
             DrvInfos drvs;
diff --git a/src/nix/installables.hh b/src/nix/installables.hh
index 5eb897d46148..a58f7dc59bb4 100644
--- a/src/nix/installables.hh
+++ b/src/nix/installables.hh
@@ -21,10 +21,13 @@ struct UserEnvElem
 
 typedef std::vector<UserEnvElem> UserEnvElems;
 
+struct Value;
+class EvalState;
+
 struct MixInstallables : virtual Args
 {
     Strings installables;
-    Path file = "<nixpkgs>";
+    Path file;
 
     MixInstallables()
     {
@@ -33,6 +36,13 @@ struct MixInstallables : virtual Args
     }
 
     UserEnvElems evalInstallables(ref<Store> store);
+
+    /* Return a value representing the Nix expression from which we
+       are installing. This is either the file specified by ‘--file’,
+       or an attribute set constructed from $NIX_PATH, e.g. ‘{ nixpkgs
+       = import ...; bla = import ...; }’. */
+    Value * buildSourceExpr(EvalState & state);
+
 };
 
 }
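As a concrete (hypothetical) illustration of the comment above: with NIX_PATH=nixpkgs=/path/to/nixpkgs:dotfiles=/home/alice/dotfiles, buildSourceExpr() in installables.cc builds roughly ‘{ nixpkgs = import /path/to/nixpkgs; dotfiles = import /home/alice/dotfiles; }’, skipping entries with an empty prefix, duplicate prefixes, and paths that do not exist.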
diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc
index c61fe7ff1e00..a9b33e1877dd 100644
--- a/src/nix/path-info.cc
+++ b/src/nix/path-info.cc
@@ -1,6 +1,7 @@
 #include "command.hh"
 #include "shared.hh"
 #include "store-api.hh"
+#include "json.hh"
 
 #include <iomanip>
 #include <algorithm>
@@ -12,12 +13,14 @@ struct CmdPathInfo : StorePathsCommand
     bool showSize = false;
     bool showClosureSize = false;
     bool showSigs = false;
+    bool json = false;
 
     CmdPathInfo()
     {
         mkFlag('s', "size", "print size of the NAR dump of each path", &showSize);
         mkFlag('S', "closure-size", "print sum size of the NAR dumps of the closure of each path", &showClosureSize);
         mkFlag(0, "sigs", "show signatures", &showSigs);
+        mkFlag(0, "json", "produce JSON output", &json);
     }
 
     std::string name() override
@@ -41,6 +44,18 @@ struct CmdPathInfo : StorePathsCommand
                 "To check the existence of a path in a binary cache:",
                 "nix path-info -r /nix/store/7qvk5c91...-geeqie-1.1 --store https://cache.nixos.org/"
             },
+            Example{
+                "To print the 10 most recently added paths (using --json and the jq(1) command):",
+                "nix path-info --json --all | jq -r 'sort_by(.registrationTime)[-11:-1][].path'"
+            },
+            Example{
+                "To show the size of the entire Nix store:",
+                "nix path-info --json --all | jq 'map(.narSize) | add'"
+            },
+            Example{
+                "To show every path whose closure is bigger than 1 GB, sorted by closure size:",
+                "nix path-info --json --all -S | jq 'map(select(.closureSize > 1e9)) | sort_by(.closureSize) | map([.path, .closureSize])'"
+            },
         };
     }
 
@@ -50,35 +65,85 @@ struct CmdPathInfo : StorePathsCommand
         for (auto & storePath : storePaths)
             pathLen = std::max(pathLen, storePath.size());
 
-        for (auto storePath : storePaths) {
-            auto info = store->queryPathInfo(storePath);
-            storePath = info->path; // FIXME: screws up padding
+        auto getClosureSize = [&](const Path & storePath) -> unsigned long long {
+            unsigned long long totalSize = 0;
+            PathSet closure;
+            store->computeFSClosure(storePath, closure, false, false);
+            for (auto & p : closure)
+                totalSize += store->queryPathInfo(p)->narSize;
+            return totalSize;
+        };
 
-            std::cout << storePath << std::string(std::max(0, (int) pathLen - (int) storePath.size()), ' ');
+        if (json) {
+            JSONList jsonRoot(std::cout, true);
 
-            if (showSize) {
-                std::cout << '\t' << std::setw(11) << info->narSize;
-            }
+            for (auto storePath : storePaths) {
+                auto info = store->queryPathInfo(storePath);
+                storePath = info->path;
+
+                auto jsonPath = jsonRoot.object();
+                jsonPath
+                    .attr("path", storePath)
+                    .attr("narHash", info->narHash.to_string())
+                    .attr("narSize", info->narSize);
+
+                if (showClosureSize)
+                    jsonPath.attr("closureSize", getClosureSize(storePath));
+
+                if (info->deriver != "")
+                    jsonPath.attr("deriver", info->deriver);
+
+                {
+                    auto jsonRefs = jsonPath.list("references");
+                    for (auto & ref : info->references)
+                        jsonRefs.elem(ref);
+                }
 
-            if (showClosureSize) {
-                size_t totalSize = 0;
-                PathSet closure;
-                store->computeFSClosure(storePath, closure, false, false);
-                for (auto & p : closure)
-                    totalSize += store->queryPathInfo(p)->narSize;
-                std::cout << '\t' << std::setw(11) << totalSize;
+                if (info->registrationTime)
+                    jsonPath.attr("registrationTime", info->registrationTime);
+
+                if (info->ultimate)
+                    jsonPath.attr("ultimate", info->ultimate);
+
+                if (info->ca != "")
+                    jsonPath.attr("ca", info->ca);
+
+                if (!info->sigs.empty()) {
+                    auto jsonSigs = jsonPath.list("signatures");
+                    for (auto & sig : info->sigs)
+                        jsonSigs.elem(sig);
+                }
             }
+        }
+
+        else {
+
+            for (auto storePath : storePaths) {
+                auto info = store->queryPathInfo(storePath);
+                storePath = info->path; // FIXME: screws up padding
 
-            if (showSigs) {
-                std::cout << '\t';
-                Strings ss;
-                if (info->ultimate) ss.push_back("ultimate");
-                for (auto & sig : info->sigs) ss.push_back(sig);
-                std::cout << concatStringsSep(" ", ss);
+                std::cout << storePath << std::string(std::max(0, (int) pathLen - (int) storePath.size()), ' ');
+
+                if (showSize)
+                    std::cout << '\t' << std::setw(11) << info->narSize;
+
+                if (showClosureSize)
+                    std::cout << '\t' << std::setw(11) << getClosureSize(storePath);
+
+                if (showSigs) {
+                    std::cout << '\t';
+                    Strings ss;
+                    if (info->ultimate) ss.push_back("ultimate");
+                    if (info->ca != "") ss.push_back("ca:" + info->ca);
+                    for (auto & sig : info->sigs) ss.push_back(sig);
+                    std::cout << concatStringsSep(" ", ss);
+                }
+
+                std::cout << std::endl;
             }
 
-            std::cout << std::endl;
         }
+
     }
 };
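A hypothetical element of the JSON array produced by the new ‘--json’ flag (all values invented; ‘closureSize’ only appears with -S, and ‘deriver’, ‘ca’, ‘ultimate’ and ‘signatures’ only when present), matching the fields the jq examples above rely on:

    {
      "path": "/nix/store/abc123-hello-2.10",
      "narHash": "sha256:0hxjr...",
      "narSize": 123456,
      "references": [ "/nix/store/def456-glibc-2.23" ],
      "registrationTime": 1464000000,
      "signatures": [ "cache.nixos.org-1:..." ]
    }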
 
diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc
index 9932aa4a9eb0..0ff1b9f7cea0 100644
--- a/src/nix/sigs.cc
+++ b/src/nix/sigs.cc
@@ -84,7 +84,7 @@ struct CmdCopySigs : StorePathsCommand
 
         pool.process();
 
-        printMsg(lvlInfo, format("imported %d signatures") % added);
+        printInfo(format("imported %d signatures") % added);
     }
 };
 
@@ -132,7 +132,7 @@ struct CmdSignPaths : StorePathsCommand
             }
         }
 
-        printMsg(lvlInfo, format("added %d signatures") % added);
+        printInfo(format("added %d signatures") % added);
     }
 };
 
diff --git a/src/nix/verify.cc b/src/nix/verify.cc
index fd904f465687..5314a42a46c7 100644
--- a/src/nix/verify.cc
+++ b/src/nix/verify.cc
@@ -87,7 +87,7 @@ struct CmdVerify : StorePathsCommand
                     if (hash.first != info->narHash) {
                         logger->incProgress(corruptedLabel);
                         corrupted = 1;
-                        printMsg(lvlError,
+                        printError(
                             format("path ‘%s’ was modified! expected hash ‘%s’, got ‘%s’")
                             % info->path % printHash(info->narHash) % printHash(hash.first));
                     }
@@ -116,15 +116,19 @@ struct CmdVerify : StorePathsCommand
                             }
                         };
 
+                        if (info->isContentAddressed(*store)) validSigs = ValidPathInfo::maxSigs;
+
                         doSigs(info->sigs);
 
                         for (auto & store2 : substituters) {
                             if (validSigs >= actualSigsNeeded) break;
                             try {
-                                doSigs(store2->queryPathInfo(info->path)->sigs);
+                                auto info2 = store2->queryPathInfo(info->path);
+                                if (info2->isContentAddressed(*store)) validSigs = ValidPathInfo::maxSigs;
+                                doSigs(info2->sigs);
                             } catch (InvalidPath &) {
                             } catch (Error & e) {
-                                printMsg(lvlError, format(ANSI_RED "error:" ANSI_NORMAL " %s") % e.what());
+                                printError(format(ANSI_RED "error:" ANSI_NORMAL " %s") % e.what());
                             }
                         }
 
@@ -135,7 +139,7 @@ struct CmdVerify : StorePathsCommand
                     if (!good) {
                         logger->incProgress(untrustedLabel);
                         untrusted++;
-                        printMsg(lvlError, format("path ‘%s’ is untrusted") % info->path);
+                        printError(format("path ‘%s’ is untrusted") % info->path);
                     }
 
                 }
@@ -144,7 +148,7 @@ struct CmdVerify : StorePathsCommand
                 done++;
 
             } catch (Error & e) {
-                printMsg(lvlError, format(ANSI_RED "error:" ANSI_NORMAL " %s") % e.what());
+                printError(format(ANSI_RED "error:" ANSI_NORMAL " %s") % e.what());
                 logger->incProgress(failedLabel);
                 failed++;
             }
@@ -155,7 +159,7 @@ struct CmdVerify : StorePathsCommand
 
         pool.process();
 
-        printMsg(lvlInfo, format("%d paths checked, %d untrusted, %d corrupted, %d failed")
+        printInfo(format("%d paths checked, %d untrusted, %d corrupted, %d failed")
             % done % untrusted % corrupted % failed);
 
         throw Exit(
diff --git a/src/resolve-system-dependencies/local.mk b/src/resolve-system-dependencies/local.mk
new file mode 100644
index 000000000000..8792a4a252fa
--- /dev/null
+++ b/src/resolve-system-dependencies/local.mk
@@ -0,0 +1,11 @@
+ifeq ($(OS), Darwin)
+  programs += resolve-system-dependencies
+endif
+
+resolve-system-dependencies_DIR := $(d)
+
+resolve-system-dependencies_INSTALL_DIR := $(libexecdir)/nix
+
+resolve-system-dependencies_LIBS := libstore libmain libutil libformat
+
+resolve-system-dependencies_SOURCES := $(d)/resolve-system-dependencies.cc
diff --git a/src/resolve-system-dependencies/resolve-system-dependencies.cc b/src/resolve-system-dependencies/resolve-system-dependencies.cc
new file mode 100644
index 000000000000..ae8ca36ba9de
--- /dev/null
+++ b/src/resolve-system-dependencies/resolve-system-dependencies.cc
@@ -0,0 +1,194 @@
+#include "derivations.hh"
+#include "globals.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include <sys/utsname.h>
+#include <algorithm>
+#include <iostream>
+#include <fstream>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <mach-o/loader.h>
+#include <mach-o/swap.h>
+
+#define DO_SWAP(x, y) ((x) ? OSSwapInt32(y) : (y))
+
+using namespace nix;
+
+static auto cacheDir = Path{};
+
+Path resolveCacheFile(Path lib) {
+    std::replace(lib.begin(), lib.end(), '/', '%');
+    return cacheDir + "/" + lib;
+}
+
+std::set<string> readCacheFile(const Path & file) {
+    return tokenizeString<set<string>>(readFile(file), "\n");
+}
+
+void writeCacheFile(const Path & file, std::set<string> & deps) {
+    std::ofstream fp;
+    fp.open(file);
+    for (auto & d : deps) {
+        fp << d << "\n";
+    }
+    fp.close();
+}
+
+std::string findDylibName(bool should_swap, ptrdiff_t dylib_command_start) {
+    struct dylib_command *dylc = (struct dylib_command*)dylib_command_start;
+    return std::string((char*)(dylib_command_start + DO_SWAP(should_swap, dylc->dylib.name.offset)));
+}
+
+std::set<std::string> runResolver(const Path & filename) {
+    int fd = open(filename.c_str(), O_RDONLY);
+    struct stat s;
+    fstat(fd, &s);
+    void *obj = mmap(NULL, s.st_size, PROT_READ, MAP_SHARED, fd, 0);
+
+    ptrdiff_t mach64_offset = 0;
+
+    uint32_t magic = ((struct mach_header_64*)obj)->magic;
+    if(magic == FAT_CIGAM || magic == FAT_MAGIC) {
+        bool should_swap = magic == FAT_CIGAM;
+        uint32_t narches = DO_SWAP(should_swap, ((struct fat_header*)obj)->nfat_arch);
+
+        for(uint32_t iter = 0; iter < narches; iter++) {
+            ptrdiff_t header_offset = (ptrdiff_t)obj + sizeof(struct fat_header) + sizeof(struct fat_arch) * iter;
+            struct fat_arch* arch = (struct fat_arch*)header_offset;
+            if(DO_SWAP(should_swap, arch->cputype) == CPU_TYPE_X86_64) {
+                mach64_offset = (ptrdiff_t)DO_SWAP(should_swap, arch->offset);
+                break;
+            }
+        }
+        if (mach64_offset == 0) {
+            printError(format("Could not find any mach64 blobs in file ‘%1%’, continuing...") % filename);
+            return std::set<string>();
+        }
+    } else if (magic == MH_MAGIC_64 || magic == MH_CIGAM_64) {
+        mach64_offset = 0;
+    } else {
+        printError(format("Object file has unknown magic number ‘%1%’, skipping it...") % magic);
+        return std::set<string>();
+    }
+
+    ptrdiff_t mach_header_offset = (ptrdiff_t)obj + mach64_offset;
+    struct mach_header_64 *m_header = (struct mach_header_64 *)mach_header_offset;
+
+    bool should_swap = magic == MH_CIGAM_64;
+    ptrdiff_t cmd_offset = mach_header_offset + sizeof(struct mach_header_64);
+
+    std::set<string> libs;
+    for(uint32_t i = 0; i < DO_SWAP(should_swap, m_header->ncmds); i++) {
+        struct load_command *cmd = (struct load_command*)cmd_offset;
+        switch(DO_SWAP(should_swap, cmd->cmd)) {
+            case LC_LOAD_UPWARD_DYLIB:
+            case LC_LOAD_DYLIB:
+            case LC_REEXPORT_DYLIB:
+                libs.insert(findDylibName(should_swap, cmd_offset));
+                break;
+        }
+        cmd_offset += DO_SWAP(should_swap, cmd->cmdsize);
+    }
+
+    return libs;
+}
+
+bool isSymlink(const Path & path) {
+    struct stat st;
+    if(lstat(path.c_str(), &st))
+        throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+    return S_ISLNK(st.st_mode);
+}
+
+Path resolveSymlink(const Path & path) {
+    char buf[PATH_MAX];
+    ssize_t len = readlink(path.c_str(), buf, sizeof(buf) - 1);
+    if(len != -1) {
+        buf[len] = 0;
+        return Path(buf);
+    } else {
+        throw SysError(format("readlink('%1%')") % path);
+    }
+}
+
+std::set<string> resolveTree(const Path & path, PathSet & deps) {
+    std::set<string> results;
+    if(deps.find(path) != deps.end()) {
+        return std::set<string>();
+    }
+    deps.insert(path);
+    for (auto & lib : runResolver(path)) {
+        results.insert(lib);
+        for (auto & p : resolveTree(lib, deps)) {
+            results.insert(p);
+        }
+    }
+    return results;
+}
+
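+// Return the set of host paths reachable from ‘path’: the path itself, its
+// symlink chain and its dylib closure, consulting the on-disk cache first.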
+std::set<string> getPath(const Path & path) {
+    Path cacheFile = resolveCacheFile(path);
+    if(pathExists(cacheFile)) {
+        return readCacheFile(cacheFile);
+    }
+
+    std::set<string> deps;
+    std::set<string> paths;
+    paths.insert(path);
+
+    Path next_path = Path(path);
+    while(isSymlink(next_path)) {
+        next_path = resolveSymlink(next_path);
+        paths.insert(next_path);
+    }
+
+    for(auto & t : resolveTree(next_path, deps)) {
+        paths.insert(t);
+    }
+
+    writeCacheFile(cacheFile, paths);
+
+    return paths;
+}
+
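+// Called with a derivation path as its sole argument; reads the derivation's
+// __impureHostDeps and prints the closure of host paths that must be exposed
+// to the build.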
+int main(int argc, char ** argv) {
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+
+        struct utsname _uname;
+
+        uname(&_uname);
+
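+        // Cache results per machine/OS release, since the system libraries differ between them.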
+        auto cacheParentDir = (format("%1%/dependency-maps") % settings.nixStateDir).str();
+
+        cacheDir = (format("%1%/%2%-%3%-%4%")
+                % cacheParentDir
+                % _uname.machine
+                % _uname.sysname
+                % _uname.release).str();
+
+        mkdir(cacheParentDir.c_str(), 0755);
+        mkdir(cacheDir.c_str(), 0755);
+
+        if (argc < 2)
+            throw UsageError("syntax: resolve-system-dependencies <derivation path>");
+
+        auto store = openStore();
+
+        auto drv = store->derivationFromPath(Path(argv[1]));
+        Strings impurePaths = tokenizeString<Strings>(get(drv.env, "__impureHostDeps"));
+
+        std::set<string> all_paths;
+
+        for (auto & path : impurePaths) {
+            for(auto & p : getPath(path)) {
+                all_paths.insert(p);
+            }
+        }
+
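+        // Output format: a header line, one path per line, terminated by an
+        // empty line (as expected by the pre-build hook mechanism).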
+        std::cout << "extra-chroot-dirs" << std::endl;
+        for(auto & path : all_paths) {
+            std::cout << path << std::endl;
+        }
+        std::cout << std::endl;
+    });
+}
diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh
index 96cab6ad4269..4ce428f643e5 100644
--- a/tests/binary-cache.sh
+++ b/tests/binary-cache.sh
@@ -6,7 +6,7 @@ clearCache
 # Create the binary cache.
 outPath=$(nix-build dependencies.nix --no-out-link)
 
-nix-push --dest $cacheDir $outPath
+nix copy --recursive --to file://$cacheDir $outPath
 
 
 basicTests() {
@@ -58,7 +58,7 @@ unset _NIX_FORCE_HTTP_BINARY_CACHE_STORE
 # Test whether Nix notices if the NAR doesn't match the hash in the NAR info.
 clearStore
 
-nar=$(ls $cacheDir/*.nar.xz | head -n1)
+nar=$(ls $cacheDir/nar/*.nar.xz | head -n1)
 mv $nar $nar.good
 mkdir -p $TEST_ROOT/empty
 nix-store --dump $TEST_ROOT/empty | xz > $nar
@@ -117,7 +117,7 @@ badKey="$(cat $TEST_ROOT/pk2)"
 res=($(nix-store --generate-binary-cache-key foo.nixos.org-1 $TEST_ROOT/sk3 $TEST_ROOT/pk3))
 otherKey="$(cat $TEST_ROOT/pk3)"
 
-nix-push --dest $cacheDir --key-file $TEST_ROOT/sk1 $outPath
+nix copy --recursive --to file://$cacheDir?secret-key=$TEST_ROOT/sk1 $outPath
 
 
 # Downloading should fail if we don't provide a key.
diff --git a/tests/common.sh.in b/tests/common.sh.in
index 097d94bb1eea..316d5f6896bb 100644
--- a/tests/common.sh.in
+++ b/tests/common.sh.in
@@ -12,7 +12,6 @@ fi
 export NIX_LOCALSTATE_DIR=$TEST_ROOT/var
 export NIX_LOG_DIR=$TEST_ROOT/var/log/nix
 export NIX_STATE_DIR=$TEST_ROOT/var/nix
-export NIX_DB_DIR=$TEST_ROOT/db
 export NIX_CONF_DIR=$TEST_ROOT/etc
 export NIX_MANIFESTS_DIR=$TEST_ROOT/var/nix/manifests
 export _NIX_TEST_SHARED=$TEST_ROOT/shared
@@ -51,12 +50,10 @@ clearStore() {
     chmod -R +w "$NIX_STORE_DIR"
     rm -rf "$NIX_STORE_DIR"
     mkdir "$NIX_STORE_DIR"
-    rm -rf "$NIX_DB_DIR"
-    mkdir "$NIX_DB_DIR"
+    rm -rf "$NIX_STATE_DIR"
+    mkdir "$NIX_STATE_DIR"
     nix-store --init
     clearProfiles
-    rm -f "$NIX_STATE_DIR"/gcroots/auto/*
-    rm -f "$NIX_STATE_DIR"/gcroots/ref
 }
 
 clearCache() {
diff --git a/tests/config.nix b/tests/config.nix
index 6244a15fa48a..76388fdd5b95 100644
--- a/tests/config.nix
+++ b/tests/config.nix
@@ -13,7 +13,7 @@ rec {
     derivation ({
       inherit system;
       builder = shell;
-      args = ["-e" args.builder];
+      args = ["-e" args.builder or (builtins.toFile "builder.sh" "eval \"$buildCommand\"")];
       PATH = path;
     } // removeAttrs args ["builder" "meta"])
     // { meta = args.meta or {}; };
diff --git a/tests/dump-db.sh b/tests/dump-db.sh
index 57c8c401600d..d6eea42aa04e 100644
--- a/tests/dump-db.sh
+++ b/tests/dump-db.sh
@@ -8,8 +8,7 @@ deps="$(nix-store -qR $TEST_ROOT/result)"
 
 nix-store --dump-db > $TEST_ROOT/dump
 
-rm -rf $NIX_DB_DIR
-mkdir $NIX_DB_DIR
+rm -rf $NIX_STATE_DIR/db
 
 nix-store --load-db < $TEST_ROOT/dump
 
diff --git a/tests/gc-runtime.sh b/tests/gc-runtime.sh
index a44195756f52..4c5028005c57 100644
--- a/tests/gc-runtime.sh
+++ b/tests/gc-runtime.sh
@@ -10,7 +10,7 @@ esac
 set -m # enable job control, needed for kill
 
 profiles="$NIX_STATE_DIR"/profiles
-rm -f $profiles/*
+rm -rf $profiles
 
 nix-env -p $profiles/test -f ./gc-runtime.nix -i gc-runtime
 
diff --git a/tests/init.sh b/tests/init.sh
index 5be999e4b1be..4571b75b859e 100644
--- a/tests/init.sh
+++ b/tests/init.sh
@@ -1,7 +1,5 @@
 source common.sh
 
-echo "NIX_STORE_DIR=$NIX_STORE_DIR NIX_DB_DIR=$NIX_DB_DIR"
-
 test -n "$TEST_ROOT"
 if test -d "$TEST_ROOT"; then
     chmod -R u+w "$TEST_ROOT"
@@ -13,7 +11,6 @@ mkdir "$NIX_STORE_DIR"
 mkdir "$NIX_LOCALSTATE_DIR"
 mkdir -p "$NIX_LOG_DIR"/drvs
 mkdir "$NIX_STATE_DIR"
-mkdir "$NIX_DB_DIR"
 mkdir "$NIX_CONF_DIR"
 
 cat > "$NIX_CONF_DIR"/nix.conf <<EOF
@@ -28,6 +25,6 @@ EOF
 nix-store --init
 
 # Did anything happen?
-test -e "$NIX_DB_DIR"/db.sqlite
+test -e "$NIX_STATE_DIR"/db/db.sqlite
 
 echo 'Hello World' > ./dummy
diff --git a/tests/install-package.sh b/tests/install-package.sh
deleted file mode 100644
index 1916f72713e2..000000000000
--- a/tests/install-package.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-source common.sh
-
-drvPath=$(nix-instantiate ./dependencies.nix)
-outPath=$(nix-store -r $drvPath)
-nix-push --dest $cacheDir $outPath
-
-clearStore
-clearProfiles
-
-cat > $TEST_ROOT/foo.nixpkg <<EOF
-NIXPKG1 - simple $system $drvPath $outPath file://$cacheDir
-EOF
-
-nix-install-package --non-interactive -p $profiles/test $TEST_ROOT/foo.nixpkg
-test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 1
-
-clearProfiles
-
-nix-install-package --non-interactive -p $profiles/test --url file://$TEST_ROOT/foo.nixpkg
-test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 1
diff --git a/tests/lang/eval-okay-partition.exp b/tests/lang/eval-okay-partition.exp
new file mode 100644
index 000000000000..cd8b8b020c05
--- /dev/null
+++ b/tests/lang/eval-okay-partition.exp
@@ -0,0 +1 @@
+{ right = [ 0 2 4 6 8 10 100 102 104 106 108 110 ]; wrong = [ 1 3 5 7 9 101 103 105 107 109 ]; }
diff --git a/tests/lang/eval-okay-partition.nix b/tests/lang/eval-okay-partition.nix
new file mode 100644
index 000000000000..846d2ce49486
--- /dev/null
+++ b/tests/lang/eval-okay-partition.nix
@@ -0,0 +1,5 @@
+with import ./lib.nix;
+
+builtins.partition
+  (x: x / 2 * 2 == x)
+  (builtins.concatLists [ (range 0 10) (range 100 110) ])
diff --git a/tests/local.mk b/tests/local.mk
index 7c5a553d39e0..2ca52144baee 100644
--- a/tests/local.mk
+++ b/tests/local.mk
@@ -3,14 +3,15 @@ check:
 
 nix_tests = \
   init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \
-  build-hook.sh nix-push.sh gc.sh gc-concurrent.sh \
+  build-hook.sh gc.sh gc-concurrent.sh \
   referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
-  gc-runtime.sh install-package.sh check-refs.sh filter-source.sh \
+  gc-runtime.sh check-refs.sh filter-source.sh \
   remote-store.sh export.sh export-graph.sh \
   timeout.sh secure-drv-outputs.sh nix-channel.sh \
   multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \
   binary-cache.sh nix-profile.sh repair.sh dump-db.sh case-hack.sh \
-  check-reqs.sh pass-as-file.sh tarball.sh restricted.sh
+  check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \
+  placeholders.sh
   # parallel.sh
 
 install-tests += $(foreach x, $(nix_tests), tests/$(x))
diff --git a/tests/nix-channel.sh b/tests/nix-channel.sh
index 2ec986dd415b..553ada51d9f7 100644
--- a/tests/nix-channel.sh
+++ b/tests/nix-channel.sh
@@ -15,7 +15,7 @@ nix-channel --remove xyzzy
 # Create a channel.
 rm -rf $TEST_ROOT/foo
 mkdir -p $TEST_ROOT/foo
-nix-push --dest $TEST_ROOT/foo --manifest --bzip2 $(nix-store -r $(nix-instantiate dependencies.nix))
+nix copy --recursive --to file://$TEST_ROOT/foo?compression="bzip2" $(nix-store -r $(nix-instantiate dependencies.nix))
 rm -rf $TEST_ROOT/nixexprs
 mkdir -p $TEST_ROOT/nixexprs
 cp config.nix dependencies.nix dependencies.builder*.sh $TEST_ROOT/nixexprs/
diff --git a/tests/nix-push.sh b/tests/nix-push.sh
deleted file mode 100644
index 8ea59516c62c..000000000000
--- a/tests/nix-push.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-source common.sh
-
-clearStore
-
-drvPath=$(nix-instantiate dependencies.nix)
-outPath=$(nix-store -r $drvPath)
-
-echo "pushing $drvPath"
-
-mkdir -p $TEST_ROOT/cache
-
-nix-push --dest $TEST_ROOT/cache --manifest $drvPath --bzip2
diff --git a/tests/placeholders.sh b/tests/placeholders.sh
new file mode 100644
index 000000000000..071cfe2dc893
--- /dev/null
+++ b/tests/placeholders.sh
@@ -0,0 +1,22 @@
+source common.sh
+
+clearStore
+
+nix-build --no-out-link -E '
+  with import ./config.nix;
+
+  mkDerivation {
+    name = "placeholders";
+    outputs = [ "out" "bin" "dev" ];
+    buildCommand = "
+      echo foo1 > $out
+      echo foo2 > $bin
+      echo foo3 > $dev
+      [[ $(cat ${placeholder "out"}) = foo1 ]]
+      [[ $(cat ${placeholder "bin"}) = foo2 ]]
+      [[ $(cat ${placeholder "dev"}) = foo3 ]]
+    ";
+  }
+'
+
+echo XYZZY
diff --git a/tests/referrers.sh b/tests/referrers.sh
index 5c1ef20cfeb4..8ab8e5ddfe87 100644
--- a/tests/referrers.sh
+++ b/tests/referrers.sh
@@ -30,7 +30,7 @@ echo "collecting garbage..."
 ln -sfn $reference "$NIX_STATE_DIR"/gcroots/ref
 nix-store --gc
 
-if [ -n "$(type -p sqlite3)" -a "$(sqlite3 $NIX_DB_DIR/db.sqlite 'select count(*) from Refs')" -ne 0 ]; then
+if [ -n "$(type -p sqlite3)" -a "$(sqlite3 $NIX_STATE_DIR/db/db.sqlite 'select count(*) from Refs')" -ne 0 ]; then
     echo "referrers not cleaned up"
     exit 1
 fi
diff --git a/tests/remote-store.sh b/tests/remote-store.sh
index b3908717a40e..f2f2806d022d 100644
--- a/tests/remote-store.sh
+++ b/tests/remote-store.sh
@@ -4,7 +4,7 @@ clearStore
 
 startDaemon
 
-$SHELL ./user-envs.sh
+storeCleared=1 $SHELL ./user-envs.sh
 
 nix-store --dump-db > $TEST_ROOT/d1
 NIX_REMOTE= nix-store --dump-db > $TEST_ROOT/d2
diff --git a/tests/repair.sh b/tests/repair.sh
index 92f2f8fe60a5..782838704da7 100644
--- a/tests/repair.sh
+++ b/tests/repair.sh
@@ -46,7 +46,7 @@ fi
 # --verify can fix it.
 clearCache
 
-nix-push --dest $cacheDir $path
+nix copy --recursive --to file://$cacheDir $path
 
 chmod u+w $path2
 rm -rf $path2
diff --git a/tests/tarball.sh b/tests/tarball.sh
index 329e73b91696..ba534c6261ad 100644
--- a/tests/tarball.sh
+++ b/tests/tarball.sh
@@ -15,11 +15,11 @@ tarball=$TEST_ROOT/tarball.tar.xz
 
 nix-env -f file://$tarball -qa --out-path | grep -q dependencies
 
-nix-build -o $TMPDIR/result file://$tarball
+nix-build -o $TEST_ROOT/result file://$tarball
 
-nix-build -o $TMPDIR/result '<foo>' -I foo=file://$tarball
+nix-build -o $TEST_ROOT/result '<foo>' -I foo=file://$tarball
 
-nix-build -o $TMPDIR/result -E "import (fetchTarball file://$tarball)"
+nix-build -o $TEST_ROOT/result -E "import (fetchTarball file://$tarball)"
 
 nix-instantiate --eval -E '1 + 2' -I fnord=file://no-such-tarball.tar.xz
 nix-instantiate --eval -E 'with <fnord/xyzzy>; 1 + 2' -I fnord=file://no-such-tarball.tar.xz
diff --git a/tests/user-envs.sh b/tests/user-envs.sh
index 526c1267cefe..c4192fdc59b2 100644
--- a/tests/user-envs.sh
+++ b/tests/user-envs.sh
@@ -1,6 +1,9 @@
 source common.sh
 
-clearStore
+if [ -z "$storeCleared" ]; then
+    clearStore
+fi
+
 clearProfiles
 
 # Query installed: should be empty.