-rw-r--r--  Makefile | 2
-rw-r--r--  Makefile.config.in | 2
-rw-r--r--  README.md | 2
-rw-r--r--  configure.ac | 4
-rw-r--r--  corepkgs/fetchurl.nix | 14
-rw-r--r--  corepkgs/unpack-channel.nix | 4
-rw-r--r--  doc/manual/command-ref/conf-file.xml | 60
-rw-r--r--  doc/manual/command-ref/nix-store.xml | 7
-rw-r--r--  doc/manual/command-ref/opt-common.xml | 5
-rw-r--r--  doc/manual/expressions/builtins.xml | 15
-rw-r--r--  doc/manual/release-notes/rl-1.8.xml | 4
-rw-r--r--  misc/docker/Dockerfile | 2
-rw-r--r--  misc/launchd/org.nixos.nix-daemon.plist.in | 2
-rw-r--r--  nix.spec.in | 4
-rw-r--r--  perl/lib/Nix/Config.pm.in | 10
-rw-r--r--  perl/lib/Nix/Store.xs | 1
-rw-r--r--  release.nix | 38
-rw-r--r--  shell.nix | 3
-rw-r--r--  src/build-remote/build-remote.cc | 109
-rw-r--r--  src/libexpr/json-to-value.cc | 1
-rw-r--r--  src/libexpr/primops.cc | 2
-rw-r--r--  src/libexpr/primops/fetchgit.cc | 10
-rw-r--r--  src/libexpr/symbol-table.hh | 2
-rw-r--r--  src/libexpr/value.hh | 3
-rw-r--r--  src/libmain/shared.cc | 14
-rw-r--r--  src/libmain/stack.cc | 2
-rw-r--r--  src/libstore/binary-cache-store.cc | 33
-rw-r--r--  src/libstore/binary-cache-store.hh | 6
-rw-r--r--  src/libstore/build.cc | 94
-rw-r--r--  src/libstore/builtins.cc | 10
-rw-r--r--  src/libstore/builtins.hh | 2
-rw-r--r--  src/libstore/crypto.cc | 4
-rw-r--r--  src/libstore/derivations.cc | 12
-rw-r--r--  src/libstore/download.cc | 78
-rw-r--r--  src/libstore/download.hh | 5
-rw-r--r--  src/libstore/export-import.cc | 34
-rw-r--r--  src/libstore/globals.cc | 61
-rw-r--r--  src/libstore/globals.hh | 15
-rw-r--r--  src/libstore/http-binary-cache-store.cc | 4
-rw-r--r--  src/libstore/legacy-ssh-store.cc | 103
-rw-r--r--  src/libstore/local-binary-cache-store.cc | 8
-rw-r--r--  src/libstore/local-fs-store.cc | 44
-rw-r--r--  src/libstore/local-store.cc | 11
-rw-r--r--  src/libstore/local-store.hh | 5
-rw-r--r--  src/libstore/nar-info-disk-cache.cc | 219
-rw-r--r--  src/libstore/nar-info.cc | 8
-rw-r--r--  src/libstore/optimise-store.cc | 2
-rw-r--r--  src/libstore/remote-store.cc | 56
-rw-r--r--  src/libstore/remote-store.hh | 10
-rw-r--r--  src/libstore/s3-binary-cache-store.cc | 178
-rw-r--r--  src/libstore/s3.hh | 33
-rw-r--r--  src/libstore/sqlite.cc | 72
-rw-r--r--  src/libstore/sqlite.hh | 6
-rw-r--r--  src/libstore/ssh-store.cc | 85
-rw-r--r--  src/libstore/ssh.cc | 102
-rw-r--r--  src/libstore/ssh.hh | 49
-rw-r--r--  src/libstore/store-api.cc | 53
-rw-r--r--  src/libstore/store-api.hh | 27
-rw-r--r--  src/libutil/archive.cc | 2
-rw-r--r--  src/libutil/archive.hh | 7
-rw-r--r--  src/libutil/compression.cc | 63
-rw-r--r--  src/libutil/compression.hh | 2
-rw-r--r--  src/libutil/hash.cc | 6
-rw-r--r--  src/libutil/istringstream_nocopy.hh | 92
-rw-r--r--  src/libutil/local.mk | 2
-rw-r--r--  src/libutil/logging.hh | 1
-rw-r--r--  src/libutil/pool.hh | 8
-rw-r--r--  src/libutil/serialise.cc | 49
-rw-r--r--  src/libutil/serialise.hh | 63
-rw-r--r--  src/libutil/types.hh | 1
-rw-r--r--  src/libutil/util.cc | 50
-rw-r--r--  src/libutil/util.hh | 10
-rwxr-xr-x  src/nix-build/nix-build.cc | 10
-rwxr-xr-x  src/nix-copy-closure/nix-copy-closure.cc | 10
-rw-r--r--  src/nix-daemon/nix-daemon.cc | 46
-rw-r--r--  src/nix-prefetch-url/nix-prefetch-url.cc | 4
-rw-r--r--  src/nix-store/local.mk | 2
-rw-r--r--  src/nix-store/nix-store.cc | 64
-rw-r--r--  src/nix/command.cc | 7
-rw-r--r--  src/nix/command.hh | 1
-rw-r--r--  src/nix/copy.cc | 12
-rw-r--r--  src/nix/log.cc | 57
-rw-r--r--  tests/binary-cache.sh | 10
-rw-r--r--  tests/nix-shell.sh | 1
-rw-r--r--  tests/remote-builds.nix | 2
-rw-r--r--  tests/repair.sh | 4
-rw-r--r--  tests/shell.nix | 1
87 files changed, 1483 insertions(+), 855 deletions(-)
diff --git a/Makefile b/Makefile
index 960685b8f48c..40ac4e72dbc3 100644
--- a/Makefile
+++ b/Makefile
@@ -27,7 +27,7 @@ makefiles = \
   doc/manual/local.mk \
   tests/local.mk
 
-GLOBAL_CXXFLAGS += -std=c++14 -g -Wall
+GLOBAL_CXXFLAGS += -std=c++14 -g -Wall -include config.h
 
 -include Makefile.config
 
diff --git a/Makefile.config.in b/Makefile.config.in
index e2277c667ec4..53dca1fcf10a 100644
--- a/Makefile.config.in
+++ b/Makefile.config.in
@@ -14,7 +14,7 @@ LIBLZMA_LIBS = @LIBLZMA_LIBS@
 SQLITE3_LIBS = @SQLITE3_LIBS@
 bash = @bash@
 bindir = @bindir@
-curl = @curl@
+bro = @bro@
 datadir = @datadir@
 datarootdir = @datarootdir@
 docdir = @docdir@
diff --git a/README.md b/README.md
index 1eb73b256f55..3173c6c448a9 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 Nix, the purely functional package manager
 ------------------------------------------
 
-Nix is a new take on package management that is fairly unique. Because of it's
+Nix is a new take on package management that is fairly unique. Because of its
 purity aspects, a lot of issues found in traditional package managers don't
 appear with Nix.
 
diff --git a/configure.ac b/configure.ac
index 5d5f1d2be79b..3e6a894e3b10 100644
--- a/configure.ac
+++ b/configure.ac
@@ -114,7 +114,6 @@ if test -z "$$1"; then
 fi
 ])
 
-NEED_PROG(curl, curl)
 NEED_PROG(bash, bash)
 NEED_PROG(patch, patch)
 AC_PATH_PROG(xmllint, xmllint, false)
@@ -128,6 +127,7 @@ NEED_PROG(gzip, gzip)
 NEED_PROG(xz, xz)
 AC_PATH_PROG(dot, dot)
 AC_PATH_PROG(pv, pv, pv)
+AC_PATH_PROG(bro, bro, bro)
 
 
 NEED_PROG(cat, cat)
@@ -213,7 +213,7 @@ AC_CHECK_FUNCS([setresuid setreuid lchown])
 
 
 # Nice to have, but not essential.
-AC_CHECK_FUNCS([strsignal posix_fallocate nanosleep sysconf])
+AC_CHECK_FUNCS([strsignal posix_fallocate sysconf])
 
 
 # This is needed if bzip2 is a static library, and the Nix libraries
diff --git a/corepkgs/fetchurl.nix b/corepkgs/fetchurl.nix
index 042705b1abb0..62359433971d 100644
--- a/corepkgs/fetchurl.nix
+++ b/corepkgs/fetchurl.nix
@@ -1,24 +1,20 @@
 { system ? builtins.currentSystem
 , url
-, outputHash ? ""
-, outputHashAlgo ? ""
 , md5 ? "", sha1 ? "", sha256 ? ""
+, outputHash ?
+    if sha1 != "" then sha1 else if md5 != "" then md5 else sha256
+, outputHashAlgo ?
+    if sha1 != "" then "sha1" else if md5 != "" then "md5" else "sha256"
 , executable ? false
 , unpack ? false
 , name ? baseNameOf (toString url)
 }:
 
-assert (outputHash != "" && outputHashAlgo != "")
-    || md5 != "" || sha1 != "" || sha256 != "";
-
 derivation {
   builder = "builtin:fetchurl";
 
   # New-style output content requirements.
-  outputHashAlgo = if outputHashAlgo != "" then outputHashAlgo else
-      if sha256 != "" then "sha256" else if sha1 != "" then "sha1" else "md5";
-  outputHash = if outputHash != "" then outputHash else
-      if sha256 != "" then sha256 else if sha1 != "" then sha1 else md5;
+  inherit outputHashAlgo outputHash;
   outputHashMode = if unpack || executable then "recursive" else "flat";
 
   inherit name system url executable unpack;
diff --git a/corepkgs/unpack-channel.nix b/corepkgs/unpack-channel.nix
index 9445532ded03..a654db40e62a 100644
--- a/corepkgs/unpack-channel.nix
+++ b/corepkgs/unpack-channel.nix
@@ -15,7 +15,9 @@ let
       else
         ${bzip2} -d < $src | ${tar} xf - ${tarFlags}
       fi
-      mv * $out/$channelName
+      if [ * != $channelName ]; then
+        mv * $out/$channelName
+      fi
       if [ -n "$binaryCacheURL" ]; then
         mkdir $out/binary-caches
         echo -n "$binaryCacheURL" > $out/binary-caches/$channelName
diff --git a/doc/manual/command-ref/conf-file.xml b/doc/manual/command-ref/conf-file.xml
index 6c0af39ecda9..6952829e8f71 100644
--- a/doc/manual/command-ref/conf-file.xml
+++ b/doc/manual/command-ref/conf-file.xml
@@ -101,9 +101,9 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
 
     <listitem><para>This option defines the maximum number of jobs
     that Nix will try to build in parallel.  The default is
-    <literal>1</literal>.  You should generally set it to the number
-    of CPUs in your system (e.g., <literal>2</literal> on an Athlon 64
-    X2).  It can be overridden using the <option
+    <literal>1</literal>. The special value <literal>auto</literal>
+    causes Nix to use the number of CPUs in your system.  It can be
+    overridden using the <option
     linkend='opt-max-jobs'>--max-jobs</option> (<option>-j</option>)
     command line switch.</para></listitem>
 
@@ -394,9 +394,10 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
 
   <varlistentry><term><literal>signed-binary-caches</literal></term>
 
-    <listitem><para>If set to <literal>*</literal>, Nix will only
-    download binaries if they are signed using one of the keys listed
-    in <option>binary-cache-public-keys</option>.</para></listitem>
+    <listitem><para>If set to <literal>*</literal> (the default), Nix
+    will only download binaries if they are signed using one of the
+    keys listed in <option>binary-cache-public-keys</option>. Set to
+    the empty string to disable signature checking.</para></listitem>
 
   </varlistentry>
 
@@ -430,6 +431,29 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
   </varlistentry>
 
 
+  <varlistentry><term><literal>netrc-file</literal></term>
+
+    <listitem><para>If set to an absolute path to a <filename>netrc</filename>
+    file, Nix will use the HTTP authentication credentials in this file when
+    trying to download from a remote host through HTTP or HTTPS. Defaults to
+    <filename>$NIX_CONF_DIR/netrc</filename>.</para>
+
+    <para>The <filename>netrc</filename> file consists of a list of
+    accounts in the following format:
+
+<screen>
+machine <replaceable>my-machine</replaceable>
+login <replaceable>my-username</replaceable>
+password <replaceable>my-password</replaceable>
+</screen>
+
+    For the exact syntax, see <link
+    xlink:href="https://ec.haxx.se/usingcurl-netrc.html">the
+    <literal>curl</literal> documentation.</link></para></listitem>
+
+  </varlistentry>
+
+
   <varlistentry><term><literal>system</literal></term>
 
     <listitem><para>This option specifies the canonical Nix system
@@ -489,20 +513,6 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
   </varlistentry>
 
 
-  <varlistentry xml:id="conf-log-servers"><term><literal>log-servers</literal></term>
-
-    <listitem>
-
-      <para>A list of URL prefixes (such as
-      <literal>http://hydra.nixos.org/log</literal>) from which
-      <command>nix-store -l</command> will try to fetch build logs if
-      they’re not available locally.</para>
-
-    </listitem>
-
-  </varlistentry>
-
-
   <varlistentry xml:id="conf-trusted-users"><term><literal>trusted-users</literal></term>
 
     <listitem>
@@ -621,6 +631,16 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
   </varlistentry>
 
 
+  <varlistentry xml:id="conf-allow-import-from-derivation"><term><literal>allow-import-from-derivation</literal></term>
+
+    <listitem><para>By default, Nix allows you to <function>import</function> from a derivation,
+    which means that builds can be triggered at evaluation time. With this option set to false,
+    Nix will instead throw an error when evaluating an expression that uses this feature, so
+    users can be sure that their evaluation will not require any builds to take place.</para></listitem>
+
+  </varlistentry>
+
+
 </variablelist>
 
 </para>
diff --git a/doc/manual/command-ref/nix-store.xml b/doc/manual/command-ref/nix-store.xml
index 0f6172defb38..fb017b741da9 100644
--- a/doc/manual/command-ref/nix-store.xml
+++ b/doc/manual/command-ref/nix-store.xml
@@ -1236,12 +1236,7 @@ the store path is used.</para>
 <filename>/nix/var/log/nix/drvs</filename>.  However, there is no
 guarantee that a build log is available for any particular store path.
 For instance, if the path was downloaded as a pre-built binary through
-a substitute, then the log is unavailable. If the log is not available
-locally, then <command>nix-store</command> will try to download the
-log from the servers specified in the Nix option
-<option>log-servers</option>. For example, if it’s set to
-<literal>http://hydra.nixos.org/log</literal>, then Nix will check
-<literal>http://hydra.nixos.org/log/<replaceable>base-name</replaceable></literal>.</para>
+a substitute, then the log is unavailable.</para>
 
 </refsection>
 
diff --git a/doc/manual/command-ref/opt-common.xml b/doc/manual/command-ref/opt-common.xml
index 2a076877a1b4..2aa41c4d4389 100644
--- a/doc/manual/command-ref/opt-common.xml
+++ b/doc/manual/command-ref/opt-common.xml
@@ -93,8 +93,9 @@
   <term><option>-j</option></term>
 
   <listitem><para>Sets the maximum number of build jobs that Nix will
-  perform in parallel to the specified number.  The default is
-  specified by the <link
+  perform in parallel to the specified number.  Specify
+  <literal>auto</literal> to use the number of CPUs in the system.
+  The default is specified by the <link
   linkend='conf-build-max-jobs'><literal>build-max-jobs</literal></link>
   configuration setting, which itself defaults to
   <literal>1</literal>.  A higher value is useful on SMP systems or to
diff --git a/doc/manual/expressions/builtins.xml b/doc/manual/expressions/builtins.xml
index 063bc04be483..e9baff65961a 100644
--- a/doc/manual/expressions/builtins.xml
+++ b/doc/manual/expressions/builtins.xml
@@ -1023,10 +1023,17 @@ in foo</programlisting>
 
     <listitem><para>Convert the expression
     <replaceable>e</replaceable> to a string.
-    <replaceable>e</replaceable> can be a string (in which case
-    <function>toString</function> is a no-op), a path (e.g.,
-    <literal>toString /foo/bar</literal> yields
-    <literal>"/foo/bar"</literal> or a set containing <literal>{ __toString = self: ...; }</literal>.</para></listitem>
+    <replaceable>e</replaceable> can be:</para>
+    <itemizedlist>
+      <listitem><para>A string (in which case the string is returned unmodified).</para></listitem>
+      <listitem><para>A path (e.g., <literal>toString /foo/bar</literal> yields <literal>"/foo/bar"</literal>).</para></listitem>
+      <listitem><para>A set containing <literal>{ __toString = self: ...; }</literal>.</para></listitem>
+      <listitem><para>An integer.</para></listitem>
+      <listitem><para>A list, in which case the string representations of its elements are joined with spaces.</para></listitem>
+      <listitem><para>A Boolean (<literal>false</literal> yields <literal>""</literal>, <literal>true</literal> yields <literal>"1"</literal>).</para></listitem>
+      <listitem><para><literal>null</literal>, which yields the empty string.</para></listitem>
+    </itemizedlist>
+    </listitem>
 
   </varlistentry>
 
diff --git a/doc/manual/release-notes/rl-1.8.xml b/doc/manual/release-notes/rl-1.8.xml
index 48caac2c6b60..c854c5c5f850 100644
--- a/doc/manual/release-notes/rl-1.8.xml
+++ b/doc/manual/release-notes/rl-1.8.xml
@@ -83,8 +83,8 @@ $ nix-store -l $(which xterm)
   caches).</para></listitem>
 
   <listitem><para>The configuration option
-  <option>build-max-jobs</option> now defaults to the number of
-  available CPU cores.</para></listitem>
+  <option>build-cores</option> now defaults to the number of available
+  CPU cores.</para></listitem>
 
   <listitem><para>Build users are now used by default when Nix is
   invoked as root. This prevents builds from accidentally running as
diff --git a/misc/docker/Dockerfile b/misc/docker/Dockerfile
index 7b2865c946d3..85bd32e199a9 100644
--- a/misc/docker/Dockerfile
+++ b/misc/docker/Dockerfile
@@ -1,6 +1,6 @@
 FROM alpine
 
-RUN wget -O- http://nixos.org/releases/nix/nix-1.11.2/nix-1.11.2-x86_64-linux.tar.bz2 | bzcat - | tar xf - \
+RUN wget -O- http://nixos.org/releases/nix/nix-1.11.7/nix-1.11.7-x86_64-linux.tar.bz2 | bzcat - | tar xf - \
     && echo "nixbld:x:30000:nixbld1,nixbld2,nixbld3,nixbld4,nixbld5,nixbld6,nixbld7,nixbld8,nixbld9,nixbld10,nixbld11,nixbld12,nixbld13,nixbld14,nixbld15,nixbld16,nixbld17,nixbld18,nixbld19,nixbld20,nixbld21,nixbld22,nixbld23,nixbld24,nixbld25,nixbld26,nixbld27,nixbld28,nixbld29,nixbld30" >> /etc/group \
     && for i in $(seq 1 30); do echo "nixbld$i:x:$((30000 + $i)):30000:::" >> /etc/passwd; done \
     && mkdir -m 0755 /nix && USER=root sh nix-*-x86_64-linux/install \
diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in
index c5ef97ee9a3f..5d57a5ec8ff1 100644
--- a/misc/launchd/org.nixos.nix-daemon.plist.in
+++ b/misc/launchd/org.nixos.nix-daemon.plist.in
@@ -16,6 +16,8 @@
     <dict>
       <key>NIX_SSL_CERT_FILE</key>
       <string>/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt</string>
+      <key>XDG_CACHE_HOME</key>
+      <string>/root/.cache</string>
     </dict>
   </dict>
 </plist>
diff --git a/nix.spec.in b/nix.spec.in
index 401a2dc8a1f9..0c9b9ab20133 100644
--- a/nix.spec.in
+++ b/nix.spec.in
@@ -1,3 +1,5 @@
+%undefine _hardened_build
+
 %global nixbld_user "nix-builder-"
 %global nixbld_group "nixbld"
 
@@ -105,7 +107,7 @@ extraFlags=
 %configure --localstatedir=/nix/var \
            --docdir=%{_defaultdocdir}/%{name}-doc-%{version} \
            $extraFlags
-make %{?_smp_flags}
+make -j$NIX_BUILD_CORES -l$NIX_BUILD_CORES
 %{_emacs_bytecompile} misc/emacs/nix-mode.el
 
 
diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in
index 4f1dd9674917..f494e34a5e7b 100644
--- a/perl/lib/Nix/Config.pm.in
+++ b/perl/lib/Nix/Config.pm.in
@@ -19,10 +19,6 @@ $useBindings = 1;
 
 %config = ();
 
-%binaryCachePublicKeys = ();
-
-$defaultPublicKeys = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";
-
 sub readConfig {
     if (defined $ENV{'_NIX_OPTIONS'}) {
         foreach my $s (split '\n', $ENV{'_NIX_OPTIONS'}) {
@@ -40,12 +36,6 @@ sub readConfig {
         }
         close CONFIG;
     }
-
-    foreach my $s (split(/ /, $config{"binary-cache-public-keys"} // $defaultPublicKeys)) {
-        my ($keyName, $publicKey) = split ":", $s;
-        next unless defined $keyName && defined $publicKey;
-        $binaryCachePublicKeys{$keyName} = decode_base64($publicKey);
-    }
 }
 
 return 1;
diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs
index 6b137a13c414..f613e3df329a 100644
--- a/perl/lib/Nix/Store.xs
+++ b/perl/lib/Nix/Store.xs
@@ -26,7 +26,6 @@ static ref<Store> store()
     if (!_store) {
         try {
             logger = makeDefaultLogger();
-            settings.processEnvironment();
             settings.loadConfFile();
             settings.update();
             settings.lockCPU = false;
diff --git a/release.nix b/release.nix
index c0e1385e13f3..1a8d0927c10c 100644
--- a/release.nix
+++ b/release.nix
@@ -7,7 +7,7 @@ let
 
   pkgs = import <nixpkgs> {};
 
-  systems = [ "x86_64-linux" "i686-linux" "x86_64-darwin" /* "x86_64-freebsd" "i686-freebsd" */ ];
+  systems = [ "x86_64-linux" "i686-linux" "x86_64-darwin" ];
 
 
   jobs = rec {
@@ -24,7 +24,8 @@ let
         inherit officialRelease;
 
         buildInputs =
-          [ curl bison flex libxml2 libxslt bzip2 xz
+          [ curl bison flex libxml2 libxslt
+            bzip2 xz brotli
             pkgconfig sqlite libsodium boehmgc
             docbook5 docbook5_xsl
             autoconf-archive
@@ -70,9 +71,12 @@ let
         src = tarball;
 
         buildInputs =
-          [ curl bzip2 xz openssl pkgconfig sqlite boehmgc ]
-          ++ lib.optional stdenv.isLinux libsodium
-          ++ lib.optional stdenv.isLinux
+          [ curl
+            bzip2 xz brotli
+            openssl pkgconfig sqlite boehmgc
+          ]
+          ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
+          ++ lib.optional (stdenv.isLinux || stdenv.isDarwin)
             (aws-sdk-cpp.override {
               apis = ["s3"];
               customMemoryManagement = false;
@@ -190,8 +194,8 @@ let
       };
 
 
-    rpm_fedora21i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora21i386) [ "libsodium-devel" ];
-    rpm_fedora21x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora21x86_64) [ "libsodium-devel" ];
+    rpm_fedora25i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora25i386) [ "libsodium-devel" ];
+    rpm_fedora25x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora25x86_64) [ "libsodium-devel" ];
 
 
     deb_debian8i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian8i386) [ "libsodium-dev" ] [ "libsodium13" ];
@@ -199,12 +203,10 @@ let
 
     deb_ubuntu1410i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1410i386) [] [];
     deb_ubuntu1410x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1410x86_64) [] [];
-    deb_ubuntu1504i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1504i386) [ "libsodium-dev" ] [ "libsodium13" ];
-    deb_ubuntu1504x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1504x86_64) [ "libsodium-dev" ] [ "libsodium13" ];
-    deb_ubuntu1510i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1510i386) [ "libsodium-dev" ] [ "libsodium13"];
-    deb_ubuntu1510x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1510x86_64) [ "libsodium-dev" ] [ "libsodium13" ];
     deb_ubuntu1604i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1604i386) [ "libsodium-dev" ] [ "libsodium18" ];
     deb_ubuntu1604x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1604x86_64) [ "libsodium-dev" ] [ "libsodium18" ];
+    deb_ubuntu1610i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1610i386) [ "libsodium-dev" ] [ "libsodium18" ];
+    deb_ubuntu1610x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1610x86_64) [ "libsodium-dev" ] [ "libsodium18" ];
 
 
     # System tests.
@@ -261,22 +263,18 @@ let
       meta.description = "Release-critical builds";
       constituents =
         [ tarball
-          #build.i686-freebsd
           build.i686-linux
           build.x86_64-darwin
-          #build.x86_64-freebsd
           build.x86_64-linux
-          #binaryTarball.i686-freebsd
           binaryTarball.i686-linux
           binaryTarball.x86_64-darwin
-          #binaryTarball.x86_64-freebsd
           binaryTarball.x86_64-linux
           deb_debian8i386
           deb_debian8x86_64
-          deb_ubuntu1504i386
-          deb_ubuntu1504x86_64
-          rpm_fedora21i386
-          rpm_fedora21x86_64
+          deb_ubuntu1604i386
+          deb_ubuntu1604x86_64
+          rpm_fedora25i386
+          rpm_fedora25x86_64
           tests.remoteBuilds
           tests.nix-copy-closure
           tests.binaryTarball
@@ -306,6 +304,7 @@ let
       memSize = 1024;
       meta.schedulingPriority = 50;
       postRPMInstall = "cd /tmp/rpmout/BUILD/nix-* && make installcheck";
+      #enableParallelBuilding = true;
     };
 
 
@@ -333,6 +332,7 @@ let
         ++ extraDebPackages;
       debMaintainer = "Eelco Dolstra <eelco.dolstra@logicblox.com>";
       doInstallCheck = true;
+      #enableParallelBuilding = true;
     };
 
 
diff --git a/shell.nix b/shell.nix
index 4c1608230cee..df0ad01df583 100644
--- a/shell.nix
+++ b/shell.nix
@@ -6,7 +6,8 @@ with import <nixpkgs> {};
   name = "nix";
 
   buildInputs =
-    [ curl bison flex perl libxml2 libxslt bzip2 xz
+    [ curl bison flex perl libxml2 libxslt
+      bzip2 xz brotli
       pkgconfig sqlite libsodium boehmgc
       docbook5 docbook5_xsl
       autoconf-archive
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc
index 2ce20882da17..d7aee288670a 100644
--- a/src/build-remote/build-remote.cc
+++ b/src/build-remote/build-remote.cc
@@ -17,13 +17,12 @@
 #include "derivations.hh"
 
 using namespace nix;
-using std::cerr;
 using std::cin;
 
-static void handle_alarm(int sig) {
+static void handleAlarm(int sig) {
 }
 
-class machine {
+class Machine {
     const std::set<string> supportedFeatures;
     const std::set<string> mandatoryFeatures;
 
@@ -31,8 +30,8 @@ public:
     const string hostName;
     const std::vector<string> systemTypes;
     const string sshKey;
-    const unsigned long long maxJobs;
-    const unsigned long long speedFactor;
+    const unsigned int maxJobs;
+    const unsigned int speedFactor;
     bool enabled;
 
     bool allSupported(const std::set<string> & features) const {
@@ -50,28 +49,29 @@ public:
             });
     }
 
-    machine(decltype(hostName) hostName,
+    Machine(decltype(hostName) hostName,
         decltype(systemTypes) systemTypes,
         decltype(sshKey) sshKey,
         decltype(maxJobs) maxJobs,
         decltype(speedFactor) speedFactor,
         decltype(supportedFeatures) supportedFeatures,
         decltype(mandatoryFeatures) mandatoryFeatures) :
-        supportedFeatures{std::move(supportedFeatures)},
-        mandatoryFeatures{std::move(mandatoryFeatures)},
-        hostName{std::move(hostName)},
-        systemTypes{std::move(systemTypes)},
-        sshKey{std::move(sshKey)},
-        maxJobs{std::move(maxJobs)},
-        speedFactor{speedFactor == 0 ? 1 : std::move(speedFactor)},
-        enabled{true} {};
+        supportedFeatures(supportedFeatures),
+        mandatoryFeatures(mandatoryFeatures),
+        hostName(hostName),
+        systemTypes(systemTypes),
+        sshKey(sshKey),
+        maxJobs(maxJobs),
+        speedFactor(std::max(1U, speedFactor)),
+        enabled(true)
+    {};
 };;
 
-static std::vector<machine> read_conf()
+static std::vector<Machine> readConf()
 {
     auto conf = getEnv("NIX_REMOTE_SYSTEMS", SYSCONFDIR "/nix/machines");
 
-    auto machines = std::vector<machine>{};
+    auto machines = std::vector<Machine>{};
     auto lines = std::vector<string>{};
     try {
         lines = tokenizeString<std::vector<string>>(readFile(conf), "\n");
@@ -87,10 +87,8 @@ static std::vector<machine> read_conf()
         }
         auto tokens = tokenizeString<std::vector<string>>(line);
         auto sz = tokens.size();
-        if (sz < 4) {
-            throw new FormatError(format("Bad machines.conf file %1%")
-                % conf);
-        }
+        if (sz < 4)
+            throw FormatError("bad machines.conf file ‘%1%’", conf);
         machines.emplace_back(tokens[0],
             tokenizeString<std::vector<string>>(tokens[1], ","),
             tokens[2],
@@ -108,7 +106,7 @@ static std::vector<machine> read_conf()
 
 static string currentLoad;
 
-static AutoCloseFD openSlotLock(const machine & m, unsigned long long slot)
+static AutoCloseFD openSlotLock(const Machine & m, unsigned long long slot)
 {
     std::ostringstream fn_stream(currentLoad, std::ios_base::ate | std::ios_base::out);
     fn_stream << "/";
@@ -126,15 +124,14 @@ int main (int argc, char * * argv)
 {
     return handleExceptions(argv[0], [&]() {
         initNix();
+
         /* Ensure we don't get any SSH passphrase or host key popups. */
         if (putenv(display_env) == -1 ||
-            putenv(ssh_env) == -1) {
-            throw SysError("Setting SSH env vars");
-        }
+            putenv(ssh_env) == -1)
+            throw SysError("setting SSH env vars");
 
-        if (argc != 4) {
+        if (argc != 4)
             throw UsageError("called without required arguments");
-        }
 
         auto store = openStore();
 
@@ -147,15 +144,14 @@ int main (int argc, char * * argv)
         std::shared_ptr<Store> sshStore;
         AutoCloseFD bestSlotLock;
 
-        auto machines = read_conf();
+        auto machines = readConf();
         string drvPath;
         string hostName;
         for (string line; getline(cin, line);) {
             auto tokens = tokenizeString<std::vector<string>>(line);
             auto sz = tokens.size();
-            if (sz != 3 && sz != 4) {
-                throw Error(format("invalid build hook line %1%") % line);
-            }
+            if (sz != 3 && sz != 4)
+                throw Error("invalid build hook line ‘%1%’", line);
             auto amWilling = tokens[0] == "1";
             auto neededSystem = tokens[1];
             drvPath = tokens[2];
@@ -174,7 +170,7 @@ int main (int argc, char * * argv)
 
                 bool rightType = false;
 
-                machine * bestMachine = nullptr;
+                Machine * bestMachine = nullptr;
                 unsigned long long bestLoad = 0;
                 for (auto & m : machines) {
                     if (m.enabled && std::find(m.systemTypes.begin(),
@@ -221,11 +217,10 @@ int main (int argc, char * * argv)
                 }
 
                 if (!bestSlotLock) {
-                    if (rightType && !canBuildLocally) {
-                        cerr << "# postpone\n";
-                    } else {
-                        cerr << "# decline\n";
-                    }
+                    if (rightType && !canBuildLocally)
+                        std::cerr << "# postpone\n";
+                    else
+                        std::cerr << "# decline\n";
                     break;
                 }
 
@@ -238,47 +233,51 @@ int main (int argc, char * * argv)
                 lock = -1;
 
                 try {
-                    sshStore = openStore("ssh://" + bestMachine->hostName + "?key=" + bestMachine->sshKey);
+                    sshStore = openStore("ssh-ng://" + bestMachine->hostName,
+                        { {"ssh-key", bestMachine->sshKey },
+                          {"max-connections", "1" } });
                     hostName = bestMachine->hostName;
                 } catch (std::exception & e) {
-                    cerr << e.what() << '\n';
-                    cerr << "unable to open SSH connection to ‘" << bestMachine->hostName << "’, trying other available machines...\n";
+                    printError("unable to open SSH connection to ‘%s’: %s; trying other available machines...",
+                        bestMachine->hostName, e.what());
                     bestMachine->enabled = false;
                     continue;
                 }
                 goto connected;
             }
         }
+
 connected:
-        cerr << "# accept\n";
+        std::cerr << "# accept\n";
         string line;
-        if (!getline(cin, line)) {
+        if (!getline(cin, line))
             throw Error("hook caller didn't send inputs");
-        }
-        auto inputs = tokenizeString<std::list<string>>(line);
-        if (!getline(cin, line)) {
+        auto inputs = tokenizeString<PathSet>(line);
+        if (!getline(cin, line))
             throw Error("hook caller didn't send outputs");
-        }
-        auto outputs = tokenizeString<Strings>(line);
+        auto outputs = tokenizeString<PathSet>(line);
         AutoCloseFD uploadLock = openLockFile(currentLoad + "/" + hostName + ".upload-lock", true);
-        auto old = signal(SIGALRM, handle_alarm);
+        auto old = signal(SIGALRM, handleAlarm);
         alarm(15 * 60);
-        if (!lockFile(uploadLock.get(), ltWrite, true)) {
-            cerr << "somebody is hogging the upload lock for " << hostName << ", continuing...\n";
-        }
+        if (!lockFile(uploadLock.get(), ltWrite, true))
+            printError("somebody is hogging the upload lock for ‘%s’, continuing...", hostName);
         alarm(0);
         signal(SIGALRM, old);
         copyPaths(store, ref<Store>(sshStore), inputs);
         uploadLock = -1;
 
-        cerr << "building ‘" << drvPath << "’ on ‘" << hostName << "’\n";
+        printError("building ‘%s’ on ‘%s’", drvPath, hostName);
         sshStore->buildDerivation(drvPath, readDerivation(drvPath));
 
-        std::remove_if(outputs.begin(), outputs.end(), [=](const Path & path) { return store->isValidPath(path); });
-        if (!outputs.empty()) {
-            setenv("NIX_HELD_LOCKS", concatStringsSep(" ", outputs).c_str(), 1); /* FIXME: ugly */
-            copyPaths(ref<Store>(sshStore), store, outputs);
+        PathSet missing;
+        for (auto & path : outputs)
+            if (!store->isValidPath(path)) missing.insert(path);
+
+        if (!missing.empty()) {
+            setenv("NIX_HELD_LOCKS", concatStringsSep(" ", missing).c_str(), 1); /* FIXME: ugly */
+            copyPaths(ref<Store>(sshStore), store, missing);
         }
+
         return;
     });
 }
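
Reviewer note: readConf() above expects each line of the machines file (NIX_REMOTE_SYSTEMS, default SYSCONFDIR/nix/machines) to carry at least four whitespace-separated fields: host name, comma-separated system types, SSH key path, and max jobs, optionally followed by speed factor and feature sets. A minimal standalone sketch of that field layout; the host name, key path, and values are made up for illustration and only the field order mirrors the code:

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    int main()
    {
        // Hypothetical machines line.
        std::string line = "nix@example.org x86_64-linux,i686-linux /etc/nix/key 4 2";

        std::istringstream in(line);
        std::vector<std::string> tokens;
        for (std::string t; in >> t; ) tokens.push_back(t);

        if (tokens.size() < 4) {
            std::cerr << "bad machines line\n";
            return 1;
        }

        std::cout << "host="    << tokens[0] << "\n"
                  << "systems=" << tokens[1] << "\n"
                  << "sshKey="  << tokens[2] << "\n"
                  << "maxJobs=" << tokens[3] << "\n";
        return 0;
    }
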
diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc
index f671802bcc24..c189cdef35e7 100644
--- a/src/libexpr/json-to-value.cc
+++ b/src/libexpr/json-to-value.cc
@@ -1,4 +1,3 @@
-#include "config.h"
 #include "json-to-value.hh"
 
 #include <cstring>
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index 5a570cefb2fa..93097f3d1bf3 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -59,6 +59,8 @@ void EvalState::realiseContext(const PathSet & context)
             drvs.insert(decoded.first + "!" + decoded.second);
     }
     if (!drvs.empty()) {
+        if (!settings.enableImportFromDerivation)
+            throw EvalError(format("attempted to realize ‘%1%’ during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin()));
         /* For performance, prefetch all substitute info. */
         PathSet willBuild, willSubstitute, unknown;
         unsigned long long downloadSize, narSize;
diff --git a/src/libexpr/primops/fetchgit.cc b/src/libexpr/primops/fetchgit.cc
index bd440c8c62ad..09e2c077baba 100644
--- a/src/libexpr/primops/fetchgit.cc
+++ b/src/libexpr/primops/fetchgit.cc
@@ -58,12 +58,14 @@ static void prim_fetchgit(EvalState & state, const Pos & pos, Value * * args, Va
 
         for (auto & attr : *args[0]->attrs) {
             string name(attr.name);
-            if (name == "url")
-                url = state.forceStringNoCtx(*attr.value, *attr.pos);
-            else if (name == "rev")
+            if (name == "url") {
+                PathSet context;
+                url = state.coerceToString(*attr.pos, *attr.value, context, false, false);
+                if (hasPrefix(url, "/")) url = "file://" + url;
+            } else if (name == "rev")
                 rev = state.forceStringNoCtx(*attr.value, *attr.pos);
             else
-                throw EvalError(format("unsupported argument ‘%1%’ to ‘fetchgit’, at %3%") % attr.name % attr.pos);
+                throw EvalError("unsupported argument ‘%s’ to ‘fetchgit’, at %s", attr.name, *attr.pos);
         }
 
         if (url.empty())
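
The new ‘url’ handling above coerces the attribute to a string and prefixes absolute paths with file://. A self-contained sketch of just that normalisation step (the helper name is made up, not part of Nix):

    #include <cassert>
    #include <string>

    // Hypothetical helper mirroring the hasPrefix(url, "/") check above.
    std::string normaliseGitUrl(std::string url)
    {
        if (!url.empty() && url[0] == '/') url = "file://" + url;
        return url;
    }

    int main()
    {
        assert(normaliseGitUrl("/home/alice/repo") == "file:///home/alice/repo");
        assert(normaliseGitUrl("https://example.org/repo.git")
               == "https://example.org/repo.git");
        return 0;
    }
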
diff --git a/src/libexpr/symbol-table.hh b/src/libexpr/symbol-table.hh
index 2fdf820211c8..c2ee49dd32fb 100644
--- a/src/libexpr/symbol-table.hh
+++ b/src/libexpr/symbol-table.hh
@@ -1,7 +1,5 @@
 #pragma once
 
-#include "config.h"
-
 #include <map>
 #include <unordered_set>
 
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
index 271e6a1b24a2..802e8ed2ee75 100644
--- a/src/libexpr/value.hh
+++ b/src/libexpr/value.hh
@@ -1,6 +1,5 @@
 #pragma once
 
-#include "config.h"
 #include "symbol-table.hh"
 
 #if HAVE_BOEHMGC
@@ -257,7 +256,7 @@ size_t valueSize(Value & v);
 
 #if HAVE_BOEHMGC
 typedef std::vector<Value *, gc_allocator<Value *> > ValueVector;
-typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<Value *> > ValueMap;
+typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<std::pair<const Symbol, Value *> > > ValueMap;
 #else
 typedef std::vector<Value *> ValueVector;
 typedef std::map<Symbol, Value *> ValueMap;
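
The ValueMap change above matters because std::map expects an allocator whose value_type is std::pair<const Key, T>, not T alone. A standalone illustration of the corrected form, using std::allocator as a stand-in for Boehm's gc_allocator:

    #include <map>
    #include <memory>
    #include <string>

    using Symbol = std::string;
    struct Value { };

    // The allocator's value_type must match the map's value_type,
    // i.e. std::pair<const Symbol, Value *>.
    using ValueMap = std::map<Symbol, Value *, std::less<Symbol>,
        std::allocator<std::pair<const Symbol, Value *>>>;

    int main()
    {
        ValueMap m;
        Value v;
        m["x"] = &v;
        return m.size() == 1 ? 0 : 1;
    }
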
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 52cb2312826b..a720afd6cdd4 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include "common-args.hh"
 #include "globals.hh"
 #include "shared.hh"
@@ -114,7 +112,6 @@ void initNix()
     opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks());
     CRYPTO_set_locking_callback(opensslLockCallback);
 
-    settings.processEnvironment();
     settings.loadConfFile();
 
     startSignalHandlerThread();
@@ -170,6 +167,10 @@ struct LegacyArgs : public MixCommonArgs
             settings.set("build-fallback", "true");
         });
 
+        mkFlag1('j', "max-jobs", "jobs", "maximum number of parallel builds", [=](std::string s) {
+            settings.set("build-max-jobs", s);
+        });
+
         auto intSettingAlias = [&](char shortName, const std::string & longName,
             const std::string & description, const std::string & dest) {
             mkFlag<unsigned int>(shortName, longName, description, [=](unsigned int n) {
@@ -177,7 +178,6 @@ struct LegacyArgs : public MixCommonArgs
             });
         };
 
-        intSettingAlias('j', "max-jobs", "maximum number of parallel builds", "build-max-jobs");
         intSettingAlias(0, "cores", "maximum number of CPU cores to use inside a build", "build-cores");
         intSettingAlias(0, "max-silent-time", "number of seconds of silence before a build is killed", "build-max-silent-time");
         intSettingAlias(0, "timeout", "number of seconds before a build is killed", "build-timeout");
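
Since the -j/--max-jobs flag is now passed through as a string, build-max-jobs can accept either a number or the special value ‘auto’ (documented above as the number of CPUs in the system). A hedged sketch of such a parser, not Nix's actual implementation:

    #include <stdexcept>
    #include <string>
    #include <thread>

    // Hypothetical parser: "auto" maps to the CPU count, anything else
    // must be a non-negative integer.
    unsigned int parseMaxJobs(const std::string & s)
    {
        if (s == "auto") {
            unsigned int n = std::thread::hardware_concurrency();
            return n == 0 ? 1 : n;   // hardware_concurrency() may return 0
        }
        return static_cast<unsigned int>(std::stoul(s));
    }

    int main()
    {
        return (parseMaxJobs("auto") >= 1 && parseMaxJobs("4") == 4) ? 0 : 1;
    }
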
@@ -332,11 +332,7 @@ RunPager::~RunPager()
             pid.wait();
         }
     } catch (...) {
-        try {
-            pid.kill(true);
-        } catch (...) {
-            ignoreException();
-        }
+        ignoreException();
     }
 }
 
diff --git a/src/libmain/stack.cc b/src/libmain/stack.cc
index abf59dc4baa6..57b6a197c0f0 100644
--- a/src/libmain/stack.cc
+++ b/src/libmain/stack.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include "types.hh"
 
 #include <cstring>
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 3e07a2aa2b60..25ad0d75b70a 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -97,7 +97,7 @@ void BinaryCacheStore::init()
 
     auto cacheInfo = getFile(cacheInfoFile);
     if (!cacheInfo) {
-        upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n");
+        upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info");
     } else {
         for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) {
             size_t colon = line.find(':');
@@ -224,7 +224,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str
             }
         }
 
-        upsertFile(storePathToHash(info.path) + ".ls.xz", *compress("xz", jsonOut.str()));
+        upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(), "application/json");
     }
 
     else {
@@ -250,10 +250,11 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str
     narInfo->url = "nar/" + printHash32(narInfo->fileHash) + ".nar"
         + (compression == "xz" ? ".xz" :
            compression == "bzip2" ? ".bz2" :
+           compression == "br" ? ".br" :
            "");
     if (repair || !fileExists(narInfo->url)) {
         stats.narWrite++;
-        upsertFile(narInfo->url, *narCompressed);
+        upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar");
     } else
         stats.narWriteAverted++;
 
@@ -264,7 +265,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str
     /* Atomically write the NAR info file.*/
     if (secretKey) narInfo->sign(*secretKey);
 
-    upsertFile(narInfoFile, narInfo->to_string());
+    upsertFile(narInfoFile, narInfo->to_string(), "text/x-nix-narinfo");
 
     auto hashPart = storePathToHash(narInfo->path);
 
@@ -382,4 +383,28 @@ ref<FSAccessor> BinaryCacheStore::getFSAccessor()
     return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
 }
 
+std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path & path)
+{
+    Path drvPath;
+
+    if (isDerivation(path))
+        drvPath = path;
+    else {
+        try {
+            auto info = queryPathInfo(path);
+            // FIXME: add a "Log" field to .narinfo
+            if (info->deriver == "") return nullptr;
+            drvPath = info->deriver;
+        } catch (InvalidPath &) {
+            return nullptr;
+        }
+    }
+
+    auto logPath = "log/" + baseNameOf(drvPath);
+
+    debug("fetching build log from binary cache ‘%s/%s’", getUri(), logPath);
+
+    return getFile(logPath);
+}
+
 }
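
The NAR URL above now encodes the compression method in the file extension (.xz, .bz2, .br), and every upsertFile() call passes an explicit MIME type. A small standalone sketch of that mapping; the extension table and MIME type are taken from the hunk above, the function itself is illustrative:

    #include <iostream>
    #include <string>
    #include <utility>

    // Returns {extension, MIME type} for a NAR compressed with `method`.
    std::pair<std::string, std::string> narFileInfo(const std::string & method)
    {
        std::string ext =
            method == "xz" ? ".xz" :
            method == "bzip2" ? ".bz2" :
            method == "br" ? ".br" :
            "";
        return { ext, "application/x-nix-nar" };
    }

    int main()
    {
        auto info = narFileInfo("br");
        std::cout << "nar/<hash>.nar" << info.first
                  << " (" << info.second << ")\n";
        return 0;
    }
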
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index a70d50d4949c..d42b1abd2455 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -31,7 +31,9 @@ public:
 
     virtual bool fileExists(const std::string & path) = 0;
 
-    virtual void upsertFile(const std::string & path, const std::string & data) = 0;
+    virtual void upsertFile(const std::string & path,
+        const std::string & data,
+        const std::string & mimeType) = 0;
 
     /* Return the contents of the specified file, or null if it
        doesn't exist. */
@@ -122,6 +124,8 @@ public:
     void addSignatures(const Path & storePath, const StringSet & sigs) override
     { notImpl(); }
 
+    std::shared_ptr<std::string> getBuildLog(const Path & path) override;
+
 };
 
 }
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index 5d6fff4e349f..fc840df81a56 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include "references.hh"
 #include "pathlocks.hh"
 #include "globals.hh"
@@ -644,7 +642,7 @@ HookInstance::~HookInstance()
 {
     try {
         toHook.writeSide = -1;
-        if (pid != -1) pid.kill(true);
+        if (pid != -1) pid.kill();
     } catch (...) {
         ignoreException();
     }
@@ -1439,7 +1437,7 @@ void DerivationGoal::buildDone()
        to have terminated.  In fact, the builder could also have
        simply have closed its end of the pipe, so just to be sure,
        kill it. */
-    int status = hook ? hook->pid.kill(true) : pid.kill(true);
+    int status = hook ? hook->pid.kill() : pid.kill();
 
     debug(format("builder process for ‘%1%’ finished") % drvPath);
 
@@ -1582,36 +1580,48 @@ HookReply DerivationGoal::tryBuildHook()
     if (!worker.hook)
         worker.hook = std::make_unique<HookInstance>();
 
-    /* Tell the hook about system features (beyond the system type)
-       required from the build machine.  (The hook could parse the
-       drv file itself, but this is easier.) */
-    Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures"));
-    for (auto & i : features) checkStoreName(i); /* !!! abuse */
-
-    /* Send the request to the hook. */
-    writeLine(worker.hook->toHook.writeSide.get(), (format("%1% %2% %3% %4%")
-        % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0")
-        % drv->platform % drvPath % concatStringsSep(",", features)).str());
+    try {
 
-    /* Read the first line of input, which should be a word indicating
-       whether the hook wishes to perform the build. */
-    string reply;
-    while (true) {
-        string s = readLine(worker.hook->fromHook.readSide.get());
-        if (string(s, 0, 2) == "# ") {
-            reply = string(s, 2);
-            break;
+        /* Tell the hook about system features (beyond the system type)
+           required from the build machine.  (The hook could parse the
+           drv file itself, but this is easier.) */
+        Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures"));
+        for (auto & i : features) checkStoreName(i); /* !!! abuse */
+
+        /* Send the request to the hook. */
+        writeLine(worker.hook->toHook.writeSide.get(), (format("%1% %2% %3% %4%")
+                % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0")
+                % drv->platform % drvPath % concatStringsSep(",", features)).str());
+
+        /* Read the first line of input, which should be a word indicating
+           whether the hook wishes to perform the build. */
+        string reply;
+        while (true) {
+            string s = readLine(worker.hook->fromHook.readSide.get());
+            if (string(s, 0, 2) == "# ") {
+                reply = string(s, 2);
+                break;
+            }
+            s += "\n";
+            writeToStderr(s);
         }
-        s += "\n";
-        writeToStderr(s);
-    }
 
-    debug(format("hook reply is ‘%1%’") % reply);
+        debug(format("hook reply is ‘%1%’") % reply);
 
-    if (reply == "decline" || reply == "postpone")
-        return reply == "decline" ? rpDecline : rpPostpone;
-    else if (reply != "accept")
-        throw Error(format("bad hook reply ‘%1%’") % reply);
+        if (reply == "decline" || reply == "postpone")
+            return reply == "decline" ? rpDecline : rpPostpone;
+        else if (reply != "accept")
+            throw Error(format("bad hook reply ‘%1%’") % reply);
+
+    } catch (SysError & e) {
+        if (e.errNo == EPIPE) {
+            printError("build hook died unexpectedly: %s",
+                chomp(drainFD(worker.hook->fromHook.readSide.get())));
+            worker.hook = 0;
+            return rpDecline;
+        } else
+            throw;
+    }
 
     printMsg(lvlTalkative, format("using hook to build path(s) %1%") % showPaths(missingPaths));
 
@@ -2309,6 +2319,14 @@ void DerivationGoal::runChild()
 
         bool setUser = true;
 
+        /* Make the contents of netrc available to builtin:fetchurl
+           (which may run under a different uid and/or in a sandbox). */
+        std::string netrcData;
+        try {
+            if (drv->isBuiltin() && drv->builder == "builtin:fetchurl")
+                netrcData = readFile(settings.netrcFile);
+        } catch (SysError &) { }
+
 #if __linux__
         if (useChroot) {
 
@@ -2677,7 +2695,7 @@ void DerivationGoal::runChild()
         if (drv->isBuiltin()) {
             try {
                 if (drv->builder == "builtin:fetchurl")
-                    builtinFetchurl(*drv);
+                    builtinFetchurl(*drv, netrcData);
                 else
                     throw Error(format("unsupported builtin function ‘%1%’") % string(drv->builder, 8));
                 _exit(0);
@@ -2747,6 +2765,8 @@ void DerivationGoal::registerOutputs()
         Path path = i.second.path;
         if (missingPaths.find(path) == missingPaths.end()) continue;
 
+        ValidPathInfo info;
+
         Path actualPath = path;
         if (useChroot) {
             actualPath = chrootRootDir + path;
@@ -2849,6 +2869,8 @@ void DerivationGoal::registerOutputs()
                         format("output path ‘%1%’ has %2% hash ‘%3%’ when ‘%4%’ was expected")
                         % path % i.second.hashAlgo % printHash16or32(h2) % printHash16or32(h));
             }
+
+            info.ca = makeFixedOutputCA(recursive, h2);
         }
 
         /* Get rid of all weird permissions.  This also checks that
@@ -2948,7 +2970,6 @@ void DerivationGoal::registerOutputs()
             worker.markContentsGood(path);
         }
 
-        ValidPathInfo info;
         info.path = path;
         info.narHash = hash.first;
         info.narSize = hash.second;
@@ -3027,9 +3048,6 @@ void DerivationGoal::registerOutputs()
 }
 
 
-string drvsLogDir = "drvs";
-
-
 Path DerivationGoal::openLogFile()
 {
     logSize = 0;
@@ -3039,7 +3057,7 @@ Path DerivationGoal::openLogFile()
     string baseName = baseNameOf(drvPath);
 
     /* Create a log file. */
-    Path dir = (format("%1%/%2%/%3%/") % worker.store.logDir % drvsLogDir % string(baseName, 0, 2)).str();
+    Path dir = (format("%1%/%2%/%3%/") % worker.store.logDir % worker.store.drvsLogDir % string(baseName, 0, 2)).str();
     createDirs(dir);
 
     Path logFileName = (format("%1%/%2%%3%")
@@ -3074,7 +3092,9 @@ void DerivationGoal::closeLogFile()
 void DerivationGoal::deleteTmpDir(bool force)
 {
     if (tmpDir != "") {
-        if (settings.keepFailed && !force) {
+        /* Don't keep temporary directories for builtins because they
+           might have privileged stuff (like a copy of netrc). */
+        if (settings.keepFailed && !force && !drv->isBuiltin()) {
             printError(
                 format("note: keeping build directory ‘%2%’")
                 % drvPath % tmpDir);
diff --git a/src/libstore/builtins.cc b/src/libstore/builtins.cc
index a30f30906f01..c5dbd57f8bc6 100644
--- a/src/libstore/builtins.cc
+++ b/src/libstore/builtins.cc
@@ -6,8 +6,16 @@
 
 namespace nix {
 
-void builtinFetchurl(const BasicDerivation & drv)
+void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
 {
+    /* Make the host's netrc data available. Too bad curl requires
+       this to be stored in a file. It would be nice if we could just
+       pass a pointer to the data. */
+    if (netrcData != "") {
+        settings.netrcFile = "netrc";
+        writeFile(settings.netrcFile, netrcData, 0600);
+    }
+
     auto getAttr = [&](const string & name) {
         auto i = drv.env.find(name);
         if (i == drv.env.end()) throw Error(format("attribute ‘%s’ missing") % name);
diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh
index 4b2431aa08cf..0cc6ba31f658 100644
--- a/src/libstore/builtins.hh
+++ b/src/libstore/builtins.hh
@@ -4,6 +4,6 @@
 
 namespace nix {
 
-void builtinFetchurl(const BasicDerivation & drv);
+void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData);
 
 }
diff --git a/src/libstore/crypto.cc b/src/libstore/crypto.cc
index 747483afb30b..0fc86a1fe921 100644
--- a/src/libstore/crypto.cc
+++ b/src/libstore/crypto.cc
@@ -105,7 +105,9 @@ PublicKeys getDefaultPublicKeys()
 
     // FIXME: filter duplicates
 
-    for (auto s : settings.get("binary-cache-public-keys", Strings())) {
+    for (auto s : settings.get("binary-cache-public-keys",
+            Strings{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="}))
+    {
         PublicKey key(s);
         publicKeys.emplace(key.name, key);
     }
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index 79526c594f71..0c6ceb9f6741 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -4,7 +4,7 @@
 #include "util.hh"
 #include "worker-protocol.hh"
 #include "fs-accessor.hh"
-
+#include "istringstream_nocopy.hh"
 
 namespace nix {
 
@@ -152,7 +152,7 @@ static StringSet parseStrings(std::istream & str, bool arePaths)
 static Derivation parseDerivation(const string & s)
 {
     Derivation drv;
-    std::istringstream str(s);
+    istringstream_nocopy str(s);
     expect(str, "Derive([");
 
     /* Parse the list of outputs. */
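
parseDerivation() now reads from istringstream_nocopy (a new header in the diffstat above) instead of std::istringstream, avoiding a copy of the entire .drv contents. A rough standalone sketch of the idea, a streambuf whose read window points directly at an existing string; it assumes the string outlives the stream and is not Nix's actual implementation:

    #include <iostream>
    #include <istream>
    #include <streambuf>
    #include <string>

    struct stringview_buf : std::streambuf
    {
        explicit stringview_buf(const std::string & s)
        {
            char * p = const_cast<char *>(s.data());
            setg(p, p, p + s.size());   // read-only window over the existing buffer
        }
    };

    int main()
    {
        std::string drv = "Derive([...])";
        stringview_buf buf(drv);
        std::istream in(&buf);

        std::string token;
        in >> token;                    // no copy of `drv` was made
        std::cout << token << "\n";
        return 0;
    }
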
@@ -397,8 +397,8 @@ PathSet BasicDerivation::outputPaths() const
 Source & readDerivation(Source & in, Store & store, BasicDerivation & drv)
 {
     drv.outputs.clear();
-    auto nr = readInt(in);
-    for (unsigned int n = 0; n < nr; n++) {
+    auto nr = readNum<size_t>(in);
+    for (size_t n = 0; n < nr; n++) {
         auto name = readString(in);
         DerivationOutput o;
         in >> o.path >> o.hashAlgo >> o.hash;
@@ -410,8 +410,8 @@ Source & readDerivation(Source & in, Store & store, BasicDerivation & drv)
     in >> drv.platform >> drv.builder;
     drv.args = readStrings<Strings>(in);
 
-    nr = readInt(in);
-    for (unsigned int n = 0; n < nr; n++) {
+    nr = readNum<size_t>(in);
+    for (size_t n = 0; n < nr; n++) {
         auto key = readString(in);
         auto value = readString(in);
         drv.env[key] = value;
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
index 074e0ca6642a..22bde086e6a2 100644
--- a/src/libstore/download.cc
+++ b/src/libstore/download.cc
@@ -4,6 +4,12 @@
 #include "hash.hh"
 #include "store-api.hh"
 #include "archive.hh"
+#include "s3.hh"
+#include "compression.hh"
+
+#ifdef ENABLE_S3
+#include <aws/core/client/ClientConfiguration.h>
+#endif
 
 #include <unistd.h>
 #include <fcntl.h>
@@ -33,6 +39,16 @@ std::string resolveUri(const std::string & uri)
         return uri;
 }
 
+ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data)
+{
+    if (encoding == "")
+        return data;
+    else if (encoding == "br")
+        return decompress(encoding, *data);
+    else
+        throw Error("unsupported Content-Encoding ‘%s’", encoding);
+}
+
 struct CurlDownloader : public Downloader
 {
     CURLM * curlm = 0;
@@ -66,6 +82,8 @@ struct CurlDownloader : public Downloader
 
         struct curl_slist * requestHeaders = 0;
 
+        std::string encoding;
+
         DownloadItem(CurlDownloader & downloader, const DownloadRequest & request)
             : downloader(downloader), request(request)
         {
@@ -123,6 +141,7 @@ struct CurlDownloader : public Downloader
                 auto ss = tokenizeString<vector<string>>(line, " ");
                 status = ss.size() >= 2 ? ss[1] : "";
                 result.data = std::make_shared<std::string>();
+                encoding = "";
             } else {
                 auto i = line.find(':');
                 if (i != string::npos) {
@@ -138,7 +157,8 @@ struct CurlDownloader : public Downloader
                             debug(format("shutting down on 200 HTTP response with expected ETag"));
                             return 0;
                         }
-                    }
+                    } else if (name == "content-encoding")
+                        encoding = trim(string(line, i + 1));
                 }
             }
             return realSize;
@@ -200,7 +220,7 @@ struct CurlDownloader : public Downloader
             curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str());
             curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L);
             curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
-            curl_easy_setopt(req, CURLOPT_USERAGENT, ("Nix/" + nixVersion).c_str());
+            curl_easy_setopt(req, CURLOPT_USERAGENT, ("curl/" LIBCURL_VERSION " Nix/" + nixVersion).c_str());
             #if LIBCURL_VERSION_NUM >= 0x072b00
             curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1);
             #endif
@@ -223,13 +243,17 @@ struct CurlDownloader : public Downloader
                 curl_easy_setopt(req, CURLOPT_NOBODY, 1);
 
             if (request.verifyTLS)
-                curl_easy_setopt(req, CURLOPT_CAINFO,
-                    getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt")).c_str());
+                curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
             else {
                 curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0);
                 curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
             }
 
+            /* If no file exists at the specified path, curl continues to work
+               anyway as if netrc support was disabled. */
+            curl_easy_setopt(req, CURLOPT_NETRC_FILE, settings.netrcFile.c_str());
+            curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL);
+
             result.data = std::make_shared<std::string>();
         }
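
The two curl_easy_setopt calls above wire the netrc-file setting into every download; CURL_NETRC_OPTIONAL means a missing file simply disables authentication. The same options in a self-contained libcurl program (the URL and netrc path are placeholders):

    #include <cstdio>
    #include <curl/curl.h>

    int main()
    {
        curl_global_init(CURL_GLOBAL_DEFAULT);
        CURL * req = curl_easy_init();
        if (!req) return 1;

        curl_easy_setopt(req, CURLOPT_URL, "https://example.org/");
        curl_easy_setopt(req, CURLOPT_NETRC_FILE, "/etc/nix/netrc");   // placeholder path
        curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL);

        CURLcode res = curl_easy_perform(req);
        if (res != CURLE_OK)
            fprintf(stderr, "download failed: %s\n", curl_easy_strerror(res));

        curl_easy_cleanup(req);
        curl_global_cleanup();
        return 0;
    }
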
 
@@ -260,14 +284,26 @@ struct CurlDownloader : public Downloader
             {
                 result.cached = httpStatus == 304;
                 done = true;
-                callSuccess(success, failure, const_cast<const DownloadResult &>(result));
+
+                try {
+                    result.data = decodeContent(encoding, ref<std::string>(result.data));
+                    callSuccess(success, failure, const_cast<const DownloadResult &>(result));
+                } catch (...) {
+                    done = true;
+                    callFailure(failure, std::current_exception());
+                }
             } else {
                 Error err =
                     (httpStatus == 404 || code == CURLE_FILE_COULDNT_READ_FILE) ? NotFound :
                     httpStatus == 403 ? Forbidden :
                     (httpStatus == 408 || httpStatus == 500 || httpStatus == 503
                         || httpStatus == 504  || httpStatus == 522 || httpStatus == 524
-                        || code == CURLE_COULDNT_RESOLVE_HOST) ? Transient :
+                        || code == CURLE_COULDNT_RESOLVE_HOST
+                        || code == CURLE_RECV_ERROR
+#if LIBCURL_VERSION_NUM >= 0x073200
+                        || code == CURLE_HTTP2_STREAM
+#endif
+                        ) ? Transient :
                     Misc;
 
                 attempt++;
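
Restated outside the callback, the retry condition above boils down to a small predicate over the HTTP status and curl code (the HTTP/2-stream case is version-guarded in the hunk and omitted here):

    #include <curl/curl.h>

    bool isTransient(long httpStatus, CURLcode code)
    {
        return httpStatus == 408 || httpStatus == 500 || httpStatus == 503
            || httpStatus == 504 || httpStatus == 522 || httpStatus == 524
            || code == CURLE_COULDNT_RESOLVE_HOST
            || code == CURLE_RECV_ERROR;
    }

    int main()
    {
        return (isTransient(503, CURLE_OK) && !isTransient(404, CURLE_OK)) ? 0 : 1;
    }
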
@@ -480,6 +516,31 @@ struct CurlDownloader : public Downloader
         std::function<void(const DownloadResult &)> success,
         std::function<void(std::exception_ptr exc)> failure) override
     {
+        /* Ugly hack to support s3:// URIs. */
+        if (hasPrefix(request.uri, "s3://")) {
+            // FIXME: do this on a worker thread
+            sync2async<DownloadResult>(success, failure, [&]() -> DownloadResult {
+#ifdef ENABLE_S3
+                S3Helper s3Helper(Aws::Region::US_EAST_1); // FIXME: make configurable
+                auto slash = request.uri.find('/', 5);
+                if (slash == std::string::npos)
+                    throw nix::Error("bad S3 URI ‘%s’", request.uri);
+                std::string bucketName(request.uri, 5, slash - 5);
+                std::string key(request.uri, slash + 1);
+                // FIXME: implement ETag
+                auto s3Res = s3Helper.getObject(bucketName, key);
+                DownloadResult res;
+                if (!s3Res.data)
+                    throw DownloadError(NotFound, fmt("S3 object ‘%s’ does not exist", request.uri));
+                res.data = s3Res.data;
+                return res;
+#else
+                throw nix::Error("cannot download ‘%s’ because Nix is not built with S3 support", request.uri);
+#endif
+            });
+            return;
+        }
+
         auto item = std::make_shared<DownloadItem>(*this, request);
         item->success = success;
         item->failure = failure;
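
The s3:// branch above splits the URI into a bucket name and an object key at the first slash after the scheme. The same split as a standalone function, with a made-up bucket for the test:

    #include <cassert>
    #include <stdexcept>
    #include <string>
    #include <utility>

    std::pair<std::string, std::string> parseS3Uri(const std::string & uri)
    {
        if (uri.compare(0, 5, "s3://") != 0)
            throw std::invalid_argument("not an s3:// URI: " + uri);
        auto slash = uri.find('/', 5);
        if (slash == std::string::npos)
            throw std::invalid_argument("bad S3 URI: " + uri);
        return { uri.substr(5, slash - 5), uri.substr(slash + 1) };
    }

    int main()
    {
        auto bk = parseS3Uri("s3://example-cache/nar/abc.nar.xz");
        assert(bk.first == "example-cache");
        assert(bk.second == "nar/abc.nar.xz");
        return 0;
    }
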
@@ -581,6 +642,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa
                 Hash hash = hashString(expectedHash ? expectedHash.type : htSHA256, *res.data);
                 info.path = store->makeFixedOutputPath(false, hash, name);
                 info.narHash = hashString(htSHA256, *sink.s);
+                info.ca = makeFixedOutputCA(false, hash);
                 store->addToStore(info, sink.s, false, true);
                 storePath = info.path;
             }
@@ -609,7 +671,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa
             Path tmpDir = createTempDir();
             AutoDelete autoDelete(tmpDir, true);
             // FIXME: this requires GNU tar for decompression.
-            runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"}, "");
+            runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"});
             unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, false);
         }
         replaceSymlink(unpackedStorePath, unpackedLink);
@@ -629,7 +691,7 @@ bool isUri(const string & s)
     size_t pos = s.find("://");
     if (pos == string::npos) return false;
     string scheme(s, 0, pos);
-    return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git";
+    return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3";
 }
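
With CURLOPT_NETRC set to CURL_NETRC_OPTIONAL, curl treats the netrc file as optional: if it exists, credentials found there are used for matching hosts; if not, downloads simply proceed unauthenticated, which is why a missing file is harmless (as the comment above notes). A minimal netrc entry, with a purely illustrative host and credentials, looks like:

    machine cache.example.org
    login alice
    password s3cret

The file defaults to <nixConfDir>/netrc and can be pointed elsewhere with the netrc-file setting introduced in globals.cc further down.
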
 
 
diff --git a/src/libstore/download.hh b/src/libstore/download.hh
index 82b5d641fde9..e2e16b361036 100644
--- a/src/libstore/download.hh
+++ b/src/libstore/download.hh
@@ -23,7 +23,7 @@ struct DownloadRequest
 
 struct DownloadResult
 {
-    bool cached;
+    bool cached = false;
     std::string etag;
     std::string effectiveUrl;
     std::shared_ptr<std::string> data;
@@ -73,4 +73,7 @@ public:
 
 bool isUri(const string & s);
 
+/* Decode data according to the Content-Encoding header. */
+ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data);
+
 }
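
decodeContent, declared above, is used by both the curl success callback and the S3 helper later in this diff to undo a server-side Content-Encoding before the body reaches callers. A minimal caller sketch, where rawBody and contentEncoding are hypothetical local variables:

    // Undo the transfer encoding of an HTTP response body. An empty
    // encoding is assumed here to return the data unchanged.
    ref<std::string> body = make_ref<std::string>(rawBody);
    ref<std::string> plain = decodeContent(contentEncoding, body);
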
diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc
index c5618c826c54..2b8ab063e18e 100644
--- a/src/libstore/export-import.cc
+++ b/src/libstore/export-import.cc
@@ -61,39 +61,17 @@ void Store::exportPath(const Path & path, Sink & sink)
     hashAndWriteSink << exportMagic << path << info->references << info->deriver << 0;
 }
 
-struct TeeSource : Source
-{
-    Source & readSource;
-    ref<std::string> data;
-    TeeSource(Source & readSource)
-        : readSource(readSource)
-        , data(make_ref<std::string>())
-    {
-    }
-    size_t read(unsigned char * data, size_t len)
-    {
-        size_t n = readSource.read(data, len);
-        this->data->append((char *) data, n);
-        return n;
-    }
-};
-
-struct NopSink : ParseSink
-{
-};
-
 Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor, bool dontCheckSigs)
 {
     Paths res;
     while (true) {
-        unsigned long long n = readLongLong(source);
+        auto n = readNum<uint64_t>(source);
         if (n == 0) break;
         if (n != 1) throw Error("input doesn't look like something created by ‘nix-store --export’");
 
         /* Extract the NAR from the source. */
-        TeeSource tee(source);
-        NopSink sink;
-        parseDump(sink, tee);
+        TeeSink tee(source);
+        parseDump(tee, tee.source);
 
         uint32_t magic = readInt(source);
         if (magic != exportMagic)
@@ -110,14 +88,14 @@ Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor,
         info.deriver = readString(source);
         if (info.deriver != "") assertStorePath(info.deriver);
 
-        info.narHash = hashString(htSHA256, *tee.data);
-        info.narSize = tee.data->size();
+        info.narHash = hashString(htSHA256, *tee.source.data);
+        info.narSize = tee.source.data->size();
 
         // Ignore optional legacy signature.
         if (readInt(source) == 1)
             readString(source);
 
-        addToStore(info, tee.data, false, dontCheckSigs, accessor);
+        addToStore(info, tee.source.data, false, dontCheckSigs, accessor);
 
         res.push_back(info.path);
     }
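
importPaths above reads the framing produced by nix-store --export, while the TeeSink captures the raw NAR bytes as parseDump consumes them, so narHash and narSize can be computed without re-reading the stream. The per-path layout, pieced together from exportPath and the reads above, is roughly:

    /* One entry of an export stream:
         1             (uint64: another path follows; 0 ends the stream)
         <NAR dump>    (captured via TeeSink for hashing)
         exportMagic   (uint32)
         <store path>
         <references>  (list of store paths)
         <deriver>     (string, possibly empty)
         0 or 1        (legacy signature flag; the signature itself is ignored) */
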
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 00b468892529..012b3d5b8b98 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -1,12 +1,10 @@
-#include "config.h"
-
 #include "globals.hh"
 #include "util.hh"
 #include "archive.hh"
 
-#include <map>
 #include <algorithm>
-#include <unistd.h>
+#include <map>
+#include <thread>
 
 
 namespace nix {
@@ -25,15 +23,26 @@ Settings settings;
 
 Settings::Settings()
 {
+    nixPrefix = NIX_PREFIX;
+    nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
+    nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
+    nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
+    nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
+    nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
+    nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
+    nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
+    nixDaemonSocketFile = canonPath(nixStateDir + DEFAULT_SOCKET_PATH);
+
+    // should be set with the other config options, but depends on nixLibexecDir
+#ifdef __APPLE__
+    preBuildHook = nixLibexecDir + "/nix/resolve-system-dependencies";
+#endif
+
     keepFailed = false;
     keepGoing = false;
     tryFallback = false;
     maxBuildJobs = 1;
-    buildCores = 1;
-#ifdef _SC_NPROCESSORS_ONLN
-    long res = sysconf(_SC_NPROCESSORS_ONLN);
-    if (res > 0) buildCores = res;
-#endif
+    buildCores = std::max(1U, std::thread::hardware_concurrency());
     readOnlyMode = false;
     thisSystem = SYSTEM;
     maxSilentTime = 0;
@@ -59,25 +68,9 @@ Settings::Settings()
     lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";
     showTrace = false;
     enableImportNative = false;
-}
-
-
-void Settings::processEnvironment()
-{
-    nixPrefix = NIX_PREFIX;
-    nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
-    nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
-    nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
-    nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
-    nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
-    nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
-    nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
-    nixDaemonSocketFile = canonPath(nixStateDir + DEFAULT_SOCKET_PATH);
-
-    // should be set with the other config options, but depends on nixLibexecDir
-#ifdef __APPLE__
-    preBuildHook = nixLibexecDir + "/nix/resolve-system-dependencies";
-#endif
+    netrcFile = fmt("%s/%s", nixConfDir, "netrc");
+    caFile = getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt"));
+    enableImportFromDerivation = true;
 }
 
 
@@ -156,7 +149,14 @@ int Settings::get(const string & name, int def)
 void Settings::update()
 {
     _get(tryFallback, "build-fallback");
-    _get(maxBuildJobs, "build-max-jobs");
+
+    auto s = get("build-max-jobs", std::string("1"));
+    if (s == "auto")
+        maxBuildJobs = std::max(1U, std::thread::hardware_concurrency());
+    else
+        if (!string2Int(s, maxBuildJobs))
+            throw Error("configuration setting ‘build-max-jobs’ should be ‘auto’ or an integer");
+
     _get(buildCores, "build-cores");
     _get(thisSystem, "system");
     _get(maxSilentTime, "build-max-silent-time");
@@ -179,12 +179,13 @@ void Settings::update()
     _get(envKeepDerivations, "env-keep-derivations");
     _get(sshSubstituterHosts, "ssh-substituter-hosts");
     _get(useSshSubstituter, "use-ssh-substituter");
-    _get(logServers, "log-servers");
     _get(enableImportNative, "allow-unsafe-native-code-during-evaluation");
     _get(useCaseHack, "use-case-hack");
     _get(preBuildHook, "pre-build-hook");
     _get(keepGoing, "keep-going");
     _get(keepFailed, "keep-failed");
+    _get(netrcFile, "netrc-file");
+    _get(enableImportFromDerivation, "allow-import-from-derivation");
 }
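
build-max-jobs now accepts either an integer or the literal "auto", which resolves to std::thread::hardware_concurrency(); netrc-file and allow-import-from-derivation are new settings read in the same pass. An illustrative nix.conf fragment exercising them (values are examples only):

    build-max-jobs = auto
    build-cores = 4
    netrc-file = /etc/nix/netrc
    allow-import-from-derivation = true
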
 
 
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index a423b4e5c0f4..462721681912 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -16,8 +16,6 @@ struct Settings {
 
     Settings();
 
-    void processEnvironment();
-
     void loadConfFile();
 
     void set(const string & name, const string & value);
@@ -183,9 +181,6 @@ struct Settings {
     /* Whether to show a stack trace if Nix evaluation fails. */
     bool showTrace;
 
-    /* A list of URL prefixes that can return Nix build logs. */
-    Strings logServers;
-
     /* Whether the importNative primop should be enabled */
     bool enableImportNative;
 
@@ -193,6 +188,16 @@ struct Settings {
        build settings */
     Path preBuildHook;
 
+    /* Path to the netrc file used to obtain usernames/passwords for
+       downloads. */
+    Path netrcFile;
+
+    /* Path to the SSL CA certificate file used for TLS connections. */
+    Path caFile;
+
+    /* Whether we allow import-from-derivation */
+    bool enableImportFromDerivation;
+
 private:
     SettingsMap settings, overrides;
 
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index 9d31f77c921f..37a7d6ace142 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -64,7 +64,9 @@ protected:
         }
     }
 
-    void upsertFile(const std::string & path, const std::string & data) override
+    void upsertFile(const std::string & path,
+        const std::string & data,
+        const std::string & mimeType) override
     {
         throw UploadToHTTP("uploading to an HTTP binary cache is not supported");
     }
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index 5d9e5aad6e0a..0e838846c794 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -4,80 +4,50 @@
 #include "serve-protocol.hh"
 #include "store-api.hh"
 #include "worker-protocol.hh"
+#include "ssh.hh"
 
 namespace nix {
 
-static std::string uriScheme = "legacy-ssh://";
+static std::string uriScheme = "ssh://";
 
 struct LegacySSHStore : public Store
 {
-    string host;
-
     struct Connection
     {
-        Pid sshPid;
-        AutoCloseFD out;
-        AutoCloseFD in;
+        std::unique_ptr<SSHMaster::Connection> sshConn;
         FdSink to;
         FdSource from;
     };
 
-    AutoDelete tmpDir;
-
-    Path socketPath;
-
-    Pid sshMaster;
+    std::string host;
 
     ref<Pool<Connection>> connections;
 
-    Path key;
+    SSHMaster master;
 
-    LegacySSHStore(const string & host, const Params & params,
-        size_t maxConnections = std::numeric_limits<size_t>::max())
+    LegacySSHStore(const string & host, const Params & params)
         : Store(params)
         , host(host)
-        , tmpDir(createTempDir("", "nix", true, true, 0700))
-        , socketPath((Path) tmpDir + "/ssh.sock")
         , connections(make_ref<Pool<Connection>>(
-            maxConnections,
+            std::max(1, std::stoi(get(params, "max-connections", "1"))),
             [this]() { return openConnection(); },
             [](const ref<Connection> & r) { return true; }
             ))
-        , key(get(params, "ssh-key", ""))
+        , master(
+            host,
+            get(params, "ssh-key", ""),
+            // Use SSH master only if using more than 1 connection.
+            connections->capacity() > 1,
+            get(params, "compress", "") == "true")
     {
     }
 
     ref<Connection> openConnection()
     {
-        if ((pid_t) sshMaster == -1) {
-            sshMaster = startProcess([&]() {
-                restoreSignals();
-                Strings args{ "ssh", "-M", "-S", socketPath, "-N", "-x", "-a", host };
-                if (!key.empty())
-                    args.insert(args.end(), {"-i", key});
-                execvp("ssh", stringsToCharPtrs(args).data());
-                throw SysError("starting SSH master connection to host ‘%s’", host);
-            });
-        }
-
         auto conn = make_ref<Connection>();
-        Pipe in, out;
-        in.create();
-        out.create();
-        conn->sshPid = startProcess([&]() {
-            if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
-                throw SysError("duping over STDIN");
-            if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
-                throw SysError("duping over STDOUT");
-            execlp("ssh", "ssh", "-S", socketPath.c_str(), host.c_str(), "nix-store", "--serve", "--write", nullptr);
-            throw SysError("executing ‘nix-store --serve’ on remote host ‘%s’", host);
-        });
-        in.readSide = -1;
-        out.writeSide = -1;
-        conn->out = std::move(out.readSide);
-        conn->in = std::move(in.writeSide);
-        conn->to = FdSink(conn->in.get());
-        conn->from = FdSource(conn->out.get());
+        conn->sshConn = master.startCommand("nix-store --serve --write");
+        conn->to = FdSink(conn->sshConn->in.get());
+        conn->from = FdSource(conn->sshConn->out.get());
 
         int remoteVersion;
 
@@ -169,9 +139,9 @@ struct LegacySSHStore : public Store
 
         /* FIXME: inefficient. */
         ParseSink parseSink; /* null sink; just parse the NAR */
-        SavingSourceAdapter savedNAR(conn->from);
+        TeeSource savedNAR(conn->from);
         parseDump(parseSink, savedNAR);
-        sink(savedNAR.s);
+        sink(*savedNAR.data);
     }
 
     /* Unsupported methods. */
@@ -225,7 +195,7 @@ struct LegacySSHStore : public Store
     void collectGarbage(const GCOptions & options, GCResults & results) override
     { unsupported(); }
 
-    ref<FSAccessor> getFSAccessor()
+    ref<FSAccessor> getFSAccessor() override
     { unsupported(); }
 
     void addSignatures(const Path & storePath, const StringSet & sigs) override
@@ -234,6 +204,41 @@ struct LegacySSHStore : public Store
     bool isTrusted() override
     { return true; }
 
+    void computeFSClosure(const PathSet & paths,
+        PathSet & out, bool flipDirection = false,
+        bool includeOutputs = false, bool includeDerivers = false) override
+    {
+        if (flipDirection || includeDerivers) {
+            Store::computeFSClosure(paths, out, flipDirection, includeOutputs, includeDerivers);
+            return;
+        }
+
+        auto conn(connections->get());
+
+        conn->to
+            << cmdQueryClosure
+            << includeOutputs
+            << paths;
+        conn->to.flush();
+
+        auto res = readStorePaths<PathSet>(*this, conn->from);
+
+        out.insert(res.begin(), res.end());
+    }
+
+    PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override
+    {
+        auto conn(connections->get());
+
+        conn->to
+            << cmdQueryValidPaths
+            << false // lock
+            << maybeSubstitute
+            << paths;
+        conn->to.flush();
+
+        return readStorePaths<PathSet>(*this, conn->from);
+    }
 };
 
 static RegisterStoreImplementation regStore([](
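
With the switch to SSHMaster, per-store tuning moves into store-URI query parameters read from `params` above. A hypothetical URI selecting this store could look like:

    ssh://builder.example.org?ssh-key=/home/alice/.ssh/id_ed25519&max-connections=4&compress=true

It runs `nix-store --serve --write` on the remote side; the ssh-ng:// scheme in ssh-store.cc below instead starts `nix-daemon --stdio` and speaks the full worker protocol.
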
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
index 0f377989bd89..aff22f9fcc22 100644
--- a/src/libstore/local-binary-cache-store.cc
+++ b/src/libstore/local-binary-cache-store.cc
@@ -30,7 +30,9 @@ protected:
 
     bool fileExists(const std::string & path) override;
 
-    void upsertFile(const std::string & path, const std::string & data) override;
+    void upsertFile(const std::string & path,
+        const std::string & data,
+        const std::string & mimeType) override;
 
     void getFile(const std::string & path,
         std::function<void(std::shared_ptr<std::string>)> success,
@@ -83,7 +85,9 @@ bool LocalBinaryCacheStore::fileExists(const std::string & path)
     return pathExists(binaryCacheDir + "/" + path);
 }
 
-void LocalBinaryCacheStore::upsertFile(const std::string & path, const std::string & data)
+void LocalBinaryCacheStore::upsertFile(const std::string & path,
+    const std::string & data,
+    const std::string & mimeType)
 {
     atomicWrite(binaryCacheDir + "/" + path, data);
 }
diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc
index 4571a2211cd2..57e1b8a09fe6 100644
--- a/src/libstore/local-fs-store.cc
+++ b/src/libstore/local-fs-store.cc
@@ -2,6 +2,8 @@
 #include "fs-accessor.hh"
 #include "store-api.hh"
 #include "globals.hh"
+#include "compression.hh"
+#include "derivations.hh"
 
 namespace nix {
 
@@ -84,4 +86,46 @@ void LocalFSStore::narFromPath(const Path & path, Sink & sink)
     dumpPath(getRealStoreDir() + std::string(path, storeDir.size()), sink);
 }
 
+const string LocalFSStore::drvsLogDir = "drvs";
+
+std::shared_ptr<std::string> LocalFSStore::getBuildLog(const Path & path_)
+{
+    auto path(path_);
+
+    assertStorePath(path);
+
+
+    if (!isDerivation(path)) {
+        try {
+            path = queryPathInfo(path)->deriver;
+        } catch (InvalidPath &) {
+            return nullptr;
+        }
+        if (path == "") return nullptr;
+    }
+
+    string baseName = baseNameOf(path);
+
+    for (int j = 0; j < 2; j++) {
+
+        Path logPath =
+            j == 0
+            ? (format("%1%/%2%/%3%/%4%") % logDir % drvsLogDir % string(baseName, 0, 2) % string(baseName, 2)).str()
+            : (format("%1%/%2%/%3%") % logDir % drvsLogDir % baseName).str();
+        Path logBz2Path = logPath + ".bz2";
+
+        if (pathExists(logPath))
+            return std::make_shared<std::string>(readFile(logPath));
+
+        else if (pathExists(logBz2Path)) {
+            try {
+                return decompress("bzip2", readFile(logBz2Path));
+            } catch (Error &) { }
+        }
+
+    }
+
+    return nullptr;
+}
+
 }
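
getBuildLog tries a sharded layout first and a flat one second, each with an optional .bz2 variant. For a hypothetical derivation basename the probe order is:

    /* Candidate log locations for basename "abcdefgh...-hello.drv"
       (logDir comes from the store, drvsLogDir is "drvs"):
         <logDir>/drvs/ab/cdefgh...-hello.drv
         <logDir>/drvs/ab/cdefgh...-hello.drv.bz2
         <logDir>/drvs/abcdefgh...-hello.drv
         <logDir>/drvs/abcdefgh...-hello.drv.bz2 */
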
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 612efde7bb8f..8610841d7229 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -1,4 +1,3 @@
-#include "config.h"
 #include "local-store.hh"
 #include "globals.hh"
 #include "archive.hh"
@@ -45,7 +44,7 @@ LocalStore::LocalStore(const Params & params)
     , reservedPath(dbDir + "/reserved")
     , schemaPath(dbDir + "/schema")
     , trashDir(realStoreDir + "/trash")
-    , requireSigs(trim(settings.get("signed-binary-caches", std::string(""))) != "") // FIXME: rename option
+    , requireSigs(trim(settings.get("signed-binary-caches", std::string("*"))) != "") // FIXME: rename option
     , publicKeys(getDefaultPublicKeys())
 {
     auto state(_state.lock());
@@ -520,6 +519,8 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation &
 uint64_t LocalStore::addValidPath(State & state,
     const ValidPathInfo & info, bool checkOutputs)
 {
+    assert(info.ca == "" || info.isContentAddressed(*this));
+
     state.stmtRegisterValidPath.use()
         (info.path)
         ("sha256:" + printHash(info.narHash))
@@ -668,7 +669,7 @@ bool LocalStore::isValidPathUncached(const Path & path)
 }
 
 
-PathSet LocalStore::queryValidPaths(const PathSet & paths)
+PathSet LocalStore::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
 {
     PathSet res;
     for (auto & i : paths)
@@ -919,7 +920,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> &
             info.path % info.narHash.to_string() % h.to_string());
 
     if (requireSigs && !dontCheckSigs && !info.checkSignatures(*this, publicKeys))
-        throw Error(format("cannot import path ‘%s’ because it lacks a valid signature") % info.path);
+        throw Error("cannot add path ‘%s’ because it lacks a valid signature", info.path);
 
     addTempRoot(info.path);
 
@@ -1003,7 +1004,7 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
             info.narHash = hash.first;
             info.narSize = hash.second;
             info.ultimate = true;
-            info.ca = "fixed:" + (recursive ? (std::string) "r:" : "") + h.to_string();
+            info.ca = makeFixedOutputCA(recursive, h);
             registerValidPath(info);
         }
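
The new makeFixedOutputCA helper replaces the inline string removed above; judging from that removed line, the content address it produces has the following shape (the hash is a placeholder):

    // ca field for a fixed-output path:
    //   recursive ingestion:  "fixed:r:<hash type>:<hash>"
    //   flat ingestion:       "fixed:<hash type>:<hash>"
    info.ca = makeFixedOutputCA(recursive, h);
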
 
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 511209d8404a..28e9a31c9feb 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -21,9 +21,6 @@ namespace nix {
 const int nixSchemaVersion = 10;
 
 
-extern string drvsLogDir;
-
-
 struct Derivation;
 
 
@@ -102,7 +99,7 @@ public:
 
     bool isValidPathUncached(const Path & path) override;
 
-    PathSet queryValidPaths(const PathSet & paths) override;
+    PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override;
 
     PathSet queryAllValidPaths() override;
 
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
index 13b67b81f35e..180a936edb85 100644
--- a/src/libstore/nar-info-disk-cache.cc
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -106,25 +106,27 @@ public:
             "select * from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))");
 
         /* Periodically purge expired entries from the database. */
-        auto now = time(0);
-
-        SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
-        auto queryLastPurge_(queryLastPurge.use());
-
-        if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) {
-            SQLiteStmt(state->db,
-                "delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))")
-                .use()
-                (now - ttlNegative)
-                (now - ttlPositive)
-                .exec();
-
-            debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db));
-
-            SQLiteStmt(state->db,
-                "insert or replace into LastPurge(dummy, value) values ('', ?)")
-                .use()(now).exec();
-        }
+        retrySQLite<void>([&]() {
+            auto now = time(0);
+
+            SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
+            auto queryLastPurge_(queryLastPurge.use());
+
+            if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) {
+                SQLiteStmt(state->db,
+                    "delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))")
+                    .use()
+                    (now - ttlNegative)
+                    (now - ttlPositive)
+                    .exec();
+
+                debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db));
+
+                SQLiteStmt(state->db,
+                    "insert or replace into LastPurge(dummy, value) values ('', ?)")
+                    .use()(now).exec();
+            }
+        });
     }
 
     Cache & getCache(State & state, const std::string & uri)
@@ -136,114 +138,123 @@ public:
 
     void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
     {
-        auto state(_state.lock());
+        retrySQLite<void>([&]() {
+            auto state(_state.lock());
 
-        // FIXME: race
+            // FIXME: race
 
-        state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
-        assert(sqlite3_changes(state->db) == 1);
-        state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+            state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
+            assert(sqlite3_changes(state->db) == 1);
+            state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+        });
     }
 
     bool cacheExists(const std::string & uri,
         bool & wantMassQuery, int & priority) override
     {
-        auto state(_state.lock());
+        return retrySQLite<bool>([&]() {
+            auto state(_state.lock());
 
-        auto i = state->caches.find(uri);
-        if (i == state->caches.end()) {
-            auto queryCache(state->queryCache.use()(uri));
-            if (!queryCache.next()) return false;
-            state->caches.emplace(uri,
-                Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
-        }
+            auto i = state->caches.find(uri);
+            if (i == state->caches.end()) {
+                auto queryCache(state->queryCache.use()(uri));
+                if (!queryCache.next()) return false;
+                state->caches.emplace(uri,
+                    Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
+            }
 
-        auto & cache(getCache(*state, uri));
+            auto & cache(getCache(*state, uri));
 
-        wantMassQuery = cache.wantMassQuery;
-        priority = cache.priority;
+            wantMassQuery = cache.wantMassQuery;
+            priority = cache.priority;
 
-        return true;
+            return true;
+        });
     }
 
     std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
         const std::string & uri, const std::string & hashPart) override
     {
-        auto state(_state.lock());
+        return retrySQLite<std::pair<Outcome, std::shared_ptr<NarInfo>>>(
+            [&]() -> std::pair<Outcome, std::shared_ptr<NarInfo>> {
+            auto state(_state.lock());
+
+            auto & cache(getCache(*state, uri));
 
-        auto & cache(getCache(*state, uri));
-
-        auto now = time(0);
-
-        auto queryNAR(state->queryNAR.use()
-            (cache.id)
-            (hashPart)
-            (now - ttlNegative)
-            (now - ttlPositive));
-
-        if (!queryNAR.next())
-            return {oUnknown, 0};
-
-        if (!queryNAR.getInt(13))
-            return {oInvalid, 0};
-
-        auto narInfo = make_ref<NarInfo>();
-
-        auto namePart = queryNAR.getStr(2);
-        narInfo->path = cache.storeDir + "/" +
-            hashPart + (namePart.empty() ? "" : "-" + namePart);
-        narInfo->url = queryNAR.getStr(3);
-        narInfo->compression = queryNAR.getStr(4);
-        if (!queryNAR.isNull(5))
-            narInfo->fileHash = parseHash(queryNAR.getStr(5));
-        narInfo->fileSize = queryNAR.getInt(6);
-        narInfo->narHash = parseHash(queryNAR.getStr(7));
-        narInfo->narSize = queryNAR.getInt(8);
-        for (auto & r : tokenizeString<Strings>(queryNAR.getStr(9), " "))
-            narInfo->references.insert(cache.storeDir + "/" + r);
-        if (!queryNAR.isNull(10))
-            narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(10);
-        for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(11), " "))
-            narInfo->sigs.insert(sig);
-
-        return {oValid, narInfo};
+            auto now = time(0);
+
+            auto queryNAR(state->queryNAR.use()
+                (cache.id)
+                (hashPart)
+                (now - ttlNegative)
+                (now - ttlPositive));
+
+            if (!queryNAR.next())
+                return {oUnknown, 0};
+
+            if (!queryNAR.getInt(13))
+                return {oInvalid, 0};
+
+            auto narInfo = make_ref<NarInfo>();
+
+            auto namePart = queryNAR.getStr(2);
+            narInfo->path = cache.storeDir + "/" +
+                hashPart + (namePart.empty() ? "" : "-" + namePart);
+            narInfo->url = queryNAR.getStr(3);
+            narInfo->compression = queryNAR.getStr(4);
+            if (!queryNAR.isNull(5))
+                narInfo->fileHash = parseHash(queryNAR.getStr(5));
+            narInfo->fileSize = queryNAR.getInt(6);
+            narInfo->narHash = parseHash(queryNAR.getStr(7));
+            narInfo->narSize = queryNAR.getInt(8);
+            for (auto & r : tokenizeString<Strings>(queryNAR.getStr(9), " "))
+                narInfo->references.insert(cache.storeDir + "/" + r);
+            if (!queryNAR.isNull(10))
+                narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(10);
+            for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(11), " "))
+                narInfo->sigs.insert(sig);
+
+            return {oValid, narInfo};
+        });
     }
 
     void upsertNarInfo(
         const std::string & uri, const std::string & hashPart,
         std::shared_ptr<ValidPathInfo> info) override
     {
-        auto state(_state.lock());
-
-        auto & cache(getCache(*state, uri));
-
-        if (info) {
-
-            auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);
-
-            assert(hashPart == storePathToHash(info->path));
-
-            state->insertNAR.use()
-                (cache.id)
-                (hashPart)
-                (storePathToName(info->path))
-                (narInfo ? narInfo->url : "", narInfo != 0)
-                (narInfo ? narInfo->compression : "", narInfo != 0)
-                (narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash)
-                (narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
-                (info->narHash.to_string())
-                (info->narSize)
-                (concatStringsSep(" ", info->shortRefs()))
-                (info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "")
-                (concatStringsSep(" ", info->sigs))
-                (time(0)).exec();
-
-        } else {
-            state->insertMissingNAR.use()
-                (cache.id)
-                (hashPart)
-                (time(0)).exec();
-        }
+        retrySQLite<void>([&]() {
+            auto state(_state.lock());
+
+            auto & cache(getCache(*state, uri));
+
+            if (info) {
+
+                auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);
+
+                assert(hashPart == storePathToHash(info->path));
+
+                state->insertNAR.use()
+                    (cache.id)
+                    (hashPart)
+                    (storePathToName(info->path))
+                    (narInfo ? narInfo->url : "", narInfo != 0)
+                    (narInfo ? narInfo->compression : "", narInfo != 0)
+                    (narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash)
+                    (narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
+                    (info->narHash.to_string())
+                    (info->narSize)
+                    (concatStringsSep(" ", info->shortRefs()))
+                    (info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "")
+                    (concatStringsSep(" ", info->sigs))
+                    (time(0)).exec();
+
+            } else {
+                state->insertMissingNAR.use()
+                    (cache.id)
+                    (hashPart)
+                    (time(0)).exec();
+            }
+        });
     }
 };
 
diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc
index 201cac671a55..d1042c6de25e 100644
--- a/src/libstore/nar-info.cc
+++ b/src/libstore/nar-info.cc
@@ -59,9 +59,11 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
             }
         }
         else if (name == "Deriver") {
-            auto p = store.storeDir + "/" + value;
-            if (!store.isStorePath(p)) corrupt();
-            deriver = p;
+            if (value != "unknown-deriver") {
+                auto p = store.storeDir + "/" + value;
+                if (!store.isStorePath(p)) corrupt();
+                deriver = p;
+            }
         }
         else if (name == "System")
             system = value;
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index b71c7e905ff1..cf234e35d373 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include "util.hh"
 #include "local-store.hh"
 #include "globals.hh"
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 42c09ec7e0b6..a1f2db5b0ec8 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -40,21 +40,34 @@ template PathSet readStorePaths(Store & store, Source & from);
 template Paths readStorePaths(Store & store, Source & from);
 
 /* TODO: Separate these store impls into different files, give them better names */
-RemoteStore::RemoteStore(const Params & params, size_t maxConnections)
+RemoteStore::RemoteStore(const Params & params)
     : Store(params)
     , connections(make_ref<Pool<Connection>>(
-            maxConnections,
-            [this]() { return openConnection(); },
+            std::max(1, std::stoi(get(params, "max-connections", "1"))),
+            [this]() { return openConnectionWrapper(); },
             [](const ref<Connection> & r) { return r->to.good() && r->from.good(); }
             ))
 {
 }
 
 
-UDSRemoteStore::UDSRemoteStore(const Params & params, size_t maxConnections)
+ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper()
+{
+    if (failed)
+        throw Error("opening a connection to remote store ‘%s’ previously failed", getUri());
+    try {
+        return openConnection();
+    } catch (...) {
+        failed = true;
+        throw;
+    }
+}
+
+
+UDSRemoteStore::UDSRemoteStore(const Params & params)
     : Store(params)
     , LocalFSStore(params)
-    , RemoteStore(params, maxConnections)
+    , RemoteStore(params)
 {
 }
 
@@ -108,7 +121,7 @@ void RemoteStore::initConnection(Connection & conn)
         unsigned int magic = readInt(conn.from);
         if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
 
-        conn.daemonVersion = readInt(conn.from);
+        conn.from >> conn.daemonVersion;
         if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
             throw Error("Nix daemon protocol version not supported");
         if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10)
@@ -129,7 +142,7 @@ void RemoteStore::initConnection(Connection & conn)
         conn.processStderr();
     }
     catch (Error & e) {
-        throw Error(format("cannot start daemon worker: %1%") % e.msg());
+        throw Error("cannot open connection to remote store ‘%s’: %s", getUri(), e.what());
     }
 
     setOptions(conn);
@@ -170,12 +183,11 @@ bool RemoteStore::isValidPathUncached(const Path & path)
     auto conn(connections->get());
     conn->to << wopIsValidPath << path;
     conn->processStderr();
-    unsigned int reply = readInt(conn->from);
-    return reply != 0;
+    return readInt(conn->from);
 }
 
 
-PathSet RemoteStore::queryValidPaths(const PathSet & paths)
+PathSet RemoteStore::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
 {
     auto conn(connections->get());
     if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
@@ -246,8 +258,8 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
 
         conn->to << wopQuerySubstitutablePathInfos << paths;
         conn->processStderr();
-        unsigned int count = readInt(conn->from);
-        for (unsigned int n = 0; n < count; n++) {
+        size_t count = readNum<size_t>(conn->from);
+        for (size_t n = 0; n < count; n++) {
             Path path = readStorePath(*this, conn->from);
             SubstitutablePathInfo & info(infos[path]);
             info.deriver = readString(conn->from);
@@ -277,7 +289,7 @@ void RemoteStore::queryPathInfoUncached(const Path & path,
             throw;
         }
         if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
-            bool valid = readInt(conn->from) != 0;
+            bool valid; conn->from >> valid;
             if (!valid) throw InvalidPath(format("path ‘%s’ is not valid") % path);
         }
         auto info = std::make_shared<ValidPathInfo>();
@@ -286,12 +298,11 @@ void RemoteStore::queryPathInfoUncached(const Path & path,
         if (info->deriver != "") assertStorePath(info->deriver);
         info->narHash = parseHash(htSHA256, readString(conn->from));
         info->references = readStorePaths<PathSet>(*this, conn->from);
-        info->registrationTime = readInt(conn->from);
-        info->narSize = readLongLong(conn->from);
+        conn->from >> info->registrationTime >> info->narSize;
         if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
-            info->ultimate = readInt(conn->from) != 0;
+            conn->from >> info->ultimate;
             info->sigs = readStrings<StringSet>(conn->from);
-            info->ca = readString(conn->from);
+            conn->from >> info->ca;
         }
         return info;
     });
@@ -380,8 +391,9 @@ void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string>
         conn->to << wopAddToStoreNar
                  << info.path << info.deriver << printHash(info.narHash)
                  << info.references << info.registrationTime << info.narSize
-                 << info.ultimate << info.sigs << *nar << repair << dontCheckSigs;
-        // FIXME: don't send nar as a string
+                 << info.ultimate << info.sigs << info.ca
+                 << repair << dontCheckSigs;
+        conn->to(*nar);
         conn->processStderr();
     }
 }
@@ -515,7 +527,7 @@ Roots RemoteStore::findRoots()
     auto conn(connections->get());
     conn->to << wopFindRoots;
     conn->processStderr();
-    unsigned int count = readInt(conn->from);
+    size_t count = readNum<size_t>(conn->from);
     Roots result;
     while (count--) {
         Path link = readString(conn->from);
@@ -563,7 +575,7 @@ bool RemoteStore::verifyStore(bool checkContents, bool repair)
     auto conn(connections->get());
     conn->to << wopVerifyStore << checkContents << repair;
     conn->processStderr();
-    return readInt(conn->from) != 0;
+    return readInt(conn->from);
 }
 
 
@@ -599,7 +611,7 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
         }
         else if (msg == STDERR_READ) {
             if (!source) throw Error("no source");
-            size_t len = readInt(from);
+            size_t len = readNum<size_t>(from);
             auto buf = std::make_unique<unsigned char[]>(len);
             writeString(buf.get(), source->read(buf.get(), len), to);
             to.flush();
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 40f17da300d0..a08bd305639d 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -22,13 +22,13 @@ class RemoteStore : public virtual Store
 {
 public:
 
-    RemoteStore(const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max());
+    RemoteStore(const Params & params);
 
     /* Implementations of abstract store API methods. */
 
     bool isValidPathUncached(const Path & path) override;
 
-    PathSet queryValidPaths(const PathSet & paths) override;
+    PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override;
 
     PathSet queryAllValidPaths() override;
 
@@ -98,6 +98,8 @@ protected:
         void processStderr(Sink * sink = 0, Source * source = 0);
     };
 
+    ref<Connection> openConnectionWrapper();
+
     virtual ref<Connection> openConnection() = 0;
 
     void initConnection(Connection & conn);
@@ -106,6 +108,8 @@ protected:
 
 private:
 
+    std::atomic_bool failed{false};
+
     void setOptions(Connection & conn);
 };
 
@@ -113,7 +117,7 @@ class UDSRemoteStore : public LocalFSStore, public RemoteStore
 {
 public:
 
-    UDSRemoteStore(const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max());
+    UDSRemoteStore(const Params & params);
 
     std::string getUri() override;
 
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index ccb71f1eefe5..3053f908c4e2 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -1,15 +1,17 @@
-#include "config.h"
-
 #if ENABLE_S3
-#if __linux__
 
+#include "s3.hh"
 #include "s3-binary-cache-store.hh"
 #include "nar-info.hh"
 #include "nar-info-disk-cache.hh"
 #include "globals.hh"
+#include "compression.hh"
+#include "download.hh"
+#include "istringstream_nocopy.hh"
 
 #include <aws/core/Aws.h>
 #include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/client/DefaultRetryStrategy.h>
 #include <aws/s3/S3Client.h>
 #include <aws/s3/model/CreateBucketRequest.h>
 #include <aws/s3/model/GetBucketLocationRequest.h>
@@ -20,15 +22,6 @@
 
 namespace nix {
 
-struct istringstream_nocopy : public std::stringstream
-{
-    istringstream_nocopy(const std::string & s)
-    {
-        rdbuf()->pubsetbuf(
-            (char *) s.data(), s.size());
-    }
-};
-
 struct S3Error : public Error
 {
     Aws::S3::S3Errors err;
@@ -62,21 +55,92 @@ static void initAWS()
     });
 }
 
+S3Helper::S3Helper(const string & region)
+    : config(makeConfig(region))
+    , client(make_ref<Aws::S3::S3Client>(*config))
+{
+}
+
+/* Log AWS retries. */
+class RetryStrategy : public Aws::Client::DefaultRetryStrategy
+{
+    long CalculateDelayBeforeNextRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override
+    {
+        auto res = Aws::Client::DefaultRetryStrategy::CalculateDelayBeforeNextRetry(error, attemptedRetries);
+        printError("AWS error '%s' (%s), will retry in %d ms",
+            error.GetExceptionName(), error.GetMessage(), res);
+        return res;
+    }
+};
+
+ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region)
+{
+    initAWS();
+    auto res = make_ref<Aws::Client::ClientConfiguration>();
+    res->region = region;
+    res->requestTimeoutMs = 600 * 1000;
+    res->retryStrategy = std::make_shared<RetryStrategy>();
+    res->caFile = settings.caFile;
+    return res;
+}
+
+S3Helper::DownloadResult S3Helper::getObject(
+    const std::string & bucketName, const std::string & key)
+{
+    debug("fetching ‘s3://%s/%s’...", bucketName, key);
+
+    auto request =
+        Aws::S3::Model::GetObjectRequest()
+        .WithBucket(bucketName)
+        .WithKey(key);
+
+    request.SetResponseStreamFactory([&]() {
+        return Aws::New<std::stringstream>("STRINGSTREAM");
+    });
+
+    DownloadResult res;
+
+    auto now1 = std::chrono::steady_clock::now();
+
+    try {
+
+        auto result = checkAws(fmt("AWS error fetching ‘%s’", key),
+            client->GetObject(request));
+
+        res.data = decodeContent(
+            result.GetContentEncoding(),
+            make_ref<std::string>(
+                dynamic_cast<std::stringstream &>(result.GetBody()).str()));
+
+    } catch (S3Error & e) {
+        if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
+    }
+
+    auto now2 = std::chrono::steady_clock::now();
+
+    res.durationMs = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+
+    return res;
+}
+
 struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
 {
     std::string bucketName;
 
-    ref<Aws::Client::ClientConfiguration> config;
-    ref<Aws::S3::S3Client> client;
-
     Stats stats;
 
+    S3Helper s3Helper;
+
+    std::string narinfoCompression, lsCompression, logCompression;
+
     S3BinaryCacheStoreImpl(
         const Params & params, const std::string & bucketName)
         : S3BinaryCacheStore(params)
         , bucketName(bucketName)
-        , config(makeConfig())
-        , client(make_ref<Aws::S3::S3Client>(*config))
+        , s3Helper(get(params, "aws-region", Aws::Region::US_EAST_1))
+        , narinfoCompression(get(params, "narinfo-compression", ""))
+        , lsCompression(get(params, "ls-compression", ""))
+        , logCompression(get(params, "log-compression", ""))
     {
         diskCache = getNarInfoDiskCache();
     }
@@ -86,15 +150,6 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         return "s3://" + bucketName;
     }
 
-    ref<Aws::Client::ClientConfiguration> makeConfig()
-    {
-        initAWS();
-        auto res = make_ref<Aws::Client::ClientConfiguration>();
-        res->region = Aws::Region::US_EAST_1; // FIXME: make configurable
-        res->requestTimeoutMs = 600 * 1000;
-        return res;
-    }
-
     void init() override
     {
         if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) {
@@ -102,7 +157,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
             /* Create the bucket if it doesn't already exist. */
             // FIXME: HeadBucket would be more appropriate, but doesn't return
             // an easily parsed 404 message.
-            auto res = client->GetBucketLocation(
+            auto res = s3Helper.client->GetBucketLocation(
                 Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName));
 
             if (!res.IsSuccess()) {
@@ -110,7 +165,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
                     throw Error(format("AWS error checking bucket ‘%s’: %s") % bucketName % res.GetError().GetMessage());
 
                 checkAws(format("AWS error creating bucket ‘%s’") % bucketName,
-                    client->CreateBucket(
+                    s3Helper.client->CreateBucket(
                         Aws::S3::Model::CreateBucketRequest()
                         .WithBucket(bucketName)
                         .WithCreateBucketConfiguration(
@@ -148,7 +203,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
     {
         stats.head++;
 
-        auto res = client->HeadObject(
+        auto res = s3Helper.client->HeadObject(
             Aws::S3::Model::HeadObjectRequest()
             .WithBucket(bucketName)
             .WithKey(path));
@@ -164,13 +219,20 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         return true;
     }
 
-    void upsertFile(const std::string & path, const std::string & data) override
+    void uploadFile(const std::string & path, const std::string & data,
+        const std::string & mimeType,
+        const std::string & contentEncoding)
     {
         auto request =
             Aws::S3::Model::PutObjectRequest()
             .WithBucket(bucketName)
             .WithKey(path);
 
+        request.SetContentType(mimeType);
+
+        if (contentEncoding != "")
+            request.SetContentEncoding(contentEncoding);
+
         auto stream = std::make_shared<istringstream_nocopy>(data);
 
         request.SetBody(stream);
@@ -181,7 +243,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         auto now1 = std::chrono::steady_clock::now();
 
         auto result = checkAws(format("AWS error uploading ‘%s’") % path,
-            client->PutObject(request));
+            s3Helper.client->PutObject(request));
 
         auto now2 = std::chrono::steady_clock::now();
 
@@ -193,6 +255,19 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         stats.putTimeMs += duration;
     }
 
+    void upsertFile(const std::string & path, const std::string & data,
+        const std::string & mimeType) override
+    {
+        if (narinfoCompression != "" && hasSuffix(path, ".narinfo"))
+            uploadFile(path, *compress(narinfoCompression, data), mimeType, narinfoCompression);
+        else if (lsCompression != "" && hasSuffix(path, ".ls"))
+            uploadFile(path, *compress(lsCompression, data), mimeType, lsCompression);
+        else if (logCompression != "" && hasPrefix(path, "log/"))
+            uploadFile(path, *compress(logCompression, data), mimeType, logCompression);
+        else
+            uploadFile(path, data, mimeType, "");
+    }
+
     void getFile(const std::string & path,
         std::function<void(std::shared_ptr<std::string>)> success,
         std::function<void(std::exception_ptr exc)> failure) override
@@ -200,42 +275,18 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
             debug(format("fetching ‘s3://%1%/%2%’...") % bucketName % path);
 
-            auto request =
-                Aws::S3::Model::GetObjectRequest()
-                .WithBucket(bucketName)
-                .WithKey(path);
-
-            request.SetResponseStreamFactory([&]() {
-                return Aws::New<std::stringstream>("STRINGSTREAM");
-            });
-
             stats.get++;
 
-            try {
-
-                auto now1 = std::chrono::steady_clock::now();
+            auto res = s3Helper.getObject(bucketName, path);
 
-                auto result = checkAws(format("AWS error fetching ‘%s’") % path,
-                    client->GetObject(request));
+            stats.getBytes += res.data ? res.data->size() : 0;
+            stats.getTimeMs += res.durationMs;
 
-                auto now2 = std::chrono::steady_clock::now();
+            if (res.data)
+                printTalkative("downloaded ‘s3://%s/%s’ (%d bytes) in %d ms",
+                    bucketName, path, res.data->size(), res.durationMs);
 
-                auto res = dynamic_cast<std::stringstream &>(result.GetBody()).str();
-
-                auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
-
-                printMsg(lvlTalkative, format("downloaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
-                    % bucketName % path % res.size() % duration);
-
-                stats.getBytes += res.size();
-                stats.getTimeMs += duration;
-
-                return std::make_shared<std::string>(res);
-
-            } catch (S3Error & e) {
-                if (e.err == Aws::S3::S3Errors::NO_SUCH_KEY) return std::shared_ptr<std::string>();
-                throw;
-            }
+            return res.data;
         });
     }
 
@@ -248,7 +299,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
             debug(format("listing bucket ‘s3://%s’ from key ‘%s’...") % bucketName % marker);
 
             auto res = checkAws(format("AWS error listing bucket ‘%s’") % bucketName,
-                client->ListObjects(
+                s3Helper.client->ListObjects(
                     Aws::S3::Model::ListObjectsRequest()
                     .WithBucket(bucketName)
                     .WithDelimiter("/")
@@ -286,4 +337,3 @@ static RegisterStoreImplementation regStore([](
 }
 
 #endif
-#endif
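
The rewritten S3 store reads its region and optional per-file-type compression from store-URI parameters. A hypothetical cache URI using them could be:

    s3://example-nix-cache?aws-region=eu-west-1&narinfo-compression=xz&ls-compression=xz&log-compression=bzip2

The compression names must be ones compress() understands; bzip2 is used elsewhere in this diff, xz is assumed here.
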
diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh
new file mode 100644
index 000000000000..08a7fbf96e98
--- /dev/null
+++ b/src/libstore/s3.hh
@@ -0,0 +1,33 @@
+#pragma once
+
+#if ENABLE_S3
+
+#include "ref.hh"
+
+namespace Aws { namespace Client { class ClientConfiguration; } }
+namespace Aws { namespace S3 { class S3Client; } }
+
+namespace nix {
+
+struct S3Helper
+{
+    ref<Aws::Client::ClientConfiguration> config;
+    ref<Aws::S3::S3Client> client;
+
+    S3Helper(const std::string & region);
+
+    ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region);
+
+    struct DownloadResult
+    {
+        std::shared_ptr<std::string> data;
+        unsigned int durationMs;
+    };
+
+    DownloadResult getObject(
+        const std::string & bucketName, const std::string & key);
+};
+
+}
+
+#endif
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 0197b091cd12..a81e62dbd6eb 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -3,36 +3,25 @@
 
 #include <sqlite3.h>
 
+#include <atomic>
+
 namespace nix {
 
 [[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f)
 {
     int err = sqlite3_errcode(db);
+
+    auto path = sqlite3_db_filename(db, nullptr);
+    if (!path) path = "(in-memory)";
+
     if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
-        if (err == SQLITE_PROTOCOL)
-            printError("warning: SQLite database is busy (SQLITE_PROTOCOL)");
-        else {
-            static bool warned = false;
-            if (!warned) {
-                printError("warning: SQLite database is busy");
-                warned = true;
-            }
-        }
-        /* Sleep for a while since retrying the transaction right away
-           is likely to fail again. */
-        checkInterrupt();
-#if HAVE_NANOSLEEP
-        struct timespec t;
-        t.tv_sec = 0;
-        t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */
-        nanosleep(&t, 0);
-#else
-        sleep(1);
-#endif
-        throw SQLiteBusy(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
+        throw SQLiteBusy(
+            err == SQLITE_PROTOCOL
+            ? fmt("SQLite database ‘%s’ is busy (SQLITE_PROTOCOL)", path)
+            : fmt("SQLite database ‘%s’ is busy", path));
     }
     else
-        throw SQLiteError(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
+        throw SQLiteError("%s: %s (in ‘%s’)", f.str(), sqlite3_errstr(err), path);
 }
 
 SQLite::SQLite(const Path & path)
@@ -54,24 +43,27 @@ SQLite::~SQLite()
 
 void SQLite::exec(const std::string & stmt)
 {
-    if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK)
-        throwSQLiteError(db, format("executing SQLite statement ‘%s’") % stmt);
+    retrySQLite<void>([&]() {
+        if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK)
+            throwSQLiteError(db, format("executing SQLite statement ‘%s’") % stmt);
+    });
 }
 
-void SQLiteStmt::create(sqlite3 * db, const string & s)
+void SQLiteStmt::create(sqlite3 * db, const string & sql)
 {
     checkInterrupt();
     assert(!stmt);
-    if (sqlite3_prepare_v2(db, s.c_str(), -1, &stmt, 0) != SQLITE_OK)
-        throwSQLiteError(db, "creating statement");
+    if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, 0) != SQLITE_OK)
+        throwSQLiteError(db, fmt("creating statement ‘%s’", sql));
     this->db = db;
+    this->sql = sql;
 }
 
 SQLiteStmt::~SQLiteStmt()
 {
     try {
         if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
-            throwSQLiteError(db, "finalizing statement");
+            throwSQLiteError(db, fmt("finalizing statement ‘%s’", sql));
     } catch (...) {
         ignoreException();
     }
@@ -128,14 +120,14 @@ void SQLiteStmt::Use::exec()
     int r = step();
     assert(r != SQLITE_ROW);
     if (r != SQLITE_DONE)
-        throwSQLiteError(stmt.db, "executing SQLite statement");
+        throwSQLiteError(stmt.db, fmt("executing SQLite statement ‘%s’", stmt.sql));
 }
 
 bool SQLiteStmt::Use::next()
 {
     int r = step();
     if (r != SQLITE_DONE && r != SQLITE_ROW)
-        throwSQLiteError(stmt.db, "executing SQLite query");
+        throwSQLiteError(stmt.db, fmt("executing SQLite query ‘%s’", stmt.sql));
     return r == SQLITE_ROW;
 }
 
@@ -182,4 +174,24 @@ SQLiteTxn::~SQLiteTxn()
     }
 }
 
+void handleSQLiteBusy(const SQLiteBusy & e)
+{
+    static std::atomic<time_t> lastWarned{0};
+
+    time_t now = time(0);
+
+    if (now > lastWarned + 10) {
+        lastWarned = now;
+        printError("warning: %s", e.what());
+    }
+
+    /* Sleep for a while since retrying the transaction right away
+       is likely to fail again. */
+    checkInterrupt();
+    struct timespec t;
+    t.tv_sec = 0;
+    t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */
+    nanosleep(&t, 0);
+}
+
 }
diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh
index 4d347a2e56ab..14a7a0dd8996 100644
--- a/src/libstore/sqlite.hh
+++ b/src/libstore/sqlite.hh
@@ -30,8 +30,9 @@ struct SQLiteStmt
 {
     sqlite3 * db = 0;
     sqlite3_stmt * stmt = 0;
+    std::string sql;
     SQLiteStmt() { }
-    SQLiteStmt(sqlite3 * db, const std::string & s) { create(db, s); }
+    SQLiteStmt(sqlite3 * db, const std::string & sql) { create(db, sql); }
     void create(sqlite3 * db, const std::string & s);
     ~SQLiteStmt();
     operator sqlite3_stmt * () { return stmt; }
@@ -94,6 +95,8 @@ MakeError(SQLiteBusy, SQLiteError);
 
 [[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f);
 
+void handleSQLiteBusy(const SQLiteBusy & e);
+
 /* Convenience function for retrying a SQLite transaction when the
    database is busy. */
 template<typename T>
@@ -103,6 +106,7 @@ T retrySQLite(std::function<T()> fun)
         try {
             return fun();
         } catch (SQLiteBusy & e) {
+            handleSQLiteBusy(e);
         }
     }
 }
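
retrySQLite simply re-invokes the callback whenever it throws SQLiteBusy, with handleSQLiteBusy (sqlite.cc above) supplying the rate-limited warning and the short randomized sleep. A minimal usage sketch in the style of nar-info-disk-cache.cc, where db and the SQL text are placeholders:

    // Retry a busy-prone statement until it succeeds.
    retrySQLite<void>([&]() {
        SQLiteStmt(db, "delete from NARs where present = 0").use().exec();
    });
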
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
index 6f1862afa899..2a81a8b1ebe5 100644
--- a/src/libstore/ssh-store.cc
+++ b/src/libstore/ssh-store.cc
@@ -4,18 +4,33 @@
 #include "archive.hh"
 #include "worker-protocol.hh"
 #include "pool.hh"
+#include "ssh.hh"
 
 namespace nix {
 
-static std::string uriScheme = "ssh://";
+static std::string uriScheme = "ssh-ng://";
 
 class SSHStore : public RemoteStore
 {
 public:
 
-    SSHStore(string host, const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max());
+    SSHStore(const std::string & host, const Params & params)
+        : Store(params)
+        , RemoteStore(params)
+        , host(host)
+        , master(
+            host,
+            get(params, "ssh-key", ""),
+            // Use SSH master only if using more than 1 connection.
+            connections->capacity() > 1,
+            get(params, "compress", "") == "true")
+    {
+    }
 
-    std::string getUri() override;
+    std::string getUri() override
+    {
+        return uriScheme + host;
+    }
 
     void narFromPath(const Path & path, Sink & sink) override;
 
@@ -25,43 +40,16 @@ private:
 
     struct Connection : RemoteStore::Connection
     {
-        Pid sshPid;
-        AutoCloseFD out;
-        AutoCloseFD in;
+        std::unique_ptr<SSHMaster::Connection> sshConn;
     };
 
     ref<RemoteStore::Connection> openConnection() override;
 
-    AutoDelete tmpDir;
-
-    Path socketPath;
-
-    Pid sshMaster;
-
-    string host;
-
-    Path key;
+    std::string host;
 
-    bool compress;
+    SSHMaster master;
 };
 
-SSHStore::SSHStore(string host, const Params & params, size_t maxConnections)
-    : Store(params)
-    , RemoteStore(params, maxConnections)
-    , tmpDir(createTempDir("", "nix", true, true, 0700))
-    , socketPath((Path) tmpDir + "/ssh.sock")
-    , host(std::move(host))
-    , key(get(params, "ssh-key", ""))
-    , compress(get(params, "compress", "") == "true")
-{
-    /* open a connection and perform the handshake to verify all is well */
-    connections->get();
-}
-
-string SSHStore::getUri()
-{
-    return uriScheme + host;
-}
 
 class ForwardSource : public Source
 {
@@ -94,35 +82,10 @@ ref<FSAccessor> SSHStore::getFSAccessor()
 
 ref<RemoteStore::Connection> SSHStore::openConnection()
 {
-    if ((pid_t) sshMaster == -1) {
-        sshMaster = startProcess([&]() {
-            restoreSignals();
-            if (key.empty())
-                execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), host.c_str(), NULL);
-            else
-                execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), "-i", key.c_str(), host.c_str(), NULL);
-            throw SysError("starting ssh master");
-        });
-    }
-
     auto conn = make_ref<Connection>();
-    Pipe in, out;
-    in.create();
-    out.create();
-    conn->sshPid = startProcess([&]() {
-        if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
-            throw SysError("duping over STDIN");
-        if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
-            throw SysError("duping over STDOUT");
-        execlp("ssh", "ssh", "-S", socketPath.c_str(), host.c_str(), "nix-daemon", "--stdio", NULL);
-        throw SysError("executing nix-daemon --stdio over ssh");
-    });
-    in.readSide = -1;
-    out.writeSide = -1;
-    conn->out = std::move(out.readSide);
-    conn->in = std::move(in.writeSide);
-    conn->to = FdSink(conn->in.get());
-    conn->from = FdSource(conn->out.get());
+    conn->sshConn = master.startCommand("nix-daemon --stdio");
+    conn->to = FdSink(conn->sshConn->in.get());
+    conn->from = FdSource(conn->sshConn->out.get());
     initConnection(*conn);
     return conn;
 }
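
Note: after this rewrite the RemoteStore-over-SSH transport is reached through the new ‘ssh-ng://’ scheme, with its options taken from the store URI's query string. A hedged usage sketch (the host name is made up, and it assumes this tree's libstore with a store registration matching the uriScheme above):

    #include "store-api.hh"
    #include <iostream>

    using namespace nix;

    int main()
    {
        // "compress" and "ssh-key" come from the query parameters; SSHStore
        // only starts an SSH master when it may open more than one connection.
        auto store = openStore("ssh-ng://builder.example.org?compress=true");
        std::cout << store->getUri() << "\n";  // prints ssh-ng://builder.example.org
    }
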
diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc
new file mode 100644
index 000000000000..e54f3f4ba284
--- /dev/null
+++ b/src/libstore/ssh.cc
@@ -0,0 +1,102 @@
+#include "ssh.hh"
+
+namespace nix {
+
+void SSHMaster::addCommonSSHOpts(Strings & args)
+{
+    for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS")))
+        args.push_back(i);
+    if (!keyFile.empty())
+        args.insert(args.end(), {"-i", keyFile});
+    if (compress)
+        args.push_back("-C");
+}
+
+std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string & command)
+{
+    Path socketPath = startMaster();
+
+    Pipe in, out;
+    in.create();
+    out.create();
+
+    auto conn = std::make_unique<Connection>();
+    conn->sshPid = startProcess([&]() {
+        restoreSignals();
+
+        close(in.writeSide.get());
+        close(out.readSide.get());
+
+        if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
+            throw SysError("duping over stdin");
+        if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+            throw SysError("duping over stdout");
+
+        Strings args = { "ssh", host.c_str(), "-x", "-a" };
+        addCommonSSHOpts(args);
+        if (socketPath != "")
+            args.insert(args.end(), {"-S", socketPath});
+        args.push_back(command);
+        execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
+
+        throw SysError("executing ‘%s’ on ‘%s’", command, host);
+    });
+
+
+    in.readSide = -1;
+    out.writeSide = -1;
+
+    conn->out = std::move(out.readSide);
+    conn->in = std::move(in.writeSide);
+
+    return conn;
+}
+
+Path SSHMaster::startMaster()
+{
+    if (!useMaster) return "";
+
+    auto state(state_.lock());
+
+    if (state->sshMaster != -1) return state->socketPath;
+
+    state->tmpDir = std::make_unique<AutoDelete>(createTempDir("", "nix", true, true, 0700));
+
+    state->socketPath = (Path) *state->tmpDir + "/ssh.sock";
+
+    Pipe out;
+    out.create();
+
+    state->sshMaster = startProcess([&]() {
+        restoreSignals();
+
+        close(out.readSide.get());
+
+        if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+            throw SysError("duping over stdout");
+
+        Strings args =
+            { "ssh", host.c_str(), "-M", "-N", "-S", state->socketPath
+            , "-o", "LocalCommand=echo started"
+            , "-o", "PermitLocalCommand=yes"
+            };
+        addCommonSSHOpts(args);
+        execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
+
+        throw SysError("starting SSH master");
+    });
+
+    out.writeSide = -1;
+
+    std::string reply;
+    try {
+        reply = readLine(out.readSide.get());
+    } catch (EndOfFile & e) { }
+
+    if (reply != "started")
+        throw Error("failed to start SSH master connection to ‘%s’", host);
+
+    return state->socketPath;
+}
+
+}
diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh
new file mode 100644
index 000000000000..b4396467e54e
--- /dev/null
+++ b/src/libstore/ssh.hh
@@ -0,0 +1,49 @@
+#pragma once
+
+#include "util.hh"
+#include "sync.hh"
+
+namespace nix {
+
+class SSHMaster
+{
+private:
+
+    const std::string host;
+    const std::string keyFile;
+    const bool useMaster;
+    const bool compress;
+
+    struct State
+    {
+        Pid sshMaster;
+        std::unique_ptr<AutoDelete> tmpDir;
+        Path socketPath;
+    };
+
+    Sync<State> state_;
+
+    void addCommonSSHOpts(Strings & args);
+
+public:
+
+    SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress)
+        : host(host)
+        , keyFile(keyFile)
+        , useMaster(useMaster)
+        , compress(compress)
+    {
+    }
+
+    struct Connection
+    {
+        Pid sshPid;
+        AutoCloseFD out, in;
+    };
+
+    std::unique_ptr<Connection> startCommand(const std::string & command);
+
+    Path startMaster();
+};
+
+}
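
Note: SSHMaster factors the connection-sharing logic out of SSHStore: startMaster() lazily spawns a single ‘ssh -M’ master process, and startCommand() runs a remote command whose stdin/stdout are exposed as file descriptors. A sketch of the intended call pattern (assumes this tree's headers and a host, here hypothetical, reachable over non-interactive SSH):

    #include "ssh.hh"
    #include "serialise.hh"

    using namespace nix;

    int main()
    {
        SSHMaster master("builder.example.org",
            /* keyFile */ "", /* useMaster */ true, /* compress */ false);

        // Runs the command remotely; conn->in feeds its stdin, conn->out is its stdout.
        auto conn = master.startCommand("nix-daemon --stdio");

        FdSink to(conn->in.get());
        FdSource from(conn->out.get());
        // ... speak the daemon protocol over to/from, as SSHStore::openConnection() does ...
    }
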
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index b5934a0d1232..441166d04d8f 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -377,7 +377,7 @@ void Store::queryPathInfo(const Path & storePath,
 }
 
 
-PathSet Store::queryValidPaths(const PathSet & paths)
+PathSet Store::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
 {
     struct State
     {
@@ -550,6 +550,8 @@ void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
     for (auto & path : storePaths)
         srcStore->computeFSClosure(path, closure);
 
+    // FIXME: use copyStorePaths()
+
     PathSet valid = dstStore->queryValidPaths(closure);
 
     if (valid.size() == closure.size()) return;
@@ -676,6 +678,12 @@ Strings ValidPathInfo::shortRefs() const
 }
 
 
+std::string makeFixedOutputCA(bool recursive, const Hash & hash)
+{
+    return "fixed:" + (recursive ? (std::string) "r:" : "") + hash.to_string();
+}
+
+
 }
 
 
@@ -702,7 +710,11 @@ ref<Store> openStore(const std::string & uri_)
         }
         uri = uri_.substr(0, q);
     }
+    return openStore(uri, params);
+}
 
+ref<Store> openStore(const std::string & uri, const Store::Params & params)
+{
     for (auto fun : *RegisterStoreImplementation::implementations) {
         auto store = fun(uri, params);
         if (store) return ref<Store>(store);
@@ -766,10 +778,11 @@ std::list<ref<Store>> getDefaultSubstituters()
         state->stores.push_back(openStore(uri));
     };
 
-    for (auto uri : settings.get("substituters", Strings()))
-        addStore(uri);
+    Strings defaultSubstituters;
+    if (settings.nixStore == "/nix/store")
+        defaultSubstituters.push_back("https://cache.nixos.org/");
 
-    for (auto uri : settings.get("binary-caches", Strings()))
+    for (auto uri : settings.get("substituters", settings.get("binary-caches", defaultSubstituters)))
         addStore(uri);
 
     for (auto uri : settings.get("extra-binary-caches", Strings()))
@@ -781,37 +794,25 @@ std::list<ref<Store>> getDefaultSubstituters()
 }
 
 
-void copyPaths(ref<Store> from, ref<Store> to, const Paths & storePaths, bool substitute)
-{
-    if (substitute) {
-        /* Filter out .drv files (we don't want to build anything). */
-        PathSet paths2;
-        for (auto & path : storePaths)
-            if (!isDerivation(path)) paths2.insert(path);
-        unsigned long long downloadSize, narSize;
-        PathSet willBuild, willSubstitute, unknown;
-        to->queryMissing(PathSet(paths2.begin(), paths2.end()),
-            willBuild, willSubstitute, unknown, downloadSize, narSize);
-        /* FIXME: should use ensurePath(), but it only
-           does one path at a time. */
-        if (!willSubstitute.empty())
-            try {
-                to->buildPaths(willSubstitute);
-            } catch (Error & e) {
-                printMsg(lvlError, format("warning: %1%") % e.msg());
-            }
-    }
+void copyPaths(ref<Store> from, ref<Store> to, const PathSet & storePaths, bool substitute)
+{
+    PathSet valid = to->queryValidPaths(storePaths, substitute);
+
+    PathSet missing;
+    for (auto & path : storePaths)
+        if (!valid.count(path)) missing.insert(path);
 
     std::string copiedLabel = "copied";
 
-    logger->setExpected(copiedLabel, storePaths.size());
+    logger->setExpected(copiedLabel, missing.size());
 
     ThreadPool pool;
 
     processGraph<Path>(pool,
-        PathSet(storePaths.begin(), storePaths.end()),
+        PathSet(missing.begin(), missing.end()),
 
         [&](const Path & storePath) {
+            if (to->isValidPath(storePath)) return PathSet();
             return from->queryPathInfo(storePath)->references;
         },
 
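
Note: copyPaths() now asks the destination which paths are already valid (optionally substituting them first) and only copies what is missing. A hedged call-site sketch, assuming two stores this tree can open (the destination URI is illustrative):

    #include "store-api.hh"

    using namespace nix;

    int main()
    {
        auto from = openStore();                                  // the default store
        auto to = openStore("file:///tmp/example-binary-cache");  // illustrative destination

        PathSet paths = from->queryAllValidPaths();

        // Paths that are already valid (or substitutable) on 'to' are skipped.
        copyPaths(from, to, paths, /* substitute */ true);
    }
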
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index d03e70849f93..98f2803f8136 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -128,7 +128,7 @@ struct ValidPathInfo
        of an output path of a derivation were actually produced by
        that derivation. In the intensional model, we have to trust
        that a particular output path was produced by a derivation; the
-       path name then implies the contents.)
+       path then implies the contents.)
 
        Ideally, the content-addressability assertion would just be a
        Boolean, and the store path would be computed from
@@ -324,8 +324,10 @@ protected:
 
 public:
 
-    /* Query which of the given paths is valid. */
-    virtual PathSet queryValidPaths(const PathSet & paths);
+    /* Query which of the given paths is valid. Optionally, try to
+       substitute missing paths. */
+    virtual PathSet queryValidPaths(const PathSet & paths,
+        bool maybeSubstitute = false);
 
     /* Query the set of all valid paths. Note that for some store
        backends, the name part of store paths may be omitted
@@ -511,7 +513,7 @@ public:
        `storePath' is returned; that is, the closures under the
        `referrers' relation instead of the `references' relation is
        returned. */
-    void computeFSClosure(const PathSet & paths,
+    virtual void computeFSClosure(const PathSet & paths,
         PathSet & out, bool flipDirection = false,
         bool includeOutputs = false, bool includeDerivers = false);
 
@@ -566,6 +568,11 @@ public:
        if they lack a signature. */
     virtual bool isTrusted() { return false; }
 
+    /* Return the build log of the specified store path, if available,
+       or null otherwise. */
+    virtual std::shared_ptr<std::string> getBuildLog(const Path & path)
+    { return nullptr; }
+
 protected:
 
     Stats stats;
@@ -579,6 +586,7 @@ public:
     const Path rootDir;
     const Path stateDir;
     const Path logDir;
+    const static string drvsLogDir;
 
     LocalFSStore(const Params & params);
 
@@ -595,6 +603,8 @@ public:
     {
         return getRealStoreDir() + "/" + baseNameOf(storePath);
     }
+
+    std::shared_ptr<std::string> getBuildLog(const Path & path) override;
 };
 
 
@@ -642,8 +652,10 @@ void removeTempRoots();
    set to true *unless* you're going to collect garbage. */
 ref<Store> openStore(const std::string & uri = getEnv("NIX_REMOTE"));
 
+ref<Store> openStore(const std::string & uri, const Store::Params & params);
+
 
-void copyPaths(ref<Store> from, ref<Store> to, const Paths & storePaths, bool substitute = false);
+void copyPaths(ref<Store> from, ref<Store> to, const PathSet & storePaths, bool substitute = false);
 
 enum StoreType {
     tDaemon,
@@ -687,6 +699,11 @@ ValidPathInfo decodeValidPathInfo(std::istream & str,
     bool hashGiven = false);
 
 
+/* Compute the content-addressability assertion (ValidPathInfo::ca)
+   for paths created by makeFixedOutputPath() / addToStore(). */
+std::string makeFixedOutputCA(bool recursive, const Hash & hash);
+
+
 MakeError(SubstError, Error)
 MakeError(BuildError, Error) /* denotes a permanent build failure */
 MakeError(InvalidPath, Error)
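
Note: makeFixedOutputCA() just renders the content-addressability assertion as a string. A self-contained mirror of the format, with a placeholder hash for illustration:

    #include <iostream>
    #include <string>

    // Same shape as makeFixedOutputCA(): "fixed:" + optional "r:" (recursive,
    // i.e. NAR-based hashing) + the hash in its printable form.
    std::string fixedOutputCA(bool recursive, const std::string & hash)
    {
        return "fixed:" + std::string(recursive ? "r:" : "") + hash;
    }

    int main()
    {
        // The hash below is a placeholder, not a real store hash.
        std::cout << fixedOutputCA(true, "sha256:placeholderhash") << "\n";
    }
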
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index fbba7f853f95..e0e6f5dfd73c 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include <cerrno>
 #include <algorithm>
 #include <vector>
diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh
index d58b91df0461..607ebf8b28f9 100644
--- a/src/libutil/archive.hh
+++ b/src/libutil/archive.hh
@@ -70,6 +70,13 @@ struct ParseSink
     virtual void createSymlink(const Path & path, const string & target) { };
 };
 
+struct TeeSink : ParseSink
+{
+    TeeSource source;
+
+    TeeSink(Source & source) : source(source) { }
+};
+
 void parseDump(ParseSink & sink, Source & source);
 
 void restorePath(const Path & path, Source & source);
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
index a3bbb5170d9f..b0b1d709fa44 100644
--- a/src/libutil/compression.cc
+++ b/src/libutil/compression.cc
@@ -18,7 +18,7 @@ static ref<std::string> decompressXZ(const std::string & in)
     lzma_ret ret = lzma_stream_decoder(
         &strm, UINT64_MAX, LZMA_CONCATENATED);
     if (ret != LZMA_OK)
-        throw Error("unable to initialise lzma decoder");
+        throw CompressionError("unable to initialise lzma decoder");
 
     Finally free([&]() { lzma_end(&strm); });
 
@@ -48,7 +48,7 @@ static ref<std::string> decompressXZ(const std::string & in)
             return res;
 
         if (ret != LZMA_OK)
-            throw Error("error while decompressing xz file");
+            throw CompressionError("error %d while decompressing xz file", ret);
     }
 }
 
@@ -59,7 +59,7 @@ static ref<std::string> decompressBzip2(const std::string & in)
 
     int ret = BZ2_bzDecompressInit(&strm, 0, 0);
     if (ret != BZ_OK)
-        throw Error("unable to initialise bzip2 decoder");
+        throw CompressionError("unable to initialise bzip2 decoder");
 
     Finally free([&]() { BZ2_bzDecompressEnd(&strm); });
 
@@ -85,10 +85,19 @@ static ref<std::string> decompressBzip2(const std::string & in)
             return res;
 
         if (ret != BZ_OK)
-            throw Error("error while decompressing bzip2 file");
+            throw CompressionError("error while decompressing bzip2 file");
+
+        if (strm.avail_in == 0)
+            throw CompressionError("bzip2 data ends prematurely");
     }
 }
 
+static ref<std::string> decompressBrotli(const std::string & in)
+{
+    // FIXME: use libbrotli
+    return make_ref<std::string>(runProgram(BRO, true, {"-d"}, {in}));
+}
+
 ref<std::string> compress(const std::string & method, const std::string & in)
 {
     StringSink ssink;
@@ -106,6 +115,8 @@ ref<std::string> decompress(const std::string & method, const std::string & in)
         return decompressXZ(in);
     else if (method == "bzip2")
         return decompressBzip2(in);
+    else if (method == "br")
+        return decompressBrotli(in);
     else
         throw UnknownCompressionMethod(format("unknown compression method ‘%s’") % method);
 }
@@ -130,7 +141,7 @@ struct XzSink : CompressionSink
         lzma_ret ret = lzma_easy_encoder(
             &strm, 6, LZMA_CHECK_CRC64);
         if (ret != LZMA_OK)
-            throw Error("unable to initialise lzma encoder");
+            throw CompressionError("unable to initialise lzma encoder");
         // FIXME: apply the x86 BCJ filter?
 
         strm.next_out = outbuf;
@@ -139,7 +150,6 @@ struct XzSink : CompressionSink
 
     ~XzSink()
     {
-        assert(finished);
         lzma_end(&strm);
     }
 
@@ -155,7 +165,7 @@ struct XzSink : CompressionSink
 
             lzma_ret ret = lzma_code(&strm, LZMA_FINISH);
             if (ret != LZMA_OK && ret != LZMA_STREAM_END)
-                throw Error("error while flushing xz file");
+                throw CompressionError("error while flushing xz file");
 
             if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
                 nextSink(outbuf, sizeof(outbuf) - strm.avail_out);
@@ -179,7 +189,7 @@ struct XzSink : CompressionSink
 
             lzma_ret ret = lzma_code(&strm, LZMA_RUN);
             if (ret != LZMA_OK)
-                throw Error("error while compressing xz file");
+                throw CompressionError("error while compressing xz file");
 
             if (strm.avail_out == 0) {
                 nextSink(outbuf, sizeof(outbuf));
@@ -202,7 +212,7 @@ struct BzipSink : CompressionSink
         memset(&strm, 0, sizeof(strm));
         int ret = BZ2_bzCompressInit(&strm, 9, 0, 30);
         if (ret != BZ_OK)
-            throw Error("unable to initialise bzip2 encoder");
+            throw CompressionError("unable to initialise bzip2 encoder");
 
         strm.next_out = outbuf;
         strm.avail_out = sizeof(outbuf);
@@ -210,7 +220,6 @@ struct BzipSink : CompressionSink
 
     ~BzipSink()
     {
-        assert(finished);
         BZ2_bzCompressEnd(&strm);
     }
 
@@ -226,7 +235,7 @@ struct BzipSink : CompressionSink
 
             int ret = BZ2_bzCompress(&strm, BZ_FINISH);
             if (ret != BZ_FINISH_OK && ret != BZ_STREAM_END)
-                throw Error("error while flushing bzip2 file");
+                throw CompressionError("error while flushing bzip2 file");
 
             if (strm.avail_out == 0 || ret == BZ_STREAM_END) {
                 nextSink((unsigned char *) outbuf, sizeof(outbuf) - strm.avail_out);
@@ -250,7 +259,7 @@ struct BzipSink : CompressionSink
 
             int ret = BZ2_bzCompress(&strm, BZ_RUN);
             if (ret != BZ_OK)
-                Error("error while compressing bzip2 file");
+                CompressionError("error while compressing bzip2 file");
 
             if (strm.avail_out == 0) {
                 nextSink((unsigned char *) outbuf, sizeof(outbuf));
@@ -261,6 +270,34 @@ struct BzipSink : CompressionSink
     }
 };
 
+struct BrotliSink : CompressionSink
+{
+    Sink & nextSink;
+    std::string data;
+
+    BrotliSink(Sink & nextSink) : nextSink(nextSink)
+    {
+    }
+
+    ~BrotliSink()
+    {
+    }
+
+    // FIXME: use libbrotli
+
+    void finish() override
+    {
+        flush();
+        nextSink(runProgram(BRO, true, {}, data));
+    }
+
+    void write(const unsigned char * data, size_t len) override
+    {
+        checkInterrupt();
+        this->data.append((const char *) data, len);
+    }
+};
+
 ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink)
 {
     if (method == "none")
@@ -269,6 +306,8 @@ ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & next
         return make_ref<XzSink>(nextSink);
     else if (method == "bzip2")
         return make_ref<BzipSink>(nextSink);
+    else if (method == "br")
+        return make_ref<BrotliSink>(nextSink);
     else
         throw UnknownCompressionMethod(format("unknown compression method ‘%s’") % method);
 }
diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh
index eacf559d65e9..e3e6f5a99303 100644
--- a/src/libutil/compression.hh
+++ b/src/libutil/compression.hh
@@ -21,4 +21,6 @@ ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & next
 
 MakeError(UnknownCompressionMethod, Error);
 
+MakeError(CompressionError, Error);
+
 }
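
Note: the compression changes add ‘br’ (Brotli, currently shelling out to the external bro tool) alongside xz and bzip2, and route failures through the new CompressionError. A usage sketch, assuming this tree's libutil is linked in:

    #include "compression.hh"
    #include <iostream>

    using namespace nix;

    int main()
    {
        std::string data(1000, 'x');

        // "xz" and "bzip2" are handled in-process; "br" looks the same to the
        // caller but requires the external bro binary.
        auto compressed = compress("xz", data);
        auto plain = decompress("xz", *compressed);

        std::cout << "compressed " << data.size() << " -> " << compressed->size() << " bytes\n";
        return *plain == data ? 0 : 1;
    }
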
diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc
index aa50fceb9e3e..9f4afd93c2fc 100644
--- a/src/libutil/hash.cc
+++ b/src/libutil/hash.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include <iostream>
 #include <cstring>
 
@@ -9,12 +7,12 @@
 #include "hash.hh"
 #include "archive.hh"
 #include "util.hh"
+#include "istringstream_nocopy.hh"
 
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
 
-
 namespace nix {
 
 
@@ -106,7 +104,7 @@ Hash parseHash(HashType ht, const string & s)
         string s2(s, i * 2, 2);
         if (!isxdigit(s2[0]) || !isxdigit(s2[1]))
             throw BadHash(format("invalid hash ‘%1%’") % s);
-        std::istringstream str(s2);
+        istringstream_nocopy str(s2);
         int n;
         str >> std::hex >> n;
         hash.hash[i] = n;
diff --git a/src/libutil/istringstream_nocopy.hh b/src/libutil/istringstream_nocopy.hh
new file mode 100644
index 000000000000..f7beac578e39
--- /dev/null
+++ b/src/libutil/istringstream_nocopy.hh
@@ -0,0 +1,92 @@
+/* This file provides a variant of std::istringstream that doesn't
+   copy its string argument. This is useful for large strings. The
+   caller must ensure that the string object is not destroyed while
+   it's referenced by this object. */
+
+#pragma once
+
+#include <string>
+#include <iostream>
+
+template <class CharT, class Traits = std::char_traits<CharT>, class Allocator = std::allocator<CharT>>
+class basic_istringbuf_nocopy : public std::basic_streambuf<CharT, Traits>
+{
+public:
+    typedef std::basic_string<CharT, Traits, Allocator> string_type;
+
+    typedef typename std::basic_streambuf<CharT, Traits>::off_type off_type;
+
+    typedef typename std::basic_streambuf<CharT, Traits>::pos_type pos_type;
+
+    typedef typename std::basic_streambuf<CharT, Traits>::int_type int_type;
+
+    typedef typename std::basic_streambuf<CharT, Traits>::traits_type traits_type;
+
+private:
+    const string_type & s;
+
+    off_type off;
+
+public:
+    basic_istringbuf_nocopy(const string_type & s) : s{s}, off{0}
+    {
+    }
+
+private:
+    pos_type seekoff(off_type off, std::ios_base::seekdir dir, std::ios_base::openmode which)
+    {
+        if (which & std::ios_base::in) {
+            this->off = dir == std::ios_base::beg
+                ? off
+                : (dir == std::ios_base::end
+                    ? s.size() + off
+                    : this->off + off);
+        }
+        return pos_type(this->off);
+    }
+
+    pos_type seekpos(pos_type pos, std::ios_base::openmode which)
+    {
+        return seekoff(pos, std::ios_base::beg, which);
+    }
+
+    std::streamsize showmanyc()
+    {
+        return s.size() - off;
+    }
+
+    int_type underflow()
+    {
+        if (typename string_type::size_type(off) == s.size())
+            return traits_type::eof();
+        return traits_type::to_int_type(s[off]);
+    }
+
+    int_type uflow()
+    {
+        if (typename string_type::size_type(off) == s.size())
+            return traits_type::eof();
+        return traits_type::to_int_type(s[off++]);
+    }
+
+    int_type pbackfail(int_type ch)
+    {
+        if (off == 0 || (ch != traits_type::eof() && ch != s[off - 1]))
+            return traits_type::eof();
+
+        return traits_type::to_int_type(s[--off]);
+    }
+
+};
+
+template <class CharT, class Traits = std::char_traits<CharT>, class Allocator = std::allocator<CharT>>
+class basic_istringstream_nocopy : public std::basic_iostream<CharT, Traits>
+{
+    typedef basic_istringbuf_nocopy<CharT, Traits, Allocator> buf_type;
+    buf_type buf;
+public:
+    basic_istringstream_nocopy(const typename buf_type::string_type & s) :
+        std::basic_iostream<CharT, Traits>(&buf), buf(s) {};
+};
+
+typedef basic_istringstream_nocopy<char> istringstream_nocopy;
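
Note: istringstream_nocopy behaves like std::istringstream for reading but keeps only a reference to the caller's string, which is what the parseHash() change above relies on. A small usage sketch (the string must outlive the stream):

    #include "istringstream_nocopy.hh"
    #include <iostream>

    int main()
    {
        const std::string s = "ff";   // must stay alive for the stream's lifetime
        istringstream_nocopy str(s);

        int n = 0;
        str >> std::hex >> n;         // parses without copying 's'
        std::cout << n << "\n";       // prints 255
    }
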
diff --git a/src/libutil/local.mk b/src/libutil/local.mk
index cac5c8795db7..0721b21c2089 100644
--- a/src/libutil/local.mk
+++ b/src/libutil/local.mk
@@ -9,3 +9,5 @@ libutil_SOURCES := $(wildcard $(d)/*.cc)
 libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS)
 
 libutil_LIBS = libformat
+
+libutil_CXXFLAGS = -DBRO=\"$(bro)\"
diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh
index 3e6c4b54853c..3f83664794f7 100644
--- a/src/libutil/logging.hh
+++ b/src/libutil/logging.hh
@@ -78,6 +78,7 @@ extern Verbosity verbosity; /* suppress msgs > this */
 
 #define printError(args...) printMsg(lvlError, args)
 #define printInfo(args...) printMsg(lvlInfo, args)
+#define printTalkative(args...) printMsg(lvlTalkative, args)
 #define debug(args...) printMsg(lvlDebug, args)
 #define vomit(args...) printMsg(lvlVomit, args)
 
diff --git a/src/libutil/pool.hh b/src/libutil/pool.hh
index f291cd578388..20df21948849 100644
--- a/src/libutil/pool.hh
+++ b/src/libutil/pool.hh
@@ -137,15 +137,21 @@ public:
         } catch (...) {
             auto state_(state.lock());
             state_->inUse--;
+            wakeup.notify_one();
             throw;
         }
     }
 
-    unsigned int count()
+    size_t count()
     {
         auto state_(state.lock());
         return state_->idle.size() + state_->inUse;
     }
+
+    size_t capacity()
+    {
+        return state.lock()->max;
+    }
 };
 
 }
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index a68f7a0fa8ee..950e6362a245 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -194,39 +194,9 @@ void readPadding(size_t len, Source & source)
 }
 
 
-unsigned int readInt(Source & source)
-{
-    unsigned char buf[8];
-    source(buf, sizeof(buf));
-    if (buf[4] || buf[5] || buf[6] || buf[7])
-        throw SerialisationError("implementation cannot deal with > 32-bit integers");
-    return
-        buf[0] |
-        (buf[1] << 8) |
-        (buf[2] << 16) |
-        (buf[3] << 24);
-}
-
-
-unsigned long long readLongLong(Source & source)
-{
-    unsigned char buf[8];
-    source(buf, sizeof(buf));
-    return
-        ((unsigned long long) buf[0]) |
-        ((unsigned long long) buf[1] << 8) |
-        ((unsigned long long) buf[2] << 16) |
-        ((unsigned long long) buf[3] << 24) |
-        ((unsigned long long) buf[4] << 32) |
-        ((unsigned long long) buf[5] << 40) |
-        ((unsigned long long) buf[6] << 48) |
-        ((unsigned long long) buf[7] << 56);
-}
-
-
 size_t readString(unsigned char * buf, size_t max, Source & source)
 {
-    size_t len = readInt(source);
+    auto len = readNum<size_t>(source);
     if (len > max) throw Error("string is too long");
     source(buf, len);
     readPadding(len, source);
@@ -236,11 +206,11 @@ size_t readString(unsigned char * buf, size_t max, Source & source)
 
 string readString(Source & source)
 {
-    size_t len = readInt(source);
-    auto buf = std::make_unique<unsigned char[]>(len);
-    source(buf.get(), len);
+    auto len = readNum<size_t>(source);
+    std::string res(len, 0);
+    source((unsigned char*) res.data(), len);
     readPadding(len, source);
-    return string((char *) buf.get(), len);
+    return res;
 }
 
 Source & operator >> (Source & in, string & s)
@@ -250,16 +220,9 @@ Source & operator >> (Source & in, string & s)
 }
 
 
-Source & operator >> (Source & in, unsigned int & n)
-{
-    n = readInt(in);
-    return in;
-}
-
-
 template<class T> T readStrings(Source & source)
 {
-    unsigned int count = readInt(source);
+    auto count = readNum<size_t>(source);
     T ss;
     while (count--)
         ss.insert(ss.end(), readString(source));
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index 5646d08c1314..2bdee70807be 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -140,15 +140,16 @@ struct StringSource : Source
 
 
 /* Adapter class of a Source that saves all data read to `s'. */
-struct SavingSourceAdapter : Source
+struct TeeSource : Source
 {
     Source & orig;
-    string s;
-    SavingSourceAdapter(Source & orig) : orig(orig) { }
+    ref<std::string> data;
+    TeeSource(Source & orig)
+        : orig(orig), data(make_ref<std::string>()) { }
     size_t read(unsigned char * data, size_t len)
     {
         size_t n = orig.read(data, len);
-        s.append((const char *) data, n);
+        this->data->append((const char *) data, n);
         return n;
     }
 };
@@ -177,18 +178,64 @@ Sink & operator << (Sink & sink, const Strings & s);
 Sink & operator << (Sink & sink, const StringSet & s);
 
 
+MakeError(SerialisationError, Error)
+
+
+template<typename T>
+T readNum(Source & source)
+{
+    unsigned char buf[8];
+    source(buf, sizeof(buf));
+
+    uint64_t n =
+        ((unsigned long long) buf[0]) |
+        ((unsigned long long) buf[1] << 8) |
+        ((unsigned long long) buf[2] << 16) |
+        ((unsigned long long) buf[3] << 24) |
+        ((unsigned long long) buf[4] << 32) |
+        ((unsigned long long) buf[5] << 40) |
+        ((unsigned long long) buf[6] << 48) |
+        ((unsigned long long) buf[7] << 56);
+
+    if (n > std::numeric_limits<T>::max())
+        throw SerialisationError("serialised integer %d is too large for type ‘%s’", n, typeid(T).name());
+
+    return n;
+}
+
+
+inline unsigned int readInt(Source & source)
+{
+    return readNum<unsigned int>(source);
+}
+
+
+inline uint64_t readLongLong(Source & source)
+{
+    return readNum<uint64_t>(source);
+}
+
+
 void readPadding(size_t len, Source & source);
-unsigned int readInt(Source & source);
-unsigned long long readLongLong(Source & source);
 size_t readString(unsigned char * buf, size_t max, Source & source);
 string readString(Source & source);
 template<class T> T readStrings(Source & source);
 
 Source & operator >> (Source & in, string & s);
-Source & operator >> (Source & in, unsigned int & n);
 
+template<typename T>
+Source & operator >> (Source & in, T & n)
+{
+    n = readNum<T>(in);
+    return in;
+}
 
-MakeError(SerialisationError, Error)
+template<typename T>
+Source & operator >> (Source & in, bool & b)
+{
+    b = readNum<uint64_t>(in);
+    return in;
+}
 
 
 }
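
Note: readNum<T>() captures the protocol's integer encoding in one place: every integer on the wire is 8 bytes, little-endian, and narrowing only happens after a range check. A standalone illustration of the decoding step (names are not Nix's):

    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <stdexcept>

    template<typename T>
    T decodeLE64(const unsigned char (&buf)[8])
    {
        uint64_t n = 0;
        for (int i = 7; i >= 0; i--)
            n = (n << 8) | buf[i];   // little-endian: byte 0 is least significant
        if (n > std::numeric_limits<T>::max())
            throw std::runtime_error("serialised integer is too large for the target type");
        return static_cast<T>(n);
    }

    int main()
    {
        const unsigned char buf[8] = {0x2a, 0, 0, 0, 0, 0, 0, 0};
        std::cout << decodeLE64<unsigned int>(buf) << "\n";  // prints 42
    }
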
diff --git a/src/libutil/types.hh b/src/libutil/types.hh
index b9a93d27d2ad..97d79af9b5d6 100644
--- a/src/libutil/types.hh
+++ b/src/libutil/types.hh
@@ -1,6 +1,5 @@
 #pragma once
 
-#include "config.h"
 
 #include "ref.hh"
 
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 6c4c5c969d86..99a91c8cc64a 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -1,8 +1,7 @@
-#include "config.h"
-
 #include "util.hh"
 #include "affinity.hh"
 #include "sync.hh"
+#include "finally.hh"
 
 #include <cctype>
 #include <cerrno>
@@ -12,6 +11,7 @@
 #include <iostream>
 #include <sstream>
 #include <thread>
+#include <future>
 
 #include <sys/wait.h>
 #include <unistd.h>
@@ -290,9 +290,9 @@ string readFile(const Path & path, bool drain)
 }
 
 
-void writeFile(const Path & path, const string & s)
+void writeFile(const Path & path, const string & s, mode_t mode)
 {
-    AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, 0666);
+    AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode);
     if (!fd)
         throw SysError(format("opening file ‘%1%’") % path);
     writeFull(fd.get(), s);
@@ -678,12 +678,11 @@ Pid::operator pid_t()
 }
 
 
-int Pid::kill(bool quiet)
+int Pid::kill()
 {
     assert(pid != -1);
 
-    if (!quiet)
-        printError(format("killing process %1%") % pid);
+    debug(format("killing process %1%") % pid);
 
     /* Send the requested signal to the child.  If it has its own
        process group, send the signal to every process in the child
@@ -839,23 +838,21 @@ std::vector<char *> stringsToCharPtrs(const Strings & ss)
 
 
 string runProgram(Path program, bool searchPath, const Strings & args,
-    const string & input)
+    const std::experimental::optional<std::string> & input)
 {
     checkInterrupt();
 
     /* Create a pipe. */
     Pipe out, in;
     out.create();
-    if (!input.empty()) in.create();
+    if (input) in.create();
 
     /* Fork. */
     Pid pid = startProcess([&]() {
         if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
             throw SysError("dupping stdout");
-        if (!input.empty()) {
-            if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
-                throw SysError("dupping stdin");
-        }
+        if (input && dup2(in.readSide.get(), STDIN_FILENO) == -1)
+            throw SysError("dupping stdin");
 
         Strings args_(args);
         args_.push_front(program);
@@ -872,11 +869,27 @@ string runProgram(Path program, bool searchPath, const Strings & args,
 
     out.writeSide = -1;
 
-    /* FIXME: This can deadlock if the input is too long. */
-    if (!input.empty()) {
+    std::thread writerThread;
+
+    std::promise<void> promise;
+
+    Finally doJoin([&]() {
+        if (writerThread.joinable())
+            writerThread.join();
+    });
+
+
+    if (input) {
         in.readSide = -1;
-        writeFull(in.writeSide.get(), input);
-        in.writeSide = -1;
+        writerThread = std::thread([&]() {
+            try {
+                writeFull(in.writeSide.get(), *input);
+                promise.set_value();
+            } catch (...) {
+                promise.set_exception(std::current_exception());
+            }
+            in.writeSide = -1;
+        });
     }
 
     string result = drainFD(out.readSide.get());
@@ -887,6 +900,9 @@ string runProgram(Path program, bool searchPath, const Strings & args,
         throw ExecError(status, format("program ‘%1%’ %2%")
             % program % statusToString(status));
 
+    /* Wait for the writer thread to finish. */
+    if (input) promise.get_future().get();
+
     return result;
 }
 
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index cfaaf1486e9e..0e6941e4a8db 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -13,6 +13,8 @@
 #include <limits>
 #include <cstdio>
 #include <map>
+#include <sstream>
+#include <experimental/optional>
 
 #ifndef HAVE_STRUCT_DIRENT_D_TYPE
 #define DT_UNKNOWN 0
@@ -89,7 +91,7 @@ string readFile(int fd);
 string readFile(const Path & path, bool drain = false);
 
 /* Write a string to a file. */
-void writeFile(const Path & path, const string & s);
+void writeFile(const Path & path, const string & s, mode_t mode = 0666);
 
 /* Read a line from a file descriptor. */
 string readLine(int fd);
@@ -201,7 +203,7 @@ public:
     ~Pid();
     void operator =(pid_t pid);
     operator pid_t();
-    int kill(bool quiet = false);
+    int kill();
     int wait();
 
     void setSeparatePG(bool separatePG);
@@ -231,7 +233,8 @@ pid_t startProcess(std::function<void()> fun, const ProcessOptions & options = P
 /* Run a program and return its stdout in a string (i.e., like the
    shell backtick operator). */
 string runProgram(Path program, bool searchPath = false,
-    const Strings & args = Strings(), const string & input = "");
+    const Strings & args = Strings(),
+    const std::experimental::optional<std::string> & input = {});
 
 class ExecError : public Error
 {
@@ -448,5 +451,4 @@ struct ReceiveInterrupts
     { }
 };
 
-
 }
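
Note: the new mode parameter on writeFile() creates the file with the requested permission bits (modulo the umask) from the start, avoiding a separate chmod after writing sensitive data. For example (path and contents are illustrative):

    #include "util.hh"

    using namespace nix;

    int main()
    {
        // 0600: only the owner can read the secret; no post-hoc chmod needed.
        writeFile("/tmp/example-secret-key", "not a real key\n", 0600);
    }
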
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
index ee030c57b6b3..b4206033cf5f 100755
--- a/src/nix-build/nix-build.cc
+++ b/src/nix-build/nix-build.cc
@@ -394,7 +394,7 @@ int main(int argc, char ** argv)
                 auto tmp = getEnv("TMPDIR", getEnv("XDG_RUNTIME_DIR", "/tmp"));
 
                 if (pure) {
-                    std::set<string> keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL"};
+                    std::set<string> keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL"};
                     decltype(env) newEnv;
                     for (auto & i : env)
                         if (keepVars.count(i.first))
@@ -408,7 +408,7 @@ int main(int argc, char ** argv)
                 env["NIX_STORE"] = store->storeDir;
 
                 for (auto & var : drv.env)
-                    env.emplace(var);
+                    env[var.first] = var.second;
 
                 restoreAffinity();
 
@@ -448,15 +448,17 @@ int main(int argc, char ** argv)
 
                 auto envPtrs = stringsToCharPtrs(envStrs);
 
+                auto shell = getEnv("NIX_BUILD_SHELL", "bash");
+
                 environ = envPtrs.data();
 
                 auto argPtrs = stringsToCharPtrs(args);
 
                 restoreSignals();
 
-                execvp(getEnv("NIX_BUILD_SHELL", "bash").c_str(), argPtrs.data());
+                execvp(shell.c_str(), argPtrs.data());
 
-                throw SysError("executing shell");
+                throw SysError("executing shell ‘%s’", shell);
             }
 
             // Ugly hackery to make "nix-build -A foo.all" produce symlinks
diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc
index 4340443b5cc2..ed43bffbc8c8 100755
--- a/src/nix-copy-closure/nix-copy-closure.cc
+++ b/src/nix-copy-closure/nix-copy-closure.cc
@@ -47,13 +47,17 @@ int main(int argc, char ** argv)
         if (sshHost.empty())
             throw UsageError("no host name specified");
 
-        auto remoteUri = "legacy-ssh://" + sshHost + (gzip ? "?compress=true" : "");
+        auto remoteUri = "ssh://" + sshHost + (gzip ? "?compress=true" : "");
         auto to = toMode ? openStore(remoteUri) : openStore();
         auto from = toMode ? openStore() : openStore(remoteUri);
 
+        PathSet storePaths2;
+        for (auto & path : storePaths)
+            storePaths2.insert(from->followLinksToStorePath(path));
+
         PathSet closure;
-        from->computeFSClosure(storePaths, closure, false, includeOutputs);
+        from->computeFSClosure(storePaths2, closure, false, includeOutputs);
 
-        copyPaths(from, to, Paths(closure.begin(), closure.end()), useSubstitutes);
+        copyPaths(from, to, closure, useSubstitutes);
     });
 }
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
index f3ee0afc11e7..ab5826b0d1a7 100644
--- a/src/nix-daemon/nix-daemon.cc
+++ b/src/nix-daemon/nix-daemon.cc
@@ -23,6 +23,7 @@
 #include <pwd.h>
 #include <grp.h>
 #include <fcntl.h>
+#include <limits.h>
 
 #if __APPLE__ || __FreeBSD__
 #include <sys/ucred.h>
@@ -272,10 +273,9 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
     }
 
     case wopAddToStore: {
-        string baseName = readString(from);
-        bool fixed = readInt(from) == 1; /* obsolete */
-        bool recursive = readInt(from) == 1;
-        string s = readString(from);
+        bool fixed, recursive;
+        std::string s, baseName;
+        from >> baseName >> fixed /* obsolete */ >> recursive >> s;
         /* Compatibility hack. */
         if (!fixed) {
             s = "sha256";
@@ -283,7 +283,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
         }
         HashType hashAlgo = parseHashType(s);
 
-        SavingSourceAdapter savedNAR(from);
+        TeeSource savedNAR(from);
         RetrieveRegularNARSink savedRegular;
 
         if (recursive) {
@@ -297,7 +297,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
 
         startWork();
         if (!savedRegular.regular) throw Error("regular file expected");
-        Path path = store->addToStoreFromDump(recursive ? savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo);
+        Path path = store->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo);
         stopWork();
 
         to << path;
@@ -339,7 +339,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
         PathSet drvs = readStorePaths<PathSet>(*store, from);
         BuildMode mode = bmNormal;
         if (GET_PROTOCOL_MINOR(clientVersion) >= 15) {
-            mode = (BuildMode)readInt(from);
+            mode = (BuildMode) readInt(from);
 
             /* Repairing is not atomic, so disallowed for "untrusted"
                clients.  */
@@ -416,8 +416,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
         GCOptions options;
         options.action = (GCOptions::GCAction) readInt(from);
         options.pathsToDelete = readStorePaths<PathSet>(*store, from);
-        options.ignoreLiveness = readInt(from);
-        options.maxFreed = readLongLong(from);
+        from >> options.ignoreLiveness >> options.maxFreed;
         // obsolete fields
         readInt(from);
         readInt(from);
@@ -437,8 +436,8 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
     }
 
     case wopSetOptions: {
-        settings.keepFailed = readInt(from) != 0;
-        settings.keepGoing = readInt(from) != 0;
+        from >> settings.keepFailed;
+        from >> settings.keepGoing;
         settings.set("build-fallback", readInt(from) ? "true" : "false");
         verbosity = (Verbosity) readInt(from);
         settings.set("build-max-jobs", std::to_string(readInt(from)));
@@ -538,8 +537,8 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
         break;
 
     case wopVerifyStore: {
-        bool checkContents = readInt(from) != 0;
-        bool repair = readInt(from) != 0;
+        bool checkContents, repair;
+        from >> checkContents >> repair;
         startWork();
         if (repair && !trusted)
             throw Error("you are not privileged to repair paths");
@@ -570,24 +569,25 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
     }
 
     case wopAddToStoreNar: {
+        bool repair, dontCheckSigs;
         ValidPathInfo info;
         info.path = readStorePath(*store, from);
-        info.deriver = readString(from);
+        from >> info.deriver;
         if (!info.deriver.empty())
             store->assertStorePath(info.deriver);
         info.narHash = parseHash(htSHA256, readString(from));
         info.references = readStorePaths<PathSet>(*store, from);
-        info.registrationTime = readInt(from);
-        info.narSize = readLongLong(from);
-        info.ultimate = readLongLong(from);
+        from >> info.registrationTime >> info.narSize >> info.ultimate;
         info.sigs = readStrings<StringSet>(from);
-        auto nar = make_ref<std::string>(readString(from));
-        auto repair = readInt(from) ? true : false;
-        auto dontCheckSigs = readInt(from) ? true : false;
+        from >> info.ca >> repair >> dontCheckSigs;
         if (!trusted && dontCheckSigs)
             dontCheckSigs = false;
+
+        TeeSink tee(from);
+        parseDump(tee, tee.source);
+
         startWork();
-        store->addToStore(info, nar, repair, dontCheckSigs, nullptr);
+        store->addToStore(info, tee.source.data, repair, dontCheckSigs, nullptr);
         stopWork();
         break;
     }
@@ -967,14 +967,14 @@ int main(int argc, char * * argv)
                     if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1)
                         throw SysError("waiting for data from client or server");
                     if (FD_ISSET(s, &fds)) {
-                        auto res = splice(s, nullptr, STDOUT_FILENO, nullptr, SIZE_MAX, SPLICE_F_MOVE);
+                        auto res = splice(s, nullptr, STDOUT_FILENO, nullptr, SSIZE_MAX, SPLICE_F_MOVE);
                         if (res == -1)
                             throw SysError("splicing data from daemon socket to stdout");
                         else if (res == 0)
                             throw EndOfFile("unexpected EOF from daemon socket");
                     }
                     if (FD_ISSET(STDIN_FILENO, &fds)) {
-                        auto res = splice(STDIN_FILENO, nullptr, s, nullptr, SIZE_MAX, SPLICE_F_MOVE);
+                        auto res = splice(STDIN_FILENO, nullptr, s, nullptr, SSIZE_MAX, SPLICE_F_MOVE);
                         if (res == -1)
                             throw SysError("splicing data from stdin to daemon socket");
                         else if (res == 0)
diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc
index acf603025690..b3b2fcac7132 100644
--- a/src/nix-prefetch-url/nix-prefetch-url.cc
+++ b/src/nix-prefetch-url/nix-prefetch-url.cc
@@ -170,10 +170,10 @@ int main(int argc, char * * argv)
                 Path unpacked = (Path) tmpDir + "/unpacked";
                 createDirs(unpacked);
                 if (hasSuffix(baseNameOf(uri), ".zip"))
-                    runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked}, "");
+                    runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked});
                 else
                     // FIXME: this requires GNU tar for decompression.
-                    runProgram("tar", true, {"xf", tmpFile, "-C", unpacked}, "");
+                    runProgram("tar", true, {"xf", tmpFile, "-C", unpacked});
 
                 /* If the archive unpacks to a single file/directory, then use
                    that as the top-level. */
diff --git a/src/nix-store/local.mk b/src/nix-store/local.mk
index 84ff15b241f3..ade0b233adf3 100644
--- a/src/nix-store/local.mk
+++ b/src/nix-store/local.mk
@@ -7,5 +7,3 @@ nix-store_SOURCES := $(wildcard $(d)/*.cc)
 nix-store_LIBS = libmain libstore libutil libformat
 
 nix-store_LDFLAGS = -lbz2 -pthread $(SODIUM_LIBS)
-
-nix-store_CXXFLAGS = -DCURL=\"$(curl)\"
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index 7dda6d208dfa..3dc167191c83 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -9,7 +9,6 @@
 #include "util.hh"
 #include "worker-protocol.hh"
 #include "xmlgraph.hh"
-#include "compression.hh"
 
 #include <iostream>
 #include <algorithm>
@@ -482,58 +481,12 @@ static void opReadLog(Strings opFlags, Strings opArgs)
 
     RunPager pager;
 
-    // FIXME: move getting logs into Store.
-    auto store2 = std::dynamic_pointer_cast<LocalFSStore>(store);
-    if (!store2) throw Error(format("store ‘%s’ does not support reading logs") % store->getUri());
-
     for (auto & i : opArgs) {
-        Path path = useDeriver(store->followLinksToStorePath(i));
-
-        string baseName = baseNameOf(path);
-        bool found = false;
-
-        for (int j = 0; j < 2; j++) {
-
-            Path logPath =
-                j == 0
-                ? (format("%1%/%2%/%3%/%4%") % store2->logDir % drvsLogDir % string(baseName, 0, 2) % string(baseName, 2)).str()
-                : (format("%1%/%2%/%3%") % store2->logDir % drvsLogDir % baseName).str();
-            Path logBz2Path = logPath + ".bz2";
-
-            if (pathExists(logPath)) {
-                /* !!! Make this run in O(1) memory. */
-                string log = readFile(logPath);
-                writeFull(STDOUT_FILENO, log);
-                found = true;
-                break;
-            }
-
-            else if (pathExists(logBz2Path)) {
-                std::cout << *decompress("bzip2", readFile(logBz2Path));
-                found = true;
-                break;
-            }
-        }
-
-        if (!found) {
-            for (auto & i : settings.logServers) {
-                string prefix = i;
-                if (!prefix.empty() && prefix.back() != '/') prefix += '/';
-                string url = prefix + baseName;
-                try {
-                    string log = runProgram(CURL, true, {"--fail", "--location", "--silent", "--", url});
-                    std::cout << "(using build log from " << url << ")" << std::endl;
-                    std::cout << log;
-                    found = true;
-                    break;
-                } catch (ExecError & e) {
-                    /* Ignore errors from curl. FIXME: actually, might be
-                       nice to print a warning on HTTP status != 404. */
-                }
-            }
-        }
-
-        if (!found) throw Error(format("build log of derivation ‘%1%’ is not available") % path);
+        auto path = store->followLinksToStorePath(i);
+        auto log = store->getBuildLog(path);
+        if (!log)
+            throw Error("build log of derivation ‘%s’ is not available", path);
+        std::cout << *log;
     }
 }
 
@@ -708,6 +661,9 @@ static void opExport(Strings opFlags, Strings opArgs)
     for (auto & i : opFlags)
         throw UsageError(format("unknown flag ‘%1%’") % i);
 
+    for (auto & i : opArgs)
+        i = store->followLinksToStorePath(i);
+
     FdSink sink(STDOUT_FILENO);
     store->exportPaths(opArgs, sink);
 }
@@ -721,7 +677,7 @@ static void opImport(Strings opFlags, Strings opArgs)
     if (!opArgs.empty()) throw UsageError("no arguments expected");
 
     FdSource source(STDIN_FILENO);
-    Paths paths = store->importPaths(source, 0);
+    Paths paths = store->importPaths(source, nullptr, true);
 
     for (auto & i : paths)
         cout << format("%1%\n") % i << std::flush;
@@ -839,7 +795,7 @@ static void opServe(Strings opFlags, Strings opArgs)
         settings.maxSilentTime = readInt(in);
         settings.buildTimeout = readInt(in);
         if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
-            settings.maxLogSize = readInt(in);
+            in >> settings.maxLogSize;
         if (GET_PROTOCOL_MINOR(clientVersion) >= 3) {
             settings.set("build-repeat", std::to_string(readInt(in)));
             settings.set("enforce-determinism", readInt(in) != 0 ? "true" : "false");
diff --git a/src/nix/command.cc b/src/nix/command.cc
index 5a8288da912f..a1b2c120a5d9 100644
--- a/src/nix/command.cc
+++ b/src/nix/command.cc
@@ -79,9 +79,14 @@ StoreCommand::StoreCommand()
     mkFlag(0, "store", "store-uri", "URI of the Nix store to use", &storeUri);
 }
 
+ref<Store> StoreCommand::createStore()
+{
+    return openStore(storeUri);
+}
+
 void StoreCommand::run()
 {
-    run(openStore(storeUri));
+    run(createStore());
 }
 
 StorePathsCommand::StorePathsCommand()
diff --git a/src/nix/command.hh b/src/nix/command.hh
index a29cdcf7f50f..fa6c21abf8ad 100644
--- a/src/nix/command.hh
+++ b/src/nix/command.hh
@@ -33,6 +33,7 @@ struct StoreCommand : virtual Command
     std::string storeUri;
     StoreCommand();
     void run() override;
+    virtual ref<Store> createStore();
     virtual void run(ref<Store>) = 0;
 };
 
diff --git a/src/nix/copy.cc b/src/nix/copy.cc
index 976b0d3e0b81..b2165cb8f85c 100644
--- a/src/nix/copy.cc
+++ b/src/nix/copy.cc
@@ -38,15 +38,19 @@ struct CmdCopy : StorePathsCommand
         };
     }
 
-    void run(ref<Store> store, Paths storePaths) override
+    ref<Store> createStore() override
+    {
+        return srcUri.empty() ? StoreCommand::createStore() : openStore(srcUri);
+    }
+
+    void run(ref<Store> srcStore, Paths storePaths) override
     {
         if (srcUri.empty() && dstUri.empty())
             throw UsageError("you must pass ‘--from’ and/or ‘--to’");
 
-        ref<Store> srcStore = srcUri.empty() ? store : openStore(srcUri);
-        ref<Store> dstStore = dstUri.empty() ? store : openStore(dstUri);
+        ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
 
-        copyPaths(srcStore, dstStore, storePaths);
+        copyPaths(srcStore, dstStore, PathSet(storePaths.begin(), storePaths.end()));
     }
 };
 
diff --git a/src/nix/log.cc b/src/nix/log.cc
new file mode 100644
index 000000000000..d8a3830e91c8
--- /dev/null
+++ b/src/nix/log.cc
@@ -0,0 +1,57 @@
+#include "command.hh"
+#include "common-args.hh"
+#include "installables.hh"
+#include "shared.hh"
+#include "store-api.hh"
+
+using namespace nix;
+
+struct CmdLog : StoreCommand, MixInstallables
+{
+    CmdLog()
+    {
+    }
+
+    std::string name() override
+    {
+        return "log";
+    }
+
+    std::string description() override
+    {
+        return "show the build log of the specified packages or paths";
+    }
+
+    void run(ref<Store> store) override
+    {
+        auto elems = evalInstallables(store);
+
+        PathSet paths;
+
+        for (auto & elem : elems) {
+            if (elem.isDrv)
+                paths.insert(elem.drvPath);
+            else
+                paths.insert(elem.outPaths.begin(), elem.outPaths.end());
+        }
+
+        auto subs = getDefaultSubstituters();
+
+        subs.push_front(store);
+
+        for (auto & path : paths) {
+            bool found = false;
+            for (auto & sub : subs) {
+                auto log = sub->getBuildLog(path);
+                if (!log) continue;
+                std::cout << *log;
+                found = true;
+                break;
+            }
+            if (!found)
+                throw Error("build log of path ‘%s’ is not available", path);
+        }
+    }
+};
+
+static RegisterCommand r1(make_ref<CmdLog>());
diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh
index 4ce428f643e5..532099d02142 100644
--- a/tests/binary-cache.sh
+++ b/tests/binary-cache.sh
@@ -18,7 +18,7 @@ basicTests() {
 
     nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "---"
 
-    nix-store --option binary-caches "file://$cacheDir" -r $outPath
+    nix-store --option binary-caches "file://$cacheDir" --option signed-binary-caches '' -r $outPath
 
     [ -x $outPath/program ]
 
@@ -34,7 +34,7 @@ basicTests() {
     x=$(nix-env -f dependencies.nix -qas \* --prebuilt-only)
     [ -z "$x" ]
 
-    nix-store --option binary-caches "file://$cacheDir" -r $outPath
+    nix-store --option binary-caches "file://$cacheDir" --option signed-binary-caches '' -r $outPath
 
     nix-store --check-validity $outPath
     nix-store -qR $outPath | grep input-2
@@ -63,7 +63,7 @@ mv $nar $nar.good
 mkdir -p $TEST_ROOT/empty
 nix-store --dump $TEST_ROOT/empty | xz > $nar
 
-nix-build --option binary-caches "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
+nix-build --option binary-caches "file://$cacheDir" --option signed-binary-caches '' dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
 grep -q "hash mismatch" $TEST_ROOT/log
 
 mv $nar.good $nar
@@ -73,7 +73,7 @@ mv $nar.good $nar
 clearStore
 clearCacheCache
 
-if nix-store --option binary-caches "file://$cacheDir" --option signed-binary-caches '*' -r $outPath; then
+if nix-store --option binary-caches "file://$cacheDir" -r $outPath; then
     echo "unsigned binary cache incorrectly accepted"
     exit 1
 fi
@@ -99,7 +99,7 @@ clearStore
 
 rm $(grep -l "StorePath:.*dependencies-input-2" $cacheDir/*.narinfo)
 
-nix-build --option binary-caches "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
+nix-build --option binary-caches "file://$cacheDir" --option signed-binary-caches '' dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
 grep -q "fetching path" $TEST_ROOT/log
 
 
diff --git a/tests/nix-shell.sh b/tests/nix-shell.sh
index 26cc521bbcbf..f0f34a5f8705 100644
--- a/tests/nix-shell.sh
+++ b/tests/nix-shell.sh
@@ -4,6 +4,7 @@ clearStore
 
 # Test nix-shell -A
 export IMPURE_VAR=foo
+export NIX_BUILD_SHELL=$SHELL
 output=$(nix-shell --pure shell.nix -A shellDrv --run \
     'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"')
 
diff --git a/tests/remote-builds.nix b/tests/remote-builds.nix
index d14d6ff7f056..63aaa4d88f56 100644
--- a/tests/remote-builds.nix
+++ b/tests/remote-builds.nix
@@ -14,7 +14,7 @@ let
     { services.openssh.enable = true;
       virtualisation.writableStore = true;
       nix.package = nix;
-      nix.useChroot = true;
+      nix.useSandbox = true;
     };
 
   # Trivial Nix expression to build remotely.
diff --git a/tests/repair.sh b/tests/repair.sh
index 782838704da7..57152d450a17 100644
--- a/tests/repair.sh
+++ b/tests/repair.sh
@@ -51,7 +51,7 @@ nix copy --recursive --to file://$cacheDir $path
 chmod u+w $path2
 rm -rf $path2
 
-nix-store --verify --check-contents --repair --option binary-caches "file://$cacheDir"
+nix-store --verify --check-contents --repair --option binary-caches "file://$cacheDir" --option signed-binary-caches ''
 
 if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then
     echo "path not repaired properly" >&2
@@ -69,7 +69,7 @@ if nix-store --verify-path $path2; then
     exit 1
 fi
 
-nix-store --repair-path $path2 --option binary-caches "file://$cacheDir"
+nix-store --repair-path $path2 --option binary-caches "file://$cacheDir" --option signed-binary-caches ''
 
 if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then
     echo "path not repaired properly" >&2
diff --git a/tests/shell.nix b/tests/shell.nix
index ed4d6fbaaa0b..1a092913b3b2 100644
--- a/tests/shell.nix
+++ b/tests/shell.nix
@@ -34,6 +34,7 @@ rec {
     mkdir -p $out/bin
     echo 'echo foo' > $out/bin/foo
     chmod a+rx $out/bin/foo
+    ln -s ${shell} $out/bin/bash
   '';
 
   bar = runCommand "bar" {} ''