-rw-r--r--  doc/manual/command-ref/nix-env.xml  15
-rw-r--r--  doc/manual/command-ref/nix-store.xml  2
-rw-r--r--  doc/manual/expressions/builtins.xml  48
-rw-r--r--  doc/manual/installation/env-variables.xml  49
-rwxr-xr-x  maintainers/upload-release.pl  24
-rw-r--r--  misc/docker/Dockerfile  28
-rw-r--r--  nix.spec.in  61
-rw-r--r--  perl/lib/Nix/Store.xs  2
-rw-r--r--  release-common.nix  4
-rw-r--r--  release.nix  21
-rwxr-xr-x  scripts/install-multi-user.sh  12
-rw-r--r--  scripts/install.in  67
-rw-r--r--  scripts/nix-profile-daemon.sh.in  23
-rw-r--r--  src/libexpr/attr-set.cc  16
-rw-r--r--  src/libexpr/eval-inline.hh  14
-rw-r--r--  src/libexpr/eval.cc  109
-rw-r--r--  src/libexpr/eval.hh  39
-rw-r--r--  src/libexpr/json-to-value.cc  2
-rw-r--r--  src/libexpr/lexer.l  8
-rw-r--r--  src/libexpr/nixexpr.hh  1
-rw-r--r--  src/libexpr/parser.y  9
-rw-r--r--  src/libexpr/primops.cc  33
-rw-r--r--  src/libexpr/primops/fetchGit.cc  2
-rw-r--r--  src/libexpr/primops/fetchMercurial.cc  2
-rw-r--r--  src/libmain/common-args.cc  4
-rw-r--r--  src/libmain/shared.cc  2
-rw-r--r--  src/libstore/binary-cache-store.cc  77
-rw-r--r--  src/libstore/binary-cache-store.hh  18
-rw-r--r--  src/libstore/build.cc  50
-rw-r--r--  src/libstore/builtins/fetchurl.cc  78
-rw-r--r--  src/libstore/derivations.cc  2
-rw-r--r--  src/libstore/download.cc  208
-rw-r--r--  src/libstore/download.hh  16
-rw-r--r--  src/libstore/gc.cc  29
-rw-r--r--  src/libstore/globals.cc  38
-rw-r--r--  src/libstore/globals.hh  65
-rw-r--r--  src/libstore/http-binary-cache-store.cc  40
-rw-r--r--  src/libstore/legacy-ssh-store.cc  11
-rw-r--r--  src/libstore/local-binary-cache-store.cc  18
-rw-r--r--  src/libstore/local-store.cc  16
-rw-r--r--  src/libstore/local-store.hh  3
-rw-r--r--  src/libstore/local.mk  2
-rw-r--r--  src/libstore/misc.cc  17
-rw-r--r--  src/libstore/optimise-store.cc  3
-rw-r--r--  src/libstore/profiles.cc  23
-rw-r--r--  src/libstore/profiles.hh  2
-rw-r--r--  src/libstore/remote-store.cc  79
-rw-r--r--  src/libstore/remote-store.hh  3
-rw-r--r--  src/libstore/s3-binary-cache-store.cc  105
-rw-r--r--  src/libstore/sqlite.cc  3
-rw-r--r--  src/libstore/store-api.cc  92
-rw-r--r--  src/libstore/store-api.hh  7
-rw-r--r--  src/libstore/worker-protocol.hh  2
-rw-r--r--  src/libutil/archive.cc  26
-rw-r--r--  src/libutil/archive.hh  4
-rw-r--r--  src/libutil/config.cc  88
-rw-r--r--  src/libutil/config.hh  81
-rw-r--r--  src/libutil/serialise.cc  2
-rw-r--r--  src/libutil/serialise.hh  9
-rw-r--r--  src/libutil/sync.hh  4
-rw-r--r--  src/libutil/util.cc  27
-rw-r--r--  src/libutil/util.hh  56
-rwxr-xr-x  src/nix-build/nix-build.cc  28
-rwxr-xr-x  src/nix-channel/nix-channel.cc  2
-rw-r--r--  src/nix-daemon/nix-daemon.cc  23
-rw-r--r--  src/nix-env/nix-env.cc  8
-rw-r--r--  src/nix-instantiate/nix-instantiate.cc  20
-rw-r--r--  src/nix-prefetch-url/nix-prefetch-url.cc  28
-rw-r--r--  src/nix/installables.cc  24
-rw-r--r--  src/nix/main.cc  7
-rw-r--r--  src/nix/progress-bar.cc  5
-rw-r--r--  src/nix/repl.cc  4
-rw-r--r--  src/nix/show-config.cc  8
-rw-r--r--  src/nix/upgrade-nix.cc  14
-rw-r--r--  tests/binary-cache.sh  31
-rw-r--r--  tests/common.sh.in  8
-rw-r--r--  tests/lang/eval-okay-arithmetic.exp  2
-rw-r--r--  tests/lang/eval-okay-arithmetic.nix  4
-rw-r--r--  tests/lang/parse-fail-uft8.nix  1
-rw-r--r--  tests/plugins/plugintest.cc  13
80 files changed, 1363 insertions, 768 deletions
diff --git a/doc/manual/command-ref/nix-env.xml b/doc/manual/command-ref/nix-env.xml
index eac7739558be..56c466268ea0 100644
--- a/doc/manual/command-ref/nix-env.xml
+++ b/doc/manual/command-ref/nix-env.xml
@@ -1346,11 +1346,12 @@ $ nix-env --list-generations
 <para>This operation deletes the specified generations of the current
 profile.  The generations can be a list of generation numbers, the
 special value <literal>old</literal> to delete all non-current
-generations, or a value such as <literal>30d</literal> to delete all
+generations, a value such as <literal>30d</literal> to delete all
 generations older than the specified number of days (except for the
-generation that was active at that point in time).
-Periodically deleting old generations is important to make garbage
-collection effective.</para>
+generation that was active at that point in time), or a value such as
+<literal>+5</literal> to keep only the specified number of generations
+older than the current generation. Periodically deleting old generations
+is important to make garbage collection effective.</para>
 
 </refsection>
 
@@ -1359,6 +1360,8 @@ collection effective.</para>
 <screen>
 $ nix-env --delete-generations 3 4 8
 
+$ nix-env --delete-generations +5
+
 $ nix-env --delete-generations 30d
 
 $ nix-env -p other_profile --delete-generations old</screen>
@@ -1458,7 +1461,7 @@ error: no generation older than the current (91) exists</screen>
 <refsection condition="manpage"><title>Environment variables</title>
 
 <variablelist>
-  
+
   <varlistentry><term><envar>NIX_PROFILE</envar></term>
 
     <listitem><para>Location of the Nix profile.  Defaults to the
@@ -1472,6 +1475,6 @@ error: no generation older than the current (91) exists</screen>
 </variablelist>
 
 </refsection>
-  
+
 
 </refentry>
diff --git a/doc/manual/command-ref/nix-store.xml b/doc/manual/command-ref/nix-store.xml
index f2dc6ed8540d..5fff64a18f93 100644
--- a/doc/manual/command-ref/nix-store.xml
+++ b/doc/manual/command-ref/nix-store.xml
@@ -204,7 +204,7 @@ printed.)</para>
     with <option>-K</option>, if an output path is not identical to
     the corresponding output from the previous build, the new output
     path is left in
-    <filename>/nix/store/<replaceable>name</replaceable>-check.</filename></para>
+    <filename>/nix/store/<replaceable>name</replaceable>.check.</filename></para>
 
     <para>See also the <option>build-repeat</option> configuration
     option, which repeats a derivation a number of times and prevents
diff --git a/doc/manual/expressions/builtins.xml b/doc/manual/expressions/builtins.xml
index ac1fe7e2fafe..07d8357b40b5 100644
--- a/doc/manual/expressions/builtins.xml
+++ b/doc/manual/expressions/builtins.xml
@@ -92,6 +92,36 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
+  <varlistentry><term><function>builtins.bitAnd</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return the bitwise AND of the integers
+    <replaceable>e1</replaceable> and
+    <replaceable>e2</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.bitOr</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return the bitwise OR of the integers
+    <replaceable>e1</replaceable> and
+    <replaceable>e2</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.bitXor</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return the bitwise XOR of the integers
+    <replaceable>e1</replaceable> and
+    <replaceable>e2</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
   <varlistentry><term><varname>builtins</varname></term>
 
     <listitem><para>The set <varname>builtins</varname> contains all
@@ -280,8 +310,17 @@ with import (fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixo
 
 stdenv.mkDerivation { … }
 </programlisting>
+    </para>
 
-    Note that when obtaining the hash with <varname>nix-prefetch-url
+    <para>The fetched tarball is cached for a certain amount of time
+    (1 hour by default) in <filename>~/.cache/nix/tarballs/</filename>.
+    You can change the cache timeout either on the command line with
+    <option>--option tarball-ttl <replaceable>number of seconds</replaceable></option> or
+    in the Nix configuration file with this option:
+    <literal>tarball-ttl <replaceable>number of seconds to cache</replaceable></literal>.
+    </para>
+
+    <para>Note that when obtaining the hash with <varname>nix-prefetch-url
     </varname> the option <varname>--unpack</varname> is required.
     </para>
 
@@ -420,7 +459,9 @@ stdenv.mkDerivation {
       <literal>"unknown"</literal> (for other kinds of files such as
       device nodes or fifos — but note that those cannot be copied to
       the Nix store, so if the predicate returns
-      <literal>true</literal> for them, the copy will fail).</para>
+      <literal>true</literal> for them, the copy will fail). If you
+      exclude a directory, the entire corresponding subtree of
+      <replaceable>e2</replaceable> will be excluded.</para>
 
     </listitem>
 
@@ -468,8 +509,7 @@ builtins.fromJSON ''{"x": [1, 2, 3], "y": null}''
 </programlisting>
 
     returns the value <literal>{ x = [ 1 2 3 ]; y = null;
-    }</literal>. Floating point numbers are not
-    supported.</para></listitem>
+    }</literal>.</para></listitem>
 
   </varlistentry>
 
diff --git a/doc/manual/installation/env-variables.xml b/doc/manual/installation/env-variables.xml
index fc39cdd9dfef..1fd6bafee7e3 100644
--- a/doc/manual/installation/env-variables.xml
+++ b/doc/manual/installation/env-variables.xml
@@ -21,4 +21,51 @@ in your <filename>~/.profile</filename> (or similar), like this:</para>
 <screen>
 source <replaceable>prefix</replaceable>/etc/profile.d/nix.sh</screen>
 
-</chapter>
\ No newline at end of file
+<section xml:id="sec-nix-ssl-cert-file">
+
+<title><envar>NIX_SSL_CERT_FILE</envar></title>
+
+<para>If you need to specify a custom certificate bundle to account
+for an HTTPS-intercepting man-in-the-middle proxy, you must specify
+the path to the certificate bundle in the environment variable
+<envar>NIX_SSL_CERT_FILE</envar>.</para>
+
+
+<para>If you don't specify a <envar>NIX_SSL_CERT_FILE</envar>
+manually, Nix will install and use its own certificate
+bundle.</para>
+
+<procedure>
+  <step><para>Set the environment variable and install Nix</para>
+    <screen>
+$ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
+$ curl https://nixos.org/nix/install | sh
+</screen></step>
+
+  <step><para>In the shell profile and rc files (for example,
+  <filename>/etc/bashrc</filename>, <filename>/etc/zshrc</filename>),
+  add the following line:</para>
+<programlisting>
+export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
+</programlisting>
+</step>
+</procedure>
+
+<note><para>You must not add the export and then do the install, as
+the Nix installer will detect the presence of Nix configuration, and
+abort.</para></note>
+
+<section>
+<title><envar>NIX_SSL_CERT_FILE</envar> with macOS and the Nix daemon</title>
+
+<para>On macOS you must specify the environment variable for the Nix
+daemon service, then restart it:</para>
+
+<screen>
+$ sudo launchctl setenv NIX_SSL_CERT_FILE /etc/ssl/my-certificate-bundle.crt
+$ sudo launchctl kickstart -k system/org.nixos.nix-daemon
+</screen>
+</section>
+
+</section>
+</chapter>
diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl
index aa7633a709c1..9b0a09e6c834 100755
--- a/maintainers/upload-release.pl
+++ b/maintainers/upload-release.pl
@@ -76,15 +76,20 @@ sub downloadFile {
 
     write_file("$dstFile.sha256", $sha256_expected);
 
+    if (! -e "$dstFile.asc") {
+        system("gpg2 --detach-sign --armor $dstFile") == 0 or die "unable to sign $dstFile\n";
+    }
+
     return ($dstFile, $sha256_expected);
 }
 
 downloadFile("tarball", "2"); # .tar.bz2
 my ($tarball, $tarballHash) = downloadFile("tarball", "3"); # .tar.xz
-my ($tarball_i686_linux, $tarball_i686_linux_hash) = downloadFile("binaryTarball.i686-linux", "1");
-my ($tarball_x86_64_linux, $tarball_x86_64_linux_hash) = downloadFile("binaryTarball.x86_64-linux", "1");
-my ($tarball_aarch64_linux, $tarball_aarch64_linux_hash) = downloadFile("binaryTarball.aarch64-linux", "1");
-my ($tarball_x86_64_darwin, $tarball_x86_64_darwin_hash) = downloadFile("binaryTarball.x86_64-darwin", "1");
+downloadFile("binaryTarball.i686-linux", "1");
+downloadFile("binaryTarball.x86_64-linux", "1");
+downloadFile("binaryTarball.aarch64-linux", "1");
+downloadFile("binaryTarball.x86_64-darwin", "1");
+downloadFile("installerScript", "1");
 
 # Update Nixpkgs in a very hacky way.
 system("cd $nixpkgsDir && git pull") == 0 or die;
@@ -144,17 +149,6 @@ system("cd $siteDir && git pull") == 0 or die;
 write_file("$siteDir/nix-release.tt",
            "[%-\n" .
            "latestNixVersion = \"$version\"\n" .
-           "nix_hash_i686_linux = \"$tarball_i686_linux_hash\"\n" .
-           "nix_hash_x86_64_linux = \"$tarball_x86_64_linux_hash\"\n" .
-           "nix_hash_aarch64_linux = \"$tarball_aarch64_linux_hash\"\n" .
-           "nix_hash_x86_64_darwin = \"$tarball_x86_64_darwin_hash\"\n" .
            "-%]\n");
 
-system("cd $siteDir && nix-shell --run 'make nix/install nix/install.sig'") == 0 or die;
-
-copy("$siteDir/nix/install", "$siteDir/nix/install-$version") or die;
-copy("$siteDir/nix/install.sig", "$siteDir/nix/install-$version.sig") or die;
-
-system("cd $siteDir && git add nix/install-$version nix/install-$version.sig") == 0 or die;
-
 system("cd $siteDir && git commit -a -m 'Nix $version released'") == 0 or die;
diff --git a/misc/docker/Dockerfile b/misc/docker/Dockerfile
deleted file mode 100644
index 2f8e3dd7a679..000000000000
--- a/misc/docker/Dockerfile
+++ /dev/null
@@ -1,28 +0,0 @@
-FROM alpine
-
-# Enable HTTPS support in wget.
-RUN apk add --update openssl
-
-# Download Nix and install it into the system.
-RUN wget https://nixos.org/releases/nix/nix-2.0/nix-2.0-x86_64-linux.tar.bz2 \
-  && echo "6312837aee33306cdbb351b75ba1638b89d21b30f0caf0346f9a742425f197ee  nix-2.0-x86_64-linux.tar.bz2" | sha256sum -c \
-  && tar xjf nix-*-x86_64-linux.tar.bz2 \
-  && addgroup -g 30000 -S nixbld \
-  && for i in $(seq 1 30); do adduser -S -D -h /var/empty -g "Nix build user $i" -u $((30000 + i)) -G nixbld nixbld$i ; done \
-  && mkdir -m 0755 /nix && USER=root sh nix-*-x86_64-linux/install \
-  && ln -s /nix/var/nix/profiles/default/etc/profile.d/nix.sh /etc/profile.d/ \
-  && rm -r /nix-*-x86_64-linux \
-  && rm -r /var/cache/apk/*
-
-ONBUILD ENV \
-    ENV=/etc/profile \
-    PATH=/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:/bin:/sbin:/usr/bin:/usr/sbin \
-    GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt \
-    NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
-
-ENV \
-    ENV=/etc/profile \
-    PATH=/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:/bin:/sbin:/usr/bin:/usr/sbin \
-    GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt \
-    NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt \
-    NIX_PATH=/nix/var/nix/profiles/per-user/root/channels
diff --git a/nix.spec.in b/nix.spec.in
index d962bcc857b9..cd053dbfce5c 100644
--- a/nix.spec.in
+++ b/nix.spec.in
@@ -3,33 +3,47 @@
 %global nixbld_user "nix-builder-"
 %global nixbld_group "nixbld"
 
+# NOTE: Building on EL7 requires
+# - CentOS / RHEL 7 Software Collections repository
+#   yum install centos-release-scl
+#
+# - Recent boost backport
+#   curl https://copr.fedorainfracloud.org/coprs/whosthere/boost/repo/epel-7/whosthere-boost-epel-7.repo -o /etc/yum.repos.d/whosthere-boost-epel-7.repo
+#
+
+# Allow documentation generation to be disabled,
+# which is necessary on some platforms
+%bcond_without docgen
+
 Summary: The Nix software deployment system
 Name: nix
 Version: @PACKAGE_VERSION@
 Release: 2%{?dist}
 License: LGPLv2+
-%if 0%{?rhel} && 0%{?rhel} < 7
 Group: Applications/System
-%endif
 URL: http://nixos.org/
 Source0: %{name}-%{version}.tar.bz2
-%if 0%{?el5}
-BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
-%endif
+
 Requires: curl
 Requires: bzip2
 Requires: gzip
 Requires: xz
-Requires: libseccomp
-Requires: boost-context
+BuildRequires: bison
+BuildRequires: boost-devel >= 1.60
 BuildRequires: bzip2-devel
-BuildRequires: sqlite-devel
+
+# for RHEL <= 7, we need software collections for a C++14 compatible compiler
+%if 0%{?rhel}
+BuildRequires: devtoolset-7-gcc
+BuildRequires: devtoolset-7-gcc-c++
+%endif
+
+BuildRequires: flex
 BuildRequires: libcurl-devel
 BuildRequires: libseccomp-devel
-BuildRequires: boost-devel
-
-# Hack to make that shitty RPM scanning hack shut up.
-Provides: perl(Nix::SSH)
+BuildRequires: openssl-devel
+BuildRequires: sqlite-devel
+BuildRequires: xz-devel
 
 %description
 Nix is a purely functional package manager. It allows multiple
@@ -41,9 +55,6 @@ it can be used equally well under other Unix systems.
 
 %package        devel
 Summary:        Development files for %{name}
-%if 0%{?rhel} && 0%{?rhel} < 7
-Group:          Development/Libraries
-%endif
 Requires:       %{name}%{?_isa} = %{version}-%{release}
 
 %description   devel
@@ -53,9 +64,6 @@ developing applications that use %{name}.
 
 %package doc
 Summary:        Documentation files for %{name}
-%if 0%{?rhel} && 0%{?rhel} < 7
-Group:          Documentation
-%endif
 BuildArch:      noarch
 Requires:       %{name} = %{version}-%{release}
 
@@ -67,20 +75,25 @@ The %{name}-doc package contains documentation files for %{name}.
 
 
 %build
+%if 0%{?rhel}
+source /opt/rh/devtoolset-7/enable 
+%endif
 extraFlags=
 # - override docdir so large documentation files are owned by the
 #   -doc subpackage
 # - set localstatedir by hand to the preferred nix value
 %configure --localstatedir=/nix/var \
+	   %{!?without_docgen:--disable-doc-gen} \
            --docdir=%{_defaultdocdir}/%{name}-doc-%{version} \
            $extraFlags
-make -j$NIX_BUILD_CORES -l$NIX_BUILD_CORES
+make V=1 %{?_smp_mflags}
 
 
 %install
-%if 0%{?el5}
-rm -rf $RPM_BUILD_ROOT
+%if 0%{?rhel}
+source /opt/rh/devtoolset-7/enable 
 %endif
+
 make DESTDIR=$RPM_BUILD_ROOT install
 
 find $RPM_BUILD_ROOT -name '*.la' -exec rm -f {} ';'
@@ -130,6 +143,7 @@ systemctl start  nix-daemon.socket
 %endif
 
 %files
+%license COPYING
 %{_bindir}/nix*
 %{_libdir}/*.so
 %{_prefix}/libexec/*
@@ -138,9 +152,11 @@ systemctl start  nix-daemon.socket
 %{_prefix}/lib/systemd/system/nix-daemon.service
 %endif
 %{_datadir}/nix
+%if ! %{without docgen}
 %{_mandir}/man1/*.1*
 %{_mandir}/man5/*.5*
 %{_mandir}/man8/*.8*
+%endif
 %config(noreplace) %{_sysconfdir}/profile.d/nix.sh
 %config(noreplace) %{_sysconfdir}/profile.d/nix-daemon.sh
 /nix
@@ -149,6 +165,9 @@ systemctl start  nix-daemon.socket
 %{_includedir}/nix
 %{_prefix}/lib/pkgconfig/*.pc
 
+
+%if ! %{without docgen}
 %files doc
 %docdir %{_defaultdocdir}/%{name}-doc-%{version}
 %{_defaultdocdir}/%{name}-doc-%{version}
+%endif
diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs
index bbfb2934315b..ce553bb53ebc 100644
--- a/perl/lib/Nix/Store.xs
+++ b/perl/lib/Nix/Store.xs
@@ -27,7 +27,7 @@ static ref<Store> store()
     static std::shared_ptr<Store> _store;
     if (!_store) {
         try {
-            settings.loadConfFile();
+            loadConfFile();
             settings.lockCPU = false;
             _store = openStore();
         } catch (Error & e) {
diff --git a/release-common.nix b/release-common.nix
index 0c12bc7ceb38..f98e86a1b444 100644
--- a/release-common.nix
+++ b/release-common.nix
@@ -57,11 +57,11 @@ rec {
       git
       mercurial
     ]
-    ++ lib.optional stdenv.isLinux libseccomp
+    ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal]
     ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
     ++ lib.optional (stdenv.isLinux || stdenv.isDarwin)
       (aws-sdk-cpp.override {
-        apis = ["s3"];
+        apis = ["s3" "transfer"];
         customMemoryManagement = false;
       });
 
diff --git a/release.nix b/release.nix
index 37deb8e7ee38..321f1688367a 100644
--- a/release.nix
+++ b/release.nix
@@ -275,6 +275,24 @@ let
         '';
 
 
+    installerScript =
+      pkgs.runCommand "installer-script"
+        { buildInputs = [ build.x86_64-linux ];
+        }
+        ''
+          mkdir -p $out/nix-support
+
+          substitute ${./scripts/install.in} $out/install \
+            ${pkgs.lib.concatMapStrings
+              (system: "--replace '@binaryTarball_${system}@' $(nix hash-file --type sha256 ${binaryTarball.${system}}/*.tar.bz2) ")
+              [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]
+            } \
+            --replace '@nixVersion@' ${build.x86_64-linux.src.version}
+
+          echo "file installer $out/install" >> $out/nix-support/hydra-build-products
+        '';
+
+
     # Aggregate job containing the release-critical jobs.
     release = pkgs.releaseTools.aggregate {
       name = "nix-${tarball.version}";
@@ -284,14 +302,17 @@ let
           build.i686-linux
           build.x86_64-darwin
           build.x86_64-linux
+          build.aarch64-linux
           binaryTarball.i686-linux
           binaryTarball.x86_64-darwin
           binaryTarball.x86_64-linux
+          binaryTarball.aarch64-linux
           tests.remoteBuilds
           tests.nix-copy-closure
           tests.binaryTarball
           tests.evalNixpkgs
           tests.evalNixOS
+          installerScript
         ];
     };
 
diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh
index 5f6542355e0c..6ee8dd48582e 100644
--- a/scripts/install-multi-user.sh
+++ b/scripts/install-multi-user.sh
@@ -727,11 +727,17 @@ setup_default_profile() {
     _sudo "to installing a bootstrapping Nix in to the default Profile" \
           HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX"
 
-    _sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \
-          HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
+    if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ]; then
+        _sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \
+              HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
+        export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
+    fi
 
+    # Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
+    # otherwise it will be lost in environments where sudo doesn't pass
+    # all the environment variables by default.
     _sudo "to update the default channel in the default profile" \
-          HOME="$ROOT_HOME" NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs
+          HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs
 }
 
 
diff --git a/scripts/install.in b/scripts/install.in
new file mode 100644
index 000000000000..26ab85ba0992
--- /dev/null
+++ b/scripts/install.in
@@ -0,0 +1,67 @@
+#!/bin/sh
+
+# This script installs the Nix package manager on your system by
+# downloading a binary distribution and running its installer script
+# (which in turn creates and populates /nix).
+
+{ # Prevent execution if this script was only partially downloaded
+oops() {
+    echo "$0:" "$@" >&2
+    exit 1
+}
+
+tmpDir="$(mktemp -d -t nix-binary-tarball-unpack.XXXXXXXXXX || \
+          oops "Can\'t create temporary directory for downloading the Nix binary tarball")"
+cleanup() {
+    rm -rf "$tmpDir"
+}
+trap cleanup EXIT INT QUIT TERM
+
+require_util() {
+    type "$1" > /dev/null 2>&1 || which "$1" > /dev/null 2>&1 ||
+        oops "you do not have '$1' installed, which I need to $2"
+}
+
+case "$(uname -s).$(uname -m)" in
+    Linux.x86_64) system=x86_64-linux; hash=@binaryTarball_x86_64-linux@;;
+    Linux.i?86) system=i686-linux; hash=@binaryTarball_i686-linux@;;
+    Linux.aarch64) system=aarch64-linux; hash=@binaryTarball_aarch64-linux@;;
+    Darwin.x86_64) system=x86_64-darwin; hash=@binaryTarball_x86_64-darwin@;;
+    *) oops "sorry, there is no binary distribution of Nix for your platform";;
+esac
+
+url="https://nixos.org/releases/nix/nix-@nixVersion@/nix-@nixVersion@-$system.tar.bz2"
+
+tarball="$tmpDir/$(basename "$tmpDir/nix-@nixVersion@-$system.tar.bz2")"
+
+require_util curl "download the binary tarball"
+require_util bzcat "decompress the binary tarball"
+require_util tar "unpack the binary tarball"
+
+echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..."
+curl -L "$url" -o "$tarball" || oops "failed to download '$url'"
+
+if type sha256sum > /dev/null 2>&1; then
+    hash2="$(sha256sum -b "$tarball" | cut -c1-64)"
+elif type shasum > /dev/null 2>&1; then
+    hash2="$(shasum -a 256 -b "$tarball" | cut -c1-64)"
+elif type openssl > /dev/null 2>&1; then
+    hash2="$(openssl dgst -r -sha256 "$tarball" | cut -c1-64)"
+else
+    oops "cannot verify the SHA-256 hash of '$url'; you need one of 'shasum', 'sha256sum', or 'openssl'"
+fi
+
+if [ "$hash" != "$hash2" ]; then
+    oops "SHA-256 hash mismatch in '$url'; expected $hash, got $hash2"
+fi
+
+unpack=$tmpDir/unpack
+mkdir -p "$unpack"
+< "$tarball" bzcat | tar -xf - -C "$unpack" || oops "failed to unpack '$url'"
+
+script=$(echo "$unpack"/*/install)
+
+[ -e "$script" ] || oops "installation script is missing from the binary tarball!"
+"$script" "$@"
+
+} # End of wrapping
diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in
index 43c7156062de..1be9a0755d85 100644
--- a/scripts/nix-profile-daemon.sh.in
+++ b/scripts/nix-profile-daemon.sh.in
@@ -1,5 +1,5 @@
 # Only execute this file once per shell.
-if [ -n "$__ETC_PROFILE_NIX_SOURCED" ]; then return; fi
+if [ -n "${__ETC_PROFILE_NIX_SOURCED:-}" ]; then return; fi
 __ETC_PROFILE_NIX_SOURCED=1
 
 # Set up secure multi-user builds: non-root users build through the
@@ -49,6 +49,23 @@ if test -w $HOME; then
   fi
 fi
 
-export NIX_SSL_CERT_FILE="@localstatedir@/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
-export NIX_PATH="@localstatedir@/nix/profiles/per-user/root/channels"
+
+# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work.
+if [ ! -z "${NIX_SSL_CERT_FILE:-}" ]; then
+    : # Allow users to override the NIX_SSL_CERT_FILE
+elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch
+    export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
+elif [ -e /etc/ssl/ca-bundle.pem ]; then # openSUSE Tumbleweed
+    export NIX_SSL_CERT_FILE=/etc/ssl/ca-bundle.pem
+elif [ -e /etc/ssl/certs/ca-bundle.crt ]; then # Old NixOS
+    export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt
+elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS
+    export NIX_SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt
+elif [ -e "$NIX_USER_PROFILE_DIR/etc/ssl/certs/ca-bundle.crt" ]; then # fall back to cacert in the user's Nix profile
+    export NIX_SSL_CERT_FILE=$NIX_USER_PROFILE_DIR/etc/ssl/certs/ca-bundle.crt
+elif [ -e "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt" ]; then # fall back to cacert in the default Nix profile
+    export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
+fi
+
+export NIX_PATH="nixpkgs=@localstatedir@/nix/profiles/per-user/root/channels/nixpkgs:@localstatedir@/nix/profiles/per-user/root/channels"
 export PATH="$HOME/.nix-profile/bin:$HOME/.nix-profile/lib/kde4/libexec:@localstatedir@/nix/profiles/default/bin:@localstatedir@/nix/profiles/default:@localstatedir@/nix/profiles/default/lib/kde4/libexec:$PATH"
diff --git a/src/libexpr/attr-set.cc b/src/libexpr/attr-set.cc
index 0474865c6d7d..0785897d2513 100644
--- a/src/libexpr/attr-set.cc
+++ b/src/libexpr/attr-set.cc
@@ -1,5 +1,5 @@
 #include "attr-set.hh"
-#include "eval.hh"
+#include "eval-inline.hh"
 
 #include <algorithm>
 
@@ -7,20 +7,6 @@
 namespace nix {
 
 
-/* Note: Various places expect the allocated memory to be zeroed. */
-static void * allocBytes(size_t n)
-{
-    void * p;
-#if HAVE_BOEHMGC
-    p = GC_malloc(n);
-#else
-    p = calloc(n, 1);
-#endif
-    if (!p) throw std::bad_alloc();
-    return p;
-}
-
-
 /* Allocate a new array of attributes for an attribute set with a specific
    capacity. The space is implicitly reserved after the Bindings
    structure. */
diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh
index 8cc50e561354..c27116e3b448 100644
--- a/src/libexpr/eval-inline.hh
+++ b/src/libexpr/eval-inline.hh
@@ -78,4 +78,18 @@ inline void EvalState::forceList(Value & v, const Pos & pos)
         throwTypeError("value is %1% while a list was expected, at %2%", v, pos);
 }
 
+/* Note: Various places expect the allocated memory to be zeroed. */
+inline void * allocBytes(size_t n)
+{
+    void * p;
+#if HAVE_BOEHMGC
+    p = GC_MALLOC(n);
+#else
+    p = calloc(n, 1);
+#endif
+    if (!p) throw std::bad_alloc();
+    return p;
+}
+
+
 }
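
Because allocBytes() now lives in this shared header (it was previously duplicated in attr-set.cc and eval.cc), the zeroing contract in its comment deserves emphasis: both GC_MALLOC and calloc return memory whose bytes read back as zero, and callers such as allocEnv() rely on fresh Env slots being null pointers. A standalone sketch of that contract, with an illustrative EnvSketch type that is not part of the patch:

```cpp
#include <cassert>
#include <cstdlib>
#include <new>

/* Illustrative stand-in for Env: a small header followed by a flexible
   array of pointers, as in eval.hh (zero-length arrays are a GNU
   extension, matching the real 'Value * values[0]' member). */
struct EnvSketch
{
    unsigned short size;
    void * values[0];
};

/* Same shape as allocBytes() above, shown with the calloc branch only. */
static void * allocBytesSketch(size_t n)
{
    void * p = calloc(n, 1);
    if (!p) throw std::bad_alloc();
    return p;
}

int main()
{
    auto env = static_cast<EnvSketch *>(
        allocBytesSketch(sizeof(EnvSketch) + 4 * sizeof(void *)));
    env->size = 4;
    for (int i = 0; i < 4; ++i)
        assert(env->values[i] == nullptr);   /* relies on zeroed allocation */
    free(env);
    return 0;
}
```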
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 353097f89713..e09297546c95 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -13,17 +13,14 @@
 #include <sys/time.h>
 #include <sys/resource.h>
 
+#include <sys/time.h>
+#include <sys/resource.h>
+
 #if HAVE_BOEHMGC
 
 #include <gc/gc.h>
 #include <gc/gc_cpp.h>
 
-#define NEW new (UseGC)
-
-#else
-
-#define NEW new
-
 #endif
 
 
@@ -34,7 +31,7 @@ static char * dupString(const char * s)
 {
     char * t;
 #if HAVE_BOEHMGC
-    t = GC_strdup(s);
+    t = GC_STRDUP(s);
 #else
     t = strdup(s);
 #endif
@@ -43,20 +40,6 @@ static char * dupString(const char * s)
 }
 
 
-/* Note: Various places expect the allocated memory to be zeroed. */
-static void * allocBytes(size_t n)
-{
-    void * p;
-#if HAVE_BOEHMGC
-    p = GC_malloc(n);
-#else
-    p = calloc(n, 1);
-#endif
-    if (!p) throw std::bad_alloc();
-    return p;
-}
-
-
 static void printValue(std::ostream & str, std::set<const Value *> & active, const Value & v)
 {
     checkInterrupt();
@@ -199,8 +182,15 @@ void initGC()
 
 #if HAVE_BOEHMGC
     /* Initialise the Boehm garbage collector. */
+
+    /* Don't look for interior pointers. This reduces the odds of
+       misdetection a bit. */
     GC_set_all_interior_pointers(0);
 
+    /* We don't have any roots in data segments, so don't scan from
+       there. */
+    GC_set_no_dls(1);
+
     GC_INIT();
 
     GC_set_oom_fn(oomHandler);
@@ -307,15 +297,17 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
 
     assert(gcInitialised);
 
+    static_assert(sizeof(Env) <= 16, "environment must be <= 16 bytes");
+
     /* Initialise the Nix expression search path. */
-    if (!settings.pureEval) {
+    if (!evalSettings.pureEval) {
         Strings paths = parseNixPath(getEnv("NIX_PATH", ""));
         for (auto & i : _searchPath) addToSearchPath(i);
         for (auto & i : paths) addToSearchPath(i);
     }
     addToSearchPath("nix=" + canonPath(settings.nixDataDir + "/nix/corepkgs", true));
 
-    if (settings.restrictEval || settings.pureEval) {
+    if (evalSettings.restrictEval || evalSettings.pureEval) {
         allowedPaths = PathSet();
 
         for (auto & i : searchPath) {
@@ -344,7 +336,6 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
 
 EvalState::~EvalState()
 {
-    fileEvalCache.clear();
 }
 
 
@@ -352,6 +343,10 @@ Path EvalState::checkSourcePath(const Path & path_)
 {
     if (!allowedPaths) return path_;
 
+    auto i = resolvedPaths.find(path_);
+    if (i != resolvedPaths.end())
+        return i->second;
+
     bool found = false;
 
     for (auto & i : *allowedPaths) {
@@ -369,8 +364,10 @@ Path EvalState::checkSourcePath(const Path & path_)
     Path path = canonPath(path_, true);
 
     for (auto & i : *allowedPaths) {
-        if (isDirOrInDir(path, i))
+        if (isDirOrInDir(path, i)) {
+            resolvedPaths[path_] = path;
             return path;
+        }
     }
 
     throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path);
@@ -379,13 +376,13 @@ Path EvalState::checkSourcePath(const Path & path_)
 
 void EvalState::checkURI(const std::string & uri)
 {
-    if (!settings.restrictEval) return;
+    if (!evalSettings.restrictEval) return;
 
     /* 'uri' should be equal to a prefix, or in a subdirectory of a
        prefix. Thus, the prefix https://github.co does not permit
        access to https://github.com. Note: this allows 'http://' and
        'https://' as prefixes for any http/https URI. */
-    for (auto & prefix : settings.allowedUris.get())
+    for (auto & prefix : evalSettings.allowedUris.get())
         if (uri == prefix ||
             (uri.size() > prefix.size()
             && prefix.size() > 0
@@ -443,7 +440,7 @@ Value * EvalState::addPrimOp(const string & name,
     string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
     Symbol sym = symbols.create(name2);
     v->type = tPrimOp;
-    v->primOp = NEW PrimOp(primOp, arity, sym);
+    v->primOp = new PrimOp(primOp, arity, sym);
     staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl;
     baseEnv.values[baseEnvDispl++] = v;
     baseEnv.values[0]->attrs->push_back(Attr(sym, v));
@@ -562,12 +559,12 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
     if (!var.fromWith) return env->values[var.displ];
 
     while (1) {
-        if (!env->haveWithAttrs) {
+        if (env->type == Env::HasWithExpr) {
             if (noEval) return 0;
             Value * v = allocValue();
             evalAttrs(*env->up, (Expr *) env->values[0], *v);
             env->values[0] = v;
-            env->haveWithAttrs = true;
+            env->type = Env::HasWithAttrs;
         }
         Bindings::iterator j = env->values[0]->attrs->find(var.name);
         if (j != env->values[0]->attrs->end()) {
@@ -581,10 +578,19 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
 }
 
 
+std::atomic<uint64_t> nrValuesFreed{0};
+
+void finalizeValue(void * obj, void * data)
+{
+    nrValuesFreed++;
+}
+
 Value * EvalState::allocValue()
 {
     nrValues++;
-    return (Value *) allocBytes(sizeof(Value));
+    auto v = (Value *) allocBytes(sizeof(Value));
+    //GC_register_finalizer_no_order(v, finalizeValue, nullptr, nullptr, nullptr);
+    return v;
 }
 
 
@@ -597,6 +603,7 @@ Env & EvalState::allocEnv(size_t size)
     nrValuesInEnvs += size;
     Env * env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *));
     env->size = (decltype(Env::size)) size;
+    env->type = Env::Plain;
 
     /* We assume that env->values has been cleared by the allocator; maybeThunk() and lookupVar fromWith expect this. */
 
@@ -716,7 +723,17 @@ void EvalState::evalFile(const Path & path_, Value & v)
     }
 
     printTalkative("evaluating file '%1%'", path2);
-    Expr * e = parseExprFromFile(checkSourcePath(path2));
+    Expr * e = nullptr;
+
+    auto j = fileParseCache.find(path2);
+    if (j != fileParseCache.end())
+        e = j->second;
+
+    if (!e)
+        e = parseExprFromFile(checkSourcePath(path2));
+
+    fileParseCache[path2] = e;
+
     try {
         eval(e, v);
     } catch (Error & e) {
@@ -1199,7 +1216,7 @@ void ExprWith::eval(EvalState & state, Env & env, Value & v)
     Env & env2(state.allocEnv(1));
     env2.up = &env;
     env2.prevWith = prevWith;
-    env2.haveWithAttrs = false;
+    env2.type = Env::HasWithExpr;
     env2.values[0] = (Value *) attrs;
 
     body->eval(state, env2, v);
@@ -1703,6 +1720,20 @@ bool EvalState::eqValues(Value & v1, Value & v2)
 }
 
 
+void EvalState::printStats2()
+{
+    struct rusage ru;
+    getrusage(RUSAGE_SELF, &ru);
+
+    GC_prof_stats_s gc;
+    GC_get_prof_stats(&gc, sizeof(gc));
+
+    printError("STATS %d %d %d %d %d %d",
+        nrValues, nrValuesFreed.load(), nrValues - nrValuesFreed,
+        ru.ru_maxrss,
+        gc.heapsize_full, gc.free_bytes_full);
+}
+
 void EvalState::printStats()
 {
     bool showStats = getEnv("NIX_SHOW_STATS", "0") != "0";
@@ -1857,9 +1888,10 @@ size_t valueSize(Value & v)
 
         size_t sz = sizeof(Env) + sizeof(Value *) * env.size;
 
-        for (size_t i = 0; i < env.size; ++i)
-            if (env.values[i])
-                sz += doValue(*env.values[i]);
+        if (env.type != Env::HasWithExpr)
+            for (size_t i = 0; i < env.size; ++i)
+                if (env.values[i])
+                    sz += doValue(*env.values[i]);
 
         if (env.up) sz += doEnv(*env.up);
 
@@ -1888,4 +1920,9 @@ std::ostream & operator << (std::ostream & str, const ExternalValueBase & v) {
 }
 
 
+EvalSettings evalSettings;
+
+static GlobalConfig::Register r1(&evalSettings);
+
+
 }
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
index 86e93a5ac9ce..46bda86d084e 100644
--- a/src/libexpr/eval.hh
+++ b/src/libexpr/eval.hh
@@ -5,8 +5,10 @@
 #include "nixexpr.hh"
 #include "symbol-table.hh"
 #include "hash.hh"
+#include "config.hh"
 
 #include <map>
+#include <unordered_map>
 
 
 namespace nix {
@@ -34,8 +36,8 @@ struct Env
 {
     Env * up;
     unsigned short size; // used by ‘valueSize’
-    unsigned short prevWith:15; // nr of levels up to next `with' environment
-    unsigned short haveWithAttrs:1;
+    unsigned short prevWith:14; // nr of levels up to next `with' environment
+    enum { Plain = 0, HasWithExpr, HasWithAttrs } type:2;
     Value * values[0];
 };
 
@@ -88,6 +90,14 @@ public:
 private:
     SrcToStore srcToStore;
 
+    /* A cache from path names to parse trees. */
+#if HAVE_BOEHMGC
+    typedef std::map<Path, Expr *, std::less<Path>, traceable_allocator<std::pair<const Path, Expr *> > > FileParseCache;
+#else
+    typedef std::map<Path, Expr *> FileParseCache;
+#endif
+    FileParseCache fileParseCache;
+
     /* A cache from path names to values. */
 #if HAVE_BOEHMGC
     typedef std::map<Path, Value, std::less<Path>, traceable_allocator<std::pair<const Path, Value> > > FileEvalCache;
@@ -100,6 +110,9 @@ private:
 
     std::map<std::string, std::pair<bool, std::string>> searchPathResolved;
 
+    /* Cache used by checkSourcePath(). */
+    std::unordered_map<Path, Path> resolvedPaths;
+
 public:
 
     EvalState(const Strings & _searchPath, ref<Store> store);
@@ -263,6 +276,7 @@ public:
 
     /* Print statistics. */
     void printStats();
+    void printStats2();
 
     void realiseContext(const PathSet & context);
 
@@ -316,4 +330,25 @@ struct InvalidPathError : EvalError
 #endif
 };
 
+struct EvalSettings : Config
+{
+    Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
+        "Whether builtin functions that allow executing native code should be enabled."};
+
+    Setting<bool> restrictEval{this, false, "restrict-eval",
+        "Whether to restrict file system access to paths in $NIX_PATH, "
+        "and network access to the URI prefixes listed in 'allowed-uris'."};
+
+    Setting<bool> pureEval{this, false, "pure-eval",
+        "Whether to restrict file system and network access to files specified by cryptographic hash."};
+
+    Setting<bool> enableImportFromDerivation{this, true, "allow-import-from-derivation",
+        "Whether the evaluator allows importing the result of a derivation."};
+
+    Setting<Strings> allowedUris{this, {}, "allowed-uris",
+        "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."};
+};
+
+extern EvalSettings evalSettings;
+
 }
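
EvalSettings is built on the Config/Setting machinery that this change set introduces in src/libutil/config.hh: a subsystem declares a Config subclass whose Setting<T> members register themselves by name, and a GlobalConfig::Register (see the end of eval.cc above) exposes the group to nix.conf and --option. A minimal sketch of the same pattern with a hypothetical settings group (the foo-* names are illustrative only, not part of this patch):

```cpp
#include "config.hh"   // Config, Setting<T>, GlobalConfig (new in this change set)

using namespace nix;

/* Hypothetical settings group, mirroring how EvalSettings above is wired up. */
struct FooSettings : Config
{
    Setting<bool> enableFoo{this, false, "enable-foo",
        "Whether the (hypothetical) foo feature is enabled."};

    Setting<std::string> fooUrl{this, "https://example.org", "foo-url",
        "Base URL used by the (hypothetical) foo feature."};
};

FooSettings fooSettings;

/* Registering the group makes its settings reachable through nix.conf and
   '--option', just like the GlobalConfig::Register for evalSettings in
   eval.cc. */
static GlobalConfig::Register rFoo(&fooSettings);
```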
diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc
index 8b1404595548..3f6017957782 100644
--- a/src/libexpr/json-to-value.cc
+++ b/src/libexpr/json-to-value.cc
@@ -110,7 +110,7 @@ static void parseJSON(EvalState & state, const char * & s, Value & v)
             if (number_type == tFloat)
                 mkFloat(v, stod(tmp_number));
             else
-                mkInt(v, stoi(tmp_number));
+                mkInt(v, stol(tmp_number));
         } catch (std::invalid_argument e) {
             throw JSONParseError("invalid JSON number");
         } catch (std::out_of_range e) {
diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l
index 1e9c29afa133..29ca327c1e4e 100644
--- a/src/libexpr/lexer.l
+++ b/src/libexpr/lexer.l
@@ -209,11 +209,13 @@ or          { return OR_KW; }
 \#[^\r\n]*    /* single-line comments */
 \/\*([^*]|\*+[^*/])*\*+\/  /* long comments */
 
-{ANY}           return yytext[0];
+{ANY}       {
+              /* Don't return a negative number, as this will cause
+                 Bison to stop parsing without an error. */
+              return (unsigned char) yytext[0];
+            }
 
 }
 
-<<EOF>> { data->atEnd = true; return 0; }
-
 %%
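
The cast to unsigned char matters because yytext holds plain char, which is signed on most platforms: a byte such as 0xC3 (the lead byte of many UTF-8 sequences, exercised by the new tests/lang/parse-fail-uft8.nix test) would otherwise be sign-extended to a negative int, which, as the comment notes, makes Bison stop parsing without reporting an error. A standalone illustration, not part of the patch:

```cpp
#include <cassert>

int main()
{
    /* First byte of a multi-byte UTF-8 sequence, e.g. 'é' = 0xC3 0xA9. */
    char c = static_cast<char>(0xC3);

    int asToken  = (unsigned char) c;  /* 195: a valid, positive token value */
    int asSigned = c;                  /* typically -61 where char is signed */

    assert(asToken == 195);
    assert(asSigned == 195 || asSigned < 0);  /* signedness of char is platform-defined */
    return 0;
}
```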
 
diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh
index b486595f07ab..665a42987dc1 100644
--- a/src/libexpr/nixexpr.hh
+++ b/src/libexpr/nixexpr.hh
@@ -11,7 +11,6 @@ namespace nix {
 
 MakeError(EvalError, Error)
 MakeError(ParseError, Error)
-MakeError(IncompleteParseError, ParseError)
 MakeError(AssertionError, EvalError)
 MakeError(ThrownError, AssertionError)
 MakeError(Abort, EvalError)
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
index e3f4521844e8..eee48887dc22 100644
--- a/src/libexpr/parser.y
+++ b/src/libexpr/parser.y
@@ -31,12 +31,10 @@ namespace nix {
         Path basePath;
         Symbol path;
         string error;
-        bool atEnd;
         Symbol sLetBody;
         ParseData(EvalState & state)
             : state(state)
             , symbols(state.symbols)
-            , atEnd(false)
             , sLetBody(symbols.create("<let-body>"))
             { };
     };
@@ -541,12 +539,7 @@ Expr * EvalState::parse(const char * text,
     int res = yyparse(scanner, &data);
     yylex_destroy(scanner);
 
-    if (res) {
-      if (data.atEnd)
-        throw IncompleteParseError(data.error);
-      else
-        throw ParseError(data.error);
-    }
+    if (res) throw ParseError(data.error);
 
     data.result->bindVars(staticEnv);
 
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index 57dc7bd1279d..3a6c4035b8b8 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -73,7 +73,7 @@ void EvalState::realiseContext(const PathSet & context)
 
     if (drvs.empty()) return;
 
-    if (!settings.enableImportFromDerivation)
+    if (!evalSettings.enableImportFromDerivation)
         throw EvalError(format("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin()));
 
     /* For performance, prefetch all substitute info. */
@@ -464,7 +464,7 @@ static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Val
 static void prim_getEnv(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
     string name = state.forceStringNoCtx(*args[0], pos);
-    mkString(v, settings.restrictEval || settings.pureEval ? "" : getEnv(name));
+    mkString(v, evalSettings.restrictEval || evalSettings.pureEval ? "" : getEnv(name));
 }
 
 
@@ -1031,7 +1031,7 @@ static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Valu
 static void addPath(EvalState & state, const Pos & pos, const string & name, const Path & path_,
     Value * filterFun, bool recursive, const Hash & expectedHash, Value & v)
 {
-    const auto path = settings.pureEval && expectedHash ?
+    const auto path = evalSettings.pureEval && expectedHash ?
         path_ :
         state.checkSourcePath(path_);
     PathFilter filter = filterFun ? ([&](const Path & path) {
@@ -1676,6 +1676,20 @@ static void prim_div(EvalState & state, const Pos & pos, Value * * args, Value &
     }
 }
 
+static void prim_bitAnd(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    mkInt(v, state.forceInt(*args[0], pos) & state.forceInt(*args[1], pos));
+}
+
+static void prim_bitOr(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    mkInt(v, state.forceInt(*args[0], pos) | state.forceInt(*args[1], pos));
+}
+
+static void prim_bitXor(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    mkInt(v, state.forceInt(*args[0], pos) ^ state.forceInt(*args[1], pos));
+}
 
 static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
@@ -2042,7 +2056,7 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
 
     state.checkURI(url);
 
-    if (settings.pureEval && !expectedHash)
+    if (evalSettings.pureEval && !expectedHash)
         throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", who);
 
     Path res = getDownloader()->downloadCached(state.store, url, unpack, name, expectedHash);
@@ -2110,12 +2124,12 @@ void EvalState::createBaseEnv()
         addConstant(name, v);
     };
 
-    if (!settings.pureEval) {
+    if (!evalSettings.pureEval) {
         mkInt(v, time(0));
         addConstant("__currentTime", v);
     }
 
-    if (!settings.pureEval) {
+    if (!evalSettings.pureEval) {
         mkString(v, settings.thisSystem);
         addConstant("__currentSystem", v);
     }
@@ -2140,7 +2154,7 @@ void EvalState::createBaseEnv()
     mkApp(v, *vScopedImport, *v2);
     forceValue(v);
     addConstant("import", v);
-    if (settings.enableNativeCode) {
+    if (evalSettings.enableNativeCode) {
         addPrimOp("__importNative", 2, prim_importNative);
         addPrimOp("__exec", 1, prim_exec);
     }
@@ -2167,7 +2181,7 @@ void EvalState::createBaseEnv()
 
     // Paths
     addPrimOp("__toPath", 1, prim_toPath);
-    if (settings.pureEval)
+    if (evalSettings.pureEval)
         addPurityError("__storePath");
     else
         addPrimOp("__storePath", 1, prim_storePath);
@@ -2221,6 +2235,9 @@ void EvalState::createBaseEnv()
     addPrimOp("__sub", 2, prim_sub);
     addPrimOp("__mul", 2, prim_mul);
     addPrimOp("__div", 2, prim_div);
+    addPrimOp("__bitAnd", 2, prim_bitAnd);
+    addPrimOp("__bitOr", 2, prim_bitOr);
+    addPrimOp("__bitXor", 2, prim_bitXor);
     addPrimOp("__lessThan", 2, prim_lessThan);
 
     // String manipulation
diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc
index 8bb74dad639e..7aa98e0bfab3 100644
--- a/src/libexpr/primops/fetchGit.cc
+++ b/src/libexpr/primops/fetchGit.cc
@@ -28,7 +28,7 @@ GitInfo exportGit(ref<Store> store, const std::string & uri,
     std::experimental::optional<std::string> ref, std::string rev,
     const std::string & name)
 {
-    if (settings.pureEval && rev == "")
+    if (evalSettings.pureEval && rev == "")
         throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision");
 
     if (!ref && rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.git")) {
diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc
index a75c5fc2ddff..9d35f6d0d6d7 100644
--- a/src/libexpr/primops/fetchMercurial.cc
+++ b/src/libexpr/primops/fetchMercurial.cc
@@ -27,7 +27,7 @@ std::regex commitHashRegex("^[0-9a-fA-F]{40}$");
 HgInfo exportMercurial(ref<Store> store, const std::string & uri,
     std::string rev, const std::string & name)
 {
-    if (settings.pureEval && rev == "")
+    if (evalSettings.pureEval && rev == "")
         throw Error("in pure evaluation mode, 'fetchMercurial' requires a Mercurial revision");
 
     if (rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.hg")) {
diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc
index bcc05c2cdad6..4c35a4199590 100644
--- a/src/libmain/common-args.cc
+++ b/src/libmain/common-args.cc
@@ -29,14 +29,14 @@ MixCommonArgs::MixCommonArgs(const string & programName)
         .arity(2)
         .handler([](std::vector<std::string> ss) {
             try {
-                settings.set(ss[0], ss[1]);
+                globalConfig.set(ss[0], ss[1]);
             } catch (UsageError & e) {
                 warn(e.what());
             }
         });
 
     std::string cat = "config";
-    settings.convertToArgs(*this, cat);
+    globalConfig.convertToArgs(*this, cat);
 
     // Backward compatibility hack: nix-env already had a --system flag.
     if (programName == "nix-env") longFlags.erase("system");
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 91a4eaf922a6..4ed34e54dc55 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -109,7 +109,7 @@ void initNix()
     opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks());
     CRYPTO_set_locking_callback(opensslLockCallback);
 
-    settings.loadConfFile();
+    loadConfFile();
 
     startSignalHandlerThread();
 
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 2e9a13e564ca..76c0a1a891b8 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -54,17 +54,38 @@ void BinaryCacheStore::init()
     }
 }
 
-std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
+void BinaryCacheStore::getFile(const std::string & path,
+    Callback<std::shared_ptr<std::string>> callback)
+{
+    try {
+        callback(getFile(path));
+    } catch (...) { callback.rethrow(); }
+}
+
+void BinaryCacheStore::getFile(const std::string & path, Sink & sink)
 {
     std::promise<std::shared_ptr<std::string>> promise;
     getFile(path,
-        [&](std::shared_ptr<std::string> result) {
-            promise.set_value(result);
-        },
-        [&](std::exception_ptr exc) {
-            promise.set_exception(exc);
-        });
-    return promise.get_future().get();
+        {[&](std::future<std::shared_ptr<std::string>> result) {
+            try {
+                promise.set_value(result.get());
+            } catch (...) {
+                promise.set_exception(std::current_exception());
+            }
+        }});
+    auto data = promise.get_future().get();
+    sink((unsigned char *) data->data(), data->size());
+}
+
+std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
+{
+    StringSink sink;
+    try {
+        getFile(path, sink);
+    } catch (NoSuchBinaryCacheFile &) {
+        return nullptr;
+    }
+    return sink.s;
 }
 
 Path BinaryCacheStore::narInfoFileFor(const Path & storePath)
@@ -196,30 +217,31 @@ void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
 {
     auto info = queryPathInfo(storePath).cast<const NarInfo>();
 
-    auto nar = getFile(info->url);
-
-    if (!nar) throw Error(format("file '%s' missing from binary cache") % info->url);
+    auto source = sinkToSource([this, url{info->url}](Sink & sink) {
+        try {
+            getFile(url, sink);
+        } catch (NoSuchBinaryCacheFile & e) {
+            throw SubstituteGone(e.what());
+        }
+    });
 
     stats.narRead++;
-    stats.narReadCompressedBytes += nar->size();
+    //stats.narReadCompressedBytes += nar->size(); // FIXME
 
     uint64_t narSize = 0;
 
-    StringSource source(*nar);
-
     LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
         sink(data, len);
         narSize += len;
     });
 
-    decompress(info->compression, source, wrapperSink);
+    decompress(info->compression, *source, wrapperSink);
 
     stats.narReadBytes += narSize;
 }
 
 void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure)
+    Callback<std::shared_ptr<ValidPathInfo>> callback)
 {
     auto uri = getUri();
     auto act = std::make_shared<Activity>(*logger, lvlTalkative, actQueryPathInfo,
@@ -229,17 +251,22 @@ void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
     auto narInfoFile = narInfoFileFor(storePath);
 
     getFile(narInfoFile,
-        [=](std::shared_ptr<std::string> data) {
-            if (!data) return success(0);
+        {[=](std::future<std::shared_ptr<std::string>> fut) {
+            try {
+                auto data = fut.get();
 
-            stats.narInfoRead++;
+                if (!data) return callback(nullptr);
 
-            callSuccess(success, failure, (std::shared_ptr<ValidPathInfo>)
-                std::make_shared<NarInfo>(*this, *data, narInfoFile));
+                stats.narInfoRead++;
 
-            (void) act; // force Activity into this lambda to ensure it stays alive
-        },
-        failure);
+                callback((std::shared_ptr<ValidPathInfo>)
+                    std::make_shared<NarInfo>(*this, *data, narInfoFile));
+
+                (void) act; // force Activity into this lambda to ensure it stays alive
+            } catch (...) {
+                callback.rethrow();
+            }
+        }});
 }
 
 Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index e20b968442b7..6bc83fc50ca1 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -38,11 +38,16 @@ public:
         const std::string & data,
         const std::string & mimeType) = 0;
 
-    /* Return the contents of the specified file, or null if it
-       doesn't exist. */
+    /* Note: subclasses must implement at least one of the two
+       following getFile() methods. */
+
+    /* Dump the contents of the specified file to a sink. */
+    virtual void getFile(const std::string & path, Sink & sink);
+
+    /* Fetch the specified file and call the specified callback with
+       the result. A subclass may implement this asynchronously. */
     virtual void getFile(const std::string & path,
-        std::function<void(std::shared_ptr<std::string>)> success,
-        std::function<void(std::exception_ptr exc)> failure) = 0;
+        Callback<std::shared_ptr<std::string>> callback);
 
     std::shared_ptr<std::string> getFile(const std::string & path);
 
@@ -71,8 +76,7 @@ public:
     { unsupported(); }
 
     void queryPathInfoUncached(const Path & path,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override;
+        Callback<std::shared_ptr<ValidPathInfo>> callback) override;
 
     void queryReferrers(const Path & path,
         PathSet & referrers) override
@@ -131,4 +135,6 @@ public:
 
 };
 
+MakeError(NoSuchBinaryCacheFile, Error);
+
 }
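
Given the note above that subclasses must implement at least one of the two getFile() overloads, a minimal subclass can provide just the streaming variant and throw the new NoSuchBinaryCacheFile error for missing files; BinaryCacheStore layers the Callback-based overload and the string-returning helper on top of whichever one is overridden. A hypothetical in-memory sketch (not part of the patch, with the remaining pure-virtual Store methods omitted):

```cpp
#include "binary-cache-store.hh"

#include <map>

using namespace nix;

/* Hypothetical in-memory binary cache, sketching the minimum a subclass
   now has to provide: fileExists(), upsertFile(), and one of the two
   getFile() overloads. getUri() and the other remaining pure-virtual
   Store methods are omitted here for brevity. */
class MemoryBinaryCacheStore : public BinaryCacheStore
{
    std::map<std::string, std::string> files;

public:

    MemoryBinaryCacheStore(const Params & params)
        : BinaryCacheStore(params) { }

    bool fileExists(const std::string & path) override
    {
        return files.count(path) != 0;
    }

    void upsertFile(const std::string & path,
        const std::string & data,
        const std::string & mimeType) override
    {
        files[path] = data;
    }

    /* Implement only the streaming overload; the default Callback-based
       getFile() and the std::string-returning helper are layered on top
       of it by BinaryCacheStore itself. */
    void getFile(const std::string & path, Sink & sink) override
    {
        auto i = files.find(path);
        if (i == files.end())
            throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path);
        sink((const unsigned char *) i->second.data(), i->second.size());
    }
};
```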
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index f70ab8108fd7..d75ca0be86ef 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -29,7 +29,9 @@
 #include <sys/utsname.h>
 #include <sys/select.h>
 #include <sys/resource.h>
+#include <sys/socket.h>
 #include <fcntl.h>
+#include <netdb.h>
 #include <unistd.h>
 #include <errno.h>
 #include <cstring>
@@ -672,8 +674,10 @@ HookInstance::HookInstance()
     toHook.readSide = -1;
 
     sink = FdSink(toHook.writeSide.get());
-    for (auto & setting : settings.getSettings())
-        sink << 1 << setting.first << setting.second;
+    std::map<std::string, Config::SettingInfo> settings;
+    globalConfig.getSettings(settings);
+    for (auto & setting : settings)
+        sink << 1 << setting.first << setting.second.value;
     sink << 0;
 }
 
@@ -731,7 +735,7 @@ private:
 
     /* Whether to retry substituting the outputs after building the
        inputs. */
-    bool retrySubstitution = false;
+    bool retrySubstitution;
 
     /* The derivation stored at drvPath. */
     std::unique_ptr<BasicDerivation> drv;
@@ -1121,6 +1125,8 @@ void DerivationGoal::haveDerivation()
 {
     trace("have derivation");
 
+    retrySubstitution = false;
+
     for (auto & i : drv->outputs)
         worker.store.addTempRoot(i.second.path);
 
@@ -1159,7 +1165,7 @@ void DerivationGoal::outputsSubstituted()
     /*  If the substitutes form an incomplete closure, then we should
         build the dependencies of this derivation, but after that, we
         can still use the substitutes for this derivation itself. */
-    if (nrIncompleteClosure > 0 && !retrySubstitution) retrySubstitution = true;
+    if (nrIncompleteClosure > 0) retrySubstitution = true;
 
     nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
 
@@ -1773,12 +1779,14 @@ static std::once_flag dns_resolve_flag;
 static void preloadNSS() {
     /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
        one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
-       been loaded in the parent. So we force a download of an invalid URL to force the NSS machinery to
+       been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
        load its lookup libraries in the parent before any child gets a chance to. */
     std::call_once(dns_resolve_flag, []() {
-        DownloadRequest request("http://this.pre-initializes.the.dns.resolvers.invalid");
-        request.tries = 1; // We only need to do it once, and this also suppresses an annoying warning
-        try { getDownloader()->download(request); } catch (...) {}
+        struct addrinfo *res = NULL;
+
+        if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", NULL, &res) != 0) {
+            if (res) freeaddrinfo(res);
+        }
     });
 }
 
@@ -3522,8 +3530,8 @@ private:
     /* The current substituter. */
     std::shared_ptr<Store> sub;
 
-    /* Whether any substituter can realise this path. */
-    bool hasSubstitute;
+    /* Whether a substituter failed. */
+    bool substituterFailed = false;
 
     /* Path info returned by the substituter's query info operation. */
     std::shared_ptr<const ValidPathInfo> info;
@@ -3587,7 +3595,6 @@ public:
 
 SubstitutionGoal::SubstitutionGoal(const Path & storePath, Worker & worker, RepairFlag repair)
     : Goal(worker)
-    , hasSubstitute(false)
     , repair(repair)
 {
     this->storePath = storePath;
@@ -3651,9 +3658,9 @@ void SubstitutionGoal::tryNext()
         /* Hack: don't indicate failure if there were no substituters.
            In that case the calling derivation should just do a
            build. */
-        amDone(hasSubstitute ? ecFailed : ecNoSubstituters);
+        amDone(substituterFailed ? ecFailed : ecNoSubstituters);
 
-        if (hasSubstitute) {
+        if (substituterFailed) {
             worker.failedSubstitutions++;
             worker.updateProgress();
         }
@@ -3689,8 +3696,6 @@ void SubstitutionGoal::tryNext()
 
     worker.updateProgress();
 
-    hasSubstitute = true;
-
     /* Bail out early if this substituter lacks a valid
        signature. LocalStore::addToStore() also checks for this, but
        only after we've downloaded the path. */
@@ -3805,8 +3810,19 @@ void SubstitutionGoal::finished()
         state = &SubstitutionGoal::init;
         worker.waitForAWhile(shared_from_this());
         return;
-    } catch (Error & e) {
-        printError(e.msg());
+    } catch (std::exception & e) {
+        printError(e.what());
+
+        /* Cause the parent build to fail unless --fallback is given,
+           or the substitute has disappeared. The latter case behaves
+           the same as the substitute never having existed in the
+           first place. */
+        try {
+            throw;
+        } catch (SubstituteGone &) {
+        } catch (...) {
+            substituterFailed = true;
+        }
 
         /* Try the next substitute. */
         state = &SubstitutionGoal::tryNext;
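
The SubstitutionGoal::finished() change above relies on a C++ idiom that is easy to misread: inside a catch handler, a bare `throw;` re-raises the exception currently being handled, so a nested try/catch can dispatch on its dynamic type. A self-contained sketch of that idiom follows; SubstituteGone here is a stand-in class for illustration, not the Nix error type.

    #include <iostream>
    #include <stdexcept>

    struct SubstituteGone : std::runtime_error
    {
        using std::runtime_error::runtime_error;
    };

    // Called from inside a catch handler: re-raise the in-flight exception
    // and classify it by type.
    void classify()
    {
        try {
            throw;
        } catch (SubstituteGone &) {
            std::cout << "substitute disappeared; treat it as never having existed\n";
        } catch (...) {
            std::cout << "real substituter failure\n";
        }
    }

    int main()
    {
        try {
            throw SubstituteGone("gone");
        } catch (std::exception &) {
            classify();
        }
    }
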
diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc
index 4ca4a838e3c4..1f4abd374f54 100644
--- a/src/libstore/builtins/fetchurl.cc
+++ b/src/libstore/builtins/fetchurl.cc
@@ -22,52 +22,60 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
         return i->second;
     };
 
-    auto fetch = [&](const string & url) {
-        /* No need to do TLS verification, because we check the hash of
-           the result anyway. */
-        DownloadRequest request(url);
-        request.verifyTLS = false;
-        request.decompress = false;
-
-        /* Note: have to use a fresh downloader here because we're in
-           a forked process. */
-        auto data = makeDownloader()->download(request);
-        assert(data.data);
-
-        return data.data;
-    };
+    Path storePath = getAttr("out");
+    auto mainUrl = getAttr("url");
+
+    /* Note: have to use a fresh downloader here because we're in
+       a forked process. */
+    auto downloader = makeDownloader();
+
+    auto fetch = [&](const std::string & url) {
+
+        auto source = sinkToSource([&](Sink & sink) {
+
+            /* No need to do TLS verification, because we check the hash of
+               the result anyway. */
+            DownloadRequest request(url);
+            request.verifyTLS = false;
+            request.decompress = false;
+
+            downloader->download(std::move(request), sink);
+        });
+
+        if (get(drv.env, "unpack", "") == "1") {
 
-    std::shared_ptr<std::string> data;
+            if (hasSuffix(mainUrl, ".xz")) {
+                auto source2 = sinkToSource([&](Sink & sink) {
+                    decompress("xz", *source, sink);
+                });
+                restorePath(storePath, *source2);
+            } else
+                restorePath(storePath, *source);
 
+        } else
+              writeFile(storePath, *source);
+
+        auto executable = drv.env.find("executable");
+        if (executable != drv.env.end() && executable->second == "1") {
+            if (chmod(storePath.c_str(), 0755) == -1)
+                throw SysError(format("making '%1%' executable") % storePath);
+        }
+    };
+
+    /* Try the hashed mirrors first. */
     if (getAttr("outputHashMode") == "flat")
         for (auto hashedMirror : settings.hashedMirrors.get())
             try {
                 if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
                 auto ht = parseHashType(getAttr("outputHashAlgo"));
-                data = fetch(hashedMirror + printHashType(ht) + "/" + Hash(getAttr("outputHash"), ht).to_string(Base16, false));
-                break;
+                fetch(hashedMirror + printHashType(ht) + "/" + Hash(getAttr("outputHash"), ht).to_string(Base16, false));
+                return;
             } catch (Error & e) {
                 debug(e.what());
             }
 
-    if (!data) data = fetch(getAttr("url"));
-
-    Path storePath = getAttr("out");
-
-    auto unpack = drv.env.find("unpack");
-    if (unpack != drv.env.end() && unpack->second == "1") {
-        if (string(*data, 0, 6) == string("\xfd" "7zXZ\0", 6))
-            data = decompress("xz", *data);
-        StringSource source(*data);
-        restorePath(storePath, source);
-    } else
-        writeFile(storePath, *data);
-
-    auto executable = drv.env.find("executable");
-    if (executable != drv.env.end() && executable->second == "1") {
-        if (chmod(storePath.c_str(), 0755) == -1)
-            throw SysError(format("making '%1%' executable") % storePath);
-    }
+    /* Otherwise try the specified URL. */
+    fetch(mainUrl);
 }
 
 }
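
The rewritten builtinFetchurl above streams the download straight into the store instead of buffering the whole file in memory: sinkToSource() turns a producer lambda (which writes into a Sink) into a pull-style Source, and a second sinkToSource() stage inserts xz decompression. The sketch below isolates that chaining idiom using only the calls visible in this hunk; streamUrlToStore is a made-up name and the exact includes are assumptions.

    #include "archive.hh"
    #include "compression.hh"
    #include "download.hh"
    #include "serialise.hh"

    using namespace nix;

    void streamUrlToStore(Downloader & downloader, const std::string & url, const Path & storePath)
    {
        /* Producer: the downloader pushes bytes into 'sink', which
           sinkToSource() exposes as a pull-style Source. */
        auto raw = sinkToSource([&](Sink & sink) {
            DownloadRequest request(url);
            request.decompress = false;
            downloader.download(std::move(request), sink);
        });

        /* Optional second stage: decompress the raw stream into a NAR. */
        auto unpacked = sinkToSource([&](Sink & sink) {
            decompress("xz", *raw, sink);
        });

        /* Consumer: unpack the NAR into the store path. */
        restorePath(storePath, *unpacked);
    }
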
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index 74b861281ee0..1e187ec5e954 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -342,7 +342,7 @@ Hash hashDerivationModulo(Store & store, Derivation drv)
         Hash h = drvHashes[i.first];
         if (!h) {
             assert(store.isValidPath(i.first));
-            Derivation drv2 = readDerivation(i.first);
+            Derivation drv2 = readDerivation(store.toRealPath(i.first));
             h = hashDerivationModulo(store, drv2);
             drvHashes[i.first] = h;
         }
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
index 18f9094f82e0..07acd5d0e004 100644
--- a/src/libstore/download.cc
+++ b/src/libstore/download.cc
@@ -7,6 +7,7 @@
 #include "s3.hh"
 #include "compression.hh"
 #include "pathlocks.hh"
+#include "finally.hh"
 
 #ifdef ENABLE_S3
 #include <aws/core/client/ClientConfiguration.h>
@@ -29,12 +30,25 @@ using namespace std::string_literals;
 
 namespace nix {
 
-double getTime()
+struct DownloadSettings : Config
 {
-    struct timeval tv;
-    gettimeofday(&tv, 0);
-    return tv.tv_sec + (tv.tv_usec / 1000000.0);
-}
+    Setting<bool> enableHttp2{this, true, "http2",
+        "Whether to enable HTTP/2 support."};
+
+    Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix",
+        "String appended to the user agent in HTTP requests."};
+
+    Setting<size_t> httpConnections{this, 25, "http-connections",
+        "Number of parallel HTTP connections.",
+        {"binary-caches-parallel-connections"}};
+
+    Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
+        "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
+};
+
+static DownloadSettings downloadSettings;
+
+static GlobalConfig::Register r1(&downloadSettings);
 
 std::string resolveUri(const std::string & uri)
 {
@@ -61,8 +75,6 @@ struct CurlDownloader : public Downloader
     std::random_device rd;
     std::mt19937 mt19937;
 
-    bool enableHttp2;
-
     struct DownloadItem : public std::enable_shared_from_this<DownloadItem>
     {
         CurlDownloader & downloader;
@@ -70,8 +82,7 @@ struct CurlDownloader : public Downloader
         DownloadResult result;
         Activity act;
         bool done = false; // whether either the success or failure function has been called
-        std::function<void(const DownloadResult &)> success;
-        std::function<void(std::exception_ptr exc)> failure;
+        Callback<DownloadResult> callback;
         CURL * req = 0;
         bool active = false; // whether the handle has been added to the multi object
         std::string status;
@@ -86,10 +97,15 @@ struct CurlDownloader : public Downloader
 
         std::string encoding;
 
-        DownloadItem(CurlDownloader & downloader, const DownloadRequest & request)
+        DownloadItem(CurlDownloader & downloader,
+            const DownloadRequest & request,
+            Callback<DownloadResult> callback)
             : downloader(downloader)
             , request(request)
-            , act(*logger, lvlTalkative, actDownload, fmt("downloading '%s'", request.uri), {request.uri}, request.parentAct)
+            , act(*logger, lvlTalkative, actDownload,
+                fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
+                {request.uri}, request.parentAct)
+            , callback(callback)
         {
             if (!request.expectedETag.empty())
                 requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
@@ -118,13 +134,16 @@ struct CurlDownloader : public Downloader
         {
             assert(!done);
             done = true;
-            callFailure(failure, std::make_exception_ptr(e));
+            callback.rethrow(std::make_exception_ptr(e));
         }
 
         size_t writeCallback(void * contents, size_t size, size_t nmemb)
         {
             size_t realSize = size * nmemb;
-            result.data->append((char *) contents, realSize);
+            if (request.dataCallback)
+                request.dataCallback((char *) contents, realSize);
+            else
+                result.data->append((char *) contents, realSize);
             return realSize;
         }
 
@@ -194,7 +213,7 @@ struct CurlDownloader : public Downloader
         }
 
         size_t readOffset = 0;
-        int readCallback(char *buffer, size_t size, size_t nitems)
+        size_t readCallback(char *buffer, size_t size, size_t nitems)
         {
             if (readOffset == request.data->length())
                 return 0;
@@ -205,7 +224,7 @@ struct CurlDownloader : public Downloader
             return count;
         }
 
-        static int readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp)
+        static size_t readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp)
         {
             return ((DownloadItem *) userp)->readCallback(buffer, size, nitems);
         }
@@ -225,15 +244,16 @@ struct CurlDownloader : public Downloader
 
             curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str());
             curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L);
+            curl_easy_setopt(req, CURLOPT_MAXREDIRS, 10);
             curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
             curl_easy_setopt(req, CURLOPT_USERAGENT,
                 ("curl/" LIBCURL_VERSION " Nix/" + nixVersion +
-                    (settings.userAgentSuffix != "" ? " " + settings.userAgentSuffix.get() : "")).c_str());
+                    (downloadSettings.userAgentSuffix != "" ? " " + downloadSettings.userAgentSuffix.get() : "")).c_str());
             #if LIBCURL_VERSION_NUM >= 0x072b00
             curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1);
             #endif
             #if LIBCURL_VERSION_NUM >= 0x072f00
-            if (downloader.enableHttp2)
+            if (downloadSettings.enableHttp2)
                 curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
             #endif
             curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, DownloadItem::writeCallbackWrapper);
@@ -265,7 +285,7 @@ struct CurlDownloader : public Downloader
                 curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
             }
 
-            curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, settings.connectTimeout.get());
+            curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, downloadSettings.connectTimeout.get());
 
             curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L);
             curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, lowSpeedTimeout);
@@ -288,8 +308,8 @@ struct CurlDownloader : public Downloader
             if (effectiveUrlCStr)
                 result.effectiveUrl = effectiveUrlCStr;
 
-            debug(format("finished download of '%s'; curl status = %d, HTTP status = %d, body = %d bytes")
-                % request.uri % code % httpStatus % (result.data ? result.data->size() : 0));
+            debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes",
+                request.verb(), request.uri, code, httpStatus, result.data ? result.data->size() : 0);
 
             if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) {
                 code = CURLE_OK;
@@ -305,17 +325,17 @@ struct CurlDownloader : public Downloader
                 try {
                     if (request.decompress)
                         result.data = decodeContent(encoding, ref<std::string>(result.data));
-                    callSuccess(success, failure, const_cast<const DownloadResult &>(result));
                     act.progress(result.data->size(), result.data->size());
+                    callback(std::move(result));
                 } catch (...) {
                     done = true;
-                    callFailure(failure, std::current_exception());
+                    callback.rethrow();
                 }
             } else {
                 // We treat most errors as transient, but won't retry when hopeless
                 Error err = Transient;
 
-                if (httpStatus == 404 || code == CURLE_FILE_COULDNT_READ_FILE) {
+                if (httpStatus == 404 || httpStatus == 410 || code == CURLE_FILE_COULDNT_READ_FILE) {
                     // The file is definitely not there
                     err = NotFound;
                 } else if (httpStatus == 401 || httpStatus == 403 || httpStatus == 407) {
@@ -345,6 +365,7 @@ struct CurlDownloader : public Downloader
                         case CURLE_INTERFACE_FAILED:
                         case CURLE_UNKNOWN_OPTION:
                         case CURLE_SSL_CACERT_BADFILE:
+                        case CURLE_TOO_MANY_REDIRECTS:
                             err = Misc;
                             break;
                         default: // Shut up warnings
@@ -356,10 +377,16 @@ struct CurlDownloader : public Downloader
 
                 auto exc =
                     code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
-                    ? DownloadError(Interrupted, format("download of '%s' was interrupted") % request.uri)
+                    ? DownloadError(Interrupted, fmt("%s of '%s' was interrupted", request.verb(), request.uri))
                     : httpStatus != 0
-                      ? DownloadError(err, format("unable to download '%s': HTTP error %d (curl error: %s)") % request.uri % httpStatus % curl_easy_strerror(code))
-                      : DownloadError(err, format("unable to download '%s': %s (%d)") % request.uri % curl_easy_strerror(code) % code);
+                    ? DownloadError(err,
+                        fmt("unable to %s '%s': HTTP error %d",
+                            request.verb(), request.uri, httpStatus)
+                        + (code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
+                        )
+                    : DownloadError(err,
+                        fmt("unable to %s '%s': %s (%d)",
+                            request.verb(), request.uri, curl_easy_strerror(code), code));
 
                 /* If this is a transient error, then maybe retry the
                    download after a while. */
@@ -408,11 +435,9 @@ struct CurlDownloader : public Downloader
         #endif
         #if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0
         curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS,
-            settings.binaryCachesParallelConnections.get());
+            downloadSettings.httpConnections.get());
         #endif
 
-        enableHttp2 = settings.enableHttp2;
-
         wakeupPipe.create();
         fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK);
 
@@ -522,7 +547,7 @@ struct CurlDownloader : public Downloader
             }
 
             for (auto & item : incoming) {
-                debug(format("starting download of %s") % item->request.uri);
+                debug("starting %s of %s", item->request.verb(), item->request.uri);
                 item->init();
                 curl_multi_add_handle(curlm, item->req);
                 item->active = true;
@@ -551,6 +576,11 @@ struct CurlDownloader : public Downloader
 
     void enqueueItem(std::shared_ptr<DownloadItem> item)
     {
+        if (item->request.data
+            && !hasPrefix(item->request.uri, "http://")
+            && !hasPrefix(item->request.uri, "https://"))
+            throw nix::Error("uploading to '%s' is not supported", item->request.uri);
+
         {
             auto state(state_.lock());
             if (state->quit)
@@ -561,13 +591,12 @@ struct CurlDownloader : public Downloader
     }
 
     void enqueueDownload(const DownloadRequest & request,
-        std::function<void(const DownloadResult &)> success,
-        std::function<void(std::exception_ptr exc)> failure) override
+        Callback<DownloadResult> callback) override
     {
         /* Ugly hack to support s3:// URIs. */
         if (hasPrefix(request.uri, "s3://")) {
             // FIXME: do this on a worker thread
-            sync2async<DownloadResult>(success, failure, [&]() -> DownloadResult {
+            try {
 #ifdef ENABLE_S3
                 S3Helper s3Helper("", Aws::Region::US_EAST_1); // FIXME: make configurable
                 auto slash = request.uri.find('/', 5);
@@ -581,27 +610,22 @@ struct CurlDownloader : public Downloader
                 if (!s3Res.data)
                     throw DownloadError(NotFound, fmt("S3 object '%s' does not exist", request.uri));
                 res.data = s3Res.data;
-                return res;
+                callback(std::move(res));
 #else
                 throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri);
 #endif
-            });
+            } catch (...) { callback.rethrow(); }
             return;
         }
 
-        auto item = std::make_shared<DownloadItem>(*this, request);
-        item->success = success;
-        item->failure = failure;
-        enqueueItem(item);
+        enqueueItem(std::make_shared<DownloadItem>(*this, request, callback));
     }
 };
 
 ref<Downloader> getDownloader()
 {
-    static std::shared_ptr<Downloader> downloader;
-    static std::once_flag downloaderCreated;
-    std::call_once(downloaderCreated, [&]() { downloader = makeDownloader(); });
-    return ref<Downloader>(downloader);
+    static ref<Downloader> downloader = makeDownloader();
+    return downloader;
 }
 
 ref<Downloader> makeDownloader()
@@ -613,8 +637,13 @@ std::future<DownloadResult> Downloader::enqueueDownload(const DownloadRequest &
 {
     auto promise = std::make_shared<std::promise<DownloadResult>>();
     enqueueDownload(request,
-        [promise](const DownloadResult & result) { promise->set_value(result); },
-        [promise](std::exception_ptr exc) { promise->set_exception(exc); });
+        {[promise](std::future<DownloadResult> fut) {
+            try {
+                promise->set_value(fut.get());
+            } catch (...) {
+                promise->set_exception(std::current_exception());
+            }
+        }});
     return promise->get_future();
 }
 
@@ -623,7 +652,93 @@ DownloadResult Downloader::download(const DownloadRequest & request)
     return enqueueDownload(request).get();
 }
 
-Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl)
+void Downloader::download(DownloadRequest && request, Sink & sink)
+{
+    /* Note: we can't call 'sink' via request.dataCallback, because
+       that would cause the sink to execute on the downloader
+       thread. If 'sink' is a coroutine, this will fail. Also, if the
+       sink is expensive (e.g. one that does decompression and writing
+       to the Nix store), it would stall the download thread too much.
+       Therefore we use a buffer to communicate data between the
+       download thread and the calling thread. */
+
+    struct State {
+        bool quit = false;
+        std::exception_ptr exc;
+        std::string data;
+        std::condition_variable avail, request;
+    };
+
+    auto _state = std::make_shared<Sync<State>>();
+
+    /* In case of an exception, wake up the download thread. FIXME:
+       abort the download request. */
+    Finally finally([&]() {
+        auto state(_state->lock());
+        state->quit = true;
+        state->request.notify_one();
+    });
+
+    request.dataCallback = [_state](char * buf, size_t len) {
+
+        auto state(_state->lock());
+
+        if (state->quit) return;
+
+        /* If the buffer is full, then go to sleep until the calling
+           thread wakes us up (i.e. when it has removed data from the
+           buffer). Note: this does stall the download thread. */
+        while (state->data.size() > 1024 * 1024) {
+            if (state->quit) return;
+            debug("download buffer is full; going to sleep");
+            state.wait(state->request);
+        }
+
+        /* Append data to the buffer and wake up the calling
+           thread. */
+        state->data.append(buf, len);
+        state->avail.notify_one();
+    };
+
+    enqueueDownload(request,
+        {[_state](std::future<DownloadResult> fut) {
+            auto state(_state->lock());
+            state->quit = true;
+            try {
+                fut.get();
+            } catch (...) {
+                state->exc = std::current_exception();
+            }
+            state->avail.notify_one();
+            state->request.notify_one();
+        }});
+
+    auto state(_state->lock());
+
+    while (true) {
+        checkInterrupt();
+
+        if (state->quit) {
+            if (state->exc) std::rethrow_exception(state->exc);
+            break;
+        }
+
+        /* If no data is available, then wait for the download thread
+           to wake us up. */
+        if (state->data.empty())
+            state.wait(state->avail);
+
+        /* If data is available, then flush it to the sink and wake up
+           the download thread if it's blocked on a full buffer. */
+        if (!state->data.empty()) {
+            sink((unsigned char *) state->data.data(), state->data.size());
+            state->data.clear();
+            state->request.notify_one();
+        }
+    }
+}
+
+Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl, int ttl)
 {
     auto url = resolveUri(url_);
 
@@ -653,7 +768,6 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa
 
     string expectedETag;
 
-    int ttl = settings.tarballTtl;
     bool skip = false;
 
     if (pathExists(fileLink) && pathExists(dataFile)) {
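
For callers that do not need streaming, the promise-based wrapper shown above keeps a blocking interface on top of enqueueDownload(). A short usage sketch of that future-based API follows; printCacheInfo is an invented name, the URL is only an example, and the includes are assumptions, but DownloadRequest, enqueueDownload() and the DownloadResult fields are as they appear in this diff.

    #include "download.hh"
    #include "logging.hh"

    using namespace nix;

    void printCacheInfo()
    {
        DownloadRequest request("https://cache.nixos.org/nix-cache-info");

        /* enqueueDownload() returns a std::future; get() blocks and
           rethrows any DownloadError raised on the downloader thread. */
        auto result = getDownloader()->enqueueDownload(request).get();

        printInfo("got %d bytes (etag '%s')", result.data->size(), result.etag);
    }
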
diff --git a/src/libstore/download.hh b/src/libstore/download.hh
index 0b8d29b21dfe..da55df7a6e71 100644
--- a/src/libstore/download.hh
+++ b/src/libstore/download.hh
@@ -2,6 +2,7 @@
 
 #include "types.hh"
 #include "hash.hh"
+#include "globals.hh"
 
 #include <string>
 #include <future>
@@ -20,9 +21,15 @@ struct DownloadRequest
     bool decompress = true;
     std::shared_ptr<std::string> data;
     std::string mimeType;
+    std::function<void(char *, size_t)> dataCallback;
 
     DownloadRequest(const std::string & uri)
         : uri(uri), parentAct(getCurActivity()) { }
+
+    std::string verb()
+    {
+        return data ? "upload" : "download";
+    }
 };
 
 struct DownloadResult
@@ -41,20 +48,23 @@ struct Downloader
        the download. The future may throw a DownloadError
        exception. */
     virtual void enqueueDownload(const DownloadRequest & request,
-        std::function<void(const DownloadResult &)> success,
-        std::function<void(std::exception_ptr exc)> failure) = 0;
+        Callback<DownloadResult> callback) = 0;
 
     std::future<DownloadResult> enqueueDownload(const DownloadRequest & request);
 
     /* Synchronously download a file. */
     DownloadResult download(const DownloadRequest & request);
 
+    /* Download a file, writing its data to a sink. The sink will be
+       invoked on the thread of the caller. */
+    void download(DownloadRequest && request, Sink & sink);
+
     /* Check if the specified file is already in ~/.cache/nix/tarballs
        and is more recent than ‘tarball-ttl’ seconds. Otherwise,
        use the recorded ETag to verify if the server has a more
        recent version, and if so, download it to the Nix store. */
     Path downloadCached(ref<Store> store, const string & uri, bool unpack, string name = "",
-        const Hash & expectedHash = Hash(), string * effectiveUri = nullptr);
+        const Hash & expectedHash = Hash(), string * effectiveUri = nullptr, int ttl = settings.tarballTtl);
 
     enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
 };
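
The new Sink-based download() declared above invokes the sink on the calling thread, so a caller can process data incrementally without ever holding the whole body in memory. A hedged usage sketch; CountingSink and printSizeOf are made-up names, and the Sink interface (a call operator taking a byte pointer and a length) is assumed from serialise.hh and from how sinks are invoked elsewhere in this diff.

    #include "download.hh"
    #include "logging.hh"
    #include "serialise.hh"

    using namespace nix;

    struct CountingSink : Sink
    {
        size_t bytes = 0;
        void operator () (const unsigned char * data, size_t len) override
        {
            bytes += len;   // a real sink would hash the data or write it somewhere
        }
    };

    void printSizeOf(const std::string & url)
    {
        DownloadRequest request(url);
        CountingSink sink;
        getDownloader()->download(std::move(request), sink);   // blocks; sink runs here
        printInfo("'%s' is %d bytes", url, sink.bytes);
    }
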
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index ba49749d830a..b415d5421476 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -7,6 +7,7 @@
 #include <queue>
 #include <algorithm>
 #include <regex>
+#include <random>
 
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -365,7 +366,7 @@ try_again:
     char buf[bufsiz];
     auto res = readlink(file.c_str(), buf, bufsiz);
     if (res == -1) {
-        if (errno == ENOENT || errno == EACCES)
+        if (errno == ENOENT || errno == EACCES || errno == ESRCH)
             return;
         throw SysError("reading symlink");
     }
@@ -425,25 +426,28 @@ PathSet LocalStore::findRuntimeRoots()
                         readProcLink((format("%1%/%2%") % fdStr % fd_ent->d_name).str(), paths);
                     }
                 }
-                if (errno)
+                if (errno) {
+                    if (errno == ESRCH)
+                        continue;
                     throw SysError(format("iterating /proc/%1%/fd") % ent->d_name);
-                fdDir.reset();
-
-                auto mapLines =
-                    tokenizeString<std::vector<string>>(readFile((format("/proc/%1%/maps") % ent->d_name).str(), true), "\n");
-                for (const auto& line : mapLines) {
-                    auto match = std::smatch{};
-                    if (std::regex_match(line, match, mapRegex))
-                        paths.emplace(match[1]);
                 }
+                fdDir.reset();
 
                 try {
+                    auto mapLines =
+                        tokenizeString<std::vector<string>>(readFile((format("/proc/%1%/maps") % ent->d_name).str(), true), "\n");
+                    for (const auto& line : mapLines) {
+                        auto match = std::smatch{};
+                        if (std::regex_match(line, match, mapRegex))
+                            paths.emplace(match[1]);
+                    }
+
                     auto envString = readFile((format("/proc/%1%/environ") % ent->d_name).str(), true);
                     auto env_end = std::sregex_iterator{};
                     for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
                         paths.emplace(i->str());
                 } catch (SysError & e) {
-                    if (errno == ENOENT || errno == EACCES)
+                    if (errno == ENOENT || errno == EACCES || errno == ESRCH)
                         continue;
                     throw;
                 }
@@ -829,7 +833,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
                alphabetically first (e.g. /nix/store/000...).  This
                matters when using --max-freed etc. */
             vector<Path> entries_(entries.begin(), entries.end());
-            random_shuffle(entries_.begin(), entries_.end());
+            std::mt19937 gen(1);
+            std::shuffle(entries_.begin(), entries_.end(), gen);
 
             for (auto & i : entries_)
                 tryToDelete(state, i);
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 544566e0b573..d95db56726cb 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -28,9 +28,10 @@ namespace nix {
 
 Settings settings;
 
+static GlobalConfig::Register r1(&settings);
+
 Settings::Settings()
-    : Config({})
-    , nixPrefix(NIX_PREFIX)
+    : nixPrefix(NIX_PREFIX)
     , nixStore(canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))))
     , nixDataDir(canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)))
     , nixLogDir(canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)))
@@ -69,20 +70,15 @@ Settings::Settings()
     allowedImpureHostPrefixes = tokenizeString<StringSet>(DEFAULT_ALLOWED_IMPURE_PREFIXES);
 }
 
-void Settings::loadConfFile()
+void loadConfFile()
 {
-    applyConfigFile(nixConfDir + "/nix.conf");
+    globalConfig.applyConfigFile(settings.nixConfDir + "/nix.conf");
 
     /* We only want to send overrides to the daemon, i.e. stuff from
        ~/.nix/nix.conf or the command line. */
-    resetOverriden();
+    globalConfig.resetOverriden();
 
-    applyConfigFile(getConfigDir() + "/nix/nix.conf");
-}
-
-void Settings::set(const string & name, const string & value)
-{
-    Config::set(name, value);
+    globalConfig.applyConfigFile(getConfigDir() + "/nix/nix.conf");
 }
 
 unsigned int Settings::getDefaultCores()
@@ -162,23 +158,11 @@ void initPlugins()
                 throw Error("could not dynamically open plugin file '%s': %s", file, dlerror());
         }
     }
-    /* We handle settings registrations here, since plugins can add settings */
-    if (RegisterSetting::settingRegistrations) {
-        for (auto & registration : *RegisterSetting::settingRegistrations)
-            settings.addSetting(registration);
-        delete RegisterSetting::settingRegistrations;
-    }
-    settings.handleUnknownSettings();
-}
-
-RegisterSetting::SettingRegistrations * RegisterSetting::settingRegistrations;
 
-RegisterSetting::RegisterSetting(AbstractSetting * s)
-{
-    if (!settingRegistrations)
-        settingRegistrations = new SettingRegistrations;
-    settingRegistrations->emplace_back(s);
+    /* Since plugins can add settings, try to re-apply previously
+       unknown settings. */
+    globalConfig.reapplyUnknownSettings();
+    globalConfig.warnUnknownSettings();
 }
 
-
 }
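
With RegisterSetting gone, a plugin (or any other component, as DownloadSettings earlier in this commit shows) contributes settings by defining a Config subclass and registering it with GlobalConfig; reapplyUnknownSettings() then picks up values that were read from nix.conf before the plugin was loaded. A sketch following that pattern; MyPluginSettings and my-plugin-option are invented names for illustration.

    #include "config.hh"

    using namespace nix;

    struct MyPluginSettings : Config
    {
        // Default value, setting name (as used in nix.conf), description.
        Setting<std::string> greeting{this, "hello", "my-plugin-option",
            "An example option contributed by a plugin."};
    };

    static MyPluginSettings myPluginSettings;

    // Registration makes the settings visible to globalConfig, so they are
    // applied from nix.conf and listed by 'nix show-config'.
    static GlobalConfig::Register rMyPlugin(&myPluginSettings);
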
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 9360096aae8c..f589078dbb98 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -13,26 +13,6 @@ namespace nix {
 
 typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode;
 
-extern bool useCaseHack; // FIXME
-
-struct CaseHackSetting : public BaseSetting<bool>
-{
-    CaseHackSetting(Config * options,
-        const std::string & name,
-        const std::string & description,
-        const std::set<std::string> & aliases = {})
-        : BaseSetting<bool>(useCaseHack, name, description, aliases)
-    {
-        options->addSetting(this);
-    }
-
-    void set(const std::string & str) override
-    {
-        BaseSetting<bool>::set(str);
-        nix::useCaseHack = value;
-    }
-};
-
 struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
 {
     MaxBuildJobsSetting(Config * options,
@@ -56,10 +36,6 @@ public:
 
     Settings();
 
-    void loadConfFile();
-
-    void set(const string & name, const string & value);
-
     Path nixPrefix;
 
     /* The directory where we store sources and derived files. */
@@ -217,9 +193,6 @@ public:
     Setting<bool> showTrace{this, false, "show-trace",
         "Whether to show a stack trace on evaluation errors."};
 
-    Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
-        "Whether builtin functions that allow executing native code should be enabled."};
-
     Setting<SandboxMode> sandboxMode{this, smDisabled, "sandbox",
         "Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".",
         {"build-use-chroot", "build-use-sandbox"}};
@@ -232,13 +205,6 @@ public:
         "Additional paths to make available inside the build sandbox.",
         {"build-extra-chroot-dirs", "build-extra-sandbox-paths"}};
 
-    Setting<bool> restrictEval{this, false, "restrict-eval",
-        "Whether to restrict file system access to paths in $NIX_PATH, "
-        "and network access to the URI prefixes listed in 'allowed-uris'."};
-
-    Setting<bool> pureEval{this, false, "pure-eval",
-        "Whether to restrict file system and network access to files specified by cryptographic hash."};
-
     Setting<size_t> buildRepeat{this, 0, "repeat",
         "The number of times to repeat a build in order to verify determinism.",
         {"build-repeat"}};
@@ -280,13 +246,6 @@ public:
     Setting<Strings> secretKeyFiles{this, {}, "secret-key-files",
         "Secret keys with which to sign local builds."};
 
-    Setting<size_t> binaryCachesParallelConnections{this, 25, "http-connections",
-        "Number of parallel HTTP connections.",
-        {"binary-caches-parallel-connections"}};
-
-    Setting<bool> enableHttp2{this, true, "http2",
-        "Whether to enable HTTP/2 support."};
-
     Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl",
         "How soon to expire files fetched by builtins.fetchTarball and builtins.fetchurl."};
 
@@ -350,18 +309,6 @@ public:
     /* Path to the SSL CA file used */
     Path caFile;
 
-    Setting<bool> enableImportFromDerivation{this, true, "allow-import-from-derivation",
-        "Whether the evaluator allows importing the result of a derivation."};
-
-    CaseHackSetting useCaseHack{this, "use-case-hack",
-        "Whether to enable a Darwin-specific hack for dealing with file name collisions."};
-
-    Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
-        "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
-
-    Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix",
-        "String appended to the user agent in HTTP requests."};
-
 #if __linux__
     Setting<bool> filterSyscalls{this, true, "filter-syscalls",
             "Whether to prevent certain dangerous system calls, such as "
@@ -383,9 +330,6 @@ public:
     Setting<uint64_t> maxFree{this, std::numeric_limits<uint64_t>::max(), "max-free",
         "Stop deleting garbage when free disk space is above the specified amount."};
 
-    Setting<Strings> allowedUris{this, {}, "allowed-uris",
-        "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."};
-
     Setting<Paths> pluginFiles{this, {}, "plugin-files",
         "Plugins to dynamically load at nix initialization time."};
 };
@@ -398,15 +342,8 @@ extern Settings settings;
    anything else */
 void initPlugins();
 
+void loadConfFile();
 
 extern const string nixVersion;
 
-struct RegisterSetting
-{
-    typedef std::vector<AbstractSetting *> SettingRegistrations;
-    static SettingRegistrations * settingRegistrations;
-    RegisterSetting(AbstractSetting * s);
-};
-
-
 }
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index b9e9cd5daba5..ab524d523cf2 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -73,32 +73,46 @@ protected:
         try {
             getDownloader()->download(req);
         } catch (DownloadError & e) {
-            throw UploadToHTTP(format("uploading to HTTP binary cache at %1% not supported: %2%") % cacheUri % e.msg());
+            throw UploadToHTTP("while uploading to HTTP binary cache at '%s': %s", cacheUri, e.msg());
         }
     }
 
-    void getFile(const std::string & path,
-        std::function<void(std::shared_ptr<std::string>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override
+    DownloadRequest makeRequest(const std::string & path)
     {
         DownloadRequest request(cacheUri + "/" + path);
         request.tries = 8;
+        return request;
+    }
+
+    void getFile(const std::string & path, Sink & sink) override
+    {
+        auto request(makeRequest(path));
+        try {
+            getDownloader()->download(std::move(request), sink);
+        } catch (DownloadError & e) {
+            if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
+                throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
+            throw;
+        }
+    }
+
+    void getFile(const std::string & path,
+        Callback<std::shared_ptr<std::string>> callback) override
+    {
+        auto request(makeRequest(path));
 
         getDownloader()->enqueueDownload(request,
-            [success](const DownloadResult & result) {
-                success(result.data);
-            },
-            [success, failure](std::exception_ptr exc) {
+            {[callback](std::future<DownloadResult> result) {
                 try {
-                    std::rethrow_exception(exc);
+                    callback(result.get().data);
                 } catch (DownloadError & e) {
                     if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
-                        return success(0);
-                    failure(exc);
+                        return callback(std::shared_ptr<std::string>());
+                    callback.rethrow();
                 } catch (...) {
-                    failure(exc);
+                    callback.rethrow();
                 }
-            });
+            }});
     }
 
 };
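
One consequence of the new interface is visible here: the Sink-based getFile() reports a missing file by throwing NoSuchBinaryCacheFile, while the callback-based overload still maps 404/403 to a null string. The hunks for binary-cache-store.cc itself are not part of this excerpt, so the following is only a guess at how an in-memory convenience might be layered on top of the streaming overload there; StringSink is assumed from serialise.hh, and the copy at the end is just for brevity.

    /* Hypothetical sketch, not the actual implementation: buffer the
       streamed bytes and translate "missing" back into a null result for
       callers that still expect one. */
    std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
    {
        StringSink sink;
        try {
            getFile(path, sink);
        } catch (NoSuchBinaryCacheFile &) {
            return nullptr;
        }
        return std::make_shared<std::string>(*sink.s);   // copy for simplicity
    }
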
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index 5dee25308f7f..02d91ded04cd 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -84,10 +84,9 @@ struct LegacySSHStore : public Store
     }
 
     void queryPathInfoUncached(const Path & path,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override
+        Callback<std::shared_ptr<ValidPathInfo>> callback) override
     {
-        sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() -> std::shared_ptr<ValidPathInfo> {
+        try {
             auto conn(connections->get());
 
             debug("querying remote host '%s' for info on '%s'", host, path);
@@ -97,7 +96,7 @@ struct LegacySSHStore : public Store
 
             auto info = std::make_shared<ValidPathInfo>();
             conn->from >> info->path;
-            if (info->path.empty()) return nullptr;
+            if (info->path.empty()) return callback(nullptr);
             assert(path == info->path);
 
             PathSet references;
@@ -116,8 +115,8 @@ struct LegacySSHStore : public Store
             auto s = readString(conn->from);
             assert(s == "");
 
-            return info;
-        });
+            callback(std::move(info));
+        } catch (...) { callback.rethrow(); }
     }
 
     void addToStore(const ValidPathInfo & info, Source & source,
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
index 2577e90aef23..b7001795be4d 100644
--- a/src/libstore/local-binary-cache-store.cc
+++ b/src/libstore/local-binary-cache-store.cc
@@ -34,18 +34,14 @@ protected:
         const std::string & data,
         const std::string & mimeType) override;
 
-    void getFile(const std::string & path,
-        std::function<void(std::shared_ptr<std::string>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override
+    void getFile(const std::string & path, Sink & sink) override
     {
-        sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
-            try {
-                return std::make_shared<std::string>(readFile(binaryCacheDir + "/" + path));
-            } catch (SysError & e) {
-                if (e.errNo == ENOENT) return std::shared_ptr<std::string>();
-                throw;
-            }
-        });
+        try {
+            readFile(binaryCacheDir + "/" + path, sink);
+        } catch (SysError & e) {
+            if (e.errNo == ENOENT)
+                throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path);
+        }
     }
 
     PathSet queryAllValidPaths() override
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index ef8c2811bd86..3b2ba65f3b46 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -629,17 +629,15 @@ uint64_t LocalStore::addValidPath(State & state,
 
 
 void LocalStore::queryPathInfoUncached(const Path & path,
-    std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-    std::function<void(std::exception_ptr exc)> failure)
+    Callback<std::shared_ptr<ValidPathInfo>> callback)
 {
-    sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {
-
+    try {
         auto info = std::make_shared<ValidPathInfo>();
         info->path = path;
 
         assertStorePath(path);
 
-        return retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() {
+        callback(retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() {
             auto state(_state.lock());
 
             /* Get the path info. */
@@ -679,8 +677,9 @@ void LocalStore::queryPathInfoUncached(const Path & path,
                 info->references.insert(useQueryReferences.getStr(0));
 
             return info;
-        });
-    });
+        }));
+
+    } catch (...) { callback.rethrow(); }
 }
 
 
@@ -976,7 +975,8 @@ const PublicKeys & LocalStore::getPublicKeys()
 void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
     RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
 {
-    assert(info.narHash);
+    if (!info.narHash)
+        throw Error("cannot add path '%s' because it lacks a hash", info.path);
 
     if (requireSigs && checkSigs && !info.checkSignatures(*this, getPublicKeys()))
         throw Error("cannot add path '%s' because it lacks a valid signature", info.path);
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 1209a06356f7..746bdbeed793 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -127,8 +127,7 @@ public:
     PathSet queryAllValidPaths() override;
 
     void queryPathInfoUncached(const Path & path,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override;
+        Callback<std::shared_ptr<ValidPathInfo>> callback) override;
 
     void queryReferrers(const Path & path, PathSet & referrers) override;
 
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
index a7279aa3939f..3799257f83ff 100644
--- a/src/libstore/local.mk
+++ b/src/libstore/local.mk
@@ -18,7 +18,7 @@ libstore_FILES = sandbox-defaults.sb sandbox-minimal.sb sandbox-network.sb
 $(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox)))
 
 ifeq ($(ENABLE_S3), 1)
-	libstore_LDFLAGS += -laws-cpp-sdk-s3 -laws-cpp-sdk-core
+	libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core
 endif
 
 ifeq ($(OS), SunOS)
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
index a82aa4e9cfa5..adcce026fa1d 100644
--- a/src/libstore/misc.cc
+++ b/src/libstore/misc.cc
@@ -33,9 +33,11 @@ void Store::computeFSClosure(const PathSet & startPaths,
             state->pending++;
         }
 
-        queryPathInfo(path,
-            [&, path](ref<ValidPathInfo> info) {
-                // FIXME: calls to isValidPath() should be async
+        queryPathInfo(path, {[&, path](std::future<ref<ValidPathInfo>> fut) {
+            // FIXME: calls to isValidPath() should be async
+
+            try {
+                auto info = fut.get();
 
                 if (flipDirection) {
 
@@ -75,14 +77,13 @@ void Store::computeFSClosure(const PathSet & startPaths,
                     if (!--state->pending) done.notify_one();
                 }
 
-            },
-
-            [&, path](std::exception_ptr exc) {
+            } catch (...) {
                 auto state(state_.lock());
-                if (!state->exc) state->exc = exc;
+                if (!state->exc) state->exc = std::current_exception();
                 assert(state->pending);
                 if (!--state->pending) done.notify_one();
-            });
+            };
+        }});
     };
 
     for (auto & startPath : startPaths)
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index 7840167d7772..991512f21795 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -104,8 +104,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
        *.app/Contents/Resources/\*.lproj seem to be the only paths affected. See
        https://github.com/NixOS/nix/issues/1443 for more discussion. */
 
-    if (std::regex_search(path, std::regex("\\.app/Contents/PkgInfo$")) ||
-        std::regex_search(path, std::regex("\\.app/Contents/Resources/.+\\.lproj$")))
+    if (std::regex_search(path, std::regex("\\.app/Contents/.+$")))
     {
         debug(format("'%1%' is not allowed to be linked in macOS") % path);
         return;
diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc
index 4a607b584506..4c6af567ae6f 100644
--- a/src/libstore/profiles.cc
+++ b/src/libstore/profiles.cc
@@ -157,6 +157,29 @@ void deleteGenerations(const Path & profile, const std::set<unsigned int> & gens
     }
 }
 
+void deleteGenerationsGreaterThan(const Path & profile, int max, bool dryRun)
+{
+    PathLocks lock;
+    lockProfile(lock, profile);
+
+    int curGen;
+    bool fromCurGen = false;
+    Generations gens = findGenerations(profile, curGen);
+    for (auto i = gens.rbegin(); i != gens.rend(); ++i) {
+        if (i->number == curGen) {
+            fromCurGen = true;
+            max--;
+            continue;
+        }
+        if (fromCurGen) {
+            if (max) {
+                max--;
+                continue;
+            }
+            deleteGeneration2(profile, i->number, dryRun);
+        }
+    }
+}
 
 void deleteOldGenerations(const Path & profile, bool dryRun)
 {
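
The new deleteGenerationsGreaterThan() walks the generations newest-first and, once it reaches the current one, keeps max of them (the current generation plus max - 1 older ones) before deleting the rest; generations newer than the current one are left alone. A usage sketch; the profile path is only an example.

    #include "profiles.hh"

    using namespace nix;

    int main()
    {
        /* Keep the current generation and the four immediately before it,
           deleting everything older (dry run: only report, don't remove). */
        deleteGenerationsGreaterThan("/nix/var/nix/profiles/default", 5, true);
    }
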
diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh
index 1d4e6d3037db..5fa1533de311 100644
--- a/src/libstore/profiles.hh
+++ b/src/libstore/profiles.hh
@@ -39,6 +39,8 @@ void deleteGeneration(const Path & profile, unsigned int gen);
 
 void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun);
 
+void deleteGenerationsGreaterThan(const Path & profile, const int max, bool dryRun);
+
 void deleteOldGenerations(const Path & profile, bool dryRun);
 
 void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun);
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 080cef93d214..ea86ef052f53 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -7,6 +7,7 @@
 #include "globals.hh"
 #include "derivations.hh"
 #include "pool.hh"
+#include "finally.hh"
 
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -187,10 +188,11 @@ void RemoteStore::setOptions(Connection & conn)
        << settings.useSubstitutes;
 
     if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
-        auto overrides = settings.getSettings(true);
+        std::map<std::string, Config::SettingInfo> overrides;
+        globalConfig.getSettings(overrides, true);
         conn.to << overrides.size();
         for (auto & i : overrides)
-            conn.to << i.first << i.second;
+            conn.to << i.first << i.second.value;
     }
 
     conn.processStderr();
@@ -293,38 +295,40 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
 
 
 void RemoteStore::queryPathInfoUncached(const Path & path,
-    std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-    std::function<void(std::exception_ptr exc)> failure)
+    Callback<std::shared_ptr<ValidPathInfo>> callback)
 {
-    sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {
-        auto conn(connections->get());
-        conn->to << wopQueryPathInfo << path;
-        try {
-            conn->processStderr();
-        } catch (Error & e) {
-            // Ugly backwards compatibility hack.
-            if (e.msg().find("is not valid") != std::string::npos)
-                throw InvalidPath(e.what());
-            throw;
-        }
-        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
-            bool valid; conn->from >> valid;
-            if (!valid) throw InvalidPath(format("path '%s' is not valid") % path);
-        }
-        auto info = std::make_shared<ValidPathInfo>();
-        info->path = path;
-        info->deriver = readString(conn->from);
-        if (info->deriver != "") assertStorePath(info->deriver);
-        info->narHash = Hash(readString(conn->from), htSHA256);
-        info->references = readStorePaths<PathSet>(*this, conn->from);
-        conn->from >> info->registrationTime >> info->narSize;
-        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
-            conn->from >> info->ultimate;
-            info->sigs = readStrings<StringSet>(conn->from);
-            conn->from >> info->ca;
+    try {
+        std::shared_ptr<ValidPathInfo> info;
+        {
+            auto conn(connections->get());
+            conn->to << wopQueryPathInfo << path;
+            try {
+                conn->processStderr();
+            } catch (Error & e) {
+                // Ugly backwards compatibility hack.
+                if (e.msg().find("is not valid") != std::string::npos)
+                    throw InvalidPath(e.what());
+                throw;
+            }
+            if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
+                bool valid; conn->from >> valid;
+                if (!valid) throw InvalidPath(format("path '%s' is not valid") % path);
+            }
+            info = std::make_shared<ValidPathInfo>();
+            info->path = path;
+            info->deriver = readString(conn->from);
+            if (info->deriver != "") assertStorePath(info->deriver);
+            info->narHash = Hash(readString(conn->from), htSHA256);
+            info->references = readStorePaths<PathSet>(*this, conn->from);
+            conn->from >> info->registrationTime >> info->narSize;
+            if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
+                conn->from >> info->ultimate;
+                info->sigs = readStrings<StringSet>(conn->from);
+                conn->from >> info->ca;
+            }
         }
-        return info;
-    });
+        callback(std::move(info));
+    } catch (...) { callback.rethrow(); }
 }
 
 
@@ -411,8 +415,9 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
                  << info.references << info.registrationTime << info.narSize
                  << info.ultimate << info.sigs << info.ca
                  << repair << !checkSigs;
-        copyNAR(source, conn->to);
-        conn->processStderr();
+        bool tunnel = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21;
+        if (!tunnel) copyNAR(source, conn->to);
+        conn->processStderr(0, tunnel ? &source : nullptr);
     }
 }
 
@@ -435,8 +440,10 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath,
         conn->to.written = 0;
         conn->to.warn = true;
         connections->incCapacity();
-        dumpPath(srcPath, conn->to, filter);
-        connections->decCapacity();
+        {
+            Finally cleanup([&]() { connections->decCapacity(); });
+            dumpPath(srcPath, conn->to, filter);
+        }
         conn->to.warn = false;
         conn->processStderr();
     } catch (SysError & e) {
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 95fa59a2069d..b488e34ce263 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -40,8 +40,7 @@ public:
     PathSet queryAllValidPaths() override;
 
     void queryPathInfoUncached(const Path & path,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override;
+        Callback<std::shared_ptr<ValidPathInfo>> callback) override;
 
     void queryReferrers(const Path & path, PathSet & referrers) override;
 
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 23af452094cf..26144ccb40cc 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -17,6 +17,7 @@
 #include <aws/core/client/DefaultRetryStrategy.h>
 #include <aws/core/utils/logging/FormattedLogSystem.h>
 #include <aws/core/utils/logging/LogMacros.h>
+#include <aws/core/utils/threading/Executor.h>
 #include <aws/s3/S3Client.h>
 #include <aws/s3/model/CreateBucketRequest.h>
 #include <aws/s3/model/GetBucketLocationRequest.h>
@@ -24,6 +25,9 @@
 #include <aws/s3/model/HeadObjectRequest.h>
 #include <aws/s3/model/ListObjectsRequest.h>
 #include <aws/s3/model/PutObjectRequest.h>
+#include <aws/transfer/TransferManager.h>
+
+using namespace Aws::Transfer;
 
 namespace nix {
 
@@ -169,6 +173,8 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
     const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
     const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
     const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
+    const Setting<uint64_t> bufferSize{
+        this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"};
 
     std::string bucketName;
 
@@ -271,34 +277,76 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         const std::string & mimeType,
         const std::string & contentEncoding)
     {
-        auto request =
-            Aws::S3::Model::PutObjectRequest()
-            .WithBucket(bucketName)
-            .WithKey(path);
+        auto stream = std::make_shared<istringstream_nocopy>(data);
 
-        request.SetContentType(mimeType);
+        auto maxThreads = std::thread::hardware_concurrency();
 
-        if (contentEncoding != "")
-            request.SetContentEncoding(contentEncoding);
+        static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor>
+            executor = std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(maxThreads);
 
-        auto stream = std::make_shared<istringstream_nocopy>(data);
+        TransferManagerConfiguration transferConfig(executor.get());
 
-        request.SetBody(stream);
+        transferConfig.s3Client = s3Helper.client;
+        transferConfig.bufferSize = bufferSize;
 
-        stats.put++;
-        stats.putBytes += data.size();
+        if (contentEncoding != "")
+            transferConfig.createMultipartUploadTemplate.SetContentEncoding(
+                contentEncoding);
+
+        transferConfig.uploadProgressCallback =
+            [&](const TransferManager *transferManager,
+                const std::shared_ptr<const TransferHandle>
+                    &transferHandle) {
+              //FIXME: find a way to properly abort the multipart upload.
+              checkInterrupt();
+              debug("upload progress ('%s'): '%d' of '%d' bytes",
+                             path,
+                             transferHandle->GetBytesTransferred(),
+                             transferHandle->GetBytesTotalSize());
+            };
+
+        transferConfig.transferStatusUpdatedCallback =
+            [&](const TransferManager *,
+                const std::shared_ptr<const TransferHandle>
+                    &transferHandle) {
+              switch (transferHandle->GetStatus()) {
+                  case TransferStatus::COMPLETED:
+                      printTalkative("upload of '%s' completed", path);
+                      stats.put++;
+                      stats.putBytes += data.size();
+                      break;
+                  case TransferStatus::IN_PROGRESS:
+                      break;
+                  case TransferStatus::FAILED:
+                      throw Error("AWS error: failed to upload 's3://%s/%s'",
+                                  bucketName, path);
+                      break;
+                  default:
+                      throw Error("AWS error: transfer status of 's3://%s/%s' "
+                                  "is in an unexpected state",
+                                  bucketName, path);
+              };
+            };
+
+        std::shared_ptr<TransferManager> transferManager =
+            TransferManager::Create(transferConfig);
 
         auto now1 = std::chrono::steady_clock::now();
 
-        auto result = checkAws(format("AWS error uploading '%s'") % path,
-            s3Helper.client->PutObject(request));
+        std::shared_ptr<TransferHandle> transferHandle =
+            transferManager->UploadFile(stream, bucketName, path, mimeType,
+                                        Aws::Map<Aws::String, Aws::String>());
+
+        transferHandle->WaitUntilFinished();
 
         auto now2 = std::chrono::steady_clock::now();
 
-        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+        auto duration =
+            std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1)
+                .count();
 
-        printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms")
-            % bucketName % path % data.size() % duration);
+        printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") %
+                  bucketName % path % data.size() % duration);
 
         stats.putTimeMs += duration;
     }
@@ -316,24 +364,23 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
             uploadFile(path, data, mimeType, "");
     }
 
-    void getFile(const std::string & path,
-        std::function<void(std::shared_ptr<std::string>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override
+    void getFile(const std::string & path, Sink & sink) override
     {
-        sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
-            stats.get++;
+        stats.get++;
 
-            auto res = s3Helper.getObject(bucketName, path);
+        // FIXME: stream output to sink.
+        auto res = s3Helper.getObject(bucketName, path);
 
-            stats.getBytes += res.data ? res.data->size() : 0;
-            stats.getTimeMs += res.durationMs;
+        stats.getBytes += res.data ? res.data->size() : 0;
+        stats.getTimeMs += res.durationMs;
 
-            if (res.data)
-                printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms",
-                    bucketName, path, res.data->size(), res.durationMs);
+        if (res.data) {
+            printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms",
+                bucketName, path, res.data->size(), res.durationMs);
 
-            return res.data;
-        });
+            sink((unsigned char *) res.data->data(), res.data->size());
+        } else
+            throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
     }
 
     PathSet queryAllValidPaths() override
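
Editorial note: the uploadFile() hunk above replaces the single PutObject request with the AWS SDK's TransferManager, which splits the payload into parts of bufferSize bytes (the 5 MiB default matches S3's minimum multipart part size) and reports progress and status through callbacks. Below is a minimal sketch of the same flow, stripped of the stats and interrupt handling and assuming an already-configured std::shared_ptr<Aws::S3::S3Client>; it only restates the calls used in the hunk and is not a drop-in replacement.

    #include <aws/core/utils/threading/Executor.h>
    #include <aws/s3/S3Client.h>
    #include <aws/transfer/TransferManager.h>
    #include <memory>
    #include <stdexcept>
    #include <thread>

    using namespace Aws::Transfer;

    void uploadSketch(std::shared_ptr<Aws::S3::S3Client> client,
        const std::string & bucket, const std::string & key,
        std::shared_ptr<Aws::IOStream> stream, const std::string & mimeType)
    {
        auto executor = std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(
            std::thread::hardware_concurrency());

        TransferManagerConfiguration config(executor.get());
        config.s3Client = client;
        config.bufferSize = 5 * 1024 * 1024; // per-part size for multipart uploads

        auto transferManager = TransferManager::Create(config);

        auto handle = transferManager->UploadFile(
            stream, bucket, key, mimeType, Aws::Map<Aws::String, Aws::String>());
        handle->WaitUntilFinished();

        if (handle->GetStatus() != TransferStatus::COMPLETED)
            throw std::runtime_error("upload of '" + key + "' failed");
    }
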
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 42d40e71d8be..a061d64f36d8 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -10,6 +10,7 @@ namespace nix {
 [[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs)
 {
     int err = sqlite3_errcode(db);
+    int exterr = sqlite3_extended_errcode(db);
 
     auto path = sqlite3_db_filename(db, nullptr);
     if (!path) path = "(in-memory)";
@@ -21,7 +22,7 @@ namespace nix {
             : fmt("SQLite database '%s' is busy", path));
     }
     else
-        throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(err), path);
+        throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(exterr), path);
 }
 
 SQLite::SQLite(const Path & path)
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 1a0d12ca78c2..9b0b7d6327e0 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -253,6 +253,8 @@ std::string Store::getUri()
 
 bool Store::isValidPath(const Path & storePath)
 {
+    assertStorePath(storePath);
+
     auto hashPart = storePathToHash(storePath);
 
     {
@@ -303,20 +305,20 @@ ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
     std::promise<ref<ValidPathInfo>> promise;
 
     queryPathInfo(storePath,
-        [&](ref<ValidPathInfo> info) {
-            promise.set_value(info);
-        },
-        [&](std::exception_ptr exc) {
-            promise.set_exception(exc);
-        });
+        {[&](std::future<ref<ValidPathInfo>> result) {
+            try {
+                promise.set_value(result.get());
+            } catch (...) {
+                promise.set_exception(std::current_exception());
+            }
+        }});
 
     return promise.get_future().get();
 }
 
 
 void Store::queryPathInfo(const Path & storePath,
-    std::function<void(ref<ValidPathInfo>)> success,
-    std::function<void(std::exception_ptr exc)> failure)
+    Callback<ref<ValidPathInfo>> callback)
 {
     auto hashPart = storePathToHash(storePath);
 
@@ -328,7 +330,7 @@ void Store::queryPathInfo(const Path & storePath,
                 stats.narInfoReadAverted++;
                 if (!*res)
                     throw InvalidPath(format("path '%s' is not valid") % storePath);
-                return success(ref<ValidPathInfo>(*res));
+                return callback(ref<ValidPathInfo>(*res));
             }
         }
 
@@ -344,35 +346,36 @@ void Store::queryPathInfo(const Path & storePath,
                         (res.second->path != storePath && storePathToName(storePath) != ""))
                         throw InvalidPath(format("path '%s' is not valid") % storePath);
                 }
-                return success(ref<ValidPathInfo>(res.second));
+                return callback(ref<ValidPathInfo>(res.second));
             }
         }
 
-    } catch (std::exception & e) {
-        return callFailure(failure);
-    }
+    } catch (...) { return callback.rethrow(); }
 
     queryPathInfoUncached(storePath,
-        [this, storePath, hashPart, success, failure](std::shared_ptr<ValidPathInfo> info) {
+        {[this, storePath, hashPart, callback](std::future<std::shared_ptr<ValidPathInfo>> fut) {
 
-            if (diskCache)
-                diskCache->upsertNarInfo(getUri(), hashPart, info);
+            try {
+                auto info = fut.get();
 
-            {
-                auto state_(state.lock());
-                state_->pathInfoCache.upsert(hashPart, info);
-            }
+                if (diskCache)
+                    diskCache->upsertNarInfo(getUri(), hashPart, info);
 
-            if (!info
-                || (info->path != storePath && storePathToName(storePath) != ""))
-            {
-                stats.narInfoMissing++;
-                return failure(std::make_exception_ptr(InvalidPath(format("path '%s' is not valid") % storePath)));
-            }
+                {
+                    auto state_(state.lock());
+                    state_->pathInfoCache.upsert(hashPart, info);
+                }
 
-            callSuccess(success, failure, ref<ValidPathInfo>(info));
+                if (!info
+                    || (info->path != storePath && storePathToName(storePath) != ""))
+                {
+                    stats.narInfoMissing++;
+                    throw InvalidPath("path '%s' is not valid", storePath);
+                }
 
-        }, failure);
+                callback(ref<ValidPathInfo>(info));
+            } catch (...) { callback.rethrow(); }
+        }});
 }
 
 
@@ -392,26 +395,19 @@ PathSet Store::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubsti
 
     auto doQuery = [&](const Path & path ) {
         checkInterrupt();
-        queryPathInfo(path,
-            [path, &state_, &wakeup](ref<ValidPathInfo> info) {
-                auto state(state_.lock());
+        queryPathInfo(path, {[path, &state_, &wakeup](std::future<ref<ValidPathInfo>> fut) {
+            auto state(state_.lock());
+            try {
+                auto info = fut.get();
                 state->valid.insert(path);
-                assert(state->left);
-                if (!--state->left)
-                    wakeup.notify_one();
-            },
-            [path, &state_, &wakeup](std::exception_ptr exc) {
-                auto state(state_.lock());
-                try {
-                    std::rethrow_exception(exc);
-                } catch (InvalidPath &) {
-                } catch (...) {
-                    state->exc = exc;
-                }
-                assert(state->left);
-                if (!--state->left)
-                    wakeup.notify_one();
-            });
+            } catch (InvalidPath &) {
+            } catch (...) {
+                state->exc = std::current_exception();
+            }
+            assert(state->left);
+            if (!--state->left)
+                wakeup.notify_one();
+        }});
     };
 
     for (auto & path : paths)
@@ -847,7 +843,7 @@ ref<Store> openStore(const std::string & uri_,
     for (auto fun : *RegisterStoreImplementation::implementations) {
         auto store = fun(uri, params);
         if (store) {
-            store->handleUnknownSettings();
+            store->warnUnknownSettings();
             return ref<Store>(store);
         }
     }
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index ea259f07e8ab..7c5b495a4482 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -22,6 +22,7 @@ MakeError(SubstError, Error)
 MakeError(BuildError, Error) /* denotes a permanent build failure */
 MakeError(InvalidPath, Error)
 MakeError(Unsupported, Error)
+MakeError(SubstituteGone, Error)
 
 
 struct BasicDerivation;
@@ -355,14 +356,12 @@ public:
 
     /* Asynchronous version of queryPathInfo(). */
     void queryPathInfo(const Path & path,
-        std::function<void(ref<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure);
+        Callback<ref<ValidPathInfo>> callback);
 
 protected:
 
     virtual void queryPathInfoUncached(const Path & path,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure) = 0;
+        Callback<std::shared_ptr<ValidPathInfo>> callback) = 0;
 
 public:
 
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index 996e1d25355f..5ebdfaf134d6 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -6,7 +6,7 @@ namespace nix {
 #define WORKER_MAGIC_1 0x6e697863
 #define WORKER_MAGIC_2 0x6478696f
 
-#define PROTOCOL_VERSION 0x114
+#define PROTOCOL_VERSION 0x115
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
 
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index 154e2d20430c..1be8934a2eba 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -13,17 +13,25 @@
 
 #include "archive.hh"
 #include "util.hh"
-
+#include "config.hh"
 
 namespace nix {
 
+struct ArchiveSettings : Config
+{
+    Setting<bool> useCaseHack{this,
+        #if __APPLE__
+            true,
+        #else
+            false,
+        #endif
+        "use-case-hack",
+        "Whether to enable a Darwin-specific hack for dealing with file name collisions."};
+};
 
-bool useCaseHack =
-#if __APPLE__
-    true;
-#else
-    false;
-#endif
+static ArchiveSettings archiveSettings;
+
+static GlobalConfig::Register r1(&archiveSettings);
 
 const std::string narVersionMagic1 = "nix-archive-1";
 
@@ -78,7 +86,7 @@ static void dump(const Path & path, Sink & sink, PathFilter & filter)
            the case hack applied by restorePath(). */
         std::map<string, string> unhacked;
         for (auto & i : readDirectory(path))
-            if (useCaseHack) {
+            if (archiveSettings.useCaseHack) {
                 string name(i.name);
                 size_t pos = i.name.find(caseHackSuffix);
                 if (pos != string::npos) {
@@ -243,7 +251,7 @@ static void parse(ParseSink & sink, Source & source, const Path & path)
                     if (name <= prevName)
                         throw Error("NAR directory is not sorted");
                     prevName = name;
-                    if (useCaseHack) {
+                    if (archiveSettings.useCaseHack) {
                         auto i = names.find(name);
                         if (i != names.end()) {
                             debug(format("case collision between '%1%' and '%2%'") % i->first % name);
diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh
index 7a0e688e4201..25be426c1a4d 100644
--- a/src/libutil/archive.hh
+++ b/src/libutil/archive.hh
@@ -78,10 +78,6 @@ void restorePath(const Path & path, Source & source);
 void copyNAR(Source & source, Sink & sink);
 
 
-// FIXME: global variables are bad m'kay.
-extern bool useCaseHack;
-
-
 extern const std::string narVersionMagic1;
 
 
diff --git a/src/libutil/config.cc b/src/libutil/config.cc
index ce6858f0d65a..9023cb1bb6de 100644
--- a/src/libutil/config.cc
+++ b/src/libutil/config.cc
@@ -4,15 +4,13 @@
 
 namespace nix {
 
-void Config::set(const std::string & name, const std::string & value)
+bool Config::set(const std::string & name, const std::string & value)
 {
     auto i = _settings.find(name);
-    if (i == _settings.end()) {
-        extras.emplace(name, value);
-    } else {
-        i->second.setting->set(value);
-        i->second.setting->overriden = true;
-    }
+    if (i == _settings.end()) return false;
+    i->second.setting->set(value);
+    i->second.setting->overriden = true;
+    return true;
 }
 
 void Config::addSetting(AbstractSetting * setting)
@@ -23,46 +21,51 @@ void Config::addSetting(AbstractSetting * setting)
 
     bool set = false;
 
-    auto i = extras.find(setting->name);
-    if (i != extras.end()) {
+    auto i = unknownSettings.find(setting->name);
+    if (i != unknownSettings.end()) {
         setting->set(i->second);
         setting->overriden = true;
-        extras.erase(i);
+        unknownSettings.erase(i);
         set = true;
     }
 
     for (auto & alias : setting->aliases) {
-        auto i = extras.find(alias);
-        if (i != extras.end()) {
+        auto i = unknownSettings.find(alias);
+        if (i != unknownSettings.end()) {
             if (set)
                 warn("setting '%s' is set, but it's an alias of '%s' which is also set",
                     alias, setting->name);
             else {
                 setting->set(i->second);
                 setting->overriden = true;
-                extras.erase(i);
+                unknownSettings.erase(i);
                 set = true;
             }
         }
     }
 }
 
-void Config::handleUnknownSettings()
+void AbstractConfig::warnUnknownSettings()
 {
-    for (auto & s : extras)
+    for (auto & s : unknownSettings)
         warn("unknown setting '%s'", s.first);
 }
 
-StringMap Config::getSettings(bool overridenOnly)
+void AbstractConfig::reapplyUnknownSettings()
+{
+    auto unknownSettings2 = std::move(unknownSettings);
+    for (auto & s : unknownSettings2)
+        set(s.first, s.second);
+}
+
+void Config::getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly)
 {
-    StringMap res;
     for (auto & opt : _settings)
         if (!opt.second.isAlias && (!overridenOnly || opt.second.setting->overriden))
-            res.emplace(opt.first, opt.second.setting->to_string());
-    return res;
+            res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), opt.second.setting->description});
 }
 
-void Config::applyConfigFile(const Path & path)
+void AbstractConfig::applyConfigFile(const Path & path)
 {
     try {
         string contents = readFile(path);
@@ -287,4 +290,49 @@ void PathSetting::set(const std::string & str)
         value = canonPath(str);
 }
 
+bool GlobalConfig::set(const std::string & name, const std::string & value)
+{
+    for (auto & config : *configRegistrations)
+        if (config->set(name, value)) return true;
+
+    unknownSettings.emplace(name, value);
+
+    return false;
+}
+
+void GlobalConfig::getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly)
+{
+    for (auto & config : *configRegistrations)
+        config->getSettings(res, overridenOnly);
+}
+
+void GlobalConfig::resetOverriden()
+{
+    for (auto & config : *configRegistrations)
+        config->resetOverriden();
+}
+
+void GlobalConfig::toJSON(JSONObject & out)
+{
+    for (auto & config : *configRegistrations)
+        config->toJSON(out);
+}
+
+void GlobalConfig::convertToArgs(Args & args, const std::string & category)
+{
+    for (auto & config : *configRegistrations)
+        config->convertToArgs(args, category);
+}
+
+GlobalConfig globalConfig;
+
+GlobalConfig::ConfigRegistrations * GlobalConfig::configRegistrations;
+
+GlobalConfig::Register::Register(Config * config)
+{
+    if (!configRegistrations)
+        configRegistrations = new ConfigRegistrations;
+    configRegistrations->emplace_back(config);
+}
+
 }
diff --git a/src/libutil/config.hh b/src/libutil/config.hh
index d2e7faf17434..d86c65ff033a 100644
--- a/src/libutil/config.hh
+++ b/src/libutil/config.hh
@@ -12,6 +12,40 @@ class AbstractSetting;
 class JSONPlaceholder;
 class JSONObject;
 
+class AbstractConfig
+{
+protected:
+    StringMap unknownSettings;
+
+    AbstractConfig(const StringMap & initials = {})
+        : unknownSettings(initials)
+    { }
+
+public:
+
+    virtual bool set(const std::string & name, const std::string & value) = 0;
+
+    struct SettingInfo
+    {
+        std::string value;
+        std::string description;
+    };
+
+    virtual void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) = 0;
+
+    void applyConfigFile(const Path & path);
+
+    virtual void resetOverriden() = 0;
+
+    virtual void toJSON(JSONObject & out) = 0;
+
+    virtual void convertToArgs(Args & args, const std::string & category) = 0;
+
+    void warnUnknownSettings();
+
+    void reapplyUnknownSettings();
+};
+
 /* A class to simplify providing configuration settings. The typical
    use is to inherit Config and add Setting<T> members:
 
@@ -27,7 +61,7 @@ class JSONObject;
    };
 */
 
-class Config
+class Config : public AbstractConfig
 {
     friend class AbstractSetting;
 
@@ -48,31 +82,23 @@ private:
 
     Settings _settings;
 
-    StringMap extras;
-
 public:
 
-    Config(const StringMap & initials)
-        : extras(initials)
+    Config(const StringMap & initials = {})
+        : AbstractConfig(initials)
     { }
 
-    void set(const std::string & name, const std::string & value);
+    bool set(const std::string & name, const std::string & value) override;
 
     void addSetting(AbstractSetting * setting);
 
-    void handleUnknownSettings();
-
-    StringMap getSettings(bool overridenOnly = false);
+    void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) override;
 
-    const Settings & _getSettings() { return _settings; }
-
-    void applyConfigFile(const Path & path);
+    void resetOverriden() override;
 
-    void resetOverriden();
+    void toJSON(JSONObject & out) override;
 
-    void toJSON(JSONObject & out);
-
-    void convertToArgs(Args & args, const std::string & category);
+    void convertToArgs(Args & args, const std::string & category) override;
 };
 
 class AbstractSetting
@@ -209,4 +235,27 @@ public:
     void operator =(const Path & v) { this->assign(v); }
 };
 
+struct GlobalConfig : public AbstractConfig
+{
+    typedef std::vector<Config*> ConfigRegistrations;
+    static ConfigRegistrations * configRegistrations;
+
+    bool set(const std::string & name, const std::string & value) override;
+
+    void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) override;
+
+    void resetOverriden() override;
+
+    void toJSON(JSONObject & out) override;
+
+    void convertToArgs(Args & args, const std::string & category) override;
+
+    struct Register
+    {
+        Register(Config * config);
+    };
+};
+
+extern GlobalConfig globalConfig;
+
 }
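
Editorial note: the AbstractConfig/GlobalConfig split introduced here lets each component declare its own settings object and register it, instead of extending one monolithic settings table. A minimal sketch of the pattern, modelled on the ArchiveSettings and plugintest.cc changes elsewhere in this diff (the struct name and the enable-foo setting are purely illustrative):

    #include "config.hh"

    using namespace nix;

    struct MyComponentSettings : Config
    {
        // Setting<T>{owner, default value, name, description}
        Setting<bool> enableFoo{this, false, "enable-foo",
            "Whether to enable the (hypothetical) foo feature."};
    };

    static MyComponentSettings myComponentSettings;

    // Registration makes the setting reachable through globalConfig,
    // e.g. the updated `nix show-config` and option listing in this diff.
    static GlobalConfig::Register r1(&myComponentSettings);

    // Elsewhere in the component:
    //     if (myComponentSettings.enableFoo) ...
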
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index 33ae1ea389d7..21803edd056a 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -133,7 +133,7 @@ size_t FdSource::readUnbuffered(unsigned char * data, size_t len)
     ssize_t n;
     do {
         checkInterrupt();
-        n = ::read(fd, (char *) data, bufSize);
+        n = ::read(fd, (char *) data, len);
     } while (n == -1 && errno == EINTR);
     if (n == -1) { _good = false; throw SysError("reading from file"); }
     if (n == 0) { _good = false; throw EndOfFile("unexpected end-of-file"); }
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index 6e703c52a1e3..14b62fdb6774 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -77,10 +77,12 @@ struct BufferedSource : Source
 
     size_t read(unsigned char * data, size_t len) override;
 
-    /* Underlying read call, to be overridden. */
-    virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0;
 
     bool hasData();
+
+protected:
+    /* Underlying read call, to be overridden. */
+    virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0;
 };
 
 
@@ -134,8 +136,9 @@ struct FdSource : BufferedSource
         return *this;
     }
 
-    size_t readUnbuffered(unsigned char * data, size_t len) override;
     bool good() override;
+protected:
+    size_t readUnbuffered(unsigned char * data, size_t len) override;
 private:
     bool _good = true;
 };
diff --git a/src/libutil/sync.hh b/src/libutil/sync.hh
index 3b2710f6fd30..e1d591d77a84 100644
--- a/src/libutil/sync.hh
+++ b/src/libutil/sync.hh
@@ -57,11 +57,11 @@ public:
         }
 
         template<class Rep, class Period>
-        void wait_for(std::condition_variable & cv,
+        std::cv_status wait_for(std::condition_variable & cv,
             const std::chrono::duration<Rep, Period> & duration)
         {
             assert(s);
-            cv.wait_for(lk, duration);
+            return cv.wait_for(lk, duration);
         }
 
         template<class Rep, class Period, class Predicate>
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 15962236ec65..6bc64ae75a42 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -311,6 +311,14 @@ string readFile(const Path & path, bool drain)
 }
 
 
+void readFile(const Path & path, Sink & sink)
+{
+    AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
+    if (!fd) throw SysError("opening file '%s'", path);
+    drainFD(fd.get(), sink);
+}
+
+
 void writeFile(const Path & path, const string & s, mode_t mode)
 {
     AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode);
@@ -320,6 +328,23 @@ void writeFile(const Path & path, const string & s, mode_t mode)
 }
 
 
+void writeFile(const Path & path, Source & source, mode_t mode)
+{
+    AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode);
+    if (!fd)
+        throw SysError(format("opening file '%1%'") % path);
+
+    std::vector<unsigned char> buf(64 * 1024);
+
+    while (true) {
+        try {
+            auto n = source.read(buf.data(), buf.size());
+            writeFull(fd.get(), (unsigned char *) buf.data(), n);
+        } catch (EndOfFile &) { break; }
+    }
+}
+
+
 string readLine(int fd)
 {
     string s;
@@ -593,7 +618,7 @@ void drainFD(int fd, Sink & sink, bool block)
             throw SysError("making file descriptor non-blocking");
     }
 
-    std::vector<unsigned char> buf(4096);
+    std::vector<unsigned char> buf(64 * 1024);
     while (1) {
         checkInterrupt();
         ssize_t rd = read(fd, buf.data(), buf.size());
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index 743d238611fc..fc25d27758c7 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -15,6 +15,7 @@
 #include <map>
 #include <sstream>
 #include <experimental/optional>
+#include <future>
 
 #ifndef HAVE_STRUCT_DIRENT_D_TYPE
 #define DT_UNKNOWN 0
@@ -97,10 +98,13 @@ unsigned char getFileType(const Path & path);
 /* Read the contents of a file into a string. */
 string readFile(int fd);
 string readFile(const Path & path, bool drain = false);
+void readFile(const Path & path, Sink & sink);
 
 /* Write a string to a file. */
 void writeFile(const Path & path, const string & s, mode_t mode = 0666);
 
+void writeFile(const Path & path, Source & source, mode_t mode = 0666);
+
 /* Read a line from a file descriptor. */
 string readLine(int fd);
 
@@ -424,44 +428,30 @@ string get(const T & map, const string & key, const string & def = "")
 }
 
 
-/* Call ‘failure’ with the current exception as argument. If ‘failure’
-   throws an exception, abort the program. */
-void callFailure(const std::function<void(std::exception_ptr exc)> & failure,
-    std::exception_ptr exc = std::current_exception());
+/* A callback is a wrapper around a lambda that accepts a value of

+   type T or an exception. (We abuse std::future<T> to pass the value or
+   exception.) */
+template<typename T>
+struct Callback
+{
+    std::function<void(std::future<T>)> fun;
 
+    Callback(std::function<void(std::future<T>)> fun) : fun(fun) { }
 
-/* Evaluate the function ‘f’. If it returns a value, call ‘success’
-   with that value as its argument. If it or ‘success’ throws an
-   exception, call ‘failure’. If ‘failure’ throws an exception, abort
-   the program. */
-template<class T>
-void sync2async(
-    const std::function<void(T)> & success,
-    const std::function<void(std::exception_ptr exc)> & failure,
-    const std::function<T()> & f)
-{
-    try {
-        success(f());
-    } catch (...) {
-        callFailure(failure);
+    void operator()(T && t) const
+    {
+        std::promise<T> promise;
+        promise.set_value(std::move(t));
+        fun(promise.get_future());
     }
-}
 
-
-/* Call the function ‘success’. If it throws an exception, call
-   ‘failure’. If that throws an exception, abort the program. */
-template<class T>
-void callSuccess(
-    const std::function<void(T)> & success,
-    const std::function<void(std::exception_ptr exc)> & failure,
-    T && arg)
-{
-    try {
-        success(arg);
-    } catch (...) {
-        callFailure(failure);
+    void rethrow(const std::exception_ptr & exc = std::current_exception()) const
+    {
+        std::promise<T> promise;
+        promise.set_exception(exc);
+        fun(promise.get_future());
     }
-}
+};
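
A Callback<T> is constructed from a lambda taking a std::future<T>; the producer later delivers either a value via operator() or an exception via rethrow(), and the consumer distinguishes the two by calling get() on the future. A minimal usage sketch (names and output are illustrative, assuming <cstdio> and <stdexcept>):

    Callback<int> callback{[](std::future<int> result) {
        try {
            printf("got %d\n", result.get());      // a value was delivered
        } catch (std::exception & e) {
            printf("failed: %s\n", e.what());      // an exception was delivered
        }
    }};

    callback(42);             // success path

    try {
        throw std::runtime_error("boom");
    } catch (...) {
        callback.rethrow();   // failure path, forwards the in-flight exception
    }
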
 
 
 /* Start a thread that handles various signals. Also block those signals
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
index 21d99878a518..de0e9118fd21 100755
--- a/src/nix-build/nix-build.cc
+++ b/src/nix-build/nix-build.cc
@@ -239,10 +239,10 @@ void mainWrapped(int argc, char * * argv)
 
     auto store = openStore();
 
-    EvalState state(myArgs.searchPath, store);
-    state.repair = repair;
+    auto state = std::make_unique<EvalState>(myArgs.searchPath, store);
+    state->repair = repair;
 
-    Bindings & autoArgs = *myArgs.getAutoArgs(state);
+    Bindings & autoArgs = *myArgs.getAutoArgs(*state);
 
     if (packages) {
         std::ostringstream joined;
@@ -268,7 +268,7 @@ void mainWrapped(int argc, char * * argv)
     std::vector<Expr *> exprs;
 
     if (readStdin)
-        exprs = {state.parseStdin()};
+        exprs = {state->parseStdin()};
     else
         for (auto i : left) {
             auto absolute = i;
@@ -276,13 +276,13 @@ void mainWrapped(int argc, char * * argv)
                 absolute = canonPath(absPath(i), true);
             } catch (Error e) {};
             if (fromArgs)
-                exprs.push_back(state.parseExprFromString(i, absPath(".")));
+                exprs.push_back(state->parseExprFromString(i, absPath(".")));
             else if (store->isStorePath(absolute) && std::regex_match(absolute, std::regex(".*\\.drv(!.*)?")))
-                drvs.push_back(DrvInfo(state, store, absolute));
+                drvs.push_back(DrvInfo(*state, store, absolute));
             else
                 /* If we're in a #! script, interpret filenames
                    relative to the script. */
-                exprs.push_back(state.parseExprFromFile(resolveExprPath(state.checkSourcePath(lookupFileArg(state,
+                exprs.push_back(state->parseExprFromFile(resolveExprPath(state->checkSourcePath(lookupFileArg(*state,
                     inShebang && !packages ? absPath(i, absPath(dirOf(script))) : i)))));
         }
 
@@ -291,12 +291,12 @@ void mainWrapped(int argc, char * * argv)
 
     for (auto e : exprs) {
         Value vRoot;
-        state.eval(e, vRoot);
+        state->eval(e, vRoot);
 
         for (auto & i : attrPaths) {
-            Value & v(*findAlongAttrPath(state, i, autoArgs, vRoot));
-            state.forceValue(v);
-            getDerivations(state, v, "", autoArgs, drvs, false);
+            Value & v(*findAlongAttrPath(*state, i, autoArgs, vRoot));
+            state->forceValue(v);
+            getDerivations(*state, v, "", autoArgs, drvs, false);
         }
     }
 
@@ -332,12 +332,12 @@ void mainWrapped(int argc, char * * argv)
         if (shell == "") {
 
             try {
-                auto expr = state.parseExprFromString("(import <nixpkgs> {}).bashInteractive", absPath("."));
+                auto expr = state->parseExprFromString("(import <nixpkgs> {}).bashInteractive", absPath("."));
 
                 Value v;
-                state.eval(expr, v);
+                state->eval(expr, v);
 
-                auto drv = getDerivation(state, v, false);
+                auto drv = getDerivation(*state, v, false);
                 if (!drv)
                     throw Error("the 'bashInteractive' attribute in <nixpkgs> did not evaluate to a derivation");
 
diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc
index d1b47ede803f..55ebda438965 100755
--- a/src/nix-channel/nix-channel.cc
+++ b/src/nix-channel/nix-channel.cc
@@ -86,7 +86,7 @@ static void update(const StringSet & channelNames)
         // definition from a consistent location if the redirect changes mid-download.
         std::string effectiveUrl;
         auto dl = getDownloader();
-        auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl);
+        auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl, 0);
         url = chomp(std::move(effectiveUrl));
 
         // If the URL contains a version number, append it to the name
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
index 35603af7082a..423e6bb67893 100644
--- a/src/nix-daemon/nix-daemon.cc
+++ b/src/nix-daemon/nix-daemon.cc
@@ -120,8 +120,6 @@ struct TunnelLogger : public Logger
       want to send out stderr to the client. */
     void startWork()
     {
-        std::vector<std::string> pendingMsgs;
-
         auto state(state_.lock());
         state->canSendStderr = true;
 
@@ -197,7 +195,8 @@ struct TunnelSource : BufferedSource
 {
     Source & from;
     TunnelSource(Source & from) : from(from) { }
-    size_t readUnbuffered(unsigned char * data, size_t len)
+protected:
+    size_t readUnbuffered(unsigned char * data, size_t len) override
     {
         to << STDERR_READ << len;
         to.flush();
@@ -554,7 +553,7 @@ static void performOp(TunnelLogger * logger, ref<LocalStore> store,
                     ;
                 else if (trusted
                     || name == settings.buildTimeout.name
-                    || name == settings.connectTimeout.name)
+                    || name == "connect-timeout")
                     settings.set(name, value);
                 else if (setSubstituters(settings.substituters))
                     ;
@@ -691,12 +690,22 @@ static void performOp(TunnelLogger * logger, ref<LocalStore> store,
         if (!trusted)
             info.ultimate = false;
 
-        TeeSink tee(from);
-        parseDump(tee, tee.source);
+        std::string saved;
+        std::unique_ptr<Source> source;
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 21)
+            source = std::make_unique<TunnelSource>(from);
+        else {
+            TeeSink tee(from);
+            parseDump(tee, tee.source);
+            saved = std::move(*tee.source.data);
+            source = std::make_unique<StringSource>(saved);
+        }
 
         logger->startWork();
-        store.cast<Store>()->addToStore(info, tee.source.data, (RepairFlag) repair,
+
+        store.cast<Store>()->addToStore(info, *source, (RepairFlag) repair,
             dontCheckSigs ? NoCheckSigs : CheckSigs, nullptr);
+
         logger->stopWork();
         break;
     }
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index f60ff9e07182..a43b103f6ec6 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -1284,6 +1284,14 @@ static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opAr
         deleteOldGenerations(globals.profile, globals.dryRun);
     } else if (opArgs.size() == 1 && opArgs.front().find('d') != string::npos) {
         deleteGenerationsOlderThan(globals.profile, opArgs.front(), globals.dryRun);
+    } else if (opArgs.size() == 1 && opArgs.front().find('+') != string::npos) {
+        if (opArgs.front().size() < 2)
+            throw Error(format("invalid number of generations ‘%1%’") % opArgs.front());
+        string str_max = string(opArgs.front(), 1, opArgs.front().size());
+        int max;
+        if (!string2Int(str_max, max) || max == 0)
+            throw Error(format("invalid number of generations to keep ‘%1%’") % opArgs.front());
+        deleteGenerationsGreaterThan(globals.profile, max, globals.dryRun);
     } else {
         std::set<unsigned int> gens;
         for (auto & i : opArgs) {
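
The new branch above handles arguments of the form +N: everything after the '+' is parsed with string2Int and handed to deleteGenerationsGreaterThan, so a call like nix-env --delete-generations +5 is intended to keep only the most recent generations (five, in this example) and delete the older ones; a bare '+' or '+0' is rejected with an error.
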
diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc
index 5049460c7544..eb6d34dd8219 100644
--- a/src/nix-instantiate/nix-instantiate.cc
+++ b/src/nix-instantiate/nix-instantiate.cc
@@ -158,16 +158,16 @@ int main(int argc, char * * argv)
 
         auto store = openStore();
 
-        EvalState state(myArgs.searchPath, store);
-        state.repair = repair;
+        auto state = std::make_unique<EvalState>(myArgs.searchPath, store);
+        state->repair = repair;
 
-        Bindings & autoArgs = *myArgs.getAutoArgs(state);
+        Bindings & autoArgs = *myArgs.getAutoArgs(*state);
 
         if (attrPaths.empty()) attrPaths = {""};
 
         if (findFile) {
             for (auto & i : files) {
-                Path p = state.findFile(i);
+                Path p = state->findFile(i);
                 if (p == "") throw Error(format("unable to find '%1%'") % i);
                 std::cout << p << std::endl;
             }
@@ -175,20 +175,20 @@ int main(int argc, char * * argv)
         }
 
         if (readStdin) {
-            Expr * e = state.parseStdin();
-            processExpr(state, attrPaths, parseOnly, strict, autoArgs,
+            Expr * e = state->parseStdin();
+            processExpr(*state, attrPaths, parseOnly, strict, autoArgs,
                 evalOnly, outputKind, xmlOutputSourceLocation, e);
         } else if (files.empty() && !fromArgs)
             files.push_back("./default.nix");
 
         for (auto & i : files) {
             Expr * e = fromArgs
-                ? state.parseExprFromString(i, absPath("."))
-                : state.parseExprFromFile(resolveExprPath(state.checkSourcePath(lookupFileArg(state, i))));
-            processExpr(state, attrPaths, parseOnly, strict, autoArgs,
+                ? state->parseExprFromString(i, absPath("."))
+                : state->parseExprFromFile(resolveExprPath(state->checkSourcePath(lookupFileArg(*state, i))));
+            processExpr(*state, attrPaths, parseOnly, strict, autoArgs,
                 evalOnly, outputKind, xmlOutputSourceLocation, e);
         }
 
-        state.printStats();
+        state->printStats();
     });
 }
diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc
index fa7ee254500c..50b2c2803ec9 100644
--- a/src/nix-prefetch-url/nix-prefetch-url.cc
+++ b/src/nix-prefetch-url/nix-prefetch-url.cc
@@ -95,9 +95,9 @@ int main(int argc, char * * argv)
             throw UsageError("too many arguments");
 
         auto store = openStore();
-        EvalState state(myArgs.searchPath, store);
+        auto state = std::make_unique<EvalState>(myArgs.searchPath, store);
 
-        Bindings & autoArgs = *myArgs.getAutoArgs(state);
+        Bindings & autoArgs = *myArgs.getAutoArgs(*state);
 
         /* If -A is given, get the URI from the specified Nix
            expression. */
@@ -107,33 +107,33 @@ int main(int argc, char * * argv)
                 throw UsageError("you must specify a URI");
             uri = args[0];
         } else {
-            Path path = resolveExprPath(lookupFileArg(state, args.empty() ? "." : args[0]));
+            Path path = resolveExprPath(lookupFileArg(*state, args.empty() ? "." : args[0]));
             Value vRoot;
-            state.evalFile(path, vRoot);
-            Value & v(*findAlongAttrPath(state, attrPath, autoArgs, vRoot));
-            state.forceAttrs(v);
+            state->evalFile(path, vRoot);
+            Value & v(*findAlongAttrPath(*state, attrPath, autoArgs, vRoot));
+            state->forceAttrs(v);
 
             /* Extract the URI. */
-            auto attr = v.attrs->find(state.symbols.create("urls"));
+            auto attr = v.attrs->find(state->symbols.create("urls"));
             if (attr == v.attrs->end())
                 throw Error("attribute set does not contain a 'urls' attribute");
-            state.forceList(*attr->value);
+            state->forceList(*attr->value);
             if (attr->value->listSize() < 1)
                 throw Error("'urls' list is empty");
-            uri = state.forceString(*attr->value->listElems()[0]);
+            uri = state->forceString(*attr->value->listElems()[0]);
 
             /* Extract the hash mode. */
-            attr = v.attrs->find(state.symbols.create("outputHashMode"));
+            attr = v.attrs->find(state->symbols.create("outputHashMode"));
             if (attr == v.attrs->end())
                 printInfo("warning: this does not look like a fetchurl call");
             else
-                unpack = state.forceString(*attr->value) == "recursive";
+                unpack = state->forceString(*attr->value) == "recursive";
 
             /* Extract the name. */
             if (name.empty()) {
-                attr = v.attrs->find(state.symbols.create("name"));
+                attr = v.attrs->find(state->symbols.create("name"));
                 if (attr != v.attrs->end())
-                    name = state.forceString(*attr->value);
+                    name = state->forceString(*attr->value);
             }
         }
 
@@ -158,7 +158,7 @@ int main(int argc, char * * argv)
 
         if (storePath.empty()) {
 
-            auto actualUri = resolveMirrorUri(state, uri);
+            auto actualUri = resolveMirrorUri(*state, uri);
 
             /* Download the file. */
             DownloadRequest req(actualUri);
diff --git a/src/nix/installables.cc b/src/nix/installables.cc
index a3fdd8a2808d..0be992b03c5a 100644
--- a/src/nix/installables.cc
+++ b/src/nix/installables.cc
@@ -86,22 +86,6 @@ Buildable Installable::toBuildable()
     return std::move(buildables[0]);
 }
 
-struct InstallableStoreDrv : Installable
-{
-    Path drvPath;
-
-    InstallableStoreDrv(const Path & drvPath) : drvPath(drvPath) { }
-
-    std::string what() override { return drvPath; }
-
-    Buildables toBuildables() override
-    {
-        Buildable b = {drvPath};
-        // FIXME: add outputs?
-        return {b};
-    }
-};
-
 struct InstallableStorePath : Installable
 {
     Path storePath;
@@ -226,12 +210,8 @@ static std::vector<std::shared_ptr<Installable>> parseInstallables(
 
             auto path = store->toStorePath(store->followLinksToStore(s));
 
-            if (store->isStorePath(path)) {
-                if (isDerivation(path))
-                    result.push_back(std::make_shared<InstallableStoreDrv>(path));
-                else
-                    result.push_back(std::make_shared<InstallableStorePath>(path));
-            }
+            if (store->isStorePath(path))
+                result.push_back(std::make_shared<InstallableStorePath>(path));
         }
 
         else if (s == "" || std::regex_match(s, attrPathRegex))
diff --git a/src/nix/main.cc b/src/nix/main.cc
index bb107ec7d3f6..9cd5d21c84b6 100644
--- a/src/nix/main.cc
+++ b/src/nix/main.cc
@@ -34,9 +34,10 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
             .handler([&]() {
                 std::cout << "The following configuration options are available:\n\n";
                 Table2 tbl;
-                for (const auto & s : settings._getSettings())
-                    if (!s.second.isAlias)
-                        tbl.emplace_back(s.first, s.second.setting->description);
+                std::map<std::string, Config::SettingInfo> settings;
+                globalConfig.getSettings(settings);
+                for (const auto & s : settings)
+                    tbl.emplace_back(s.first, s.second.description);
                 printTable(std::cout, tbl);
                 throw Exit();
             });
diff --git a/src/nix/progress-bar.cc b/src/nix/progress-bar.cc
index 40b905ba3243..8093d8761c4d 100644
--- a/src/nix/progress-bar.cc
+++ b/src/nix/progress-bar.cc
@@ -75,9 +75,10 @@ public:
         updateThread = std::thread([&]() {
             auto state(state_.lock());
             while (state->active) {
-                state.wait(updateCV);
+                auto r = state.wait_for(updateCV, std::chrono::seconds(1));
                 draw(*state);
-                state.wait_for(quitCV, std::chrono::milliseconds(50));
+                if (r == std::cv_status::no_timeout)
+                  state.wait_for(quitCV, std::chrono::milliseconds(50));
             }
         });
     }
diff --git a/src/nix/repl.cc b/src/nix/repl.cc
index f84774a53367..4723a1974b77 100644
--- a/src/nix/repl.cc
+++ b/src/nix/repl.cc
@@ -693,8 +693,8 @@ struct CmdRepl : StoreCommand, MixEvalArgs
 
     void run(ref<Store> store) override
     {
-        NixRepl repl(searchPath, openStore());
-        repl.mainLoop(files);
+        auto repl = std::make_unique<NixRepl>(searchPath, openStore());
+        repl->mainLoop(files);
     }
 };
 
diff --git a/src/nix/show-config.cc b/src/nix/show-config.cc
index c64b12c8dd62..86638b50d2c6 100644
--- a/src/nix/show-config.cc
+++ b/src/nix/show-config.cc
@@ -27,10 +27,12 @@ struct CmdShowConfig : Command, MixJSON
         if (json) {
             // FIXME: use appropriate JSON types (bool, ints, etc).
             JSONObject jsonObj(std::cout);
-            settings.toJSON(jsonObj);
+            globalConfig.toJSON(jsonObj);
         } else {
-            for (auto & s : settings.getSettings())
-                std::cout << s.first + " = " + s.second + "\n";
+            std::map<std::string, Config::SettingInfo> settings;
+            globalConfig.getSettings(settings);
+            for (auto & s : settings)
+                std::cout << s.first + " = " + s.second.value + "\n";
         }
     }
 };
diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc
index 758bbbc688bc..e23ae792369c 100644
--- a/src/nix/upgrade-nix.cc
+++ b/src/nix/upgrade-nix.cc
@@ -46,7 +46,7 @@ struct CmdUpgradeNix : StoreCommand
 
     void run(ref<Store> store) override
     {
-        settings.pureEval = true;
+        evalSettings.pureEval = true;
 
         if (profileDir == "")
             profileDir = getProfileDir(store);
@@ -118,13 +118,13 @@ struct CmdUpgradeNix : StoreCommand
         auto req = DownloadRequest("https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix");
         auto res = getDownloader()->download(req);
 
-        EvalState state(Strings(), store);
-        auto v = state.allocValue();
-        state.eval(state.parseExprFromString(*res.data, "/no-such-path"), *v);
-        Bindings & bindings(*state.allocBindings(0));
-        auto v2 = findAlongAttrPath(state, settings.thisSystem, bindings, *v);
+        auto state = std::make_unique<EvalState>(Strings(), store);
+        auto v = state->allocValue();
+        state->eval(state->parseExprFromString(*res.data, "/no-such-path"), *v);
+        Bindings & bindings(*state->allocBindings(0));
+        auto v2 = findAlongAttrPath(*state, settings.thisSystem, bindings, *v);
 
-        return state.forceString(*v2);
+        return state->forceString(*v2);
     }
 };
 
diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh
index 7365a550e57f..eb58ae7c12a8 100644
--- a/tests/binary-cache.sh
+++ b/tests/binary-cache.sh
@@ -52,9 +52,6 @@ export _NIX_FORCE_HTTP_BINARY_CACHE_STORE=1
 basicTests
 
 
-unset _NIX_FORCE_HTTP_BINARY_CACHE_STORE
-
-
 # Test whether Nix notices if the NAR doesn't match the hash in the NAR info.
 clearStore
 
@@ -79,18 +76,29 @@ if nix-store --substituters "file://$cacheDir" -r $outPath; then
 fi
 
 
-# Test whether fallback works if we have cached info but the
-# corresponding NAR has disappeared.
+# Test whether fallback works if a NAR has disappeared. This does not require --fallback.
 clearStore
 
-nix-build --substituters "file://$cacheDir" dependencies.nix --dry-run # get info
+mv $cacheDir/nar $cacheDir/nar2
+
+nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result
 
-mkdir $cacheDir/tmp
-mv $cacheDir/*.nar* $cacheDir/tmp/
+mv $cacheDir/nar2 $cacheDir/nar
 
-NIX_DEBUG_SUBST=1 nix-build --substituters "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result --fallback
 
-mv $cacheDir/tmp/* $cacheDir/
+# Test whether fallback works if a NAR is corrupted. This does require --fallback.
+clearStore
+
+mv $cacheDir/nar $cacheDir/nar2
+mkdir $cacheDir/nar
+for i in $(cd $cacheDir/nar2 && echo *); do touch $cacheDir/nar/$i; done
+
+(! nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result)
+
+nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result --fallback
+
+rm -rf $cacheDir/nar
+mv $cacheDir/nar2 $cacheDir/nar
 
 
 # Test whether building works if the binary cache contains an
@@ -107,6 +115,7 @@ if [ -n "$HAVE_SODIUM" ]; then
 
 # Create a signed binary cache.
 clearCache
+clearCacheCache
 
 declare -a res=($(nix-store --generate-binary-cache-key test.nixos.org-1 $TEST_ROOT/sk1 $TEST_ROOT/pk1 ))
 publicKey="$(cat $TEST_ROOT/pk1)"
@@ -117,7 +126,7 @@ badKey="$(cat $TEST_ROOT/pk2)"
 res=($(nix-store --generate-binary-cache-key foo.nixos.org-1 $TEST_ROOT/sk3 $TEST_ROOT/pk3))
 otherKey="$(cat $TEST_ROOT/pk3)"
 
-nix copy --to file://$cacheDir?secret-key=$TEST_ROOT/sk1 $outPath
+_NIX_FORCE_HTTP_BINARY_CACHE_STORE= nix copy --to file://$cacheDir?secret-key=$TEST_ROOT/sk1 $outPath
 
 
 # Downloading should fail if we don't provide a key.
diff --git a/tests/common.sh.in b/tests/common.sh.in
index 195205988afb..fddd25b366bf 100644
--- a/tests/common.sh.in
+++ b/tests/common.sh.in
@@ -94,11 +94,9 @@ canUseSandbox() {
         return 1
     fi
 
-    if [ -e /proc/sys/kernel/unprivileged_userns_clone ]; then
-        if [ "$(cat /proc/sys/kernel/unprivileged_userns_clone)" != 1 ]; then
-            echo "Unprivileged user namespaces disabled by sysctl, skipping this test..."
-            return 1
-        fi
+    if ! unshare --user true ; then
+        echo "Unprivileged user namespaces disabled by sysctl, skipping this test..."
+        return 1
     fi
 
     return 0
diff --git a/tests/lang/eval-okay-arithmetic.exp b/tests/lang/eval-okay-arithmetic.exp
index b195055b7a09..5c54d10b7b47 100644
--- a/tests/lang/eval-okay-arithmetic.exp
+++ b/tests/lang/eval-okay-arithmetic.exp
@@ -1 +1 @@
-2188
+2216
diff --git a/tests/lang/eval-okay-arithmetic.nix b/tests/lang/eval-okay-arithmetic.nix
index bbbbc4691d75..7e9e6a0b666e 100644
--- a/tests/lang/eval-okay-arithmetic.nix
+++ b/tests/lang/eval-okay-arithmetic.nix
@@ -26,6 +26,10 @@ let {
       (56088 / 123 / 2)
       (3 + 4 * const 5 0 - 6 / id 2)
 
+      (builtins.bitAnd 12 10) # 0b1100 & 0b1010 =  8
+      (builtins.bitOr  12 10) # 0b1100 | 0b1010 = 14
+      (builtins.bitXor 12 10) # 0b1100 ^ 0b1010 =  6
+
       (if 3 < 7 then 1 else err)
       (if 7 < 3 then err else 1)
       (if 3 < 3 then err else 1)
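
The three new bitwise cases evaluate to 8, 14 and 6, i.e. 28 in total, which accounts exactly for the expected sum changing from 2188 to 2216 in eval-okay-arithmetic.exp above.
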
diff --git a/tests/lang/parse-fail-uft8.nix b/tests/lang/parse-fail-uft8.nix
new file mode 100644
index 000000000000..34948d48aed2
--- /dev/null
+++ b/tests/lang/parse-fail-uft8.nix
@@ -0,0 +1 @@
+123 é 4
diff --git a/tests/plugins/plugintest.cc b/tests/plugins/plugintest.cc
index 8da15ebabd7d..c085d33295be 100644
--- a/tests/plugins/plugintest.cc
+++ b/tests/plugins/plugintest.cc
@@ -1,16 +1,21 @@
-#include "globals.hh"
+#include "config.hh"
 #include "primops.hh"
 
 using namespace nix;
 
-static BaseSetting<bool> settingSet{false, "setting-set",
+struct MySettings : Config
+{
+    Setting<bool> settingSet{this, false, "setting-set",
         "Whether the plugin-defined setting was set"};
+};
+
+MySettings mySettings;
 
-static RegisterSetting rs(&settingSet);
+static GlobalConfig::Register rs(&mySettings);
 
 static void prim_anotherNull (EvalState & state, const Pos & pos, Value ** args, Value & v)
 {
-    if (settingSet)
+    if (mySettings.settingSet)
         mkNull(v);
     else
         mkBool(v, false);