-rw-r--r--  Makefile | 10
-rw-r--r--  configure.ac | 4
-rw-r--r--  doc/manual/advanced-topics/distributed-builds.xml | 1
-rw-r--r--  doc/manual/command-ref/conf-file.xml | 225
-rw-r--r--  doc/manual/command-ref/nix-shell.xml | 21
-rw-r--r--  doc/manual/command-ref/nix-store.xml | 14
-rw-r--r--  doc/manual/expressions/advanced-attributes.xml | 48
-rw-r--r--  doc/manual/expressions/builtins.xml | 446
-rw-r--r--  doc/manual/installation/env-variables.xml | 2
-rw-r--r--  doc/manual/installation/installing-binary.xml | 178
-rw-r--r--  doc/manual/installation/prerequisites-source.xml | 3
-rw-r--r--  doc/manual/installation/supported-platforms.xml | 2
-rw-r--r--  doc/manual/installation/upgrading.xml | 22
-rw-r--r--  doc/manual/manual.xml | 1
-rw-r--r--  doc/manual/packages/s3-substituter.xml | 202
-rw-r--r--  doc/manual/release-notes/release-notes.xml | 1
-rw-r--r--  doc/manual/release-notes/rl-2.1.xml | 25
-rw-r--r--  doc/manual/release-notes/rl-2.2.xml | 25
-rw-r--r--  local.mk | 2
-rw-r--r--  release.nix | 2
-rw-r--r--  scripts/install-nix-from-closure.sh | 25
-rw-r--r--  scripts/install.in | 4
-rw-r--r--  scripts/nix-profile-daemon.sh.in | 2
-rw-r--r--  src/build-remote/build-remote.cc | 19
-rw-r--r--  src/build-remote/local.mk | 9
-rw-r--r--  src/libexpr/eval.cc | 150
-rw-r--r--  src/libexpr/primops.cc | 2
-rw-r--r--  src/libstore/build.cc | 436
-rw-r--r--  src/libstore/builtins/fetchurl.cc | 5
-rw-r--r--  src/libstore/derivations.cc | 14
-rw-r--r--  src/libstore/derivations.hh | 4
-rw-r--r--  src/libstore/download.cc | 47
-rw-r--r--  src/libstore/globals.cc | 15
-rw-r--r--  src/libstore/globals.hh | 12
-rw-r--r--  src/libstore/http-binary-cache-store.cc | 41
-rw-r--r--  src/libstore/local-store.cc | 6
-rw-r--r--  src/libstore/parsed-derivations.cc | 111
-rw-r--r--  src/libstore/parsed-derivations.hh | 35
-rw-r--r--  src/libstore/remote-store.cc | 171
-rw-r--r--  src/libstore/remote-store.hh | 13
-rw-r--r--  src/libstore/s3-binary-cache-store.cc | 28
-rw-r--r--  src/libstore/store-api.cc | 4
-rw-r--r--  src/libstore/store-api.hh | 1
-rw-r--r--  src/libutil/archive.cc | 2
-rw-r--r--  src/libutil/compression.cc | 2
-rw-r--r--  src/libutil/pool.hh | 6
-rw-r--r--  src/libutil/serialise.cc | 31
-rw-r--r--  src/libutil/serialise.hh | 2
-rw-r--r--  src/libutil/util.cc | 4
-rw-r--r--  src/nix-build/local.mk | 9
-rwxr-xr-x  src/nix-build/nix-build.cc | 24
-rw-r--r--  src/nix-channel/local.mk | 7
-rwxr-xr-x  src/nix-channel/nix-channel.cc | 18
-rw-r--r--  src/nix-collect-garbage/local.mk | 7
-rw-r--r--  src/nix-collect-garbage/nix-collect-garbage.cc | 15
-rw-r--r--  src/nix-copy-closure/local.mk | 7
-rwxr-xr-x  src/nix-copy-closure/nix-copy-closure.cc | 13
-rw-r--r--  src/nix-daemon/local.mk | 13
-rw-r--r--  src/nix-daemon/nix-daemon.cc | 20
-rw-r--r--  src/nix-env/local.mk | 7
-rw-r--r--  src/nix-env/nix-env.cc | 22
-rw-r--r--  src/nix-instantiate/local.mk | 7
-rw-r--r--  src/nix-instantiate/nix-instantiate.cc | 16
-rw-r--r--  src/nix-prefetch-url/local.mk | 7
-rw-r--r--  src/nix-prefetch-url/nix-prefetch-url.cc | 14
-rw-r--r--  src/nix-store/graphml.cc | 90
-rw-r--r--  src/nix-store/graphml.hh (renamed from src/nix-store/xmlgraph.hh) | 2
-rw-r--r--  src/nix-store/local.mk | 9
-rw-r--r--  src/nix-store/nix-store.cc | 23
-rw-r--r--  src/nix-store/xmlgraph.cc | 66
-rw-r--r--  src/nix/copy.cc | 8
-rw-r--r--  src/nix/local.mk | 21
-rw-r--r--  src/nix/main.cc | 6
-rw-r--r--  src/nix/repl.cc | 5
-rw-r--r--  src/nix/search.cc | 4
-rw-r--r--  src/nix/verify.cc | 2
-rw-r--r--  tests/build-remote.sh | 3
-rw-r--r--  tests/check-reqs.nix | 2
-rw-r--r--  tests/remote-builds.nix | 36
-rw-r--r--  tests/signing.sh | 4
-rw-r--r--  version | 2
81 files changed, 1939 insertions, 985 deletions
diff --git a/Makefile b/Makefile
index 834f84b286bf..45a3338ed21c 100644
--- a/Makefile
+++ b/Makefile
@@ -5,17 +5,7 @@ makefiles = \
   src/libmain/local.mk \
   src/libexpr/local.mk \
   src/nix/local.mk \
-  src/nix-store/local.mk \
-  src/nix-instantiate/local.mk \
-  src/nix-env/local.mk \
-  src/nix-daemon/local.mk \
-  src/nix-collect-garbage/local.mk \
-  src/nix-copy-closure/local.mk \
-  src/nix-prefetch-url/local.mk \
   src/resolve-system-dependencies/local.mk \
-  src/nix-channel/local.mk \
-  src/nix-build/local.mk \
-  src/build-remote/local.mk \
   scripts/local.mk \
   corepkgs/local.mk \
   misc/systemd/local.mk \
diff --git a/configure.ac b/configure.ac
index cc354f6f3028..9c85182efbf9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -152,9 +152,9 @@ PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"]
 
 # Look for libbz2, a required dependency.
 AC_CHECK_LIB([bz2], [BZ2_bzWriteOpen], [true],
-  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2.  See http://www.bzip.org/.])])
+  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2.  See https://web.archive.org/web/20180624184756/http://www.bzip.org/.])])
 AC_CHECK_HEADERS([bzlib.h], [true],
-  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2.  See http://www.bzip.org/.])])
+  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2.  See https://web.archive.org/web/20180624184756/http://www.bzip.org/.])])
 
 
 # Look for SQLite, a required dependency.
diff --git a/doc/manual/advanced-topics/distributed-builds.xml b/doc/manual/advanced-topics/distributed-builds.xml
index 20fd6a0cfb0d..bbb573e35400 100644
--- a/doc/manual/advanced-topics/distributed-builds.xml
+++ b/doc/manual/advanced-topics/distributed-builds.xml
@@ -81,6 +81,7 @@ or a newline, e.g.
 
 <para>Each machine specification consists of the following elements,
 separated by spaces. Only the first element is required.
+To leave a field at its default, set it to <literal>-</literal>.
 
 <orderedlist>
 
diff --git a/doc/manual/command-ref/conf-file.xml b/doc/manual/command-ref/conf-file.xml
index 6a23b8f1fda4..e9947ebc673f 100644
--- a/doc/manual/command-ref/conf-file.xml
+++ b/doc/manual/command-ref/conf-file.xml
@@ -135,7 +135,6 @@ false</literal>.</para>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-builders">
     <term><literal>builders</literal></term>
     <listitem>
@@ -159,7 +158,6 @@ false</literal>.</para>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-build-users-group"><term><literal>build-users-group</literal></term>
 
    <listitem><para>This option specifies the Unix group containing
@@ -210,7 +208,6 @@ false</literal>.</para>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-connect-timeout"><term><literal>connect-timeout</literal></term>
 
     <listitem>
@@ -243,7 +240,6 @@ false</literal>.</para>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-extra-sandbox-paths">
     <term><literal>extra-sandbox-paths</literal></term>
 
@@ -283,7 +279,6 @@ false</literal>.</para>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-fallback"><term><literal>fallback</literal></term>
 
     <listitem><para>If set to <literal>true</literal>, Nix will fall
@@ -293,7 +288,6 @@ false</literal>.</para>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-fsync-metadata"><term><literal>fsync-metadata</literal></term>
 
     <listitem><para>If set to <literal>true</literal>, changes to the
@@ -304,7 +298,6 @@ false</literal>.</para>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-hashed-mirrors"><term><literal>hashed-mirrors</literal></term>
 
     <listitem><para>A list of web servers used by
@@ -367,10 +360,8 @@ builtins.fetchurl {
     options a store path was built), so by default this option is on.
     Turn it off to save a bit of disk space (or a lot if
     <literal>keep-outputs</literal> is also turned on).</para></listitem>
-
   </varlistentry>
 
-
   <varlistentry xml:id="conf-keep-env-derivations"><term><literal>keep-env-derivations</literal></term>
 
     <listitem><para>If <literal>false</literal> (default), derivations
@@ -394,7 +385,6 @@ builtins.fetchurl {
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-keep-outputs"><term><literal>keep-outputs</literal></term>
 
     <listitem><para>If <literal>true</literal>, the garbage collector
@@ -408,10 +398,8 @@ builtins.fetchurl {
     only at build time (e.g., the C compiler, or source tarballs
     downloaded from the network).  To prevent it from doing so, set
     this option to <literal>true</literal>.</para></listitem>
-
   </varlistentry>
 
-
   <varlistentry xml:id="conf-max-build-log-size"><term><literal>max-build-log-size</literal></term>
 
     <listitem>
@@ -444,10 +432,8 @@ builtins.fetchurl {
     overridden using the <option
     linkend='opt-max-jobs'>--max-jobs</option> (<option>-j</option>)
     command line switch.</para></listitem>
-
   </varlistentry>
 
-
   <varlistentry xml:id="conf-max-silent-time"><term><literal>max-silent-time</literal></term>
 
     <listitem>
@@ -527,7 +513,12 @@ password <replaceable>my-password</replaceable>
 
     For the exact syntax, see <link
     xlink:href="https://ec.haxx.se/usingcurl-netrc.html">the
-    <literal>curl</literal> documentation.</link></para></listitem>
+    <literal>curl</literal> documentation.</link></para>
+
+    <note><para>This must be an absolute path, and <literal>~</literal>
+    is not resolved. For example, <filename>~/.netrc</filename> won't
+    resolve to your home directory's <filename>.netrc</filename>.</para></note>
+    </listitem>
 
   </varlistentry>
 
@@ -598,7 +589,6 @@ password <replaceable>my-password</replaceable>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-repeat"><term><literal>repeat</literal></term>
 
     <listitem><para>How many times to repeat builds to check whether
@@ -610,7 +600,6 @@ password <replaceable>my-password</replaceable>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-require-sigs"><term><literal>require-sigs</literal></term>
 
     <listitem><para>If set to <literal>true</literal> (the default),
@@ -674,7 +663,6 @@ password <replaceable>my-password</replaceable>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-sandbox-dev-shm-size"><term><literal>sandbox-dev-shm-size</literal></term>
 
     <listitem><para>This option determines the maximum size of the
@@ -740,7 +728,6 @@ password <replaceable>my-password</replaceable>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-substituters"><term><literal>substituters</literal></term>
 
     <listitem><para>A list of URLs of substituters, separated by
@@ -749,7 +736,6 @@ password <replaceable>my-password</replaceable>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-system"><term><literal>system</literal></term>
 
     <listitem><para>This option specifies the canonical Nix system
@@ -771,6 +757,33 @@ password <replaceable>my-password</replaceable>
   </varlistentry>
 
 
+  <varlistentry xml:id="conf-system-features"><term><literal>system-features</literal></term>
+
+    <listitem><para>A set of system “features” supported by this
+    machine, e.g. <literal>kvm</literal>. Derivations can express a
+    dependency on such features through the derivation attribute
+    <varname>requiredSystemFeatures</varname>. For example, the
+    attribute
+
+<programlisting>
+requiredSystemFeatures = [ "kvm" ];
+</programlisting>
+
+    ensures that the derivation can only be built on a machine with
+    the <literal>kvm</literal> feature.</para>
+
+    <para>This setting by default includes <literal>kvm</literal> if
+    <filename>/dev/kvm</filename> is accessible, and the
+    pseudo-features <literal>nixos-test</literal>,
+    <literal>benchmark</literal> and <literal>big-parallel</literal>
+    that are used in Nixpkgs to route builds to specific
+    machines.</para>
+
+    </listitem>
+
+  </varlistentry>
+
+
   <varlistentry xml:id="conf-timeout"><term><literal>timeout</literal></term>
 
     <listitem>
@@ -790,7 +803,6 @@ password <replaceable>my-password</replaceable>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-trusted-public-keys"><term><literal>trusted-public-keys</literal></term>
 
     <listitem><para>A whitespace-separated list of public keys. When
@@ -801,7 +813,6 @@ password <replaceable>my-password</replaceable>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-trusted-substituters"><term><literal>trusted-substituters</literal></term>
 
     <listitem><para>A list of URLs of substituters, separated by
@@ -814,7 +825,6 @@ password <replaceable>my-password</replaceable>
 
   </varlistentry>
 
-
   <varlistentry xml:id="conf-trusted-users"><term><literal>trusted-users</literal></term>
 
     <listitem>
@@ -840,8 +850,177 @@ password <replaceable>my-password</replaceable>
   </varlistentry>
 
 </variablelist>
+</para>
+
+<refsection>
+  <title>Deprecated Settings</title>
+
+<para>
+
+<variablelist>
+
+  <varlistentry xml:id="conf-binary-caches">
+    <term><literal>binary-caches</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>binary-caches</literal> is now an alias to
+    <xref linkend="conf-substituters" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-binary-cache-public-keys">
+    <term><literal>binary-cache-public-keys</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>binary-cache-public-keys</literal> is now an alias to
+    <xref linkend="conf-trusted-public-keys" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-compress-log">
+    <term><literal>build-compress-log</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-compress-log</literal> is now an alias to
+    <xref linkend="conf-compress-build-log" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-cores">
+    <term><literal>build-cores</literal></term>
 
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-cores</literal> is now an alias to
+    <xref linkend="conf-cores" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-extra-chroot-dirs">
+    <term><literal>build-extra-chroot-dirs</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-extra-chroot-dirs</literal> is now an alias to
+    <xref linkend="conf-extra-sandbox-paths" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-extra-sandbox-paths">
+    <term><literal>build-extra-sandbox-paths</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-extra-sandbox-paths</literal> is now an alias to
+    <xref linkend="conf-extra-sandbox-paths" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-fallback">
+    <term><literal>build-fallback</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-fallback</literal> is now an alias to
+    <xref linkend="conf-fallback" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-max-jobs">
+    <term><literal>build-max-jobs</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-max-jobs</literal> is now an alias to
+    <xref linkend="conf-max-jobs" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-max-log-size">
+    <term><literal>build-max-log-size</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-max-log-size</literal> is now an alias to
+    <xref linkend="conf-max-build-log-size" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-max-silent-time">
+    <term><literal>build-max-silent-time</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-max-silent-time</literal> is now an alias to
+    <xref linkend="conf-max-silent-time" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-repeat">
+    <term><literal>build-repeat</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-repeat</literal> is now an alias to
+    <xref linkend="conf-repeat" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-timeout">
+    <term><literal>build-timeout</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-timeout</literal> is now an alias to
+    <xref linkend="conf-timeout" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-use-chroot">
+    <term><literal>build-use-chroot</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-use-chroot</literal> is now an alias to
+    <xref linkend="conf-sandbox" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-use-sandbox">
+    <term><literal>build-use-sandbox</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-use-sandbox</literal> is now an alias to
+    <xref linkend="conf-sandbox" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-build-use-substitutes">
+    <term><literal>build-use-substitutes</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>build-use-substitutes</literal> is now an alias to
+    <xref linkend="conf-substitute" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-gc-keep-derivations">
+    <term><literal>gc-keep-derivations</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>gc-keep-derivations</literal> is now an alias to
+    <xref linkend="conf-keep-derivations" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-gc-keep-outputs">
+    <term><literal>gc-keep-outputs</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>gc-keep-outputs</literal> is now an alias to
+    <xref linkend="conf-keep-outputs" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-env-keep-derivations">
+    <term><literal>env-keep-derivations</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>env-keep-derivations</literal> is now an alias to
+    <xref linkend="conf-keep-env-derivations" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-extra-binary-caches">
+    <term><literal>extra-binary-caches</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>extra-binary-caches</literal> is now an alias to
+    <xref linkend="conf-extra-substituters" />.</para></listitem>
+  </varlistentry>
+
+  <varlistentry xml:id="conf-trusted-binary-caches">
+    <term><literal>trusted-binary-caches</literal></term>
+
+    <listitem><para><emphasis>Deprecated:</emphasis>
+    <literal>trusted-binary-caches</literal> is now an alias to
+    <xref linkend="conf-trusted-substituters" />.</para></listitem>
+  </varlistentry>
+</variablelist>
 </para>
+</refsection>
 
 </refsection>
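A minimal sketch of how a derivation would request one of the features
described by the new system-features documentation above; the
requiredSystemFeatures attribute is taken from the manual text, while the
package itself is hypothetical:

    with import <nixpkgs> {};

    stdenv.mkDerivation {
      name = "kvm-test-image";
      # Only build on machines whose nix.conf system-features
      # (or remote machine spec) includes "kvm".
      requiredSystemFeatures = [ "kvm" ];
      buildCommand = ''
        echo "built with access to /dev/kvm" > $out
      '';
    }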
 
diff --git a/doc/manual/command-ref/nix-shell.xml b/doc/manual/command-ref/nix-shell.xml
index 5c44c4a8f446..cb443c888d3d 100644
--- a/doc/manual/command-ref/nix-shell.xml
+++ b/doc/manual/command-ref/nix-shell.xml
@@ -317,13 +317,28 @@ while (my $token = $p->get_tag("a")) {
 
 </para>
 
-<para>Finally, the following Haskell script uses a specific branch of
-Nixpkgs/NixOS (the 14.12 stable branch):
+<para>Sometimes you need to pass a simple Nix expression to customize
+a package like Terraform:
+
+<programlisting><![CDATA[
+#! /usr/bin/env nix-shell
+#! nix-shell -i bash -p "terraform.withPlugins (plugins: [ plugins.openstack ])"
+
+terraform apply
+]]></programlisting>
+
+<note><para>You must use double quotes (<literal>"</literal>) when
+passing a simple Nix expression in a nix-shell shebang.</para></note>
+</para>
+
+<para>Finally, by combining multiple nix-shell shebang lines, the
+following Haskell script uses a specific branch of Nixpkgs/NixOS (the
+18.03 stable branch):
 
 <programlisting><![CDATA[
 #! /usr/bin/env nix-shell
 #! nix-shell -i runghc -p haskellPackages.ghc haskellPackages.HTTP haskellPackages.tagsoup
-#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz
+#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-18.03.tar.gz
 
 import Network.HTTP
 import Text.HTML.TagSoup
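The Terraform shebang above can also be written as a standalone
shell.nix, which some may find easier to review; this is a sketch
assuming a nixpkgs that provides mkShell and terraform.withPlugins (the
latter is what the shebang itself uses):

    # shell.nix — enter the environment with `nix-shell`
    with import <nixpkgs> {};

    mkShell {
      buildInputs = [
        # The same expression as in the shebang line, just unquoted.
        (terraform.withPlugins (plugins: [ plugins.openstack ]))
      ];
    }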
diff --git a/doc/manual/command-ref/nix-store.xml b/doc/manual/command-ref/nix-store.xml
index 5fff64a18f93..41a04f265d7c 100644
--- a/doc/manual/command-ref/nix-store.xml
+++ b/doc/manual/command-ref/nix-store.xml
@@ -275,7 +275,7 @@ as a means of providing Nix store access to a restricted ssh user.
 
     <listitem><para>Allow the connected client to request the realization
     of derivations. In effect, this can be used to make the host act
-    as a build slave.</para></listitem>
+    as a remote builder.</para></listitem>
 
   </varlistentry>
 
@@ -679,6 +679,18 @@ query is applied to the target of the symlink.</para>
 
   </varlistentry>
 
+  <varlistentry><term><option>--graphml</option></term>
+
+    <listitem><para>Prints the references graph of the store paths
+    <replaceable>paths</replaceable> in the <link
+    xlink:href="http://graphml.graphdrawing.org/">GraphML</link> file format.
+    This can be used to visualise dependency graphs. To obtain a
+    build-time dependency graph, apply this to a store derivation. To
+    obtain a runtime dependency graph, apply it to an output
+    path.</para></listitem>
+
+  </varlistentry>
+
   <varlistentry><term><option>--binding</option> <replaceable>name</replaceable></term>
     <term><option>-b</option> <replaceable>name</replaceable></term>
 
diff --git a/doc/manual/expressions/advanced-attributes.xml b/doc/manual/expressions/advanced-attributes.xml
index dfd013b5cf31..2af7a51acfbb 100644
--- a/doc/manual/expressions/advanced-attributes.xml
+++ b/doc/manual/expressions/advanced-attributes.xml
@@ -50,6 +50,40 @@ allowedRequisites = [ foobar ];
 
   </varlistentry>
 
+  <varlistentry><term><varname>disallowedReferences</varname></term>
+
+    <listitem><para>The optional attribute
+    <varname>disallowedReferences</varname> specifies a list of illegal
+    references (dependencies) of the output of the builder.  For
+    example,
+
+<programlisting>
+disallowedReferences = [ foo ];
+</programlisting>
+
+    enforces that the output of a derivation cannot have a direct
+    runtime dependency on the derivation <varname>foo</varname>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><varname>disallowedRequisites</varname></term>
+
+    <listitem><para>This attribute is similar to
+    <varname>disallowedReferences</varname>, but it specifies illegal
+    requisites for the whole closure, i.e. all direct and transitive
+    dependencies.  For example,
+
+<programlisting>
+disallowedRequisites = [ foobar ];
+</programlisting>
+
+    enforces that the output of a derivation cannot have any
+    runtime dependency on <varname>foobar</varname> or any other derivation
+    depending recursively on <varname>foobar</varname>.</para></listitem>
+
+  </varlistentry>
+
 
   <varlistentry><term><varname>exportReferencesGraph</varname></term>
 
@@ -182,7 +216,7 @@ fetchurl {
 <programlisting>
 { stdenv, curl }: # The <command>curl</command> program is used for downloading.
 
-{ url, md5 }:
+{ url, sha256 }:
 
 stdenv.mkDerivation {
   name = baseNameOf (toString url);
@@ -190,10 +224,10 @@ stdenv.mkDerivation {
   buildInputs = [ curl ];
 
   # This is a fixed-output derivation; the output must be a regular
-  # file with MD5 hash <varname>md5</varname>.
+  # file with SHA256 hash <varname>sha256</varname>.
   outputHashMode = "flat";
-  outputHashAlgo = "md5";
-  outputHash = md5;
+  outputHashAlgo = "sha256";
+  outputHash = sha256;
 
   inherit url;
 }
@@ -203,8 +237,8 @@ stdenv.mkDerivation {
 
     <para>The <varname>outputHashAlgo</varname> attribute specifies
     the hash algorithm used to compute the hash.  It can currently be
-    <literal>"md5"</literal>, <literal>"sha1"</literal> or
-    <literal>"sha256"</literal>.</para>
+    <literal>"sha1"</literal>, <literal>"sha256"</literal> or
+    <literal>"sha512"</literal>.</para>
 
     <para>The <varname>outputHashMode</varname> attribute determines
     how the hash is computed.  It must be one of the following two
@@ -217,7 +251,7 @@ stdenv.mkDerivation {
         <listitem><para>The output must be a non-executable regular
         file.  If it isn’t, the build fails.  The hash is simply
         computed over the contents of that file (so it’s equal to what
-        Unix commands like <command>md5sum</command> or
+        Unix commands like <command>sha256sum</command> or
         <command>sha1sum</command> produce).</para>
 
         <para>This is the default.</para></listitem>
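Putting the attributes documented in this file together, here is a
hedged sketch that mirrors the fetchurl example already shown above,
with disallowedReferences added; builder.sh is the same hypothetical
download script the manual's example assumes:

    { stdenv, curl }:

    { url, sha256 }:

    stdenv.mkDerivation {
      name = baseNameOf (toString url);
      builder = ./builder.sh;
      buildInputs = [ curl ];

      # Fixed-output: the result must hash to the given SHA-256.
      outputHashMode = "flat";
      outputHashAlgo = "sha256";
      outputHash = sha256;

      # The downloaded file must not retain a runtime reference to curl.
      disallowedReferences = [ curl ];

      inherit url;
    }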
diff --git a/doc/manual/expressions/builtins.xml b/doc/manual/expressions/builtins.xml
index 07d8357b40b5..8d12da9b1356 100644
--- a/doc/manual/expressions/builtins.xml
+++ b/doc/manual/expressions/builtins.xml
@@ -21,7 +21,8 @@ available as <function>builtins.derivation</function>.</para>
 <variablelist>
 
 
-  <varlistentry><term><function>abort</function> <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-abort'>
+    <term><function>abort</function> <replaceable>s</replaceable></term>
 
     <listitem><para>Abort Nix expression evaluation, print error
     message <replaceable>s</replaceable>.</para></listitem>
@@ -29,8 +30,10 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.add</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-add'>
+    <term><function>builtins.add</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable>
+    </term>
 
     <listitem><para>Return the sum of the numbers
     <replaceable>e1</replaceable> and
@@ -39,8 +42,9 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.all</function>
-  <replaceable>pred</replaceable> <replaceable>list</replaceable></term>
+  <varlistentry xml:id='builtin-all'>
+    <term><function>builtins.all</function>
+    <replaceable>pred</replaceable> <replaceable>list</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if the function
     <replaceable>pred</replaceable> returns <literal>true</literal>
@@ -50,8 +54,9 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.any</function>
-  <replaceable>pred</replaceable> <replaceable>list</replaceable></term>
+  <varlistentry xml:id='builtin-any'>
+    <term><function>builtins.any</function>
+    <replaceable>pred</replaceable> <replaceable>list</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if the function
     <replaceable>pred</replaceable> returns <literal>true</literal>
@@ -61,8 +66,9 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.attrNames</function>
-  <replaceable>set</replaceable></term>
+  <varlistentry xml:id='builtin-attrNames'>
+    <term><function>builtins.attrNames</function>
+    <replaceable>set</replaceable></term>
 
     <listitem><para>Return the names of the attributes in the set
     <replaceable>set</replaceable> in an alphabetically sorted list.  For instance,
@@ -72,8 +78,9 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.attrValues</function>
-  <replaceable>set</replaceable></term>
+  <varlistentry xml:id='builtin-attrValues'>
+    <term><function>builtins.attrValues</function>
+    <replaceable>set</replaceable></term>
 
     <listitem><para>Return the values of the attributes in the set
     <replaceable>set</replaceable> in the order corresponding to the
@@ -82,7 +89,8 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
-  <varlistentry><term><function>baseNameOf</function> <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-baseNameOf'>
+    <term><function>baseNameOf</function> <replaceable>s</replaceable></term>
 
     <listitem><para>Return the <emphasis>base name</emphasis> of the
     string <replaceable>s</replaceable>, that is, everything following
@@ -92,8 +100,9 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.bitAnd</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-bitAnd'>
+    <term><function>builtins.bitAnd</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>Return the bitwise AND of the integers
     <replaceable>e1</replaceable> and
@@ -102,8 +111,9 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.bitOr</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-bitOr'>
+    <term><function>builtins.bitOr</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>Return the bitwise OR of the integers
     <replaceable>e1</replaceable> and
@@ -112,8 +122,9 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.bitXor</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-bitXor'>
+    <term><function>builtins.bitXor</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>Return the bitwise XOR of the integers
     <replaceable>e1</replaceable> and
@@ -122,7 +133,8 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
-  <varlistentry><term><varname>builtins</varname></term>
+  <varlistentry xml:id='builtin-builtins'>
+    <term><varname>builtins</varname></term>
 
     <listitem><para>The set <varname>builtins</varname> contains all
     the built-in functions and values.  You can use
@@ -139,8 +151,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.compareVersions</function>
-  <replaceable>s1</replaceable> <replaceable>s2</replaceable></term>
+  <varlistentry xml:id='builtin-compareVersions'>
+    <term><function>builtins.compareVersions</function>
+    <replaceable>s1</replaceable> <replaceable>s2</replaceable></term>
 
     <listitem><para>Compare two strings representing versions and
     return <literal>-1</literal> if version
@@ -156,8 +169,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.splitVersion</function>
-  <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-splitVersion'>
+    <term><function>builtins.splitVersion</function>
+    <replaceable>s</replaceable></term>
 
     <listitem><para>Split a string representing a version into its
     components, by the same version splitting logic underlying the
@@ -167,16 +181,18 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.concatLists</function>
-  <replaceable>lists</replaceable></term>
+  <varlistentry xml:id='builtin-concatLists'>
+    <term><function>builtins.concatLists</function>
+    <replaceable>lists</replaceable></term>
 
     <listitem><para>Concatenate a list of lists into a single
     list.</para></listitem>
 
   </varlistentry>
 
-  <varlistentry><term><function>builtins.concatStringsSep</function>
-  <replaceable>separator</replaceable> <replaceable>list</replaceable></term>
+  <varlistentry xml:id='builtin-concatStringsSep'>
+    <term><function>builtins.concatStringsSep</function>
+    <replaceable>separator</replaceable> <replaceable>list</replaceable></term>
 
     <listitem><para>Concatenate a list of strings with a separator
     between each element, e.g. <literal>concatStringsSep "/"
@@ -184,8 +200,8 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
 
   </varlistentry>
 
-  <varlistentry
-  xml:id='builtin-currentSystem'><term><varname>builtins.currentSystem</varname></term>
+  <varlistentry xml:id='builtin-currentSystem'>
+    <term><varname>builtins.currentSystem</varname></term>
 
     <listitem><para>The built-in value <varname>currentSystem</varname>
     evaluates to the Nix platform identifier for the Nix installation
@@ -218,8 +234,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
   -->
 
 
-  <varlistentry><term><function>builtins.deepSeq</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-deepSeq'>
+    <term><function>builtins.deepSeq</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>This is like <literal>seq
     <replaceable>e1</replaceable>
@@ -231,8 +248,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>derivation</function>
-  <replaceable>attrs</replaceable></term>
+  <varlistentry xml:id='builtin-derivation'>
+    <term><function>derivation</function>
+    <replaceable>attrs</replaceable></term>
 
     <listitem><para><function>derivation</function> is described in
     <xref linkend='ssec-derivation' />.</para></listitem>
@@ -240,7 +258,8 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>dirOf</function> <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-dirOf'>
+    <term><function>dirOf</function> <replaceable>s</replaceable></term>
 
     <listitem><para>Return the directory part of the string
     <replaceable>s</replaceable>, that is, everything before the final
@@ -250,8 +269,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.div</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-div'>
+    <term><function>builtins.div</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>Return the quotient of the numbers
     <replaceable>e1</replaceable> and
@@ -259,8 +279,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
 
   </varlistentry>
 
-  <varlistentry><term><function>builtins.elem</function>
-  <replaceable>x</replaceable> <replaceable>xs</replaceable></term>
+  <varlistentry xml:id='builtin-elem'>
+    <term><function>builtins.elem</function>
+    <replaceable>x</replaceable> <replaceable>xs</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if a value equal to
     <replaceable>x</replaceable> occurs in the list
@@ -270,8 +291,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.elemAt</function>
-  <replaceable>xs</replaceable> <replaceable>n</replaceable></term>
+  <varlistentry xml:id='builtin-elemAt'>
+    <term><function>builtins.elemAt</function>
+    <replaceable>xs</replaceable> <replaceable>n</replaceable></term>
 
     <listitem><para>Return element <replaceable>n</replaceable> from
     the list <replaceable>xs</replaceable>.  Elements are counted
@@ -281,8 +303,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.fetchurl</function>
-  <replaceable>url</replaceable></term>
+  <varlistentry xml:id='builtin-fetchurl'>
+    <term><function>builtins.fetchurl</function>
+    <replaceable>url</replaceable></term>
 
     <listitem><para>Download the specified URL and return the path of
     the downloaded file. This function is not available if <link
@@ -292,8 +315,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>fetchTarball</function>
-  <replaceable>url</replaceable></term>
+  <varlistentry xml:id='builtin-fetchTarball'>
+    <term><function>fetchTarball</function>
+    <replaceable>url</replaceable></term>
 
     <listitem><para>Download the specified URL, unpack it and return
     the path of the unpacked tree. The file must be a tape archive
@@ -346,7 +370,7 @@ stdenv.mkDerivation { … }
 
   </varlistentry>
 
-  <varlistentry>
+  <varlistentry xml:id='builtin-fetchGit'>
     <term>
       <function>builtins.fetchGit</function>
       <replaceable>args</replaceable>
@@ -398,6 +422,84 @@ stdenv.mkDerivation { … }
           </listitem>
         </varlistentry>
       </variablelist>
+
+      <example>
+        <title>Fetching a private repository over SSH</title>
+        <programlisting>builtins.fetchGit {
+  url = "git@github.com:my-secret/repository.git";
+  ref = "master";
+  rev = "adab8b916a45068c044658c4158d81878f9ed1c3";
+}</programlisting>
+      </example>
+
+      <example>
+        <title>Fetching a repository's specific commit on an arbitrary branch</title>
+        <para>
+          If the revision you're looking for is in the default branch
+          of the git repository, you don't strictly need to specify
+          the branch name in the <varname>ref</varname> attribute.
+        </para>
+        <para>
+          However, if the revision you're looking for is in a
+          non-default branch, you will need to specify the
+          <varname>ref</varname> attribute as well.
+        </para>
+        <programlisting>builtins.fetchGit {
+  url = "https://github.com/nixos/nix.git";
+  rev = "841fcbd04755c7a2865c51c1e2d3b045976b7452";
+  ref = "1.11-maintenance";
+}</programlisting>
+        <note>
+          <para>
+            It is good practice to always specify the branch to which
+            a revision belongs. Without the branch, the fetcher might
+            fail if the default branch changes. It can also be
+            confusing to request a commit from a non-default branch
+            and see the fetch fail; specifying the branch makes the
+            cause of the failure obvious.
+          </para>
+        </note>
+      </example>
+
+      <example>
+        <title>Fetching a repository's specific commit on the default branch</title>
+        <para>
+          If the revision you're looking for is in the default branch
+          of the git repository, you may omit the
+          <varname>ref</varname> attribute.
+        </para>
+        <programlisting>builtins.fetchGit {
+  url = "https://github.com/nixos/nix.git";
+  rev = "841fcbd04755c7a2865c51c1e2d3b045976b7452";
+}</programlisting>
+      </example>
+
+      <example>
+        <title>Fetching a tag</title>
+        <programlisting>builtins.fetchGit {
+  url = "https://github.com/nixos/nix.git";
+  ref = "tags/1.9";
+}</programlisting>
+        <note><para>Due to a bug (<link
+        xlink:href="https://github.com/NixOS/nix/issues/2385">#2385</link>),
+        only non-annotated tags can be fetched.</para></note>
+      </example>
+
+      <example>
+        <title>Fetching the latest version of a remote branch</title>
+        <para>
+          <function>builtins.fetchGit</function> can behave impurely
+          and fetch the latest version of a remote branch.
+        </para>
+        <note><para>Nix will refetch the branch in accordance with
+        the <option>tarball-ttl</option> setting.</para></note>
+        <note><para>This behavior is disabled in
+        <emphasis>pure evaluation mode</emphasis>.</para></note>
+        <programlisting>builtins.fetchGit {
+  url = "ssh://git@github.com/nixos/nix.git";
+  ref = "master";
+}</programlisting>
+      </example>
     </listitem>
   </varlistentry>
 
@@ -468,7 +570,8 @@ stdenv.mkDerivation {
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.foldl’</function>
+  <varlistentry xml:id='builtin-foldl-prime'>
+    <term><function>builtins.foldl’</function>
     <replaceable>op</replaceable> <replaceable>nul</replaceable> <replaceable>list</replaceable></term>
 
     <listitem><para>Reduce a list by applying a binary operator, from
@@ -481,7 +584,8 @@ stdenv.mkDerivation {
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.functionArgs</function>
+  <varlistentry xml:id='builtin-functionArgs'>
+    <term><function>builtins.functionArgs</function>
     <replaceable>f</replaceable></term>
 
     <listitem><para>
@@ -499,7 +603,8 @@ stdenv.mkDerivation {
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.fromJSON</function> <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-fromJSON'>
+    <term><function>builtins.fromJSON</function> <replaceable>e</replaceable></term>
 
     <listitem><para>Convert a JSON string to a Nix
     value. For example,
@@ -514,8 +619,9 @@ builtins.fromJSON ''{"x": [1, 2, 3], "y": null}''
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.genList</function>
-  <replaceable>generator</replaceable> <replaceable>length</replaceable></term>
+  <varlistentry xml:id='builtin-genList'>
+    <term><function>builtins.genList</function>
+    <replaceable>generator</replaceable> <replaceable>length</replaceable></term>
 
     <listitem><para>Generate list of size
     <replaceable>length</replaceable>, with each element
@@ -532,8 +638,9 @@ builtins.genList (x: x * x) 5
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.getAttr</function>
-  <replaceable>s</replaceable> <replaceable>set</replaceable></term>
+  <varlistentry xml:id='builtin-getAttr'>
+    <term><function>builtins.getAttr</function>
+    <replaceable>s</replaceable> <replaceable>set</replaceable></term>
 
     <listitem><para><function>getAttr</function> returns the attribute
     named <replaceable>s</replaceable> from
@@ -545,8 +652,9 @@ builtins.genList (x: x * x) 5
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.getEnv</function>
-  <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-getEnv'>
+    <term><function>builtins.getEnv</function>
+    <replaceable>s</replaceable></term>
 
     <listitem><para><function>getEnv</function> returns the value of
     the environment variable <replaceable>s</replaceable>, or an empty
@@ -563,8 +671,9 @@ builtins.genList (x: x * x) 5
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.hasAttr</function>
-  <replaceable>s</replaceable> <replaceable>set</replaceable></term>
+  <varlistentry xml:id='builtin-hasAttr'>
+    <term><function>builtins.hasAttr</function>
+    <replaceable>s</replaceable> <replaceable>set</replaceable></term>
 
     <listitem><para><function>hasAttr</function> returns
     <literal>true</literal> if <replaceable>set</replaceable> has an
@@ -577,8 +686,9 @@ builtins.genList (x: x * x) 5
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.hashString</function>
-  <replaceable>type</replaceable> <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-hashString'>
+    <term><function>builtins.hashString</function>
+    <replaceable>type</replaceable> <replaceable>s</replaceable></term>
 
     <listitem><para>Return a base-16 representation of the
     cryptographic hash of string <replaceable>s</replaceable>.  The
@@ -589,8 +699,9 @@ builtins.genList (x: x * x) 5
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.head</function>
-  <replaceable>list</replaceable></term>
+  <varlistentry xml:id='builtin-head'>
+    <term><function>builtins.head</function>
+    <replaceable>list</replaceable></term>
 
     <listitem><para>Return the first element of a list; abort
     evaluation if the argument isn’t a list or is an empty list.  You
@@ -600,8 +711,9 @@ builtins.genList (x: x * x) 5
   </varlistentry>
 
 
-  <varlistentry><term><function>import</function>
-  <replaceable>path</replaceable></term>
+  <varlistentry xml:id='builtin-import'>
+    <term><function>import</function>
+    <replaceable>path</replaceable></term>
 
     <listitem><para>Load, parse and return the Nix expression in the
     file <replaceable>path</replaceable>.  If <replaceable>path
@@ -655,8 +767,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.intersectAttrs</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-intersectAttrs'>
+    <term><function>builtins.intersectAttrs</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>Return a set consisting of the attributes in the
     set <replaceable>e2</replaceable> that also exist in the set
@@ -665,8 +778,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.isAttrs</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-isAttrs'>
+    <term><function>builtins.isAttrs</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if
     <replaceable>e</replaceable> evaluates to a set, and
@@ -675,8 +789,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.isList</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-isList'>
+    <term><function>builtins.isList</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if
     <replaceable>e</replaceable> evaluates to a list, and
@@ -685,7 +800,7 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.isFunction</function>
+  <varlistentry xml:id='builtin-isFunction'><term><function>builtins.isFunction</function>
   <replaceable>e</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if
@@ -695,8 +810,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.isString</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-isString'>
+    <term><function>builtins.isString</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if
     <replaceable>e</replaceable> evaluates to a string, and
@@ -705,8 +821,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.isInt</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-isInt'>
+    <term><function>builtins.isInt</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if
     <replaceable>e</replaceable> evaluates to an int, and
@@ -715,8 +832,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.isFloat</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-isFloat'>
+    <term><function>builtins.isFloat</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if
     <replaceable>e</replaceable> evaluates to a float, and
@@ -725,8 +843,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.isBool</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-isBool'>
+    <term><function>builtins.isBool</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if
     <replaceable>e</replaceable> evaluates to a bool, and
@@ -735,8 +854,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>isNull</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-isNull'>
+    <term><function>isNull</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if
     <replaceable>e</replaceable> evaluates to <literal>null</literal>,
@@ -750,8 +870,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.length</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-length'>
+    <term><function>builtins.length</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Return the length of the list
     <replaceable>e</replaceable>.</para></listitem>
@@ -759,8 +880,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.lessThan</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-lessThan'>
+    <term><function>builtins.lessThan</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if the number
     <replaceable>e1</replaceable> is less than the number
@@ -772,8 +894,9 @@ x: x + 456</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.listToAttrs</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-listToAttrs'>
+    <term><function>builtins.listToAttrs</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Construct a set from a list specifying the names
     and values of each attribute.  Each element of the list should be
@@ -799,8 +922,9 @@ builtins.listToAttrs
 
   </varlistentry>
 
-  <varlistentry><term><function>map</function>
-  <replaceable>f</replaceable> <replaceable>list</replaceable></term>
+  <varlistentry xml:id='builtin-map'>
+    <term><function>map</function>
+    <replaceable>f</replaceable> <replaceable>list</replaceable></term>
 
     <listitem><para>Apply the function <replaceable>f</replaceable> to
     each element in the list <replaceable>list</replaceable>.  For
@@ -815,14 +939,15 @@ map (x: "foo" + x) [ "bar" "bla" "abc" ]</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.match</function>
-  <replaceable>regex</replaceable> <replaceable>str</replaceable></term>
+  <varlistentry xml:id='builtin-match'>
+    <term><function>builtins.match</function>
+    <replaceable>regex</replaceable> <replaceable>str</replaceable></term>
 
-  <listitem><para>Returns a list if the <link
-  xlink:href="http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html#tag_09_04">extended
-  POSIX regular expression</link> <replaceable>regex</replaceable>
-  matches <replaceable>str</replaceable> precisely, otherwise returns
-  <literal>null</literal>.  Each item in the list is a regex group.
+    <listitem><para>Returns a list if the <link
+    xlink:href="http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html#tag_09_04">extended
+    POSIX regular expression</link> <replaceable>regex</replaceable>
+    matches <replaceable>str</replaceable> precisely, otherwise returns
+    <literal>null</literal>.  Each item in the list is a regex group.
 
 <programlisting>
 builtins.match "ab" "abc"
@@ -848,11 +973,12 @@ builtins.match "[[:space:]]+([[:upper:]]+)[[:space:]]+" "  FOO   "
 
 Evaluates to <literal>[ "foo" ]</literal>.
 
-  </para></listitem>
+    </para></listitem>
   </varlistentry>
 
-  <varlistentry><term><function>builtins.mul</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-mul'>
+    <term><function>builtins.mul</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>Return the product of the numbers
     <replaceable>e1</replaceable> and
@@ -861,8 +987,9 @@ Evaluates to <literal>[ "foo" ]</literal>.
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.parseDrvName</function>
-  <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-parseDrvName'>
+    <term><function>builtins.parseDrvName</function>
+    <replaceable>s</replaceable></term>
 
     <listitem><para>Split the string <replaceable>s</replaceable> into
     a package name and version.  The package name is everything up to
@@ -875,7 +1002,7 @@ Evaluates to <literal>[ "foo" ]</literal>.
 
   </varlistentry>
 
-  <varlistentry>
+  <varlistentry xml:id='builtin-path'>
     <term>
       <function>builtins.path</function>
       <replaceable>args</replaceable>
@@ -945,8 +1072,9 @@ Evaluates to <literal>[ "foo" ]</literal>.
     </listitem>
   </varlistentry>
 
-  <varlistentry><term><function>builtins.pathExists</function>
-  <replaceable>path</replaceable></term>
+  <varlistentry xml:id='builtin-pathExists'>
+    <term><function>builtins.pathExists</function>
+    <replaceable>path</replaceable></term>
 
     <listitem><para>Return <literal>true</literal> if the path
     <replaceable>path</replaceable> exists, and
@@ -969,8 +1097,9 @@ in config.someSetting</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.readDir</function>
-  <replaceable>path</replaceable></term>
+  <varlistentry xml:id='builtin-readDir'>
+    <term><function>builtins.readDir</function>
+    <replaceable>path</replaceable></term>
 
     <listitem><para>Return the contents of the directory
     <replaceable>path</replaceable> as a set mapping directory entries
@@ -991,8 +1120,9 @@ in config.someSetting</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.readFile</function>
-  <replaceable>path</replaceable></term>
+  <varlistentry xml:id='builtin-readFile'>
+    <term><function>builtins.readFile</function>
+    <replaceable>path</replaceable></term>
 
     <listitem><para>Return the contents of the file
     <replaceable>path</replaceable> as a string.</para></listitem>
@@ -1000,8 +1130,9 @@ in config.someSetting</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>removeAttrs</function>
-  <replaceable>set</replaceable> <replaceable>list</replaceable></term>
+  <varlistentry xml:id='builtin-removeAttrs'>
+    <term><function>removeAttrs</function>
+    <replaceable>set</replaceable> <replaceable>list</replaceable></term>
 
     <listitem><para>Remove the attributes listed in
     <replaceable>list</replaceable> from
@@ -1016,8 +1147,9 @@ removeAttrs { x = 1; y = 2; z = 3; } [ "a" "x" "z" ]</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.replaceStrings</function>
-  <replaceable>from</replaceable> <replaceable>to</replaceable> <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-replaceStrings'>
+    <term><function>builtins.replaceStrings</function>
+    <replaceable>from</replaceable> <replaceable>to</replaceable> <replaceable>s</replaceable></term>
 
     <listitem><para>Given string <replaceable>s</replaceable>, replace
     every occurrence of the strings in <replaceable>from</replaceable>
@@ -1033,8 +1165,9 @@ builtins.replaceStrings ["oo" "a"] ["a" "i"] "foobar"
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.seq</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-seq'>
+    <term><function>builtins.seq</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>Evaluate <replaceable>e1</replaceable>, then
     evaluate and return <replaceable>e2</replaceable>. This ensures
@@ -1044,8 +1177,9 @@ builtins.replaceStrings ["oo" "a"] ["a" "i"] "foobar"
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.sort</function>
-  <replaceable>comparator</replaceable> <replaceable>list</replaceable></term>
+  <varlistentry xml:id='builtin-sort'>
+    <term><function>builtins.sort</function>
+    <replaceable>comparator</replaceable> <replaceable>list</replaceable></term>
 
     <listitem><para>Return <replaceable>list</replaceable> in sorted
     order. It repeatedly calls the function
@@ -1067,15 +1201,16 @@ builtins.sort builtins.lessThan [ 483 249 526 147 42 77 ]
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.split</function>
-  <replaceable>regex</replaceable> <replaceable>str</replaceable></term>
+  <varlistentry xml:id='builtin-split'>
+    <term><function>builtins.split</function>
+    <replaceable>regex</replaceable> <replaceable>str</replaceable></term>
 
-  <listitem><para>Returns a list composed of non matched strings interleaved
-  with the lists of the <link
-  xlink:href="http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html#tag_09_04">extended
-  POSIX regular expression</link> <replaceable>regex</replaceable> matches
-  of <replaceable>str</replaceable>. Each item in the lists of matched
-  sequences is a regex group.
+    <listitem><para>Returns a list composed of non-matched strings interleaved
+    with the lists of the <link
+    xlink:href="http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html#tag_09_04">extended
+    POSIX regular expression</link> <replaceable>regex</replaceable> matches
+    of <replaceable>str</replaceable>. Each item in the lists of matched
+    sequences is a regex group.
 
 <programlisting>
 builtins.split "(a)b" "abc"
@@ -1101,11 +1236,12 @@ builtins.split "([[:upper:]]+)" "  FOO   "
 
 Evaluates to <literal>[ "  " [ "FOO" ] "   " ]</literal>.
 
-  </para></listitem>
+    </para></listitem>
   </varlistentry>
 
-  <varlistentry><term><function>builtins.stringLength</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-stringLength'>
+    <term><function>builtins.stringLength</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Return the length of the string
     <replaceable>e</replaceable>.  If <replaceable>e</replaceable> is
@@ -1114,8 +1250,9 @@ Evaluates to <literal>[ "  " [ "FOO" ] "   " ]</literal>.
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.sub</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-sub'>
+    <term><function>builtins.sub</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>Return the difference between the numbers
     <replaceable>e1</replaceable> and
@@ -1124,9 +1261,10 @@ Evaluates to <literal>[ "  " [ "FOO" ] "   " ]</literal>.
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.substring</function>
-  <replaceable>start</replaceable> <replaceable>len</replaceable>
-  <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-substring'>
+    <term><function>builtins.substring</function>
+    <replaceable>start</replaceable> <replaceable>len</replaceable>
+    <replaceable>s</replaceable></term>
 
     <listitem><para>Return the substring of
     <replaceable>s</replaceable> from character position
@@ -1149,8 +1287,9 @@ builtins.substring 0 3 "nixos"
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.tail</function>
-  <replaceable>list</replaceable></term>
+  <varlistentry xml:id='builtin-tail'>
+    <term><function>builtins.tail</function>
+    <replaceable>list</replaceable></term>
 
    <listitem><para>Return the list without its first element, i.e.
    the second through the last elements; abort evaluation if the
    argument isn’t a list or is an empty
@@ -1159,8 +1298,9 @@ builtins.substring 0 3 "nixos"
   </varlistentry>
 
 
-  <varlistentry><term><function>throw</function>
-  <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-throw'>
+    <term><function>throw</function>
+    <replaceable>s</replaceable></term>
 
     <listitem><para>Throw an error message
     <replaceable>s</replaceable>.  This usually aborts Nix expression
@@ -1173,9 +1313,10 @@ builtins.substring 0 3 "nixos"
   </varlistentry>
 
 
-  <varlistentry
-  xml:id='builtin-toFile'><term><function>builtins.toFile</function>
-  <replaceable>name</replaceable> <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-toFile'>
+    <term><function>builtins.toFile</function>
+    <replaceable>name</replaceable>
+    <replaceable>s</replaceable></term>
 
     <listitem><para>Store the string <replaceable>s</replaceable> in a
     file in the Nix store and return its path.  The file has suffix
@@ -1244,14 +1385,15 @@ in foo</programlisting>
     This is not allowed because it would cause a cyclic dependency in
     the computation of the cryptographic hashes for
     <varname>foo</varname> and <varname>bar</varname>.</para>
-    <para>It is also not possible to reference the result of a derivation. 
-    If you are using Nixpkgs, the <literal>writeTextFile</literal> function is able to 
+    <para>It is also not possible to reference the result of a derivation.
+    If you are using Nixpkgs, the <literal>writeTextFile</literal> function is able to
     do that.</para></listitem>
 
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.toJSON</function> <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-toJSON'>
+    <term><function>builtins.toJSON</function> <replaceable>e</replaceable></term>
 
     <listitem><para>Return a string containing a JSON representation
     of <replaceable>e</replaceable>.  Strings, integers, floats, booleans,
@@ -1264,7 +1406,8 @@ in foo</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.toPath</function> <replaceable>s</replaceable></term>
+  <varlistentry xml:id='builtin-toPath'>
+    <term><function>builtins.toPath</function> <replaceable>s</replaceable></term>
 
     <listitem><para>Convert the string value
     <replaceable>s</replaceable> into a path value.  The string
@@ -1277,7 +1420,8 @@ in foo</programlisting>
   </varlistentry>
 
 
-  <varlistentry><term><function>toString</function> <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-toString'>
+    <term><function>toString</function> <replaceable>e</replaceable></term>
 
     <listitem><para>Convert the expression
     <replaceable>e</replaceable> to a string.
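+
+    For example,
+
+<programlisting>
+toString 42
+</programlisting>
+
+    evaluates to the string <literal>"42"</literal>, and
+
+<programlisting>
+toString [ "a" "b" "c" ]
+</programlisting>
+
+    evaluates to <literal>"a b c"</literal> (the string
+    representations of the elements, joined by spaces).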
@@ -1296,7 +1440,8 @@ in foo</programlisting>
   </varlistentry>
 
 
-  <varlistentry xml:id='builtin-toXML'><term><function>builtins.toXML</function> <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-toXML'>
+    <term><function>builtins.toXML</function> <replaceable>e</replaceable></term>
 
     <listitem><para>Return a string containing an XML representation
     of <replaceable>e</replaceable>.  The main application for
@@ -1411,8 +1556,9 @@ stdenv.mkDerivation (rec {
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.trace</function>
-  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+  <varlistentry xml:id='builtin-trace'>
+    <term><function>builtins.trace</function>
+    <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
 
     <listitem><para>Evaluate <replaceable>e1</replaceable> and print its
     abstract syntax representation on standard error.  Then return
@@ -1421,8 +1567,9 @@ stdenv.mkDerivation (rec {
 
   </varlistentry>
 
-  <varlistentry><term><function>builtins.tryEval</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-tryEval'>
+    <term><function>builtins.tryEval</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Try to evaluate <replaceable>e</replaceable>.
     Return a set containing the attributes <literal>success</literal>
@@ -1435,8 +1582,9 @@ stdenv.mkDerivation (rec {
   </varlistentry>
 
 
-  <varlistentry><term><function>builtins.typeOf</function>
-  <replaceable>e</replaceable></term>
+  <varlistentry xml:id='builtin-typeOf'>
+    <term><function>builtins.typeOf</function>
+    <replaceable>e</replaceable></term>
 
     <listitem><para>Return a string representing the type of the value
     <replaceable>e</replaceable>, namely <literal>"int"</literal>,
diff --git a/doc/manual/installation/env-variables.xml b/doc/manual/installation/env-variables.xml
index 91ecd114f6d4..d1ee0bb2e096 100644
--- a/doc/manual/installation/env-variables.xml
+++ b/doc/manual/installation/env-variables.xml
@@ -39,7 +39,7 @@ bundle.</para>
   <step><para>Set the environment variable and install Nix</para>
     <screen>
 $ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
-$ curl https://nixos.org/nix/install | sh
+$ sh &lt;(curl https://nixos.org/nix/install)
 </screen></step>
 
   <step><para>In the shell profile and rc files (for example,
diff --git a/doc/manual/installation/installing-binary.xml b/doc/manual/installation/installing-binary.xml
index 7e8dfb0db3d4..394d8053b942 100644
--- a/doc/manual/installation/installing-binary.xml
+++ b/doc/manual/installation/installing-binary.xml
@@ -6,13 +6,30 @@
 
 <title>Installing a Binary Distribution</title>
 
-<para>If you are using Linux or macOS, the easiest way to install
-Nix is to run the following command:
+<para>If you are using Linux or macOS, the easiest way to install Nix
+is to run the following command:
 
 <screen>
-$ bash &lt;(curl https://nixos.org/nix/install)
+$ sh &lt;(curl https://nixos.org/nix/install)
 </screen>
 
+As of Nix 2.1.0, the Nix installer always defaults to creating a
+single-user installation; however, opting in to the multi-user
+installation is highly recommended.
+</para>
+
+<section xml:id="sect-single-user-installation">
+  <title>Single User Installation</title>
+
+  <para>
+    To explicitly select a single-user installation on your system:
+
+    <screen>
+$ sh &lt;(curl https://nixos.org/nix/install) --no-daemon
+</screen>
+  </para>
+
+<para>
 This will perform a single-user installation of Nix, meaning that
 <filename>/nix</filename> is owned by the invoking user.  You should
 run this under your usual user account, <emphasis>not</emphasis> as
@@ -33,58 +50,141 @@ and <filename>.profile</filename> to source
 the <command>NIX_INSTALLER_NO_MODIFY_PROFILE</command> environment
 variable before executing the install script to disable this
 behaviour.
-
 </para>
 
-<!--
-<para>You can also manually download and install a binary package.
-Binary packages of the latest stable release are available for Fedora,
-Debian, Ubuntu, macOS and various other systems from the <link
-xlink:href="http://nixos.org/nix/download.html">Nix homepage</link>.
-You can also get builds of the latest development release from our
-<link
-xlink:href="http://hydra.nixos.org/job/nix/master/release/latest-finished#tabs-constituents">continuous
-build system</link>.</para>
 
-<para>For Fedora, RPM packages are available.  These can be installed
-or upgraded using <command>rpm -U</command>.  For example,
+<para>You can uninstall Nix simply by running:
 
 <screen>
-$ rpm -U nix-1.8-1.i386.rpm</screen>
+$ rm -rf /nix
+</screen>
 
 </para>
-
-<para>For Debian and Ubuntu, you can download a Deb package and
-install it like this:
+</section>
+
+<section xml:id="sect-multi-user-installation">
+  <title>Multi User Installation</title>
+  <para>
+    The multi-user Nix installation creates system users and a system
+    service for the Nix daemon.
+  </para>
+
+  <itemizedlist>
+    <title>Supported Systems</title>
+
+    <listitem>
+      <para>Linux running systemd, with SELinux disabled</para>
+    </listitem>
+    <listitem><para>macOS</para></listitem>
+  </itemizedlist>
+
+  <para>
+    You can instruct the installer to perform a multi-user
+    installation on your system:
+
+    <screen>
+$ sh &lt;(curl https://nixos.org/nix/install) --daemon
+</screen>
+  </para>
+
+  <para>
+    The multi-user installation of Nix will create build users with
+    user IDs between 30001 and 30032, and a group with the group ID 30000.
+
+    You should run this under your usual user account,
+    <emphasis>not</emphasis> as root. The script will invoke
+    <command>sudo</command> as needed.
+  </para>
+
+  <note><para>
+    If you need Nix to use a different group ID or a different range
+    of user IDs, you will have to download the tarball manually and <link
+    linkend="sect-nix-install-binary-tarball">edit the install
+    script</link>.
+  </para></note>
+
+  <para>
+    The installer will modify <filename>/etc/bashrc</filename> and
+    <filename>/etc/zshrc</filename>, if they exist, backing them up
+    first with a <literal>.backup-before-nix</literal> extension. It
+    will also create <filename>/etc/profile.d/nix.sh</filename>.
+  </para>
+
+  <para>You can uninstall Nix with the following commands:
 
 <screen>
-$ dpkg -i nix_1.8-1_amd64.deb</screen>
+sudo rm -rf /etc/profile.d/nix.sh /etc/nix /nix ~root/.nix-profile ~root/.nix-defexpr ~root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels
+
+# If you are on Linux with systemd, you will need to run:
+sudo systemctl stop nix-daemon.socket
+sudo systemctl stop nix-daemon.service
+sudo systemctl disable nix-daemon.socket
+sudo systemctl disable nix-daemon.service
+sudo systemctl daemon-reload
+
+# If you are on macOS, you will need to run:
+sudo launchctl unload /Library/LaunchDaemons/org.nixos.nix-daemon.plist
+sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist
+</screen>
 
-</para>
--->
+    There may also be references to Nix in
+    <filename>/etc/profile</filename>,
+    <filename>/etc/bashrc</filename>, and
+    <filename>/etc/zshrc</filename>, which you may remove.
+  </para>
 
-<para>You can also download a binary tarball that contains Nix and all
-its dependencies.  (This is what the install script at
-<uri>https://nixos.org/nix/install</uri> does automatically.)  You
-should unpack it somewhere (e.g. in <filename>/tmp</filename>), and
-then run the script named <command>install</command> inside the binary
-tarball:
+</section>
 
-<screen>
-alice$ cd /tmp
-alice$ tar xfj nix-1.8-x86_64-darwin.tar.bz2
-alice$ cd nix-1.8-x86_64-darwin
-alice$ ./install
-</screen>
+<section xml:id="sect-nix-install-pinned-version-url">
+  <title>Installing a pinned Nix version from a URL</title>
 
-</para>
+  <para>
+    NixOS.org hosts version-specific installation URLs for all Nix
+    versions since 1.11.16, at
+    <literal>https://nixos.org/releases/nix/nix-VERSION/install</literal>.
+  </para>
 
-<para>You can uninstall Nix simply by running:
+  <para>
+    These install scripts can be used in the same way as the main
+    NixOS.org installation script:
 
-<screen>
-$ rm -rf /nix
+  <screen>
+$ sh &lt;(curl https://nixos.org/releases/nix/nix-VERSION/install)
 </screen>
+  </para>
 
-</para>
+  <para>
+    In the same directory as each install script, SHA-256 sums and
+    GPG signature files are also provided.
+  </para>
+</section>
+
+<section xml:id="sect-nix-install-binary-tarball">
+  <title>Installing from a binary tarball</title>
 
+  <para>
+    You can also download a binary tarball that contains Nix and all
+    its dependencies.  (This is what the install script at
+    <uri>https://nixos.org/nix/install</uri> does automatically.)  You
+    should unpack it somewhere (e.g. in <filename>/tmp</filename>),
+    and then run the script named <command>install</command> inside
+    the binary tarball:
+
+
+alice$ cd /tmp
+alice$ tar xfj nix-1.8-x86_64-darwin.tar.bz2
+alice$ cd nix-1.8-x86_64-darwin
+alice$ ./install
+</screen>
+  </para>
+
+  <para>
+    If you need to edit the multi-user installation script to use a
+    different group ID or a different user ID range, modify the
+    variables set in the file named
+    <filename>install-multi-user</filename>.
+  </para>
+</section>
 </chapter>
diff --git a/doc/manual/installation/prerequisites-source.xml b/doc/manual/installation/prerequisites-source.xml
index 01e9688d635f..ef14a1d753db 100644
--- a/doc/manual/installation/prerequisites-source.xml
+++ b/doc/manual/installation/prerequisites-source.xml
@@ -29,7 +29,8 @@
   <literal>libbz2</literal> library.  Thus you must have bzip2
   installed, including development headers and libraries.  If your
   distribution does not provide these, you can obtain bzip2 from <link
-  xlink:href="http://www.bzip.org/"/>.</para></listitem>
+  xlink:href="https://web.archive.org/web/20180624184756/http://www.bzip.org/"
+  />.</para></listitem>
 
   <listitem><para><literal>liblzma</literal>, which is provided by
   XZ Utils. If your distribution does not provide this, you can
diff --git a/doc/manual/installation/supported-platforms.xml b/doc/manual/installation/supported-platforms.xml
index 6858573ff407..3e74be49d1f7 100644
--- a/doc/manual/installation/supported-platforms.xml
+++ b/doc/manual/installation/supported-platforms.xml
@@ -10,7 +10,7 @@
 
 <itemizedlist>
 
-  <listitem><para>Linux (i686, x86_64).</para></listitem>
+  <listitem><para>Linux (i686, x86_64, aarch64).</para></listitem>
 
   <listitem><para>macOS (x86_64).</para></listitem>
 
diff --git a/doc/manual/installation/upgrading.xml b/doc/manual/installation/upgrading.xml
new file mode 100644
index 000000000000..30670d7fec9c
--- /dev/null
+++ b/doc/manual/installation/upgrading.xml
@@ -0,0 +1,22 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+      xmlns:xlink="http://www.w3.org/1999/xlink"
+      xmlns:xi="http://www.w3.org/2001/XInclude"
+      version="5.0"
+      xml:id="ch-upgrading-nix">
+
+  <title>Upgrading Nix</title>
+
+  <para>
+    Multi-user Nix users on macOS can upgrade Nix by running:
+    <command>sudo -i sh -c 'nix-channel --update &amp;&amp;
+    nix-env -iA nixpkgs.nix &amp;&amp;
+    launchctl remove org.nixos.nix-daemon &amp;&amp;
+    launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist'</command>
+  </para>
+
+  <para>
+    Single-user installations of Nix can be upgraded by running:
+    <command>nix-channel --update; nix-env -iA nixpkgs.nix</command>
+  </para>
+</chapter>
diff --git a/doc/manual/manual.xml b/doc/manual/manual.xml
index b408b6817727..87d9de28ab14 100644
--- a/doc/manual/manual.xml
+++ b/doc/manual/manual.xml
@@ -32,6 +32,7 @@
 
   <xi:include href="introduction/introduction.xml" />
   <xi:include href="installation/installation.xml" />
+  <xi:include href="installation/upgrading.xml" />
   <xi:include href="packages/package-management.xml" />
   <xi:include href="expressions/writing-nix-expressions.xml" />
   <xi:include href="advanced-topics/advanced-topics.xml" />
diff --git a/doc/manual/packages/s3-substituter.xml b/doc/manual/packages/s3-substituter.xml
index bcd91cfdbccd..ea654392c6b1 100644
--- a/doc/manual/packages/s3-substituter.xml
+++ b/doc/manual/packages/s3-substituter.xml
@@ -12,8 +12,49 @@ from Amazon S3 and S3 compatible services. This uses the same
 <emphasis>binary</emphasis> cache mechanism that Nix usually uses to
 fetch prebuilt binaries from <uri>cache.nixos.org</uri>.</para>
 
+<para>The following options can be specified as URL parameters to
+the S3 URL:</para>
+
+<variablelist>
+  <varlistentry><term><literal>profile</literal></term>
+  <listitem>
+    <para>
+      The name of the AWS configuration profile to use. By default
+      Nix will use the <literal>default</literal> profile.
+    </para>
+  </listitem>
+  </varlistentry>
+
+  <varlistentry><term><literal>region</literal></term>
+  <listitem>
+    <para>
+      The region of the S3 bucket. <literal>us-east-1</literal> by
+      default.
+    </para>
+
+    <para>
+      If your bucket is not in <literal>us-east-1</literal>, you
+      should always explicitly specify the region parameter.
+    </para>
+  </listitem>
+  </varlistentry>
+
+  <varlistentry><term><literal>endpoint</literal></term>
+  <listitem>
+    <para>
+      The URL to your S3-compatible service, for when not using
+      Amazon S3. Do not specify this value if you're using Amazon
+      S3.
+    </para>
+    <note><para>This endpoint must support HTTPS and will use
+    path-based addressing instead of virtual-host-based
+    addressing.</para></note>
+  </listitem>
+  </varlistentry>
+</variablelist>
+
 <para>In this example we will use the bucket named
-<literal>example-bucket</literal>.</para>
+<literal>example-nix-cache</literal>.</para>
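+
+<para>For example, if this bucket were hosted in the
+<literal>eu-west-2</literal> region (an illustrative value; use your
+bucket's actual region), its binary cache URL would be:</para>
+
+<screen>
+s3://example-nix-cache?region=eu-west-2
+</screen>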
 
 <section xml:id="ssec-s3-substituter-anonymous-reads">
   <title>Anonymous Reads to your S3-compatible binary cache</title>
@@ -24,65 +65,56 @@ fetch prebuilt binaries from <uri>cache.nixos.org</uri>.</para>
   cache.</para>
 
  <para>For AWS S3 the binary cache URL for our example bucket will be
-  exactly <uri>https://example-bucket.s3.amazonaws.com</uri>. For S3
-  compatible binary caches ago have to consult your software's
-  documentation.</para>
+  exactly <uri>https://example-nix-cache.s3.amazonaws.com</uri> or
+  <uri>s3://example-nix-cache</uri>. For S3-compatible binary caches,
+  consult that cache's documentation.</para>
 
   <para>Your bucket will need the following bucket policy:</para>
 
-  <programlisting>
-<![CDATA[
+  <programlisting><![CDATA[
 {
-  "Id": "DirectReads",
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Sid": "AlowDirectReads",
-      "Action": [
-        "s3:GetObject"
-      ],
-      "Effect": "Allow",
-      "Resource": "arn:aws:s3:::example-bucket/*",
-      "Principal": "*"
-    }
-  ]
+    "Id": "DirectReads",
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "AlowDirectReads",
+            "Action": [
+                "s3:GetObject",
+                "s3:GetBucketLocation"
+            ],
+            "Effect": "Allow",
+            "Resource": [
+                "arn:aws:s3:::example-nix-cache",
+                "arn:aws:s3:::example-nix-cache/*"
+            ],
+            "Principal": "*"
+        }
+    ]
 }
-]]>
-</programlisting>
+]]></programlisting>
 </section>
 
 <section xml:id="ssec-s3-substituter-authenticated-reads">
   <title>Authenticated Reads to your S3 binary cache</title>
 
  <para>For AWS S3 the binary cache URL for our example bucket will be
-  exactly <uri>s3://example-bucket</uri>.</para>
+  exactly <uri>s3://example-nix-cache</uri>.</para>
 
   <para>Nix will use the <link
   xlink:href="https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default.">default
   credential provider chain</link> for authenticating requests to
   Amazon S3.</para>
 
-  <para>Nix supports authenticated writes to S3 compatible binary
-  caches but only supports Authenticated reads from Amazon S3.
-  Additionally, the following limitations are in place for
-  authenticated reads:</para>
-
-  <itemizedlist>
-    <listitem><para>The bucket must actually be hosted by Amazon S3 and
-    <emphasis>not</emphasis> an S3 compatible
-    service.</para></listitem>
-
-    <listitem><para>The bucket must be within the
-    <literal>us-east-1</literal> region.</para></listitem>
-
-    <listitem><para>The Amazon credentials, if stored in a credential
-    profile, must be stored in the <literal>default</literal>
-    profile.</para></listitem>
-  </itemizedlist>
+  <para>Nix supports authenticated reads from Amazon S3 and
+  S3-compatible binary caches.</para>
 
   <para>Your bucket will need a bucket policy allowing the desired
-  users to perform the <literal>s3:GetObject</literal> action on all
-  objects in the bucket.</para>
+  users to perform the <literal>s3:GetObject</literal> and
+  <literal>s3:GetBucketLocation</literal> actions on the bucket and
+  all objects within it. The anonymous policy in <xref
+  linkend="ssec-s3-substituter-anonymous-reads" /> can be updated to
+  have a restricted <literal>Principal</literal> to support
+  this.</para>
 </section>
 
 
@@ -91,69 +123,49 @@ fetch prebuilt binaries from <uri>cache.nixos.org</uri>.</para>
 
  <para>Nix fully supports writing to Amazon S3 and S3-compatible
  buckets. The binary cache URL for our example bucket will
-  be <uri>s3://example-bucket</uri>.</para>
+  be <uri>s3://example-nix-cache</uri>.</para>
 
   <para>Nix will use the <link
   xlink:href="https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default.">default
   credential provider chain</link> for authenticating requests to
   Amazon S3.</para>
 
-  <para>The following options can be specified as URL parameters to
-  the S3 URL:</para>
-  <variablelist>
-    <varlistentry><term><literal>profile</literal></term>
-    <listitem>
-      <para>
-        The name of the AWS configuration profile to use. By default
-        Nix will use the <literal>default</literal> profile.
-      </para>
-    </listitem>
-    </varlistentry>
-
-    <varlistentry><term><literal>region</literal></term>
-    <listitem>
-      <para>
-        The region of the S3 bucket. <literal>us–east-1</literal> by
-        default.
-      </para>
-    </listitem>
-    </varlistentry>
-
-    <varlistentry><term><literal>endpoint</literal></term>
-    <listitem>
-      <para>
-        The URL to your S3-compatible service, for when not using
-        Amazon S3. Do not specify this value if you're using Amazon
-        S3.
-      </para>
-      <note><para>This endpoint must support HTTPS and will use
-      path-based addressing instead of virtual host based
-      addressing.</para></note>
-    </listitem>
-    </varlistentry>
-  </variablelist>
-
-  <example><title>Uploading with non-default credential profile for Amazon S3</title>
-    <para><command>nix copy --to ssh://machine nixpkgs.hello s3://example-bucket?profile=cache-upload</command></para>
+  <para>Your account will need the following IAM policy to
+  upload to the cache:</para>
+
+  <programlisting><![CDATA[
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "UploadToCache",
+      "Effect": "Allow",
+      "Action": [
+        "s3:AbortMultipartUpload",
+        "s3:GetBucketLocation",
+        "s3:GetObject",
+        "s3:ListBucket",
+        "s3:ListBucketMultipartUploads",
+        "s3:ListMultipartUploadParts",
+        "s3:ListObjects",
+        "s3:PutObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::example-nix-cache",
+        "arn:aws:s3:::example-nix-cache/*"
+      ]
+    }
+  ]
+}
+]]></programlisting>
+
+  <example><title>Uploading with a specific credential profile for Amazon S3</title>
+    <para><command>nix copy --to 's3://example-nix-cache?profile=cache-upload&amp;region=eu-west-2' nixpkgs.hello</command></para>
   </example>
 
   <example><title>Uploading to an S3-Compatible Binary Cache</title>
-    <para><command>nix copy --to ssh://machine nixpkgs.hello s3://example-bucket?profile=cache-upload&amp;endpoint=minio.example.com</command></para>
+    <para><command>nix copy --to 's3://example-nix-cache?profile=cache-upload&amp;endpoint=minio.example.com' nixpkgs.hello</command></para>
   </example>
-
-  <para>The user writing to the bucket will need to perform the
-  following actions against the bucket:</para>
-
-  <itemizedlist>
-    <listitem><para><literal>s3:ListBucket</literal></para></listitem>
-    <listitem><para><literal>s3:GetBucketLocation</literal></para></listitem>
-    <listitem><para><literal>s3:ListObjects</literal></para></listitem>
-    <listitem><para><literal>s3:GetObject</literal></para></listitem>
-    <listitem><para><literal>s3:PutObject</literal></para></listitem>
-    <listitem><para><literal>s3:ListBucketMultipartUploads</literal></para></listitem>
-    <listitem><para><literal>s3:CreateMultipartUpload</literal></para></listitem>
-    <listitem><para><literal>s3:ListMultipartUploadParts</literal></para></listitem>
-    <listitem><para><literal>s3:AbortMultipartUpload</literal></para></listitem>
-  </itemizedlist>
 </section>
 </section>
diff --git a/doc/manual/release-notes/release-notes.xml b/doc/manual/release-notes/release-notes.xml
index ff4085cb792d..e8ff586fa43f 100644
--- a/doc/manual/release-notes/release-notes.xml
+++ b/doc/manual/release-notes/release-notes.xml
@@ -12,6 +12,7 @@
 </partintro>
 -->
 
+<xi:include href="rl-2.2.xml" />
 <xi:include href="rl-2.1.xml" />
 <xi:include href="rl-2.0.xml" />
 <xi:include href="rl-1.11.10.xml" />
diff --git a/doc/manual/release-notes/rl-2.1.xml b/doc/manual/release-notes/rl-2.1.xml
index 9a5f37f6625d..16c243fc191a 100644
--- a/doc/manual/release-notes/rl-2.1.xml
+++ b/doc/manual/release-notes/rl-2.1.xml
@@ -4,7 +4,7 @@
       version="5.0"
       xml:id="ssec-relnotes-2.1">
 
-<title>Release 2.1 (2018-08-31)</title>
+<title>Release 2.1 (2018-09-02)</title>
 
 <para>This is primarily a bug fix release. It also reduces memory
 consumption in certain situations. In addition, it has the following
@@ -13,6 +13,29 @@ new features:</para>
 <itemizedlist>
 
   <listitem>
+    <para>The Nix installer will no longer default to the Multi-User
+    installation for macOS. You can still <link
+    linkend="sect-multi-user-installation">instruct the installer to
+    run in multi-user mode</link>.
+    </para>
+  </listitem>
+
+  <listitem>
+    <para>The Nix installer now supports performing a Multi-User
+    installation on Linux systems running systemd. You
+    can <link
+    linkend="sect-multi-user-installation">select a Multi-User installation</link> by passing the
+    <option>--daemon</option> flag to the installer: <command>sh &lt;(curl
+    https://nixos.org/nix/install) --daemon</command>.
+    </para>
+
+    <para>The multi-user installer cannot handle systems with SELinux.
+    If your system has SELinux enabled, you can <link
+    linkend="sect-single-user-installation">force the installer to run
+    in single-user mode</link>.</para>
+  </listitem>
+
+  <listitem>
     <para>New builtin functions:
     <literal>builtins.bitAnd</literal>,
     <literal>builtins.bitOr</literal>,
diff --git a/doc/manual/release-notes/rl-2.2.xml b/doc/manual/release-notes/rl-2.2.xml
new file mode 100644
index 000000000000..bc28a56c9401
--- /dev/null
+++ b/doc/manual/release-notes/rl-2.2.xml
@@ -0,0 +1,25 @@
+<section xmlns="http://docbook.org/ns/docbook"
+      xmlns:xlink="http://www.w3.org/1999/xlink"
+      xmlns:xi="http://www.w3.org/2001/XInclude"
+      version="5.0"
+      xml:id="ssec-relnotes-2.2">
+
+<title>Release 2.2 (201?-??-??)</title>
+
+<para>This release has the following changes:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>The derivation attribute
+    <varname>requiredSystemFeatures</varname> is now enforced for
+    local builds, and is no longer only used to route builds to
+    remote builders. The features supported by a machine can be
+    specified through the <varname>system-features</varname>
+    configuration setting, as illustrated below.
+    </para>
+  </listitem>
+
+</itemizedlist>
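+
+<para>For example, to declare that a machine supports KVM, its
+<filename>nix.conf</filename> could contain (a sketch; adjust the
+feature list to the machine):</para>
+
+<screen>
+system-features = kvm
+</screen>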
+
+</section>
+
diff --git a/local.mk b/local.mk
index 5d7e0fb2e428..4b380176f2ec 100644
--- a/local.mk
+++ b/local.mk
@@ -6,7 +6,7 @@ dist-files += configure config.h.in nix.spec perl/configure
 
 clean-files += Makefile.config
 
-GLOBAL_CXXFLAGS += -I . -I src -I src/libutil -I src/libstore -I src/libmain -I src/libexpr
+GLOBAL_CXXFLAGS += -I . -I src -I src/libutil -I src/libstore -I src/libmain -I src/libexpr -I src/nix
 
 $(foreach i, config.h $(call rwildcard, src/lib*, *.hh), \
   $(eval $(call install-file-in, $(i), $(includedir)/nix, 0644)))
diff --git a/release.nix b/release.nix
index e359ebcb2ce3..415c87da7cb3 100644
--- a/release.nix
+++ b/release.nix
@@ -1,5 +1,5 @@
 { nix ? builtins.fetchGit ./.
-, nixpkgs ? builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-18.03"; }
+, nixpkgs ? builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-18.09"; }
 , officialRelease ? false
 , systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]
 }:
diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh
index cd71d7947d77..ab20774bbf03 100644
--- a/scripts/install-nix-from-closure.sh
+++ b/scripts/install-nix-from-closure.sh
@@ -30,15 +30,14 @@ if [ "$(uname -s)" = "Darwin" ]; then
     fi
 fi
 
-# Determine if we should punt to the single-user installer or not
+# Mention the multi-user installer if this system appears to support it
 if [ "$(uname -s)" = "Darwin" ]; then
-    INSTALL_MODE=daemon
+    echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
 elif [ "$(uname -s)" = "Linux" ] && [ -e /run/systemd/system ]; then
-    INSTALL_MODE=daemon
-else
-    INSTALL_MODE=no-daemon
+    echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
 fi
 
+INSTALL_MODE=no-daemon
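+# The default is a single-user (no-daemon) installation; the options
+# handled below can override it.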
 # Trivially handle the --daemon / --no-daemon options
 if [ "x${1:-}" = "x--no-daemon" ]; then
     INSTALL_MODE=no-daemon
@@ -47,14 +46,18 @@ elif [ "x${1:-}" = "x--daemon" ]; then
 elif [ "x${1:-}" != "x" ]; then
     (
         echo "Nix Installer [--daemon|--no-daemon]"
+
+        echo "Choose installation method."
         echo ""
-        echo " --daemon:    Force the installer to use the Daemon"
-        echo "              based installer, even though it may not"
-        echo "              work."
+        echo " --daemon:    Installs and configures a background daemon that manages the store,"
+        echo "              providing multi-user support and better isolation for local builds."
+        echo "              Both for security and reproducibility, this method is recommended if"
+        echo "              supported on your platform."
+        echo "              See https://nixos.org/nix/manual/#sect-multi-user-installation"
         echo ""
-        echo " --no-daemon: Force a no-daemon, single-user"
-        echo "              installation even when the preferred"
-        echo "              method is with the daemon."
+        echo " --no-daemon: Simple, single-user installation that does not require root and is"
+        echo "              trivial to uninstall."
+        echo "              (default)"
         echo ""
     ) >&2
     exit
diff --git a/scripts/install.in b/scripts/install.in
index 26ab85ba0992..7bff7b216d9e 100644
--- a/scripts/install.in
+++ b/scripts/install.in
@@ -11,14 +11,14 @@ oops() {
 }
 
 tmpDir="$(mktemp -d -t nix-binary-tarball-unpack.XXXXXXXXXX || \
-          oops "Can\'t create temporary directory for downloading the Nix binary tarball")"
+          oops "Can't create temporary directory for downloading the Nix binary tarball")"
 cleanup() {
     rm -rf "$tmpDir"
 }
 trap cleanup EXIT INT QUIT TERM
 
 require_util() {
-    type "$1" > /dev/null 2>&1 || which "$1" > /dev/null 2>&1 ||
+    type "$1" > /dev/null 2>&1 || command -v "$1" > /dev/null 2>&1 ||
         oops "you do not have '$1' installed, which I need to $2"
 }
 
diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in
index 1be9a0755d85..87d9fe5061a7 100644
--- a/scripts/nix-profile-daemon.sh.in
+++ b/scripts/nix-profile-daemon.sh.in
@@ -68,4 +68,4 @@ elif [ -e "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt" ]; then #
 fi
 
 export NIX_PATH="nixpkgs=@localstatedir@/nix/profiles/per-user/root/channels/nixpkgs:@localstatedir@/nix/profiles/per-user/root/channels"
-export PATH="$HOME/.nix-profile/bin:$HOME/.nix-profile/lib/kde4/libexec:@localstatedir@/nix/profiles/default/bin:@localstatedir@/nix/profiles/default:@localstatedir@/nix/profiles/default/lib/kde4/libexec:$PATH"
+export PATH="$HOME/.nix-profile/bin:@localstatedir@/nix/profiles/default/bin:$PATH"
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc
index 38dbe3e58b26..abf3669b5b35 100644
--- a/src/build-remote/build-remote.cc
+++ b/src/build-remote/build-remote.cc
@@ -17,6 +17,7 @@
 #include "store-api.hh"
 #include "derivations.hh"
 #include "local-store.hh"
+#include "legacy.hh"
 
 using namespace nix;
 using std::cin;
@@ -37,11 +38,9 @@ static AutoCloseFD openSlotLock(const Machine & m, unsigned long long slot)
     return openLockFile(fmt("%s/%s-%d", currentLoad, escapeUri(m.storeUri), slot), true);
 }
 
-int main (int argc, char * * argv)
+static int _main(int argc, char * * argv)
 {
-    return handleExceptions(argv[0], [&]() {
-        initNix();
-
+    {
         logger = makeJSONLogger(*logger);
 
         /* Ensure we don't get any SSH passphrase or host key popups. */
@@ -80,7 +79,7 @@ int main (int argc, char * * argv)
 
         if (machines.empty()) {
             std::cerr << "# decline-permanently\n";
-            return;
+            return 0;
         }
 
         string drvPath;
@@ -90,8 +89,8 @@ int main (int argc, char * * argv)
 
             try {
                 auto s = readString(source);
-                if (s != "try") return;
-            } catch (EndOfFile &) { return; }
+                if (s != "try") return 0;
+            } catch (EndOfFile &) { return 0; }
 
             auto amWilling = readInt(source);
             auto neededSystem = readString(source);
@@ -253,6 +252,8 @@ connected:
             copyPaths(ref<Store>(sshStore), store, missing, NoRepair, NoCheckSigs, NoSubstitute);
         }
 
-        return;
-    });
+        return 0;
+    }
 }
+
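+// Register 'build-remote' as a command that can be invoked through the
+// combined Nix binary rather than being built as a separate program.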
+static RegisterLegacyCommand s1("build-remote", _main);
diff --git a/src/build-remote/local.mk b/src/build-remote/local.mk
deleted file mode 100644
index 50b0409d1886..000000000000
--- a/src/build-remote/local.mk
+++ /dev/null
@@ -1,9 +0,0 @@
-programs += build-remote
-
-build-remote_DIR := $(d)
-
-build-remote_INSTALL_DIR := $(libexecdir)/nix
-
-build-remote_LIBS = libmain libformat libstore libutil
-
-build-remote_SOURCES := $(d)/build-remote.cc
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index f41905787f9e..ab407e56907c 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -6,12 +6,15 @@
 #include "globals.hh"
 #include "eval-inline.hh"
 #include "download.hh"
+#include "json.hh"
 
 #include <algorithm>
 #include <cstring>
 #include <unistd.h>
 #include <sys/time.h>
 #include <sys/resource.h>
+#include <iostream>
+#include <fstream>
 
 #include <sys/time.h>
 #include <sys/resource.h>
@@ -23,7 +26,6 @@
 
 #endif
 
-
 namespace nix {
 
 
@@ -1723,12 +1725,9 @@ bool EvalState::eqValues(Value & v1, Value & v2)
     }
 }
 
-
 void EvalState::printStats()
 {
     bool showStats = getEnv("NIX_SHOW_STATS", "0") != "0";
-    Verbosity v = showStats ? lvlInfo : lvlDebug;
-    printMsg(v, "evaluation statistics:");
 
     struct rusage buf;
     getrusage(RUSAGE_SELF, &buf);
@@ -1739,62 +1738,101 @@ void EvalState::printStats()
     uint64_t bValues = nrValues * sizeof(Value);
     uint64_t bAttrsets = nrAttrsets * sizeof(Bindings) + nrAttrsInAttrsets * sizeof(Attr);
 
-    printMsg(v, format("  time elapsed: %1%") % cpuTime);
-    printMsg(v, format("  size of a value: %1%") % sizeof(Value));
-    printMsg(v, format("  size of an attr: %1%") % sizeof(Attr));
-    printMsg(v, format("  environments allocated count: %1%") % nrEnvs);
-    printMsg(v, format("  environments allocated bytes: %1%") % bEnvs);
-    printMsg(v, format("  list elements count: %1%") % nrListElems);
-    printMsg(v, format("  list elements bytes: %1%") % bLists);
-    printMsg(v, format("  list concatenations: %1%") % nrListConcats);
-    printMsg(v, format("  values allocated count: %1%") % nrValues);
-    printMsg(v, format("  values allocated bytes: %1%") % bValues);
-    printMsg(v, format("  sets allocated: %1% (%2% bytes)") % nrAttrsets % bAttrsets);
-    printMsg(v, format("  right-biased unions: %1%") % nrOpUpdates);
-    printMsg(v, format("  values copied in right-biased unions: %1%") % nrOpUpdateValuesCopied);
-    printMsg(v, format("  symbols in symbol table: %1%") % symbols.size());
-    printMsg(v, format("  size of symbol table: %1%") % symbols.totalSize());
-    printMsg(v, format("  number of thunks: %1%") % nrThunks);
-    printMsg(v, format("  number of thunks avoided: %1%") % nrAvoided);
-    printMsg(v, format("  number of attr lookups: %1%") % nrLookups);
-    printMsg(v, format("  number of primop calls: %1%") % nrPrimOpCalls);
-    printMsg(v, format("  number of function calls: %1%") % nrFunctionCalls);
-    printMsg(v, format("  total allocations: %1% bytes") % (bEnvs + bLists + bValues + bAttrsets));
-
 #if HAVE_BOEHMGC
     GC_word heapSize, totalBytes;
     GC_get_heap_usage_safe(&heapSize, 0, 0, 0, &totalBytes);
-    printMsg(v, format("  current Boehm heap size: %1% bytes") % heapSize);
-    printMsg(v, format("  total Boehm heap allocations: %1% bytes") % totalBytes);
 #endif
-
-    if (countCalls) {
-        v = lvlInfo;
-
-        printMsg(v, format("calls to %1% primops:") % primOpCalls.size());
-        typedef std::multimap<size_t, Symbol> PrimOpCalls_;
-        PrimOpCalls_ primOpCalls_;
-        for (auto & i : primOpCalls)
-            primOpCalls_.insert(std::pair<size_t, Symbol>(i.second, i.first));
-        for (auto i = primOpCalls_.rbegin(); i != primOpCalls_.rend(); ++i)
-            printMsg(v, format("%1$10d %2%") % i->first % i->second);
-
-        printMsg(v, format("calls to %1% functions:") % functionCalls.size());
-        typedef std::multimap<size_t, ExprLambda *> FunctionCalls_;
-        FunctionCalls_ functionCalls_;
-        for (auto & i : functionCalls)
-            functionCalls_.insert(std::pair<size_t, ExprLambda *>(i.second, i.first));
-        for (auto i = functionCalls_.rbegin(); i != functionCalls_.rend(); ++i)
-            printMsg(v, format("%1$10d %2%") % i->first % i->second->showNamePos());
-
-        printMsg(v, format("evaluations of %1% attributes:") % attrSelects.size());
-        typedef std::multimap<size_t, Pos> AttrSelects_;
-        AttrSelects_ attrSelects_;
-        for (auto & i : attrSelects)
-            attrSelects_.insert(std::pair<size_t, Pos>(i.second, i.first));
-        for (auto i = attrSelects_.rbegin(); i != attrSelects_.rend(); ++i)
-            printMsg(v, format("%1$10d %2%") % i->first % i->second);
-
+    if (showStats) {
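+        // Emit the statistics as a JSON object, either to standard
+        // error or, if NIX_SHOW_STATS_PATH names a file (i.e. is not
+        // "-"), to that file.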
+        auto outPath = getEnv("NIX_SHOW_STATS_PATH", "-");
+        std::fstream fs;
+        if (outPath != "-")
+            fs.open(outPath, std::fstream::out);
+        JSONObject topObj(outPath == "-" ? std::cerr : fs, true);
+        topObj.attr("cpuTime",cpuTime);
+        {
+            auto envs = topObj.object("envs");
+            envs.attr("number", nrEnvs);
+            envs.attr("elements", nrValuesInEnvs);
+            envs.attr("bytes", bEnvs);
+        }
+        {
+            auto lists = topObj.object("list");
+            lists.attr("elements", nrListElems);
+            lists.attr("bytes", bLists);
+            lists.attr("concats", nrListConcats);
+        }
+        {
+            auto values = topObj.object("values");
+            values.attr("number", nrValues);
+            values.attr("bytes", bValues);
+        }
+        {
+            auto syms = topObj.object("symbols");
+            syms.attr("number", symbols.size());
+            syms.attr("bytes", symbols.totalSize());
+        }
+        {
+            auto sets = topObj.object("sets");
+            sets.attr("number", nrAttrsets);
+            sets.attr("bytes", bAttrsets);
+            sets.attr("elements", nrAttrsInAttrsets);
+        }
+        {
+            auto sizes = topObj.object("sizes");
+            sizes.attr("Env", sizeof(Env));
+            sizes.attr("Value", sizeof(Value));
+            sizes.attr("Bindings", sizeof(Bindings));
+            sizes.attr("Attr", sizeof(Attr));
+        }
+        topObj.attr("nrOpUpdates", nrOpUpdates);
+        topObj.attr("nrOpUpdateValuesCopied", nrOpUpdateValuesCopied);
+        topObj.attr("nrThunks", nrThunks);
+        topObj.attr("nrAvoided", nrAvoided);
+        topObj.attr("nrLookups", nrLookups);
+        topObj.attr("nrPrimOpCalls", nrPrimOpCalls);
+        topObj.attr("nrFunctionCalls", nrFunctionCalls);
+#if HAVE_BOEHMGC
+        {
+            auto gc = topObj.object("gc");
+            gc.attr("heapSize", heapSize);
+            gc.attr("totalBytes", totalBytes);
+        }
+#endif
+        if (countCalls) {
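+            // Per-primop, per-function and per-attribute call counts,
+            // collected only when call counting is enabled (via the
+            // NIX_COUNT_CALLS environment variable).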
+            {
+                auto obj = topObj.object("primops");
+                for (auto & i : primOpCalls)
+                    obj.attr(i.first, i.second);
+            }
+            {
+                auto list = topObj.list("functions");
+                for (auto & i : functionCalls) {
+                    auto obj = list.object();
+                    if (i.first->name.set())
+                        obj.attr("name", (const string &) i.first->name);
+                    else
+                        obj.attr("name", nullptr);
+                    if (i.first->pos) {
+                        obj.attr("file", (const string &) i.first->pos.file);
+                        obj.attr("line", i.first->pos.line);
+                        obj.attr("column", i.first->pos.column);
+                    }
+                    obj.attr("count", i.second);
+                }
+            }
+            {
+                auto list = topObj.list("attributes");
+                for (auto & i : attrSelects) {
+                    auto obj = list.object();
+                    if (i.first) {
+                        obj.attr("file", (const string &) i.first.file);
+                        obj.attr("line", i.first.line);
+                        obj.attr("column", i.first.column);
+                    }
+                    obj.attr("count", i.second);
+                }
+            }
+        }
     }
 }
 
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index 6f82c6c404f2..7372134e2c9b 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -866,7 +866,7 @@ static void prim_baseNameOf(EvalState & state, const Pos & pos, Value * * args,
 static void prim_dirOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
     PathSet context;
-    Path dir = dirOf(state.coerceToPath(pos, *args[0], context));
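+    /* Coerce the argument to a string (rather than a path) so that
+       'dirOf' also works on plain strings; the result is returned as
+       a path only if the argument was a path. */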
+    Path dir = dirOf(state.coerceToString(pos, *args[0], context, false, false));
     if (args[0]->type == tPath) mkPath(v, dir.c_str()); else mkString(v, dir, context);
 }
 
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index cd37f7a3fc08..cf4218a261fa 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -11,6 +11,7 @@
 #include "compression.hh"
 #include "json.hh"
 #include "nar-info.hh"
+#include "parsed-derivations.hh"
 
 #include <algorithm>
 #include <iostream>
@@ -20,6 +21,7 @@
 #include <future>
 #include <chrono>
 #include <regex>
+#include <queue>
 
 #include <limits.h>
 #include <sys/time.h>
@@ -740,6 +742,8 @@ private:
     /* The derivation stored at drvPath. */
     std::unique_ptr<BasicDerivation> drv;
 
+    std::unique_ptr<ParsedDerivation> parsedDrv;
+
     /* The remainder is state held during the build. */
 
     /* Locks on the output paths. */
@@ -854,7 +858,7 @@ private:
        building multiple times. Since this contains the hash, it
        allows us to compare whether two rounds produced the same
        result. */
-    ValidPathInfos prevInfos;
+    std::map<Path, ValidPathInfo> prevInfos;
 
     const uid_t sandboxUid = 1000;
     const gid_t sandboxGid = 100;
@@ -935,6 +939,11 @@ private:
        as valid. */
     void registerOutputs();
 
+    /* Check that an output meets the requirements specified by the
+       'outputChecks' attribute (or the legacy
+       '{allowed,disallowed}{References,Requisites}' attributes). */
+    void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
+
     /* Open a log file and a pipe to it. */
     Path openLogFile();
 
@@ -1139,6 +1148,8 @@ void DerivationGoal::haveDerivation()
         return;
     }
 
+    parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *drv);
+
     /* We are first going to try to create the invalid output paths
        through substitutes.  If that doesn't work, we'll build
        them. */
@@ -1395,7 +1406,7 @@ void DerivationGoal::tryToBuild()
     /* Don't do a remote build if the derivation has the attribute
        `preferLocalBuild' set.  Also, check and repair modes are only
        supported for local builds. */
-    bool buildLocally = buildMode != bmNormal || drv->willBuildLocally();
+    bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally();
 
     auto started = [&]() {
         auto msg = fmt(
@@ -1641,19 +1652,13 @@ HookReply DerivationGoal::tryBuildHook()
 
     try {
 
-        /* Tell the hook about system features (beyond the system type)
-           required from the build machine.  (The hook could parse the
-           drv file itself, but this is easier.) */
-        Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures"));
-        for (auto & i : features) checkStoreName(i); /* !!! abuse */
-
         /* Send the request to the hook. */
         worker.hook->sink
             << "try"
             << (worker.getNrLocalBuilds() < settings.maxBuildJobs ? 1 : 0)
             << drv->platform
             << drvPath
-            << features;
+            << parsedDrv->getRequiredSystemFeatures();
         worker.hook->sink.flush();
 
         /* Read the first line of input, which should be a word indicating
@@ -1793,23 +1798,26 @@ static void preloadNSS() {
 void DerivationGoal::startBuilder()
 {
     /* Right platform? */
-    if (!drv->canBuildLocally()) {
-        throw Error(
-            format("a '%1%' is required to build '%3%', but I am a '%2%'")
-            % drv->platform % settings.thisSystem % drvPath);
-    }
+    if (!parsedDrv->canBuildLocally())
+        throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
+            drv->platform,
+            concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()),
+            drvPath,
+            settings.thisSystem,
+            concatStringsSep(", ", settings.systemFeatures));
 
     if (drv->isBuiltin())
         preloadNSS();
 
 #if __APPLE__
-    additionalSandboxProfile = get(drv->env, "__sandboxProfile");
+    additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
 #endif
 
     /* Are we doing a chroot build? */
     {
+        auto noChroot = parsedDrv->getBoolAttr("__noChroot");
         if (settings.sandboxMode == smEnabled) {
-            if (get(drv->env, "__noChroot") == "1")
+            if (noChroot)
                 throw Error(format("derivation '%1%' has '__noChroot' set, "
                     "but that's not allowed when 'sandbox' is 'true'") % drvPath);
 #if __APPLE__
@@ -1822,7 +1830,7 @@ void DerivationGoal::startBuilder()
         else if (settings.sandboxMode == smDisabled)
             useChroot = false;
         else if (settings.sandboxMode == smRelaxed)
-            useChroot = !fixedOutput && get(drv->env, "__noChroot") != "1";
+            useChroot = !fixedOutput && !noChroot;
     }
 
     if (worker.store.storeDir != worker.store.realStoreDir) {
@@ -1873,7 +1881,7 @@ void DerivationGoal::startBuilder()
     writeStructuredAttrs();
 
     /* Handle exportReferencesGraph(), if set. */
-    if (!drv->env.count("__json")) {
+    if (!parsedDrv->getStructuredAttrs()) {
         /* The `exportReferencesGraph' feature allows the references graph
            to be passed to a builder.  This attribute should be a list of
            pairs [name1 path1 name2 path2 ...].  The references graph of
@@ -1938,7 +1946,7 @@ void DerivationGoal::startBuilder()
         PathSet allowedPaths = settings.allowedImpureHostPrefixes;
 
         /* This works like the above, except on a per-derivation level */
-        Strings impurePaths = tokenizeString<Strings>(get(drv->env, "__impureHostDeps"));
+        auto impurePaths = parsedDrv->getStringsAttr("__impureHostDeps").value_or(Strings());
 
         for (auto & i : impurePaths) {
             bool found = false;
@@ -2306,7 +2314,7 @@ void DerivationGoal::initEnv()
        passAsFile is ignored in structure mode because it's not
        needed (attributes are not passed through the environment, so
        there is no size constraint). */
-    if (!drv->env.count("__json")) {
+    if (!parsedDrv->getStructuredAttrs()) {
 
         StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile"));
         int fileNr = 0;
@@ -2353,8 +2361,8 @@ void DerivationGoal::initEnv()
        fixed-output derivations is by definition pure (since we
        already know the cryptographic hash of the output). */
     if (fixedOutput) {
-        Strings varNames = tokenizeString<Strings>(get(drv->env, "impureEnvVars"));
-        for (auto & i : varNames) env[i] = getEnv(i);
+        for (auto & i : parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings()))
+            env[i] = getEnv(i);
     }
 
     /* Currently structured log messages piggyback on stderr, but we
@@ -2369,111 +2377,103 @@ static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
 
 void DerivationGoal::writeStructuredAttrs()
 {
-    auto jsonAttr = drv->env.find("__json");
-    if (jsonAttr == drv->env.end()) return;
-
-    try {
+    auto & structuredAttrs = parsedDrv->getStructuredAttrs();
+    if (!structuredAttrs) return;
 
-        auto jsonStr = rewriteStrings(jsonAttr->second, inputRewrites);
+    auto json = *structuredAttrs;
 
-        auto json = nlohmann::json::parse(jsonStr);
-
-        /* Add an "outputs" object containing the output paths. */
-        nlohmann::json outputs;
-        for (auto & i : drv->outputs)
-            outputs[i.first] = rewriteStrings(i.second.path, inputRewrites);
-        json["outputs"] = outputs;
-
-        /* Handle exportReferencesGraph. */
-        auto e = json.find("exportReferencesGraph");
-        if (e != json.end() && e->is_object()) {
-            for (auto i = e->begin(); i != e->end(); ++i) {
-                std::ostringstream str;
-                {
-                    JSONPlaceholder jsonRoot(str, true);
-                    PathSet storePaths;
-                    for (auto & p : *i)
-                        storePaths.insert(p.get<std::string>());
-                    worker.store.pathInfoToJSON(jsonRoot,
-                        exportReferences(storePaths), false, true);
-                }
-                json[i.key()] = nlohmann::json::parse(str.str()); // urgh
+    /* Add an "outputs" object containing the output paths. */
+    nlohmann::json outputs;
+    for (auto & i : drv->outputs)
+        outputs[i.first] = rewriteStrings(i.second.path, inputRewrites);
+    json["outputs"] = outputs;
+
+    /* Handle exportReferencesGraph. */
+    auto e = json.find("exportReferencesGraph");
+    if (e != json.end() && e->is_object()) {
+        for (auto i = e->begin(); i != e->end(); ++i) {
+            std::ostringstream str;
+            {
+                JSONPlaceholder jsonRoot(str, true);
+                PathSet storePaths;
+                for (auto & p : *i)
+                    storePaths.insert(p.get<std::string>());
+                worker.store.pathInfoToJSON(jsonRoot,
+                    exportReferences(storePaths), false, true);
             }
+            json[i.key()] = nlohmann::json::parse(str.str()); // urgh
         }
+    }
 
-        writeFile(tmpDir + "/.attrs.json", json.dump());
-
-        /* As a convenience to bash scripts, write a shell file that
-           maps all attributes that are representable in bash -
-           namely, strings, integers, nulls, Booleans, and arrays and
-           objects consisting entirely of those values. (So nested
-           arrays or objects are not supported.) */
+    writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites));
 
-        auto handleSimpleType = [](const nlohmann::json & value) -> std::experimental::optional<std::string> {
-            if (value.is_string())
-                return shellEscape(value);
+    /* As a convenience to bash scripts, write a shell file that
+       maps all attributes that are representable in bash -
+       namely, strings, integers, nulls, Booleans, and arrays and
+       objects consisting entirely of those values. (So nested
+       arrays or objects are not supported.) */
 
-            if (value.is_number()) {
-                auto f = value.get<float>();
-                if (std::ceil(f) == f)
-                    return std::to_string(value.get<int>());
-            }
+    auto handleSimpleType = [](const nlohmann::json & value) -> std::experimental::optional<std::string> {
+        if (value.is_string())
+            return shellEscape(value);
 
-            if (value.is_null())
-                return std::string("''");
+        if (value.is_number()) {
+            auto f = value.get<float>();
+            if (std::ceil(f) == f)
+                return std::to_string(value.get<int>());
+        }
 
-            if (value.is_boolean())
-                return value.get<bool>() ? std::string("1") : std::string("");
+        if (value.is_null())
+            return std::string("''");
 
-            return {};
-        };
+        if (value.is_boolean())
+            return value.get<bool>() ? std::string("1") : std::string("");
 
-        std::string jsonSh;
+        return {};
+    };
 
-        for (auto i = json.begin(); i != json.end(); ++i) {
+    std::string jsonSh;
 
-            if (!std::regex_match(i.key(), shVarName)) continue;
+    for (auto i = json.begin(); i != json.end(); ++i) {
 
-            auto & value = i.value();
+        if (!std::regex_match(i.key(), shVarName)) continue;
 
-            auto s = handleSimpleType(value);
-            if (s)
-                jsonSh += fmt("declare %s=%s\n", i.key(), *s);
+        auto & value = i.value();
 
-            else if (value.is_array()) {
-                std::string s2;
-                bool good = true;
+        auto s = handleSimpleType(value);
+        if (s)
+            jsonSh += fmt("declare %s=%s\n", i.key(), *s);
 
-                for (auto i = value.begin(); i != value.end(); ++i) {
-                    auto s3 = handleSimpleType(i.value());
-                    if (!s3) { good = false; break; }
-                    s2 += *s3; s2 += ' ';
-                }
+        else if (value.is_array()) {
+            std::string s2;
+            bool good = true;
 
-                if (good)
-                    jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2);
+            for (auto i = value.begin(); i != value.end(); ++i) {
+                auto s3 = handleSimpleType(i.value());
+                if (!s3) { good = false; break; }
+                s2 += *s3; s2 += ' ';
             }
 
-            else if (value.is_object()) {
-                std::string s2;
-                bool good = true;
+            if (good)
+                jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2);
+        }
 
-                for (auto i = value.begin(); i != value.end(); ++i) {
-                    auto s3 = handleSimpleType(i.value());
-                    if (!s3) { good = false; break; }
-                    s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3);
-                }
+        else if (value.is_object()) {
+            std::string s2;
+            bool good = true;
 
-                if (good)
-                    jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2);
+            for (auto i = value.begin(); i != value.end(); ++i) {
+                auto s3 = handleSimpleType(i.value());
+                if (!s3) { good = false; break; }
+                s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3);
             }
-        }
-
-        writeFile(tmpDir + "/.attrs.sh", jsonSh);
 
-    } catch (std::exception & e) {
-        throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what());
+            if (good)
+                jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2);
+        }
     }
+
+    writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites));
 }
 
 
@@ -2628,7 +2628,7 @@ void DerivationGoal::runChild()
                 createDirs(chrootRootDir + "/dev/shm");
                 createDirs(chrootRootDir + "/dev/pts");
                 ss.push_back("/dev/full");
-                if (pathExists("/dev/kvm"))
+                if (settings.systemFeatures.get().count("kvm") && pathExists("/dev/kvm"))
                     ss.push_back("/dev/kvm");
                 ss.push_back("/dev/null");
                 ss.push_back("/dev/random");
@@ -2917,7 +2917,7 @@ void DerivationGoal::runChild()
 
             writeFile(sandboxFile, sandboxProfile);
 
-            bool allowLocalNetworking = get(drv->env, "__darwinAllowLocalNetworking") == "1";
+            bool allowLocalNetworking = parsedDrv->getBoolAttr("__darwinAllowLocalNetworking");
 
             /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms
                to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */
@@ -2989,10 +2989,9 @@ void DerivationGoal::runChild()
 /* Parse a list of reference specifiers.  Each element must either be
    a store path, or the symbolic name of the output of the derivation
    (such as `out'). */
-PathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv, string attr)
+PathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv, const Strings & paths)
 {
     PathSet result;
-    Paths paths = tokenizeString<Paths>(attr);
     for (auto & i : paths) {
         if (store.isStorePath(i))
             result.insert(i);
@@ -3017,7 +3016,7 @@ void DerivationGoal::registerOutputs()
         if (allValid) return;
     }
 
-    ValidPathInfos infos;
+    std::map<std::string, ValidPathInfo> infos;
 
     /* Set of inodes seen during calls to canonicalisePathMetaData()
        for this build's outputs.  This needs to be shared between
@@ -3121,7 +3120,7 @@ void DerivationGoal::registerOutputs()
                the derivation to its content-addressed location. */
             Hash h2 = recursive ? hashPath(h.type, actualPath).first : hashFile(h.type, actualPath);
 
-            Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]);
+            Path dest = worker.store.makeFixedOutputPath(recursive, h2, storePathToName(path));
 
             if (h != h2) {
 
@@ -3202,48 +3201,6 @@ void DerivationGoal::registerOutputs()
                 debug(format("referenced input: '%1%'") % i);
         }
 
-        /* Enforce `allowedReferences' and friends. */
-        auto checkRefs = [&](const string & attrName, bool allowed, bool recursive) {
-            if (drv->env.find(attrName) == drv->env.end()) return;
-
-            PathSet spec = parseReferenceSpecifiers(worker.store, *drv, get(drv->env, attrName));
-
-            PathSet used;
-            if (recursive) {
-                /* Our requisites are the union of the closures of our references. */
-                for (auto & i : references)
-                    /* Don't call computeFSClosure on ourselves. */
-                    if (path != i)
-                        worker.store.computeFSClosure(i, used);
-            } else
-                used = references;
-
-            PathSet badPaths;
-
-            for (auto & i : used)
-                if (allowed) {
-                    if (spec.find(i) == spec.end())
-                        badPaths.insert(i);
-                } else {
-                    if (spec.find(i) != spec.end())
-                        badPaths.insert(i);
-                }
-
-            if (!badPaths.empty()) {
-                string badPathsStr;
-                for (auto & i : badPaths) {
-                    badPathsStr += "\n\t";
-                    badPathsStr += i;
-                }
-                throw BuildError(format("output '%1%' is not allowed to refer to the following paths:%2%") % actualPath % badPathsStr);
-            }
-        };
-
-        checkRefs("allowedReferences", true, false);
-        checkRefs("allowedRequisites", true, true);
-        checkRefs("disallowedReferences", false, false);
-        checkRefs("disallowedRequisites", false, true);
-
         if (curRound == nrRounds) {
             worker.store.optimisePath(actualPath); // FIXME: combine with scanForReferences()
             worker.markContentsGood(path);
@@ -3259,11 +3216,14 @@ void DerivationGoal::registerOutputs()
 
         if (!info.references.empty()) info.ca.clear();
 
-        infos.push_back(info);
+        infos[i.first] = info;
     }
 
     if (buildMode == bmCheck) return;
 
+    /* Apply output checks. */
+    checkOutputs(infos);
+
     /* Compare the result with the previous round, and report which
        path is different, if any.*/
     if (curRound > 1 && prevInfos != infos) {
@@ -3271,16 +3231,16 @@ void DerivationGoal::registerOutputs()
         for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
             if (!(*i == *j)) {
                 result.isNonDeterministic = true;
-                Path prev = i->path + checkSuffix;
+                Path prev = i->second.path + checkSuffix;
                 bool prevExists = keepPreviousRound && pathExists(prev);
                 auto msg = prevExists
-                    ? fmt("output '%1%' of '%2%' differs from '%3%' from previous round", i->path, drvPath, prev)
-                    : fmt("output '%1%' of '%2%' differs from previous round", i->path, drvPath);
+                    ? fmt("output '%1%' of '%2%' differs from '%3%' from previous round", i->second.path, drvPath, prev)
+                    : fmt("output '%1%' of '%2%' differs from previous round", i->second.path, drvPath);
 
                 auto diffHook = settings.diffHook;
                 if (prevExists && diffHook != "" && runDiffHook) {
                     try {
-                        auto diff = runProgram(diffHook, true, {prev, i->path});
+                        auto diff = runProgram(diffHook, true, {prev, i->second.path});
                         if (diff != "")
                             printError(chomp(diff));
                     } catch (Error & error) {
@@ -3325,7 +3285,11 @@ void DerivationGoal::registerOutputs()
     /* Register each output path as valid, and register the sets of
        paths referenced by each of them.  If there are cycles in the
        outputs, this will fail. */
-    worker.store.registerValidPaths(infos);
+    {
+        ValidPathInfos infos2;
+        for (auto & i : infos) infos2.push_back(i.second);
+        worker.store.registerValidPaths(infos2);
+    }
 
     /* In case of a fixed-output derivation hash mismatch, throw an
        exception now that we have registered the output as valid. */
@@ -3334,6 +3298,153 @@ void DerivationGoal::registerOutputs()
 }
 
 
+void DerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & outputs)
+{
+    std::map<Path, const ValidPathInfo &> outputsByPath;
+    for (auto & output : outputs)
+        outputsByPath.emplace(output.second.path, output.second);
+
+    for (auto & output : outputs) {
+        auto & outputName = output.first;
+        auto & info = output.second;
+
+        struct Checks
+        {
+            std::experimental::optional<uint64_t> maxSize, maxClosureSize;
+            std::experimental::optional<Strings> allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites;
+        };
+
+        /* Compute the closure and closure size of some output. This
+           is slightly tricky because some of its references (namely
+           other outputs) may not be valid yet. */
+        auto getClosure = [&](const Path & path)
+        {
+            uint64_t closureSize = 0;
+            PathSet pathsDone;
+            std::queue<Path> pathsLeft;
+            pathsLeft.push(path);
+
+            while (!pathsLeft.empty()) {
+                auto path = pathsLeft.front();
+                pathsLeft.pop();
+                if (!pathsDone.insert(path).second) continue;
+
+                auto i = outputsByPath.find(path);
+                if (i != outputsByPath.end()) {
+                    closureSize += i->second.narSize;
+                    for (auto & ref : i->second.references)
+                        pathsLeft.push(ref);
+                } else {
+                    auto info = worker.store.queryPathInfo(path);
+                    closureSize += info->narSize;
+                    for (auto & ref : info->references)
+                        pathsLeft.push(ref);
+                }
+            }
+
+            return std::make_pair(pathsDone, closureSize);
+        };
+
+        auto checkRefs = [&](const std::experimental::optional<Strings> & value, bool allowed, bool recursive)
+        {
+            if (!value) return;
+
+            PathSet spec = parseReferenceSpecifiers(worker.store, *drv, *value);
+
+            PathSet used = recursive ? getClosure(info.path).first : info.references;
+
+            PathSet badPaths;
+
+            for (auto & i : used)
+                if (allowed) {
+                    if (spec.find(i) == spec.end())
+                        badPaths.insert(i);
+                } else {
+                    if (spec.find(i) != spec.end())
+                        badPaths.insert(i);
+                }
+
+            if (!badPaths.empty()) {
+                string badPathsStr;
+                for (auto & i : badPaths) {
+                    badPathsStr += "\n  ";
+                    badPathsStr += i;
+                }
+                throw BuildError("output '%s' is not allowed to refer to the following paths:%s", info.path, badPathsStr);
+            }
+        };
+
+        auto applyChecks = [&](const Checks & checks)
+        {
+            if (checks.maxSize && info.narSize > *checks.maxSize)
+                throw BuildError("path '%s' is too large at %d bytes; limit is %d bytes",
+                    info.path, info.narSize, *checks.maxSize);
+
+            if (checks.maxClosureSize) {
+                uint64_t closureSize = getClosure(info.path).second;
+                if (closureSize > *checks.maxClosureSize)
+                    throw BuildError("closure of path '%s' is too large at %d bytes; limit is %d bytes",
+                        info.path, closureSize, *checks.maxClosureSize);
+            }
+
+            checkRefs(checks.allowedReferences, true, false);
+            checkRefs(checks.allowedRequisites, true, true);
+            checkRefs(checks.disallowedReferences, false, false);
+            checkRefs(checks.disallowedRequisites, false, true);
+        };
+
+        if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) {
+            auto outputChecks = structuredAttrs->find("outputChecks");
+            if (outputChecks != structuredAttrs->end()) {
+                auto output = outputChecks->find(outputName);
+
+                if (output != outputChecks->end()) {
+                    Checks checks;
+
+                    auto maxSize = output->find("maxSize");
+                    if (maxSize != output->end())
+                        checks.maxSize = maxSize->get<uint64_t>();
+
+                    auto maxClosureSize = output->find("maxClosureSize");
+                    if (maxClosureSize != output->end())
+                        checks.maxClosureSize = maxClosureSize->get<uint64_t>();
+
+                    auto get = [&](const std::string & name) -> std::experimental::optional<Strings> {
+                        auto i = output->find(name);
+                        if (i != output->end()) {
+                            Strings res;
+                            for (auto j = i->begin(); j != i->end(); ++j) {
+                                if (!j->is_string())
+                                    throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
+                                res.push_back(j->get<std::string>());
+                            }

+                            return res;
+                        }
+                        return {};
+                    };
+
+                    checks.allowedReferences = get("allowedReferences");
+                    checks.allowedRequisites = get("allowedRequisites");
+                    checks.disallowedReferences = get("disallowedReferences");
+                    checks.disallowedRequisites = get("disallowedRequisites");
+
+                    applyChecks(checks);
+                }
+            }
+        } else {
+            // legacy non-structured-attributes case
+            Checks checks;
+            checks.allowedReferences = parsedDrv->getStringsAttr("allowedReferences");
+            checks.allowedRequisites = parsedDrv->getStringsAttr("allowedRequisites");
+            checks.disallowedReferences = parsedDrv->getStringsAttr("disallowedReferences");
+            checks.disallowedRequisites = parsedDrv->getStringsAttr("disallowedRequisites");
+            applyChecks(checks);
+        }
+    }
+}
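
For reference, a self-contained sketch (not part of the patch) of the breadth-first closure-size computation that getClosure() performs; OutputInfo and closureOf are illustrative names, and the store lookup for already-registered paths is elided:

    #include <cstdint>
    #include <map>
    #include <queue>
    #include <set>
    #include <string>
    #include <utility>

    // Hypothetical stand-in for the not-yet-registered outputs of this build.
    struct OutputInfo { uint64_t narSize; std::set<std::string> references; };

    // Breadth-first walk over the references graph, preferring the in-memory
    // outputs over a store query, as getClosure() does above.
    std::pair<std::set<std::string>, uint64_t> closureOf(
        const std::string & start,
        const std::map<std::string, OutputInfo> & outputsByPath)
    {
        uint64_t closureSize = 0;
        std::set<std::string> done;
        std::queue<std::string> left;
        left.push(start);

        while (!left.empty()) {
            auto path = left.front();
            left.pop();
            if (!done.insert(path).second) continue; // already visited

            auto i = outputsByPath.find(path);
            if (i == outputsByPath.end()) continue;  // the real code queries the store here
            closureSize += i->second.narSize;
            for (auto & ref : i->second.references)
                left.push(ref);
        }

        return {done, closureSize};
    }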
+
+
 Path DerivationGoal::openLogFile()
 {
     logSize = 0;
@@ -3682,6 +3793,19 @@ void SubstitutionGoal::tryNext()
     } catch (InvalidPath &) {
         tryNext();
         return;
+    } catch (SubstituterDisabled &) {
+        if (settings.tryFallback) {
+            tryNext();
+            return;
+        }
+        throw;
+    } catch (Error & e) {
+        if (settings.tryFallback) {
+            printError(e.what());
+            tryNext();
+            return;
+        }
+        throw;
     }
 
     /* Update the total expected download size. */
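
The fallback behaviour added above amounts to: on a substituter failure, report it and move on rather than abort. A minimal sketch (not part of the patch), with substituteFrom and the callable list as illustrative stand-ins:

    #include <exception>
    #include <functional>
    #include <iostream>
    #include <vector>

    // Try each substituter in turn; with fallback enabled, failures are
    // reported and the next one is tried instead of failing the build.
    bool substituteFrom(const std::vector<std::function<void()>> & substituters,
        bool tryFallback)
    {
        for (auto & trySubstituter : substituters) {
            try {
                trySubstituter();
                return true;               // substitution succeeded
            } catch (std::exception & e) {
                if (!tryFallback) throw;   // old behaviour: hard failure
                std::cerr << e.what() << "\n";
            }
        }
        return false;                      // fall back to building from source
    }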
diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc
index b4dcb35f951a..92aec63a0379 100644
--- a/src/libstore/builtins/fetchurl.cc
+++ b/src/libstore/builtins/fetchurl.cc
@@ -24,6 +24,7 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
 
     Path storePath = getAttr("out");
     auto mainUrl = getAttr("url");
+    bool unpack = get(drv.env, "unpack", "") == "1";
 
     /* Note: have to use a fresh downloader here because we're in
        a forked process. */
@@ -40,12 +41,12 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
             request.decompress = false;
 
             auto decompressor = makeDecompressionSink(
-                hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
+                unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
             downloader->download(std::move(request), *decompressor);
             decompressor->finish();
         });
 
-        if (get(drv.env, "unpack", "") == "1")
+        if (unpack)
             restorePath(storePath, *source);
         else
             writeFile(storePath, *source);
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index 1e187ec5e954..3961126fff9c 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -36,12 +36,6 @@ Path BasicDerivation::findOutput(const string & id) const
 }
 
 
-bool BasicDerivation::willBuildLocally() const
-{
-    return get(env, "preferLocalBuild") == "1" && canBuildLocally();
-}
-
-
 bool BasicDerivation::substitutesAllowed() const
 {
     return get(env, "allowSubstitutes", "1") == "1";
@@ -54,14 +48,6 @@ bool BasicDerivation::isBuiltin() const
 }
 
 
-bool BasicDerivation::canBuildLocally() const
-{
-    return platform == settings.thisSystem
-        || settings.extraPlatforms.get().count(platform) > 0
-        || isBuiltin();
-}
-
-
 Path writeDerivation(ref<Store> store,
     const Derivation & drv, const string & name, RepairFlag repair)
 {
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index 7b97730d3bf2..9753e796db5f 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -56,14 +56,10 @@ struct BasicDerivation
        the given derivation. */
     Path findOutput(const string & id) const;
 
-    bool willBuildLocally() const;
-
     bool substitutesAllowed() const;
 
     bool isBuiltin() const;
 
-    bool canBuildLocally() const;
-
     /* Return true iff this is a fixed-output derivation. */
     bool isFixedOutput() const;
 
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
index 973fca0b130f..f44f1836b31e 100644
--- a/src/libstore/download.cc
+++ b/src/libstore/download.cc
@@ -345,7 +345,7 @@ struct CurlDownloader : public Downloader
                 done = true;
 
                 try {
-                    act.progress(result.data->size(), result.data->size());
+                    act.progress(result.bodySize, result.bodySize);
                     callback(std::move(result));
                 } catch (...) {
                     done = true;
@@ -710,11 +710,12 @@ void Downloader::download(DownloadRequest && request, Sink & sink)
 
         /* If the buffer is full, then go to sleep until the calling
            thread wakes us up (i.e. when it has removed data from the
-           buffer). Note: this does stall the download thread. */
-        while (state->data.size() > 1024 * 1024) {
-            if (state->quit) return;
+           buffer). We don't wait forever to prevent stalling the
+           download thread. (Hopefully sleeping will throttle the
+           sender.) */
+        if (state->data.size() > 1024 * 1024) {
             debug("download buffer is full; going to sleep");
-            state.wait(state->request);
+            state.wait_for(state->request, std::chrono::seconds(10));
         }
 
         /* Append data to the buffer and wake up the calling
@@ -736,30 +737,36 @@ void Downloader::download(DownloadRequest && request, Sink & sink)
             state->request.notify_one();
         }});
 
-    auto state(_state->lock());
-
     while (true) {
         checkInterrupt();
 
-        /* If no data is available, then wait for the download thread
-           to wake us up. */
-        if (state->data.empty()) {
+        std::string chunk;
+
+        /* Grab data if available, otherwise wait for the download
+           thread to wake us up. */
+        {
+            auto state(_state->lock());
+
+            while (state->data.empty()) {
 
-            if (state->quit) {
-                if (state->exc) std::rethrow_exception(state->exc);
-                break;
+                if (state->quit) {
+                    if (state->exc) std::rethrow_exception(state->exc);
+                    return;
+                }
+
+                state.wait(state->avail);
             }
 
-            state.wait(state->avail);
-        }
+            chunk = std::move(state->data);
 
-        /* If data is available, then flush it to the sink and wake up
-           the download thread if it's blocked on a full buffer. */
-        if (!state->data.empty()) {
-            sink((unsigned char *) state->data.data(), state->data.size());
-            state->data.clear();
             state->request.notify_one();
         }
+
+        /* Flush the data to the sink and wake up the download thread
+           if it's blocked on a full buffer. We don't hold the state
+           lock while doing this to prevent blocking the download
+           thread if sink() takes a long time. */
+        sink((unsigned char *) chunk.data(), chunk.size());
     }
 }
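
A condensed sketch (not part of the patch) of the buffering scheme now used by Downloader::download(): the producer waits with a timeout when the buffer is full, and the consumer moves the data out before invoking the sink so a slow sink never holds the lock. Buffer is an illustrative stand-in for the Sync-protected state above:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <string>
    #include <utility>

    struct Buffer
    {
        std::mutex m;
        std::condition_variable avail, drained;
        std::string data;
        bool quit = false;

        // Producer side (the download thread): sleep with a timeout when the
        // buffer is full, so a stuck consumer cannot stall it forever.
        void produce(const std::string & chunk)
        {
            std::unique_lock<std::mutex> lk(m);
            if (data.size() > 1024 * 1024)
                drained.wait_for(lk, std::chrono::seconds(10));
            data.append(chunk);
            avail.notify_one();
        }

        void finish()
        {
            std::lock_guard<std::mutex> lk(m);
            quit = true;
            avail.notify_one();
        }

        // Consumer side: move the data out under the lock, then call the
        // (possibly slow) sink without holding it.
        template<typename Sink>
        void consume(Sink sink)
        {
            while (true) {
                std::string chunk;
                {
                    std::unique_lock<std::mutex> lk(m);
                    while (data.empty()) {
                        if (quit) return;
                        avail.wait(lk);
                    }
                    chunk = std::move(data);
                    data.clear();
                    drained.notify_one();
                }
                sink(chunk);
            }
        }
    };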
 
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index d95db56726cb..a9c07b23a6f3 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -86,6 +86,21 @@ unsigned int Settings::getDefaultCores()
     return std::max(1U, std::thread::hardware_concurrency());
 }
 
+StringSet Settings::getDefaultSystemFeatures()
+{
+    /* For backwards compatibility, accept some "features" that are
+       used in Nixpkgs to route builds to certain machines but don't
+       actually require anything special on the machines. */
+    StringSet features{"nixos-test", "benchmark", "big-parallel"};
+
+    #if __linux__
+    if (access("/dev/kvm", R_OK | W_OK) == 0)
+        features.insert("kvm");
+    #endif
+
+    return features;
+}
+
 const string nixVersion = PACKAGE_VERSION;
 
 template<> void BaseSetting<SandboxMode>::set(const std::string & str)
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index f589078dbb98..6b3e204536f1 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -32,6 +32,8 @@ class Settings : public Config {
 
     unsigned int getDefaultCores();
 
+    StringSet getDefaultSystemFeatures();
+
 public:
 
     Settings();
@@ -80,9 +82,9 @@ public:
     /* Whether to show build log output in real time. */
     bool verboseBuild = true;
 
-    /* If verboseBuild is false, the number of lines of the tail of
-       the log to show if a build fails. */
-    size_t logLines = 10;
+    Setting<size_t> logLines{this, 10, "log-lines",
+        "If verbose-build is false, the number of lines of the tail of "
+        "the log to show if a build fails."};
 
     MaxBuildJobsSetting maxBuildJobs{this, 1, "max-jobs",
         "Maximum number of parallel build jobs. \"auto\" means use number of cores.",
@@ -261,6 +263,10 @@ public:
         "These may be supported natively (e.g. armv7 on some aarch64 CPUs "
         "or using hacks like qemu-user."};
 
+    Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(),
+        "system-features",
+        "Optional features that this system implements (like \"kvm\")."};
+
     Setting<Strings> substituters{this,
         nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(),
         "substituters",
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index ab524d523cf2..8da0e2f9d82a 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -13,6 +13,14 @@ private:
 
     Path cacheUri;
 
+    struct State
+    {
+        bool enabled = true;
+        std::chrono::steady_clock::time_point disabledUntil;
+    };
+
+    Sync<State> _state;
+
 public:
 
     HttpBinaryCacheStore(
@@ -46,8 +54,33 @@ public:
 
 protected:
 
+    void maybeDisable()
+    {
+        auto state(_state.lock());
+        if (state->enabled && settings.tryFallback) {
+            int t = 60;
+            printError("disabling binary cache '%s' for %s seconds", getUri(), t);
+            state->enabled = false;
+            state->disabledUntil = std::chrono::steady_clock::now() + std::chrono::seconds(t);
+        }
+    }
+
+    void checkEnabled()
+    {
+        auto state(_state.lock());
+        if (state->enabled) return;
+        if (std::chrono::steady_clock::now() > state->disabledUntil) {
+            state->enabled = true;
+            debug("re-enabling binary cache '%s'", getUri());
+            return;
+        }
+        throw SubstituterDisabled("substituter '%s' is disabled", getUri());
+    }
+
     bool fileExists(const std::string & path) override
     {
+        checkEnabled();
+
         try {
             DownloadRequest request(cacheUri + "/" + path);
             request.head = true;
@@ -59,6 +92,7 @@ protected:
                bucket is unlistable, so treat 403 as 404. */
             if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
                 return false;
+            maybeDisable();
             throw;
         }
     }
@@ -86,12 +120,14 @@ protected:
 
     void getFile(const std::string & path, Sink & sink) override
     {
+        checkEnabled();
         auto request(makeRequest(path));
         try {
             getDownloader()->download(std::move(request), sink);
         } catch (DownloadError & e) {
             if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
                 throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
+            maybeDisable();
             throw;
         }
     }
@@ -99,15 +135,18 @@ protected:
     void getFile(const std::string & path,
         Callback<std::shared_ptr<std::string>> callback) override
     {
+        checkEnabled();
+
         auto request(makeRequest(path));
 
         getDownloader()->enqueueDownload(request,
-            {[callback](std::future<DownloadResult> result) {
+            {[callback, this](std::future<DownloadResult> result) {
                 try {
                     callback(result.get().data);
                 } catch (DownloadError & e) {
                     if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
                         return callback(std::shared_ptr<std::string>());
+                    maybeDisable();
                     callback.rethrow();
                 } catch (...) {
                     callback.rethrow();
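
The disable/re-enable logic above is a simple time-based circuit breaker. A minimal sketch (not part of the patch), with the locking (Sync<State> in the real code) omitted:

    #include <chrono>
    #include <stdexcept>

    struct CacheState
    {
        bool enabled = true;
        std::chrono::steady_clock::time_point disabledUntil;

        // Called after a download failure: back off for a fixed period.
        void maybeDisable(bool tryFallback, int seconds = 60)
        {
            if (enabled && tryFallback) {
                enabled = false;
                disabledUntil = std::chrono::steady_clock::now()
                    + std::chrono::seconds(seconds);
            }
        }

        // Called before each request: re-enable once the period has passed,
        // otherwise refuse, like the SubstituterDisabled error above.
        void checkEnabled()
        {
            if (enabled) return;
            if (std::chrono::steady_clock::now() > disabledUntil) {
                enabled = true;
                return;
            }
            throw std::runtime_error("substituter is disabled");
        }
    };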
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index c8117c0c6508..216f3417c4a8 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -880,6 +880,12 @@ void LocalStore::querySubstitutablePathInfos(const PathSet & paths,
                     narInfo ? narInfo->fileSize : 0,
                     info->narSize};
             } catch (InvalidPath) {
+            } catch (SubstituterDisabled) {
+            } catch (Error & e) {
+                if (settings.tryFallback)
+                    printError(e.what());
+                else
+                    throw;
             }
         }
     }
diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc
new file mode 100644
index 000000000000..dc3286482736
--- /dev/null
+++ b/src/libstore/parsed-derivations.cc
@@ -0,0 +1,111 @@
+#include "parsed-derivations.hh"
+
+namespace nix {
+
+ParsedDerivation::ParsedDerivation(const Path & drvPath, BasicDerivation & drv)
+    : drvPath(drvPath), drv(drv)
+{
+    /* Parse the __json attribute, if any. */
+    auto jsonAttr = drv.env.find("__json");
+    if (jsonAttr != drv.env.end()) {
+        try {
+            structuredAttrs = nlohmann::json::parse(jsonAttr->second);
+        } catch (std::exception & e) {
+            throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what());
+        }
+    }
+}
+
+std::experimental::optional<std::string> ParsedDerivation::getStringAttr(const std::string & name) const
+{
+    if (structuredAttrs) {
+        auto i = structuredAttrs->find(name);
+        if (i == structuredAttrs->end())
+            return {};
+        else {
+            if (!i->is_string())
+                throw Error("attribute '%s' of derivation '%s' must be a string", name, drvPath);
+            return i->get<std::string>();
+        }
+    } else {
+        auto i = drv.env.find(name);
+        if (i == drv.env.end())
+            return {};
+        else
+            return i->second;
+    }
+}
+
+bool ParsedDerivation::getBoolAttr(const std::string & name, bool def) const
+{
+    if (structuredAttrs) {
+        auto i = structuredAttrs->find(name);
+        if (i == structuredAttrs->end())
+            return def;
+        else {
+            if (!i->is_boolean())
+                throw Error("attribute '%s' of derivation '%s' must be a Boolean", name, drvPath);
+            return i->get<bool>();
+        }
+    } else {
+        auto i = drv.env.find(name);
+        if (i == drv.env.end())
+            return def;
+        else
+            return i->second == "1";
+    }
+}
+
+std::experimental::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name) const
+{
+    if (structuredAttrs) {
+        auto i = structuredAttrs->find(name);
+        if (i == structuredAttrs->end())
+            return {};
+        else {
+            if (!i->is_array())
+                throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
+            Strings res;
+            for (auto j = i->begin(); j != i->end(); ++j) {
+                if (!j->is_string())
+                    throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
+                res.push_back(j->get<std::string>());
+            }
+            return res;
+        }
+    } else {
+        auto i = drv.env.find(name);
+        if (i == drv.env.end())
+            return {};
+        else
+            return tokenizeString<Strings>(i->second);
+    }
+}
+
+StringSet ParsedDerivation::getRequiredSystemFeatures() const
+{
+    StringSet res;
+    for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
+        res.insert(i);
+    return res;
+}
+
+bool ParsedDerivation::canBuildLocally() const
+{
+    if (drv.platform != settings.thisSystem.get()
+        && !settings.extraPlatforms.get().count(drv.platform)
+        && !drv.isBuiltin())
+        return false;
+
+    for (auto & feature : getRequiredSystemFeatures())
+        if (!settings.systemFeatures.get().count(feature)) return false;
+
+    return true;
+}
+
+bool ParsedDerivation::willBuildLocally() const
+{
+    return getBoolAttr("preferLocalBuild") && canBuildLocally();
+}
+
+}
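
To illustrate what ParsedDerivation consumes, here is a standalone sketch (not part of the patch) that extracts requiredSystemFeatures from a made-up __json payload, as getRequiredSystemFeatures() does for structured-attrs derivations:

    #include <nlohmann/json.hpp>
    #include <iostream>
    #include <set>
    #include <string>

    int main()
    {
        // Made-up __json payload for illustration.
        std::string jsonAttr = R"({
            "name": "my-vm-test",
            "requiredSystemFeatures": ["kvm", "big-parallel"]
        })";

        auto structuredAttrs = nlohmann::json::parse(jsonAttr);

        std::set<std::string> required;
        auto i = structuredAttrs.find("requiredSystemFeatures");
        if (i != structuredAttrs.end())
            for (auto & j : *i)
                required.insert(j.get<std::string>());

        // canBuildLocally() then simply checks that every required feature is
        // present in settings.systemFeatures.
        for (auto & f : required)
            std::cout << f << "\n";   // prints: big-parallel, kvm
    }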
diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh
new file mode 100644
index 000000000000..0a82c146172b
--- /dev/null
+++ b/src/libstore/parsed-derivations.hh
@@ -0,0 +1,35 @@
+#include "derivations.hh"
+
+#include <nlohmann/json.hpp>
+
+namespace nix {
+
+class ParsedDerivation
+{
+    Path drvPath;
+    BasicDerivation & drv;
+    std::experimental::optional<nlohmann::json> structuredAttrs;
+
+public:
+
+    ParsedDerivation(const Path & drvPath, BasicDerivation & drv);
+
+    const std::experimental::optional<nlohmann::json> & getStructuredAttrs() const
+    {
+        return structuredAttrs;
+    }
+
+    std::experimental::optional<std::string> getStringAttr(const std::string & name) const;
+
+    bool getBoolAttr(const std::string & name, bool def = false) const;
+
+    std::experimental::optional<Strings> getStringsAttr(const std::string & name) const;
+
+    StringSet getRequiredSystemFeatures() const;
+
+    bool canBuildLocally() const;
+
+    bool willBuildLocally() const;
+};
+
+}
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index eff5d252419f..def140cfbe18 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -161,7 +161,8 @@ void RemoteStore::initConnection(Connection & conn)
         if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11)
             conn.to << false;
 
-        conn.processStderr();
+        auto ex = conn.processStderr();
+        if (ex) std::rethrow_exception(ex);
     }
     catch (Error & e) {
         throw Error("cannot open connection to remote store '%s': %s", getUri(), e.what());
@@ -195,22 +196,68 @@ void RemoteStore::setOptions(Connection & conn)
             conn.to << i.first << i.second.value;
     }
 
-    conn.processStderr();
+    auto ex = conn.processStderr();
+    if (ex) std::rethrow_exception(ex);
+}
+
+
+/* A wrapper around Pool<RemoteStore::Connection>::Handle that marks
+   the connection as bad (causing it to be closed) if a non-daemon
+   exception is thrown before the handle is closed. Such an exception
+   causes a deviation from the expected protocol and therefore a
+   desynchronization between the client and daemon. */
+struct ConnectionHandle
+{
+    Pool<RemoteStore::Connection>::Handle handle;
+    bool daemonException = false;
+
+    ConnectionHandle(Pool<RemoteStore::Connection>::Handle && handle)
+        : handle(std::move(handle))
+    { }
+
+    ConnectionHandle(ConnectionHandle && h)
+        : handle(std::move(h.handle))
+    { }
+
+    ~ConnectionHandle()
+    {
+        if (!daemonException && std::uncaught_exception()) {
+            handle.markBad();
+            debug("closing daemon connection because of an exception");
+        }
+    }
+
+    RemoteStore::Connection * operator -> () { return &*handle; }
+
+    void processStderr(Sink * sink = 0, Source * source = 0)
+    {
+        auto ex = handle->processStderr(sink, source);
+        if (ex) {
+            daemonException = true;
+            std::rethrow_exception(ex);
+        }
+    }
+};
+
+
+ConnectionHandle RemoteStore::getConnection()
+{
+    return ConnectionHandle(connections->get());
 }
 
 
 bool RemoteStore::isValidPathUncached(const Path & path)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopIsValidPath << path;
-    conn->processStderr();
+    conn.processStderr();
     return readInt(conn->from);
 }
 
 
 PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubstitute)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
         PathSet res;
         for (auto & i : paths)
@@ -218,7 +265,7 @@ PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybe
         return res;
     } else {
         conn->to << wopQueryValidPaths << paths;
-        conn->processStderr();
+        conn.processStderr();
         return readStorePaths<PathSet>(*this, conn->from);
     }
 }
@@ -226,27 +273,27 @@ PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybe
 
 PathSet RemoteStore::queryAllValidPaths()
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopQueryAllValidPaths;
-    conn->processStderr();
+    conn.processStderr();
     return readStorePaths<PathSet>(*this, conn->from);
 }
 
 
 PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
         PathSet res;
         for (auto & i : paths) {
             conn->to << wopHasSubstitutes << i;
-            conn->processStderr();
+            conn.processStderr();
             if (readInt(conn->from)) res.insert(i);
         }
         return res;
     } else {
         conn->to << wopQuerySubstitutablePaths << paths;
-        conn->processStderr();
+        conn.processStderr();
         return readStorePaths<PathSet>(*this, conn->from);
     }
 }
@@ -257,14 +304,14 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
 {
     if (paths.empty()) return;
 
-    auto conn(connections->get());
+    auto conn(getConnection());
 
     if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
 
         for (auto & i : paths) {
             SubstitutablePathInfo info;
             conn->to << wopQuerySubstitutablePathInfo << i;
-            conn->processStderr();
+            conn.processStderr();
             unsigned int reply = readInt(conn->from);
             if (reply == 0) continue;
             info.deriver = readString(conn->from);
@@ -278,7 +325,7 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
     } else {
 
         conn->to << wopQuerySubstitutablePathInfos << paths;
-        conn->processStderr();
+        conn.processStderr();
         size_t count = readNum<size_t>(conn->from);
         for (size_t n = 0; n < count; n++) {
             Path path = readStorePath(*this, conn->from);
@@ -300,10 +347,10 @@ void RemoteStore::queryPathInfoUncached(const Path & path,
     try {
         std::shared_ptr<ValidPathInfo> info;
         {
-            auto conn(connections->get());
+            auto conn(getConnection());
             conn->to << wopQueryPathInfo << path;
             try {
-                conn->processStderr();
+                conn.processStderr();
             } catch (Error & e) {
                 // Ugly backwards compatibility hack.
                 if (e.msg().find("is not valid") != std::string::npos)
@@ -335,9 +382,9 @@ void RemoteStore::queryPathInfoUncached(const Path & path,
 void RemoteStore::queryReferrers(const Path & path,
     PathSet & referrers)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopQueryReferrers << path;
-    conn->processStderr();
+    conn.processStderr();
     PathSet referrers2 = readStorePaths<PathSet>(*this, conn->from);
     referrers.insert(referrers2.begin(), referrers2.end());
 }
@@ -345,36 +392,36 @@ void RemoteStore::queryReferrers(const Path & path,
 
 PathSet RemoteStore::queryValidDerivers(const Path & path)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopQueryValidDerivers << path;
-    conn->processStderr();
+    conn.processStderr();
     return readStorePaths<PathSet>(*this, conn->from);
 }
 
 
 PathSet RemoteStore::queryDerivationOutputs(const Path & path)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopQueryDerivationOutputs << path;
-    conn->processStderr();
+    conn.processStderr();
     return readStorePaths<PathSet>(*this, conn->from);
 }
 
 
 PathSet RemoteStore::queryDerivationOutputNames(const Path & path)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopQueryDerivationOutputNames << path;
-    conn->processStderr();
+    conn.processStderr();
     return readStrings<PathSet>(conn->from);
 }
 
 
 Path RemoteStore::queryPathFromHashPart(const string & hashPart)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopQueryPathFromHashPart << hashPart;
-    conn->processStderr();
+    conn.processStderr();
     Path path = readString(conn->from);
     if (!path.empty()) assertStorePath(path);
     return path;
@@ -384,7 +431,7 @@ Path RemoteStore::queryPathFromHashPart(const string & hashPart)
 void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
     RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
 
     if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
         conn->to << wopImportPaths;
@@ -403,7 +450,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
                 ;
         });
 
-        conn->processStderr(0, source2.get());
+        conn.processStderr(0, source2.get());
 
         auto importedPaths = readStorePaths<PathSet>(*this, conn->from);
         assert(importedPaths.size() <= 1);
@@ -417,7 +464,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
                  << repair << !checkSigs;
         bool tunnel = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21;
         if (!tunnel) copyNAR(source, conn->to);
-        conn->processStderr(0, tunnel ? &source : nullptr);
+        conn.processStderr(0, tunnel ? &source : nullptr);
     }
 }
 
@@ -427,7 +474,7 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath,
 {
     if (repair) throw Error("repairing is not supported when building through the Nix daemon");
 
-    auto conn(connections->get());
+    auto conn(getConnection());
 
     Path srcPath(absPath(_srcPath));
 
@@ -445,13 +492,13 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath,
             dumpPath(srcPath, conn->to, filter);
         }
         conn->to.warn = false;
-        conn->processStderr();
+        conn.processStderr();
     } catch (SysError & e) {
         /* Daemon closed while we were sending the path. Probably OOM
            or I/O error. */
         if (e.errNo == EPIPE)
             try {
-                conn->processStderr();
+                conn.processStderr();
             } catch (EndOfFile & e) { }
         throw;
     }
@@ -465,17 +512,17 @@ Path RemoteStore::addTextToStore(const string & name, const string & s,
 {
     if (repair) throw Error("repairing is not supported when building through the Nix daemon");
 
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopAddTextToStore << name << s << references;
 
-    conn->processStderr();
+    conn.processStderr();
     return readStorePath(*this, conn->from);
 }
 
 
 void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopBuildPaths;
     if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) {
         conn->to << drvPaths;
@@ -494,7 +541,7 @@ void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
             drvPaths2.insert(string(i, 0, i.find('!')));
         conn->to << drvPaths2;
     }
-    conn->processStderr();
+    conn.processStderr();
     readInt(conn->from);
 }
 
@@ -502,9 +549,9 @@ void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
 BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv,
     BuildMode buildMode)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopBuildDerivation << drvPath << drv << buildMode;
-    conn->processStderr();
+    conn.processStderr();
     BuildResult res;
     unsigned int status;
     conn->from >> status >> res.errorMsg;
@@ -515,45 +562,45 @@ BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDeriva
 
 void RemoteStore::ensurePath(const Path & path)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopEnsurePath << path;
-    conn->processStderr();
+    conn.processStderr();
     readInt(conn->from);
 }
 
 
 void RemoteStore::addTempRoot(const Path & path)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopAddTempRoot << path;
-    conn->processStderr();
+    conn.processStderr();
     readInt(conn->from);
 }
 
 
 void RemoteStore::addIndirectRoot(const Path & path)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopAddIndirectRoot << path;
-    conn->processStderr();
+    conn.processStderr();
     readInt(conn->from);
 }
 
 
 void RemoteStore::syncWithGC()
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopSyncWithGC;
-    conn->processStderr();
+    conn.processStderr();
     readInt(conn->from);
 }
 
 
 Roots RemoteStore::findRoots()
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopFindRoots;
-    conn->processStderr();
+    conn.processStderr();
     size_t count = readNum<size_t>(conn->from);
     Roots result;
     while (count--) {
@@ -567,7 +614,7 @@ Roots RemoteStore::findRoots()
 
 void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
 
     conn->to
         << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness
@@ -575,7 +622,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
         /* removed options */
         << 0 << 0 << 0;
 
-    conn->processStderr();
+    conn.processStderr();
 
     results.paths = readStrings<PathSet>(conn->from);
     results.bytesFreed = readLongLong(conn->from);
@@ -590,27 +637,27 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
 
 void RemoteStore::optimiseStore()
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopOptimiseStore;
-    conn->processStderr();
+    conn.processStderr();
     readInt(conn->from);
 }
 
 
 bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopVerifyStore << checkContents << repair;
-    conn->processStderr();
+    conn.processStderr();
     return readInt(conn->from);
 }
 
 
 void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs)
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
     conn->to << wopAddSignatures << storePath << sigs;
-    conn->processStderr();
+    conn.processStderr();
     readInt(conn->from);
 }
 
@@ -620,13 +667,13 @@ void RemoteStore::queryMissing(const PathSet & targets,
     unsigned long long & downloadSize, unsigned long long & narSize)
 {
     {
-        auto conn(connections->get());
+        auto conn(getConnection());
         if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19)
             // Don't hold the connection handle in the fallback case
             // to prevent a deadlock.
             goto fallback;
         conn->to << wopQueryMissing << targets;
-        conn->processStderr();
+        conn.processStderr();
         willBuild = readStorePaths<PathSet>(*this, conn->from);
         willSubstitute = readStorePaths<PathSet>(*this, conn->from);
         unknown = readStorePaths<PathSet>(*this, conn->from);
@@ -642,7 +689,7 @@ void RemoteStore::queryMissing(const PathSet & targets,
 
 void RemoteStore::connect()
 {
-    auto conn(connections->get());
+    auto conn(getConnection());
 }
 
 
@@ -686,7 +733,7 @@ static Logger::Fields readFields(Source & from)
 }
 
 
-void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
+std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source)
 {
     to.flush();
 
@@ -711,7 +758,7 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
         else if (msg == STDERR_ERROR) {
             string error = readString(from);
             unsigned int status = readInt(from);
-            throw Error(status, error);
+            return std::make_exception_ptr(Error(status, error));
         }
 
         else if (msg == STDERR_NEXT)
@@ -745,6 +792,8 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
         else
             throw Error("got unknown message type %x from Nix daemon", msg);
     }
+
+    return nullptr;
 }
 
 static std::string uriScheme = "unix://";
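
The ConnectionHandle introduced above is essentially an RAII guard around a pooled connection. A minimal sketch of that idea (not part of the patch); GuardedConnection and PooledConnection are hypothetical stand-ins, and the daemon-side stderr handling is omitted:

    #include <exception>

    // PooledConnection is a hypothetical stand-in for Pool<Connection>::Handle.
    template<typename PooledConnection>
    struct GuardedConnection
    {
        PooledConnection handle;
        bool daemonException = false;   // set when the daemon reported the error itself

        ~GuardedConnection()
        {
            // A non-daemon exception means the protocol state is unknown, so
            // the connection must not be reused. (std::uncaught_exception()
            // matches the patch; newer code would use std::uncaught_exceptions().)
            if (!daemonException && std::uncaught_exception())
                handle.markBad();
        }
    };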
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 16daee8b6731..4f554b5980e8 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -14,6 +14,7 @@ class Pid;
 struct FdSink;
 struct FdSource;
 template<typename T> class Pool;
+struct ConnectionHandle;
 
 
 /* FIXME: RemoteStore is a misnomer - should be something like
@@ -105,6 +106,7 @@ protected:
 
     struct Connection
     {
+        AutoCloseFD fd;
         FdSink to;
         FdSource from;
         unsigned int daemonVersion;
@@ -112,7 +114,7 @@ protected:
 
         virtual ~Connection();
 
-        void processStderr(Sink * sink = 0, Source * source = 0);
+        std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0);
     };
 
     ref<Connection> openConnectionWrapper();
@@ -125,6 +127,10 @@ protected:
 
     virtual void setOptions(Connection & conn);
 
+    ConnectionHandle getConnection();
+
+    friend struct ConnectionHandle;
+
 private:
 
     std::atomic_bool failed{false};
@@ -142,11 +148,6 @@ public:
 
 private:
 
-    struct Connection : RemoteStore::Connection
-    {
-        AutoCloseFD fd;
-    };
-
     ref<RemoteStore::Connection> openConnection() override;
     std::experimental::optional<std::string> path;
 };
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 7711388f05a9..ba11ce6bb6de 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -19,8 +19,6 @@
 #include <aws/core/utils/logging/LogMacros.h>
 #include <aws/core/utils/threading/Executor.h>
 #include <aws/s3/S3Client.h>
-#include <aws/s3/model/CreateBucketRequest.h>
-#include <aws/s3/model/GetBucketLocationRequest.h>
 #include <aws/s3/model/GetObjectRequest.h>
 #include <aws/s3/model/HeadObjectRequest.h>
 #include <aws/s3/model/ListObjectsRequest.h>
@@ -202,32 +200,6 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
     {
         if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) {
 
-            /* Create the bucket if it doesn't already exists. */
-            // FIXME: HeadBucket would be more appropriate, but doesn't return
-            // an easily parsed 404 message.
-            auto res = s3Helper.client->GetBucketLocation(
-                Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName));
-
-            if (!res.IsSuccess()) {
-                if (res.GetError().GetErrorType() != Aws::S3::S3Errors::NO_SUCH_BUCKET)
-                    throw Error(format("AWS error checking bucket '%s': %s") % bucketName % res.GetError().GetMessage());
-
-                printInfo("creating S3 bucket '%s'...", bucketName);
-
-                // Stupid S3 bucket locations.
-                auto bucketConfig = Aws::S3::Model::CreateBucketConfiguration();
-                if (s3Helper.config->region != "us-east-1")
-                    bucketConfig.SetLocationConstraint(
-                        Aws::S3::Model::BucketLocationConstraintMapper::GetBucketLocationConstraintForName(
-                            s3Helper.config->region));
-
-                checkAws(format("AWS error creating bucket '%s'") % bucketName,
-                    s3Helper.client->CreateBucket(
-                        Aws::S3::Model::CreateBucketRequest()
-                        .WithBucket(bucketName)
-                        .WithCreateBucketConfiguration(bucketConfig)));
-            }
-
             BinaryCacheStore::init();
 
             diskCache->createCache(getUri(), storeDir, wantMassQuery_, priority);
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 1f42097fccfb..92e2685f7f66 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -320,6 +320,8 @@ ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
 void Store::queryPathInfo(const Path & storePath,
     Callback<ref<ValidPathInfo>> callback)
 {
+    assertStorePath(storePath);
+
     auto hashPart = storePathToHash(storePath);
 
     try {
@@ -610,7 +612,7 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
         });
         srcStore->narFromPath({storePath}, wrapperSink);
     }, [&]() {
-	throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri());
+        throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri());
     });
 
     dstStore->addToStore(*info, *source, repair, checkSigs);
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index c2f964e11f76..106b2be5e6b2 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -23,6 +23,7 @@ MakeError(BuildError, Error) /* denotes a permanent build failure */
 MakeError(InvalidPath, Error)
 MakeError(Unsupported, Error)
 MakeError(SubstituteGone, Error)
+MakeError(SubstituterDisabled, Error)
 
 
 struct BasicDerivation;
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index 1be8934a2eba..bb68e82886d0 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -283,7 +283,7 @@ void parseDump(ParseSink & sink, Source & source)
 {
     string version;
     try {
-        version = readString(source);
+        version = readString(source, narVersionMagic1.size());
     } catch (SerialisationError & e) {
         /* This generally means the integer at the start couldn't be
            decoded.  Ignore and throw the exception below. */
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
index 204c63cd26fc..0dd84e32034a 100644
--- a/src/libutil/compression.cc
+++ b/src/libutil/compression.cc
@@ -250,7 +250,7 @@ struct XzCompressionSink : CompressionSink
             ret = lzma_stream_encoder_mt(&strm, &mt_options);
             done = true;
 #else
-            printMsg(lvlError, "warning: parallel compression requested but not supported for metho  d '%1%', falling back to single-threaded compression", method);
+            printMsg(lvlError, "warning: parallel XZ compression requested but not supported, falling back to single-threaded compression");
 #endif
         }
 
diff --git a/src/libutil/pool.hh b/src/libutil/pool.hh
index 0b142b0597c7..d49067bb95dc 100644
--- a/src/libutil/pool.hh
+++ b/src/libutil/pool.hh
@@ -97,6 +97,7 @@ public:
     private:
         Pool & pool;
         std::shared_ptr<R> r;
+        bool bad = false;
 
         friend Pool;
 
@@ -112,7 +113,8 @@ public:
             if (!r) return;
             {
                 auto state_(pool.state.lock());
-                state_->idle.push_back(ref<R>(r));
+                if (!bad)
+                    state_->idle.push_back(ref<R>(r));
                 assert(state_->inUse);
                 state_->inUse--;
             }
@@ -121,6 +123,8 @@ public:
 
         R * operator -> () { return &*r; }
         R & operator * () { return *r; }
+
+        void markBad() { bad = true; }
     };
 
     Handle get()
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index 17448f70efb6..0e75eeec2bfe 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -169,17 +169,13 @@ std::unique_ptr<Source> sinkToSource(
     {
         typedef boost::coroutines2::coroutine<std::string> coro_t;
 
+        std::function<void(Sink &)> fun;
         std::function<void()> eof;
-        coro_t::pull_type coro;
+        std::experimental::optional<coro_t::pull_type> coro;
+        bool started = false;
 
         SinkToSource(std::function<void(Sink &)> fun, std::function<void()> eof)
-            : eof(eof)
-            , coro([&](coro_t::push_type & yield) {
-                LambdaSink sink([&](const unsigned char * data, size_t len) {
-                    if (len) yield(std::string((const char *) data, len));
-                });
-                fun(sink);
-            })
+            : fun(fun), eof(eof)
         {
         }
 
@@ -188,11 +184,19 @@ std::unique_ptr<Source> sinkToSource(
 
         size_t read(unsigned char * data, size_t len) override
         {
-            if (!coro) { eof(); abort(); }
+            if (!coro)
+                coro = coro_t::pull_type([&](coro_t::push_type & yield) {
+                    LambdaSink sink([&](const unsigned char * data, size_t len) {
+                            if (len) yield(std::string((const char *) data, len));
+                        });
+                    fun(sink);
+                });
+
+            if (!*coro) { eof(); abort(); }
 
             if (pos == cur.size()) {
-                if (!cur.empty()) coro();
-                cur = coro.get();
+                if (!cur.empty()) (*coro)();
+                cur = coro->get();
                 pos = 0;
             }
 
@@ -268,16 +272,17 @@ void readPadding(size_t len, Source & source)
 size_t readString(unsigned char * buf, size_t max, Source & source)
 {
     auto len = readNum<size_t>(source);
-    if (len > max) throw Error("string is too long");
+    if (len > max) throw SerialisationError("string is too long");
     source(buf, len);
     readPadding(len, source);
     return len;
 }
 
 
-string readString(Source & source)
+string readString(Source & source, size_t max)
 {
     auto len = readNum<size_t>(source);
+    if (len > max) throw SerialisationError("string is too long");
     std::string res(len, 0);
     source((unsigned char*) res.data(), len);
     readPadding(len, source);
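
The new max parameter guards the length prefix read from the wire, which would otherwise let a corrupt or hostile peer trigger an arbitrarily large allocation. A self-contained sketch (not part of the patch) of the same idea over std::istream; readLengthPrefixed is an illustrative name and the framing (64-bit length prefix, as on the Nix daemon wire) is simplified:

    #include <cstdint>
    #include <istream>
    #include <limits>
    #include <stdexcept>
    #include <string>

    // Read a 64-bit length prefix and then that many bytes, refusing lengths
    // above 'max' before any allocation happens.
    std::string readLengthPrefixed(std::istream & in,
        size_t max = std::numeric_limits<size_t>::max())
    {
        uint64_t len;
        in.read(reinterpret_cast<char *>(&len), sizeof(len));
        if (!in || len > max)
            throw std::runtime_error("string is too long");
        std::string res(len, '\0');
        if (len) in.read(&res[0], len);
        if (!in)
            throw std::runtime_error("unexpected end of stream");
        return res;
    }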
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index 4b6ad5da5b9c..969e4dff383d 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -284,7 +284,7 @@ inline uint64_t readLongLong(Source & source)
 
 void readPadding(size_t len, Source & source);
 size_t readString(unsigned char * buf, size_t max, Source & source);
-string readString(Source & source);
+string readString(Source & source, size_t max = std::numeric_limits<size_t>::max());
 template<class T> T readStrings(Source & source);
 
 Source & operator >> (Source & in, string & s);
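
Note: readString() now accepts an upper bound (defaulting to "unlimited") and throws SerialisationError rather than a generic Error, so a corrupt or hostile peer cannot make the reader allocate an arbitrarily large string from a bogus length prefix. A hedged standalone sketch of the same check over a flat buffer (the framing and error type here are illustrative, not the Nix wire format):

    #include <cstdint>
    #include <cstring>
    #include <limits>
    #include <stdexcept>
    #include <string>

    struct SerialisationError : std::runtime_error {
        using std::runtime_error::runtime_error;
    };

    // Read a length-prefixed string, refusing lengths above 'max' before
    // allocating anything.
    std::string readLengthPrefixed(const unsigned char * data, size_t avail,
        size_t max = std::numeric_limits<size_t>::max())
    {
        if (avail < sizeof(uint64_t)) throw SerialisationError("truncated length");
        uint64_t len;
        std::memcpy(&len, data, sizeof len);
        if (len > max) throw SerialisationError("string is too long");
        if (avail - sizeof len < len) throw SerialisationError("truncated string");
        return std::string((const char *) data + sizeof len, len);
    }
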
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 6bc64ae75a42..03f0be705c1d 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -167,7 +167,7 @@ Path dirOf(const Path & path)
 {
     Path::size_type pos = path.rfind('/');
     if (pos == string::npos)
-        throw Error(format("invalid file name '%1%'") % path);
+        return ".";
     return pos == 0 ? "/" : Path(path, 0, pos);
 }
 
@@ -468,7 +468,7 @@ static Lazy<Path> getHome2([]() {
         std::vector<char> buf(16384);
         struct passwd pwbuf;
         struct passwd * pw;
-        if (getpwuid_r(getuid(), &pwbuf, buf.data(), buf.size(), &pw) != 0
+        if (getpwuid_r(geteuid(), &pwbuf, buf.data(), buf.size(), &pw) != 0
             || !pw || !pw->pw_dir || !pw->pw_dir[0])
             throw Error("cannot determine user's home directory");
         homeDir = pw->pw_dir;
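
Note: dirOf() no longer throws on a path that contains no slash; like dirname(1), it now returns "." for such names, and the home-directory lookup switches from the real to the effective UID. The new behaviour, restated as a standalone function with its expected results:

    #include <string>

    // Directory part of a path: "." if there is no slash, "/" for paths
    // directly under the root, otherwise everything before the last slash.
    std::string dirOf(const std::string & path)
    {
        auto pos = path.rfind('/');
        if (pos == std::string::npos) return ".";
        return pos == 0 ? "/" : path.substr(0, pos);
    }

    // dirOf("foo")      == "."
    // dirOf("/foo")     == "/"
    // dirOf("/foo/bar") == "/foo"
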
diff --git a/src/nix-build/local.mk b/src/nix-build/local.mk
deleted file mode 100644
index a2d1c91dfd9d..000000000000
--- a/src/nix-build/local.mk
+++ /dev/null
@@ -1,9 +0,0 @@
-programs += nix-build
-
-nix-build_DIR := $(d)
-
-nix-build_SOURCES := $(d)/nix-build.cc
-
-nix-build_LIBS = libmain libexpr libstore libutil libformat
-
-$(eval $(call install-symlink, nix-build, $(bindir)/nix-shell))
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
index 21a0756a2007..618895d387d4 100755
--- a/src/nix-build/nix-build.cc
+++ b/src/nix-build/nix-build.cc
@@ -16,6 +16,7 @@
 #include "get-drvs.hh"
 #include "common-eval-args.hh"
 #include "attr-path.hh"
+#include "legacy.hh"
 
 using namespace nix;
 using namespace std::string_literals;
@@ -66,11 +67,8 @@ std::vector<string> shellwords(const string & s)
     return res;
 }
 
-void mainWrapped(int argc, char * * argv)
+static void _main(int argc, char * * argv)
 {
-    initNix();
-    initGC();
-
     auto dryRun = false;
     auto runEnv = std::regex_search(argv[0], std::regex("nix-shell$"));
     auto pure = false;
@@ -305,6 +303,8 @@ void mainWrapped(int argc, char * * argv)
         }
     }
 
+    state->printStats();
+
     auto buildPaths = [&](const PathSet & paths) {
         /* Note: we do this even when !printMissing to efficiently
            fetch binary cache data. */
@@ -415,16 +415,20 @@ void mainWrapped(int argc, char * * argv)
                 "dontAddDisableDepTrack=1; "
                 "[ -e $stdenv/setup ] && source $stdenv/setup; "
                 "%3%"
+                "PATH=\"%4%:$PATH\"; "
+                "SHELL=%5%; "
                 "set +e; "
                 R"s([ -n "$PS1" ] && PS1='\n\[\033[1;32m\][nix-shell:\w]\$\[\033[0m\] '; )s"
                 "if [ \"$(type -t runHook)\" = function ]; then runHook shellHook; fi; "
                 "unset NIX_ENFORCE_PURITY; "
                 "shopt -u nullglob; "
-                "unset TZ; %4%"
-                "%5%",
+                "unset TZ; %6%"
+                "%7%",
                 (Path) tmpDir,
                 (pure ? "" : "p=$PATH; "),
                 (pure ? "" : "PATH=$PATH:$p; unset p; "),
+                dirOf(shell),
+                shell,
                 (getenv("TZ") ? (string("export TZ='") + getenv("TZ") + "'; ") : ""),
                 envCommand));
 
@@ -498,9 +502,5 @@ void mainWrapped(int argc, char * * argv)
     }
 }
 
-int main(int argc, char * * argv)
-{
-    return handleExceptions(argv[0], [&]() {
-        return mainWrapped(argc, argv);
-    });
-}
+static RegisterLegacyCommand s1("nix-build", _main);
+static RegisterLegacyCommand s2("nix-shell", _main);
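
Note: each of the old stand-alone binaries is converted from its own main() into a _main() that is registered under one or more command names via RegisterLegacyCommand, declared in legacy.hh (which is not part of this diff). A plausible sketch of such a registry, assuming a static name-to-function map; the exact declarations are an assumption:

    #include <functional>
    #include <map>
    #include <string>

    // Assumed shape of legacy.hh: static initialisers in each file add the
    // legacy entry point to a global map that the unified binary consults.
    typedef std::function<int(int, char **)> MainFunction;

    struct RegisterLegacyCommand
    {
        typedef std::map<std::string, MainFunction> Commands;

        static Commands & commands()
        {
            static Commands cs;     // constructed on first use, so safe to
            return cs;              // call from static initialisers elsewhere
        }

        RegisterLegacyCommand(const std::string & name, MainFunction fun)
        {
            commands()[name] = fun;
        }
    };

    // Usage, exactly as at the end of nix-build.cc above:
    //   static RegisterLegacyCommand s1("nix-build", _main);
    //   static RegisterLegacyCommand s2("nix-shell", _main);
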
diff --git a/src/nix-channel/local.mk b/src/nix-channel/local.mk
deleted file mode 100644
index c14e8c359ca0..000000000000
--- a/src/nix-channel/local.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-programs += nix-channel
-
-nix-channel_DIR := $(d)
-
-nix-channel_LIBS = libmain libformat libstore libutil
-
-nix-channel_SOURCES := $(d)/nix-channel.cc
diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc
index 55ebda438965..8b66cc7e314e 100755
--- a/src/nix-channel/nix-channel.cc
+++ b/src/nix-channel/nix-channel.cc
@@ -1,9 +1,11 @@
 #include "shared.hh"
 #include "globals.hh"
 #include "download.hh"
+#include "store-api.hh"
+#include "legacy.hh"
+
 #include <fcntl.h>
 #include <regex>
-#include "store-api.hh"
 #include <pwd.h>
 
 using namespace nix;
@@ -157,11 +159,9 @@ static void update(const StringSet & channelNames)
     replaceSymlink(profile, channelLink);
 }
 
-int main(int argc, char ** argv)
+static int _main(int argc, char ** argv)
 {
-    return handleExceptions(argv[0], [&]() {
-        initNix();
-
+    {
         // Figure out the name of the `.nix-channels' file to use
         auto home = getHome();
         channelsList = home + "/.nix-channels";
@@ -169,7 +169,7 @@ int main(int argc, char ** argv)
 
         // Figure out the name of the channels profile.
         ;
-        auto pw = getpwuid(getuid());
+        auto pw = getpwuid(geteuid());
         std::string name = pw ? pw->pw_name : getEnv("USER", "");
         if (name.empty())
             throw Error("cannot figure out user name");
@@ -255,5 +255,9 @@ int main(int argc, char ** argv)
                 runProgram(settings.nixBinDir + "/nix-env", false, envArgs);
                 break;
         }
-    });
+
+        return 0;
+    }
 }
+
+static RegisterLegacyCommand s1("nix-channel", _main);
diff --git a/src/nix-collect-garbage/local.mk b/src/nix-collect-garbage/local.mk
deleted file mode 100644
index 02d14cf62199..000000000000
--- a/src/nix-collect-garbage/local.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-programs += nix-collect-garbage
-
-nix-collect-garbage_DIR := $(d)
-
-nix-collect-garbage_SOURCES := $(d)/nix-collect-garbage.cc
-
-nix-collect-garbage_LIBS = libmain libstore libutil libformat
diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc
index 37fe22f48134..d4060ac937fc 100644
--- a/src/nix-collect-garbage/nix-collect-garbage.cc
+++ b/src/nix-collect-garbage/nix-collect-garbage.cc
@@ -2,6 +2,7 @@
 #include "profiles.hh"
 #include "shared.hh"
 #include "globals.hh"
+#include "legacy.hh"
 
 #include <iostream>
 #include <cerrno>
@@ -48,12 +49,10 @@ void removeOldGenerations(std::string dir)
     }
 }
 
-int main(int argc, char * * argv)
+static int _main(int argc, char * * argv)
 {
-    bool removeOld = false;
-
-    return handleExceptions(argv[0], [&]() {
-        initNix();
+    {
+        bool removeOld = false;
 
         GCOptions options;
 
@@ -90,5 +89,9 @@ int main(int argc, char * * argv)
             PrintFreed freed(true, results);
             store->collectGarbage(options, results);
         }
-    });
+
+        return 0;
+    }
 }
+
+static RegisterLegacyCommand s1("nix-collect-garbage", _main);
diff --git a/src/nix-copy-closure/local.mk b/src/nix-copy-closure/local.mk
deleted file mode 100644
index 5018ab975b44..000000000000
--- a/src/nix-copy-closure/local.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-programs += nix-copy-closure
-
-nix-copy-closure_DIR := $(d)
-
-nix-copy-closure_LIBS = libmain libformat libstore libutil
-
-nix-copy-closure_SOURCES := $(d)/nix-copy-closure.cc
diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc
index dfb1b8fc5dc4..fdcde8b076b5 100755
--- a/src/nix-copy-closure/nix-copy-closure.cc
+++ b/src/nix-copy-closure/nix-copy-closure.cc
@@ -1,13 +1,12 @@
 #include "shared.hh"
 #include "store-api.hh"
+#include "legacy.hh"
 
 using namespace nix;
 
-int main(int argc, char ** argv)
+static int _main(int argc, char ** argv)
 {
-    return handleExceptions(argv[0], [&]() {
-        initNix();
-
+    {
         auto gzip = false;
         auto toMode = true;
         auto includeOutputs = false;
@@ -61,5 +60,9 @@ int main(int argc, char ** argv)
         from->computeFSClosure(storePaths2, closure, false, includeOutputs);
 
         copyPaths(from, to, closure, NoRepair, NoCheckSigs, useSubstitutes);
-    });
+
+        return 0;
+    }
 }
+
+static RegisterLegacyCommand s1("nix-copy-closure", _main);
diff --git a/src/nix-daemon/local.mk b/src/nix-daemon/local.mk
deleted file mode 100644
index 5a4474465b3c..000000000000
--- a/src/nix-daemon/local.mk
+++ /dev/null
@@ -1,13 +0,0 @@
-programs += nix-daemon
-
-nix-daemon_DIR := $(d)
-
-nix-daemon_SOURCES := $(d)/nix-daemon.cc
-
-nix-daemon_LIBS = libmain libstore libutil libformat
-
-nix-daemon_LDFLAGS = -pthread
-
-ifeq ($(OS), SunOS)
-        nix-daemon_LDFLAGS += -lsocket
-endif
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
index 644fa6681de3..8368c3266142 100644
--- a/src/nix-daemon/nix-daemon.cc
+++ b/src/nix-daemon/nix-daemon.cc
@@ -9,6 +9,7 @@
 #include "monitor-fd.hh"
 #include "derivations.hh"
 #include "finally.hh"
+#include "legacy.hh"
 
 #include <algorithm>
 
@@ -557,7 +558,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
                     ;
                 else if (trusted
                     || name == settings.buildTimeout.name
-                    || name == "connect-timeout")
+                    || name == "connect-timeout"
+                    || (name == "builders" && value == ""))
                     settings.set(name, value);
                 else if (setSubstituters(settings.substituters))
                     ;
@@ -708,7 +710,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         logger->startWork();
 
         // FIXME: race if addToStore doesn't read source?
-        store.cast<Store>()->addToStore(info, *source, (RepairFlag) repair,
+        store->addToStore(info, *source, (RepairFlag) repair,
             dontCheckSigs ? NoCheckSigs : CheckSigs, nullptr);
 
         logger->stopWork();
@@ -1057,11 +1059,9 @@ static void daemonLoop(char * * argv)
 }
 
 
-int main(int argc, char * * argv)
+static int _main(int argc, char * * argv)
 {
-    return handleExceptions(argv[0], [&]() {
-        initNix();
-
+    {
         auto stdio = false;
 
         parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
@@ -1121,7 +1121,7 @@ int main(int argc, char * * argv)
                         if (res == -1)
                             throw SysError("splicing data from stdin to daemon socket");
                         else if (res == 0)
-                            return;
+                            return 0;
                     }
                 }
             } else {
@@ -1130,5 +1130,9 @@ int main(int argc, char * * argv)
         } else {
             daemonLoop(argv);
         }
-    });
+
+        return 0;
+    }
 }
+
+static RegisterLegacyCommand s1("nix-daemon", _main);
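
Note: the daemon now also accepts the "builders" setting from untrusted clients, but only when the value is empty, i.e. a client may disable remote building for its own operations without being able to point the daemon at arbitrary builders. The decision in the hunk above, sketched as an isolated predicate (the function and parameter names are illustrative):

    #include <set>
    #include <string>

    // Trusted clients may override any setting; untrusted ones only a small
    // whitelist (the build/connect timeouts), plus clearing 'builders'.
    bool clientMaySetSetting(bool trusted,
        const std::set<std::string> & whitelist,
        const std::string & name, const std::string & value)
    {
        if (trusted) return true;
        if (whitelist.count(name)) return true;
        if (name == "builders" && value.empty()) return true;
        return false;
    }
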
diff --git a/src/nix-env/local.mk b/src/nix-env/local.mk
deleted file mode 100644
index e80719cd76f7..000000000000
--- a/src/nix-env/local.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-programs += nix-env
-
-nix-env_DIR := $(d)
-
-nix-env_SOURCES := $(wildcard $(d)/*.cc)
-
-nix-env_LIBS = libexpr libmain libstore libutil libformat
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index a43b103f6ec6..56ed75daee44 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -13,6 +13,7 @@
 #include "json.hh"
 #include "value-to-json.hh"
 #include "xml-writer.hh"
+#include "legacy.hh"
 
 #include <cerrno>
 #include <ctime>
@@ -150,10 +151,8 @@ static void loadSourceExpr(EvalState & state, const Path & path, Value & v)
     if (stat(path.c_str(), &st) == -1)
         throw SysError(format("getting information about '%1%'") % path);
 
-    if (isNixExpr(path, st)) {
+    if (isNixExpr(path, st))
         state.evalFile(path, v);
-        return;
-    }
 
     /* The path is a directory.  Put the Nix expressions in the
        directory in a set, with the file name of each expression as
@@ -161,13 +160,15 @@ static void loadSourceExpr(EvalState & state, const Path & path, Value & v)
        set flat, not nested, to make it easier for a user to have a
        ~/.nix-defexpr directory that includes some system-wide
        directory). */
-    if (S_ISDIR(st.st_mode)) {
+    else if (S_ISDIR(st.st_mode)) {
         state.mkAttrs(v, 1024);
         state.mkList(*state.allocAttr(v, state.symbols.create("_combineChannels")), 0);
         StringSet attrs;
         getAllExprs(state, path, attrs, v);
         v.attrs->sort();
     }
+
+    else throw Error("path '%s' is not a directory or a Nix expression", path);
 }
 
 
@@ -1311,12 +1312,9 @@ static void opVersion(Globals & globals, Strings opFlags, Strings opArgs)
 }
 
 
-int main(int argc, char * * argv)
+static int _main(int argc, char * * argv)
 {
-    return handleExceptions(argv[0], [&]() {
-        initNix();
-        initGC();
-
+    {
         Strings opFlags, opArgs;
         Operation op = 0;
         RepairFlag repair = NoRepair;
@@ -1428,5 +1426,9 @@ int main(int argc, char * * argv)
         op(globals, opFlags, opArgs);
 
         globals.state->printStats();
-    });
+
+        return 0;
+    }
 }
+
+static RegisterLegacyCommand s1("nix-env", _main);
diff --git a/src/nix-instantiate/local.mk b/src/nix-instantiate/local.mk
deleted file mode 100644
index 7d1bc5ec9dfb..000000000000
--- a/src/nix-instantiate/local.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-programs += nix-instantiate
-
-nix-instantiate_DIR := $(d)
-
-nix-instantiate_SOURCES := $(d)/nix-instantiate.cc
-
-nix-instantiate_LIBS = libexpr libmain libstore libutil libformat
diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc
index eb6d34dd8219..a736caa8f056 100644
--- a/src/nix-instantiate/nix-instantiate.cc
+++ b/src/nix-instantiate/nix-instantiate.cc
@@ -9,6 +9,7 @@
 #include "util.hh"
 #include "store-api.hh"
 #include "common-eval-args.hh"
+#include "legacy.hh"
 
 #include <map>
 #include <iostream>
@@ -83,12 +84,9 @@ void processExpr(EvalState & state, const Strings & attrPaths,
 }
 
 
-int main(int argc, char * * argv)
+static int _main(int argc, char * * argv)
 {
-    return handleExceptions(argv[0], [&]() {
-        initNix();
-        initGC();
-
+    {
         Strings files;
         bool readStdin = false;
         bool fromArgs = false;
@@ -171,7 +169,7 @@ int main(int argc, char * * argv)
                 if (p == "") throw Error(format("unable to find '%1%'") % i);
                 std::cout << p << std::endl;
             }
-            return;
+            return 0;
         }
 
         if (readStdin) {
@@ -190,5 +188,9 @@ int main(int argc, char * * argv)
         }
 
         state->printStats();
-    });
+
+        return 0;
+    }
 }
+
+static RegisterLegacyCommand s1("nix-instantiate", _main);
diff --git a/src/nix-prefetch-url/local.mk b/src/nix-prefetch-url/local.mk
deleted file mode 100644
index 3e7735406af0..000000000000
--- a/src/nix-prefetch-url/local.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-programs += nix-prefetch-url
-
-nix-prefetch-url_DIR := $(d)
-
-nix-prefetch-url_SOURCES := $(d)/nix-prefetch-url.cc
-
-nix-prefetch-url_LIBS = libmain libexpr libstore libutil libformat
diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc
index a3b025723cf1..ddb724913214 100644
--- a/src/nix-prefetch-url/nix-prefetch-url.cc
+++ b/src/nix-prefetch-url/nix-prefetch-url.cc
@@ -6,6 +6,7 @@
 #include "eval-inline.hh"
 #include "common-eval-args.hh"
 #include "attr-path.hh"
+#include "legacy.hh"
 
 #include <iostream>
 
@@ -44,12 +45,9 @@ string resolveMirrorUri(EvalState & state, string uri)
 }
 
 
-int main(int argc, char * * argv)
+static int _main(int argc, char * * argv)
 {
-    return handleExceptions(argv[0], [&]() {
-        initNix();
-        initGC();
-
+    {
         HashType ht = htSHA256;
         std::vector<string> args;
         bool printPath = getEnv("PRINT_PATH") != "";
@@ -221,5 +219,9 @@ int main(int argc, char * * argv)
         std::cout << printHash16or32(hash) << std::endl;
         if (printPath)
             std::cout << storePath << std::endl;
-    });
+
+        return 0;
+    }
 }
+
+static RegisterLegacyCommand s1("nix-prefetch-url", _main);
diff --git a/src/nix-store/graphml.cc b/src/nix-store/graphml.cc
new file mode 100644
index 000000000000..670fbe227a4c
--- /dev/null
+++ b/src/nix-store/graphml.cc
@@ -0,0 +1,90 @@
+#include "graphml.hh"
+#include "util.hh"
+#include "store-api.hh"
+#include "derivations.hh"
+
+#include <iostream>
+
+
+using std::cout;
+
+namespace nix {
+
+
+static inline const string & xmlQuote(const string & s)
+{
+    // Luckily, store paths shouldn't contain any character that needs to be
+    // quoted.
+    return s;
+}
+
+
+static string symbolicName(const string & path)
+{
+    string p = baseNameOf(path);
+    return string(p, p.find('-') + 1);
+}
+
+
+static string makeEdge(const string & src, const string & dst)
+{
+    return fmt("  <edge source=\"%1%\" target=\"%2%\"/>\n",
+        xmlQuote(src), xmlQuote(dst));
+}
+
+
+static string makeNode(const ValidPathInfo & info)
+{
+    return fmt(
+        "  <node id=\"%1%\">\n"
+        "    <data key=\"narSize\">%2%</data>\n"
+        "    <data key=\"name\">%3%</data>\n"
+        "    <data key=\"type\">%4%</data>\n"
+        "  </node>\n",
+        info.path,
+        info.narSize,
+        symbolicName(info.path),
+        (isDerivation(info.path) ? "derivation" : "output-path"));
+}
+
+
+void printGraphML(ref<Store> store, const PathSet & roots)
+{
+    PathSet workList(roots);
+    PathSet doneSet;
+    std::pair<PathSet::iterator,bool> ret;
+
+    cout << "<?xml version='1.0' encoding='utf-8'?>\n"
+         << "<graphml xmlns='http://graphml.graphdrawing.org/xmlns'\n"
+         << "    xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'\n"
+         << "    xsi:schemaLocation='http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd'>\n"
+         << "<key id='narSize' for='node' attr.name='narSize' attr.type='int'/>"
+         << "<key id='name' for='node' attr.name='name' attr.type='string'/>"
+         << "<key id='type' for='node' attr.name='type' attr.type='string'/>"
+         << "<graph id='G' edgedefault='directed'>\n";
+
+    while (!workList.empty()) {
+        Path path = *(workList.begin());
+        workList.erase(path);
+
+        ret = doneSet.insert(path);
+        if (ret.second == false) continue;
+
+        ValidPathInfo info = *(store->queryPathInfo(path));
+        cout << makeNode(info);
+
+        for (auto & p : store->queryPathInfo(path)->references) {
+            if (p != path) {
+                workList.insert(p);
+                cout << makeEdge(path, p);
+            }
+        }
+
+    }
+
+    cout << "</graph>\n";
+    cout << "</graphml>\n";
+}
+
+
+}
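
Note: for a store path, makeNode() and makeEdge() above emit elements of roughly the following shape (path, size and reference are illustrative values, not taken from a real store):

      <node id="/nix/store/…-hello-2.10">
        <data key="narSize">205824</data>
        <data key="name">hello-2.10</data>
        <data key="type">output-path</data>
      </node>
      <edge source="/nix/store/…-hello-2.10" target="/nix/store/…-glibc-2.27"/>
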
diff --git a/src/nix-store/xmlgraph.hh b/src/nix-store/graphml.hh
index a6e7d4e2805a..b78df1e49a67 100644
--- a/src/nix-store/xmlgraph.hh
+++ b/src/nix-store/graphml.hh
@@ -6,6 +6,6 @@ namespace nix {
 
 class Store;
 
-void printXmlGraph(ref<Store> store, const PathSet & roots);
+void printGraphML(ref<Store> store, const PathSet & roots);
 
 }
diff --git a/src/nix-store/local.mk b/src/nix-store/local.mk
deleted file mode 100644
index ade0b233adf3..000000000000
--- a/src/nix-store/local.mk
+++ /dev/null
@@ -1,9 +0,0 @@
-programs += nix-store
-
-nix-store_DIR := $(d)
-
-nix-store_SOURCES := $(wildcard $(d)/*.cc)
-
-nix-store_LIBS = libmain libstore libutil libformat
-
-nix-store_LDFLAGS = -lbz2 -pthread $(SODIUM_LIBS)
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index fe68f681ae28..a9ad14762e62 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -8,7 +8,8 @@
 #include "shared.hh"
 #include "util.hh"
 #include "worker-protocol.hh"
-#include "xmlgraph.hh"
+#include "graphml.hh"
+#include "legacy.hh"
 
 #include <iostream>
 #include <algorithm>
@@ -273,7 +274,7 @@ static void opQuery(Strings opFlags, Strings opArgs)
     enum QueryType
         { qDefault, qOutputs, qRequisites, qReferences, qReferrers
         , qReferrersClosure, qDeriver, qBinding, qHash, qSize
-        , qTree, qGraph, qXml, qResolve, qRoots };
+        , qTree, qGraph, qGraphML, qResolve, qRoots };
     QueryType query = qDefault;
     bool useOutput = false;
     bool includeOutputs = false;
@@ -299,7 +300,7 @@ static void opQuery(Strings opFlags, Strings opArgs)
         else if (i == "--size") query = qSize;
         else if (i == "--tree") query = qTree;
         else if (i == "--graph") query = qGraph;
-        else if (i == "--xml") query = qXml;
+        else if (i == "--graphml") query = qGraphML;
         else if (i == "--resolve") query = qResolve;
         else if (i == "--roots") query = qRoots;
         else if (i == "--use-output" || i == "-u") useOutput = true;
@@ -403,13 +404,13 @@ static void opQuery(Strings opFlags, Strings opArgs)
             break;
         }
 
-        case qXml: {
+        case qGraphML: {
             PathSet roots;
             for (auto & i : opArgs) {
                 PathSet paths = maybeUseOutputs(store->followLinksToStorePath(i), useOutput, forceRealise);
                 roots.insert(paths.begin(), paths.end());
             }
-            printXmlGraph(ref<Store>(store), roots);
+            printGraphML(ref<Store>(store), roots);
             break;
         }
 
@@ -993,11 +994,9 @@ static void opVersion(Strings opFlags, Strings opArgs)
 /* Scan the arguments; find the operation, set global flags, put all
    other flags in a list, and put all other arguments in another
    list. */
-int main(int argc, char * * argv)
+static int _main(int argc, char * * argv)
 {
-    return handleExceptions(argv[0], [&]() {
-        initNix();
-
+    {
         Strings opFlags, opArgs;
         Operation op = 0;
 
@@ -1084,5 +1083,9 @@ int main(int argc, char * * argv)
             store = openStore();
 
         op(opFlags, opArgs);
-    });
+
+        return 0;
+    }
 }
+
+static RegisterLegacyCommand s1("nix-store", _main);
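
Note: with the query renamed from --xml to --graphml, the reference graph of a store path can be exported along these lines (the nixpkgs hello attribute is only an example):

    $ nix-store --query --graphml $(nix-build '<nixpkgs>' -A hello --no-out-link) > hello.graphml
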
diff --git a/src/nix-store/xmlgraph.cc b/src/nix-store/xmlgraph.cc
deleted file mode 100644
index 0f7be7f7a02d..000000000000
--- a/src/nix-store/xmlgraph.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-#include "xmlgraph.hh"
-#include "util.hh"
-#include "store-api.hh"
-
-#include <iostream>
-
-
-using std::cout;
-
-namespace nix {
-
-
-static inline const string & xmlQuote(const string & s)
-{
-    // Luckily, store paths shouldn't contain any character that needs to be
-    // quoted.
-    return s;
-}
-
-
-static string makeEdge(const string & src, const string & dst)
-{
-    format f = format("  <edge src=\"%1%\" dst=\"%2%\"/>\n")
-      % xmlQuote(src) % xmlQuote(dst);
-    return f.str();
-}
-
-
-static string makeNode(const string & id)
-{
-    format f = format("  <node name=\"%1%\"/>\n") % xmlQuote(id);
-    return f.str();
-}
-
-
-void printXmlGraph(ref<Store> store, const PathSet & roots)
-{
-    PathSet workList(roots);
-    PathSet doneSet;
-
-    cout << "<?xml version='1.0' encoding='utf-8'?>\n"
-         << "<nix>\n";
-
-    while (!workList.empty()) {
-        Path path = *(workList.begin());
-        workList.erase(path);
-
-        if (doneSet.find(path) != doneSet.end()) continue;
-        doneSet.insert(path);
-
-        cout << makeNode(path);
-
-        for (auto & p : store->queryPathInfo(path)->references) {
-            if (p != path) {
-                workList.insert(p);
-                cout << makeEdge(p, path);
-            }
-        }
-
-    }
-
-    cout << "</nix>\n";
-}
-
-
-}
diff --git a/src/nix/copy.cc b/src/nix/copy.cc
index 91711c8b46da..96bd453d87b4 100644
--- a/src/nix/copy.cc
+++ b/src/nix/copy.cc
@@ -69,12 +69,12 @@ struct CmdCopy : StorePathsCommand
             },
 #ifdef ENABLE_S3
             Example{
-                "To populate the current folder build output to a S3 binary cache:",
-                "nix copy --to s3://my-bucket?region=eu-west-1"
+                "To copy Hello to an S3 binary cache:",
+                "nix copy --to s3://my-bucket?region=eu-west-1 nixpkgs.hello"
             },
             Example{
-                "To populate the current folder build output to an S3-compatible binary cache:",
-                "nix copy --to s3://my-bucket?region=eu-west-1&endpoint=example.com"
+                "To copy Hello to an S3-compatible binary cache:",
+                "nix copy --to s3://my-bucket?region=eu-west-1&endpoint=example.com nixpkgs.hello"
             },
 #endif
         };
diff --git a/src/nix/local.mk b/src/nix/local.mk
index f76da194467c..bdcca33d2a6f 100644
--- a/src/nix/local.mk
+++ b/src/nix/local.mk
@@ -2,10 +2,25 @@ programs += nix
 
 nix_DIR := $(d)
 
-nix_SOURCES := $(wildcard $(d)/*.cc) $(wildcard src/linenoise/*.cpp)
+nix_SOURCES := \
+  $(wildcard $(d)/*.cc) \
+  $(wildcard src/linenoise/*.cpp) \
+  $(wildcard src/build-remote/*.cc) \
+  $(wildcard src/nix-build/*.cc) \
+  $(wildcard src/nix-channel/*.cc) \
+  $(wildcard src/nix-collect-garbage/*.cc) \
+  $(wildcard src/nix-copy-closure/*.cc) \
+  $(wildcard src/nix-daemon/*.cc) \
+  $(wildcard src/nix-env/*.cc) \
+  $(wildcard src/nix-instantiate/*.cc) \
+  $(wildcard src/nix-prefetch-url/*.cc) \
+  $(wildcard src/nix-store/*.cc) \
 
 nix_LIBS = libexpr libmain libstore libutil libformat
 
-nix_LDFLAGS = -pthread
+nix_LDFLAGS = -pthread $(SODIUM_LIBS)
 
-$(eval $(call install-symlink, nix, $(bindir)/nix-hash))
+$(foreach name, \
+  nix-build nix-channel nix-collect-garbage nix-copy-closure nix-daemon nix-env nix-hash nix-instantiate nix-prefetch-url nix-shell nix-store, \
+  $(eval $(call install-symlink, nix, $(bindir)/$(name))))
+$(eval $(call install-symlink, $(bindir)/nix, $(libexecdir)/nix/build-remote))
diff --git a/src/nix/main.cc b/src/nix/main.cc
index 69791e223c22..64c1dc35787c 100644
--- a/src/nix/main.cc
+++ b/src/nix/main.cc
@@ -67,9 +67,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
 
 void mainWrapped(int argc, char * * argv)
 {
-    verbosity = lvlError;
-    settings.verboseBuild = false;
-
     /* The chroot helper needs to be run before any threads have been
        started. */
     if (argc > 0 && argv[0] == chrootHelperName) {
@@ -88,6 +85,9 @@ void mainWrapped(int argc, char * * argv)
         if (legacy) return legacy(argc, argv);
     }
 
+    verbosity = lvlError;
+    settings.verboseBuild = false;
+
     NixArgs args;
 
     args.parseCmdline(argvToStrings(argc, argv));
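
Note: mainWrapped() now checks whether it was invoked under a legacy name (via the symlinks installed by src/nix/local.mk) before setting the default verbosity, so the old commands keep their own defaults. The dispatch step, sketched as a self-contained function; dispatchLegacy and newCli are hypothetical names, not the Nix API:

    #include <functional>
    #include <map>
    #include <string>

    using MainFunction = std::function<int(int, char **)>;

    // Look up the base name of argv[0] in a name -> entry-point map; if it
    // matches a registered legacy command, run it, otherwise fall through
    // to the new-style CLI.
    int dispatchLegacy(const std::map<std::string, MainFunction> & commands,
        int argc, char ** argv, MainFunction newCli)
    {
        std::string programName = argv[0];
        auto slash = programName.rfind('/');
        if (slash != std::string::npos) programName.erase(0, slash + 1);

        auto i = commands.find(programName);
        if (i != commands.end())
            return i->second(argc, argv);   // e.g. invoked as 'nix-build' via symlink

        return newCli(argc, argv);          // invoked as plain 'nix'
    }
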
diff --git a/src/nix/repl.cc b/src/nix/repl.cc
index b71e6f905f23..1bbe256b2d8b 100644
--- a/src/nix/repl.cc
+++ b/src/nix/repl.cc
@@ -31,6 +31,7 @@ struct NixRepl
 {
     string curDir;
     EvalState state;
+    Bindings * autoArgs;
 
     Strings loadedFiles;
 
@@ -446,8 +447,7 @@ void NixRepl::loadFile(const Path & path)
     loadedFiles.push_back(path);
     Value v, v2;
     state.evalFile(lookupFileArg(state, path), v);
-    Bindings & bindings(*state.allocBindings(0));
-    state.autoCallFunction(bindings, v, v2);
+    state.autoCallFunction(*autoArgs, v, v2);
     addAttrsToScope(v2);
 }
 
@@ -699,6 +699,7 @@ struct CmdRepl : StoreCommand, MixEvalArgs
     void run(ref<Store> store) override
     {
         auto repl = std::make_unique<NixRepl>(searchPath, openStore());
+        repl->autoArgs = getAutoArgs(repl->state);
         repl->mainLoop(files);
     }
 };
diff --git a/src/nix/search.cc b/src/nix/search.cc
index 4cb1efa7955b..e086de2260a6 100644
--- a/src/nix/search.cc
+++ b/src/nix/search.cc
@@ -173,10 +173,12 @@ struct CmdSearch : SourceExprCommand, MixJSON
                             jsonElem.attr("description", description);
 
                         } else {
+                            auto name = hilite(parsed.name, nameMatch, "\e[0;2m")
+                                + std::string(parsed.fullName, parsed.name.length());
                             results[attrPath] = fmt(
                                 "* %s (%s)\n  %s\n",
                                 wrap("\e[0;1m", hilite(attrPath, attrPathMatch, "\e[0;1m")),
-                                wrap("\e[0;2m", hilite(parsed.fullName, nameMatch, "\e[0;2m")),
+                                wrap("\e[0;2m", hilite(name, nameMatch, "\e[0;2m")),
                                 hilite(description, descriptionMatch, ANSI_NORMAL));
                         }
                     }
diff --git a/src/nix/verify.cc b/src/nix/verify.cc
index 6540208a8a2c..7ef571561a0e 100644
--- a/src/nix/verify.cc
+++ b/src/nix/verify.cc
@@ -120,7 +120,7 @@ struct CmdVerify : StorePathsCommand
                             for (auto sig : sigs) {
                                 if (sigsSeen.count(sig)) continue;
                                 sigsSeen.insert(sig);
-                                if (info->checkSignature(publicKeys, sig))
+                                if (validSigs < ValidPathInfo::maxSigs && info->checkSignature(publicKeys, sig))
                                     validSigs++;
                             }
                         };
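
Note: the extra `validSigs < ValidPathInfo::maxSigs` guard stops the counter from being incremented past the ceiling value, presumably because validSigs is already set to maxSigs for content-addressed or locally trusted paths, so adding real signatures on top could otherwise wrap around (the new signing.sh test below exercises exactly that case). As a generic pattern, a saturating increment looks like this (a sketch, not the Nix type):

    #include <cstddef>
    #include <limits>

    // Increment a counter but never past 'cap'; with the default cap this
    // also rules out wrap-around on overflow.
    inline size_t saturatingIncrement(size_t n,
        size_t cap = std::numeric_limits<size_t>::max())
    {
        return n < cap ? n + 1 : cap;
    }
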
diff --git a/tests/build-remote.sh b/tests/build-remote.sh
index 9bca0f4a3856..ddd68f327a15 100644
--- a/tests/build-remote.sh
+++ b/tests/build-remote.sh
@@ -11,7 +11,8 @@ rm -rf $TEST_ROOT/store0 $TEST_ROOT/store1
 
 nix build -f build-hook.nix -o $TEST_ROOT/result --max-jobs 0 \
   --sandbox-paths /nix/store --sandbox-build-dir /build-tmp \
-  --builders "$TEST_ROOT/store0; $TEST_ROOT/store1 - - 1 1 foo"
+  --builders "$TEST_ROOT/store0; $TEST_ROOT/store1 - - 1 1 foo" \
+  --system-features foo
 
 outPath=$TEST_ROOT/result
 
diff --git a/tests/check-reqs.nix b/tests/check-reqs.nix
index 41436cb48e08..47b5b3d9c723 100644
--- a/tests/check-reqs.nix
+++ b/tests/check-reqs.nix
@@ -33,7 +33,7 @@ rec {
   };
 
   # When specifying all the requisites, the build succeeds.
-  test1 = makeTest 1 [ dep1 dep2 deps ];
+  test1 = makeTest 1 [ "out" dep1 dep2 deps ];
 
   # But missing anything it fails.
   test2 = makeTest 2 [ dep2 deps ];
diff --git a/tests/remote-builds.nix b/tests/remote-builds.nix
index d7a4b21989e5..b867f13b4995 100644
--- a/tests/remote-builds.nix
+++ b/tests/remote-builds.nix
@@ -8,8 +8,8 @@ makeTest (
 
 let
 
-  # The configuration of the build slaves.
-  slave =
+  # The configuration of the remote builders.
+  builder =
     { config, pkgs, ... }:
     { services.openssh.enable = true;
       virtualisation.writableStore = true;
@@ -36,21 +36,21 @@ in
 {
 
   nodes =
-    { slave1 = slave;
-      slave2 = slave;
+    { builder1 = builder;
+      builder2 = builder;
 
       client =
         { config, pkgs, ... }:
         { nix.maxJobs = 0; # force remote building
           nix.distributedBuilds = true;
           nix.buildMachines =
-            [ { hostName = "slave1";
+            [ { hostName = "builder1";
                 sshUser = "root";
                 sshKey = "/root/.ssh/id_ed25519";
                 system = "i686-linux";
                 maxJobs = 1;
               }
-              { hostName = "slave2";
+              { hostName = "builder2";
                 sshUser = "root";
                 sshKey = "/root/.ssh/id_ed25519";
                 system = "i686-linux";
@@ -75,33 +75,33 @@ in
       $client->copyFileFromHost("key", "/root/.ssh/id_ed25519");
       $client->succeed("chmod 600 /root/.ssh/id_ed25519");
 
-      # Install the SSH key on the slaves.
+      # Install the SSH key on the builders.
       $client->waitForUnit("network.target");
-      foreach my $slave ($slave1, $slave2) {
-          $slave->succeed("mkdir -p -m 700 /root/.ssh");
-          $slave->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys");
-          $slave->waitForUnit("sshd");
-          $client->succeed("ssh -o StrictHostKeyChecking=no " . $slave->name() . " 'echo hello world'");
+      foreach my $builder ($builder1, $builder2) {
+          $builder->succeed("mkdir -p -m 700 /root/.ssh");
+          $builder->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys");
+          $builder->waitForUnit("sshd");
+          $client->succeed("ssh -o StrictHostKeyChecking=no " . $builder->name() . " 'echo hello world'");
       }
 
-      # Perform a build and check that it was performed on the slave.
+      # Perform a build and check that it was performed on the builder.
       my $out = $client->succeed(
         "nix-build ${expr nodes.client.config 1} 2> build-output",
         "grep -q Hello build-output"
       );
-      $slave1->succeed("test -e $out");
+      $builder1->succeed("test -e $out");
 
       # And a parallel build.
       my ($out1, $out2) = split /\s/,
           $client->succeed('nix-store -r $(nix-instantiate ${expr nodes.client.config 2})\!out $(nix-instantiate ${expr nodes.client.config 3})\!out');
-      $slave1->succeed("test -e $out1 -o -e $out2");
-      $slave2->succeed("test -e $out1 -o -e $out2");
+      $builder1->succeed("test -e $out1 -o -e $out2");
+      $builder2->succeed("test -e $out1 -o -e $out2");
 
       # And a failing build.
       $client->fail("nix-build ${expr nodes.client.config 5}");
 
-      # Test whether the build hook automatically skips unavailable slaves.
-      $slave1->block;
+      # Test whether the build hook automatically skips unavailable builders.
+      $builder1->block;
       $client->succeed("nix-build ${expr nodes.client.config 4}");
     '';
 
diff --git a/tests/signing.sh b/tests/signing.sh
index 46929639199d..9e29e3fbf063 100644
--- a/tests/signing.sh
+++ b/tests/signing.sh
@@ -62,6 +62,10 @@ outPathCA=$(IMPURE_VAR1=foo IMPURE_VAR2=bar nix-build ./fixed.nix -A good.0 --no
 nix verify $outPathCA
 nix verify $outPathCA --sigs-needed 1000
 
+# Check that signing a content-addressed path doesn't overflow validSigs
+nix sign-paths --key-file $TEST_ROOT/sk1 $outPathCA
+nix verify -r $outPathCA --sigs-needed 1000 --trusted-public-keys $pk1
+
 # Copy to a binary cache.
 nix copy --to file://$cacheDir $outPath2
 
diff --git a/version b/version
index 42f7d2336ea8..616187889b6f 100644
--- a/version
+++ b/version
@@ -1 +1 @@
-2.1
\ No newline at end of file
+2.2
\ No newline at end of file