-rw-r--r--  Makefile | 1
-rw-r--r--  Makefile.config.in | 3
-rw-r--r--  configure.ac | 29
-rw-r--r--  corepkgs/buildenv.nix | 15
-rw-r--r--  corepkgs/fetchurl.nix | 15
-rw-r--r--  doc/manual/command-ref/conf-file.xml | 12
-rw-r--r--  doc/manual/command-ref/nix-copy-closure.xml | 2
-rw-r--r--  doc/manual/command-ref/nix-prefetch-url.xml | 55
-rw-r--r--  doc/manual/installation/installing-binary.xml | 22
-rw-r--r--  doc/manual/installation/prerequisites-source.xml | 4
-rw-r--r--  doc/manual/release-notes/rl-1.8.xml | 2
-rw-r--r--  local.mk | 3
-rw-r--r--  misc/emacs/nix-mode.el | 20
-rw-r--r--  mk/lib.mk | 4
-rw-r--r--  perl/lib/Nix/Store.pm | 1
-rw-r--r--  perl/lib/Nix/Store.xs | 10
-rw-r--r--  release.nix | 8
-rw-r--r--  scripts/download-from-binary-cache.pl.in | 7
-rw-r--r--  scripts/local.mk | 3
-rwxr-xr-x  scripts/nix-copy-closure.in | 1
-rwxr-xr-x  scripts/nix-prefetch-url.in | 132
-rwxr-xr-x  scripts/resolve-system-dependencies.pl.in | 122
-rw-r--r--  src/libexpr/eval-inline.hh | 1
-rw-r--r--  src/libexpr/eval.cc | 10
-rw-r--r--  src/libexpr/eval.hh | 2
-rw-r--r--  src/libexpr/local.mk | 5
-rw-r--r--  src/libstore/build.cc | 179
-rw-r--r--  src/libstore/builtins.cc | 32
-rw-r--r--  src/libstore/download.cc | 82
-rw-r--r--  src/libstore/download.hh | 9
-rw-r--r--  src/libstore/globals.cc | 5
-rw-r--r--  src/libstore/local.mk | 3
-rw-r--r--  src/libstore/optimise-store.cc | 11
-rw-r--r--  src/libstore/sandbox-defaults.sb.in | 63
-rw-r--r--  src/libstore/store-api.hh | 17
-rw-r--r--  src/libutil/archive.cc | 2
-rw-r--r--  src/libutil/compression.cc | 46
-rw-r--r--  src/libutil/compression.hh | 9
-rw-r--r--  src/libutil/hash.cc | 15
-rw-r--r--  src/libutil/hash.hh | 5
-rw-r--r--  src/libutil/local.mk | 6
-rw-r--r--  src/libutil/md32_common.h | 620
-rw-r--r--  src/libutil/md5.c | 365
-rw-r--r--  src/libutil/md5.h | 82
-rw-r--r--  src/libutil/sha1.c | 369
-rw-r--r--  src/libutil/sha1.h | 28
-rw-r--r--  src/libutil/sha256.c | 238
-rw-r--r--  src/libutil/sha256.h | 35
-rw-r--r--  src/libutil/util.cc | 10
-rw-r--r--  src/libutil/util.hh | 14
-rw-r--r--  src/nix-collect-garbage/nix-collect-garbage.cc | 1
-rw-r--r--  src/nix-daemon/nix-daemon.cc | 20
-rw-r--r--  src/nix-env/nix-env.cc | 12
-rw-r--r--  src/nix-instantiate/nix-instantiate.cc | 2
-rw-r--r--  src/nix-prefetch-url/local.mk | 7
-rw-r--r--  src/nix-prefetch-url/nix-prefetch-url.cc | 210
-rw-r--r--  src/nix-store/nix-store.cc | 8
-rw-r--r--  src/nix-store/serve-protocol.hh | 2
-rw-r--r--  tests/fetchurl.nix | 6
-rw-r--r--  tests/fetchurl.sh | 38
-rw-r--r--  tests/fixed.sh | 9
-rw-r--r--  tests/hash.sh | 19
-rw-r--r--  tests/lang/eval-okay-hash.exp | 2
-rw-r--r--  tests/lang/eval-okay-hash.nix | 5
64 files changed, 996 insertions, 2079 deletions
diff --git a/Makefile b/Makefile
index fe2e88a995aa..3a204de888cc 100644
--- a/Makefile
+++ b/Makefile
@@ -13,6 +13,7 @@ makefiles = \
   src/nix-collect-garbage/local.mk \
   src/download-via-ssh/local.mk \
   src/nix-log2xml/local.mk \
+  src/nix-prefetch-url/local.mk \
   src/bsdiff-4.3/local.mk \
   perl/local.mk \
   scripts/local.mk \
diff --git a/Makefile.config.in b/Makefile.config.in
index 29ccc1b146f1..f0de4da37855 100644
--- a/Makefile.config.in
+++ b/Makefile.config.in
@@ -3,12 +3,13 @@ CC = @CC@
 CFLAGS = @CFLAGS@
 CXX = @CXX@
 CXXFLAGS = @CXXFLAGS@
-HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SODIUM = @HAVE_SODIUM@
+LIBCURL_LIBS = @LIBCURL_LIBS@
 OPENSSL_LIBS = @OPENSSL_LIBS@
 PACKAGE_NAME = @PACKAGE_NAME@
 PACKAGE_VERSION = @PACKAGE_VERSION@
 SODIUM_LIBS = @SODIUM_LIBS@
+SQLITE3_LIBS = @SQLITE3_LIBS@
 bash = @bash@
 bindir = @bindir@
 bsddiff_compat_include = @bsddiff_compat_include@
diff --git a/configure.ac b/configure.ac
index 1d4b7d3b8995..88e64573da88 100644
--- a/configure.ac
+++ b/configure.ac
@@ -183,16 +183,12 @@ AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
 AC_SUBST(storedir)
 
 
-# Look for OpenSSL, an optional dependency.
+# Look for OpenSSL, a required dependency.
 AC_PATH_PROG(openssl, openssl, openssl) # if not found, call openssl in $PATH
 AC_SUBST(openssl)
 AC_DEFINE_UNQUOTED(OPENSSL_PATH, ["$openssl"], [Path of the OpenSSL binary])
 
-PKG_CHECK_MODULES([OPENSSL], [libcrypto],
-  [AC_DEFINE([HAVE_OPENSSL], [1], [Whether to use OpenSSL.])
-   CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"
-   have_openssl=1], [have_openssl=])
-AC_SUBST(HAVE_OPENSSL, [$have_openssl])
+PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"])
 
 
 # Look for libbz2, a required dependency.
@@ -218,6 +214,10 @@ PKG_CHECK_MODULES([SODIUM], [libsodium],
 AC_SUBST(HAVE_SODIUM, [$have_sodium])
 
 
+# Look for liblzma, a required dependency.
+PKG_CHECK_MODULES([LIBLZMA], [liblzma], [CXXFLAGS="$LIBLZMA_CFLAGS $CXXFLAGS"])
+
+
 # Whether to use the Boehm garbage collector.
 AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc],
   [enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=no]]),
@@ -261,6 +261,23 @@ AC_MSG_RESULT(yes)
 AC_SUBST(perlFlags)
 
 
+# Check for otool, an optional dependency on Darwin.
+AC_PATH_PROG(otool, otool)
+AC_MSG_CHECKING([that otool works])
+case $host_os in
+  darwin*)
+    if test -z "$otool" || ! $otool --version 2>/dev/null; then
+      AC_MSG_RESULT(no)
+      AC_MSG_ERROR([Can't get version from otool; do you need to install developer tools?])
+    fi
+    AC_MSG_RESULT(yes)
+    ;;
+  *)
+    AC_MSG_RESULT(not needed)
+    ;;
+esac
+
+
 # Whether to build the Perl bindings
 AC_MSG_CHECKING([whether to build the Perl bindings])
 AC_ARG_ENABLE(perl-bindings, AC_HELP_STRING([--enable-perl-bindings],
diff --git a/corepkgs/buildenv.nix b/corepkgs/buildenv.nix
index 5bf7b4e56042..ab1ce13f2cf6 100644
--- a/corepkgs/buildenv.nix
+++ b/corepkgs/buildenv.nix
@@ -23,5 +23,20 @@ derivation {
   # network traffic, so don't do that.
   preferLocalBuild = true;
 
+  __sandboxProfile = ''
+    (allow sysctl-read)
+    (allow file-read*
+           (literal "/usr/lib/libSystem.dylib")
+           (literal "/usr/lib/libSystem.B.dylib")
+           (literal "/usr/lib/libobjc.A.dylib")
+           (literal "/usr/lib/libobjc.dylib")
+           (literal "/usr/lib/libauto.dylib")
+           (literal "/usr/lib/libc++abi.dylib")
+           (literal "/usr/lib/libc++.1.dylib")
+           (literal "/usr/lib/libDiagnosticMessagesClient.dylib")
+           (subpath "/usr/lib/system")
+           (subpath "/dev"))
+  '';
+
   inherit chrootDeps;
 }
diff --git a/corepkgs/fetchurl.nix b/corepkgs/fetchurl.nix
index 9ecb2225ba67..5e0ad9da3c68 100644
--- a/corepkgs/fetchurl.nix
+++ b/corepkgs/fetchurl.nix
@@ -1,12 +1,19 @@
 with import <nix/config.nix>;
 
-{system ? builtins.currentSystem, url, outputHash ? "", outputHashAlgo ? "", md5 ? "", sha1 ? "", sha256 ? "", executable ? false}:
+{ system ? builtins.currentSystem
+, url
+, outputHash ? ""
+, outputHashAlgo ? ""
+, md5 ? "", sha1 ? "", sha256 ? ""
+, executable ? false
+, unpack ? false
+, name ? baseNameOf (toString url)
+}:
 
 assert (outputHash != "" && outputHashAlgo != "")
     || md5 != "" || sha1 != "" || sha256 != "";
 
 derivation {
-  name = baseNameOf (toString url);
   builder = "builtin:fetchurl";
 
   # New-style output content requirements.
@@ -14,9 +21,9 @@ derivation {
       if sha256 != "" then "sha256" else if sha1 != "" then "sha1" else "md5";
   outputHash = if outputHash != "" then outputHash else
       if sha256 != "" then sha256 else if sha1 != "" then sha1 else md5;
-  outputHashMode = if executable then "recursive" else "flat";
+  outputHashMode = if unpack || executable then "recursive" else "flat";
 
-  inherit system url executable;
+  inherit name system url executable unpack;
 
   # No need to double the amount of network traffic
   preferLocalBuild = true;
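
(Illustration only, not part of this patch: with the new parameters, the
<nix/fetchurl.nix> expression could be called roughly as sketched below. The
URL, hash, and name are placeholders.)

    import <nix/fetchurl.nix> {
      url = "https://example.org/foo-1.0.tar.xz";              # placeholder URL
      sha256 = "...base-32 sha256 of the unpacked result...";  # placeholder hash
      unpack = true;     # unpack the tarball; outputHashMode becomes "recursive"
      name = "foo-1.0";  # override the store file name derived from the URL
    }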
diff --git a/doc/manual/command-ref/conf-file.xml b/doc/manual/command-ref/conf-file.xml
index a23223818ea5..1ad5380497a1 100644
--- a/doc/manual/command-ref/conf-file.xml
+++ b/doc/manual/command-ref/conf-file.xml
@@ -618,6 +618,18 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
   </varlistentry>
 
 
+  <varlistentry xml:id="conf-build-repeat"><term><literal>build-repeat</literal></term>
+
+    <listitem><para>How many times to repeat builds to check whether
+    they are deterministic. The default value is 0. If the value is
+    non-zero, every build is repeated the specified number of
+    times. If the contents of any of the runs differ from the
+    previous ones, the build is rejected and the resulting store paths
+    are not registered as “valid” in Nix’s database.</para></listitem>
+
+  </varlistentry>
+
+
 </variablelist>
 
 </para>
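
(Usage sketch, not part of the patch: the new setting could be enabled in
nix.conf or per invocation with --option; the value 1 below is arbitrary.)

    build-repeat = 1                                            # in nix.conf
    $ nix-build '<nixpkgs>' -A hello --option build-repeat 1    # per invocation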
diff --git a/doc/manual/command-ref/nix-copy-closure.xml b/doc/manual/command-ref/nix-copy-closure.xml
index 6d070c970474..5848b84a0173 100644
--- a/doc/manual/command-ref/nix-copy-closure.xml
+++ b/doc/manual/command-ref/nix-copy-closure.xml
@@ -43,7 +43,7 @@
 
 <para><command>nix-copy-closure</command> gives you an easy and
 efficient way to exchange software between machines.  Given one or
-more Nix store paths <replaceable>paths</replaceable> on the local
+more Nix store <replaceable>paths</replaceable> on the local
 machine, <command>nix-copy-closure</command> computes the closure of
 those paths (i.e. all their dependencies in the Nix store), and copies
 all paths in the closure to the remote machine via the
diff --git a/doc/manual/command-ref/nix-prefetch-url.xml b/doc/manual/command-ref/nix-prefetch-url.xml
index 5d1ab6931cd3..016d8863a94c 100644
--- a/doc/manual/command-ref/nix-prefetch-url.xml
+++ b/doc/manual/command-ref/nix-prefetch-url.xml
@@ -3,7 +3,7 @@
       xmlns:xi="http://www.w3.org/2001/XInclude"
       version="5.0"
       xml:id="sec-nix-prefetch-url">
-  
+
 <refmeta>
   <refentrytitle>nix-prefetch-url</refentrytitle>
   <manvolnum>1</manvolnum>
@@ -20,6 +20,7 @@
   <cmdsynopsis>
     <command>nix-prefetch-url</command>
     <arg><option>--type</option> <replaceable>hashAlgo</replaceable></arg>
+    <arg><option>--print-path</option></arg>
     <arg choice='plain'><replaceable>url</replaceable></arg>
     <arg><replaceable>hash</replaceable></arg>
   </cmdsynopsis>
@@ -54,8 +55,8 @@ error if signaled if the actual hash of the file does not match the
 specified hash.</para>
 
 <para>This command prints the hash on standard output.  Additionally,
-if the environment variable <envar>PRINT_PATH</envar> is set, the path
-of the downloaded file in the Nix store is also printed.</para>
+if the option <option>--print-path</option> is used, the path of the
+downloaded file in the Nix store is also printed.</para>
 
 </refsection>
 
@@ -63,7 +64,7 @@ of the downloaded file in the Nix store is also printed.</para>
 <refsection><title>Options</title>
 
 <variablelist>
-  
+
   <varlistentry><term><option>--type</option> <replaceable>hashAlgo</replaceable></term>
 
     <listitem><para>Use the specified cryptographic hash algorithm,
@@ -73,6 +74,35 @@ of the downloaded file in the Nix store is also printed.</para>
 
   </varlistentry>
 
+  <varlistentry><term><option>--print-path</option></term>
+
+    <listitem><para>Print the store path of the downloaded file on
+    standard output.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--unpack</option></term>
+
+    <listitem><para>Unpack the archive (which must be a tarball or zip
+    file) and add the result to the Nix store. The resulting hash can
+    be used with functions such as Nixpkgs’s
+    <varname>fetchzip</varname> or
+    <varname>fetchFromGitHub</varname>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--name</option></term>
+
+    <listitem><para>Override the name of the file in the Nix store. By
+    default, this is
+    <literal><replaceable>hash</replaceable>-<replaceable>basename</replaceable></literal>,
+    where <replaceable>basename</replaceable> is the last component of
+    <replaceable>url</replaceable>. Overriding the name is necessary
+    when <replaceable>basename</replaceable> contains characters that
+    are not allowed in Nix store paths.</para></listitem>
+
+  </varlistentry>
+
 </variablelist>
 
 </refsection>
@@ -81,14 +111,19 @@ of the downloaded file in the Nix store is also printed.</para>
 <refsection><title>Examples</title>
 
 <screen>
-$ nix-prefetch-url ftp://ftp.nluug.nl/pub/gnu/make/make-3.80.tar.bz2
-0bbd1df101bc0294d440471e50feca71
+$ nix-prefetch-url ftp://ftp.gnu.org/pub/gnu/hello/hello-2.10.tar.gz
+0ssi1wpaf7plaswqqjwigppsg5fyh99vdlb9kzl7c9lng89ndq1i
 
-$ PRINT_PATH=1 nix-prefetch-url ftp://ftp.nluug.nl/pub/gnu/make/make-3.80.tar.bz2
-0bbd1df101bc0294d440471e50feca71
-/nix/store/wvyz8ifdn7wyz1p3pqyn0ra45ka2l492-make-3.80.tar.bz2</screen>
+$ nix-prefetch-url --print-path mirror://gnu/hello/hello-2.10.tar.gz
+0ssi1wpaf7plaswqqjwigppsg5fyh99vdlb9kzl7c9lng89ndq1i
+/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz
+
+$ nix-prefetch-url --unpack --print-path https://github.com/NixOS/patchelf/archive/0.8.tar.gz
+079agjlv0hrv7fxnx9ngipx14gyncbkllxrp9cccnh3a50fxcmy7
+/nix/store/19zrmhm3m40xxaw81c8cqm6aljgrnwj2-0.8.tar.gz
+</screen>
 
 </refsection>
 
-    
+
 </refentry>
diff --git a/doc/manual/installation/installing-binary.xml b/doc/manual/installation/installing-binary.xml
index f9ee98c726d2..e9a09cba7030 100644
--- a/doc/manual/installation/installing-binary.xml
+++ b/doc/manual/installation/installing-binary.xml
@@ -28,6 +28,7 @@ $ chown alice /nix
 
 </para>
 
+<!--
 <para>You can also manually download and install a binary package.
 Binary packages of the latest stable release are available for Fedora,
 Debian, Ubuntu, Mac OS X and various other systems from the <link
@@ -53,12 +54,12 @@ $ dpkg -i nix_1.8-1_amd64.deb</screen>
 
 </para>
 
-<para>For other platforms, including Mac OS X and other Linux
-distributions, you can download a binary tarball that contains Nix and
-all its dependencies.  (This is what the install script at
-<uri>https://nixos.org/nix/install</uri> uses.)  You should unpack it
-somewhere (e.g. in <filename>/tmp</filename>), and then run the script
-named <command>install</command> inside the binary tarball:
+<para>You can also download a binary tarball that contains Nix and all
+its dependencies.  (This is what the install script at
+<uri>https://nixos.org/nix/install</uri> does automatically.)  You
+should unpack it somewhere (e.g. in <filename>/tmp</filename>), and
+then run the script named <command>install</command> inside the binary
+tarball:
 
 <screen>
 alice$ cd /tmp
@@ -78,5 +79,14 @@ other auxiliary data, if desired:
 $ rm -rf /nix</screen>
 
 </para>
+-->
+
+<para>You can uninstall Nix simply by running:
+
+<screen>
+$ rm -rf /nix
+</screen>
+
+</para>
 
 </chapter>
diff --git a/doc/manual/installation/prerequisites-source.xml b/doc/manual/installation/prerequisites-source.xml
index 47adc9a4fd67..01d52c74030a 100644
--- a/doc/manual/installation/prerequisites-source.xml
+++ b/doc/manual/installation/prerequisites-source.xml
@@ -30,7 +30,7 @@
   or higher.  If your distribution does not provide it, please install
   it from <link xlink:href="http://www.sqlite.org/" />.</para></listitem>
 
-  <listitem><para>The Perl DBI and DBD::SQLite libraries, which are
+  <listitem><para>The Perl DBI, DBD::SQLite, and WWW::Curl libraries, which are
   available from <link
   xlink:href="http://search.cpan.org/">CPAN</link> if your
   distribution does not provide them.</para></listitem>
@@ -70,4 +70,4 @@
 
 </itemizedlist>
 
-</section>
\ No newline at end of file
+</section>
diff --git a/doc/manual/release-notes/rl-1.8.xml b/doc/manual/release-notes/rl-1.8.xml
index e551ee06055f..b7acc1773baa 100644
--- a/doc/manual/release-notes/rl-1.8.xml
+++ b/doc/manual/release-notes/rl-1.8.xml
@@ -60,7 +60,7 @@ $ nix-store -l $(which xterm)
   <listitem><para><command>nix-copy-closure</command> now uses
   <command>nix-store --serve</command> on the remote side to send or
   receive closures. This fixes a race condition between
-  <command>nix-copy-closureE</command> and the garbage
+  <command>nix-copy-closure</command> and the garbage
   collector.</para></listitem>
 
   <listitem><para>Derivations can specify the new special attribute
diff --git a/local.mk b/local.mk
index 160057ad2625..2541f3f32290 100644
--- a/local.mk
+++ b/local.mk
@@ -1,5 +1,6 @@
 ifeq ($(MAKECMDGOALS), dist)
-  dist-files += $(shell git ls-files)
+  # Make sure we are in repo root with `--git-dir`
+  dist-files += $(shell git --git-dir=.git ls-files || find * -type f)
 endif
 
 dist-files += configure config.h.in nix.spec
diff --git a/misc/emacs/nix-mode.el b/misc/emacs/nix-mode.el
index 790799d858cc..10035e96ef30 100644
--- a/misc/emacs/nix-mode.el
+++ b/misc/emacs/nix-mode.el
@@ -9,16 +9,16 @@
 ;;; Code:
 
 (defconst nix-font-lock-keywords
-  '("\\<if\\>" "\\<then\\>" "\\<else\\>" "\\<assert\\>" "\\<with\\>"
-    "\\<let\\>" "\\<in\\>" "\\<rec\\>" "\\<inherit\\>" "\\<or\\>"
-    ("\\<true\\>" . font-lock-builtin-face)
-    ("\\<false\\>" . font-lock-builtin-face)
-    ("\\<null\\>" . font-lock-builtin-face)
-    ("\\<import\\>" . font-lock-builtin-face)
-    ("\\<derivation\\>" . font-lock-builtin-face)
-    ("\\<baseNameOf\\>" . font-lock-builtin-face)
-    ("\\<toString\\>" . font-lock-builtin-face)
-    ("\\<isNull\\>" . font-lock-builtin-face)
+  '("\\_<if\\_>" "\\_<then\\_>" "\\_<else\\_>" "\\_<assert\\_>" "\\_<with\\_>"
+    "\\_<let\\_>" "\\_<in\\_>" "\\_<rec\\_>" "\\_<inherit\\_>" "\\_<or\\_>"
+    ("\\_<true\\_>" . font-lock-builtin-face)
+    ("\\_<false\\_>" . font-lock-builtin-face)
+    ("\\_<null\\_>" . font-lock-builtin-face)
+    ("\\_<import\\_>" . font-lock-builtin-face)
+    ("\\_<derivation\\_>" . font-lock-builtin-face)
+    ("\\_<baseNameOf\\_>" . font-lock-builtin-face)
+    ("\\_<toString\\_>" . font-lock-builtin-face)
+    ("\\_<isNull\\_>" . font-lock-builtin-face)
     ("[a-zA-Z][a-zA-Z0-9\\+-\\.]*:[a-zA-Z0-9%/\\?:@&=\\+\\$,_\\.!~\\*'-]+"
      . font-lock-constant-face)
     ("\\<\\([a-zA-Z_][a-zA-Z0-9_'\-\.]*\\)[ \t]*="
diff --git a/mk/lib.mk b/mk/lib.mk
index 4ad5c636c8d4..bb82801d3b4e 100644
--- a/mk/lib.mk
+++ b/mk/lib.mk
@@ -61,7 +61,9 @@ ifeq ($(BUILD_SHARED_LIBS), 1)
   endif
   ifneq ($(OS), Darwin)
    ifneq ($(OS), SunOS)
-    GLOBAL_LDFLAGS += -Wl,--no-copy-dt-needed-entries
+    ifneq ($(OS), FreeBSD)
+     GLOBAL_LDFLAGS += -Wl,--no-copy-dt-needed-entries
+    endif
    endif
   endif
   SET_RPATH_TO_LIBS ?= 1
diff --git a/perl/lib/Nix/Store.pm b/perl/lib/Nix/Store.pm
index af3d2fa2e719..d226264d4df3 100644
--- a/perl/lib/Nix/Store.pm
+++ b/perl/lib/Nix/Store.pm
@@ -21,6 +21,7 @@ our @EXPORT = qw(
     signString checkSignature
     addToStore makeFixedOutputPath
     derivationFromPath
+    addTempRoot
 );
 
 our $VERSION = '0.15';
diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs
index d3bfa19fd846..07d81aa3ab05 100644
--- a/perl/lib/Nix/Store.xs
+++ b/perl/lib/Nix/Store.xs
@@ -356,3 +356,13 @@ SV * derivationFromPath(char * drvPath)
         }
     OUTPUT:
         RETVAL
+
+
+void addTempRoot(char * storePath)
+    PPCODE:
+        try {
+            doInit();
+            store->addTempRoot(storePath);
+        } catch (Error & e) {
+            croak(e.what());
+        }
diff --git a/release.nix b/release.nix
index 5a5a1226fe2c..4269a3f76d8c 100644
--- a/release.nix
+++ b/release.nix
@@ -23,7 +23,7 @@ let
         inherit officialRelease;
 
         buildInputs =
-          [ curl bison flex perl libxml2 libxslt bzip2
+          [ curl bison flex perl libxml2 libxslt bzip2 xz
             dblatex (dblatex.tex or tetex) nukeReferences pkgconfig sqlite libsodium
             docbook5 docbook5_xsl
           ] ++ lib.optional (!lib.inNixShell) git;
@@ -36,7 +36,9 @@ let
 
         postUnpack = ''
           # Clean up when building from a working tree.
-          (cd $sourceRoot && (git ls-files -o | xargs -r rm -v))
+          if [[ -d $sourceRoot/.git ]]; then
+            git -C $sourceRoot clean -fd
+          fi
         '';
 
         preConfigure = ''
@@ -81,7 +83,7 @@ let
         src = tarball;
 
         buildInputs =
-          [ curl perl bzip2 openssl pkgconfig sqlite boehmgc ]
+          [ curl perl bzip2 xz openssl pkgconfig sqlite boehmgc ]
           ++ lib.optional stdenv.isLinux libsodium;
 
         configureFlags = ''
diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in
index bb63eafca522..ea053bf14da4 100644
--- a/scripts/download-from-binary-cache.pl.in
+++ b/scripts/download-from-binary-cache.pl.in
@@ -80,7 +80,12 @@ sub addRequest {
     $curl->setopt(CURLOPT_WRITEDATA, $fh);
     $curl->setopt(CURLOPT_FOLLOWLOCATION, 1);
     $curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle;
-    $curl->setopt(CURLOPT_SSL_VERIFYPEER, 0) unless isTrue($Nix::Config::config{"verify-https-binary-caches"} // "1");
+
+    unless (isTrue($Nix::Config::config{"verify-https-binary-caches"} // "1")) {
+        $curl->setopt(CURLOPT_SSL_VERIFYPEER, 0);
+        $curl->setopt(CURLOPT_SSL_VERIFYHOST, 0);
+    }
+
     $curl->setopt(CURLOPT_USERAGENT, $userAgent);
     $curl->setopt(CURLOPT_NOBODY, 1) if $head;
     $curl->setopt(CURLOPT_FAILONERROR, 1);
diff --git a/scripts/local.mk b/scripts/local.mk
index 39e1df611c5c..cdac56bf13cb 100644
--- a/scripts/local.mk
+++ b/scripts/local.mk
@@ -4,7 +4,6 @@ nix_bin_scripts := \
   $(d)/nix-copy-closure \
   $(d)/nix-generate-patches \
   $(d)/nix-install-package \
-  $(d)/nix-prefetch-url \
   $(d)/nix-pull \
   $(d)/nix-push
 
@@ -18,6 +17,7 @@ nix_substituters := \
 nix_noinst_scripts := \
   $(d)/build-remote.pl \
   $(d)/find-runtime-roots.pl \
+  $(d)/resolve-system-dependencies.pl \
   $(d)/nix-http-export.cgi \
   $(d)/nix-profile.sh \
   $(d)/nix-reduce-build \
@@ -30,6 +30,7 @@ profiledir = $(sysconfdir)/profile.d
 $(eval $(call install-file-as, $(d)/nix-profile.sh, $(profiledir)/nix.sh, 0644))
 $(eval $(call install-program-in, $(d)/find-runtime-roots.pl, $(libexecdir)/nix))
 $(eval $(call install-program-in, $(d)/build-remote.pl, $(libexecdir)/nix))
+$(eval $(call install-program-in, $(d)/resolve-system-dependencies.pl, $(libexecdir)/nix))
 $(foreach prog, $(nix_substituters), $(eval $(call install-program-in, $(prog), $(libexecdir)/nix/substituters)))
 $(eval $(call install-symlink, nix-build, $(bindir)/nix-shell))
 
diff --git a/scripts/nix-copy-closure.in b/scripts/nix-copy-closure.in
index 55d108fbb4c2..9cbb4ede51a3 100755
--- a/scripts/nix-copy-closure.in
+++ b/scripts/nix-copy-closure.in
@@ -1,5 +1,6 @@
 #! @perl@ -w @perlFlags@
 
+use utf8;
 use strict;
 use Nix::SSH;
 use Nix::Config;
diff --git a/scripts/nix-prefetch-url.in b/scripts/nix-prefetch-url.in
deleted file mode 100755
index 6effbe208146..000000000000
--- a/scripts/nix-prefetch-url.in
+++ /dev/null
@@ -1,132 +0,0 @@
-#! @perl@ -w @perlFlags@
-
-use utf8;
-use strict;
-use File::Basename;
-use File::stat;
-use Nix::Store;
-use Nix::Config;
-use Nix::Utils;
-
-binmode STDERR, ":encoding(utf8)";
-
-
-my $hashType = $ENV{'NIX_HASH_ALGO'} || "sha256"; # obsolete
-my $cacheDir = $ENV{'NIX_DOWNLOAD_CACHE'};
-
-my @args;
-my $arg;
-while ($arg = shift) {
-    if ($arg eq "--help") {
-        exec "man nix-prefetch-url" or die;
-    } elsif ($arg eq "--type") {
-        $hashType = shift;
-        die "$0: ‘$arg’ requires an argument\n" unless defined $hashType;
-    } elsif (substr($arg, 0, 1) eq "-") {
-        die "$0: unknown flag ‘$arg’\n";
-    } else {
-        push @args, $arg;
-    }
-}
-
-my $url = $args[0];
-my $expHash = $args[1];
-
-
-if (!defined $url || $url eq "") {
-    print STDERR <<EOF
-Usage: nix-prefetch-url URL [EXPECTED-HASH]
-EOF
-    ;
-    exit 1;
-}
-
-my $tmpDir = mkTempDir("nix-prefetch-url");
-
-# Hack to support the mirror:// scheme from Nixpkgs.
-if ($url =~ /^mirror:\/\//) {
-    system("$Nix::Config::binDir/nix-build '<nixpkgs>' -A resolveMirrorURLs --argstr url '$url' -o $tmpDir/urls > /dev/null") == 0
-        or die "$0: nix-build failed; maybe \$NIX_PATH is not set properly\n";
-    my @expanded = split ' ', readFile("$tmpDir/urls");
-    die "$0: cannot resolve ‘$url’" unless scalar @expanded > 0;
-    print STDERR "$url expands to $expanded[0]\n";
-    $url = $expanded[0];
-}
-
-# Handle escaped characters in the URI.  `+', `=' and `?' are the only
-# characters that are valid in Nix store path names but have a special
-# meaning in URIs.
-my $name = basename $url;
-die "cannot figure out file name for ‘$url’\n" if $name eq ""; 
-$name =~ s/%2b/+/g;
-$name =~ s/%3d/=/g;
-$name =~ s/%3f/?/g;
-
-my $finalPath;
-my $hash;
-
-# If the hash was given, a file with that hash may already be in the
-# store.
-if (defined $expHash) {
-    $finalPath = makeFixedOutputPath(0, $hashType, $expHash, $name);
-    if (isValidPath($finalPath)) { $hash = $expHash; } else { $finalPath = undef; }
-}
-
-# If we don't know the hash or a file with that hash doesn't exist,
-# download the file and add it to the store.
-if (!defined $finalPath) {
-
-    my $tmpFile = "$tmpDir/$name";
-    
-    # Optionally do timestamp-based caching of the download.
-    # Actually, the only thing that we cache in $NIX_DOWNLOAD_CACHE is
-    # the hash and the timestamp of the file at $url.  The caching of
-    # the file *contents* is done in Nix store, where it can be
-    # garbage-collected independently.
-    my ($cachedTimestampFN, $cachedHashFN, @cacheFlags);
-    if (defined $cacheDir) {
-        my $urlHash = hashString("sha256", 1, $url);
-        writeFile "$cacheDir/$urlHash.url", $url;
-        $cachedHashFN = "$cacheDir/$urlHash.$hashType";
-        $cachedTimestampFN = "$cacheDir/$urlHash.stamp";
-        @cacheFlags = ("--time-cond", $cachedTimestampFN) if -f $cachedHashFN && -f $cachedTimestampFN;
-    }
-    
-    # Perform the download.
-    my @curlFlags = ("curl", $url, "-o", $tmpFile, "--fail", "--location", "--max-redirs", "20", "--disable-epsv", "--cookie-jar", "$tmpDir/cookies", "--remote-time", (split " ", ($ENV{NIX_CURL_FLAGS} || "")));
-    (system $Nix::Config::curl @curlFlags, @cacheFlags) == 0 or die "$0: download of ‘$url’ failed\n";
-
-    if (defined $cacheDir && ! -e $tmpFile) {
-        # Curl didn't create $tmpFile, so apparently there's no newer
-        # file on the server.
-        $hash = readFile $cachedHashFN or die;
-        $finalPath = makeFixedOutputPath(0, $hashType, $hash, $name);
-        unless (isValidPath $finalPath) {
-            print STDERR "cached contents of ‘$url’ disappeared, redownloading...\n";
-            $finalPath = undef;
-            (system $Nix::Config::curl @curlFlags) == 0 or die "$0: download of ‘$url’ failed\n";
-        }
-    }
-
-    if (!defined $finalPath) {
-        
-        # Compute the hash.
-        $hash = hashFile($hashType, $hashType ne "md5", $tmpFile);
-
-        if (defined $cacheDir) {
-            writeFile $cachedHashFN, $hash;
-            my $st = stat($tmpFile) or die;
-            open STAMP, ">$cachedTimestampFN" or die; close STAMP;
-            utime($st->atime, $st->mtime, $cachedTimestampFN) or die;
-        }
-    
-        # Add the downloaded file to the Nix store.
-        $finalPath = addToStore($tmpFile, 0, $hashType);
-    }
-
-    die "$0: hash mismatch for ‘$url’\n" if defined $expHash && $expHash ne $hash;
-}
-
-print STDERR "path is ‘$finalPath’\n" unless $ENV{'QUIET'};
-print "$hash\n";
-print "$finalPath\n" if $ENV{'PRINT_PATH'};
diff --git a/scripts/resolve-system-dependencies.pl.in b/scripts/resolve-system-dependencies.pl.in
new file mode 100755
index 000000000000..a20f0dc020fe
--- /dev/null
+++ b/scripts/resolve-system-dependencies.pl.in
@@ -0,0 +1,122 @@
+#! @perl@ -w @perlFlags@
+
+use utf8;
+use strict;
+use warnings;
+use Cwd qw(realpath);
+use Errno;
+use File::Basename qw(dirname);
+use File::Path qw(make_path);
+use File::Spec::Functions qw(catfile);
+use List::Util qw(reduce);
+use IPC::Open3;
+use Nix::Config;
+use Nix::Store qw(derivationFromPath);
+use POSIX qw(uname);
+use Storable qw(lock_retrieve lock_store);
+
+my ($sysname, undef, $version, undef, $machine) = uname;
+$sysname =~ /Darwin/ or die "This tool is only meant to be used on Darwin systems.";
+
+my $cache = "$Nix::Config::stateDir/dependency-maps/$machine-$sysname-$version.map";
+
+make_path dirname($cache);
+
+our $DEPS;
+eval {
+  $DEPS = lock_retrieve($cache);
+};
+
+if($!{ENOENT}) {
+  lock_store {}, $cache;
+  $DEPS = {};
+} elsif($@) {
+  die "Unable to obtain a lock on dependency-map file $cache: $@";
+}
+
+sub mkset(@) {
+  my %set;
+  @set{@_} = ();
+  \%set
+}
+
+sub union($$) {
+  my ($set1, $set2) = @_;
+  my %new = (%$set1, %$set2);
+  \%new
+}
+
+sub cache_filepath($) {
+  my $fp = shift;
+  $fp =~ s/-/--/g;
+  $fp =~ s/\//-/g;
+  $fp =~ s/^-//g;
+  catfile $cache, $fp
+}
+
+sub resolve_tree {
+  sub resolve_tree_inner {
+    my ($lib, $TREE) = @_;
+    return if (defined $TREE->{$lib});
+    $TREE->{$lib} = mkset(@{cache_get($lib)});
+    foreach my $dep (keys %{$TREE->{$lib}}) {
+      resolve_tree_inner($dep, $TREE);
+    }
+    values %$TREE
+  }
+
+  reduce { union($a, $b) } {}, resolve_tree_inner(@_)
+}
+
+sub cache_get {
+  my $key = shift;
+  if (defined $DEPS->{$key}) {
+    $DEPS->{$key}
+  } else {
+    cache_insert($key);
+    cache_get($key)
+  }
+}
+
+sub cache_insert($) {
+  my $key = shift;
+  print STDERR "Finding dependencies for $key...\n";
+  my @deps = find_deps($key);
+  $DEPS->{$key} = \@deps;
+}
+
+sub find_deps($) {
+  my $lib = shift;
+  my($chld_in, $chld_out, $chld_err);
+  my $pid = open3($chld_in, $chld_out, $chld_err, "@otool@", "-L", "-arch", "x86_64", $lib);
+  waitpid($pid, 0);
+  my $line = readline $chld_out;
+  if($? == 0 and $line !~ /not an object file/) {
+    my @libs;
+    while(<$chld_out>) {
+      my $dep = (split /\s+/)[1];
+      push @libs, $dep unless $dep eq $lib or $dep =~ /\@rpath/;
+    }
+    @libs
+  } elsif (-l $lib) {
+    (realpath($lib))
+  } else {
+    ()
+  }
+}
+
+if (defined $ARGV[0]) {
+  my $deps = derivationFromPath($ARGV[0])->{"env"}->{"__impureHostDeps"};
+  if (defined $deps) {
+    my @files = split(/\s+/, $deps);
+    my $depcache = {};
+    my $depset = reduce { union($a, $b) } (map { resolve_tree($_, $depcache) } @files);
+    print "extra-chroot-dirs\n";
+    print join("\n", keys %$depset);
+    print "\n";
+  }
+  lock_store($DEPS, $cache);
+} else {
+  print STDERR "Usage: $0 path/to/derivation.drv\n";
+  exit 1
+}
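
(Sketch of the pre-build-hook protocol this script speaks; the paths below are
hypothetical. Nix runs the hook with the .drv path as its argument, and the
hook answers on stdout with a marker line followed by one directory per line,
which build.cc parses as extra sandbox/chroot paths.)

    $ resolve-system-dependencies.pl /nix/store/<hash>-foo.drv
    extra-chroot-dirs
    /usr/lib/libSystem.B.dylib
    /usr/lib/system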
diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh
index c8d83814f3ca..0748fbd3f3e1 100644
--- a/src/libexpr/eval-inline.hh
+++ b/src/libexpr/eval-inline.hh
@@ -78,5 +78,4 @@ inline void EvalState::forceList(Value & v, const Pos & pos)
         throwTypeError("value is %1% while a list was expected, at %2%", v, pos);
 }
 
-
 }
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 74f88e498fed..acf1fbdc1356 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -1291,10 +1291,16 @@ bool EvalState::forceBool(Value & v)
 }
 
 
+bool EvalState::isFunctor(Value & fun)
+{
+    return fun.type == tAttrs && fun.attrs->find(sFunctor) != fun.attrs->end();
+}
+
+
 void EvalState::forceFunction(Value & v, const Pos & pos)
 {
     forceValue(v);
-    if (v.type != tLambda && v.type != tPrimOp && v.type != tPrimOpApp)
+    if (v.type != tLambda && v.type != tPrimOp && v.type != tPrimOpApp && !isFunctor(v))
         throwTypeError("value is %1% while a function was expected, at %2%", v, pos);
 }
 
@@ -1386,7 +1392,7 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
            shell scripting convenience, just like `null'. */
         if (v.type == tBool && v.boolean) return "1";
         if (v.type == tBool && !v.boolean) return "";
-        if (v.type == tInt) return int2String(v.integer);
+        if (v.type == tInt) return std::to_string(v.integer);
         if (v.type == tNull) return "";
 
         if (v.isList()) {
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
index 8df0084fd57a..eb55f6d4d431 100644
--- a/src/libexpr/eval.hh
+++ b/src/libexpr/eval.hh
@@ -213,6 +213,8 @@ public:
        elements and attributes are compared recursively. */
     bool eqValues(Value & v1, Value & v2);
 
+    bool isFunctor(Value & fun);
+
     void callFunction(Value & fun, Value & arg, Value & v, const Pos & pos);
     void callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos);
 
diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk
index d1b1987fb037..5de9ccc6d011 100644
--- a/src/libexpr/local.mk
+++ b/src/libexpr/local.mk
@@ -10,7 +10,10 @@ libexpr_CXXFLAGS := -Wno-deprecated-register
 
 libexpr_LIBS = libutil libstore libformat
 
-libexpr_LDFLAGS = -ldl
+libexpr_LDFLAGS =
+ifneq ($(OS), FreeBSD)
+ libexpr_LDFLAGS += -ldl
+endif
 
 # The dependency on libgc must be propagated (i.e. meaning that
 # programs/libraries that use libexpr must explicitly pass -lgc),
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index dde87dcda8ed..e1ccb1eaf136 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -62,7 +62,7 @@
     #define DEFAULT_ALLOWED_IMPURE_PREFIXES "/System/Library /usr/lib /dev /bin/sh"
 #else
     #define SANDBOX_ENABLED 0
-    #define DEFAULT_ALLOWED_IMPURE_PREFIXES "/bin" "/usr/bin"
+    #define DEFAULT_ALLOWED_IMPURE_PREFIXES ""
 #endif
 
 #if CHROOT_ENABLED
@@ -74,6 +74,7 @@
 
 #if __linux__
 #include <sys/personality.h>
+#include <sys/mman.h>
 #endif
 
 #if HAVE_STATVFS
@@ -777,6 +778,12 @@ private:
     DirsInChroot dirsInChroot;
     typedef map<string, string> Environment;
     Environment env;
+#if SANDBOX_ENABLED
+    typedef string SandboxProfile;
+    SandboxProfile additionalSandboxProfile;
+
+    AutoDelete autoDelSandbox;
+#endif
 
     /* Hash rewriting. */
     HashRewrites rewritesToTmp, rewritesFromTmp;
@@ -790,13 +797,19 @@ private:
        temporary paths. */
     PathSet redirectedBadOutputs;
 
-    /* Set of inodes seen during calls to canonicalisePathMetaData()
-       for this build's outputs.  This needs to be shared between
-       outputs to allow hard links between outputs. */
-    InodesSeen inodesSeen;
-
     BuildResult result;
 
+    /* The current round, if we're building multiple times. */
+    unsigned int curRound = 1;
+
+    unsigned int nrRounds;
+
+    /* Path registration info from the previous round, if we're
+       building multiple times. Since this contains the hash, it
+       allows us to compare whether two rounds produced the same
+       result. */
+    ValidPathInfos prevInfos;
+
 public:
     DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs,
         Worker & worker, BuildMode buildMode = bmNormal);
@@ -1237,6 +1250,10 @@ void DerivationGoal::inputsRealised()
     for (auto & i : drv->outputs)
         if (i.second.hash == "") fixedOutput = false;
 
+    /* Don't repeat fixed-output derivations since they're already
+       verified by their output hash.*/
+    nrRounds = fixedOutput ? 1 : settings.get("build-repeat", 0) + 1;
+
     /* Okay, try to build.  Note that here we don't wait for a build
        slot to become available, since we don't need one if there is a
        build hook. */
@@ -1245,11 +1262,22 @@ void DerivationGoal::inputsRealised()
 }
 
 
-static bool canBuildLocally(const string & platform)
+static bool isBuiltin(const BasicDerivation & drv)
+{
+    return string(drv.builder, 0, 8) == "builtin:";
+}
+
+
+static bool canBuildLocally(const BasicDerivation & drv)
 {
-    return platform == settings.thisSystem
+    return drv.platform == settings.thisSystem
+        || isBuiltin(drv)
 #if __linux__
-        || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux")
+        || (drv.platform == "i686-linux" && settings.thisSystem == "x86_64-linux")
+        || (drv.platform == "armv6l-linux" && settings.thisSystem == "armv7l-linux")
+#elif __FreeBSD__
+        || (drv.platform == "i686-linux" && settings.thisSystem == "x86_64-freebsd")
+        || (drv.platform == "i686-linux" && settings.thisSystem == "i686-freebsd")
 #endif
         ;
 }
@@ -1264,7 +1292,7 @@ static string get(const StringPairs & map, const string & key, const string & de
 
 bool willBuildLocally(const BasicDerivation & drv)
 {
-    return get(drv.env, "preferLocalBuild") == "1" && canBuildLocally(drv.platform);
+    return get(drv.env, "preferLocalBuild") == "1" && canBuildLocally(drv);
 }
 
 
@@ -1274,12 +1302,6 @@ bool substitutesAllowed(const BasicDerivation & drv)
 }
 
 
-static bool isBuiltin(const BasicDerivation & drv)
-{
-    return string(drv.builder, 0, 8) == "builtin:";
-}
-
-
 void DerivationGoal::tryToBuild()
 {
     trace("trying to build");
@@ -1417,6 +1439,9 @@ void replaceValidPath(const Path & storePath, const Path tmpPath)
 }
 
 
+MakeError(NotDeterministic, BuildError)
+
+
 void DerivationGoal::buildDone()
 {
     trace("build done");
@@ -1516,6 +1541,15 @@ void DerivationGoal::buildDone()
 
         deleteTmpDir(true);
 
+        /* Repeat the build if necessary. */
+        if (curRound++ < nrRounds) {
+            outputLocks.unlock();
+            buildUser.release();
+            state = &DerivationGoal::tryToBuild;
+            worker.wakeUp(shared_from_this());
+            return;
+        }
+
         /* It is now safe to delete the lock files, since all future
            lockers will see that the output paths are valid; they will
            not create new lock files with the same names as the old
@@ -1549,6 +1583,7 @@ void DerivationGoal::buildDone()
                     % drvPath % 1 % e.msg());
 
             st =
+                dynamic_cast<NotDeterministic*>(&e) ? BuildResult::NotDeterministic :
                 statusOk(status) ? BuildResult::OutputRejected :
                 fixedOutput || diskFull ? BuildResult::TransientFailure :
                 BuildResult::PermanentFailure;
@@ -1675,13 +1710,16 @@ int childEntry(void * arg)
 
 void DerivationGoal::startBuilder()
 {
-    startNest(nest, lvlInfo, format(
-            buildMode == bmRepair ? "repairing path(s) %1%" :
-            buildMode == bmCheck ? "checking path(s) %1%" :
-            "building path(s) %1%") % showPaths(missingPaths));
+    auto f = format(
+        buildMode == bmRepair ? "repairing path(s) %1%" :
+        buildMode == bmCheck ? "checking path(s) %1%" :
+        nrRounds > 1 ? "building path(s) %1% (round %2%/%3%)" :
+        "building path(s) %1%");
+    f.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit);
+    startNest(nest, lvlInfo, f % showPaths(missingPaths) % curRound % nrRounds);
 
     /* Right platform? */
-    if (!canBuildLocally(drv->platform)) {
+    if (!canBuildLocally(*drv)) {
         if (settings.printBuildTrace)
             printMsg(lvlError, format("@ unsupported-platform %1% %2%") % drvPath % drv->platform);
         throw Error(
@@ -1690,6 +1728,7 @@ void DerivationGoal::startBuilder()
     }
 
     /* Construct the environment passed to the builder. */
+    env.clear();
 
     /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when
        PATH is not set.  We don't want this, so we fill it in with some dummy
@@ -1729,7 +1768,7 @@ void DerivationGoal::startBuilder()
         if (passAsFile.find(i.first) == passAsFile.end()) {
             env[i.first] = i.second;
         } else {
-            Path p = tmpDir + "/.attr-" + int2String(fileNr++);
+            Path p = tmpDir + "/.attr-" + std::to_string(fileNr++);
             writeFile(p, i.second);
             filesToChown.insert(p);
             env[i.first + "Path"] = p;
@@ -1877,6 +1916,8 @@ void DerivationGoal::startBuilder()
                 settings.get("build-extra-sandbox-paths", string(""))));
         dirs.insert(dirs2.begin(), dirs2.end());
 
+        dirsInChroot.clear();
+
         for (auto & i : dirs) {
             size_t p = i.find('=');
             if (p == string::npos)
@@ -1894,6 +1935,9 @@ void DerivationGoal::startBuilder()
         for (auto & i : closure)
             dirsInChroot[i] = i;
 
+#if SANDBOX_ENABLED
+        additionalSandboxProfile = get(drv->env, "__sandboxProfile");
+#endif
         string allowed = settings.get("allowed-impure-host-deps", string(DEFAULT_ALLOWED_IMPURE_PREFIXES));
         PathSet allowedPaths = tokenizeString<StringSet>(allowed);
 
@@ -2063,7 +2107,7 @@ void DerivationGoal::startBuilder()
         auto lastPos = std::string::size_type{0};
         for (auto nlPos = lines.find('\n'); nlPos != string::npos;
                 nlPos = lines.find('\n', lastPos)) {
-            auto line = std::string{lines, lastPos, nlPos};
+            auto line = std::string{lines, lastPos, nlPos - lastPos};
             lastPos = nlPos + 1;
             if (state == stBegin) {
                 if (line == "extra-sandbox-paths" || line == "extra-chroot-dirs") {
@@ -2134,16 +2178,19 @@ void DerivationGoal::startBuilder()
         ProcessOptions options;
         options.allowVfork = false;
         Pid helper = startProcess([&]() {
-            char stack[32 * 1024];
+            size_t stackSize = 1 * 1024 * 1024;
+            char * stack = (char *) mmap(0, stackSize,
+                PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+            if (!stack) throw SysError("allocating stack");
             int flags = CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
             if (!fixedOutput) flags |= CLONE_NEWNET;
-            pid_t child = clone(childEntry, stack + sizeof(stack) - 8, flags, this);
+            pid_t child = clone(childEntry, stack + stackSize, flags, this);
             if (child == -1 && errno == EINVAL)
                 /* Fallback for Linux < 2.13 where CLONE_NEWPID and
                    CLONE_PARENT are not allowed together. */
-                child = clone(childEntry, stack + sizeof(stack) - 8, flags & ~CLONE_NEWPID, this);
+                child = clone(childEntry, stack + stackSize, flags & ~CLONE_NEWPID, this);
             if (child == -1) throw SysError("cloning builder process");
-            writeFull(builderOut.writeSide, int2String(child) + "\n");
+            writeFull(builderOut.writeSide, std::to_string(child) + "\n");
             _exit(0);
         }, options);
         if (helper.wait(true) != 0)
@@ -2411,9 +2458,10 @@ void DerivationGoal::runChild()
         const char *builder = "invalid";
 
         string sandboxProfile;
-        if (isBuiltin(*drv))
+        if (isBuiltin(*drv)) {
             ;
-        else if (useChroot && SANDBOX_ENABLED) {
+#if SANDBOX_ENABLED
+        } else if (useChroot) {
             /* Lots and lots and lots of file functions freak out if they can't stat their full ancestry */
             PathSet ancestry;
 
@@ -2440,7 +2488,7 @@ void DerivationGoal::runChild()
             for (auto & i : inputPaths)
                 dirsInChroot[i] = i;
 
-            /* TODO: we should factor out the policy cleanly, so we don't have to repeat the constants every time... */
+            /* This has to appear before import statements */
             sandboxProfile += "(version 1)\n";
 
             /* Violations will go to the syslog if you set this. Unfortunately the destination does not appear to be configurable */
@@ -2450,15 +2498,6 @@ void DerivationGoal::runChild()
                 sandboxProfile += "(deny default (with no-log))\n";
             }
 
-            sandboxProfile += "(allow file-read* file-write-data (literal \"/dev/null\"))\n";
-
-            sandboxProfile += "(allow file-read-metadata\n"
-                "\t(literal \"/var\")\n"
-                "\t(literal \"/tmp\")\n"
-                "\t(literal \"/etc\")\n"
-                "\t(literal \"/etc/nix\")\n"
-                "\t(literal \"/etc/nix/nix.conf\"))\n";
-
             /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms
                to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */
             Path globalTmpDir = canonPath(getEnv("TMPDIR", "/tmp"), true);
@@ -2466,20 +2505,6 @@ void DerivationGoal::runChild()
             /* They don't like trailing slashes on subpath directives */
             if (globalTmpDir.back() == '/') globalTmpDir.pop_back();
 
-            /* This is where our temp folders are and where most of the building will happen, so we want rwx on it. */
-            sandboxProfile += (format("(allow file-read* file-write* process-exec (subpath \"%1%\") (subpath \"/private/tmp\"))\n") % globalTmpDir).str();
-
-            sandboxProfile += "(allow process-fork)\n";
-            sandboxProfile += "(allow sysctl-read)\n";
-            sandboxProfile += "(allow signal (target same-sandbox))\n";
-
-            /* Enables getpwuid (used by git and others) */
-            sandboxProfile += "(allow mach-lookup (global-name \"com.apple.system.notification_center\") (global-name \"com.apple.system.opendirectoryd.libinfo\"))\n";
-
-            /* Allow local networking operations, mostly because lots of test suites use it and it seems mostly harmless */
-            sandboxProfile += "(allow network* (local ip) (remote unix-socket))";
-
-
             /* Our rwx outputs */
             sandboxProfile += "(allow file-read* file-write* process-exec\n";
             for (auto & i : missingPaths) {
@@ -2488,11 +2513,9 @@ void DerivationGoal::runChild()
             sandboxProfile += ")\n";
 
             /* Our inputs (transitive dependencies and any impurities computed above)
-               Note that the sandbox profile allows file-write* even though it isn't seemingly necessary. First of all, nix's standard user permissioning
-               mechanism still prevents builders from writing to input directories, so no security/purity is lost. The reason we allow file-write* is that
-               denying it means the `access` syscall will return EPERM instead of EACCESS, which confuses a few programs that assume (understandably, since
-               it appears to be a violation of the POSIX spec) that `access` won't do that, and don't deal with it nicely if it does. The most notable of
-               these is the entire GHC Haskell ecosystem. */
+
+               without file-write* allowed, access() incorrectly returns EPERM
+             */
             sandboxProfile += "(allow file-read* file-write* process-exec\n";
             for (auto & i : dirsInChroot) {
                 if (i.first != i.second)
@@ -2509,22 +2532,32 @@ void DerivationGoal::runChild()
             }
             sandboxProfile += ")\n";
 
-            /* Our ancestry. N.B: this uses literal on folders, instead of subpath. Without that,
-               you open up the entire filesystem because you end up with (subpath "/") */
-            sandboxProfile += "(allow file-read-metadata\n";
+            /* Allow file-read* on full directory hierarchy to self. Allows realpath() */
+            sandboxProfile += "(allow file-read*\n";
             for (auto & i : ancestry) {
                 sandboxProfile += (format("\t(literal \"%1%\")\n") % i.c_str()).str();
             }
             sandboxProfile += ")\n";
 
+            sandboxProfile += additionalSandboxProfile;
+
             debug("Generated sandbox profile:");
             debug(sandboxProfile);
 
+            Path sandboxFile = drvPath + ".sb";
+            if (pathExists(sandboxFile)) deletePath(sandboxFile);
+            autoDelSandbox.reset(sandboxFile, false);
+
+            writeFile(sandboxFile, sandboxProfile);
+
             builder = "/usr/bin/sandbox-exec";
             args.push_back("sandbox-exec");
-            args.push_back("-p");
-            args.push_back(sandboxProfile);
+            args.push_back("-f");
+            args.push_back(sandboxFile);
+            args.push_back("-D");
+            args.push_back("_GLOBAL_TMP_DIR=" + globalTmpDir);
             args.push_back(drv->builder);
+#endif
         } else {
             builder = drv->builder.c_str();
             string builderBasename = baseNameOf(drv->builder);
@@ -2598,6 +2631,11 @@ void DerivationGoal::registerOutputs()
 
     ValidPathInfos infos;
 
+    /* Set of inodes seen during calls to canonicalisePathMetaData()
+       for this build's outputs.  This needs to be shared between
+       outputs to allow hard links between outputs. */
+    InodesSeen inodesSeen;
+
     /* Check whether the output paths were created, and grep each
        output path to determine what other paths it references.  Also make all
        output paths read-only. */
@@ -2690,7 +2728,7 @@ void DerivationGoal::registerOutputs()
             Hash h2 = recursive ? hashPath(ht, actualPath).first : hashFile(ht, actualPath);
             if (h != h2)
                 throw BuildError(
-                    format("output path ‘%1%’ should have %2% hash ‘%3%’, instead has ‘%4%’")
+                    format("Nix expects output path ‘%1%’ to have %2% hash ‘%3%’, instead it has ‘%4%’")
                     % path % i.second.hashAlgo % printHash16or32(h) % printHash16or32(h2));
         }
 
@@ -2769,6 +2807,16 @@ void DerivationGoal::registerOutputs()
 
     if (buildMode == bmCheck) return;
 
+    if (curRound > 1 && prevInfos != infos)
+        throw NotDeterministic(
+            format("result of ‘%1%’ differs from previous round; rejecting as non-deterministic")
+            % drvPath);
+
+    if (curRound < nrRounds) {
+        prevInfos = infos;
+        return;
+    }
+
     /* Register each output path as valid, and register the sets of
        paths referenced by each of them.  If there are cycles in the
        outputs, this will fail. */
@@ -2861,7 +2909,8 @@ void DerivationGoal::handleChildOutput(int fd, const string & data)
             printMsg(lvlError,
                 format("%1% killed after writing more than %2% bytes of log output")
                 % getName() % settings.maxLogSize);
-            timedOut(); // not really a timeout, but close enough
+            killChild();
+            done(BuildResult::LogLimitExceeded);
             return;
         }
         if (verbosity >= settings.buildVerbosity)
diff --git a/src/libstore/builtins.cc b/src/libstore/builtins.cc
index 25e2e7df30e7..a1c4b48bf62e 100644
--- a/src/libstore/builtins.cc
+++ b/src/libstore/builtins.cc
@@ -1,5 +1,8 @@
 #include "builtins.hh"
 #include "download.hh"
+#include "store-api.hh"
+#include "archive.hh"
+#include "compression.hh"
 
 namespace nix {
 
@@ -7,17 +10,36 @@ void builtinFetchurl(const BasicDerivation & drv)
 {
     auto url = drv.env.find("url");
     if (url == drv.env.end()) throw Error("attribute ‘url’ missing");
-    printMsg(lvlInfo, format("downloading ‘%1%’...") % url->second);
-    auto data = downloadFile(url->second); // FIXME: show progress
+
+    /* No need to do TLS verification, because we check the hash of
+       the result anyway. */
+    DownloadOptions options;
+    options.verifyTLS = false;
+
+    /* Show a progress indicator, even though stderr is not a tty. */
+    options.forceProgress = true;
+
+    auto data = downloadFile(url->second, options);
 
     auto out = drv.env.find("out");
     if (out == drv.env.end()) throw Error("attribute ‘url’ missing");
-    writeFile(out->second, data.data);
+
+    Path storePath = out->second;
+    assertStorePath(storePath);
+
+    auto unpack = drv.env.find("unpack");
+    if (unpack != drv.env.end() && unpack->second == "1") {
+        if (string(data.data, 0, 6) == string("\xfd" "7zXZ\0", 6))
+            data.data = decompressXZ(data.data);
+        StringSource source(data.data);
+        restorePath(storePath, source);
+    } else
+        writeFile(storePath, data.data);
 
     auto executable = drv.env.find("executable");
     if (executable != drv.env.end() && executable->second == "1") {
-        if (chmod(out->second.c_str(), 0755) == -1)
-            throw SysError(format("making ‘%1%’ executable") % out->second);
+        if (chmod(storePath.c_str(), 0755) == -1)
+            throw SysError(format("making ‘%1%’ executable") % storePath);
     }
 }
 
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
index 9bf3e13aa9da..822e9a8db867 100644
--- a/src/libstore/download.cc
+++ b/src/libstore/download.cc
@@ -6,8 +6,18 @@
 
 #include <curl/curl.h>
 
+#include <iostream>
+
+
 namespace nix {
 
+double getTime()
+{
+    struct timeval tv;
+    gettimeofday(&tv, 0);
+    return tv.tv_sec + (tv.tv_usec / 1000000.0);
+}
+
 struct Curl
 {
     CURL * curl;
@@ -16,6 +26,10 @@ struct Curl
 
     struct curl_slist * requestHeaders;
 
+    bool showProgress;
+    double prevProgressTime{0}, startTime{0};
+    unsigned int moveBack{1};
+
     static size_t writeCallback(void * contents, size_t size, size_t nmemb, void * userp)
     {
         Curl & c(* (Curl *) userp);
@@ -56,11 +70,30 @@ struct Curl
         return realSize;
     }
 
-    static int progressCallback(void * clientp, double dltotal, double dlnow, double ultotal, double ulnow)
+    int progressCallback(double dltotal, double dlnow)
     {
+        if (showProgress) {
+            double now = getTime();
+            if (prevProgressTime <= now - 1) {
+                string s = (format(" [%1$.0f/%2$.0f KiB, %3$.1f KiB/s]")
+                    % (dlnow / 1024.0)
+                    % (dltotal / 1024.0)
+                    % (now == startTime ? 0 : dlnow / 1024.0 / (now - startTime))).str();
+                std::cerr << "\e[" << moveBack << "D" << s;
+                moveBack = s.size();
+                std::cerr.flush();
+                prevProgressTime = now;
+            }
+        }
         return _isInterrupted;
     }
 
+    static int progressCallback_(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
+    {
+        Curl & c(* (Curl *) userp);
+        return c.progressCallback(dltotal, dlnow);
+    }
+
     Curl()
     {
         requestHeaders = 0;
@@ -69,7 +102,6 @@ struct Curl
         if (!curl) throw Error("unable to initialize curl");
 
         curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
-        curl_easy_setopt(curl, CURLOPT_CAINFO, getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt").c_str());
         curl_easy_setopt(curl, CURLOPT_USERAGENT, ("Nix/" + nixVersion).c_str());
         curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1);
 
@@ -79,7 +111,8 @@ struct Curl
         curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, headerCallback);
         curl_easy_setopt(curl, CURLOPT_HEADERDATA, (void *) &curl);
 
-        curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progressCallback);
+        curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progressCallback_);
+        curl_easy_setopt(curl, CURLOPT_PROGRESSDATA, (void *) &curl);
         curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0);
     }
 
@@ -89,10 +122,19 @@ struct Curl
         if (requestHeaders) curl_slist_free_all(requestHeaders);
     }
 
-    bool fetch(const string & url, const string & expectedETag = "")
+    bool fetch(const string & url, const DownloadOptions & options)
     {
+        showProgress = options.forceProgress || isatty(STDERR_FILENO);
+
         curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
 
+        if (options.verifyTLS)
+            curl_easy_setopt(curl, CURLOPT_CAINFO, getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt").c_str());
+        else {
+            curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0);
+            curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0);
+        }
+
         data.clear();
 
         if (requestHeaders) {
@@ -100,16 +142,25 @@ struct Curl
             requestHeaders = 0;
         }
 
-        if (!expectedETag.empty()) {
-            this->expectedETag = expectedETag;
-            requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + expectedETag).c_str());
+        if (!options.expectedETag.empty()) {
+            this->expectedETag = options.expectedETag;
+            requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + options.expectedETag).c_str());
         }
 
         curl_easy_setopt(curl, CURLOPT_HTTPHEADER, requestHeaders);
 
+        if (showProgress) {
+            std::cerr << (format("downloading ‘%1%’... ") % url);
+            std::cerr.flush();
+            startTime = getTime();
+        }
+
         CURLcode res = curl_easy_perform(curl);
+        if (showProgress)
+            //std::cerr << "\e[" << moveBack << "D\e[K\n";
+            std::cerr << "\n";
         checkInterrupt();
-        if (res == CURLE_WRITE_ERROR && etag == expectedETag) return false;
+        if (res == CURLE_WRITE_ERROR && etag == options.expectedETag) return false;
         if (res != CURLE_OK)
             throw DownloadError(format("unable to download ‘%1%’: %2% (%3%)")
                 % url % curl_easy_strerror(res) % res);
@@ -123,11 +174,11 @@ struct Curl
 };
 
 
-DownloadResult downloadFile(string url, string expectedETag)
+DownloadResult downloadFile(string url, const DownloadOptions & options)
 {
     DownloadResult res;
     Curl curl;
-    if (curl.fetch(url, expectedETag)) {
+    if (curl.fetch(url, options)) {
         res.cached = false;
         res.data = curl.data;
     } else
@@ -178,13 +229,10 @@ Path downloadFileCached(const string & url, bool unpack)
 
     if (!skip) {
 
-        if (storePath.empty())
-            printMsg(lvlInfo, format("downloading ‘%1%’...") % url);
-        else
-            printMsg(lvlInfo, format("checking ‘%1%’...") % url);
-
         try {
-            auto res = downloadFile(url, expectedETag);
+            DownloadOptions options;
+            options.expectedETag = expectedETag;
+            auto res = downloadFile(url, options);
 
             if (!res.cached)
                 storePath = store->addTextToStore(name, res.data, PathSet(), false);
@@ -192,7 +240,7 @@ Path downloadFileCached(const string & url, bool unpack)
             assert(!storePath.empty());
             replaceSymlink(storePath, fileLink);
 
-            writeFile(dataFile, url + "\n" + res.etag + "\n" + int2String(time(0)) + "\n");
+            writeFile(dataFile, url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n");
         } catch (DownloadError & e) {
             if (storePath.empty()) throw;
             printMsg(lvlError, format("warning: %1%; using cached result") % e.msg());
diff --git a/src/libstore/download.hh b/src/libstore/download.hh
index 28c9117e4227..c1cb25b90c32 100644
--- a/src/libstore/download.hh
+++ b/src/libstore/download.hh
@@ -5,13 +5,20 @@
 
 namespace nix {
 
+struct DownloadOptions
+{
+    string expectedETag;
+    bool verifyTLS{true};
+    bool forceProgress{false};
+};
+
 struct DownloadResult
 {
     bool cached;
     string data, etag;
 };
 
-DownloadResult downloadFile(string url, string expectedETag = "");
+DownloadResult downloadFile(string url, const DownloadOptions & options);
 
 Path downloadFileCached(const string & url, bool unpack);
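
A minimal caller sketch for the new DownloadOptions/downloadFile API above (hypothetical and not part of this commit; the function name fetchWithOptions is made up). It shows how the old bare expectedETag argument is replaced by the options struct, and how the verifyTLS and forceProgress flags map onto the curl setup in download.cc:

    #include "download.hh"

    // Hypothetical caller: fetch a URL without TLS verification and with a
    // forced progress indicator, using the new DownloadOptions struct.
    nix::DownloadResult fetchWithOptions(const std::string & url)
    {
        nix::DownloadOptions options;
        options.expectedETag = "";      // no cached ETag to revalidate against
        options.verifyTLS = false;      // disables CURLOPT_SSL_VERIFYPEER/VERIFYHOST
        options.forceProgress = true;   // show progress even when stderr is not a TTY
        return nix::downloadFile(url, options);
    }
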
 
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 73f8489438fc..e704837e8798 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -77,6 +77,11 @@ void Settings::processEnvironment()
     nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
     nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
     nixDaemonSocketFile = canonPath(nixStateDir + DEFAULT_SOCKET_PATH);
+
+    // should be set with the other config options, but depends on nixLibexecDir
+#ifdef __APPLE__
+    preBuildHook = nixLibexecDir + "/nix/resolve-system-dependencies.pl";
+#endif
 }
 
 
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
index bf5c256c949e..e78f47949ad3 100644
--- a/src/libstore/local.mk
+++ b/src/libstore/local.mk
@@ -8,7 +8,7 @@ libstore_SOURCES := $(wildcard $(d)/*.cc)
 
 libstore_LIBS = libutil libformat
 
-libstore_LDFLAGS = -lsqlite3 -lbz2 -lcurl
+libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS)
 
 ifeq ($(OS), SunOS)
 	libstore_LDFLAGS += -lsocket
@@ -33,3 +33,4 @@ $(d)/local-store.cc: $(d)/schema.sql.hh
 clean-files += $(d)/schema.sql.hh
 
 $(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644))
+$(eval $(call install-file-in, $(d)/sandbox-defaults.sb, $(datadir)/nix, 0644))
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index 6f66961792fb..23cbe7e26b47 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -120,9 +120,9 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa
         return;
     }
 
-    /* This can still happen on top-level files */
+    /* This can still happen on top-level files. */
     if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
-        printMsg(lvlDebug, format("‘%1%’ is already linked, with %2% other file(s).") % path % (st.st_nlink - 2));
+        printMsg(lvlDebug, format("‘%1%’ is already linked, with %2% other file(s)") % path % (st.st_nlink - 2));
         return;
     }
 
@@ -141,6 +141,7 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa
     /* Check if this is a known hash. */
     Path linkPath = linksDir + "/" + printHash32(hash);
 
+ retry:
     if (!pathExists(linkPath)) {
         /* Nope, create a hard link in the links directory. */
         if (link(path.c_str(), linkPath.c_str()) == 0) {
@@ -164,6 +165,12 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa
         return;
     }
 
+    if (st.st_size != stLink.st_size) {
+        printMsg(lvlError, format("removing corrupted link ‘%1%’") % linkPath);
+        unlink(linkPath.c_str());
+        goto retry;
+    }
+
     printMsg(lvlTalkative, format("linking ‘%1%’ to ‘%2%’") % path % linkPath);
 
     /* Make the containing directory writable, but only if it's not
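
The retry label added above covers the case where an existing entry in the links directory is corrupt (its size no longer matches the file being deduplicated). A simplified standalone sketch of that pattern follows; the helper name linkWithRetry and the bare stat/link error handling are assumptions, not the actual optimisePath_ code:

    #include <string>
    #include <cstdio>
    #include <sys/stat.h>
    #include <unistd.h>

    static void linkWithRetry(const std::string & path, const std::string & linkPath)
    {
        struct stat st, stLink;
        if (stat(path.c_str(), &st) == -1) return;   // nothing to deduplicate

     retry:
        if (stat(linkPath.c_str(), &stLink) == -1) {
            // No link with this content hash yet: create one and we're done.
            if (link(path.c_str(), linkPath.c_str()) == -1)
                perror("link");
            return;
        }

        if (st.st_size != stLink.st_size) {
            // The existing link has a different size, so it must be corrupt:
            // remove it and retry the creation path above.
            fprintf(stderr, "removing corrupted link '%s'\n", linkPath.c_str());
            unlink(linkPath.c_str());
            goto retry;
        }

        // Sizes match: the existing link can be reused (the real optimisePath_
        // then replaces 'path' with a hard link to 'linkPath').
    }
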
diff --git a/src/libstore/sandbox-defaults.sb.in b/src/libstore/sandbox-defaults.sb.in
new file mode 100644
index 000000000000..b5e80085fbe2
--- /dev/null
+++ b/src/libstore/sandbox-defaults.sb.in
@@ -0,0 +1,63 @@
+(allow file-read* file-write-data (literal "/dev/null"))
+(allow ipc-posix*)
+(allow mach-lookup (global-name "com.apple.SecurityServer"))
+
+(allow file-read*
+       (literal "/dev/dtracehelper")
+       (literal "/dev/tty")
+       (literal "/dev/autofs_nowait")
+       (literal "/System/Library/CoreServices/SystemVersion.plist")
+       (literal "/private/var/run/systemkeychaincheck.done")
+       (literal "/private/etc/protocols")
+       (literal "/private/var/tmp")
+       (literal "/private/var/db")
+       (subpath "/private/var/db/mds"))
+
+(allow file-read*
+       (subpath "/usr/share/icu")
+       (subpath "/usr/share/locale")
+       (subpath "/usr/share/zoneinfo"))
+
+(allow file-write*
+       (literal "/dev/tty")
+       (literal "/dev/dtracehelper")
+       (literal "/mds"))
+
+(allow file-ioctl (literal "/dev/dtracehelper"))
+
+(allow file-read-metadata
+       (literal "/var")
+       (literal "/tmp")
+       ; symlinks
+       (literal "@sysconfdir@")
+       (literal "@sysconfdir@/nix")
+       (literal "@sysconfdir@/nix/nix.conf")
+       (literal "/etc/resolv.conf")
+       (literal "/private/etc/resolv.conf"))
+
+(allow file-read*
+       (literal "/private@sysconfdir@/nix/nix.conf")
+       (literal "/private/var/run/resolv.conf"))
+
+; some builders use filehandles other than stdin/stdout
+(allow file*
+        (subpath "/dev/fd")
+        (literal "/dev/ptmx")
+        (regex #"^/dev/[pt]ty.*$"))
+
+; allow everything inside TMP
+(allow file* process-exec
+       (subpath (param "_GLOBAL_TMP_DIR"))
+       (subpath "/private/tmp"))
+
+(allow process-fork)
+(allow sysctl-read)
+(allow signal (target same-sandbox))
+
+; allow getpwuid (for git and other packages)
+(allow mach-lookup
+       (global-name "com.apple.system.notification_center")
+       (global-name "com.apple.system.opendirectoryd.libinfo"))
+
+; allow local networking
+(allow network* (local ip) (remote unix-socket))
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 235017503213..9cc5fd45b7c4 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -87,10 +87,17 @@ struct ValidPathInfo
     Path deriver;
     Hash hash;
     PathSet references;
-    time_t registrationTime;
-    unsigned long long narSize; // 0 = unknown
+    time_t registrationTime = 0;
+    unsigned long long narSize = 0; // 0 = unknown
     unsigned long long id; // internal use only
-    ValidPathInfo() : registrationTime(0), narSize(0) { }
+
+    bool operator == (const ValidPathInfo & i) const
+    {
+        return
+            path == i.path
+            && hash == i.hash
+            && references == i.references;
+    }
 };
 
 typedef list<ValidPathInfo> ValidPathInfos;
@@ -112,7 +119,9 @@ struct BuildResult
         CachedFailure,
         TimedOut,
         MiscFailure,
-        DependencyFailed
+        DependencyFailed,
+        LogLimitExceeded,
+        NotDeterministic,
     } status = MiscFailure;
     std::string errorMsg;
     //time_t startTime = 0, stopTime = 0;
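
The in-class initialisers and operator== above make ValidPathInfo comparable on its content-identifying fields only. A hedged sketch of what that equality means in practice (the surrounding function and the placeholder path are made up for illustration):

    #include <cassert>
    #include "store-api.hh"

    void equalitySketch()
    {
        using namespace nix;

        ValidPathInfo a, b;
        a.path = b.path = "/nix/store/example";   // placeholder, not a real store path
        a.hash = b.hash = Hash(htSHA256);         // identical (all-zero) hashes

        a.registrationTime = 1;                   // metadata differs...
        b.registrationTime = 2;
        a.narSize = 100;
        b.narSize = 200;

        assert(a == b);   // ...but equality only looks at path, hash and references
    }
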
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index 0187f062b21d..6ee7981432b6 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -243,7 +243,7 @@ static void parse(ParseSink & sink, Source & source, const Path & path)
                         if (i != names.end()) {
                             printMsg(lvlDebug, format("case collision between ‘%1%’ and ‘%2%’") % i->first % name);
                             name += caseHackSuffix;
-                            name += int2String(++i->second);
+                            name += std::to_string(++i->second);
                         } else
                             names[name] = 0;
                     }
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
new file mode 100644
index 000000000000..446fcb781564
--- /dev/null
+++ b/src/libutil/compression.cc
@@ -0,0 +1,46 @@
+#include "compression.hh"
+#include "types.hh"
+
+#include <lzma.h>
+
+namespace nix {
+
+std::string decompressXZ(const std::string & in)
+{
+    lzma_stream strm = LZMA_STREAM_INIT;
+
+    lzma_ret ret = lzma_stream_decoder(
+        &strm, UINT64_MAX, LZMA_CONCATENATED);
+    if (ret != LZMA_OK)
+        throw Error("unable to initialise lzma decoder");
+
+    lzma_action action = LZMA_RUN;
+    uint8_t outbuf[BUFSIZ];
+    string res;
+    strm.next_in = (uint8_t *) in.c_str();
+    strm.avail_in = in.size();
+    strm.next_out = outbuf;
+    strm.avail_out = sizeof(outbuf);
+
+    while (true) {
+
+        if (strm.avail_in == 0)
+            action = LZMA_FINISH;
+
+        lzma_ret ret = lzma_code(&strm, action);
+
+        if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
+            res.append((char *) outbuf, sizeof(outbuf) - strm.avail_out);
+            strm.next_out = outbuf;
+            strm.avail_out = sizeof(outbuf);
+        }
+
+        if (ret == LZMA_STREAM_END)
+            return res;
+
+        if (ret != LZMA_OK)
+            throw Error("error while decompressing xz file");
+    }
+}
+
+}
diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh
new file mode 100644
index 000000000000..962ce5ac7767
--- /dev/null
+++ b/src/libutil/compression.hh
@@ -0,0 +1,9 @@
+#pragma once
+
+#include <string>
+
+namespace nix {
+
+std::string decompressXZ(const std::string & in);
+
+}
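
A usage sketch for the new decompressXZ helper (readXZFile is a made-up name; readFile is the existing libutil helper for slurping a file into a string):

    #include "compression.hh"
    #include "util.hh"

    // Read an .xz file into memory and decompress it in one go; decompressXZ
    // throws nix::Error if the stream is corrupt.
    std::string readXZFile(const nix::Path & path)
    {
        std::string compressed = nix::readFile(path);
        return nix::decompressXZ(compressed);
    }
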
diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc
index 1d973e7c8f14..2d97c5e6b6a7 100644
--- a/src/libutil/hash.cc
+++ b/src/libutil/hash.cc
@@ -3,16 +3,8 @@
 #include <iostream>
 #include <cstring>
 
-#ifdef HAVE_OPENSSL
 #include <openssl/md5.h>
 #include <openssl/sha.h>
-#else
-extern "C" {
-#include "md5.h"
-#include "sha1.h"
-#include "sha256.h"
-}
-#endif
 
 #include "hash.hh"
 #include "archive.hh"
@@ -40,6 +32,7 @@ Hash::Hash(HashType type)
     if (type == htMD5) hashSize = md5HashSize;
     else if (type == htSHA1) hashSize = sha1HashSize;
     else if (type == htSHA256) hashSize = sha256HashSize;
+    else if (type == htSHA512) hashSize = sha512HashSize;
     else throw Error("unknown hash type");
     assert(hashSize <= maxHashSize);
     memset(hash, 0, maxHashSize);
@@ -198,6 +191,7 @@ union Ctx
     MD5_CTX md5;
     SHA_CTX sha1;
     SHA256_CTX sha256;
+    SHA512_CTX sha512;
 };
 
 
@@ -206,6 +200,7 @@ static void start(HashType ht, Ctx & ctx)
     if (ht == htMD5) MD5_Init(&ctx.md5);
     else if (ht == htSHA1) SHA1_Init(&ctx.sha1);
     else if (ht == htSHA256) SHA256_Init(&ctx.sha256);
+    else if (ht == htSHA512) SHA512_Init(&ctx.sha512);
 }
 
 
@@ -215,6 +210,7 @@ static void update(HashType ht, Ctx & ctx,
     if (ht == htMD5) MD5_Update(&ctx.md5, bytes, len);
     else if (ht == htSHA1) SHA1_Update(&ctx.sha1, bytes, len);
     else if (ht == htSHA256) SHA256_Update(&ctx.sha256, bytes, len);
+    else if (ht == htSHA512) SHA512_Update(&ctx.sha512, bytes, len);
 }
 
 
@@ -223,6 +219,7 @@ static void finish(HashType ht, Ctx & ctx, unsigned char * hash)
     if (ht == htMD5) MD5_Final(hash, &ctx.md5);
     else if (ht == htSHA1) SHA1_Final(hash, &ctx.sha1);
     else if (ht == htSHA256) SHA256_Final(hash, &ctx.sha256);
+    else if (ht == htSHA512) SHA512_Final(hash, &ctx.sha512);
 }
 
 
@@ -320,6 +317,7 @@ HashType parseHashType(const string & s)
     if (s == "md5") return htMD5;
     else if (s == "sha1") return htSHA1;
     else if (s == "sha256") return htSHA256;
+    else if (s == "sha512") return htSHA512;
     else return htUnknown;
 }
 
@@ -329,6 +327,7 @@ string printHashType(HashType ht)
     if (ht == htMD5) return "md5";
     else if (ht == htSHA1) return "sha1";
     else if (ht == htSHA256) return "sha256";
+    else if (ht == htSHA512) return "sha512";
     else throw Error("cannot print unknown hash type");
 }
 
diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh
index 2c6f176ec74c..841b4cb2936c 100644
--- a/src/libutil/hash.hh
+++ b/src/libutil/hash.hh
@@ -7,19 +7,20 @@
 namespace nix {
 
 
-typedef enum { htUnknown, htMD5, htSHA1, htSHA256 } HashType;
+typedef enum { htUnknown, htMD5, htSHA1, htSHA256, htSHA512 } HashType;
 
 
 const int md5HashSize = 16;
 const int sha1HashSize = 20;
 const int sha256HashSize = 32;
+const int sha512HashSize = 64;
 
 extern const string base32Chars;
 
 
 struct Hash
 {
-    static const unsigned int maxHashSize = 32;
+    static const unsigned int maxHashSize = 64;
     unsigned int hashSize;
     unsigned char hash[maxHashSize];
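
With htSHA512 wired into the start/update/finish dispatch above, callers can hash with SHA-512 through the existing helpers. A small sketch (printSha512 is a hypothetical function; hashString and printHash are assumed to be the helpers already declared in hash.hh):

    #include "hash.hh"
    #include <iostream>

    void printSha512(const std::string & s)
    {
        nix::Hash h = nix::hashString(nix::htSHA512, s);   // 64-byte digest
        std::cout << nix::printHash(h) << std::endl;       // base-16 rendering
    }
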
 
diff --git a/src/libutil/local.mk b/src/libutil/local.mk
index 8af2e78d9ce4..4dae3305433f 100644
--- a/src/libutil/local.mk
+++ b/src/libutil/local.mk
@@ -6,10 +6,6 @@ libutil_DIR := $(d)
 
 libutil_SOURCES := $(wildcard $(d)/*.cc)
 
-ifeq ($(HAVE_OPENSSL), 1)
-  libutil_LDFLAGS = $(OPENSSL_LIBS)
-else
-  libutil_SOURCES += $(d)/md5.c $(d)/sha1.c $(d)/sha256.c
-endif
+libutil_LDFLAGS = -llzma $(OPENSSL_LIBS)
 
 libutil_LIBS = libformat
diff --git a/src/libutil/md32_common.h b/src/libutil/md32_common.h
deleted file mode 100644
index 0cbcfaf8a20b..000000000000
--- a/src/libutil/md32_common.h
+++ /dev/null
@@ -1,620 +0,0 @@
-/* crypto/md32_common.h */
-/* ====================================================================
- * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer. 
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- *    software must display the following acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- *    endorse or promote products derived from this software without
- *    prior written permission. For written permission, please contact
- *    licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- *    nor may "OpenSSL" appear in their names without prior written
- *    permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- *    acknowledgment:
- *    "This product includes software developed by the OpenSSL Project
- *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com).  This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-
-/*
- * This is a generic 32 bit "collector" for message digest algorithms.
- * Whenever needed it collects input character stream into chunks of
- * 32 bit values and invokes a block function that performs actual hash
- * calculations.
- *
- * Porting guide.
- *
- * Obligatory macros:
- *
- * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
- *	this macro defines byte order of input stream.
- * HASH_CBLOCK
- *	size of a unit chunk HASH_BLOCK operates on.
- * HASH_LONG
- *	has to be at lest 32 bit wide, if it's wider, then
- *	HASH_LONG_LOG2 *has to* be defined along
- * HASH_CTX
- *	context structure that at least contains following
- *	members:
- *		typedef struct {
- *			...
- *			HASH_LONG	Nl,Nh;
- *			HASH_LONG	data[HASH_LBLOCK];
- *			unsigned int	num;
- *			...
- *			} HASH_CTX;
- * HASH_UPDATE
- *	name of "Update" function, implemented here.
- * HASH_TRANSFORM
- *	name of "Transform" function, implemented here.
- * HASH_FINAL
- *	name of "Final" function, implemented here.
- * HASH_BLOCK_HOST_ORDER
- *	name of "block" function treating *aligned* input message
- *	in host byte order, implemented externally.
- * HASH_BLOCK_DATA_ORDER
- *	name of "block" function treating *unaligned* input message
- *	in original (data) byte order, implemented externally (it
- *	actually is optional if data and host are of the same
- *	"endianess").
- * HASH_MAKE_STRING
- *	macro convering context variables to an ASCII hash string.
- *
- * Optional macros:
- *
- * B_ENDIAN or L_ENDIAN
- *	defines host byte-order.
- * HASH_LONG_LOG2
- *	defaults to 2 if not states otherwise.
- * HASH_LBLOCK
- *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
- * HASH_BLOCK_DATA_ORDER_ALIGNED
- *	alternative "block" function capable of treating
- *	aligned input message in original (data) order,
- *	implemented externally.
- *
- * MD5 example:
- *
- *	#define DATA_ORDER_IS_LITTLE_ENDIAN
- *
- *	#define HASH_LONG		MD5_LONG
- *	#define HASH_LONG_LOG2		MD5_LONG_LOG2
- *	#define HASH_CTX		MD5_CTX
- *	#define HASH_CBLOCK		MD5_CBLOCK
- *	#define HASH_LBLOCK		MD5_LBLOCK
- *	#define HASH_UPDATE		MD5_Update
- *	#define HASH_TRANSFORM		MD5_Transform
- *	#define HASH_FINAL		MD5_Final
- *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
- *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
- *
- *					<appro@fy.chalmers.se>
- */
-
-#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-#error "DATA_ORDER must be defined!"
-#endif
-
-#ifndef HASH_CBLOCK
-#error "HASH_CBLOCK must be defined!"
-#endif
-#ifndef HASH_LONG
-#error "HASH_LONG must be defined!"
-#endif
-#ifndef HASH_CTX
-#error "HASH_CTX must be defined!"
-#endif
-
-#ifndef HASH_UPDATE
-#error "HASH_UPDATE must be defined!"
-#endif
-#ifndef HASH_TRANSFORM
-#error "HASH_TRANSFORM must be defined!"
-#endif
-#ifndef HASH_FINAL
-#error "HASH_FINAL must be defined!"
-#endif
-
-#ifndef HASH_BLOCK_HOST_ORDER
-#error "HASH_BLOCK_HOST_ORDER must be defined!"
-#endif
-
-#if 0
-/*
- * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
- * isn't defined.
- */
-#ifndef HASH_BLOCK_DATA_ORDER
-#error "HASH_BLOCK_DATA_ORDER must be defined!"
-#endif
-#endif
-
-#ifndef HASH_LBLOCK
-#define HASH_LBLOCK	(HASH_CBLOCK/4)
-#endif
-
-#ifndef HASH_LONG_LOG2
-#define HASH_LONG_LOG2	2
-#endif
-
-/*
- * Engage compiler specific rotate intrinsic function if available.
- */
-#undef ROTATE
-#ifndef PEDANTIC
-# if defined(_MSC_VER) || defined(__ICC)
-#  define ROTATE(a,n)	_lrotl(a,n)
-# elif defined(__MWERKS__)
-#  if defined(__POWERPC__)
-#   define ROTATE(a,n)	__rlwinm(a,n,0,31)
-#  elif defined(__MC68K__)
-    /* Motorola specific tweak. <appro@fy.chalmers.se> */
-#   define ROTATE(a,n)	( n<24 ? __rol(a,n) : __ror(a,32-n) )
-#  else
-#   define ROTATE(a,n)	__rol(a,n)
-#  endif
-# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-  /*
-   * Some GNU C inline assembler templates. Note that these are
-   * rotates by *constant* number of bits! But that's exactly
-   * what we need here...
-   * 					<appro@fy.chalmers.se>
-   */
-#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
-#   define ROTATE(a,n)	({ register unsigned int ret;	\
-				asm (			\
-				"roll %1,%0"		\
-				: "=r"(ret)		\
-				: "I"(n), "0"(a)	\
-				: "cc");		\
-			   ret;				\
-			})
-#  elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
-#   define ROTATE(a,n)	({ register unsigned int ret;	\
-				asm (			\
-				"rlwinm %0,%1,%2,0,31"	\
-				: "=r"(ret)		\
-				: "r"(a), "I"(n));	\
-			   ret;				\
-			})
-#  endif
-# endif
-#endif /* PEDANTIC */
-
-#if HASH_LONG_LOG2==2	/* Engage only if sizeof(HASH_LONG)== 4 */
-/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
-#ifdef ROTATE
-/* 5 instructions with rotate instruction, else 9 */
-#define REVERSE_FETCH32(a,l)	(					\
-		l=*(const HASH_LONG *)(a),				\
-		((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))	\
-				)
-#else
-/* 6 instructions with rotate instruction, else 8 */
-#define REVERSE_FETCH32(a,l)	(				\
-		l=*(const HASH_LONG *)(a),			\
-		l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),	\
-		ROTATE(l,16)					\
-				)
-/*
- * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
- * It's rewritten as above for two reasons:
- *	- RISCs aren't good at long constants and have to explicitely
- *	  compose 'em with several (well, usually 2) instructions in a
- *	  register before performing the actual operation and (as you
- *	  already realized:-) having same constant should inspire the
- *	  compiler to permanently allocate the only register for it;
- *	- most modern CPUs have two ALUs, but usually only one has
- *	  circuitry for shifts:-( this minor tweak inspires compiler
- *	  to schedule shift instructions in a better way...
- *
- *				<appro@fy.chalmers.se>
- */
-#endif
-#endif
-
-#ifndef ROTATE
-#define ROTATE(a,n)     (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
-#endif
-
-/*
- * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
- * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
- * and host are of the same "endianess". It's possible to mask
- * this with blank #define HASH_BLOCK_DATA_ORDER though...
- *
- *				<appro@fy.chalmers.se>
- */
-#if defined(B_ENDIAN)
-#  if defined(DATA_ORDER_IS_BIG_ENDIAN)
-#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
-#      define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
-#    endif
-#  endif
-#elif defined(L_ENDIAN)
-#  if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
-#      define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
-#    endif
-#  endif
-#endif
-
-#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
-#ifndef HASH_BLOCK_DATA_ORDER
-#error "HASH_BLOCK_DATA_ORDER must be defined!"
-#endif
-#endif
-
-#if defined(DATA_ORDER_IS_BIG_ENDIAN)
-
-#ifndef PEDANTIC
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
-    /*
-     * This gives ~30-40% performance improvement in SHA-256 compiled
-     * with gcc [on P4]. Well, first macro to be frank. We can pull
-     * this trick on x86* platforms only, because these CPUs can fetch
-     * unaligned data without raising an exception.
-     */
-#   define HOST_c2l(c,l)	({ unsigned int r=*((const unsigned int *)(c));	\
-				   asm ("bswapl %0":"=r"(r):"0"(r));	\
-				   (c)+=4; (l)=r;			})
-#   define HOST_l2c(l,c)	({ unsigned int r=(l);			\
-				   asm ("bswapl %0":"=r"(r):"0"(r));	\
-				   *((unsigned int *)(c))=r; (c)+=4; r;	})
-#  endif
-# endif
-#endif
-
-#ifndef HOST_c2l
-#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),		\
-			 l|=(((unsigned long)(*((c)++)))<<16),		\
-			 l|=(((unsigned long)(*((c)++)))<< 8),		\
-			 l|=(((unsigned long)(*((c)++)))    ),		\
-			 l)
-#endif
-#define HOST_p_c2l(c,l,n)	{					\
-			switch (n) {					\
-			case 0: l =((unsigned long)(*((c)++)))<<24;	\
-			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
-			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
-			case 3: l|=((unsigned long)(*((c)++)));		\
-				} }
-#define HOST_p_c2l_p(c,l,sc,len) {					\
-			switch (sc) {					\
-			case 0: l =((unsigned long)(*((c)++)))<<24;	\
-				if (--len == 0) break;			\
-			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
-				if (--len == 0) break;			\
-			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
-				} }
-/* NOTE the pointer is not incremented at the end of this */
-#define HOST_c2l_p(c,l,n)	{					\
-			l=0; (c)+=n;					\
-			switch (n) {					\
-			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
-			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
-			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
-				} }
-#ifndef HOST_l2c
-#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
-			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
-			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
-			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
-			 l)
-#endif
-
-#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-
-#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
-  /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
-# define HOST_c2l(c,l)	((l)=*((const unsigned int *)(c)), (c)+=4, l)
-# define HOST_l2c(l,c)	(*((unsigned int *)(c))=(l), (c)+=4, l)
-#endif
-
-#ifndef HOST_c2l
-#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))    ),		\
-			 l|=(((unsigned long)(*((c)++)))<< 8),		\
-			 l|=(((unsigned long)(*((c)++)))<<16),		\
-			 l|=(((unsigned long)(*((c)++)))<<24),		\
-			 l)
-#endif
-#define HOST_p_c2l(c,l,n)	{					\
-			switch (n) {					\
-			case 0: l =((unsigned long)(*((c)++)));		\
-			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
-			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
-			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
-				} }
-#define HOST_p_c2l_p(c,l,sc,len) {					\
-			switch (sc) {					\
-			case 0: l =((unsigned long)(*((c)++)));		\
-				if (--len == 0) break;			\
-			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
-				if (--len == 0) break;			\
-			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
-				} }
-/* NOTE the pointer is not incremented at the end of this */
-#define HOST_c2l_p(c,l,n)	{					\
-			l=0; (c)+=n;					\
-			switch (n) {					\
-			case 3: l =((unsigned long)(*(--(c))))<<16;	\
-			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
-			case 1: l|=((unsigned long)(*(--(c))));		\
-				} }
-#ifndef HOST_l2c
-#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
-			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
-			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
-			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
-			 l)
-#endif
-
-#endif
-
-/*
- * Time for some action:-)
- */
-
-int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
-	{
-	const unsigned char *data=data_;
-	register HASH_LONG * p;
-	register HASH_LONG l;
-	size_t sw,sc,ew,ec;
-
-	if (len==0) return 1;
-
-	l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
-	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
-	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
-	if (l < c->Nl) /* overflow */
-		c->Nh++;
-	c->Nh+=(len>>29);	/* might cause compiler warning on 16-bit */
-	c->Nl=l;
-
-	if (c->num != 0)
-		{
-		p=c->data;
-		sw=c->num>>2;
-		sc=c->num&0x03;
-
-		if ((c->num+len) >= HASH_CBLOCK)
-			{
-			l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
-			for (; sw<HASH_LBLOCK; sw++)
-				{
-				HOST_c2l(data,l); p[sw]=l;
-				}
-			HASH_BLOCK_HOST_ORDER (c,p,1);
-			len-=(HASH_CBLOCK-c->num);
-			c->num=0;
-			/* drop through and do the rest */
-			}
-		else
-			{
-			c->num+=(unsigned int)len;
-			if ((sc+len) < 4) /* ugly, add char's to a word */
-				{
-				l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
-				}
-			else
-				{
-				ew=(c->num>>2);
-				ec=(c->num&0x03);
-				if (sc)
-					l=p[sw];
-				HOST_p_c2l(data,l,sc);
-				p[sw++]=l;
-				for (; sw < ew; sw++)
-					{
-					HOST_c2l(data,l); p[sw]=l;
-					}
-				if (ec)
-					{
-					HOST_c2l_p(data,l,ec); p[sw]=l;
-					}
-				}
-			return 1;
-			}
-		}
-
-	sw=len/HASH_CBLOCK;
-	if (sw > 0)
-		{
-#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
-		/*
-		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
-		 * only if sizeof(HASH_LONG)==4.
-		 */
-		if ((((size_t)data)%4) == 0)
-			{
-			/* data is properly aligned so that we can cast it: */
-			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
-			sw*=HASH_CBLOCK;
-			data+=sw;
-			len-=sw;
-			}
-		else
-#if !defined(HASH_BLOCK_DATA_ORDER)
-			while (sw--)
-				{
-				memcpy (p=c->data,data,HASH_CBLOCK);
-				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
-				data+=HASH_CBLOCK;
-				len-=HASH_CBLOCK;
-				}
-#endif
-#endif
-#if defined(HASH_BLOCK_DATA_ORDER)
-			{
-			HASH_BLOCK_DATA_ORDER(c,data,sw);
-			sw*=HASH_CBLOCK;
-			data+=sw;
-			len-=sw;
-			}
-#endif
-		}
-
-	if (len!=0)
-		{
-		p = c->data;
-		c->num = len;
-		ew=len>>2;	/* words to copy */
-		ec=len&0x03;
-		for (; ew; ew--,p++)
-			{
-			HOST_c2l(data,l); *p=l;
-			}
-		HOST_c2l_p(data,l,ec);
-		*p=l;
-		}
-	return 1;
-	}
-
-
-void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
-	{
-#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
-	if ((((size_t)data)%4) == 0)
-		/* data is properly aligned so that we can cast it: */
-		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
-	else
-#if !defined(HASH_BLOCK_DATA_ORDER)
-		{
-		memcpy (c->data,data,HASH_CBLOCK);
-		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
-		}
-#endif
-#endif
-#if defined(HASH_BLOCK_DATA_ORDER)
-	HASH_BLOCK_DATA_ORDER (c,data,1);
-#endif
-	}
-
-
-int HASH_FINAL (unsigned char *md, HASH_CTX *c)
-	{
-	register HASH_LONG *p;
-	register unsigned long l;
-	register int i,j;
-	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
-	const unsigned char *cp=end;
-
-	/* c->num should definitly have room for at least one more byte. */
-	p=c->data;
-	i=c->num>>2;
-	j=c->num&0x03;
-
-#if 0
-	/* purify often complains about the following line as an
-	 * Uninitialized Memory Read.  While this can be true, the
-	 * following p_c2l macro will reset l when that case is true.
-	 * This is because j&0x03 contains the number of 'valid' bytes
-	 * already in p[i].  If and only if j&0x03 == 0, the UMR will
-	 * occur but this is also the only time p_c2l will do
-	 * l= *(cp++) instead of l|= *(cp++)
-	 * Many thanks to Alex Tang <altitude@cic.net> for pickup this
-	 * 'potential bug' */
-#ifdef PURIFY
-	if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
-#endif
-	l=p[i];
-#else
-	l = (j==0) ? 0 : p[i];
-#endif
-	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */
-
-	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
-		{
-		if (i<HASH_LBLOCK) p[i]=0;
-		HASH_BLOCK_HOST_ORDER (c,p,1);
-		i=0;
-		}
-	for (; i<(HASH_LBLOCK-2); i++)
-		p[i]=0;
-
-#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
-	p[HASH_LBLOCK-2]=c->Nh;
-	p[HASH_LBLOCK-1]=c->Nl;
-#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-	p[HASH_LBLOCK-2]=c->Nl;
-	p[HASH_LBLOCK-1]=c->Nh;
-#endif
-	HASH_BLOCK_HOST_ORDER (c,p,1);
-
-#ifndef HASH_MAKE_STRING
-#error "HASH_MAKE_STRING must be defined!"
-#else
-	HASH_MAKE_STRING(c,md);
-#endif
-
-	c->num=0;
-	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
-	 * but I'm not worried :-)
-	OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
-	 */
-	return 1;
-	}
-
-#ifndef MD32_REG_T
-#define MD32_REG_T long
-/*
- * This comment was originaly written for MD5, which is why it
- * discusses A-D. But it basically applies to all 32-bit digests,
- * which is why it was moved to common header file.
- *
- * In case you wonder why A-D are declared as long and not
- * as MD5_LONG. Doing so results in slight performance
- * boost on LP64 architectures. The catch is we don't
- * really care if 32 MSBs of a 64-bit register get polluted
- * with eventual overflows as we *save* only 32 LSBs in
- * *either* case. Now declaring 'em long excuses the compiler
- * from keeping 32 MSBs zeroed resulting in 13% performance
- * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
- * Well, to be honest it should say that this *prevents* 
- * performance degradation.
- *				<appro@fy.chalmers.se>
- * Apparently there're LP64 compilers that generate better
- * code if A-D are declared int. Most notably GCC-x86_64
- * generates better code.
- *				<appro@fy.chalmers.se>
- */
-#endif
diff --git a/src/libutil/md5.c b/src/libutil/md5.c
deleted file mode 100644
index b31640cdcced..000000000000
--- a/src/libutil/md5.c
+++ /dev/null
@@ -1,365 +0,0 @@
-/* Functions to compute MD5 message digest of files or memory blocks.
-   according to the definition of MD5 in RFC 1321 from April 1992.
-   Copyright (C) 1995,1996,1997,1999,2000,2001 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, write to the Free
-   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
-
-/* Written by Ulrich Drepper <drepper@gnu.ai.mit.edu>, 1995.  */
-
-#include <sys/types.h>
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "md5.h"
-
-
-static md5_uint32 SWAP(md5_uint32 n)
-{
-  static int checked = 0;
-  static int bigendian = 0;
-  static md5_uint32 test;
-
-  if (!checked) {
-    test = 1;
-    if (* (char *) &test == 0)
-      bigendian = 1;
-    checked = 1;
-  }
-
-  if (bigendian)
-    return (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24));
-  else
-    return n;
-}
-
-
-/* This array contains the bytes used to pad the buffer to the next
-   64-byte boundary.  (RFC 1321, 3.1: Step 1)  */
-static const unsigned char fillbuf[64] = { 0x80, 0 /* , 0, 0, ...  */ };
-
-
-/* Initialize structure containing state of computation.
-   (RFC 1321, 3.3: Step 3)  */
-void
-MD5_Init (ctx)
-     struct MD5_CTX *ctx;
-{
-  ctx->A = 0x67452301;
-  ctx->B = 0xefcdab89;
-  ctx->C = 0x98badcfe;
-  ctx->D = 0x10325476;
-
-  ctx->total[0] = ctx->total[1] = 0;
-  ctx->buflen = 0;
-}
-
-/* Put result from CTX in first 16 bytes following RESBUF.  The result
-   must be in little endian byte order.
-
-   IMPORTANT: On some systems it is required that RESBUF is correctly
-   aligned for a 32 bits value.  */
-void *
-md5_read_ctx (ctx, resbuf)
-     const struct MD5_CTX *ctx;
-     void *resbuf;
-{
-  ((md5_uint32 *) resbuf)[0] = SWAP (ctx->A);
-  ((md5_uint32 *) resbuf)[1] = SWAP (ctx->B);
-  ((md5_uint32 *) resbuf)[2] = SWAP (ctx->C);
-  ((md5_uint32 *) resbuf)[3] = SWAP (ctx->D);
-
-  return resbuf;
-}
-
-/* Process the remaining bytes in the internal buffer and the usual
-   prolog according to the standard and write the result to RESBUF.
-
-   IMPORTANT: On some systems it is required that RESBUF is correctly
-   aligned for a 32 bits value.  */
-void *
-MD5_Final (resbuf, ctx)
-     void *resbuf;
-     struct MD5_CTX *ctx;
-{
-  /* Take yet unprocessed bytes into account.  */
-  md5_uint32 bytes = ctx->buflen;
-  size_t pad;
-
-  /* Now count remaining bytes.  */
-  ctx->total[0] += bytes;
-  if (ctx->total[0] < bytes)
-    ++ctx->total[1];
-
-  pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
-  memcpy (&ctx->buffer[bytes], fillbuf, pad);
-
-  /* Put the 64-bit file length in *bits* at the end of the buffer.  */
-  *(md5_uint32 *) &ctx->buffer[bytes + pad] = SWAP (ctx->total[0] << 3);
-  *(md5_uint32 *) &ctx->buffer[bytes + pad + 4] = SWAP ((ctx->total[1] << 3) |
-							(ctx->total[0] >> 29));
-
-  /* Process last bytes.  */
-  md5_process_block (ctx->buffer, bytes + pad + 8, ctx);
-
-  return md5_read_ctx (ctx, resbuf);
-}
-
-void
-MD5_Update (ctx, buffer, len)
-     struct MD5_CTX *ctx;
-     const void *buffer;
-     size_t len;
-{
-  /* When we already have some bits in our internal buffer concatenate
-     both inputs first.  */
-  if (ctx->buflen != 0)
-    {
-      size_t left_over = ctx->buflen;
-      size_t add = 128 - left_over > len ? len : 128 - left_over;
-
-      memcpy (&ctx->buffer[left_over], buffer, add);
-      ctx->buflen += add;
-
-      if (ctx->buflen > 64)
-	{
-	  md5_process_block (ctx->buffer, ctx->buflen & ~63, ctx);
-
-	  ctx->buflen &= 63;
-	  /* The regions in the following copy operation cannot overlap.  */
-	  memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
-		  ctx->buflen);
-	}
-
-      buffer = (const char *) buffer + add;
-      len -= add;
-    }
-
-  /* Process available complete blocks.  */
-  if (len >= 64)
-    {
-#if !_STRING_ARCH_unaligned
-/* To check alignment gcc has an appropriate operator.  Other
-   compilers don't.  */
-# if __GNUC__ >= 2
-#  define UNALIGNED_P(p) (((md5_uintptr) p) % __alignof__ (md5_uint32) != 0)
-# else
-#  define UNALIGNED_P(p) (((md5_uintptr) p) % sizeof (md5_uint32) != 0)
-# endif
-      if (UNALIGNED_P (buffer))
-	while (len > 64)
-	  {
-	    md5_process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx);
-	    buffer = (const char *) buffer + 64;
-	    len -= 64;
-	  }
-      else
-#endif
-	{
-	  md5_process_block (buffer, len & ~63, ctx);
-	  buffer = (const char *) buffer + (len & ~63);
-	  len &= 63;
-	}
-    }
-
-  /* Move remaining bytes in internal buffer.  */
-  if (len > 0)
-    {
-      size_t left_over = ctx->buflen;
-
-      memcpy (&ctx->buffer[left_over], buffer, len);
-      left_over += len;
-      if (left_over >= 64)
-	{
-	  md5_process_block (ctx->buffer, 64, ctx);
-	  left_over -= 64;
-	  memcpy (ctx->buffer, &ctx->buffer[64], left_over);
-	}
-      ctx->buflen = left_over;
-    }
-}
-
-
-/* These are the four functions used in the four steps of the MD5 algorithm
-   and defined in the RFC 1321.  The first function is a little bit optimized
-   (as found in Colin Plumbs public domain implementation).  */
-/* #define FF(b, c, d) ((b & c) | (~b & d)) */
-#define FF(b, c, d) (d ^ (b & (c ^ d)))
-#define FG(b, c, d) FF (d, b, c)
-#define FH(b, c, d) (b ^ c ^ d)
-#define FI(b, c, d) (c ^ (b | ~d))
-
-/* Process LEN bytes of BUFFER, accumulating context into CTX.
-   It is assumed that LEN % 64 == 0.  */
-
-void
-md5_process_block (buffer, len, ctx)
-     const void *buffer;
-     size_t len;
-     struct MD5_CTX *ctx;
-{
-  md5_uint32 correct_words[16];
-  const md5_uint32 *words = buffer;
-  size_t nwords = len / sizeof (md5_uint32);
-  const md5_uint32 *endp = words + nwords;
-  md5_uint32 A = ctx->A;
-  md5_uint32 B = ctx->B;
-  md5_uint32 C = ctx->C;
-  md5_uint32 D = ctx->D;
-
-  /* First increment the byte count.  RFC 1321 specifies the possible
-     length of the file up to 2^64 bits.  Here we only compute the
-     number of bytes.  Do a double word increment.  */
-  ctx->total[0] += len;
-  if (ctx->total[0] < len)
-    ++ctx->total[1];
-
-  /* Process all bytes in the buffer with 64 bytes in each round of
-     the loop.  */
-  while (words < endp)
-    {
-      md5_uint32 *cwp = correct_words;
-      md5_uint32 A_save = A;
-      md5_uint32 B_save = B;
-      md5_uint32 C_save = C;
-      md5_uint32 D_save = D;
-
-      /* First round: using the given function, the context and a constant
-	 the next context is computed.  Because the algorithms processing
-	 unit is a 32-bit word and it is determined to work on words in
-	 little endian byte order we perhaps have to change the byte order
-	 before the computation.  To reduce the work for the next steps
-	 we store the swapped words in the array CORRECT_WORDS.  */
-
-#define OP(a, b, c, d, s, T)						\
-      do								\
-        {								\
-	  a += FF (b, c, d) + (*cwp++ = SWAP (*words)) + T;		\
-	  ++words;							\
-	  CYCLIC (a, s);						\
-	  a += b;							\
-        }								\
-      while (0)
-
-      /* It is unfortunate that C does not provide an operator for
-	 cyclic rotation.  Hope the C compiler is smart enough.  */
-#define CYCLIC(w, s) (w = (w << s) | (w >> (32 - s)))
-
-      /* Before we start, one word to the strange constants.
-	 They are defined in RFC 1321 as
-
-	 T[i] = (int) (4294967296.0 * fabs (sin (i))), i=1..64
-       */
-
-      /* Round 1.  */
-      OP (A, B, C, D,  7, 0xd76aa478);
-      OP (D, A, B, C, 12, 0xe8c7b756);
-      OP (C, D, A, B, 17, 0x242070db);
-      OP (B, C, D, A, 22, 0xc1bdceee);
-      OP (A, B, C, D,  7, 0xf57c0faf);
-      OP (D, A, B, C, 12, 0x4787c62a);
-      OP (C, D, A, B, 17, 0xa8304613);
-      OP (B, C, D, A, 22, 0xfd469501);
-      OP (A, B, C, D,  7, 0x698098d8);
-      OP (D, A, B, C, 12, 0x8b44f7af);
-      OP (C, D, A, B, 17, 0xffff5bb1);
-      OP (B, C, D, A, 22, 0x895cd7be);
-      OP (A, B, C, D,  7, 0x6b901122);
-      OP (D, A, B, C, 12, 0xfd987193);
-      OP (C, D, A, B, 17, 0xa679438e);
-      OP (B, C, D, A, 22, 0x49b40821);
-
-      /* For the second to fourth round we have the possibly swapped words
-	 in CORRECT_WORDS.  Redefine the macro to take an additional first
-	 argument specifying the function to use.  */
-#undef OP
-#define OP(f, a, b, c, d, k, s, T)					\
-      do 								\
-	{								\
-	  a += f (b, c, d) + correct_words[k] + T;			\
-	  CYCLIC (a, s);						\
-	  a += b;							\
-	}								\
-      while (0)
-
-      /* Round 2.  */
-      OP (FG, A, B, C, D,  1,  5, 0xf61e2562);
-      OP (FG, D, A, B, C,  6,  9, 0xc040b340);
-      OP (FG, C, D, A, B, 11, 14, 0x265e5a51);
-      OP (FG, B, C, D, A,  0, 20, 0xe9b6c7aa);
-      OP (FG, A, B, C, D,  5,  5, 0xd62f105d);
-      OP (FG, D, A, B, C, 10,  9, 0x02441453);
-      OP (FG, C, D, A, B, 15, 14, 0xd8a1e681);
-      OP (FG, B, C, D, A,  4, 20, 0xe7d3fbc8);
-      OP (FG, A, B, C, D,  9,  5, 0x21e1cde6);
-      OP (FG, D, A, B, C, 14,  9, 0xc33707d6);
-      OP (FG, C, D, A, B,  3, 14, 0xf4d50d87);
-      OP (FG, B, C, D, A,  8, 20, 0x455a14ed);
-      OP (FG, A, B, C, D, 13,  5, 0xa9e3e905);
-      OP (FG, D, A, B, C,  2,  9, 0xfcefa3f8);
-      OP (FG, C, D, A, B,  7, 14, 0x676f02d9);
-      OP (FG, B, C, D, A, 12, 20, 0x8d2a4c8a);
-
-      /* Round 3.  */
-      OP (FH, A, B, C, D,  5,  4, 0xfffa3942);
-      OP (FH, D, A, B, C,  8, 11, 0x8771f681);
-      OP (FH, C, D, A, B, 11, 16, 0x6d9d6122);
-      OP (FH, B, C, D, A, 14, 23, 0xfde5380c);
-      OP (FH, A, B, C, D,  1,  4, 0xa4beea44);
-      OP (FH, D, A, B, C,  4, 11, 0x4bdecfa9);
-      OP (FH, C, D, A, B,  7, 16, 0xf6bb4b60);
-      OP (FH, B, C, D, A, 10, 23, 0xbebfbc70);
-      OP (FH, A, B, C, D, 13,  4, 0x289b7ec6);
-      OP (FH, D, A, B, C,  0, 11, 0xeaa127fa);
-      OP (FH, C, D, A, B,  3, 16, 0xd4ef3085);
-      OP (FH, B, C, D, A,  6, 23, 0x04881d05);
-      OP (FH, A, B, C, D,  9,  4, 0xd9d4d039);
-      OP (FH, D, A, B, C, 12, 11, 0xe6db99e5);
-      OP (FH, C, D, A, B, 15, 16, 0x1fa27cf8);
-      OP (FH, B, C, D, A,  2, 23, 0xc4ac5665);
-
-      /* Round 4.  */
-      OP (FI, A, B, C, D,  0,  6, 0xf4292244);
-      OP (FI, D, A, B, C,  7, 10, 0x432aff97);
-      OP (FI, C, D, A, B, 14, 15, 0xab9423a7);
-      OP (FI, B, C, D, A,  5, 21, 0xfc93a039);
-      OP (FI, A, B, C, D, 12,  6, 0x655b59c3);
-      OP (FI, D, A, B, C,  3, 10, 0x8f0ccc92);
-      OP (FI, C, D, A, B, 10, 15, 0xffeff47d);
-      OP (FI, B, C, D, A,  1, 21, 0x85845dd1);
-      OP (FI, A, B, C, D,  8,  6, 0x6fa87e4f);
-      OP (FI, D, A, B, C, 15, 10, 0xfe2ce6e0);
-      OP (FI, C, D, A, B,  6, 15, 0xa3014314);
-      OP (FI, B, C, D, A, 13, 21, 0x4e0811a1);
-      OP (FI, A, B, C, D,  4,  6, 0xf7537e82);
-      OP (FI, D, A, B, C, 11, 10, 0xbd3af235);
-      OP (FI, C, D, A, B,  2, 15, 0x2ad7d2bb);
-      OP (FI, B, C, D, A,  9, 21, 0xeb86d391);
-
-      /* Add the starting values of the context.  */
-      A += A_save;
-      B += B_save;
-      C += C_save;
-      D += D_save;
-    }
-
-  /* Put checksum in context given as argument.  */
-  ctx->A = A;
-  ctx->B = B;
-  ctx->C = C;
-  ctx->D = D;
-}
diff --git a/src/libutil/md5.h b/src/libutil/md5.h
deleted file mode 100644
index 228d4972320f..000000000000
--- a/src/libutil/md5.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Declaration of functions and data types used for MD5 sum computing
-   library functions.
-   Copyright (C) 1995,1996,1997,1999,2000,2001 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, write to the Free
-   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
-
-#ifndef _MD5_H
-#define _MD5_H 1
-
-#include <inttypes.h>
-typedef uint32_t md5_uint32;
-typedef uintptr_t md5_uintptr;
-
-/* Structure to save state of computation between the single steps.  */
-struct MD5_CTX
-{
-  md5_uint32 A;
-  md5_uint32 B;
-  md5_uint32 C;
-  md5_uint32 D;
-
-  md5_uint32 total[2];
-  md5_uint32 buflen;
-  char buffer[128] __attribute__ ((__aligned__ (__alignof__ (md5_uint32))));
-};
-
-/*
- * The following three functions are build up the low level used in
- * the functions `md5_stream' and `md5_buffer'.
- */
-
-/* Initialize structure containing state of computation.
-   (RFC 1321, 3.3: Step 3)  */
-extern void MD5_Init (struct MD5_CTX *ctx);
-
-/* Starting with the result of former calls of this function (or the
-   initialization function update the context for the next LEN bytes
-   starting at BUFFER.
-   It is necessary that LEN is a multiple of 64!!! */
-extern void md5_process_block (const void *buffer, size_t len,
-				      struct MD5_CTX *ctx);
-
-/* Starting with the result of former calls of this function (or the
-   initialization function update the context for the next LEN bytes
-   starting at BUFFER.
-   It is NOT required that LEN is a multiple of 64.  */
-extern void MD5_Update (struct MD5_CTX *ctx, const void *buffer, size_t len);
-
-/* Process the remaining bytes in the buffer and put result from CTX
-   in first 16 bytes following RESBUF.  The result is always in little
-   endian byte order, so that a byte-wise output yields to the wanted
-   ASCII representation of the message digest.
-
-   IMPORTANT: On some systems it is required that RESBUF is correctly
-   aligned for a 32 bits value.  */
-extern void *MD5_Final (void *resbuf, struct MD5_CTX *ctx);
-
-
-/* Put result from CTX in first 16 bytes following RESBUF.  The result is
-   always in little endian byte order, so that a byte-wise output yields
-   to the wanted ASCII representation of the message digest.
-
-   IMPORTANT: On some systems it is required that RESBUF is correctly
-   aligned for a 32 bits value.  */
-extern void *md5_read_ctx (const struct MD5_CTX *ctx, void *resbuf);
-
-
-#endif /* md5.h */
diff --git a/src/libutil/sha1.c b/src/libutil/sha1.c
deleted file mode 100644
index d9d294d15540..000000000000
--- a/src/libutil/sha1.c
+++ /dev/null
@@ -1,369 +0,0 @@
-/* $Id$ */
-
-/* sha.c - Implementation of the Secure Hash Algorithm
- *
- * Copyright (C) 1995, A.M. Kuchling
- *
- * Distribute and use freely; there are no restrictions on further 
- * dissemination and usage except those imposed by the laws of your 
- * country of residence.
- *
- * Adapted to pike and some cleanup by Niels Mller.
- */
-
-/* $Id$ */
-
-/* SHA: NIST's Secure Hash Algorithm */
-
-/* Based on SHA code originally posted to sci.crypt by Peter Gutmann
-   in message <30ajo5$oe8@ccu2.auckland.ac.nz>.
-   Modified to test for endianness on creation of SHA objects by AMK.
-   Also, the original specification of SHA was found to have a weakness
-   by NSA/NIST.  This code implements the fixed version of SHA.
-*/
-
-/* Here's the first paragraph of Peter Gutmann's posting:
-   
-The following is my SHA (FIPS 180) code updated to allow use of the "fixed"
-SHA, thanks to Jim Gillogly and an anonymous contributor for the information on
-what's changed in the new version.  The fix is a simple change which involves
-adding a single rotate in the initial expansion function.  It is unknown
-whether this is an optimal solution to the problem which was discovered in the
-SHA or whether it's simply a bandaid which fixes the problem with a minimum of
-effort (for example the reengineering of a great many Capstone chips).
-*/
-
-#include "sha1.h"
-
-#include <string.h>
-
-void sha_copy(struct SHA_CTX *dest, struct SHA_CTX *src)
-{
-  unsigned int i;
-
-  dest->count_l=src->count_l;
-  dest->count_h=src->count_h;
-  for(i=0; i<SHA_DIGESTLEN; i++)
-    dest->digest[i]=src->digest[i];
-  for(i=0; i < src->index; i++)
-    dest->block[i] = src->block[i];
-  dest->index = src->index;
-}
-
-
-/* The SHA f()-functions.  The f1 and f3 functions can be optimized to
-   save one boolean operation each - thanks to Rich Schroeppel,
-   rcs@cs.arizona.edu for discovering this */
-
-/*#define f1(x,y,z) ( ( x & y ) | ( ~x & z ) )          // Rounds  0-19 */
-#define f1(x,y,z)   ( z ^ ( x & ( y ^ z ) ) )           /* Rounds  0-19 */
-#define f2(x,y,z)   ( x ^ y ^ z )                       /* Rounds 20-39 */
-/*#define f3(x,y,z) ( ( x & y ) | ( x & z ) | ( y & z ) )   // Rounds 40-59 */
-#define f3(x,y,z)   ( ( x & y ) | ( z & ( x | y ) ) )   /* Rounds 40-59 */
-#define f4(x,y,z)   ( x ^ y ^ z )                       /* Rounds 60-79 */
-
-/* The SHA Mysterious Constants */
-
-#define K1  0x5A827999L                                 /* Rounds  0-19 */
-#define K2  0x6ED9EBA1L                                 /* Rounds 20-39 */
-#define K3  0x8F1BBCDCL                                 /* Rounds 40-59 */
-#define K4  0xCA62C1D6L                                 /* Rounds 60-79 */
-
-/* SHA initial values */
-
-#define h0init  0x67452301L
-#define h1init  0xEFCDAB89L
-#define h2init  0x98BADCFEL
-#define h3init  0x10325476L
-#define h4init  0xC3D2E1F0L
-
-/* 32-bit rotate left - kludged with shifts */
-
-#define ROTL(n,X)  ( ( (X) << (n) ) | ( (X) >> ( 32 - (n) ) ) )
-
-/* The initial expanding function.  The hash function is defined over an
-   80-word expanded input array W, where the first 16 are copies of the input
-   data, and the remaining 64 are defined by
-
-        W[ i ] = W[ i - 16 ] ^ W[ i - 14 ] ^ W[ i - 8 ] ^ W[ i - 3 ]
-
-   This implementation generates these values on the fly in a circular
-   buffer - thanks to Colin Plumb, colin@nyx10.cs.du.edu for this
-   optimization.
-
-   The updated SHA changes the expanding function by adding a rotate of 1
-   bit.  Thanks to Jim Gillogly, jim@rand.org, and an anonymous contributor
-   for this information */
-
-#define expand(W,i) ( W[ i & 15 ] = \
-		      ROTL( 1, ( W[ i & 15 ] ^ W[ (i - 14) & 15 ] ^ \
-				 W[ (i - 8) & 15 ] ^ W[ (i - 3) & 15 ] ) ) )
-
-
-/* The prototype SHA sub-round.  The fundamental sub-round is:
-
-        a' = e + ROTL( 5, a ) + f( b, c, d ) + k + data;
-        b' = a;
-        c' = ROTL( 30, b );
-        d' = c;
-        e' = d;
-
-   but this is implemented by unrolling the loop 5 times and renaming the
-   variables ( e, a, b, c, d ) = ( a', b', c', d', e' ) each iteration.
-   This code is then replicated 20 times for each of the 4 functions, using
-   the next 20 values from the W[] array each time */
-
-#define subRound(a, b, c, d, e, f, k, data) \
-    ( e += ROTL( 5, a ) + f( b, c, d ) + k + data, b = ROTL( 30, b ) )
-
-/* Initialize the SHA values */
-
-void SHA1_Init(struct SHA_CTX *ctx)
-{
-  /* Set the h-vars to their initial values */
-  ctx->digest[ 0 ] = h0init;
-  ctx->digest[ 1 ] = h1init;
-  ctx->digest[ 2 ] = h2init;
-  ctx->digest[ 3 ] = h3init;
-  ctx->digest[ 4 ] = h4init;
-
-  /* Initialize bit count */
-  ctx->count_l = ctx->count_h = 0;
-  
-  /* Initialize buffer */
-  ctx->index = 0;
-}
-
-/* Perform the SHA transformation.  Note that this code, like MD5, seems to
-   break some optimizing compilers due to the complexity of the expressions
-   and the size of the basic block.  It may be necessary to split it into
-   sections, e.g. based on the four subrounds
-
-   Note that this function destroys the data area */
-
-static void sha_transform(struct SHA_CTX *ctx, uint32_t *data )
-{
-  uint32_t A, B, C, D, E;     /* Local vars */
-
-  /* Set up first buffer and local data buffer */
-  A = ctx->digest[0];
-  B = ctx->digest[1];
-  C = ctx->digest[2];
-  D = ctx->digest[3];
-  E = ctx->digest[4];
-
-  /* Heavy mangling, in 4 sub-rounds of 20 interations each. */
-  subRound( A, B, C, D, E, f1, K1, data[ 0] );
-  subRound( E, A, B, C, D, f1, K1, data[ 1] );
-  subRound( D, E, A, B, C, f1, K1, data[ 2] );
-  subRound( C, D, E, A, B, f1, K1, data[ 3] );
-  subRound( B, C, D, E, A, f1, K1, data[ 4] );
-  subRound( A, B, C, D, E, f1, K1, data[ 5] );
-  subRound( E, A, B, C, D, f1, K1, data[ 6] );
-  subRound( D, E, A, B, C, f1, K1, data[ 7] );
-  subRound( C, D, E, A, B, f1, K1, data[ 8] );
-  subRound( B, C, D, E, A, f1, K1, data[ 9] );
-  subRound( A, B, C, D, E, f1, K1, data[10] );
-  subRound( E, A, B, C, D, f1, K1, data[11] );
-  subRound( D, E, A, B, C, f1, K1, data[12] );
-  subRound( C, D, E, A, B, f1, K1, data[13] );
-  subRound( B, C, D, E, A, f1, K1, data[14] );
-  subRound( A, B, C, D, E, f1, K1, data[15] );
-  subRound( E, A, B, C, D, f1, K1, expand( data, 16 ) );
-  subRound( D, E, A, B, C, f1, K1, expand( data, 17 ) );
-  subRound( C, D, E, A, B, f1, K1, expand( data, 18 ) );
-  subRound( B, C, D, E, A, f1, K1, expand( data, 19 ) );
-
-  subRound( A, B, C, D, E, f2, K2, expand( data, 20 ) );
-  subRound( E, A, B, C, D, f2, K2, expand( data, 21 ) );
-  subRound( D, E, A, B, C, f2, K2, expand( data, 22 ) );
-  subRound( C, D, E, A, B, f2, K2, expand( data, 23 ) );
-  subRound( B, C, D, E, A, f2, K2, expand( data, 24 ) );
-  subRound( A, B, C, D, E, f2, K2, expand( data, 25 ) );
-  subRound( E, A, B, C, D, f2, K2, expand( data, 26 ) );
-  subRound( D, E, A, B, C, f2, K2, expand( data, 27 ) );
-  subRound( C, D, E, A, B, f2, K2, expand( data, 28 ) );
-  subRound( B, C, D, E, A, f2, K2, expand( data, 29 ) );
-  subRound( A, B, C, D, E, f2, K2, expand( data, 30 ) );
-  subRound( E, A, B, C, D, f2, K2, expand( data, 31 ) );
-  subRound( D, E, A, B, C, f2, K2, expand( data, 32 ) );
-  subRound( C, D, E, A, B, f2, K2, expand( data, 33 ) );
-  subRound( B, C, D, E, A, f2, K2, expand( data, 34 ) );
-  subRound( A, B, C, D, E, f2, K2, expand( data, 35 ) );
-  subRound( E, A, B, C, D, f2, K2, expand( data, 36 ) );
-  subRound( D, E, A, B, C, f2, K2, expand( data, 37 ) );
-  subRound( C, D, E, A, B, f2, K2, expand( data, 38 ) );
-  subRound( B, C, D, E, A, f2, K2, expand( data, 39 ) );
-
-  subRound( A, B, C, D, E, f3, K3, expand( data, 40 ) );
-  subRound( E, A, B, C, D, f3, K3, expand( data, 41 ) );
-  subRound( D, E, A, B, C, f3, K3, expand( data, 42 ) );
-  subRound( C, D, E, A, B, f3, K3, expand( data, 43 ) );
-  subRound( B, C, D, E, A, f3, K3, expand( data, 44 ) );
-  subRound( A, B, C, D, E, f3, K3, expand( data, 45 ) );
-  subRound( E, A, B, C, D, f3, K3, expand( data, 46 ) );
-  subRound( D, E, A, B, C, f3, K3, expand( data, 47 ) );
-  subRound( C, D, E, A, B, f3, K3, expand( data, 48 ) );
-  subRound( B, C, D, E, A, f3, K3, expand( data, 49 ) );
-  subRound( A, B, C, D, E, f3, K3, expand( data, 50 ) );
-  subRound( E, A, B, C, D, f3, K3, expand( data, 51 ) );
-  subRound( D, E, A, B, C, f3, K3, expand( data, 52 ) );
-  subRound( C, D, E, A, B, f3, K3, expand( data, 53 ) );
-  subRound( B, C, D, E, A, f3, K3, expand( data, 54 ) );
-  subRound( A, B, C, D, E, f3, K3, expand( data, 55 ) );
-  subRound( E, A, B, C, D, f3, K3, expand( data, 56 ) );
-  subRound( D, E, A, B, C, f3, K3, expand( data, 57 ) );
-  subRound( C, D, E, A, B, f3, K3, expand( data, 58 ) );
-  subRound( B, C, D, E, A, f3, K3, expand( data, 59 ) );
-
-  subRound( A, B, C, D, E, f4, K4, expand( data, 60 ) );
-  subRound( E, A, B, C, D, f4, K4, expand( data, 61 ) );
-  subRound( D, E, A, B, C, f4, K4, expand( data, 62 ) );
-  subRound( C, D, E, A, B, f4, K4, expand( data, 63 ) );
-  subRound( B, C, D, E, A, f4, K4, expand( data, 64 ) );
-  subRound( A, B, C, D, E, f4, K4, expand( data, 65 ) );
-  subRound( E, A, B, C, D, f4, K4, expand( data, 66 ) );
-  subRound( D, E, A, B, C, f4, K4, expand( data, 67 ) );
-  subRound( C, D, E, A, B, f4, K4, expand( data, 68 ) );
-  subRound( B, C, D, E, A, f4, K4, expand( data, 69 ) );
-  subRound( A, B, C, D, E, f4, K4, expand( data, 70 ) );
-  subRound( E, A, B, C, D, f4, K4, expand( data, 71 ) );
-  subRound( D, E, A, B, C, f4, K4, expand( data, 72 ) );
-  subRound( C, D, E, A, B, f4, K4, expand( data, 73 ) );
-  subRound( B, C, D, E, A, f4, K4, expand( data, 74 ) );
-  subRound( A, B, C, D, E, f4, K4, expand( data, 75 ) );
-  subRound( E, A, B, C, D, f4, K4, expand( data, 76 ) );
-  subRound( D, E, A, B, C, f4, K4, expand( data, 77 ) );
-  subRound( C, D, E, A, B, f4, K4, expand( data, 78 ) );
-  subRound( B, C, D, E, A, f4, K4, expand( data, 79 ) );
-
-  /* Build message digest */
-  ctx->digest[0] += A;
-  ctx->digest[1] += B;
-  ctx->digest[2] += C;
-  ctx->digest[3] += D;
-  ctx->digest[4] += E;
-}
-
-#if 1
-
-#ifndef EXTRACT_UCHAR
-#define EXTRACT_UCHAR(p)  (*(unsigned char *)(p))
-#endif
-
-#define STRING2INT(s) ((((((EXTRACT_UCHAR(s) << 8)    \
-			 | EXTRACT_UCHAR(s+1)) << 8)  \
-			 | EXTRACT_UCHAR(s+2)) << 8)  \
-			 | EXTRACT_UCHAR(s+3))
-#else
-uint32_t STRING2INT(unsigned char *s)
-{
-  uint32_t r;
-  unsigned int i;
-  
-  for (i = 0, r = 0; i < 4; i++, s++)
-    r = (r << 8) | *s;
-  return r;
-}
-#endif
-
-static void sha_block(struct SHA_CTX *ctx, const unsigned char *block)
-{
-  uint32_t data[SHA_DATALEN];
-  unsigned int i;
-  
-  /* Update block count */
-  if (!++ctx->count_l)
-    ++ctx->count_h;
-
-  /* Endian independent conversion */
-  for (i = 0; i<SHA_DATALEN; i++, block += 4)
-    data[i] = STRING2INT(block);
-
-  sha_transform(ctx, data);
-}
-
-void SHA1_Update(struct SHA_CTX *ctx, const unsigned char *buffer, uint32_t len)
-{
-  if (ctx->index)
-    { /* Try to fill partial block */
-      unsigned left = SHA_DATASIZE - ctx->index;
-      if (len < left)
-	{
-	  memcpy(ctx->block + ctx->index, buffer, len);
-	  ctx->index += len;
-	  return; /* Finished */
-	}
-      else
-	{
-	  memcpy(ctx->block + ctx->index, buffer, left);
-	  sha_block(ctx, ctx->block);
-	  buffer += left;
-	  len -= left;
-	}
-    }
-  while (len >= SHA_DATASIZE)
-    {
-      sha_block(ctx, buffer);
-      buffer += SHA_DATASIZE;
-      len -= SHA_DATASIZE;
-    }
-  if ((ctx->index = len))     /* This assignment is intended */
-    /* Buffer leftovers */
-    memcpy(ctx->block, buffer, len);
-}
-	  
-/* Final wrapup - pad to SHA_DATASIZE-byte boundary with the bit pattern
-   1 0* (64-bit count of bits processed, MSB-first) */
-
-void SHA1_Final(unsigned char *s, struct SHA_CTX *ctx)
-{
-  uint32_t data[SHA_DATALEN];
-  unsigned int i;
-  unsigned int words;
-  
-  i = ctx->index;
-  /* Set the first char of padding to 0x80.  This is safe since there is
-     always at least one byte free */
-  ctx->block[i++] = 0x80;
-
-  /* Fill rest of word */
-  for( ; i & 3; i++)
-    ctx->block[i] = 0;
-
-  /* i is now a multiple of the word size 4 */
-  words = i >> 2;
-  for (i = 0; i < words; i++)
-    data[i] = STRING2INT(ctx->block + 4*i);
-  
-  if (words > (SHA_DATALEN-2))
-    { /* No room for length in this block. Process it and
-       * pad with another one */
-      for (i = words ; i < SHA_DATALEN; i++)
-	data[i] = 0;
-      sha_transform(ctx, data);
-      for (i = 0; i < (SHA_DATALEN-2); i++)
-	data[i] = 0;
-    }
-  else
-    for (i = words ; i < SHA_DATALEN - 2; i++)
-      data[i] = 0;
-  /* There's 512 = 2^9 bits in one block */
-  data[SHA_DATALEN-2] = (ctx->count_h << 9) | (ctx->count_l >> 23);
-  data[SHA_DATALEN-1] = (ctx->count_l << 9) | (ctx->index << 3);
-  sha_transform(ctx, data);
-  sha_digest(ctx, s);
-}
-
-void sha_digest(struct SHA_CTX *ctx, unsigned char *s)
-{
-  unsigned int i;
-
-  for (i = 0; i < SHA_DIGESTLEN; i++)
-    {
-      *s++ =         ctx->digest[i] >> 24;
-      *s++ = 0xff & (ctx->digest[i] >> 16);
-      *s++ = 0xff & (ctx->digest[i] >> 8);
-      *s++ = 0xff &  ctx->digest[i];
-    }
-}
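The SHA1_Final code removed above implements the standard SHA-1 finalisation: append a 0x80 byte, zero-fill up to the last two words of the block, and store the 64-bit count of message bits MSB-first, spilling into an extra block when the count no longer fits. The following stand-alone sketch restates that padding rule for illustration only; the name sha1Pad and the buffered approach are not from the removed code, which works block-wise.

#include <cstdint>
#include <vector>

// Pad a message to a multiple of 64 bytes with the "1 0*" pattern and a
// 64-bit big-endian bit count, as the removed SHA1_Final did block-wise.
std::vector<uint8_t> sha1Pad(const uint8_t * msg, uint64_t len)
{
    std::vector<uint8_t> out(msg, msg + len);
    out.push_back(0x80);                             // the single '1' bit
    while (out.size() % 64 != 56) out.push_back(0);  // leave room for the length field
    uint64_t bits = len * 8;
    for (int i = 7; i >= 0; i--)                     // 64-bit count, MSB first
        out.push_back((uint8_t) (bits >> (8 * i)));
    return out;
}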
diff --git a/src/libutil/sha1.h b/src/libutil/sha1.h
deleted file mode 100644
index 715040dd48df..000000000000
--- a/src/libutil/sha1.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef _SHA_H
-#define _SHA_H
-
-#include <inttypes.h>
-
-/* The SHA block size and message digest sizes, in bytes */
-
-#define SHA_DATASIZE    64
-#define SHA_DATALEN     16
-#define SHA_DIGESTSIZE  20
-#define SHA_DIGESTLEN    5
-/* The structure for storing SHA info */
-
-struct SHA_CTX {
-  uint32_t digest[SHA_DIGESTLEN];  /* Message digest */
-  uint32_t count_l, count_h;       /* 64-bit block count */
-  uint8_t block[SHA_DATASIZE];     /* SHA data buffer */
-  unsigned int index;            /* index into buffer */
-};
-
-void SHA1_Init(struct SHA_CTX *ctx);
-void SHA1_Update(struct SHA_CTX *ctx, const unsigned char *buffer, uint32_t len);
-void SHA1_Final(unsigned char *s, struct SHA_CTX *ctx);
-void sha_digest(struct SHA_CTX *ctx, unsigned char *s);
-void sha_copy(struct SHA_CTX *dest, struct SHA_CTX *src);
-
-
-#endif /* !_SHA_H */
diff --git a/src/libutil/sha256.c b/src/libutil/sha256.c
deleted file mode 100644
index 63ed0ba43011..000000000000
--- a/src/libutil/sha256.c
+++ /dev/null
@@ -1,238 +0,0 @@
-/* crypto/sha/sha256.c */
-/* ====================================================================
- * Copyright (c) 2004 The OpenSSL Project.  All rights reserved
- * according to the OpenSSL license [found in ./md32_common.h].
- * ====================================================================
- */
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "sha256.h"
-
-int SHA224_Init (SHA256_CTX *c)
-	{
-	c->h[0]=0xc1059ed8UL;	c->h[1]=0x367cd507UL;
-	c->h[2]=0x3070dd17UL;	c->h[3]=0xf70e5939UL;
-	c->h[4]=0xffc00b31UL;	c->h[5]=0x68581511UL;
-	c->h[6]=0x64f98fa7UL;	c->h[7]=0xbefa4fa4UL;
-	c->Nl=0;	c->Nh=0;
-	c->num=0;	c->md_len=SHA224_DIGEST_LENGTH;
-	return 1;
-	}
-
-int SHA256_Init (SHA256_CTX *c)
-	{
-	c->h[0]=0x6a09e667UL;	c->h[1]=0xbb67ae85UL;
-	c->h[2]=0x3c6ef372UL;	c->h[3]=0xa54ff53aUL;
-	c->h[4]=0x510e527fUL;	c->h[5]=0x9b05688cUL;
-	c->h[6]=0x1f83d9abUL;	c->h[7]=0x5be0cd19UL;
-	c->Nl=0;	c->Nh=0;
-	c->num=0;	c->md_len=SHA256_DIGEST_LENGTH;
-	return 1;
-	}
-
-unsigned char *SHA224(const unsigned char *d, size_t n, unsigned char *md)
-	{
-	SHA256_CTX c;
-	static unsigned char m[SHA224_DIGEST_LENGTH];
-
-	if (md == NULL) md=m;
-	SHA224_Init(&c);
-	SHA256_Update(&c,d,n);
-	SHA256_Final(md,&c);
-	return(md);
-	}
-
-unsigned char *SHA256(const unsigned char *d, size_t n, unsigned char *md)
-	{
-	SHA256_CTX c;
-	static unsigned char m[SHA256_DIGEST_LENGTH];
-
-	if (md == NULL) md=m;
-	SHA256_Init(&c);
-	SHA256_Update(&c,d,n);
-	SHA256_Final(md,&c);
-	return(md);
-	}
-
-int SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
-{   return SHA256_Update (c,data,len);   }
-int SHA224_Final (unsigned char *md, SHA256_CTX *c)
-{   return SHA256_Final (md,c);   }
-
-#define	DATA_ORDER_IS_BIG_ENDIAN
-
-#define	HASH_LONG		uint32_t
-#define	HASH_LONG_LOG2		2
-#define	HASH_CTX		SHA256_CTX
-#define	HASH_CBLOCK		SHA_CBLOCK
-#define	HASH_LBLOCK		SHA_LBLOCK
-/*
- * Note that FIPS180-2 discusses "Truncation of the Hash Function Output."
- * default: case below covers for it. It's not clear however if it's
- * permitted to truncate to amount of bytes not divisible by 4. I bet not,
- * but if it is, then default: case shall be extended. For reference.
- * Idea behind separate cases for pre-defined lengths is to let the
- * compiler decide if it's appropriate to unroll small loops.
- */
-#define	HASH_MAKE_STRING(c,s)	do {	\
-	unsigned long ll;		\
-	unsigned int  n;		\
-	switch ((c)->md_len)		\
-	{   case SHA224_DIGEST_LENGTH:	\
-		for (n=0;n<SHA224_DIGEST_LENGTH/4;n++)	\
-		{   ll=(c)->h[n]; HOST_l2c(ll,(s));   }	\
-		break;			\
-	    case SHA256_DIGEST_LENGTH:	\
-		for (n=0;n<SHA256_DIGEST_LENGTH/4;n++)	\
-		{   ll=(c)->h[n]; HOST_l2c(ll,(s));   }	\
-		break;			\
-	    default:			\
-		if ((c)->md_len > SHA256_DIGEST_LENGTH)	\
-		    return 0;				\
-		for (n=0;n<(c)->md_len/4;n++)		\
-		{   ll=(c)->h[n]; HOST_l2c(ll,(s));   }	\
-		break;			\
-	}				\
-	} while (0)
-
-#define	HASH_UPDATE		SHA256_Update
-#define	HASH_TRANSFORM		SHA256_Transform
-#define	HASH_FINAL		SHA256_Final
-#define	HASH_BLOCK_HOST_ORDER	sha256_block_host_order
-#define	HASH_BLOCK_DATA_ORDER	sha256_block_data_order
-void sha256_block_host_order (SHA256_CTX *ctx, const void *in, size_t num);
-void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num);
-
-#include "md32_common.h"
-
-static const uint32_t K256[64] = {
-	0x428a2f98UL,0x71374491UL,0xb5c0fbcfUL,0xe9b5dba5UL,
-	0x3956c25bUL,0x59f111f1UL,0x923f82a4UL,0xab1c5ed5UL,
-	0xd807aa98UL,0x12835b01UL,0x243185beUL,0x550c7dc3UL,
-	0x72be5d74UL,0x80deb1feUL,0x9bdc06a7UL,0xc19bf174UL,
-	0xe49b69c1UL,0xefbe4786UL,0x0fc19dc6UL,0x240ca1ccUL,
-	0x2de92c6fUL,0x4a7484aaUL,0x5cb0a9dcUL,0x76f988daUL,
-	0x983e5152UL,0xa831c66dUL,0xb00327c8UL,0xbf597fc7UL,
-	0xc6e00bf3UL,0xd5a79147UL,0x06ca6351UL,0x14292967UL,
-	0x27b70a85UL,0x2e1b2138UL,0x4d2c6dfcUL,0x53380d13UL,
-	0x650a7354UL,0x766a0abbUL,0x81c2c92eUL,0x92722c85UL,
-	0xa2bfe8a1UL,0xa81a664bUL,0xc24b8b70UL,0xc76c51a3UL,
-	0xd192e819UL,0xd6990624UL,0xf40e3585UL,0x106aa070UL,
-	0x19a4c116UL,0x1e376c08UL,0x2748774cUL,0x34b0bcb5UL,
-	0x391c0cb3UL,0x4ed8aa4aUL,0x5b9cca4fUL,0x682e6ff3UL,
-	0x748f82eeUL,0x78a5636fUL,0x84c87814UL,0x8cc70208UL,
-	0x90befffaUL,0xa4506cebUL,0xbef9a3f7UL,0xc67178f2UL };
-
-/*
- * The FIPS specification refers to right rotations, while our ROTATE macro
- * is a left rotation. This is why the rotation coefficients here differ
- * from those in the FIPS document by 32-N...
- */
-#define Sigma0(x)	(ROTATE((x),30) ^ ROTATE((x),19) ^ ROTATE((x),10))
-#define Sigma1(x)	(ROTATE((x),26) ^ ROTATE((x),21) ^ ROTATE((x),7))
-#define sigma0(x)	(ROTATE((x),25) ^ ROTATE((x),14) ^ ((x)>>3))
-#define sigma1(x)	(ROTATE((x),15) ^ ROTATE((x),13) ^ ((x)>>10))
-
-#define Ch(x,y,z)	(((x) & (y)) ^ ((~(x)) & (z)))
-#define Maj(x,y,z)	(((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
-
-#define	ROUND_00_15(i,a,b,c,d,e,f,g,h)		do {	\
-	T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];	\
-	h = Sigma0(a) + Maj(a,b,c);			\
-	d += T1;	h += T1;		} while (0)
-
-#define	ROUND_16_63(i,a,b,c,d,e,f,g,h,X)	do {	\
-	s0 = X[(i+1)&0x0f];	s0 = sigma0(s0);	\
-	s1 = X[(i+14)&0x0f];	s1 = sigma1(s1);	\
-	T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f];	\
-	ROUND_00_15(i,a,b,c,d,e,f,g,h);		} while (0)
-
-static void sha256_block (SHA256_CTX *ctx, const void *in, size_t num, int host)
-	{
-	uint32_t a,b,c,d,e,f,g,h,s0,s1,T1;
-	uint32_t	X[16];
-	int i;
-	const unsigned char *data=in;
-
-			while (num--) {
-
-	a = ctx->h[0];	b = ctx->h[1];	c = ctx->h[2];	d = ctx->h[3];
-	e = ctx->h[4];	f = ctx->h[5];	g = ctx->h[6];	h = ctx->h[7];
-
-	if (host)
-		{
-		const uint32_t *W=(const uint32_t *)data;
-
-		T1 = X[0] = W[0];	ROUND_00_15(0,a,b,c,d,e,f,g,h);
-		T1 = X[1] = W[1];	ROUND_00_15(1,h,a,b,c,d,e,f,g);
-		T1 = X[2] = W[2];	ROUND_00_15(2,g,h,a,b,c,d,e,f);
-		T1 = X[3] = W[3];	ROUND_00_15(3,f,g,h,a,b,c,d,e);
-		T1 = X[4] = W[4];	ROUND_00_15(4,e,f,g,h,a,b,c,d);
-		T1 = X[5] = W[5];	ROUND_00_15(5,d,e,f,g,h,a,b,c);
-		T1 = X[6] = W[6];	ROUND_00_15(6,c,d,e,f,g,h,a,b);
-		T1 = X[7] = W[7];	ROUND_00_15(7,b,c,d,e,f,g,h,a);
-		T1 = X[8] = W[8];	ROUND_00_15(8,a,b,c,d,e,f,g,h);
-		T1 = X[9] = W[9];	ROUND_00_15(9,h,a,b,c,d,e,f,g);
-		T1 = X[10] = W[10];	ROUND_00_15(10,g,h,a,b,c,d,e,f);
-		T1 = X[11] = W[11];	ROUND_00_15(11,f,g,h,a,b,c,d,e);
-		T1 = X[12] = W[12];	ROUND_00_15(12,e,f,g,h,a,b,c,d);
-		T1 = X[13] = W[13];	ROUND_00_15(13,d,e,f,g,h,a,b,c);
-		T1 = X[14] = W[14];	ROUND_00_15(14,c,d,e,f,g,h,a,b);
-		T1 = X[15] = W[15];	ROUND_00_15(15,b,c,d,e,f,g,h,a);
-
-		data += SHA256_CBLOCK;
-		}
-	else
-		{
-		uint32_t l;
-
-		HOST_c2l(data,l); T1 = X[0] = l;  ROUND_00_15(0,a,b,c,d,e,f,g,h);
-		HOST_c2l(data,l); T1 = X[1] = l;  ROUND_00_15(1,h,a,b,c,d,e,f,g);
-		HOST_c2l(data,l); T1 = X[2] = l;  ROUND_00_15(2,g,h,a,b,c,d,e,f);
-		HOST_c2l(data,l); T1 = X[3] = l;  ROUND_00_15(3,f,g,h,a,b,c,d,e);
-		HOST_c2l(data,l); T1 = X[4] = l;  ROUND_00_15(4,e,f,g,h,a,b,c,d);
-		HOST_c2l(data,l); T1 = X[5] = l;  ROUND_00_15(5,d,e,f,g,h,a,b,c);
-		HOST_c2l(data,l); T1 = X[6] = l;  ROUND_00_15(6,c,d,e,f,g,h,a,b);
-		HOST_c2l(data,l); T1 = X[7] = l;  ROUND_00_15(7,b,c,d,e,f,g,h,a);
-		HOST_c2l(data,l); T1 = X[8] = l;  ROUND_00_15(8,a,b,c,d,e,f,g,h);
-		HOST_c2l(data,l); T1 = X[9] = l;  ROUND_00_15(9,h,a,b,c,d,e,f,g);
-		HOST_c2l(data,l); T1 = X[10] = l; ROUND_00_15(10,g,h,a,b,c,d,e,f);
-		HOST_c2l(data,l); T1 = X[11] = l; ROUND_00_15(11,f,g,h,a,b,c,d,e);
-		HOST_c2l(data,l); T1 = X[12] = l; ROUND_00_15(12,e,f,g,h,a,b,c,d);
-		HOST_c2l(data,l); T1 = X[13] = l; ROUND_00_15(13,d,e,f,g,h,a,b,c);
-		HOST_c2l(data,l); T1 = X[14] = l; ROUND_00_15(14,c,d,e,f,g,h,a,b);
-		HOST_c2l(data,l); T1 = X[15] = l; ROUND_00_15(15,b,c,d,e,f,g,h,a);
-		}
-
-	for (i=16;i<64;i+=8)
-		{
-		ROUND_16_63(i+0,a,b,c,d,e,f,g,h,X);
-		ROUND_16_63(i+1,h,a,b,c,d,e,f,g,X);
-		ROUND_16_63(i+2,g,h,a,b,c,d,e,f,X);
-		ROUND_16_63(i+3,f,g,h,a,b,c,d,e,X);
-		ROUND_16_63(i+4,e,f,g,h,a,b,c,d,X);
-		ROUND_16_63(i+5,d,e,f,g,h,a,b,c,X);
-		ROUND_16_63(i+6,c,d,e,f,g,h,a,b,X);
-		ROUND_16_63(i+7,b,c,d,e,f,g,h,a,X);
-		}
-
-	ctx->h[0] += a;	ctx->h[1] += b;	ctx->h[2] += c;	ctx->h[3] += d;
-	ctx->h[4] += e;	ctx->h[5] += f;	ctx->h[6] += g;	ctx->h[7] += h;
-
-			}
-	}
-
-/*
- * The idea is to trade a couple of cycles for some space. On IA-32 we save
- * about 4K in the "big footprint" case. In the "small footprint" case any
- * gain is appreciated :-)
- */
-void HASH_BLOCK_HOST_ORDER (SHA256_CTX *ctx, const void *in, size_t num)
-{   sha256_block (ctx,in,num,1);   }
-
-void HASH_BLOCK_DATA_ORDER (SHA256_CTX *ctx, const void *in, size_t num)
-{   sha256_block (ctx,in,num,0);   }
-
-
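As the comment above notes, the removed code's Sigma/sigma macros rotate left by 32-N where FIPS 180-2 specifies right rotations by N (e.g. Sigma0 is ROTR by 2, 13 and 22 in the specification, hence ROTATE left by 30, 19 and 10 here). A minimal stand-alone check of that equivalence, written for illustration only; rotl, rotr and checkSigma0 are not names from the removed source.

#include <cassert>
#include <cstdint>

static uint32_t rotl(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }
static uint32_t rotr(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

// FIPS 180-2 defines Sigma0 with right rotations by 2, 13 and 22; rotating
// left by 30, 19 and 10 (i.e. 32 - N) is the same operation.
static void checkSigma0(uint32_t x)
{
    assert((rotr(x, 2) ^ rotr(x, 13) ^ rotr(x, 22))
        == (rotl(x, 30) ^ rotl(x, 19) ^ rotl(x, 10)));
}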
diff --git a/src/libutil/sha256.h b/src/libutil/sha256.h
deleted file mode 100644
index 0686b84f0e08..000000000000
--- a/src/libutil/sha256.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef _SHA256_H
-#define _SHA256_H 1
-
-#include <inttypes.h>
-
-#define SHA_LBLOCK	16
-#define SHA_CBLOCK	(SHA_LBLOCK*4)	/* SHA treats input data as a
-					 * contiguous array of 32 bit
-					 * wide big-endian values. */
-
-#define SHA256_CBLOCK	(SHA_LBLOCK*4)	/* SHA-256 treats input data as a
-					 * contiguous array of 32 bit
-					 * wide big-endian values. */
-#define SHA224_DIGEST_LENGTH	28
-#define SHA256_DIGEST_LENGTH	32
-
-typedef struct SHA256state_st
-	{
-	uint32_t h[8];
-	uint32_t Nl,Nh;
-	uint32_t data[SHA_LBLOCK];
-	unsigned int num,md_len;
-	} SHA256_CTX;
-
-int SHA224_Init(SHA256_CTX *c);
-int SHA224_Update(SHA256_CTX *c, const void *data, size_t len);
-int SHA224_Final(unsigned char *md, SHA256_CTX *c);
-unsigned char *SHA224(const unsigned char *d, size_t n,unsigned char *md);
-int SHA256_Init(SHA256_CTX *c);
-int SHA256_Update(SHA256_CTX *c, const void *data, size_t len);
-int SHA256_Final(unsigned char *md, SHA256_CTX *c);
-unsigned char *SHA256(const unsigned char *d, size_t n,unsigned char *md);
-void SHA256_Transform(SHA256_CTX *c, const unsigned char *data);
-
-#endif
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 11c75d2cda4c..75032bf90d0b 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -453,7 +453,7 @@ Nest::~Nest()
 
 static string escVerbosity(Verbosity level)
 {
-    return int2String((int) level);
+    return std::to_string((int) level);
 }
 
 
@@ -599,6 +599,8 @@ string drainFD(int fd)
 //////////////////////////////////////////////////////////////////////
 
 
+AutoDelete::AutoDelete() : del{false} {}
+
 AutoDelete::AutoDelete(const string & p, bool recursive) : path(p)
 {
     del = true;
@@ -626,6 +628,12 @@ void AutoDelete::cancel()
     del = false;
 }
 
+void AutoDelete::reset(const Path & p, bool recursive) {
+    path = p;
+    this->recursive = recursive;
+    del = true;
+}
+
 
 
 //////////////////////////////////////////////////////////////////////
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index b2fb59d6f2d7..f4026a0a884b 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -150,8 +150,8 @@ void printMsg_(Verbosity level, const FormatOrString & fs);
 
 #define printMsg(level, f) \
     do { \
-        if (level <= verbosity) { \
-            printMsg_(level, (f)); \
+        if (level <= nix::verbosity) { \
+            nix::printMsg_(level, (f)); \
         } \
     } while (0)
 
@@ -199,9 +199,12 @@ class AutoDelete
     bool del;
     bool recursive;
 public:
+    AutoDelete();
     AutoDelete(const Path & p, bool recursive = true);
     ~AutoDelete();
     void cancel();
+    void reset(const Path & p, bool recursive = true);
+    operator Path() const { return path; }
 };
 
 
@@ -355,13 +358,6 @@ template<class N> bool string2Int(const string & s, N & n)
     return str && str.get() == EOF;
 }
 
-template<class N> string int2String(N n)
-{
-    std::ostringstream str;
-    str << n;
-    return str.str();
-}
-
 
 /* Return true iff `s' ends in `suffix'. */
 bool hasSuffix(const string & s, const string & suffix);
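The new default constructor and reset() let an AutoDelete be declared before the path it guards exists. nix-prefetch-url further down uses the one-argument form directly; a deferred use might look like the following sketch, which is illustrative only (the function name example is made up, while createTempDir and writeFile are existing helpers from util.hh):

#include "util.hh"

using namespace nix;

void example()
{
    AutoDelete tmpDir;                    // nothing to delete yet (del == false)
    tmpDir.reset(createTempDir(), true);  // now owns the directory; deleted recursively on scope exit
    Path file = (Path) tmpDir + "/data";  // operator Path() exposes the guarded path
    writeFile(file, "hello");
    // tmpDir.cancel();                   // would keep the directory instead
}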
diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc
index bb4789aeac82..5a72cb712014 100644
--- a/src/nix-collect-garbage/nix-collect-garbage.cc
+++ b/src/nix-collect-garbage/nix-collect-garbage.cc
@@ -4,6 +4,7 @@
 #include "globals.hh"
 
 #include <iostream>
+#include <cerrno>
 
 using namespace nix;
 
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
index aaae691e9ee8..e97d1dab17b2 100644
--- a/src/nix-daemon/nix-daemon.cc
+++ b/src/nix-daemon/nix-daemon.cc
@@ -416,8 +416,8 @@ static void performOp(bool trusted, unsigned int clientVersion,
         settings.keepGoing = readInt(from) != 0;
         settings.set("build-fallback", readInt(from) ? "true" : "false");
         verbosity = (Verbosity) readInt(from);
-        settings.set("build-max-jobs", int2String(readInt(from)));
-        settings.set("build-max-silent-time", int2String(readInt(from)));
+        settings.set("build-max-jobs", std::to_string(readInt(from)));
+        settings.set("build-max-silent-time", std::to_string(readInt(from)));
         if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
             settings.useBuildHook = readInt(from) != 0;
         if (GET_PROTOCOL_MINOR(clientVersion) >= 4) {
@@ -426,7 +426,7 @@ static void performOp(bool trusted, unsigned int clientVersion,
             settings.printBuildTrace = readInt(from) != 0;
         }
         if (GET_PROTOCOL_MINOR(clientVersion) >= 6)
-            settings.set("build-cores", int2String(readInt(from)));
+            settings.set("build-cores", std::to_string(readInt(from)));
         if (GET_PROTOCOL_MINOR(clientVersion) >= 10)
             settings.set("build-use-substitutes", readInt(from) ? "true" : "false");
         if (GET_PROTOCOL_MINOR(clientVersion) >= 12) {
@@ -692,6 +692,10 @@ static PeerInfo getPeerInfo(int remote)
 
 #elif defined(LOCAL_PEERCRED)
 
+#if !defined(SOL_LOCAL)
+#define SOL_LOCAL 0
+#endif
+
     xucred cred;
     socklen_t credLen = sizeof(cred);
     if (getsockopt(remote, SOL_LOCAL, LOCAL_PEERCRED, &cred, &credLen) == -1)
@@ -720,7 +724,7 @@ static void daemonLoop(char * * argv)
 
     /* Handle socket-based activation by systemd. */
     if (getEnv("LISTEN_FDS") != "") {
-        if (getEnv("LISTEN_PID") != int2String(getpid()) || getEnv("LISTEN_FDS") != "1")
+        if (getEnv("LISTEN_PID") != std::to_string(getpid()) || getEnv("LISTEN_FDS") != "1")
             throw Error("unexpected systemd environment variables");
         fdSocket = SD_LISTEN_FDS_START;
     }
@@ -796,10 +800,10 @@ static void daemonLoop(char * * argv)
             PeerInfo peer = getPeerInfo(remote);
 
             struct passwd * pw = peer.uidKnown ? getpwuid(peer.uid) : 0;
-            string user = pw ? pw->pw_name : int2String(peer.uid);
+            string user = pw ? pw->pw_name : std::to_string(peer.uid);
 
             struct group * gr = peer.gidKnown ? getgrgid(peer.gid) : 0;
-            string group = gr ? gr->gr_name : int2String(peer.gid);
+            string group = gr ? gr->gr_name : std::to_string(peer.gid);
 
             Strings trustedUsers = settings.get("trusted-users", Strings({"root"}));
             Strings allowedUsers = settings.get("allowed-users", Strings({"*"}));
@@ -811,7 +815,7 @@ static void daemonLoop(char * * argv)
                 throw Error(format("user ‘%1%’ is not allowed to connect to the Nix daemon") % user);
 
             printMsg(lvlInfo, format((string) "accepted connection from pid %1%, user %2%" + (trusted ? " (trusted)" : ""))
-                % (peer.pidKnown ? int2String(peer.pid) : "<unknown>")
+                % (peer.pidKnown ? std::to_string(peer.pid) : "<unknown>")
                 % (peer.uidKnown ? user : "<unknown>"));
 
             /* Fork a child to handle the connection. */
@@ -832,7 +836,7 @@ static void daemonLoop(char * * argv)
 
                 /* For debugging, stuff the pid into argv[1]. */
                 if (peer.pidKnown && argv[1]) {
-                    string processName = int2String(peer.pid);
+                    string processName = std::to_string(peer.pid);
                     strncpy(argv[1], processName.c_str(), strlen(argv[1]));
                 }
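The SOL_LOCAL fallback added above matters because <sys/un.h> does not define SOL_LOCAL on every platform that supports LOCAL_PEERCRED; on those systems the option is queried at level 0. A stand-alone sketch of that lookup, for illustration only: peerUid is a made-up name, and the cr_uid field comes from the BSD xucred structure rather than from this patch.

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ucred.h>

#ifndef SOL_LOCAL
#define SOL_LOCAL 0
#endif

// Return the uid of the process on the other end of a Unix domain socket,
// or (uid_t) -1 if it cannot be determined.
static uid_t peerUid(int fd)
{
    xucred cred;
    socklen_t credLen = sizeof(cred);
    if (getsockopt(fd, SOL_LOCAL, LOCAL_PEERCRED, &cred, &credLen) == -1)
        return (uid_t) -1;
    return cred.cr_uid;
}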
 
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index 313f8a8a8f35..02a9f25a7a4e 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -1140,7 +1140,19 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs)
                                         attrs3["value"] = v->listElems()[j]->string.s;
                                         xml.writeEmptyElement("string", attrs3);
                                     }
+                              } else if (v->type == tAttrs) {
+                                  attrs2["type"] = "strings";
+                                  XMLOpenElement m(xml, "meta", attrs2);
+                                  Bindings & attrs = *v->attrs;
+                                  for (auto &i : attrs) {
+                                      Attr & a(*attrs.find(i.name));
+                                      if(a.value->type != tString) continue;
+                                      XMLAttrs attrs3;
+                                      attrs3["type"] = i.name;
+                                      attrs3["value"] = a.value->string.s;
+                                      xml.writeEmptyElement("string", attrs3);
                                 }
+                              }
                             }
                         }
                     }
diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc
index b6845197ec71..13a145a3b53e 100644
--- a/src/nix-instantiate/nix-instantiate.cc
+++ b/src/nix-instantiate/nix-instantiate.cc
@@ -79,7 +79,7 @@ void processExpr(EvalState & state, const Strings & attrPaths,
                     printGCWarning();
                 else {
                     Path rootName = gcRoot;
-                    if (++rootNr > 1) rootName += "-" + int2String(rootNr);
+                    if (++rootNr > 1) rootName += "-" + std::to_string(rootNr);
                     drvPath = addPermRoot(*store, drvPath, rootName, indirectRoot);
                 }
                 std::cout << format("%1%%2%\n") % drvPath % (outputName != "out" ? "!" + outputName : "");
diff --git a/src/nix-prefetch-url/local.mk b/src/nix-prefetch-url/local.mk
new file mode 100644
index 000000000000..3e7735406af0
--- /dev/null
+++ b/src/nix-prefetch-url/local.mk
@@ -0,0 +1,7 @@
+programs += nix-prefetch-url
+
+nix-prefetch-url_DIR := $(d)
+
+nix-prefetch-url_SOURCES := $(d)/nix-prefetch-url.cc
+
+nix-prefetch-url_LIBS = libmain libexpr libstore libutil libformat
diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc
new file mode 100644
index 000000000000..73a2845e07a5
--- /dev/null
+++ b/src/nix-prefetch-url/nix-prefetch-url.cc
@@ -0,0 +1,210 @@
+#include "hash.hh"
+#include "shared.hh"
+#include "download.hh"
+#include "store-api.hh"
+#include "eval.hh"
+#include "eval-inline.hh"
+#include "common-opts.hh"
+#include "attr-path.hh"
+
+#include <iostream>
+
+using namespace nix;
+
+
+/* If ‘uri’ starts with ‘mirror://’, then resolve it using the list of
+   mirrors defined in Nixpkgs. */
+string resolveMirrorUri(EvalState & state, string uri)
+{
+    if (string(uri, 0, 9) != "mirror://") return uri;
+
+    string s(uri, 9);
+    auto p = s.find('/');
+    if (p == string::npos) throw Error("invalid mirror URI");
+    string mirrorName(s, 0, p);
+
+    Value vMirrors;
+    state.eval(state.parseExprFromString("import <nixpkgs/pkgs/build-support/fetchurl/mirrors.nix>", "."), vMirrors);
+    state.forceAttrs(vMirrors);
+
+    auto mirrorList = vMirrors.attrs->find(state.symbols.create(mirrorName));
+    if (mirrorList == vMirrors.attrs->end())
+        throw Error(format("unknown mirror name ‘%1%’") % mirrorName);
+    state.forceList(*mirrorList->value);
+
+    if (mirrorList->value->listSize() < 1)
+        throw Error(format("mirror URI ‘%1%’ did not expand to anything") % uri);
+
+    string mirror = state.forceString(*mirrorList->value->listElems()[0]);
+    return mirror + (hasSuffix(mirror, "/") ? "" : "/") + string(s, p + 1);
+}
+
+
+int main(int argc, char * * argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+        initGC();
+
+        HashType ht = htSHA256;
+        std::vector<string> args;
+        Strings searchPath;
+        bool printPath = getEnv("PRINT_PATH") != "";
+        bool fromExpr = false;
+        string attrPath;
+        std::map<string, string> autoArgs_;
+        bool unpack = false;
+        string name;
+
+        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+            if (*arg == "--help")
+                showManPage("nix-prefetch-url");
+            else if (*arg == "--version")
+                printVersion("nix-prefetch-url");
+            else if (*arg == "--type") {
+                string s = getArg(*arg, arg, end);
+                ht = parseHashType(s);
+                if (ht == htUnknown)
+                    throw UsageError(format("unknown hash type ‘%1%’") % s);
+            }
+            else if (*arg == "--print-path")
+                printPath = true;
+            else if (*arg == "--attr" || *arg == "-A") {
+                fromExpr = true;
+                attrPath = getArg(*arg, arg, end);
+            }
+            else if (*arg == "--unpack")
+                unpack = true;
+            else if (*arg == "--name")
+                name = getArg(*arg, arg, end);
+            else if (parseAutoArgs(arg, end, autoArgs_))
+                ;
+            else if (parseSearchPathArg(arg, end, searchPath))
+                ;
+            else if (*arg != "" && arg->at(0) == '-')
+                return false;
+            else
+                args.push_back(*arg);
+            return true;
+        });
+
+        if (args.size() > 2)
+            throw UsageError("too many arguments");
+
+        store = openStore();
+        EvalState state(searchPath);
+
+        Bindings & autoArgs(*evalAutoArgs(state, autoArgs_));
+
+        /* If -A is given, get the URI from the specified Nix
+           expression. */
+        string uri;
+        if (!fromExpr) {
+            if (args.empty())
+                throw UsageError("you must specify a URI");
+            uri = args[0];
+        } else {
+            Path path = resolveExprPath(lookupFileArg(state, args.empty() ? "." : args[0]));
+            Value vRoot;
+            state.evalFile(path, vRoot);
+            Value & v(*findAlongAttrPath(state, attrPath, autoArgs, vRoot));
+            state.forceAttrs(v);
+
+            /* Extract the URI. */
+            auto attr = v.attrs->find(state.symbols.create("urls"));
+            if (attr == v.attrs->end())
+                throw Error("attribute set does not contain a ‘urls’ attribute");
+            state.forceList(*attr->value);
+            if (attr->value->listSize() < 1)
+                throw Error("‘urls’ list is empty");
+            uri = state.forceString(*attr->value->listElems()[0]);
+
+            /* Extract the hash mode. */
+            attr = v.attrs->find(state.symbols.create("outputHashMode"));
+            if (attr == v.attrs->end())
+                printMsg(lvlInfo, "warning: this does not look like a fetchurl call");
+            else
+                unpack = state.forceString(*attr->value) == "recursive";
+
+            /* Extract the name. */
+            if (name.empty()) {
+                attr = v.attrs->find(state.symbols.create("name"));
+                if (attr != v.attrs->end())
+                    name = state.forceString(*attr->value);
+            }
+        }
+
+        /* Figure out a name in the Nix store. */
+        if (name.empty())
+            name = baseNameOf(uri);
+        if (name.empty())
+            throw Error(format("cannot figure out file name for ‘%1%’") % uri);
+
+        /* If an expected hash is given, the file may already exist in
+           the store. */
+        Hash hash, expectedHash(ht);
+        Path storePath;
+        if (args.size() == 2) {
+            expectedHash = parseHash16or32(ht, args[1]);
+            storePath = makeFixedOutputPath(unpack, ht, expectedHash, name);
+            if (store->isValidPath(storePath))
+                hash = expectedHash;
+            else
+                storePath.clear();
+        }
+
+        if (storePath.empty()) {
+
+            auto actualUri = resolveMirrorUri(state, uri);
+
+            /* Download the file. */
+            auto result = downloadFile(actualUri, DownloadOptions());
+
+            AutoDelete tmpDir(createTempDir(), true);
+            Path tmpFile = (Path) tmpDir + "/tmp";
+            writeFile(tmpFile, result.data);
+
+            /* Optionally unpack the file. */
+            if (unpack) {
+                printMsg(lvlInfo, "unpacking...");
+                Path unpacked = (Path) tmpDir + "/unpacked";
+                createDirs(unpacked);
+                if (hasSuffix(baseNameOf(uri), ".zip"))
+                    runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked}, "");
+                else
+                    // FIXME: this requires GNU tar for decompression.
+                    runProgram("tar", true, {"xf", tmpFile, "-C", unpacked}, "");
+
+                /* If the archive unpacks to a single file/directory, then use
+                   that as the top-level. */
+                auto entries = readDirectory(unpacked);
+                if (entries.size() == 1)
+                    tmpFile = unpacked + "/" + entries[0].name;
+                else
+                    tmpFile = unpacked;
+            }
+
+            /* FIXME: inefficient; addToStore() will also hash
+               this. */
+            hash = unpack ? hashPath(ht, tmpFile).first : hashString(ht, result.data);
+
+            if (expectedHash != Hash(ht) && expectedHash != hash)
+                throw Error(format("hash mismatch for ‘%1%’") % uri);
+
+            /* Copy the file to the Nix store. FIXME: if RemoteStore
+               implemented addToStoreFromDump() and downloadFile()
+               supported a sink, we could stream the download directly
+               into the Nix store. */
+            storePath = store->addToStore(name, tmpFile, unpack, ht);
+
+            assert(storePath == makeFixedOutputPath(unpack, ht, hash, name));
+        }
+
+        if (!printPath)
+            printMsg(lvlInfo, format("path is ‘%1%’") % storePath);
+
+        std::cout << printHash16or32(hash) << std::endl;
+        if (printPath)
+            std::cout << storePath << std::endl;
+    });
+}
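Because a fixed-output store path is fully determined by the unpack flag, hash type, hash and name, passing the expected hash on the command line lets the tool skip the download entirely when that path is already valid, as the code above does before touching the network. The same check reduced to a helper, as a sketch only: prefetchedPath is a made-up name, and it assumes the same includes and global store used by the file above.

/* Return the pre-existing store path for an expected fixed-output hash,
   or the empty string if the file still has to be downloaded. */
static Path prefetchedPath(bool unpack, HashType ht,
    const Hash & expectedHash, const string & name)
{
    Path p = makeFixedOutputPath(unpack, ht, expectedHash, name);
    return store->isValidPath(p) ? p : "";
}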
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index d541b7b7d00a..14354f86e228 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -81,7 +81,7 @@ static PathSet realisePath(Path path, bool build = true)
                 printGCWarning();
             else {
                 Path rootName = gcRoot;
-                if (rootNr > 1) rootName += "-" + int2String(rootNr);
+                if (rootNr > 1) rootName += "-" + std::to_string(rootNr);
                 if (i->first != "out") rootName += "-" + i->first;
                 outPath = addPermRoot(*store, outPath, rootName, indirectRoot);
             }
@@ -98,7 +98,7 @@ static PathSet realisePath(Path path, bool build = true)
         else {
             Path rootName = gcRoot;
             rootNr++;
-            if (rootNr > 1) rootName += "-" + int2String(rootNr);
+            if (rootNr > 1) rootName += "-" + std::to_string(rootNr);
             path = addPermRoot(*store, path, rootName, indirectRoot);
         }
         return singleton<PathSet>(path);
@@ -853,7 +853,7 @@ static void opServe(Strings opFlags, Strings opArgs)
     if (magic != SERVE_MAGIC_1) throw Error("protocol mismatch");
     out << SERVE_MAGIC_2 << SERVE_PROTOCOL_VERSION;
     out.flush();
-    readInt(in); // Client version, unused for now
+    unsigned int clientVersion = readInt(in);
 
     auto getBuildSettings = [&]() {
         // FIXME: changing options here doesn't work if we're
@@ -863,6 +863,8 @@ static void opServe(Strings opFlags, Strings opArgs)
         settings.useSubstitutes = false;
         settings.maxSilentTime = readInt(in);
         settings.buildTimeout = readInt(in);
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
+            settings.maxLogSize = readInt(in);
     };
 
     while (true) {
diff --git a/src/nix-store/serve-protocol.hh b/src/nix-store/serve-protocol.hh
index f7f151d4603f..c4e2a370300b 100644
--- a/src/nix-store/serve-protocol.hh
+++ b/src/nix-store/serve-protocol.hh
@@ -5,7 +5,7 @@ namespace nix {
 #define SERVE_MAGIC_1 0x390c9deb
 #define SERVE_MAGIC_2 0x5452eecb
 
-#define SERVE_PROTOCOL_VERSION 0x201
+#define SERVE_PROTOCOL_VERSION 0x202
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
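Bumping SERVE_PROTOCOL_VERSION to 0x202 only changes the minor part: GET_PROTOCOL_MAJOR(0x202) is 0x200 and GET_PROTOCOL_MINOR(0x202) is 2, which is exactly what the new maxLogSize read in nix-store.cc above is gated on. A compile-time restatement of that arithmetic, for illustration only:

#include "serve-protocol.hh"

static_assert(GET_PROTOCOL_MAJOR(0x202) == 0x200, "major part is unchanged by this bump");
static_assert(GET_PROTOCOL_MINOR(0x202) == 2,     "minor >= 2 enables the new maxLogSize field");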
 
diff --git a/tests/fetchurl.nix b/tests/fetchurl.nix
deleted file mode 100644
index 2abcc039a832..000000000000
--- a/tests/fetchurl.nix
+++ /dev/null
@@ -1,6 +0,0 @@
-{ filename, sha256 }:
-
-import <nix/fetchurl.nix> {
-  url = "file://${filename}";
-  inherit sha256;
-}
diff --git a/tests/fetchurl.sh b/tests/fetchurl.sh
index 6acc87eafca8..b6fa3a27edd8 100644
--- a/tests/fetchurl.sh
+++ b/tests/fetchurl.sh
@@ -2,8 +2,40 @@ source common.sh
 
 clearStore
 
-hash=$(nix-hash --flat --type sha256 ./fetchurl.nix)
+# Test fetching a flat file.
+hash=$(nix-hash --flat --type sha256 ./fetchurl.sh)
 
-outPath=$(nix-build ./fetchurl.nix --argstr filename $(pwd)/fetchurl.nix --argstr sha256 $hash --no-out-link)
+outPath=$(nix-build '<nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr sha256 $hash --no-out-link)
 
-cmp $outPath fetchurl.nix
+cmp $outPath fetchurl.sh
+
+# Test unpacking a NAR.
+rm -rf $TEST_ROOT/archive
+mkdir -p $TEST_ROOT/archive
+cp ./fetchurl.sh $TEST_ROOT/archive
+chmod +x $TEST_ROOT/archive/fetchurl.sh
+ln -s foo $TEST_ROOT/archive/symlink
+nar=$TEST_ROOT/archive.nar
+nix-store --dump $TEST_ROOT/archive > $nar
+
+hash=$(nix-hash --flat --type sha256 $nar)
+
+outPath=$(nix-build '<nix/fetchurl.nix>' --argstr url file://$nar --argstr sha256 $hash \
+          --arg unpack true --argstr name xyzzy --no-out-link)
+
+echo $outPath | grep -q 'xyzzy'
+
+test -x $outPath/fetchurl.sh
+test -L $outPath/symlink
+
+nix-store --delete $outPath
+
+# Test unpacking a compressed NAR.
+narxz=$TEST_ROOT/archive.nar.xz
+rm -f $narxz
+xz --keep $nar
+outPath=$(nix-build '<nix/fetchurl.nix>' --argstr url file://$narxz --argstr sha256 $hash \
+          --arg unpack true --argstr name xyzzy --no-out-link)
+
+test -x $outPath/fetchurl.sh
+test -L $outPath/symlink
diff --git a/tests/fixed.sh b/tests/fixed.sh
index ed0d06dd29cb..cac3f0be91b0 100644
--- a/tests/fixed.sh
+++ b/tests/fixed.sh
@@ -40,13 +40,10 @@ echo "Hello World!" > $TEST_ROOT/fixed/foo
 ln -s foo $TEST_ROOT/fixed/bar
 
 out2=$(nix-store --add $TEST_ROOT/fixed)
-echo $out2
-test "$out" = "$out2" || exit 1
+[ "$out" = "$out2" ]
 
 out3=$(nix-store --add-fixed --recursive sha256 $TEST_ROOT/fixed)
-echo $out3
-test "$out" = "$out3" || exit 1
+[ "$out" = "$out3" ]
 
 out4=$(nix-store --print-fixed-path --recursive sha256 "1ixr6yd3297ciyp9im522dfxpqbkhcw0pylkb2aab915278fqaik" fixed)
-echo $out4
-test "$out" = "$out4" || exit 1
+[ "$out" = "$out4" ]
diff --git a/tests/hash.sh b/tests/hash.sh
index d659bbe34e8f..a95c68683f84 100644
--- a/tests/hash.sh
+++ b/tests/hash.sh
@@ -17,12 +17,18 @@ try md5 "abcdefghijklmnopqrstuvwxyz" "c3fcd3d76192e4007dfb496cca67e13b"
 try md5 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" "d174ab98d277d9f5a5611c2c9f419d9f"
 try md5 "12345678901234567890123456789012345678901234567890123456789012345678901234567890" "57edf4a22be3c955ac49da2e2107b67a"
 
+try sha1 "" "da39a3ee5e6b4b0d3255bfef95601890afd80709"
 try sha1 "abc" "a9993e364706816aba3e25717850c26c9cd0d89d"
 try sha1 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "84983e441c3bd26ebaae4aa1f95129e5e54670f1"
 
+try sha256 "" "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
 try sha256 "abc" "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
 try sha256 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1"
 
+try sha512 "" "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
+try sha512 "abc" "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f"
+try sha512 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "204a8fc6dda82f0a0ced7beb8e08a41657c16ef468b228a8279be331a703c33596fd15c13b1b07f9aa1d3bea57789ca031ad85c7a71dd70354ec631238ca3445"
+
 EXTRA=--base32
 try sha256 "abc" "1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s"
 EXTRA=
@@ -56,7 +62,12 @@ ln -s x $TEST_ROOT/hash-path/hello
 try2 md5 "f78b733a68f5edbdf9413899339eaa4a"
 
 # Conversion.
-test $(nix-hash --type sha256 --to-base32 "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad") = "1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s"
-test $(nix-hash --type sha256 --to-base16 "1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s") = "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
-test $(nix-hash --type sha1 --to-base32 "800d59cfcd3c05e900cb4e214be48f6b886a08df") = "vw46m23bizj4n8afrc0fj19wrp7mj3c0"
-test $(nix-hash --type sha1 --to-base16 "vw46m23bizj4n8afrc0fj19wrp7mj3c0") = "800d59cfcd3c05e900cb4e214be48f6b886a08df"
+try3() {
+    h32=$(nix-hash --type "$1" --to-base32 "$2")
+    [ "$h32" = "$3" ]
+    h16=$(nix-hash --type "$1" --to-base16 "$h32")
+    [ "$h16" = "$2" ]
+}
+try3 sha1 "800d59cfcd3c05e900cb4e214be48f6b886a08df" "vw46m23bizj4n8afrc0fj19wrp7mj3c0"
+try3 sha256 "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad" "1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s"
+try3 sha512 "204a8fc6dda82f0a0ced7beb8e08a41657c16ef468b228a8279be331a703c33596fd15c13b1b07f9aa1d3bea57789ca031ad85c7a71dd70354ec631238ca3445" "12k9jiq29iyqm03swfsgiw5mlqs173qazm3n7daz43infy12pyrcdf30fkk3qwv4yl2ick8yipc2mqnlh48xsvvxl60lbx8vp38yji0"
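try3 checks that converting a hash to base-32 and back to base-16 is lossless for every supported type. The same round trip done programmatically might look like the sketch below; roundTrip is a made-up name, parseHash16or32 appears in this patch, and printHash (base-16) and printHash32 are assumed to be the rendering helpers declared in hash.hh.

#include "hash.hh"
#include <cassert>
#include <string>

using namespace nix;

void roundTrip(HashType ht, const std::string & base16)
{
    Hash h = parseHash16or32(ht, base16);    // accepts either encoding
    std::string b32 = printHash32(h);        // assumed helper: base-32 rendering
    assert(printHash(parseHash16or32(ht, b32)) == base16);  // back to base-16, unchanged
}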
diff --git a/tests/lang/eval-okay-hash.exp b/tests/lang/eval-okay-hash.exp
index 7bbe452bcc01..d720a082ddb3 100644
--- a/tests/lang/eval-okay-hash.exp
+++ b/tests/lang/eval-okay-hash.exp
@@ -1 +1 @@
-[ "d41d8cd98f00b204e9800998ecf8427e" "6c69ee7f211c640419d5366cc076ae46" "bb3438fbabd460ea6dbd27d153e2233b" "da39a3ee5e6b4b0d3255bfef95601890afd80709" "cd54e8568c1b37cf1e5badb0779bcbf382212189" "6d12e10b1d331dad210e47fd25d4f260802b7e77" "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" "900a4469df00ccbfd0c145c6d1e4b7953dd0afafadd7534e3a4019e8d38fc663" "ad0387b3bd8652f730ca46d25f9c170af0fd589f42e7f23f5a9e6412d97d7e56" ]
+[ "d41d8cd98f00b204e9800998ecf8427e" "6c69ee7f211c640419d5366cc076ae46" "bb3438fbabd460ea6dbd27d153e2233b" "da39a3ee5e6b4b0d3255bfef95601890afd80709" "cd54e8568c1b37cf1e5badb0779bcbf382212189" "6d12e10b1d331dad210e47fd25d4f260802b7e77" "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" "900a4469df00ccbfd0c145c6d1e4b7953dd0afafadd7534e3a4019e8d38fc663" "ad0387b3bd8652f730ca46d25f9c170af0fd589f42e7f23f5a9e6412d97d7e56" "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e" "9d0886f8c6b389398a16257bc79780fab9831c7fc11c8ab07fa732cb7b348feade382f92617c9c5305fefba0af02ab5fd39a587d330997ff5bd0db19f7666653" "21644b72aa259e5a588cd3afbafb1d4310f4889680f6c83b9d531596a5a284f34dbebff409d23bcc86aee6bad10c891606f075c6f4755cb536da27db5693f3a7" ]
diff --git a/tests/lang/eval-okay-hash.nix b/tests/lang/eval-okay-hash.nix
index 2fff17f849bb..b0f62b245ca8 100644
--- a/tests/lang/eval-okay-hash.nix
+++ b/tests/lang/eval-okay-hash.nix
@@ -1,7 +1,4 @@
 let
-  md5 = builtins.hashString "md5";
-  sha1 = builtins.hashString "sha1";
-  sha256 = builtins.hashString "sha256";
   strings = [ "" "text 1" "text 2" ];
 in
-  (builtins.map md5 strings) ++ (builtins.map sha1 strings) ++ (builtins.map sha256 strings)
+  builtins.concatLists (map (hash: map (builtins.hashString hash) strings) ["md5" "sha1" "sha256" "sha512"])