-rw-r--r--  Makefile.config.in | 1
-rwxr-xr-x  config/config.guess | 983
-rwxr-xr-x  config/config.sub | 1944
-rw-r--r--  configure.ac | 15
-rw-r--r--  doc/manual/advanced-topics/advanced-topics.xml | 1
-rw-r--r--  doc/manual/advanced-topics/distributed-builds.xml | 1
-rw-r--r--  doc/manual/command-ref/conf-file.xml | 5
-rw-r--r--  doc/manual/command-ref/nix-env.xml | 15
-rw-r--r--  doc/manual/command-ref/nix-shell.xml | 8
-rw-r--r--  doc/manual/command-ref/nix-store.xml | 2
-rw-r--r--  doc/manual/expressions/advanced-attributes.xml | 34
-rw-r--r--  doc/manual/expressions/builtins.xml | 130
-rw-r--r--  doc/manual/expressions/language-constructs.xml | 2
-rw-r--r--  doc/manual/glossary/glossary.xml | 3
-rw-r--r--  doc/manual/hacking.xml | 2
-rw-r--r--  doc/manual/installation/env-variables.xml | 49
-rw-r--r--  doc/manual/packages/s3-substituter.xml | 159
-rw-r--r--  doc/manual/packages/sharing-packages.xml | 1
-rw-r--r--  doc/manual/release-notes/release-notes.xml | 1
-rw-r--r--  doc/manual/release-notes/rl-2.1.xml | 110
-rwxr-xr-x  maintainers/upload-release.pl | 26
-rw-r--r--  misc/docker/Dockerfile | 28
-rw-r--r--  misc/docker/README.md | 8
-rw-r--r--  nix.spec.in | 20
-rw-r--r--  perl/lib/Nix/Store.xs | 2
-rw-r--r--  release-common.nix | 13
-rw-r--r--  release.nix | 57
-rw-r--r--  scripts/install-multi-user.sh | 12
-rw-r--r--  scripts/install.in | 67
-rw-r--r--  scripts/nix-profile-daemon.sh.in | 23
-rw-r--r--  src/cpptoml/LICENSE | 18
-rw-r--r--  src/cpptoml/cpptoml.h | 3457
-rw-r--r--  src/libexpr/attr-set.cc | 16
-rw-r--r--  src/libexpr/eval-inline.hh | 14
-rw-r--r--  src/libexpr/eval.cc | 115
-rw-r--r--  src/libexpr/eval.hh | 38
-rw-r--r--  src/libexpr/json-to-value.cc | 2
-rw-r--r--  src/libexpr/lexer.l | 8
-rw-r--r--  src/libexpr/parser.y | 4
-rw-r--r--  src/libexpr/primops.cc | 106
-rw-r--r--  src/libexpr/primops/fetchGit.cc | 4
-rw-r--r--  src/libexpr/primops/fetchMercurial.cc | 4
-rw-r--r--  src/libexpr/primops/fromTOML.cc | 77
-rw-r--r--  src/libexpr/value.hh | 4
-rw-r--r--  src/libmain/common-args.cc | 4
-rw-r--r--  src/libmain/shared.cc | 2
-rw-r--r--  src/libstore/binary-cache-store.cc | 82
-rw-r--r--  src/libstore/binary-cache-store.hh | 18
-rw-r--r--  src/libstore/build.cc | 52
-rw-r--r--  src/libstore/builtins/fetchurl.cc | 69
-rw-r--r--  src/libstore/derivations.cc | 2
-rw-r--r--  src/libstore/download.cc | 268
-rw-r--r--  src/libstore/download.hh | 17
-rw-r--r--  src/libstore/gc.cc | 29
-rw-r--r--  src/libstore/globals.cc | 38
-rw-r--r--  src/libstore/globals.hh | 65
-rw-r--r--  src/libstore/http-binary-cache-store.cc | 40
-rw-r--r--  src/libstore/legacy-ssh-store.cc | 73
-rw-r--r--  src/libstore/local-binary-cache-store.cc | 18
-rw-r--r--  src/libstore/local-store.cc | 18
-rw-r--r--  src/libstore/local-store.hh | 3
-rw-r--r--  src/libstore/misc.cc | 17
-rw-r--r--  src/libstore/optimise-store.cc | 3
-rw-r--r--  src/libstore/profiles.cc | 23
-rw-r--r--  src/libstore/profiles.hh | 2
-rw-r--r--  src/libstore/remote-store.cc | 79
-rw-r--r--  src/libstore/remote-store.hh | 3
-rw-r--r--  src/libstore/s3-binary-cache-store.cc | 130
-rw-r--r--  src/libstore/s3.hh | 4
-rw-r--r--  src/libstore/serve-protocol.hh | 3
-rw-r--r--  src/libstore/sqlite.cc | 3
-rw-r--r--  src/libstore/ssh.cc | 22
-rw-r--r--  src/libstore/ssh.hh | 1
-rw-r--r--  src/libstore/store-api.cc | 128
-rw-r--r--  src/libstore/store-api.hh | 7
-rw-r--r--  src/libstore/worker-protocol.hh | 2
-rw-r--r--  src/libutil/archive.cc | 26
-rw-r--r--  src/libutil/archive.hh | 4
-rw-r--r--  src/libutil/compression.cc | 613
-rw-r--r--  src/libutil/compression.hh | 12
-rw-r--r--  src/libutil/config.cc | 88
-rw-r--r--  src/libutil/config.hh | 81
-rw-r--r--  src/libutil/json.cc | 1
-rw-r--r--  src/libutil/serialise.cc | 21
-rw-r--r--  src/libutil/serialise.hh | 15
-rw-r--r--  src/libutil/sync.hh | 4
-rw-r--r--  src/libutil/util.cc | 27
-rw-r--r--  src/libutil/util.hh | 56
-rwxr-xr-x  src/nix-build/nix-build.cc | 39
-rw-r--r--  src/nix-daemon/nix-daemon.cc | 34
-rw-r--r--  src/nix-env/nix-env.cc | 8
-rw-r--r--  src/nix-instantiate/nix-instantiate.cc | 20
-rw-r--r--  src/nix-prefetch-url/nix-prefetch-url.cc | 52
-rw-r--r--  src/nix-store/nix-store.cc | 24
-rw-r--r--  src/nix/copy.cc | 4
-rw-r--r--  src/nix/installables.cc | 26
-rw-r--r--  src/nix/main.cc | 8
-rw-r--r--  src/nix/path-info.cc | 36
-rw-r--r--  src/nix/repl.cc | 13
-rw-r--r--  src/nix/run.cc | 26
-rw-r--r--  src/nix/search.cc | 30
-rw-r--r--  src/nix/show-config.cc | 8
-rw-r--r--  src/nix/upgrade-nix.cc | 53
-rw-r--r--  src/nix/why-depends.cc | 2
-rw-r--r--  tests/binary-cache.sh | 31
-rw-r--r--  tests/brotli.sh | 7
-rw-r--r--  tests/common.sh.in | 9
-rw-r--r--  tests/lang/eval-okay-arithmetic.exp | 2
-rw-r--r--  tests/lang/eval-okay-arithmetic.nix | 4
-rw-r--r--  tests/lang/eval-okay-concatmap.exp | 1
-rw-r--r--  tests/lang/eval-okay-concatmap.nix | 5
-rw-r--r--  tests/lang/eval-okay-float.exp | 1
-rw-r--r--  tests/lang/eval-okay-float.nix | 6
-rw-r--r--  tests/lang/eval-okay-fromTOML.exp | 1
-rw-r--r--  tests/lang/eval-okay-fromTOML.nix | 184
-rw-r--r--  tests/lang/eval-okay-mapattrs.exp | 1
-rw-r--r--  tests/lang/eval-okay-mapattrs.nix | 3
-rw-r--r--  tests/local.mk | 3
-rw-r--r--  tests/nix-copy-ssh.sh | 20
-rw-r--r--  tests/nix-shell.sh | 7
-rw-r--r--  tests/plugins/plugintest.cc | 13
-rw-r--r--  tests/restricted.sh | 11
-rw-r--r--  tests/search.sh | 2
123 files changed, 7651 insertions(+), 2805 deletions(-)
diff --git a/Makefile.config.in b/Makefile.config.in
index a9785dc73955..08edcb863ef6 100644
--- a/Makefile.config.in
+++ b/Makefile.config.in
@@ -6,7 +6,6 @@ CXXFLAGS = @CXXFLAGS@
 ENABLE_S3 = @ENABLE_S3@
 HAVE_SODIUM = @HAVE_SODIUM@
 HAVE_READLINE = @HAVE_READLINE@
-HAVE_BROTLI = @HAVE_BROTLI@
 HAVE_SECCOMP = @HAVE_SECCOMP@
 LIBCURL_LIBS = @LIBCURL_LIBS@
 OPENSSL_LIBS = @OPENSSL_LIBS@
diff --git a/config/config.guess b/config/config.guess
index 137bedf2e28b..d4fb3213ec7a 100755
--- a/config/config.guess
+++ b/config/config.guess
@@ -1,14 +1,12 @@
 #! /bin/sh
 # Attempt to guess a canonical system name.
-#   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-#   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-#   2011, 2012 Free Software Foundation, Inc.
+#   Copyright 1992-2018 Free Software Foundation, Inc.
 
-timestamp='2012-08-14'
+timestamp='2018-08-02'
 
 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# the Free Software Foundation; either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful, but
@@ -17,24 +15,22 @@ timestamp='2012-08-14'
 # General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
 #
 # As a special exception to the GNU General Public License, if you
 # distribute this file as part of a program that contains a
 # configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Originally written by Per Bothner.  Please send patches (context
-# diff format) to <config-patches@gnu.org> and include a ChangeLog
-# entry.
+# the same distribution terms that you use for the rest of that
+# program.  This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
 #
-# This script attempts to guess a canonical system name similar to
-# config.sub.  If it succeeds, it prints the system name on stdout, and
-# exits with 0.  Otherwise, it exits with 1.
+# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
 #
 # You can get the latest version of this script from:
-# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+#
+# Please send patches to <config-patches@gnu.org>.
+
 
 me=`echo "$0" | sed -e 's,.*/,,'`
 
@@ -43,7 +39,7 @@ Usage: $0 [OPTION]
 
 Output the configuration name of the system \`$me' is run on.
 
-Operation modes:
+Options:
   -h, --help         print this help, then exit
   -t, --time-stamp   print date of last modification, then exit
   -v, --version      print version number, then exit
@@ -54,9 +50,7 @@ version="\
 GNU config.guess ($timestamp)
 
 Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
-2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
-Free Software Foundation, Inc.
+Copyright 1992-2018 Free Software Foundation, Inc.
 
 This is free software; see the source for copying conditions.  There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -90,8 +84,6 @@ if test $# != 0; then
   exit 1
 fi
 
-trap 'exit 1' 1 2 15
-
 # CC_FOR_BUILD -- compiler used by this script. Note that the use of a
 # compiler to aid in system detection is discouraged as it requires
 # temporary files to be created and, as you can see below, it is a
@@ -102,34 +94,39 @@ trap 'exit 1' 1 2 15
 
 # Portable tmp directory creation inspired by the Autoconf team.
 
-set_cc_for_build='
-trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
-trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
-: ${TMPDIR=/tmp} ;
- { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
- { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
- { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
- { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
-dummy=$tmp/dummy ;
-tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
-case $CC_FOR_BUILD,$HOST_CC,$CC in
- ,,)    echo "int x;" > $dummy.c ;
-	for c in cc gcc c89 c99 ; do
-	  if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
-	     CC_FOR_BUILD="$c"; break ;
-	  fi ;
-	done ;
-	if test x"$CC_FOR_BUILD" = x ; then
-	  CC_FOR_BUILD=no_compiler_found ;
-	fi
-	;;
- ,,*)   CC_FOR_BUILD=$CC ;;
- ,*,*)  CC_FOR_BUILD=$HOST_CC ;;
-esac ; set_cc_for_build= ;'
+tmp=
+# shellcheck disable=SC2172
+trap 'test -z "$tmp" || rm -fr "$tmp"' 1 2 13 15
+trap 'exitcode=$?; test -z "$tmp" || rm -fr "$tmp"; exit $exitcode' 0
+
+set_cc_for_build() {
+    : "${TMPDIR=/tmp}"
+    # shellcheck disable=SC2039
+    { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+	{ test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } ||
+	{ tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+	{ echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; }
+    dummy=$tmp/dummy
+    case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in
+	,,)    echo "int x;" > "$dummy.c"
+	       for driver in cc gcc c89 c99 ; do
+		   if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then
+		       CC_FOR_BUILD="$driver"
+		       break
+		   fi
+	       done
+	       if test x"$CC_FOR_BUILD" = x ; then
+		   CC_FOR_BUILD=no_compiler_found
+	       fi
+	       ;;
+	,,*)   CC_FOR_BUILD=$CC ;;
+	,*,*)  CC_FOR_BUILD=$HOST_CC ;;
+    esac
+}
 
 # This is needed to find uname on a Pyramid OSx when run in the BSD universe.
 # (ghazi@noc.rutgers.edu 1994-08-24)
-if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+if test -f /.attbin/uname ; then
 	PATH=$PATH:/.attbin ; export PATH
 fi
 
@@ -138,9 +135,37 @@ UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
 UNAME_SYSTEM=`(uname -s) 2>/dev/null`  || UNAME_SYSTEM=unknown
 UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
 
+case "$UNAME_SYSTEM" in
+Linux|GNU|GNU/*)
+	# If the system lacks a compiler, then just pick glibc.
+	# We could probably try harder.
+	LIBC=gnu
+
+	set_cc_for_build
+	cat <<-EOF > "$dummy.c"
+	#include <features.h>
+	#if defined(__UCLIBC__)
+	LIBC=uclibc
+	#elif defined(__dietlibc__)
+	LIBC=dietlibc
+	#else
+	LIBC=gnu
+	#endif
+	EOF
+	eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`"
+
+	# If ldd exists, use it to detect musl libc.
+	if command -v ldd >/dev/null && \
+		ldd --version 2>&1 | grep -q ^musl
+	then
+	    LIBC=musl
+	fi
+	;;
+esac
+
 # Note: order is significant - the case branches are not exclusive.
 
-case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
     *:NetBSD:*:*)
 	# NetBSD (nbsd) targets should (where applicable) match one or
 	# more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
@@ -153,21 +178,31 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 	# Note: NetBSD doesn't particularly care about the vendor
 	# portion of the name.  We always set it to "unknown".
 	sysctl="sysctl -n hw.machine_arch"
-	UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
-	    /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
-	case "${UNAME_MACHINE_ARCH}" in
+	UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \
+	    "/sbin/$sysctl" 2>/dev/null || \
+	    "/usr/sbin/$sysctl" 2>/dev/null || \
+	    echo unknown)`
+	case "$UNAME_MACHINE_ARCH" in
 	    armeb) machine=armeb-unknown ;;
 	    arm*) machine=arm-unknown ;;
 	    sh3el) machine=shl-unknown ;;
 	    sh3eb) machine=sh-unknown ;;
 	    sh5el) machine=sh5le-unknown ;;
-	    *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+	    earmv*)
+		arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'`
+		endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'`
+		machine="${arch}${endian}"-unknown
+		;;
+	    *) machine="$UNAME_MACHINE_ARCH"-unknown ;;
 	esac
 	# The Operating System including object format, if it has switched
-	# to ELF recently, or will in the future.
-	case "${UNAME_MACHINE_ARCH}" in
+	# to ELF recently (or will in the future) and ABI.
+	case "$UNAME_MACHINE_ARCH" in
+	    earm*)
+		os=netbsdelf
+		;;
 	    arm*|i386|m68k|ns32k|sh3*|sparc|vax)
-		eval $set_cc_for_build
+		set_cc_for_build
 		if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
 			| grep -q __ELF__
 		then
@@ -182,44 +217,67 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 		os=netbsd
 		;;
 	esac
+	# Determine ABI tags.
+	case "$UNAME_MACHINE_ARCH" in
+	    earm*)
+		expr='s/^earmv[0-9]/-eabi/;s/eb$//'
+		abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"`
+		;;
+	esac
 	# The OS release
 	# Debian GNU/NetBSD machines have a different userland, and
 	# thus, need a distinct triplet. However, they do not need
 	# kernel version information, so it can be replaced with a
 	# suitable tag, in the style of linux-gnu.
-	case "${UNAME_VERSION}" in
+	case "$UNAME_VERSION" in
 	    Debian*)
 		release='-gnu'
 		;;
 	    *)
-		release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+		release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2`
 		;;
 	esac
 	# Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
 	# contains redundant information, the shorter form:
 	# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
-	echo "${machine}-${os}${release}"
+	echo "$machine-${os}${release}${abi-}"
 	exit ;;
     *:Bitrig:*:*)
 	UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
-	echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+	echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE"
 	exit ;;
     *:OpenBSD:*:*)
 	UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
-	echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+	echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE"
+	exit ;;
+    *:LibertyBSD:*:*)
+	UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'`
+	echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE"
+	exit ;;
+    *:MidnightBSD:*:*)
+	echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE"
 	exit ;;
     *:ekkoBSD:*:*)
-	echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+	echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE"
 	exit ;;
     *:SolidBSD:*:*)
-	echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+	echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE"
 	exit ;;
     macppc:MirBSD:*:*)
-	echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+	echo powerpc-unknown-mirbsd"$UNAME_RELEASE"
 	exit ;;
     *:MirBSD:*:*)
-	echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+	echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE"
+	exit ;;
+    *:Sortix:*:*)
+	echo "$UNAME_MACHINE"-unknown-sortix
+	exit ;;
+    *:Redox:*:*)
+	echo "$UNAME_MACHINE"-unknown-redox
 	exit ;;
+    mips:OSF1:*.*)
+        echo mips-dec-osf1
+        exit ;;
     alpha:OSF1:*:*)
 	case $UNAME_RELEASE in
 	*4.0)
@@ -236,63 +294,54 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 	ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^  The alpha \(.*\) processor.*$/\1/p' | head -n 1`
 	case "$ALPHA_CPU_TYPE" in
 	    "EV4 (21064)")
-		UNAME_MACHINE="alpha" ;;
+		UNAME_MACHINE=alpha ;;
 	    "EV4.5 (21064)")
-		UNAME_MACHINE="alpha" ;;
+		UNAME_MACHINE=alpha ;;
 	    "LCA4 (21066/21068)")
-		UNAME_MACHINE="alpha" ;;
+		UNAME_MACHINE=alpha ;;
 	    "EV5 (21164)")
-		UNAME_MACHINE="alphaev5" ;;
+		UNAME_MACHINE=alphaev5 ;;
 	    "EV5.6 (21164A)")
-		UNAME_MACHINE="alphaev56" ;;
+		UNAME_MACHINE=alphaev56 ;;
 	    "EV5.6 (21164PC)")
-		UNAME_MACHINE="alphapca56" ;;
+		UNAME_MACHINE=alphapca56 ;;
 	    "EV5.7 (21164PC)")
-		UNAME_MACHINE="alphapca57" ;;
+		UNAME_MACHINE=alphapca57 ;;
 	    "EV6 (21264)")
-		UNAME_MACHINE="alphaev6" ;;
+		UNAME_MACHINE=alphaev6 ;;
 	    "EV6.7 (21264A)")
-		UNAME_MACHINE="alphaev67" ;;
+		UNAME_MACHINE=alphaev67 ;;
 	    "EV6.8CB (21264C)")
-		UNAME_MACHINE="alphaev68" ;;
+		UNAME_MACHINE=alphaev68 ;;
 	    "EV6.8AL (21264B)")
-		UNAME_MACHINE="alphaev68" ;;
+		UNAME_MACHINE=alphaev68 ;;
 	    "EV6.8CX (21264D)")
-		UNAME_MACHINE="alphaev68" ;;
+		UNAME_MACHINE=alphaev68 ;;
 	    "EV6.9A (21264/EV69A)")
-		UNAME_MACHINE="alphaev69" ;;
+		UNAME_MACHINE=alphaev69 ;;
 	    "EV7 (21364)")
-		UNAME_MACHINE="alphaev7" ;;
+		UNAME_MACHINE=alphaev7 ;;
 	    "EV7.9 (21364A)")
-		UNAME_MACHINE="alphaev79" ;;
+		UNAME_MACHINE=alphaev79 ;;
 	esac
 	# A Pn.n version is a patched version.
 	# A Vn.n version is a released version.
 	# A Tn.n version is a released field test version.
 	# A Xn.n version is an unreleased experimental baselevel.
 	# 1.2 uses "1.2" for uname -r.
-	echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+	echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`"
 	# Reset EXIT trap before exiting to avoid spurious non-zero exit code.
 	exitcode=$?
 	trap '' 0
 	exit $exitcode ;;
-    Alpha\ *:Windows_NT*:*)
-	# How do we know it's Interix rather than the generic POSIX subsystem?
-	# Should we change UNAME_MACHINE based on the output of uname instead
-	# of the specific Alpha model?
-	echo alpha-pc-interix
-	exit ;;
-    21064:Windows_NT:50:3)
-	echo alpha-dec-winnt3.5
-	exit ;;
     Amiga*:UNIX_System_V:4.0:*)
 	echo m68k-unknown-sysv4
 	exit ;;
     *:[Aa]miga[Oo][Ss]:*:*)
-	echo ${UNAME_MACHINE}-unknown-amigaos
+	echo "$UNAME_MACHINE"-unknown-amigaos
 	exit ;;
     *:[Mm]orph[Oo][Ss]:*:*)
-	echo ${UNAME_MACHINE}-unknown-morphos
+	echo "$UNAME_MACHINE"-unknown-morphos
 	exit ;;
     *:OS/390:*:*)
 	echo i370-ibm-openedition
@@ -304,9 +353,9 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 	echo powerpc-ibm-os400
 	exit ;;
     arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
-	echo arm-acorn-riscix${UNAME_RELEASE}
+	echo arm-acorn-riscix"$UNAME_RELEASE"
 	exit ;;
-    arm:riscos:*:*|arm:RISCOS:*:*)
+    arm*:riscos:*:*|arm*:RISCOS:*:*)
 	echo arm-unknown-riscos
 	exit ;;
     SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
@@ -331,38 +380,33 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 	    sparc) echo sparc-icl-nx7; exit ;;
 	esac ;;
     s390x:SunOS:*:*)
-	echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`"
 	exit ;;
     sun4H:SunOS:5.*:*)
-	echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
 	exit ;;
     sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
-	echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`"
 	exit ;;
     i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
-	echo i386-pc-auroraux${UNAME_RELEASE}
+	echo i386-pc-auroraux"$UNAME_RELEASE"
 	exit ;;
     i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
-	eval $set_cc_for_build
-	SUN_ARCH="i386"
-	# If there is a compiler, see if it is configured for 64-bit objects.
-	# Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
-	# This test works for both compilers.
-	if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
-	    if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
-		(CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
-		grep IS_64BIT_ARCH >/dev/null
-	    then
-		SUN_ARCH="x86_64"
-	    fi
-	fi
-	echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	UNAME_REL="`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`"
+	case `isainfo -b` in
+	    32)
+		echo i386-pc-solaris2"$UNAME_REL"
+		;;
+	    64)
+		echo x86_64-pc-solaris2"$UNAME_REL"
+		;;
+	esac
 	exit ;;
     sun4*:SunOS:6*:*)
 	# According to config.sub, this is the proper way to canonicalize
 	# SunOS6.  Hard to guess exactly what SunOS6 will be like, but
 	# it's likely to be more like Solaris than SunOS4.
-	echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
 	exit ;;
     sun4*:SunOS:*:*)
 	case "`/usr/bin/arch -k`" in
@@ -371,25 +415,25 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 		;;
 	esac
 	# Japanese Language versions have a version number like `4.1.3-JL'.
-	echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+	echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`"
 	exit ;;
     sun3*:SunOS:*:*)
-	echo m68k-sun-sunos${UNAME_RELEASE}
+	echo m68k-sun-sunos"$UNAME_RELEASE"
 	exit ;;
     sun*:*:4.2BSD:*)
 	UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
-	test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+	test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3
 	case "`/bin/arch`" in
 	    sun3)
-		echo m68k-sun-sunos${UNAME_RELEASE}
+		echo m68k-sun-sunos"$UNAME_RELEASE"
 		;;
 	    sun4)
-		echo sparc-sun-sunos${UNAME_RELEASE}
+		echo sparc-sun-sunos"$UNAME_RELEASE"
 		;;
 	esac
 	exit ;;
     aushp:SunOS:*:*)
-	echo sparc-auspex-sunos${UNAME_RELEASE}
+	echo sparc-auspex-sunos"$UNAME_RELEASE"
 	exit ;;
     # The situation for MiNT is a little confusing.  The machine name
     # can be virtually everything (everything which is not
@@ -400,44 +444,44 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
     # MiNT.  But MiNT is downward compatible to TOS, so this should
     # be no problem.
     atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
-	echo m68k-atari-mint${UNAME_RELEASE}
+	echo m68k-atari-mint"$UNAME_RELEASE"
 	exit ;;
     atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
-	echo m68k-atari-mint${UNAME_RELEASE}
+	echo m68k-atari-mint"$UNAME_RELEASE"
 	exit ;;
     *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
-	echo m68k-atari-mint${UNAME_RELEASE}
+	echo m68k-atari-mint"$UNAME_RELEASE"
 	exit ;;
     milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
-	echo m68k-milan-mint${UNAME_RELEASE}
+	echo m68k-milan-mint"$UNAME_RELEASE"
 	exit ;;
     hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
-	echo m68k-hades-mint${UNAME_RELEASE}
+	echo m68k-hades-mint"$UNAME_RELEASE"
 	exit ;;
     *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
-	echo m68k-unknown-mint${UNAME_RELEASE}
+	echo m68k-unknown-mint"$UNAME_RELEASE"
 	exit ;;
     m68k:machten:*:*)
-	echo m68k-apple-machten${UNAME_RELEASE}
+	echo m68k-apple-machten"$UNAME_RELEASE"
 	exit ;;
     powerpc:machten:*:*)
-	echo powerpc-apple-machten${UNAME_RELEASE}
+	echo powerpc-apple-machten"$UNAME_RELEASE"
 	exit ;;
     RISC*:Mach:*:*)
 	echo mips-dec-mach_bsd4.3
 	exit ;;
     RISC*:ULTRIX:*:*)
-	echo mips-dec-ultrix${UNAME_RELEASE}
+	echo mips-dec-ultrix"$UNAME_RELEASE"
 	exit ;;
     VAX*:ULTRIX*:*:*)
-	echo vax-dec-ultrix${UNAME_RELEASE}
+	echo vax-dec-ultrix"$UNAME_RELEASE"
 	exit ;;
     2020:CLIX:*:* | 2430:CLIX:*:*)
-	echo clipper-intergraph-clix${UNAME_RELEASE}
+	echo clipper-intergraph-clix"$UNAME_RELEASE"
 	exit ;;
     mips:*:*:UMIPS | mips:*:*:RISCos)
-	eval $set_cc_for_build
-	sed 's/^	//' << EOF >$dummy.c
+	set_cc_for_build
+	sed 's/^	//' << EOF > "$dummy.c"
 #ifdef __cplusplus
 #include <stdio.h>  /* for printf() prototype */
 	int main (int argc, char *argv[]) {
@@ -446,23 +490,23 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
 #endif
 	#if defined (host_mips) && defined (MIPSEB)
 	#if defined (SYSTYPE_SYSV)
-	  printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+	  printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0);
 	#endif
 	#if defined (SYSTYPE_SVR4)
-	  printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+	  printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0);
 	#endif
 	#if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
-	  printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+	  printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0);
 	#endif
 	#endif
 	  exit (-1);
 	}
 EOF
-	$CC_FOR_BUILD -o $dummy $dummy.c &&
-	  dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
-	  SYSTEM_NAME=`$dummy $dummyarg` &&
+	$CC_FOR_BUILD -o "$dummy" "$dummy.c" &&
+	  dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+	  SYSTEM_NAME=`"$dummy" "$dummyarg"` &&
 	    { echo "$SYSTEM_NAME"; exit; }
-	echo mips-mips-riscos${UNAME_RELEASE}
+	echo mips-mips-riscos"$UNAME_RELEASE"
 	exit ;;
     Motorola:PowerMAX_OS:*:*)
 	echo powerpc-motorola-powermax
@@ -488,17 +532,17 @@ EOF
     AViiON:dgux:*:*)
 	# DG/UX returns AViiON for all architectures
 	UNAME_PROCESSOR=`/usr/bin/uname -p`
-	if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+	if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ]
 	then
-	    if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
-	       [ ${TARGET_BINARY_INTERFACE}x = x ]
+	    if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \
+	       [ "$TARGET_BINARY_INTERFACE"x = x ]
 	    then
-		echo m88k-dg-dgux${UNAME_RELEASE}
+		echo m88k-dg-dgux"$UNAME_RELEASE"
 	    else
-		echo m88k-dg-dguxbcs${UNAME_RELEASE}
+		echo m88k-dg-dguxbcs"$UNAME_RELEASE"
 	    fi
 	else
-	    echo i586-dg-dgux${UNAME_RELEASE}
+	    echo i586-dg-dgux"$UNAME_RELEASE"
 	fi
 	exit ;;
     M88*:DolphinOS:*:*)	# DolphinOS (SVR3)
@@ -515,7 +559,7 @@ EOF
 	echo m68k-tektronix-bsd
 	exit ;;
     *:IRIX*:*:*)
-	echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+	echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`"
 	exit ;;
     ????????:AIX?:[12].1:2)   # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
 	echo romp-ibm-aix     # uname -m gives an 8 hex-code CPU id
@@ -527,14 +571,14 @@ EOF
 	if [ -x /usr/bin/oslevel ] ; then
 		IBM_REV=`/usr/bin/oslevel`
 	else
-		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+		IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
 	fi
-	echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+	echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV"
 	exit ;;
     *:AIX:2:3)
 	if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
-		eval $set_cc_for_build
-		sed 's/^		//' << EOF >$dummy.c
+		set_cc_for_build
+		sed 's/^		//' << EOF > "$dummy.c"
 		#include <sys/systemcfg.h>
 
 		main()
@@ -545,7 +589,7 @@ EOF
 			exit(0);
 			}
 EOF
-		if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+		if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"`
 		then
 			echo "$SYSTEM_NAME"
 		else
@@ -559,26 +603,27 @@ EOF
 	exit ;;
     *:AIX:*:[4567])
 	IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
-	if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+	if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then
 		IBM_ARCH=rs6000
 	else
 		IBM_ARCH=powerpc
 	fi
-	if [ -x /usr/bin/oslevel ] ; then
-		IBM_REV=`/usr/bin/oslevel`
+	if [ -x /usr/bin/lslpp ] ; then
+		IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc |
+			   awk -F: '{ print $3 }' | sed s/[0-9]*$/0/`
 	else
-		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+		IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
 	fi
-	echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+	echo "$IBM_ARCH"-ibm-aix"$IBM_REV"
 	exit ;;
     *:AIX:*:*)
 	echo rs6000-ibm-aix
 	exit ;;
-    ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+    ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*)
 	echo romp-ibm-bsd4.4
 	exit ;;
     ibmrt:*BSD:*|romp-ibm:BSD:*)            # covers RT/PC BSD and
-	echo romp-ibm-bsd${UNAME_RELEASE}   # 4.3 with uname added to
+	echo romp-ibm-bsd"$UNAME_RELEASE"   # 4.3 with uname added to
 	exit ;;                             # report: romp-ibm BSD 4.3
     *:BOSX:*:*)
 	echo rs6000-bull-bosx
@@ -593,28 +638,28 @@ EOF
 	echo m68k-hp-bsd4.4
 	exit ;;
     9000/[34678]??:HP-UX:*:*)
-	HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
-	case "${UNAME_MACHINE}" in
-	    9000/31? )            HP_ARCH=m68000 ;;
-	    9000/[34]?? )         HP_ARCH=m68k ;;
+	HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'`
+	case "$UNAME_MACHINE" in
+	    9000/31?)            HP_ARCH=m68000 ;;
+	    9000/[34]??)         HP_ARCH=m68k ;;
 	    9000/[678][0-9][0-9])
 		if [ -x /usr/bin/getconf ]; then
 		    sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
 		    sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
-		    case "${sc_cpu_version}" in
-		      523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
-		      528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+		    case "$sc_cpu_version" in
+		      523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0
+		      528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1
 		      532)                      # CPU_PA_RISC2_0
-			case "${sc_kernel_bits}" in
-			  32) HP_ARCH="hppa2.0n" ;;
-			  64) HP_ARCH="hppa2.0w" ;;
-			  '') HP_ARCH="hppa2.0" ;;   # HP-UX 10.20
+			case "$sc_kernel_bits" in
+			  32) HP_ARCH=hppa2.0n ;;
+			  64) HP_ARCH=hppa2.0w ;;
+			  '') HP_ARCH=hppa2.0 ;;   # HP-UX 10.20
 			esac ;;
 		    esac
 		fi
-		if [ "${HP_ARCH}" = "" ]; then
-		    eval $set_cc_for_build
-		    sed 's/^		//' << EOF >$dummy.c
+		if [ "$HP_ARCH" = "" ]; then
+		    set_cc_for_build
+		    sed 's/^		//' << EOF > "$dummy.c"
 
 		#define _HPUX_SOURCE
 		#include <stdlib.h>
@@ -647,13 +692,13 @@ EOF
 		    exit (0);
 		}
 EOF
-		    (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+		    (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"`
 		    test -z "$HP_ARCH" && HP_ARCH=hppa
 		fi ;;
 	esac
-	if [ ${HP_ARCH} = "hppa2.0w" ]
+	if [ "$HP_ARCH" = hppa2.0w ]
 	then
-	    eval $set_cc_for_build
+	    set_cc_for_build
 
 	    # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
 	    # 32-bit code.  hppa64-hp-hpux* has the same kernel and a compiler
@@ -664,23 +709,23 @@ EOF
 	    # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
 	    # => hppa64-hp-hpux11.23
 
-	    if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+	    if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) |
 		grep -q __LP64__
 	    then
-		HP_ARCH="hppa2.0w"
+		HP_ARCH=hppa2.0w
 	    else
-		HP_ARCH="hppa64"
+		HP_ARCH=hppa64
 	    fi
 	fi
-	echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+	echo "$HP_ARCH"-hp-hpux"$HPUX_REV"
 	exit ;;
     ia64:HP-UX:*:*)
-	HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
-	echo ia64-hp-hpux${HPUX_REV}
+	HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'`
+	echo ia64-hp-hpux"$HPUX_REV"
 	exit ;;
     3050*:HI-UX:*:*)
-	eval $set_cc_for_build
-	sed 's/^	//' << EOF >$dummy.c
+	set_cc_for_build
+	sed 's/^	//' << EOF > "$dummy.c"
 	#include <unistd.h>
 	int
 	main ()
@@ -705,11 +750,11 @@ EOF
 	  exit (0);
 	}
 EOF
-	$CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+	$CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` &&
 		{ echo "$SYSTEM_NAME"; exit; }
 	echo unknown-hitachi-hiuxwe2
 	exit ;;
-    9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+    9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*)
 	echo hppa1.1-hp-bsd
 	exit ;;
     9000/8??:4.3bsd:*:*)
@@ -718,7 +763,7 @@ EOF
     *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
 	echo hppa1.0-hp-mpeix
 	exit ;;
-    hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+    hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*)
 	echo hppa1.1-hp-osf
 	exit ;;
     hp8??:OSF1:*:*)
@@ -726,9 +771,9 @@ EOF
 	exit ;;
     i*86:OSF1:*:*)
 	if [ -x /usr/sbin/sysversion ] ; then
-	    echo ${UNAME_MACHINE}-unknown-osf1mk
+	    echo "$UNAME_MACHINE"-unknown-osf1mk
 	else
-	    echo ${UNAME_MACHINE}-unknown-osf1
+	    echo "$UNAME_MACHINE"-unknown-osf1
 	fi
 	exit ;;
     parisc*:Lites*:*:*)
@@ -753,127 +798,120 @@ EOF
 	echo c4-convex-bsd
 	exit ;;
     CRAY*Y-MP:*:*:*)
-	echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
 	exit ;;
     CRAY*[A-Z]90:*:*:*)
-	echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+	echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \
 	| sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
 	      -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
 	      -e 's/\.[^.]*$/.X/'
 	exit ;;
     CRAY*TS:*:*:*)
-	echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
 	exit ;;
     CRAY*T3E:*:*:*)
-	echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
 	exit ;;
     CRAY*SV1:*:*:*)
-	echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
 	exit ;;
     *:UNICOS/mp:*:*)
-	echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
 	exit ;;
     F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
-	FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
-	FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
-	FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+	FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
+	FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
+	FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'`
 	echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
 	exit ;;
     5000:UNIX_System_V:4.*:*)
-	FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
-	FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+	FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
+	FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'`
 	echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
 	exit ;;
     i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
-	echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+	echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE"
 	exit ;;
     sparc*:BSD/OS:*:*)
-	echo sparc-unknown-bsdi${UNAME_RELEASE}
+	echo sparc-unknown-bsdi"$UNAME_RELEASE"
 	exit ;;
     *:BSD/OS:*:*)
-	echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+	echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE"
+	exit ;;
+    arm*:FreeBSD:*:*)
+	UNAME_PROCESSOR=`uname -p`
+	set_cc_for_build
+	if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+	    | grep -q __ARM_PCS_VFP
+	then
+	    echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabi
+	else
+	    echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabihf
+	fi
 	exit ;;
     *:FreeBSD:*:*)
 	UNAME_PROCESSOR=`/usr/bin/uname -p`
-	case ${UNAME_PROCESSOR} in
+	case "$UNAME_PROCESSOR" in
 	    amd64)
-		echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
-	    *)
-		echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+		UNAME_PROCESSOR=x86_64 ;;
+	    i386)
+		UNAME_PROCESSOR=i586 ;;
 	esac
+	echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`"
 	exit ;;
     i*:CYGWIN*:*)
-	echo ${UNAME_MACHINE}-pc-cygwin
+	echo "$UNAME_MACHINE"-pc-cygwin
 	exit ;;
     *:MINGW64*:*)
-	echo ${UNAME_MACHINE}-pc-mingw64
+	echo "$UNAME_MACHINE"-pc-mingw64
 	exit ;;
     *:MINGW*:*)
-	echo ${UNAME_MACHINE}-pc-mingw32
+	echo "$UNAME_MACHINE"-pc-mingw32
 	exit ;;
-    i*:MSYS*:*)
-	echo ${UNAME_MACHINE}-pc-msys
-	exit ;;
-    i*:windows32*:*)
-	# uname -m includes "-pc" on this system.
-	echo ${UNAME_MACHINE}-mingw32
+    *:MSYS*:*)
+	echo "$UNAME_MACHINE"-pc-msys
 	exit ;;
     i*:PW*:*)
-	echo ${UNAME_MACHINE}-pc-pw32
+	echo "$UNAME_MACHINE"-pc-pw32
 	exit ;;
     *:Interix*:*)
-	case ${UNAME_MACHINE} in
+	case "$UNAME_MACHINE" in
 	    x86)
-		echo i586-pc-interix${UNAME_RELEASE}
+		echo i586-pc-interix"$UNAME_RELEASE"
 		exit ;;
 	    authenticamd | genuineintel | EM64T)
-		echo x86_64-unknown-interix${UNAME_RELEASE}
+		echo x86_64-unknown-interix"$UNAME_RELEASE"
 		exit ;;
 	    IA64)
-		echo ia64-unknown-interix${UNAME_RELEASE}
+		echo ia64-unknown-interix"$UNAME_RELEASE"
 		exit ;;
 	esac ;;
-    [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
-	echo i${UNAME_MACHINE}-pc-mks
-	exit ;;
-    8664:Windows_NT:*)
-	echo x86_64-pc-mks
-	exit ;;
-    i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
-	# How do we know it's Interix rather than the generic POSIX subsystem?
-	# It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
-	# UNAME_MACHINE based on the output of uname instead of i386?
-	echo i586-pc-interix
-	exit ;;
     i*:UWIN*:*)
-	echo ${UNAME_MACHINE}-pc-uwin
+	echo "$UNAME_MACHINE"-pc-uwin
 	exit ;;
     amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
 	echo x86_64-unknown-cygwin
 	exit ;;
-    p*:CYGWIN*:*)
-	echo powerpcle-unknown-cygwin
-	exit ;;
     prep*:SunOS:5.*:*)
-	echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
 	exit ;;
     *:GNU:*:*)
 	# the GNU system
-	echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+	echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`"
 	exit ;;
     *:GNU/*:*:*)
 	# other systems with GNU libc and userland
-	echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
+	echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC"
 	exit ;;
-    i*86:Minix:*:*)
-	echo ${UNAME_MACHINE}-pc-minix
+    *:Minix:*:*)
+	echo "$UNAME_MACHINE"-unknown-minix
 	exit ;;
     aarch64:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     aarch64_be:Linux:*:*)
 	UNAME_MACHINE=aarch64_be
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     alpha:Linux:*:*)
 	case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
@@ -886,63 +924,64 @@ EOF
 	  EV68*) UNAME_MACHINE=alphaev68 ;;
 	esac
 	objdump --private-headers /bin/sh | grep -q ld.so.1
-	if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
-	echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+	if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+	exit ;;
+    arc:Linux:*:* | arceb:Linux:*:*)
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     arm*:Linux:*:*)
-	eval $set_cc_for_build
+	set_cc_for_build
 	if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
 	    | grep -q __ARM_EABI__
 	then
-	    echo ${UNAME_MACHINE}-unknown-linux-gnu
+	    echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	else
 	    if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
 		| grep -q __ARM_PCS_VFP
 	    then
-		echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+		echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi
 	    else
-		echo ${UNAME_MACHINE}-unknown-linux-gnueabihf
+		echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf
 	    fi
 	fi
 	exit ;;
     avr32*:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     cris:Linux:*:*)
-	echo ${UNAME_MACHINE}-axis-linux-gnu
+	echo "$UNAME_MACHINE"-axis-linux-"$LIBC"
 	exit ;;
     crisv32:Linux:*:*)
-	echo ${UNAME_MACHINE}-axis-linux-gnu
+	echo "$UNAME_MACHINE"-axis-linux-"$LIBC"
+	exit ;;
+    e2k:Linux:*:*)
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     frv:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     hexagon:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     i*86:Linux:*:*)
-	LIBC=gnu
-	eval $set_cc_for_build
-	sed 's/^	//' << EOF >$dummy.c
-	#ifdef __dietlibc__
-	LIBC=dietlibc
-	#endif
-EOF
-	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
-	echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+	echo "$UNAME_MACHINE"-pc-linux-"$LIBC"
 	exit ;;
     ia64:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+	exit ;;
+    k1om:Linux:*:*)
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     m32r*:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     m68*:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     mips:Linux:*:* | mips64:Linux:*:*)
-	eval $set_cc_for_build
-	sed 's/^	//' << EOF >$dummy.c
+	set_cc_for_build
+	sed 's/^	//' << EOF > "$dummy.c"
 	#undef CPU
 	#undef ${UNAME_MACHINE}
 	#undef ${UNAME_MACHINE}el
@@ -956,55 +995,70 @@ EOF
 	#endif
 	#endif
 EOF
-	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
-	test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+	eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU'`"
+	test "x$CPU" != x && { echo "$CPU-unknown-linux-$LIBC"; exit; }
 	;;
-    or32:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+    mips64el:Linux:*:*)
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+	exit ;;
+    openrisc*:Linux:*:*)
+	echo or1k-unknown-linux-"$LIBC"
+	exit ;;
+    or32:Linux:*:* | or1k*:Linux:*:*)
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     padre:Linux:*:*)
-	echo sparc-unknown-linux-gnu
+	echo sparc-unknown-linux-"$LIBC"
 	exit ;;
     parisc64:Linux:*:* | hppa64:Linux:*:*)
-	echo hppa64-unknown-linux-gnu
+	echo hppa64-unknown-linux-"$LIBC"
 	exit ;;
     parisc:Linux:*:* | hppa:Linux:*:*)
 	# Look for CPU level
 	case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
-	  PA7*) echo hppa1.1-unknown-linux-gnu ;;
-	  PA8*) echo hppa2.0-unknown-linux-gnu ;;
-	  *)    echo hppa-unknown-linux-gnu ;;
+	  PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;;
+	  PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;;
+	  *)    echo hppa-unknown-linux-"$LIBC" ;;
 	esac
 	exit ;;
     ppc64:Linux:*:*)
-	echo powerpc64-unknown-linux-gnu
+	echo powerpc64-unknown-linux-"$LIBC"
 	exit ;;
     ppc:Linux:*:*)
-	echo powerpc-unknown-linux-gnu
+	echo powerpc-unknown-linux-"$LIBC"
+	exit ;;
+    ppc64le:Linux:*:*)
+	echo powerpc64le-unknown-linux-"$LIBC"
+	exit ;;
+    ppcle:Linux:*:*)
+	echo powerpcle-unknown-linux-"$LIBC"
+	exit ;;
+    riscv32:Linux:*:* | riscv64:Linux:*:*)
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     s390:Linux:*:* | s390x:Linux:*:*)
-	echo ${UNAME_MACHINE}-ibm-linux
+	echo "$UNAME_MACHINE"-ibm-linux-"$LIBC"
 	exit ;;
     sh64*:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     sh*:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     sparc:Linux:*:* | sparc64:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     tile*:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     vax:Linux:*:*)
-	echo ${UNAME_MACHINE}-dec-linux-gnu
+	echo "$UNAME_MACHINE"-dec-linux-"$LIBC"
 	exit ;;
     x86_64:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-pc-linux-"$LIBC"
 	exit ;;
     xtensa*:Linux:*:*)
-	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     i*86:DYNIX/ptx:4*:*)
 	# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
@@ -1018,34 +1072,34 @@ EOF
 	# I am not positive that other SVR4 systems won't match this,
 	# I just have to hope.  -- rms.
 	# Use sysv4.2uw... so that sysv4* matches it.
-	echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+	echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION"
 	exit ;;
     i*86:OS/2:*:*)
 	# If we were able to find `uname', then EMX Unix compatibility
 	# is probably installed.
-	echo ${UNAME_MACHINE}-pc-os2-emx
+	echo "$UNAME_MACHINE"-pc-os2-emx
 	exit ;;
     i*86:XTS-300:*:STOP)
-	echo ${UNAME_MACHINE}-unknown-stop
+	echo "$UNAME_MACHINE"-unknown-stop
 	exit ;;
     i*86:atheos:*:*)
-	echo ${UNAME_MACHINE}-unknown-atheos
+	echo "$UNAME_MACHINE"-unknown-atheos
 	exit ;;
     i*86:syllable:*:*)
-	echo ${UNAME_MACHINE}-pc-syllable
+	echo "$UNAME_MACHINE"-pc-syllable
 	exit ;;
     i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
-	echo i386-unknown-lynxos${UNAME_RELEASE}
+	echo i386-unknown-lynxos"$UNAME_RELEASE"
 	exit ;;
     i*86:*DOS:*:*)
-	echo ${UNAME_MACHINE}-pc-msdosdjgpp
+	echo "$UNAME_MACHINE"-pc-msdosdjgpp
 	exit ;;
-    i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
-	UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+    i*86:*:4.*:*)
+	UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'`
 	if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
-		echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+		echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL"
 	else
-		echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+		echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL"
 	fi
 	exit ;;
     i*86:*:5:[678]*)
@@ -1055,12 +1109,12 @@ EOF
 	    *Pentium)	     UNAME_MACHINE=i586 ;;
 	    *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
 	esac
-	echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+	echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}{$UNAME_VERSION}"
 	exit ;;
     i*86:*:3.2:*)
 	if test -f /usr/options/cb.name; then
 		UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
-		echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+		echo "$UNAME_MACHINE"-pc-isc"$UNAME_REL"
 	elif /bin/uname -X 2>/dev/null >/dev/null ; then
 		UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
 		(/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
@@ -1070,9 +1124,9 @@ EOF
 			&& UNAME_MACHINE=i686
 		(/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
 			&& UNAME_MACHINE=i686
-		echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+		echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL"
 	else
-		echo ${UNAME_MACHINE}-pc-sysv32
+		echo "$UNAME_MACHINE"-pc-sysv32
 	fi
 	exit ;;
     pc:*:*:*)
@@ -1080,7 +1134,7 @@ EOF
 	# uname -m prints for DJGPP always 'pc', but it prints nothing about
 	# the processor, so we play safe by assuming i586.
 	# Note: whatever this is, it MUST be the same as what config.sub
-	# prints for the "djgpp" host, or else GDB configury will decide that
+	# prints for the "djgpp" host, or else GDB configure will decide that
 	# this is a cross-build.
 	echo i586-pc-msdosdjgpp
 	exit ;;
@@ -1092,9 +1146,9 @@ EOF
 	exit ;;
     i860:*:4.*:*) # i860-SVR4
 	if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
-	  echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+	  echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4
 	else # Add other i860-SVR4 vendors below as they are discovered.
-	  echo i860-unknown-sysv${UNAME_RELEASE}  # Unknown i860-SVR4
+	  echo i860-unknown-sysv"$UNAME_RELEASE"  # Unknown i860-SVR4
 	fi
 	exit ;;
     mini*:CTIX:SYS*5:*)
@@ -1114,9 +1168,9 @@ EOF
 	test -r /etc/.relid \
 	&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
 	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
-	  && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+	  && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
 	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
-	  && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+	  && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;;
     3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
 	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
 	  && { echo i486-ncr-sysv4; exit; } ;;
@@ -1125,28 +1179,28 @@ EOF
 	test -r /etc/.relid \
 	    && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
 	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
-	    && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+	    && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
 	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
-	    && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+	    && { echo i586-ncr-sysv4.3"$OS_REL"; exit; }
 	/bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
-	    && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+	    && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;;
     m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
-	echo m68k-unknown-lynxos${UNAME_RELEASE}
+	echo m68k-unknown-lynxos"$UNAME_RELEASE"
 	exit ;;
     mc68030:UNIX_System_V:4.*:*)
 	echo m68k-atari-sysv4
 	exit ;;
     TSUNAMI:LynxOS:2.*:*)
-	echo sparc-unknown-lynxos${UNAME_RELEASE}
+	echo sparc-unknown-lynxos"$UNAME_RELEASE"
 	exit ;;
     rs6000:LynxOS:2.*:*)
-	echo rs6000-unknown-lynxos${UNAME_RELEASE}
+	echo rs6000-unknown-lynxos"$UNAME_RELEASE"
 	exit ;;
     PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
-	echo powerpc-unknown-lynxos${UNAME_RELEASE}
+	echo powerpc-unknown-lynxos"$UNAME_RELEASE"
 	exit ;;
     SM[BE]S:UNIX_SV:*:*)
-	echo mips-dde-sysv${UNAME_RELEASE}
+	echo mips-dde-sysv"$UNAME_RELEASE"
 	exit ;;
     RM*:ReliantUNIX-*:*:*)
 	echo mips-sni-sysv4
@@ -1157,7 +1211,7 @@ EOF
     *:SINIX-*:*:*)
 	if uname -p 2>/dev/null >/dev/null ; then
 		UNAME_MACHINE=`(uname -p) 2>/dev/null`
-		echo ${UNAME_MACHINE}-sni-sysv4
+		echo "$UNAME_MACHINE"-sni-sysv4
 	else
 		echo ns32k-sni-sysv
 	fi
@@ -1177,23 +1231,23 @@ EOF
 	exit ;;
     i*86:VOS:*:*)
 	# From Paul.Green@stratus.com.
-	echo ${UNAME_MACHINE}-stratus-vos
+	echo "$UNAME_MACHINE"-stratus-vos
 	exit ;;
     *:VOS:*:*)
 	# From Paul.Green@stratus.com.
 	echo hppa1.1-stratus-vos
 	exit ;;
     mc68*:A/UX:*:*)
-	echo m68k-apple-aux${UNAME_RELEASE}
+	echo m68k-apple-aux"$UNAME_RELEASE"
 	exit ;;
     news*:NEWS-OS:6*:*)
 	echo mips-sony-newsos6
 	exit ;;
     R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
 	if [ -d /usr/nec ]; then
-		echo mips-nec-sysv${UNAME_RELEASE}
+		echo mips-nec-sysv"$UNAME_RELEASE"
 	else
-		echo mips-unknown-sysv${UNAME_RELEASE}
+		echo mips-unknown-sysv"$UNAME_RELEASE"
 	fi
 	exit ;;
     BeBox:BeOS:*:*)	# BeOS running on hardware made by Be, PPC only.
@@ -1212,65 +1266,93 @@ EOF
 	echo x86_64-unknown-haiku
 	exit ;;
     SX-4:SUPER-UX:*:*)
-	echo sx4-nec-superux${UNAME_RELEASE}
+	echo sx4-nec-superux"$UNAME_RELEASE"
 	exit ;;
     SX-5:SUPER-UX:*:*)
-	echo sx5-nec-superux${UNAME_RELEASE}
+	echo sx5-nec-superux"$UNAME_RELEASE"
 	exit ;;
     SX-6:SUPER-UX:*:*)
-	echo sx6-nec-superux${UNAME_RELEASE}
+	echo sx6-nec-superux"$UNAME_RELEASE"
 	exit ;;
     SX-7:SUPER-UX:*:*)
-	echo sx7-nec-superux${UNAME_RELEASE}
+	echo sx7-nec-superux"$UNAME_RELEASE"
 	exit ;;
     SX-8:SUPER-UX:*:*)
-	echo sx8-nec-superux${UNAME_RELEASE}
+	echo sx8-nec-superux"$UNAME_RELEASE"
 	exit ;;
     SX-8R:SUPER-UX:*:*)
-	echo sx8r-nec-superux${UNAME_RELEASE}
+	echo sx8r-nec-superux"$UNAME_RELEASE"
+	exit ;;
+    SX-ACE:SUPER-UX:*:*)
+	echo sxace-nec-superux"$UNAME_RELEASE"
 	exit ;;
     Power*:Rhapsody:*:*)
-	echo powerpc-apple-rhapsody${UNAME_RELEASE}
+	echo powerpc-apple-rhapsody"$UNAME_RELEASE"
 	exit ;;
     *:Rhapsody:*:*)
-	echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+	echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE"
 	exit ;;
     *:Darwin:*:*)
 	UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
-	case $UNAME_PROCESSOR in
-	    i386)
-		eval $set_cc_for_build
-		if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
-		  if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
-		      (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
-		      grep IS_64BIT_ARCH >/dev/null
-		  then
-		      UNAME_PROCESSOR="x86_64"
-		  fi
-		fi ;;
-	    unknown) UNAME_PROCESSOR=powerpc ;;
-	esac
-	echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+	set_cc_for_build
+	if test "$UNAME_PROCESSOR" = unknown ; then
+	    UNAME_PROCESSOR=powerpc
+	fi
+	if test "`echo "$UNAME_RELEASE" | sed -e 's/\..*//'`" -le 10 ; then
+	    if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
+		if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+		       (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+		       grep IS_64BIT_ARCH >/dev/null
+		then
+		    case $UNAME_PROCESSOR in
+			i386) UNAME_PROCESSOR=x86_64 ;;
+			powerpc) UNAME_PROCESSOR=powerpc64 ;;
+		    esac
+		fi
+		# On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc
+		if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \
+		       (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+		       grep IS_PPC >/dev/null
+		then
+		    UNAME_PROCESSOR=powerpc
+		fi
+	    fi
+	elif test "$UNAME_PROCESSOR" = i386 ; then
+	    # Avoid executing cc on OS X 10.9, as it ships with a stub
+	    # that puts up a graphical alert prompting to install
+	    # developer tools.  Any system running Mac OS X 10.7 or
+	    # later (Darwin 11 and later) is required to have a 64-bit
+	    # processor. This is not true of the ARM version of Darwin
+	    # that Apple uses in portable devices.
+	    UNAME_PROCESSOR=x86_64
+	fi
+	echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE"
 	exit ;;
     *:procnto*:*:* | *:QNX:[0123456789]*:*)
 	UNAME_PROCESSOR=`uname -p`
-	if test "$UNAME_PROCESSOR" = "x86"; then
+	if test "$UNAME_PROCESSOR" = x86; then
 		UNAME_PROCESSOR=i386
 		UNAME_MACHINE=pc
 	fi
-	echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+	echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE"
 	exit ;;
     *:QNX:*:4*)
 	echo i386-pc-qnx
 	exit ;;
-    NEO-?:NONSTOP_KERNEL:*:*)
-	echo neo-tandem-nsk${UNAME_RELEASE}
+    NEO-*:NONSTOP_KERNEL:*:*)
+	echo neo-tandem-nsk"$UNAME_RELEASE"
 	exit ;;
     NSE-*:NONSTOP_KERNEL:*:*)
-	echo nse-tandem-nsk${UNAME_RELEASE}
+	echo nse-tandem-nsk"$UNAME_RELEASE"
+	exit ;;
+    NSR-*:NONSTOP_KERNEL:*:*)
+	echo nsr-tandem-nsk"$UNAME_RELEASE"
 	exit ;;
-    NSR-?:NONSTOP_KERNEL:*:*)
-	echo nsr-tandem-nsk${UNAME_RELEASE}
+    NSV-*:NONSTOP_KERNEL:*:*)
+	echo nsv-tandem-nsk"$UNAME_RELEASE"
+	exit ;;
+    NSX-*:NONSTOP_KERNEL:*:*)
+	echo nsx-tandem-nsk"$UNAME_RELEASE"
 	exit ;;
     *:NonStop-UX:*:*)
 	echo mips-compaq-nonstopux
@@ -1279,18 +1361,19 @@ EOF
 	echo bs2000-siemens-sysv
 	exit ;;
     DS/*:UNIX_System_V:*:*)
-	echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+	echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE"
 	exit ;;
     *:Plan9:*:*)
 	# "uname -m" is not consistent, so use $cputype instead. 386
 	# is converted to i386 for consistency with other x86
 	# operating systems.
-	if test "$cputype" = "386"; then
+	# shellcheck disable=SC2154
+	if test "$cputype" = 386; then
 	    UNAME_MACHINE=i386
 	else
 	    UNAME_MACHINE="$cputype"
 	fi
-	echo ${UNAME_MACHINE}-unknown-plan9
+	echo "$UNAME_MACHINE"-unknown-plan9
 	exit ;;
     *:TOPS-10:*:*)
 	echo pdp10-unknown-tops10
@@ -1311,14 +1394,14 @@ EOF
 	echo pdp10-unknown-its
 	exit ;;
     SEI:*:*:SEIUX)
-	echo mips-sei-seiux${UNAME_RELEASE}
+	echo mips-sei-seiux"$UNAME_RELEASE"
 	exit ;;
     *:DragonFly:*:*)
-	echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+	echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`"
 	exit ;;
     *:*VMS:*:*)
 	UNAME_MACHINE=`(uname -p) 2>/dev/null`
-	case "${UNAME_MACHINE}" in
+	case "$UNAME_MACHINE" in
 	    A*) echo alpha-dec-vms ; exit ;;
 	    I*) echo ia64-dec-vms ; exit ;;
 	    V*) echo vax-dec-vms ; exit ;;
@@ -1327,182 +1410,48 @@ EOF
 	echo i386-pc-xenix
 	exit ;;
     i*86:skyos:*:*)
-	echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+	echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`"
 	exit ;;
     i*86:rdos:*:*)
-	echo ${UNAME_MACHINE}-pc-rdos
+	echo "$UNAME_MACHINE"-pc-rdos
 	exit ;;
     i*86:AROS:*:*)
-	echo ${UNAME_MACHINE}-pc-aros
+	echo "$UNAME_MACHINE"-pc-aros
 	exit ;;
     x86_64:VMkernel:*:*)
-	echo ${UNAME_MACHINE}-unknown-esx
+	echo "$UNAME_MACHINE"-unknown-esx
+	exit ;;
+    amd64:Isilon\ OneFS:*:*)
+	echo x86_64-unknown-onefs
 	exit ;;
 esac
 
-eval $set_cc_for_build
-cat >$dummy.c <<EOF
-#ifdef _SEQUENT_
-# include <sys/types.h>
-# include <sys/utsname.h>
-#endif
-main ()
-{
-#if defined (sony)
-#if defined (MIPSEB)
-  /* BFD wants "bsd" instead of "newsos".  Perhaps BFD should be changed,
-     I don't know....  */
-  printf ("mips-sony-bsd\n"); exit (0);
-#else
-#include <sys/param.h>
-  printf ("m68k-sony-newsos%s\n",
-#ifdef NEWSOS4
-	"4"
-#else
-	""
-#endif
-	); exit (0);
-#endif
-#endif
-
-#if defined (__arm) && defined (__acorn) && defined (__unix)
-  printf ("arm-acorn-riscix\n"); exit (0);
-#endif
+echo "$0: unable to guess system type" >&2
 
-#if defined (hp300) && !defined (hpux)
-  printf ("m68k-hp-bsd\n"); exit (0);
-#endif
+case "$UNAME_MACHINE:$UNAME_SYSTEM" in
+    mips:Linux | mips64:Linux)
+	# If we got here on MIPS GNU/Linux, output extra information.
+	cat >&2 <<EOF
 
-#if defined (NeXT)
-#if !defined (__ARCHITECTURE__)
-#define __ARCHITECTURE__ "m68k"
-#endif
-  int version;
-  version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
-  if (version < 4)
-    printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
-  else
-    printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
-  exit (0);
-#endif
-
-#if defined (MULTIMAX) || defined (n16)
-#if defined (UMAXV)
-  printf ("ns32k-encore-sysv\n"); exit (0);
-#else
-#if defined (CMU)
-  printf ("ns32k-encore-mach\n"); exit (0);
-#else
-  printf ("ns32k-encore-bsd\n"); exit (0);
-#endif
-#endif
-#endif
-
-#if defined (__386BSD__)
-  printf ("i386-pc-bsd\n"); exit (0);
-#endif
-
-#if defined (sequent)
-#if defined (i386)
-  printf ("i386-sequent-dynix\n"); exit (0);
-#endif
-#if defined (ns32000)
-  printf ("ns32k-sequent-dynix\n"); exit (0);
-#endif
-#endif
-
-#if defined (_SEQUENT_)
-    struct utsname un;
-
-    uname(&un);
-
-    if (strncmp(un.version, "V2", 2) == 0) {
-	printf ("i386-sequent-ptx2\n"); exit (0);
-    }
-    if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
-	printf ("i386-sequent-ptx1\n"); exit (0);
-    }
-    printf ("i386-sequent-ptx\n"); exit (0);
-
-#endif
-
-#if defined (vax)
-# if !defined (ultrix)
-#  include <sys/param.h>
-#  if defined (BSD)
-#   if BSD == 43
-      printf ("vax-dec-bsd4.3\n"); exit (0);
-#   else
-#    if BSD == 199006
-      printf ("vax-dec-bsd4.3reno\n"); exit (0);
-#    else
-      printf ("vax-dec-bsd\n"); exit (0);
-#    endif
-#   endif
-#  else
-    printf ("vax-dec-bsd\n"); exit (0);
-#  endif
-# else
-    printf ("vax-dec-ultrix\n"); exit (0);
-# endif
-#endif
-
-#if defined (alliant) && defined (i860)
-  printf ("i860-alliant-bsd\n"); exit (0);
-#endif
-
-  exit (1);
-}
+NOTE: MIPS GNU/Linux systems require a C compiler to fully recognize
+the system type. Please install a C compiler and try again.
 EOF
-
-$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
-	{ echo "$SYSTEM_NAME"; exit; }
-
-# Apollos put the system type in the environment.
-
-test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
-
-# Convex versions that predate uname can use getsysinfo(1)
-
-if [ -x /usr/convex/getsysinfo ]
-then
-    case `getsysinfo -f cpu_type` in
-    c1*)
-	echo c1-convex-bsd
-	exit ;;
-    c2*)
-	if getsysinfo -f scalar_acc
-	then echo c32-convex-bsd
-	else echo c2-convex-bsd
-	fi
-	exit ;;
-    c34*)
-	echo c34-convex-bsd
-	exit ;;
-    c38*)
-	echo c38-convex-bsd
-	exit ;;
-    c4*)
-	echo c4-convex-bsd
-	exit ;;
-    esac
-fi
+	;;
+esac
 
 cat >&2 <<EOF
-$0: unable to guess system type
 
-This script, last modified $timestamp, has failed to recognize
-the operating system you are using. It is advised that you
-download the most up to date version of the config scripts from
+This script (version $timestamp), has failed to recognize the
+operating system you are using. If your script is old, overwrite *all*
+copies of config.guess and config.sub with the latest versions from:
 
-  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+  https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
 and
-  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+  https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
 
-If the version you run ($0) is already up to date, please
-send the following data and any information you think might be
-pertinent to <config-patches@gnu.org> in order to provide the needed
-information to handle your system.
+If $0 has already been updated, send the following data and any
+information you think might be pertinent to config-patches@gnu.org to
+provide the necessary information to handle your system.
 
 config.guess timestamp = $timestamp
 
@@ -1521,16 +1470,16 @@ hostinfo               = `(hostinfo) 2>/dev/null`
 /usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null`
 /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
 
-UNAME_MACHINE = ${UNAME_MACHINE}
-UNAME_RELEASE = ${UNAME_RELEASE}
-UNAME_SYSTEM  = ${UNAME_SYSTEM}
-UNAME_VERSION = ${UNAME_VERSION}
+UNAME_MACHINE = "$UNAME_MACHINE"
+UNAME_RELEASE = "$UNAME_RELEASE"
+UNAME_SYSTEM  = "$UNAME_SYSTEM"
+UNAME_VERSION = "$UNAME_VERSION"
 EOF
 
 exit 1
 
 # Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
+# eval: (add-hook 'before-save-hook 'time-stamp)
 # time-stamp-start: "timestamp='"
 # time-stamp-format: "%:y-%02m-%02d"
 # time-stamp-end: "'"
diff --git a/config/config.sub b/config/config.sub
index bdda9e4a32c1..c19e671805a8 100755
--- a/config/config.sub
+++ b/config/config.sub
@@ -1,36 +1,31 @@
 #! /bin/sh
 # Configuration validation subroutine script.
-#   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-#   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-#   2011, 2012 Free Software Foundation, Inc.
+#   Copyright 1992-2018 Free Software Foundation, Inc.
 
-timestamp='2012-08-18'
+timestamp='2018-08-13'
 
-# This file is (in principle) common to ALL GNU software.
-# The presence of a machine in this file suggests that SOME GNU software
-# can handle that machine.  It does not imply ALL GNU software can.
-#
-# This file is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
 # (at your option) any later version.
 #
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
 #
 # As a special exception to the GNU General Public License, if you
 # distribute this file as part of a program that contains a
 # configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
+# the same distribution terms that you use for the rest of that
+# program.  This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
 
 
-# Please send patches to <config-patches@gnu.org>.  Submit a context
-# diff and a properly formatted GNU ChangeLog entry.
+# Please send patches to <config-patches@gnu.org>.
 #
 # Configuration subroutine to validate and canonicalize a configuration type.
 # Supply the specified configuration type as an argument.
@@ -38,7 +33,7 @@ timestamp='2012-08-18'
 # Otherwise, we print the canonical config type on stdout and succeed.
 
 # You can get the latest version of this script from:
-# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
 
 # This file is supposed to be the same for all GNU packages
 # and recognize all the CPU types, system types and aliases
@@ -58,12 +53,11 @@ timestamp='2012-08-18'
 me=`echo "$0" | sed -e 's,.*/,,'`
 
 usage="\
-Usage: $0 [OPTION] CPU-MFR-OPSYS
-       $0 [OPTION] ALIAS
+Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS
 
 Canonicalize a configuration name.
 
-Operation modes:
+Options:
   -h, --help         print this help, then exit
   -t, --time-stamp   print date of last modification, then exit
   -v, --version      print version number, then exit
@@ -73,9 +67,7 @@ Report bugs and patches to <config-patches@gnu.org>."
 version="\
 GNU config.sub ($timestamp)
 
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
-2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
-Free Software Foundation, Inc.
+Copyright 1992-2018 Free Software Foundation, Inc.
 
 This is free software; see the source for copying conditions.  There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -102,7 +94,7 @@ while test $# -gt 0 ; do
 
     *local*)
        # First pass through any local machine types.
-       echo $1
+       echo "$1"
        exit ;;
 
     * )
@@ -118,162 +110,596 @@ case $# in
     exit 1;;
 esac
 
-# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
-# Here we must recognize all the valid KERNEL-OS combinations.
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
-case $maybe_os in
-  nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
-  linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
-  knetbsd*-gnu* | netbsd*-gnu* | \
-  kopensolaris*-gnu* | \
-  storm-chaos* | os2-emx* | rtmk-nova*)
-    os=-$maybe_os
-    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
-    ;;
-  android-linux)
-    os=-linux-android
-    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
-    ;;
-  *)
-    basic_machine=`echo $1 | sed 's/-[^-]*$//'`
-    if [ $basic_machine != $1 ]
-    then os=`echo $1 | sed 's/.*-/-/'`
-    else os=; fi
-    ;;
-esac
+# Split fields of configuration type
+IFS="-" read -r field1 field2 field3 field4 <<EOF
+$1
+EOF
 
-### Let's recognize common machines as not being operating systems so
-### that things like config.sub decstation-3100 work.  We also
-### recognize some manufacturers as not being operating systems, so we
-### can provide default operating systems below.
-case $os in
-	-sun*os*)
-		# Prevent following clause from handling this invalid input.
-		;;
-	-dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
-	-att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
-	-unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
-	-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
-	-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
-	-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
-	-apple | -axis | -knuth | -cray | -microblaze)
-		os=
-		basic_machine=$1
-		;;
-	-bluegene*)
-		os=-cnk
-		;;
-	-sim | -cisco | -oki | -wec | -winbond)
-		os=
-		basic_machine=$1
-		;;
-	-scout)
+# Separate into logical components for further validation
+case $1 in
+	*-*-*-*-*)
+		echo Invalid configuration \`"$1"\': more than four components >&2
+		exit 1
 		;;
-	-wrs)
-		os=-vxworks
-		basic_machine=$1
+	*-*-*-*)
+		basic_machine=$field1-$field2
+		os=$field3-$field4
 		;;
-	-chorusos*)
-		os=-chorusos
-		basic_machine=$1
+	*-*-*)
+		# Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two
+		# parts
+		maybe_os=$field2-$field3
+		case $maybe_os in
+			nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc \
+			| linux-newlib* | linux-musl* | linux-uclibc* | uclinux-uclibc* \
+			| uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \
+			| netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \
+			| storm-chaos* | os2-emx* | rtmk-nova*)
+				basic_machine=$field1
+				os=$maybe_os
+				;;
+			android-linux)
+				basic_machine=$field1-unknown
+				os=linux-android
+				;;
+			*)
+				basic_machine=$field1-$field2
+				os=$field3
+				;;
+		esac
 		;;
-	-chorusrdb)
-		os=-chorusrdb
-		basic_machine=$1
+	*-*)
+		# Second component is usually, but not always the OS
+		case $field2 in
+			# Prevent following clause from handling this valid os
+			sun*os*)
+				basic_machine=$field1
+				os=$field2
+				;;
+			# Manufacturers
+			dec* | mips* | sequent* | encore* | pc532* | sgi* | sony* \
+			| att* | 7300* | 3300* | delta* | motorola* | sun[234]* \
+			| unicom* | ibm* | next | hp | isi* | apollo | altos* \
+			| convergent* | ncr* | news | 32* | 3600* | 3100* | hitachi* \
+			| c[123]* | convex* | sun | crds | omron* | dg | ultra | tti* \
+			| harris | dolphin | highlevel | gould | cbm | ns | masscomp \
+			| apple | axis | knuth | cray | microblaze* \
+			| sim | cisco | oki | wec | wrs | winbond)
+				basic_machine=$field1-$field2
+				os=
+				;;
+			*)
+				basic_machine=$field1
+				os=$field2
+				;;
+		esac
 		;;
-	-hiux*)
-		os=-hiuxwe2
+	*)
+		# Convert single-component short-hands not valid as part of
+		# multi-component configurations.
+		case $field1 in
+			386bsd)
+				basic_machine=i386-pc
+				os=bsd
+				;;
+			a29khif)
+				basic_machine=a29k-amd
+				os=udi
+				;;
+			adobe68k)
+				basic_machine=m68010-adobe
+				os=scout
+				;;
+			alliant)
+				basic_machine=fx80-alliant
+				os=
+				;;
+			altos | altos3068)
+				basic_machine=m68k-altos
+				os=
+				;;
+			am29k)
+				basic_machine=a29k-none
+				os=bsd
+				;;
+			amdahl)
+				basic_machine=580-amdahl
+				os=sysv
+				;;
+			amigaos | amigados)
+				basic_machine=m68k-unknown
+				os=amigaos
+				;;
+			amigaunix | amix)
+				basic_machine=m68k-unknown
+				os=sysv4
+				;;
+			apollo68)
+				basic_machine=m68k-apollo
+				os=sysv
+				;;
+			apollo68bsd)
+				basic_machine=m68k-apollo
+				os=bsd
+				;;
+			aros)
+				basic_machine=i386-pc
+				os=aros
+				;;
+			aux)
+				basic_machine=m68k-apple
+				os=aux
+				;;
+			balance)
+				basic_machine=ns32k-sequent
+				os=dynix
+				;;
+			blackfin)
+				basic_machine=bfin-unknown
+				os=linux
+				;;
+			cegcc)
+				basic_machine=arm-unknown
+				os=cegcc
+				;;
+			convex-c1)
+				basic_machine=c1-convex
+				os=bsd
+				;;
+			convex-c2)
+				basic_machine=c2-convex
+				os=bsd
+				;;
+			convex-c32)
+				basic_machine=c32-convex
+				os=bsd
+				;;
+			convex-c34)
+				basic_machine=c34-convex
+				os=bsd
+				;;
+			convex-c38)
+				basic_machine=c38-convex
+				os=bsd
+				;;
+			cray)
+				basic_machine=j90-cray
+				os=unicos
+				;;
+			crds | unos)
+				basic_machine=m68k-crds
+				os=
+				;;
+			delta88)
+				basic_machine=m88k-motorola
+				os=sysv3
+				;;
+			dicos)
+				basic_machine=i686-pc
+				os=dicos
+				;;
+			djgpp)
+				basic_machine=i586-pc
+				os=msdosdjgpp
+				;;
+			ebmon29k)
+				basic_machine=a29k-amd
+				os=ebmon
+				;;
+			es1800 | OSE68k | ose68k | ose | OSE)
+				basic_machine=m68k-ericsson
+				os=ose
+				;;
+			gmicro)
+				basic_machine=tron-gmicro
+				os=sysv
+				;;
+			go32)
+				basic_machine=i386-pc
+				os=go32
+				;;
+			h8300hms)
+				basic_machine=h8300-hitachi
+				os=hms
+				;;
+			h8300xray)
+				basic_machine=h8300-hitachi
+				os=xray
+				;;
+			h8500hms)
+				basic_machine=h8500-hitachi
+				os=hms
+				;;
+			harris)
+				basic_machine=m88k-harris
+				os=sysv3
+				;;
+			hp300bsd)
+				basic_machine=m68k-hp
+				os=bsd
+				;;
+			hp300hpux)
+				basic_machine=m68k-hp
+				os=hpux
+				;;
+			hppaosf)
+				basic_machine=hppa1.1-hp
+				os=osf
+				;;
+			hppro)
+				basic_machine=hppa1.1-hp
+				os=proelf
+				;;
+			i386mach)
+				basic_machine=i386-mach
+				os=mach
+				;;
+			vsta)
+				basic_machine=i386-pc
+				os=vsta
+				;;
+			isi68 | isi)
+				basic_machine=m68k-isi
+				os=sysv
+				;;
+			m68knommu)
+				basic_machine=m68k-unknown
+				os=linux
+				;;
+			magnum | m3230)
+				basic_machine=mips-mips
+				os=sysv
+				;;
+			merlin)
+				basic_machine=ns32k-utek
+				os=sysv
+				;;
+			mingw64)
+				basic_machine=x86_64-pc
+				os=mingw64
+				;;
+			mingw32)
+				basic_machine=i686-pc
+				os=mingw32
+				;;
+			mingw32ce)
+				basic_machine=arm-unknown
+				os=mingw32ce
+				;;
+			monitor)
+				basic_machine=m68k-rom68k
+				os=coff
+				;;
+			morphos)
+				basic_machine=powerpc-unknown
+				os=morphos
+				;;
+			moxiebox)
+				basic_machine=moxie-unknown
+				os=moxiebox
+				;;
+			msdos)
+				basic_machine=i386-pc
+				os=msdos
+				;;
+			msys)
+				basic_machine=i686-pc
+				os=msys
+				;;
+			mvs)
+				basic_machine=i370-ibm
+				os=mvs
+				;;
+			nacl)
+				basic_machine=le32-unknown
+				os=nacl
+				;;
+			ncr3000)
+				basic_machine=i486-ncr
+				os=sysv4
+				;;
+			netbsd386)
+				basic_machine=i386-pc
+				os=netbsd
+				;;
+			netwinder)
+				basic_machine=armv4l-rebel
+				os=linux
+				;;
+			news | news700 | news800 | news900)
+				basic_machine=m68k-sony
+				os=newsos
+				;;
+			news1000)
+				basic_machine=m68030-sony
+				os=newsos
+				;;
+			necv70)
+				basic_machine=v70-nec
+				os=sysv
+				;;
+			nh3000)
+				basic_machine=m68k-harris
+				os=cxux
+				;;
+			nh[45]000)
+				basic_machine=m88k-harris
+				os=cxux
+				;;
+			nindy960)
+				basic_machine=i960-intel
+				os=nindy
+				;;
+			mon960)
+				basic_machine=i960-intel
+				os=mon960
+				;;
+			nonstopux)
+				basic_machine=mips-compaq
+				os=nonstopux
+				;;
+			os400)
+				basic_machine=powerpc-ibm
+				os=os400
+				;;
+			OSE68000 | ose68000)
+				basic_machine=m68000-ericsson
+				os=ose
+				;;
+			os68k)
+				basic_machine=m68k-none
+				os=os68k
+				;;
+			paragon)
+				basic_machine=i860-intel
+				os=osf
+				;;
+			parisc)
+				basic_machine=hppa-unknown
+				os=linux
+				;;
+			pw32)
+				basic_machine=i586-unknown
+				os=pw32
+				;;
+			rdos | rdos64)
+				basic_machine=x86_64-pc
+				os=rdos
+				;;
+			rdos32)
+				basic_machine=i386-pc
+				os=rdos
+				;;
+			rom68k)
+				basic_machine=m68k-rom68k
+				os=coff
+				;;
+			sa29200)
+				basic_machine=a29k-amd
+				os=udi
+				;;
+			sei)
+				basic_machine=mips-sei
+				os=seiux
+				;;
+			sps7)
+				basic_machine=m68k-bull
+				os=sysv2
+				;;
+			st2000)
+				basic_machine=m68k-tandem
+				os=
+				;;
+			stratus)
+				basic_machine=i860-stratus
+				os=sysv4
+				;;
+			sun2)
+				basic_machine=m68000-sun
+				os=
+				;;
+			sun2os3)
+				basic_machine=m68000-sun
+				os=sunos3
+				;;
+			sun2os4)
+				basic_machine=m68000-sun
+				os=sunos4
+				;;
+			sun3)
+				basic_machine=m68k-sun
+				os=
+				;;
+			sun3os3)
+				basic_machine=m68k-sun
+				os=sunos3
+				;;
+			sun3os4)
+				basic_machine=m68k-sun
+				os=sunos4
+				;;
+			sun4)
+				basic_machine=sparc-sun
+				os=
+				;;
+			sun4os3)
+				basic_machine=sparc-sun
+				os=sunos3
+				;;
+			sun4os4)
+				basic_machine=sparc-sun
+				os=sunos4
+				;;
+			sun4sol2)
+				basic_machine=sparc-sun
+				os=solaris2
+				;;
+			sun386 | sun386i | roadrunner)
+				basic_machine=i386-sun
+				os=
+				;;
+			sv1)
+				basic_machine=sv1-cray
+				os=unicos
+				;;
+			symmetry)
+				basic_machine=i386-sequent
+				os=dynix
+				;;
+			t3e)
+				basic_machine=alphaev5-cray
+				os=unicos
+				;;
+			t90)
+				basic_machine=t90-cray
+				os=unicos
+				;;
+			toad1)
+				basic_machine=pdp10-xkl
+				os=tops20
+				;;
+			tpf)
+				basic_machine=s390x-ibm
+				os=tpf
+				;;
+			udi29k)
+				basic_machine=a29k-amd
+				os=udi
+				;;
+			ultra3)
+				basic_machine=a29k-nyu
+				os=sym1
+				;;
+			v810 | necv810)
+				basic_machine=v810-nec
+				os=none
+				;;
+			vaxv)
+				basic_machine=vax-dec
+				os=sysv
+				;;
+			vms)
+				basic_machine=vax-dec
+				os=vms
+				;;
+			vxworks960)
+				basic_machine=i960-wrs
+				os=vxworks
+				;;
+			vxworks68)
+				basic_machine=m68k-wrs
+				os=vxworks
+				;;
+			vxworks29k)
+				basic_machine=a29k-wrs
+				os=vxworks
+				;;
+			xbox)
+				basic_machine=i686-pc
+				os=mingw32
+				;;
+			ymp)
+				basic_machine=ymp-cray
+				os=unicos
+				;;
+			*)
+				basic_machine=$1
+				os=
+				;;
+		esac
 		;;
-	-sco6)
-		os=-sco5v6
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+	# Here we handle the default manufacturer of certain CPU types.  It is in
+	# some cases the only manufacturer, in others, it is the most popular.
+	craynv)
+		basic_machine=craynv-cray
+		os=${os:-unicosmp}
 		;;
-	-sco5)
-		os=-sco3.2v5
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+	fx80)
+		basic_machine=fx80-alliant
 		;;
-	-sco4)
-		os=-sco3.2v4
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+	w89k)
+		basic_machine=hppa1.1-winbond
 		;;
-	-sco3.2.[4-9]*)
-		os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+	op50n)
+		basic_machine=hppa1.1-oki
 		;;
-	-sco3.2v[4-9]*)
-		# Don't forget version if it is 3.2v4 or newer.
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+	op60c)
+		basic_machine=hppa1.1-oki
 		;;
-	-sco5v6*)
-		# Don't forget version if it is 3.2v4 or newer.
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+	romp)
+		basic_machine=romp-ibm
 		;;
-	-sco*)
-		os=-sco3.2v2
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+	mmix)
+		basic_machine=mmix-knuth
 		;;
-	-udk*)
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+	rs6000)
+		basic_machine=rs6000-ibm
 		;;
-	-isc)
-		os=-isc2.2
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+	vax)
+		basic_machine=vax-dec
 		;;
-	-clix*)
-		basic_machine=clipper-intergraph
+	pdp11)
+		basic_machine=pdp11-dec
 		;;
-	-isc*)
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+	we32k)
+		basic_machine=we32k-att
 		;;
-	-lynx*178)
-		os=-lynxos178
+	cydra)
+		basic_machine=cydra-cydrome
 		;;
-	-lynx*5)
-		os=-lynxos5
+	i370-ibm* | ibm*)
+		basic_machine=i370-ibm
 		;;
-	-lynx*)
-		os=-lynxos
+	orion)
+		basic_machine=orion-highlevel
 		;;
-	-ptx*)
-		basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+	orion105)
+		basic_machine=clipper-highlevel
 		;;
-	-windowsnt*)
-		os=`echo $os | sed -e 's/windowsnt/winnt/'`
+	mac | mpw | mac-mpw)
+		basic_machine=m68k-apple
 		;;
-	-psos*)
-		os=-psos
+	pmac | pmac-mpw)
+		basic_machine=powerpc-apple
 		;;
-	-mint | -mint[0-9]*)
-		basic_machine=m68k-atari
-		os=-mint
+	xps | xps100)
+		basic_machine=xps100-honeywell
 		;;
-esac
 
-# Decode aliases for certain CPU-COMPANY combinations.
-case $basic_machine in
 	# Recognize the basic CPU types without company name.
 	# Some are omitted here because they have special meanings below.
 	1750a | 580 \
 	| a29k \
 	| aarch64 | aarch64_be \
+	| abacus \
 	| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
 	| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
 	| am33_2.0 \
-	| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
-        | be32 | be64 \
+	| arc | arceb \
+	| arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv6m | armv[78][arm] \
+	| avr | avr32 \
+	| asmjs \
+	| ba \
+	| be32 | be64 \
 	| bfin \
-	| c4x | clipper \
+	| c4x | c8051 | clipper | csky \
 	| d10v | d30v | dlx | dsp16xx \
-	| epiphany \
-	| fido | fr30 | frv \
+	| e2k | epiphany \
+	| fido | fr30 | frv | ft32 \
 	| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
 	| hexagon \
-	| i370 | i860 | i960 | ia64 \
+	| i370 | i860 | i960 | ia16 | ia64 \
 	| ip2k | iq2000 \
+	| k1om \
 	| le32 | le64 \
 	| lm32 \
 	| m32c | m32r | m32rle | m68000 | m68k | m88k \
-	| maxq | mb | microblaze | mcore | mep | metag \
+	| m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip \
+	| maxq | mb | microblaze | microblazeel | mcore | mep | metag \
 	| mips | mipsbe | mipseb | mipsel | mipsle \
 	| mips16 \
 	| mips64 | mips64el \
@@ -287,26 +713,31 @@ case $basic_machine in
 	| mips64vr5900 | mips64vr5900el \
 	| mipsisa32 | mipsisa32el \
 	| mipsisa32r2 | mipsisa32r2el \
+	| mipsisa32r6 | mipsisa32r6el \
 	| mipsisa64 | mipsisa64el \
 	| mipsisa64r2 | mipsisa64r2el \
+	| mipsisa64r6 | mipsisa64r6el \
 	| mipsisa64sb1 | mipsisa64sb1el \
 	| mipsisa64sr71k | mipsisa64sr71kel \
+	| mipsr5900 | mipsr5900el \
 	| mipstx39 | mipstx39el \
 	| mn10200 | mn10300 \
 	| moxie \
 	| mt \
 	| msp430 \
 	| nds32 | nds32le | nds32be \
-	| nios | nios2 \
+	| nfp \
+	| nios | nios2 | nios2eb | nios2el \
 	| ns16k | ns32k \
-	| open8 \
-	| or32 \
-	| pdp10 | pdp11 | pj | pjl \
+	| open8 | or1k | or1knd | or32 \
+	| pdp10 | pj | pjl \
 	| powerpc | powerpc64 | powerpc64le | powerpcle \
+	| pru \
 	| pyramid \
+	| riscv | riscv32 | riscv64 \
 	| rl78 | rx \
 	| score \
-	| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+	| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh[23]ele \
 	| sh64 | sh64le \
 	| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
 	| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
@@ -314,8 +745,9 @@ case $basic_machine in
 	| tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
 	| ubicom32 \
 	| v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
-	| we32k \
-	| x86 | xc16x | xstormy16 | xtensa \
+	| visium \
+	| wasm32 \
+	| x86 | xc16x | xstormy16 | xgate | xtensa \
 	| z8k | z80)
 		basic_machine=$basic_machine-unknown
 		;;
@@ -328,23 +760,23 @@ case $basic_machine in
 	c6x)
 		basic_machine=tic6x-unknown
 		;;
-	m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip)
-		basic_machine=$basic_machine-unknown
-		os=-none
+	leon|leon[3-9])
+		basic_machine=sparc-$basic_machine
+		;;
+	m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65)
+		;;
+	m9s12z | m68hcs12z | hcs12z | s12z)
+		basic_machine=s12z-unknown
 		;;
-	m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+	m9s12z-* | m68hcs12z-* | hcs12z-* | s12z-*)
+		basic_machine=s12z-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	ms1)
 		basic_machine=mt-unknown
 		;;
-
 	strongarm | thumb | xscale)
 		basic_machine=arm-unknown
 		;;
-	xgate)
-		basic_machine=$basic_machine-unknown
-		os=-none
-		;;
 	xscaleeb)
 		basic_machine=armeb-unknown
 		;;
@@ -359,37 +791,40 @@ case $basic_machine in
 	i*86 | x86_64)
 	  basic_machine=$basic_machine-pc
 	  ;;
-	# Object if more than one company name word.
-	*-*-*)
-		echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
-		exit 1
-		;;
 	# Recognize the basic CPU types with company name.
-	580-* \
+	1750a-* | 580-* \
 	| a29k-* \
 	| aarch64-* | aarch64_be-* \
+	| abacus-* \
 	| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
 	| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
-	| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
-	| arm-*  | armbe-* | armle-* | armeb-* | armv*-* \
+	| alphapca5[67]-* | alpha64pca5[67]-* \
+	| am33_2.0-* \
+	| arc-* | arceb-* \
+	| arm-*  | arm[lb]e-* | arme[lb]-* | armv*-* \
 	| avr-* | avr32-* \
+	| asmjs-* \
+	| ba-* \
 	| be32-* | be64-* \
 	| bfin-* | bs2000-* \
 	| c[123]* | c30-* | [cjt]90-* | c4x-* \
-	| clipper-* | craynv-* | cydra-* \
-	| d10v-* | d30v-* | dlx-* \
-	| elxsi-* \
-	| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+	| c8051-* | clipper-* | craynv-* | csky-* | cydra-* \
+	| d10v-* | d30v-* | dlx-* | dsp16xx-* \
+	| e2k-* | elxsi-* | epiphany-* \
+	| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | ft32-* | fx80-* \
 	| h8300-* | h8500-* \
 	| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
 	| hexagon-* \
-	| i*86-* | i860-* | i960-* | ia64-* \
+	| i370-* | i*86-* | i860-* | i960-* | ia16-* | ia64-* \
 	| ip2k-* | iq2000-* \
+	| k1om-* \
 	| le32-* | le64-* \
 	| lm32-* \
 	| m32c-* | m32r-* | m32rle-* \
-	| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
-	| m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
+	| m5200-* | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* | v70-* | w65-* \
+	| m6811-* | m68hc11-* | m6812-* | m68hc12-* | m68hcs12x-* | nvptx-* | picochip-* \
+	| m88110-* | m88k-* | maxq-* | mb-* | mcore-* | mep-* | metag-* \
+	| microblaze-* | microblazeel-* \
 	| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
 	| mips16-* \
 	| mips64-* | mips64el-* \
@@ -403,37 +838,50 @@ case $basic_machine in
 	| mips64vr5900-* | mips64vr5900el-* \
 	| mipsisa32-* | mipsisa32el-* \
 	| mipsisa32r2-* | mipsisa32r2el-* \
+	| mipsisa32r6-* | mipsisa32r6el-* \
 	| mipsisa64-* | mipsisa64el-* \
 	| mipsisa64r2-* | mipsisa64r2el-* \
+	| mipsisa64r6-* | mipsisa64r6el-* \
 	| mipsisa64sb1-* | mipsisa64sb1el-* \
 	| mipsisa64sr71k-* | mipsisa64sr71kel-* \
+	| mipsr5900-* | mipsr5900el-* \
 	| mipstx39-* | mipstx39el-* \
 	| mmix-* \
+	| mn10200-* | mn10300-* \
+	| moxie-* \
 	| mt-* \
 	| msp430-* \
 	| nds32-* | nds32le-* | nds32be-* \
-	| nios-* | nios2-* \
+	| nfp-* \
+	| nios-* | nios2-* | nios2eb-* | nios2el-* \
 	| none-* | np1-* | ns16k-* | ns32k-* \
 	| open8-* \
+	| or1k*-* \
+	| or32-* \
 	| orion-* \
 	| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
 	| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+	| pru-* \
 	| pyramid-* \
+	| riscv-* | riscv32-* | riscv64-* \
 	| rl78-* | romp-* | rs6000-* | rx-* \
-	| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
-	| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+	| score-* \
+	| sh-* | sh[1234]-* | sh[24]a-* | sh[24]ae[lb]-* | sh[23]e-* | she[lb]-* | sh[lb]e-* \
+	| sh[1234]e[lb]-* |  sh[12345][lb]e-* | sh[23]ele-* | sh64-* | sh64le-* \
 	| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
 	| sparclite-* \
-	| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+	| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \
+	| spu-* \
 	| tahoe-* \
 	| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
-	| tile*-* \
 	| tron-* \
 	| ubicom32-* \
 	| v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
 	| vax-* \
+	| visium-* \
+	| wasm32-* \
 	| we32k-* \
-	| x86-* | x86_64-* | xc16x-* | xps100-* \
+	| x86-* | x86_64-* | xc16x-* | xgate-* | xps100-* \
 	| xstormy16-* | xtensa*-* \
 	| ymp-* \
 	| z8k-* | z80-*)
@@ -444,141 +892,45 @@ case $basic_machine in
 		;;
 	# Recognize the various machine names and aliases which stand
 	# for a CPU type and a company and sometimes even an OS.
-	386bsd)
-		basic_machine=i386-unknown
-		os=-bsd
-		;;
 	3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
 		basic_machine=m68000-att
 		;;
 	3b*)
 		basic_machine=we32k-att
 		;;
-	a29khif)
-		basic_machine=a29k-amd
-		os=-udi
-		;;
-	abacus)
-		basic_machine=abacus-unknown
-		;;
-	adobe68k)
-		basic_machine=m68010-adobe
-		os=-scout
-		;;
-	alliant | fx80)
-		basic_machine=fx80-alliant
-		;;
-	altos | altos3068)
-		basic_machine=m68k-altos
-		;;
-	am29k)
-		basic_machine=a29k-none
-		os=-bsd
-		;;
 	amd64)
 		basic_machine=x86_64-pc
 		;;
 	amd64-*)
-		basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
-		;;
-	amdahl)
-		basic_machine=580-amdahl
-		os=-sysv
+		basic_machine=x86_64-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	amiga | amiga-*)
 		basic_machine=m68k-unknown
 		;;
-	amigaos | amigados)
-		basic_machine=m68k-unknown
-		os=-amigaos
-		;;
-	amigaunix | amix)
-		basic_machine=m68k-unknown
-		os=-sysv4
-		;;
-	apollo68)
-		basic_machine=m68k-apollo
-		os=-sysv
-		;;
-	apollo68bsd)
-		basic_machine=m68k-apollo
-		os=-bsd
-		;;
-	aros)
-		basic_machine=i386-pc
-		os=-aros
-		;;
-	aux)
-		basic_machine=m68k-apple
-		os=-aux
-		;;
-	balance)
-		basic_machine=ns32k-sequent
-		os=-dynix
-		;;
-	blackfin)
-		basic_machine=bfin-unknown
-		os=-linux
-		;;
 	blackfin-*)
-		basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
-		os=-linux
+		basic_machine=bfin-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+		os=linux
 		;;
 	bluegene*)
 		basic_machine=powerpc-ibm
-		os=-cnk
+		os=cnk
 		;;
 	c54x-*)
-		basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=tic54x-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	c55x-*)
-		basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=tic55x-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	c6x-*)
-		basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=tic6x-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	c90)
 		basic_machine=c90-cray
-		os=-unicos
-		;;
-	cegcc)
-		basic_machine=arm-unknown
-		os=-cegcc
-		;;
-	convex-c1)
-		basic_machine=c1-convex
-		os=-bsd
-		;;
-	convex-c2)
-		basic_machine=c2-convex
-		os=-bsd
-		;;
-	convex-c32)
-		basic_machine=c32-convex
-		os=-bsd
-		;;
-	convex-c34)
-		basic_machine=c34-convex
-		os=-bsd
-		;;
-	convex-c38)
-		basic_machine=c38-convex
-		os=-bsd
-		;;
-	cray | j90)
-		basic_machine=j90-cray
-		os=-unicos
-		;;
-	craynv)
-		basic_machine=craynv-cray
-		os=-unicosmp
+		os=${os:-unicos}
 		;;
 	cr16 | cr16-*)
 		basic_machine=cr16-unknown
-		os=-elf
-		;;
-	crds | unos)
-		basic_machine=m68k-crds
+		os=${os:-elf}
 		;;
 	crisv32 | crisv32-* | etraxfs*)
 		basic_machine=crisv32-axis
@@ -588,7 +940,7 @@ case $basic_machine in
 		;;
 	crx)
 		basic_machine=crx-unknown
-		os=-elf
+		os=${os:-elf}
 		;;
 	da30 | da30-*)
 		basic_machine=m68k-da30
@@ -598,50 +950,38 @@ case $basic_machine in
 		;;
 	decsystem10* | dec10*)
 		basic_machine=pdp10-dec
-		os=-tops10
+		os=tops10
 		;;
 	decsystem20* | dec20*)
 		basic_machine=pdp10-dec
-		os=-tops20
+		os=tops20
 		;;
 	delta | 3300 | motorola-3300 | motorola-delta \
 	      | 3300-motorola | delta-motorola)
 		basic_machine=m68k-motorola
 		;;
-	delta88)
-		basic_machine=m88k-motorola
-		os=-sysv3
-		;;
-	dicos)
-		basic_machine=i686-pc
-		os=-dicos
-		;;
-	djgpp)
-		basic_machine=i586-pc
-		os=-msdosdjgpp
-		;;
 	dpx20 | dpx20-*)
 		basic_machine=rs6000-bull
-		os=-bosx
+		os=${os:-bosx}
 		;;
-	dpx2* | dpx2*-bull)
+	dpx2*)
 		basic_machine=m68k-bull
-		os=-sysv3
+		os=sysv3
 		;;
-	ebmon29k)
-		basic_machine=a29k-amd
-		os=-ebmon
+	e500v[12])
+		basic_machine=powerpc-unknown
+		os=$os"spe"
 		;;
-	elxsi)
-		basic_machine=elxsi-elxsi
-		os=-bsd
+	e500v[12]-*)
+		basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+		os=$os"spe"
 		;;
 	encore | umax | mmax)
 		basic_machine=ns32k-encore
 		;;
-	es1800 | OSE68k | ose68k | ose | OSE)
-		basic_machine=m68k-ericsson
-		os=-ose
+	elxsi)
+		basic_machine=elxsi-elxsi
+		os=${os:-bsd}
 		;;
 	fx2800)
 		basic_machine=i860-alliant
@@ -649,45 +989,13 @@ case $basic_machine in
 	genix)
 		basic_machine=ns32k-ns
 		;;
-	gmicro)
-		basic_machine=tron-gmicro
-		os=-sysv
-		;;
-	go32)
-		basic_machine=i386-pc
-		os=-go32
-		;;
 	h3050r* | hiux*)
 		basic_machine=hppa1.1-hitachi
-		os=-hiuxwe2
-		;;
-	h8300hms)
-		basic_machine=h8300-hitachi
-		os=-hms
-		;;
-	h8300xray)
-		basic_machine=h8300-hitachi
-		os=-xray
-		;;
-	h8500hms)
-		basic_machine=h8500-hitachi
-		os=-hms
-		;;
-	harris)
-		basic_machine=m88k-harris
-		os=-sysv3
+		os=hiuxwe2
 		;;
 	hp300-*)
 		basic_machine=m68k-hp
 		;;
-	hp300bsd)
-		basic_machine=m68k-hp
-		os=-bsd
-		;;
-	hp300hpux)
-		basic_machine=m68k-hp
-		os=-hpux
-		;;
 	hp3k9[0-9][0-9] | hp9[0-9][0-9])
 		basic_machine=hppa1.0-hp
 		;;
@@ -717,193 +1025,79 @@ case $basic_machine in
 	hp9k8[0-9][0-9] | hp8[0-9][0-9])
 		basic_machine=hppa1.0-hp
 		;;
-	hppa-next)
-		os=-nextstep3
-		;;
-	hppaosf)
-		basic_machine=hppa1.1-hp
-		os=-osf
-		;;
-	hppro)
-		basic_machine=hppa1.1-hp
-		os=-proelf
-		;;
-	i370-ibm* | ibm*)
-		basic_machine=i370-ibm
-		;;
 	i*86v32)
-		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
-		os=-sysv32
+		basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'`
+		os=sysv32
 		;;
 	i*86v4*)
-		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
-		os=-sysv4
+		basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'`
+		os=sysv4
 		;;
 	i*86v)
-		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
-		os=-sysv
+		basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'`
+		os=sysv
 		;;
 	i*86sol2)
-		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
-		os=-solaris2
-		;;
-	i386mach)
-		basic_machine=i386-mach
-		os=-mach
+		basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'`
+		os=solaris2
 		;;
-	i386-vsta | vsta)
-		basic_machine=i386-unknown
-		os=-vsta
+	j90 | j90-cray)
+		basic_machine=j90-cray
+		os=${os:-unicos}
 		;;
 	iris | iris4d)
 		basic_machine=mips-sgi
 		case $os in
-		    -irix*)
+		    irix*)
 			;;
 		    *)
-			os=-irix4
+			os=irix4
 			;;
 		esac
 		;;
-	isi68 | isi)
-		basic_machine=m68k-isi
-		os=-sysv
-		;;
-	m68knommu)
-		basic_machine=m68k-unknown
-		os=-linux
+	leon-*|leon[3-9]-*)
+		basic_machine=sparc-`echo "$basic_machine" | sed 's/-.*//'`
 		;;
 	m68knommu-*)
-		basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
-		os=-linux
-		;;
-	m88k-omron*)
-		basic_machine=m88k-omron
-		;;
-	magnum | m3230)
-		basic_machine=mips-mips
-		os=-sysv
+		basic_machine=m68k-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+		os=linux
 		;;
-	merlin)
-		basic_machine=ns32k-utek
-		os=-sysv
-		;;
-	microblaze)
+	microblaze*)
 		basic_machine=microblaze-xilinx
 		;;
-	mingw64)
-		basic_machine=x86_64-pc
-		os=-mingw64
-		;;
-	mingw32)
-		basic_machine=i386-pc
-		os=-mingw32
-		;;
-	mingw32ce)
-		basic_machine=arm-unknown
-		os=-mingw32ce
-		;;
 	miniframe)
 		basic_machine=m68000-convergent
 		;;
-	*mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+	*mint | mint[0-9]* | *MiNT | *MiNT[0-9]*)
 		basic_machine=m68k-atari
-		os=-mint
+		os=mint
 		;;
 	mips3*-*)
-		basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+		basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'`
 		;;
 	mips3*)
-		basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
-		;;
-	monitor)
-		basic_machine=m68k-rom68k
-		os=-coff
-		;;
-	morphos)
-		basic_machine=powerpc-unknown
-		os=-morphos
-		;;
-	msdos)
-		basic_machine=i386-pc
-		os=-msdos
+		basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'`-unknown
 		;;
 	ms1-*)
-		basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
-		;;
-	msys)
-		basic_machine=i386-pc
-		os=-msys
-		;;
-	mvs)
-		basic_machine=i370-ibm
-		os=-mvs
-		;;
-	nacl)
-		basic_machine=le32-unknown
-		os=-nacl
-		;;
-	ncr3000)
-		basic_machine=i486-ncr
-		os=-sysv4
-		;;
-	netbsd386)
-		basic_machine=i386-unknown
-		os=-netbsd
-		;;
-	netwinder)
-		basic_machine=armv4l-rebel
-		os=-linux
-		;;
-	news | news700 | news800 | news900)
-		basic_machine=m68k-sony
-		os=-newsos
-		;;
-	news1000)
-		basic_machine=m68030-sony
-		os=-newsos
+		basic_machine=`echo "$basic_machine" | sed -e 's/ms1-/mt-/'`
 		;;
 	news-3600 | risc-news)
 		basic_machine=mips-sony
-		os=-newsos
-		;;
-	necv70)
-		basic_machine=v70-nec
-		os=-sysv
+		os=newsos
 		;;
-	next | m*-next )
+	next | m*-next)
 		basic_machine=m68k-next
 		case $os in
-		    -nextstep* )
+		    nextstep* )
 			;;
-		    -ns2*)
-		      os=-nextstep2
+		    ns2*)
+		      os=nextstep2
 			;;
 		    *)
-		      os=-nextstep3
+		      os=nextstep3
 			;;
 		esac
 		;;
-	nh3000)
-		basic_machine=m68k-harris
-		os=-cxux
-		;;
-	nh[45]000)
-		basic_machine=m88k-harris
-		os=-cxux
-		;;
-	nindy960)
-		basic_machine=i960-intel
-		os=-nindy
-		;;
-	mon960)
-		basic_machine=i960-intel
-		os=-mon960
-		;;
-	nonstopux)
-		basic_machine=mips-compaq
-		os=-nonstopux
-		;;
 	np1)
 		basic_machine=np1-gould
 		;;
@@ -916,40 +1110,26 @@ case $basic_machine in
 	nsr-tandem)
 		basic_machine=nsr-tandem
 		;;
+	nsv-tandem)
+		basic_machine=nsv-tandem
+		;;
+	nsx-tandem)
+		basic_machine=nsx-tandem
+		;;
 	op50n-* | op60c-*)
 		basic_machine=hppa1.1-oki
-		os=-proelf
+		os=proelf
 		;;
 	openrisc | openrisc-*)
 		basic_machine=or32-unknown
 		;;
-	os400)
-		basic_machine=powerpc-ibm
-		os=-os400
-		;;
-	OSE68000 | ose68000)
-		basic_machine=m68000-ericsson
-		os=-ose
-		;;
-	os68k)
-		basic_machine=m68k-none
-		os=-os68k
-		;;
 	pa-hitachi)
 		basic_machine=hppa1.1-hitachi
-		os=-hiuxwe2
-		;;
-	paragon)
-		basic_machine=i860-intel
-		os=-osf
-		;;
-	parisc)
-		basic_machine=hppa-unknown
-		os=-linux
+		os=hiuxwe2
 		;;
 	parisc-*)
-		basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
-		os=-linux
+		basic_machine=hppa-`echo "$basic_machine" | sed 's/^[^-]*-//'`
+		os=linux
 		;;
 	pbd)
 		basic_machine=sparc-tti
@@ -964,7 +1144,7 @@ case $basic_machine in
 		basic_machine=i386-pc
 		;;
 	pc98-*)
-		basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=i386-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	pentium | p5 | k5 | k6 | nexgen | viac3)
 		basic_machine=i586-pc
@@ -979,16 +1159,16 @@ case $basic_machine in
 		basic_machine=i786-pc
 		;;
 	pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
-		basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=i586-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	pentiumpro-* | p6-* | 6x86-* | athlon-*)
-		basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
-		basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	pentium4-*)
-		basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=i786-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	pn)
 		basic_machine=pn-gould
@@ -998,39 +1178,27 @@ case $basic_machine in
 	ppc | ppcbe)	basic_machine=powerpc-unknown
 		;;
 	ppc-* | ppcbe-*)
-		basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
-	ppcle | powerpclittle | ppc-le | powerpc-little)
+	ppcle | powerpclittle)
 		basic_machine=powerpcle-unknown
 		;;
 	ppcle-* | powerpclittle-*)
-		basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=powerpcle-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	ppc64)	basic_machine=powerpc64-unknown
 		;;
-	ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+	ppc64-*) basic_machine=powerpc64-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
-	ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+	ppc64le | powerpc64little)
 		basic_machine=powerpc64le-unknown
 		;;
 	ppc64le-* | powerpc64little-*)
-		basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+		basic_machine=powerpc64le-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
 	ps2)
 		basic_machine=i386-ibm
 		;;
-	pw32)
-		basic_machine=i586-unknown
-		os=-pw32
-		;;
-	rdos)
-		basic_machine=i386-pc
-		os=-rdos
-		;;
-	rom68k)
-		basic_machine=m68k-rom68k
-		os=-coff
-		;;
 	rm[46]00)
 		basic_machine=mips-siemens
 		;;
@@ -1043,10 +1211,6 @@ case $basic_machine in
 	s390x | s390x-*)
 		basic_machine=s390x-ibm
 		;;
-	sa29200)
-		basic_machine=a29k-amd
-		os=-udi
-		;;
 	sb1)
 		basic_machine=mipsisa64sb1-unknown
 		;;
@@ -1055,105 +1219,32 @@ case $basic_machine in
 		;;
 	sde)
 		basic_machine=mipsisa32-sde
-		os=-elf
-		;;
-	sei)
-		basic_machine=mips-sei
-		os=-seiux
+		os=${os:-elf}
 		;;
 	sequent)
 		basic_machine=i386-sequent
 		;;
-	sh)
-		basic_machine=sh-hitachi
-		os=-hms
-		;;
 	sh5el)
 		basic_machine=sh5le-unknown
 		;;
-	sh64)
-		basic_machine=sh64-unknown
+	sh5el-*)
+		basic_machine=sh5le-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
-	sparclite-wrs | simso-wrs)
+	simso-wrs)
 		basic_machine=sparclite-wrs
-		os=-vxworks
-		;;
-	sps7)
-		basic_machine=m68k-bull
-		os=-sysv2
+		os=vxworks
 		;;
 	spur)
 		basic_machine=spur-unknown
 		;;
-	st2000)
-		basic_machine=m68k-tandem
-		;;
-	stratus)
-		basic_machine=i860-stratus
-		os=-sysv4
-		;;
 	strongarm-* | thumb-*)
-		basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
-		;;
-	sun2)
-		basic_machine=m68000-sun
-		;;
-	sun2os3)
-		basic_machine=m68000-sun
-		os=-sunos3
-		;;
-	sun2os4)
-		basic_machine=m68000-sun
-		os=-sunos4
+		basic_machine=arm-`echo "$basic_machine" | sed 's/^[^-]*-//'`
 		;;
-	sun3os3)
-		basic_machine=m68k-sun
-		os=-sunos3
-		;;
-	sun3os4)
-		basic_machine=m68k-sun
-		os=-sunos4
-		;;
-	sun4os3)
-		basic_machine=sparc-sun
-		os=-sunos3
-		;;
-	sun4os4)
-		basic_machine=sparc-sun
-		os=-sunos4
-		;;
-	sun4sol2)
-		basic_machine=sparc-sun
-		os=-solaris2
-		;;
-	sun3 | sun3-*)
-		basic_machine=m68k-sun
-		;;
-	sun4)
-		basic_machine=sparc-sun
-		;;
-	sun386 | sun386i | roadrunner)
-		basic_machine=i386-sun
-		;;
-	sv1)
-		basic_machine=sv1-cray
-		os=-unicos
-		;;
-	symmetry)
-		basic_machine=i386-sequent
-		os=-dynix
-		;;
-	t3e)
-		basic_machine=alphaev5-cray
-		os=-unicos
-		;;
-	t90)
-		basic_machine=t90-cray
-		os=-unicos
+	tile*-*)
 		;;
 	tile*)
 		basic_machine=$basic_machine-unknown
-		os=-linux-gnu
+		os=${os:-linux-gnu}
 		;;
 	tx39)
 		basic_machine=mipstx39-unknown
@@ -1161,146 +1252,32 @@ case $basic_machine in
 	tx39el)
 		basic_machine=mipstx39el-unknown
 		;;
-	toad1)
-		basic_machine=pdp10-xkl
-		os=-tops20
-		;;
 	tower | tower-32)
 		basic_machine=m68k-ncr
 		;;
-	tpf)
-		basic_machine=s390x-ibm
-		os=-tpf
-		;;
-	udi29k)
-		basic_machine=a29k-amd
-		os=-udi
-		;;
-	ultra3)
-		basic_machine=a29k-nyu
-		os=-sym1
-		;;
-	v810 | necv810)
-		basic_machine=v810-nec
-		os=-none
-		;;
-	vaxv)
-		basic_machine=vax-dec
-		os=-sysv
-		;;
-	vms)
-		basic_machine=vax-dec
-		os=-vms
-		;;
 	vpp*|vx|vx-*)
 		basic_machine=f301-fujitsu
 		;;
-	vxworks960)
-		basic_machine=i960-wrs
-		os=-vxworks
-		;;
-	vxworks68)
-		basic_machine=m68k-wrs
-		os=-vxworks
-		;;
-	vxworks29k)
-		basic_machine=a29k-wrs
-		os=-vxworks
-		;;
 	w65*)
 		basic_machine=w65-wdc
-		os=-none
+		os=none
 		;;
 	w89k-*)
 		basic_machine=hppa1.1-winbond
-		os=-proelf
-		;;
-	xbox)
-		basic_machine=i686-pc
-		os=-mingw32
+		os=proelf
 		;;
-	xps | xps100)
-		basic_machine=xps100-honeywell
+	x64)
+		basic_machine=x86_64-pc
 		;;
 	xscale-* | xscalee[bl]-*)
-		basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
-		;;
-	ymp)
-		basic_machine=ymp-cray
-		os=-unicos
-		;;
-	z8k-*-coff)
-		basic_machine=z8k-unknown
-		os=-sim
-		;;
-	z80-*-coff)
-		basic_machine=z80-unknown
-		os=-sim
+		basic_machine=`echo "$basic_machine" | sed 's/^xscale/arm/'`
 		;;
 	none)
 		basic_machine=none-none
-		os=-none
 		;;
 
-# Here we handle the default manufacturer of certain CPU types.  It is in
-# some cases the only manufacturer, in others, it is the most popular.
-	w89k)
-		basic_machine=hppa1.1-winbond
-		;;
-	op50n)
-		basic_machine=hppa1.1-oki
-		;;
-	op60c)
-		basic_machine=hppa1.1-oki
-		;;
-	romp)
-		basic_machine=romp-ibm
-		;;
-	mmix)
-		basic_machine=mmix-knuth
-		;;
-	rs6000)
-		basic_machine=rs6000-ibm
-		;;
-	vax)
-		basic_machine=vax-dec
-		;;
-	pdp10)
-		# there are many clones, so DEC is not a safe bet
-		basic_machine=pdp10-unknown
-		;;
-	pdp11)
-		basic_machine=pdp11-dec
-		;;
-	we32k)
-		basic_machine=we32k-att
-		;;
-	sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
-		basic_machine=sh-unknown
-		;;
-	sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
-		basic_machine=sparc-sun
-		;;
-	cydra)
-		basic_machine=cydra-cydrome
-		;;
-	orion)
-		basic_machine=orion-highlevel
-		;;
-	orion105)
-		basic_machine=clipper-highlevel
-		;;
-	mac | mpw | mac-mpw)
-		basic_machine=m68k-apple
-		;;
-	pmac | pmac-mpw)
-		basic_machine=powerpc-apple
-		;;
-	*-unknown)
-		# Make sure to match an already-canonicalized machine name.
-		;;
 	*)
-		echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+		echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2
 		exit 1
 		;;
 esac
@@ -1308,10 +1285,10 @@ esac
 # Here we canonicalize certain aliases for manufacturers.
 case $basic_machine in
 	*-digital*)
-		basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+		basic_machine=`echo "$basic_machine" | sed 's/digital.*/dec/'`
 		;;
 	*-commodore*)
-		basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+		basic_machine=`echo "$basic_machine" | sed 's/commodore.*/cbm/'`
 		;;
 	*)
 		;;
@@ -1319,200 +1296,246 @@ esac
 
 # Decode manufacturer-specific aliases for certain operating systems.
 
-if [ x"$os" != x"" ]
+if [ x$os != x ]
 then
 case $os in
-	# First match some system type aliases
-	# that might get confused with valid system types.
-	# -solaris* is a basic system type, with this one exception.
-	-auroraux)
-		os=-auroraux
+	# First match some system type aliases that might get confused
+	# with valid system types.
+	# solaris* is a basic system type, with this one exception.
+	auroraux)
+		os=auroraux
 		;;
-	-solaris1 | -solaris1.*)
-		os=`echo $os | sed -e 's|solaris1|sunos4|'`
+	bluegene*)
+		os=cnk
 		;;
-	-solaris)
-		os=-solaris2
+	solaris1 | solaris1.*)
+		os=`echo $os | sed -e 's|solaris1|sunos4|'`
 		;;
-	-svr4*)
-		os=-sysv4
+	solaris)
+		os=solaris2
 		;;
-	-unixware*)
-		os=-sysv4.2uw
+	unixware*)
+		os=sysv4.2uw
 		;;
-	-gnu/linux*)
+	gnu/linux*)
 		os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
 		;;
-	# First accept the basic system types.
+	# es1800 is here to avoid being matched by es* (a different OS)
+	es1800*)
+		os=ose
+		;;
+	# Some version numbers need modification
+	chorusos*)
+		os=chorusos
+		;;
+	isc)
+		os=isc2.2
+		;;
+	sco6)
+		os=sco5v6
+		;;
+	sco5)
+		os=sco3.2v5
+		;;
+	sco4)
+		os=sco3.2v4
+		;;
+	sco3.2.[4-9]*)
+		os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+		;;
+	sco3.2v[4-9]* | sco5v6*)
+		# Don't forget version if it is 3.2v4 or newer.
+		;;
+	scout)
+		# Don't match below
+		;;
+	sco*)
+		os=sco3.2v2
+		;;
+	psos*)
+		os=psos
+		;;
+	# Now accept the basic system types.
 	# The portable systems comes first.
-	# Each alternative MUST END IN A *, to match a version number.
-	# -sysv* is not here because it comes later, after sysvr4.
-	-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
-	      | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
-	      | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
-	      | -sym* | -kopensolaris* \
-	      | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
-	      | -aos* | -aros* \
-	      | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
-	      | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
-	      | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
-	      | -bitrig* | -openbsd* | -solidbsd* \
-	      | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
-	      | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
-	      | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
-	      | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
-	      | -chorusos* | -chorusrdb* | -cegcc* \
-	      | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
-	      | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
-	      | -linux-newlib* | -linux-musl* | -linux-uclibc* \
-	      | -uxpv* | -beos* | -mpeix* | -udk* \
-	      | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
-	      | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
-	      | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
-	      | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
-	      | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
-	      | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
-	      | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*)
+	# Each alternative MUST end in a * to match a version number.
+	# sysv* is not here because it comes later, after sysvr4.
+	gnu* | bsd* | mach* | minix* | genix* | ultrix* | irix* \
+	     | *vms* | esix* | aix* | cnk* | sunos | sunos[34]*\
+	     | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \
+	     | sym* | kopensolaris* | plan9* \
+	     | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \
+	     | aos* | aros* | cloudabi* | sortix* \
+	     | nindy* | vxsim* | vxworks* | ebmon* | hms* | mvs* \
+	     | clix* | riscos* | uniplus* | iris* | isc* | rtu* | xenix* \
+	     | knetbsd* | mirbsd* | netbsd* \
+	     | bitrig* | openbsd* | solidbsd* | libertybsd* \
+	     | ekkobsd* | kfreebsd* | freebsd* | riscix* | lynxos* \
+	     | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \
+	     | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \
+	     | udi* | eabi* | lites* | ieee* | go32* | aux* | hcos* \
+	     | chorusrdb* | cegcc* | glidix* \
+	     | cygwin* | msys* | pe* | moss* | proelf* | rtems* \
+	     | midipix* | mingw32* | mingw64* | linux-gnu* | linux-android* \
+	     | linux-newlib* | linux-musl* | linux-uclibc* \
+	     | uxpv* | beos* | mpeix* | udk* | moxiebox* \
+	     | interix* | uwin* | mks* | rhapsody* | darwin* \
+	     | openstep* | oskit* | conix* | pw32* | nonstopux* \
+	     | storm-chaos* | tops10* | tenex* | tops20* | its* \
+	     | os2* | vos* | palmos* | uclinux* | nucleus* \
+	     | morphos* | superux* | rtmk* | windiss* \
+	     | powermax* | dnix* | nx6 | nx7 | sei* | dragonfly* \
+	     | skyos* | haiku* | rdos* | toppers* | drops* | es* \
+	     | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \
+	     | midnightbsd*)
 	# Remember, each alternative MUST END IN *, to match a version number.
 		;;
-	-qnx*)
+	qnx*)
 		case $basic_machine in
 		    x86-* | i*86-*)
 			;;
 		    *)
-			os=-nto$os
+			os=nto-$os
 			;;
 		esac
 		;;
-	-nto-qnx*)
+	hiux*)
+		os=hiuxwe2
 		;;
-	-nto*)
-		os=`echo $os | sed -e 's|nto|nto-qnx|'`
+	nto-qnx*)
 		;;
-	-sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
-	      | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
-	      | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+	nto*)
+		os=`echo $os | sed -e 's|nto|nto-qnx|'`
 		;;
-	-mac*)
-		os=`echo $os | sed -e 's|mac|macos|'`
+	sim | xray | os68k* | v88r* \
+	    | windows* | osx | abug | netware* | os9* \
+	    | macos* | mpw* | magic* | mmixware* | mon960* | lnews*)
 		;;
-	-linux-dietlibc)
-		os=-linux-dietlibc
+	linux-dietlibc)
+		os=linux-dietlibc
 		;;
-	-linux*)
+	linux*)
 		os=`echo $os | sed -e 's|linux|linux-gnu|'`
 		;;
-	-sunos5*)
-		os=`echo $os | sed -e 's|sunos5|solaris2|'`
+	lynx*178)
+		os=lynxos178
 		;;
-	-sunos6*)
-		os=`echo $os | sed -e 's|sunos6|solaris3|'`
+	lynx*5)
+		os=lynxos5
 		;;
-	-opened*)
-		os=-openedition
+	lynx*)
+		os=lynxos
 		;;
-	-os400*)
-		os=-os400
+	mac*)
+		os=`echo "$os" | sed -e 's|mac|macos|'`
 		;;
-	-wince*)
-		os=-wince
+	opened*)
+		os=openedition
 		;;
-	-osfrose*)
-		os=-osfrose
+	os400*)
+		os=os400
 		;;
-	-osf*)
-		os=-osf
+	sunos5*)
+		os=`echo "$os" | sed -e 's|sunos5|solaris2|'`
 		;;
-	-utek*)
-		os=-bsd
+	sunos6*)
+		os=`echo "$os" | sed -e 's|sunos6|solaris3|'`
 		;;
-	-dynix*)
-		os=-bsd
+	wince*)
+		os=wince
 		;;
-	-acis*)
-		os=-aos
+	utek*)
+		os=bsd
 		;;
-	-atheos*)
-		os=-atheos
+	dynix*)
+		os=bsd
 		;;
-	-syllable*)
-		os=-syllable
+	acis*)
+		os=aos
 		;;
-	-386bsd)
-		os=-bsd
+	atheos*)
+		os=atheos
 		;;
-	-ctix* | -uts*)
-		os=-sysv
+	syllable*)
+		os=syllable
 		;;
-	-nova*)
-		os=-rtmk-nova
+	386bsd)
+		os=bsd
+		;;
+	ctix* | uts*)
+		os=sysv
+		;;
+	nova*)
+		os=rtmk-nova
 		;;
-	-ns2 )
-		os=-nextstep2
+	ns2)
+		os=nextstep2
 		;;
-	-nsk*)
-		os=-nsk
+	nsk*)
+		os=nsk
 		;;
 	# Preserve the version number of sinix5.
-	-sinix5.*)
+	sinix5.*)
 		os=`echo $os | sed -e 's|sinix|sysv|'`
 		;;
-	-sinix*)
-		os=-sysv4
+	sinix*)
+		os=sysv4
 		;;
-	-tpf*)
-		os=-tpf
+	tpf*)
+		os=tpf
 		;;
-	-triton*)
-		os=-sysv3
+	triton*)
+		os=sysv3
 		;;
-	-oss*)
-		os=-sysv3
+	oss*)
+		os=sysv3
 		;;
-	-svr4)
-		os=-sysv4
+	svr4*)
+		os=sysv4
 		;;
-	-svr3)
-		os=-sysv3
+	svr3)
+		os=sysv3
 		;;
-	-sysvr4)
-		os=-sysv4
+	sysvr4)
+		os=sysv4
 		;;
-	# This must come after -sysvr4.
-	-sysv*)
+	# This must come after sysvr4.
+	sysv*)
 		;;
-	-ose*)
-		os=-ose
+	ose*)
+		os=ose
 		;;
-	-es1800*)
-		os=-ose
+	*mint | mint[0-9]* | *MiNT | MiNT[0-9]*)
+		os=mint
 		;;
-	-xenix)
-		os=-xenix
+	zvmoe)
+		os=zvmoe
 		;;
-	-*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
-		os=-mint
+	dicos*)
+		os=dicos
 		;;
-	-aros*)
-		os=-aros
-		;;
-	-kaos*)
-		os=-kaos
+	pikeos*)
+		# Until real need of OS specific support for
+		# particular features comes up, bare metal
+		# configurations are quite functional.
+		case $basic_machine in
+		    arm*)
+			os=eabi
+			;;
+		    *)
+			os=elf
+			;;
+		esac
 		;;
-	-zvmoe)
-		os=-zvmoe
+	nacl*)
 		;;
-	-dicos*)
-		os=-dicos
+	ios)
 		;;
-	-nacl*)
+	none)
 		;;
-	-none)
+	*-eabi)
 		;;
 	*)
-		# Get rid of the `-' at the beginning of $os.
-		os=`echo $os | sed 's/[^-]*-//'`
-		echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+		echo Invalid configuration \`"$1"\': system \`"$os"\' not recognized 1>&2
 		exit 1
 		;;
 esac
@@ -1530,173 +1553,179 @@ else
 
 case $basic_machine in
 	score-*)
-		os=-elf
+		os=elf
 		;;
 	spu-*)
-		os=-elf
+		os=elf
 		;;
 	*-acorn)
-		os=-riscix1.2
+		os=riscix1.2
 		;;
 	arm*-rebel)
-		os=-linux
+		os=linux
 		;;
 	arm*-semi)
-		os=-aout
+		os=aout
 		;;
 	c4x-* | tic4x-*)
-		os=-coff
+		os=coff
+		;;
+	c8051-*)
+		os=elf
+		;;
+	clipper-intergraph)
+		os=clix
 		;;
 	hexagon-*)
-		os=-elf
+		os=elf
 		;;
 	tic54x-*)
-		os=-coff
+		os=coff
 		;;
 	tic55x-*)
-		os=-coff
+		os=coff
 		;;
 	tic6x-*)
-		os=-coff
+		os=coff
 		;;
 	# This must come before the *-dec entry.
 	pdp10-*)
-		os=-tops20
+		os=tops20
 		;;
 	pdp11-*)
-		os=-none
+		os=none
 		;;
 	*-dec | vax-*)
-		os=-ultrix4.2
+		os=ultrix4.2
 		;;
 	m68*-apollo)
-		os=-domain
+		os=domain
 		;;
 	i386-sun)
-		os=-sunos4.0.2
+		os=sunos4.0.2
 		;;
 	m68000-sun)
-		os=-sunos3
+		os=sunos3
 		;;
 	m68*-cisco)
-		os=-aout
+		os=aout
 		;;
 	mep-*)
-		os=-elf
+		os=elf
 		;;
 	mips*-cisco)
-		os=-elf
+		os=elf
 		;;
 	mips*-*)
-		os=-elf
+		os=elf
 		;;
 	or32-*)
-		os=-coff
+		os=coff
 		;;
 	*-tti)	# must be before sparc entry or we get the wrong os.
-		os=-sysv3
+		os=sysv3
 		;;
 	sparc-* | *-sun)
-		os=-sunos4.1.1
+		os=sunos4.1.1
 		;;
-	*-be)
-		os=-beos
+	pru-*)
+		os=elf
 		;;
-	*-haiku)
-		os=-haiku
+	*-be)
+		os=beos
 		;;
 	*-ibm)
-		os=-aix
+		os=aix
 		;;
 	*-knuth)
-		os=-mmixware
+		os=mmixware
 		;;
 	*-wec)
-		os=-proelf
+		os=proelf
 		;;
 	*-winbond)
-		os=-proelf
+		os=proelf
 		;;
 	*-oki)
-		os=-proelf
+		os=proelf
 		;;
 	*-hp)
-		os=-hpux
+		os=hpux
 		;;
 	*-hitachi)
-		os=-hiux
+		os=hiux
 		;;
 	i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
-		os=-sysv
+		os=sysv
 		;;
 	*-cbm)
-		os=-amigaos
+		os=amigaos
 		;;
 	*-dg)
-		os=-dgux
+		os=dgux
 		;;
 	*-dolphin)
-		os=-sysv3
+		os=sysv3
 		;;
 	m68k-ccur)
-		os=-rtu
+		os=rtu
 		;;
 	m88k-omron*)
-		os=-luna
+		os=luna
 		;;
-	*-next )
-		os=-nextstep
+	*-next)
+		os=nextstep
 		;;
 	*-sequent)
-		os=-ptx
+		os=ptx
 		;;
 	*-crds)
-		os=-unos
+		os=unos
 		;;
 	*-ns)
-		os=-genix
+		os=genix
 		;;
 	i370-*)
-		os=-mvs
-		;;
-	*-next)
-		os=-nextstep3
+		os=mvs
 		;;
 	*-gould)
-		os=-sysv
+		os=sysv
 		;;
 	*-highlevel)
-		os=-bsd
+		os=bsd
 		;;
 	*-encore)
-		os=-bsd
+		os=bsd
 		;;
 	*-sgi)
-		os=-irix
+		os=irix
 		;;
 	*-siemens)
-		os=-sysv4
+		os=sysv4
 		;;
 	*-masscomp)
-		os=-rtu
+		os=rtu
 		;;
 	f30[01]-fujitsu | f700-fujitsu)
-		os=-uxpv
+		os=uxpv
 		;;
 	*-rom68k)
-		os=-coff
+		os=coff
 		;;
 	*-*bug)
-		os=-coff
+		os=coff
 		;;
 	*-apple)
-		os=-macos
+		os=macos
 		;;
 	*-atari*)
-		os=-mint
+		os=mint
+		;;
+	*-wrs)
+		os=vxworks
 		;;
 	*)
-		os=-none
+		os=none
 		;;
 esac
 fi
@@ -1707,79 +1736,82 @@ vendor=unknown
 case $basic_machine in
 	*-unknown)
 		case $os in
-			-riscix*)
+			riscix*)
 				vendor=acorn
 				;;
-			-sunos*)
+			sunos*)
 				vendor=sun
 				;;
-			-cnk*|-aix*)
+			cnk*|-aix*)
 				vendor=ibm
 				;;
-			-beos*)
+			beos*)
 				vendor=be
 				;;
-			-hpux*)
+			hpux*)
 				vendor=hp
 				;;
-			-mpeix*)
+			mpeix*)
 				vendor=hp
 				;;
-			-hiux*)
+			hiux*)
 				vendor=hitachi
 				;;
-			-unos*)
+			unos*)
 				vendor=crds
 				;;
-			-dgux*)
+			dgux*)
 				vendor=dg
 				;;
-			-luna*)
+			luna*)
 				vendor=omron
 				;;
-			-genix*)
+			genix*)
 				vendor=ns
 				;;
-			-mvs* | -opened*)
+			clix*)
+				vendor=intergraph
+				;;
+			mvs* | opened*)
 				vendor=ibm
 				;;
-			-os400*)
+			os400*)
 				vendor=ibm
 				;;
-			-ptx*)
+			ptx*)
 				vendor=sequent
 				;;
-			-tpf*)
+			tpf*)
 				vendor=ibm
 				;;
-			-vxsim* | -vxworks* | -windiss*)
+			vxsim* | vxworks* | windiss*)
 				vendor=wrs
 				;;
-			-aux*)
+			aux*)
 				vendor=apple
 				;;
-			-hms*)
+			hms*)
 				vendor=hitachi
 				;;
-			-mpw* | -macos*)
+			mpw* | macos*)
 				vendor=apple
 				;;
-			-*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+			*mint | mint[0-9]* | *MiNT | MiNT[0-9]*)
 				vendor=atari
 				;;
-			-vos*)
+			vos*)
 				vendor=stratus
 				;;
 		esac
-		basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+		basic_machine=`echo "$basic_machine" | sed "s/unknown/$vendor/"`
 		;;
 esac
 
-echo $basic_machine$os
+echo "$basic_machine-$os"
 exit
 
 # Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
+# eval: (add-hook 'before-save-hook 'time-stamp)
 # time-stamp-start: "timestamp='"
 # time-stamp-format: "%:y-%02m-%02d"
 # time-stamp-end: "'"
diff --git a/configure.ac b/configure.ac
index c41a83c97646..cc354f6f3028 100644
--- a/configure.ac
+++ b/configure.ac
@@ -179,12 +179,9 @@ AC_CHECK_LIB([lzma], [lzma_stream_encoder_mt],
   [AC_DEFINE([HAVE_LZMA_MT], [1], [xz multithreaded compression support])])
 
 
-# Look for libbrotli{enc,dec}, optional dependencies
-PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec],
-  [AC_DEFINE([HAVE_BROTLI], [1], [Whether to use libbrotli.])
-   CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"]
-   have_brotli=1], [have_brotli=])
-AC_SUBST(HAVE_BROTLI, [$have_brotli])
+# Look for libbrotli{enc,dec}.
+PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], [CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"])
+
 
 # Look for libseccomp, required for Linux sandboxing.
 if test "$sys_name" = linux; then
@@ -232,12 +229,6 @@ if test "$gc" = yes; then
 fi
 
 
-AC_ARG_ENABLE(init-state, AC_HELP_STRING([--disable-init-state],
-  [do not initialise DB etc. in `make install']),
-  init_state=$enableval, init_state=yes)
-#AM_CONDITIONAL(INIT_STATE, test "$init_state" = "yes")
-
-
 # documentation generation switch
 AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen],
   [disable documentation generation]),
diff --git a/doc/manual/advanced-topics/advanced-topics.xml b/doc/manual/advanced-topics/advanced-topics.xml
index 338aa6f3a238..b710f9f2b518 100644
--- a/doc/manual/advanced-topics/advanced-topics.xml
+++ b/doc/manual/advanced-topics/advanced-topics.xml
@@ -1,6 +1,7 @@
 <part xmlns="http://docbook.org/ns/docbook"
       xmlns:xlink="http://www.w3.org/1999/xlink"
       xmlns:xi="http://www.w3.org/2001/XInclude"
+      xml:id="part-advanced-topics"
       version="5.0">
 
 <title>Advanced Topics</title>
diff --git a/doc/manual/advanced-topics/distributed-builds.xml b/doc/manual/advanced-topics/distributed-builds.xml
index 20fd6a0cfb0d..bbb573e35400 100644
--- a/doc/manual/advanced-topics/distributed-builds.xml
+++ b/doc/manual/advanced-topics/distributed-builds.xml
@@ -81,6 +81,7 @@ or a newline, e.g.
 
 <para>Each machine specification consists of the following elements,
 separated by spaces. Only the first element is required.
+To leave a field at its default, set it to <literal>-</literal>.
 
 <orderedlist>
 
diff --git a/doc/manual/command-ref/conf-file.xml b/doc/manual/command-ref/conf-file.xml
index 5045f85d392e..26fe58d043b7 100644
--- a/doc/manual/command-ref/conf-file.xml
+++ b/doc/manual/command-ref/conf-file.xml
@@ -437,7 +437,10 @@ builtins.fetchurl {
     <listitem><para>This option defines the maximum number of jobs
     that Nix will try to build in parallel.  The default is
     <literal>1</literal>. The special value <literal>auto</literal>
-    causes Nix to use the number of CPUs in your system.  It can be
+    causes Nix to use the number of CPUs in your system.  A value of
+    <literal>0</literal> is useful when using remote builders to prevent any
+    local builds (except for derivations that set the
+    <literal>preferLocalBuild</literal> attribute, which are still built
+    locally regardless).  It can be
     overridden using the <option
     linkend='opt-max-jobs'>--max-jobs</option> (<option>-j</option>)
     command line switch.</para></listitem>
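
For example (an illustrative invocation, not part of this change), forcing all
builds onto the configured remote builders:

    $ nix-build --max-jobs 0 release.nix -A build.x86_64-linux
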
diff --git a/doc/manual/command-ref/nix-env.xml b/doc/manual/command-ref/nix-env.xml
index eac7739558be..56c466268ea0 100644
--- a/doc/manual/command-ref/nix-env.xml
+++ b/doc/manual/command-ref/nix-env.xml
@@ -1346,11 +1346,12 @@ $ nix-env --list-generations
 <para>This operation deletes the specified generations of the current
 profile.  The generations can be a list of generation numbers, the
 special value <literal>old</literal> to delete all non-current
-generations, or a value such as <literal>30d</literal> to delete all
+generations, a value such as <literal>30d</literal> to delete all
 generations older than the specified number of days (except for the
-generation that was active at that point in time).
-Periodically deleting old generations is important to make garbage
-collection effective.</para>
+generation that was active at that point in time), or a value such as
+<literal>+5</literal> to delete all generations except the specified number
+of most recent ones. Periodically deleting old generations is important
+to make garbage collection effective.</para>
 
 </refsection>
 
@@ -1359,6 +1360,8 @@ collection effective.</para>
 <screen>
 $ nix-env --delete-generations 3 4 8
 
+$ nix-env --delete-generations +5
+
 $ nix-env --delete-generations 30d
 
 $ nix-env -p other_profile --delete-generations old</screen>
@@ -1458,7 +1461,7 @@ error: no generation older than the current (91) exists</screen>
 <refsection condition="manpage"><title>Environment variables</title>
 
 <variablelist>
-  
+
   <varlistentry><term><envar>NIX_PROFILE</envar></term>
 
     <listitem><para>Location of the Nix profile.  Defaults to the
@@ -1472,6 +1475,6 @@ error: no generation older than the current (91) exists</screen>
 </variablelist>
 
 </refsection>
-  
+
 
 </refentry>
diff --git a/doc/manual/command-ref/nix-shell.xml b/doc/manual/command-ref/nix-shell.xml
index 62d026ac238e..5c44c4a8f446 100644
--- a/doc/manual/command-ref/nix-shell.xml
+++ b/doc/manual/command-ref/nix-shell.xml
@@ -32,6 +32,7 @@
     <arg><option>--run</option> <replaceable>cmd</replaceable></arg>
     <arg><option>--exclude</option> <replaceable>regexp</replaceable></arg>
     <arg><option>--pure</option></arg>
+    <arg><option>--keep</option> <replaceable>name</replaceable></arg>
     <group choice='req'>
       <arg choice='plain'>
         <group choice='req'>
@@ -165,6 +166,13 @@ also <xref linkend="sec-common-options" />.</phrase></para>
 
     </listitem></varlistentry>
 
+  <varlistentry><term><option>--keep</option> <replaceable>name</replaceable></term>
+
+    <listitem><para>When a <option>--pure</option> shell is started,
+    keep the listed environment variables.</para></listitem>
+
+  </varlistentry>
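
For example (illustrative), starting a pure shell that still sees the SSH
agent socket:

    $ nix-shell --pure --keep SSH_AUTH_SOCK -p git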
+
 </variablelist>
 
 <para>The following common options are supported:</para>
diff --git a/doc/manual/command-ref/nix-store.xml b/doc/manual/command-ref/nix-store.xml
index f2dc6ed8540d..5fff64a18f93 100644
--- a/doc/manual/command-ref/nix-store.xml
+++ b/doc/manual/command-ref/nix-store.xml
@@ -204,7 +204,7 @@ printed.)</para>
     with <option>-K</option>, if an output path is not identical to
     the corresponding output from the previous build, the new output
     path is left in
-    <filename>/nix/store/<replaceable>name</replaceable>-check.</filename></para>
+    <filename>/nix/store/<replaceable>name</replaceable>.check.</filename></para>
 
     <para>See also the <option>build-repeat</option> configuration
     option, which repeats a derivation a number of times and prevents
diff --git a/doc/manual/expressions/advanced-attributes.xml b/doc/manual/expressions/advanced-attributes.xml
index dfd013b5cf31..9422e82ff362 100644
--- a/doc/manual/expressions/advanced-attributes.xml
+++ b/doc/manual/expressions/advanced-attributes.xml
@@ -50,6 +50,40 @@ allowedRequisites = [ foobar ];
 
   </varlistentry>
 
+  <varlistentry><term><varname>disallowedReferences</varname></term>
+
+    <listitem><para>The optional attribute
+    <varname>disallowedReferences</varname> specifies a list of illegal
+    references (dependencies) of the output of the builder.  For
+    example,
+
+<programlisting>
+disallowedReferences = [ foo ];
+</programlisting>
+
+    enforces that the output of a derivation cannot have a direct runtime
+    dependency on the derivation <varname>foo</varname>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><varname>disallowedRequisites</varname></term>
+
+    <listitem><para>This attribute is similar to
+    <varname>disallowedReferences</varname>, but it specifies illegal
+    requisites for the whole closure, so all the dependencies
+    recursively.  For example,
+
+<programlisting>
+disallowedRequisites = [ foobar ];
+</programlisting>
+
+    enforces that the output of a derivation cannot have any
+    runtime dependency on <varname>foobar</varname> or any other derivation
+    depending recursively on <varname>foobar</varname>.</para></listitem>
+
+  </varlistentry>
+
 
   <varlistentry><term><varname>exportReferencesGraph</varname></term>
 
diff --git a/doc/manual/expressions/builtins.xml b/doc/manual/expressions/builtins.xml
index ac1fe7e2fafe..873f30b062ee 100644
--- a/doc/manual/expressions/builtins.xml
+++ b/doc/manual/expressions/builtins.xml
@@ -92,6 +92,36 @@ available as <function>builtins.derivation</function>.</para>
   </varlistentry>
 
 
+  <varlistentry><term><function>builtins.bitAnd</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return the bitwise AND of the integers
+    <replaceable>e1</replaceable> and
+    <replaceable>e2</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.bitOr</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return the bitwise OR of the integers
+    <replaceable>e1</replaceable> and
+    <replaceable>e2</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.bitXor</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return the bitwise XOR of the integers
+    <replaceable>e1</replaceable> and
+    <replaceable>e2</replaceable>.</para></listitem>
+
+  </varlistentry>
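
For instance (illustrative evaluations):

    $ nix-instantiate --eval --expr 'builtins.bitAnd 12 10'
    8
    $ nix-instantiate --eval --expr 'builtins.bitOr 12 10'
    14
    $ nix-instantiate --eval --expr 'builtins.bitXor 12 10'
    6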
+
+
   <varlistentry><term><varname>builtins</varname></term>
 
     <listitem><para>The set <varname>builtins</varname> contains all
@@ -280,8 +310,17 @@ with import (fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixo
 
 stdenv.mkDerivation { … }
 </programlisting>
+    </para>
+
+    <para>The fetched tarball is cached for a certain amount of time
+    (1 hour by default) in <filename>~/.cache/nix/tarballs/</filename>.
+    You can change the cache timeout either on the command line with
+    <option>--option tarball-ttl <replaceable>number of seconds</replaceable></option> or
+    in the Nix configuration file with this option:
+    <literal>tarball-ttl <replaceable>number of seconds to cache</replaceable></literal>.
+    </para>
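
For example (an illustrative command; the channel URL is only an example), a
refetch can be forced by lowering the TTL:

    $ nix-build --option tarball-ttl 0 -E \
        'with import (fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-18.03.tar.gz) {}; hello'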
 
-    Note that when obtaining the hash with <varname>nix-prefetch-url
+    <para>Note that when obtaining the hash with <varname>nix-prefetch-url
     </varname> the option <varname>--unpack</varname> is required.
     </para>
 
@@ -359,6 +398,84 @@ stdenv.mkDerivation { … }
           </listitem>
         </varlistentry>
       </variablelist>
+
+      <example>
+        <title>Fetching a private repository over SSH</title>
+        <programlisting>builtins.fetchGit {
+  url = "git@github.com:my-secret/repository.git";
+  ref = "master";
+  rev = "adab8b916a45068c044658c4158d81878f9ed1c3";
+}</programlisting>
+      </example>
+
+      <example>
+        <title>Fetching a repository's specific commit on an arbitrary branch</title>
+        <para>
+          If the revision you're looking for is in the default branch
+          of the git repository, you don't strictly need to specify
+          the branch name in the <varname>ref</varname> attribute.
+        </para>
+        <para>
+          However, if the revision you're looking for is in a
+          non-default branch, you will need to specify the
+          <varname>ref</varname> attribute as well.
+        </para>
+        <programlisting>builtins.fetchGit {
+  url = "https://github.com/nixos/nix.git";
+  rev = "841fcbd04755c7a2865c51c1e2d3b045976b7452";
+  ref = "1.11-maintenance";
+}</programlisting>
+        <note>
+          <para>
+            It is good practice to always specify the branch a
+            revision belongs to. Without the branch being specified,
+            the fetcher might fail if the default branch changes.
+            Additionally, it can be confusing to try a commit from a
+            non-default branch and see the fetch fail. If the branch
+            is specified, the failure is much more obvious.
+          </para>
+        </note>
+      </example>
+
+      <example>
+        <title>Fetching a repository's specific commit on the default branch</title>
+        <para>
+          If the revision you're looking for is in the default branch
+          of the git repository, you may omit the
+          <varname>ref</varname> attribute.
+        </para>
+        <programlisting>builtins.fetchGit {
+  url = "https://github.com/nixos/nix.git";
+  rev = "841fcbd04755c7a2865c51c1e2d3b045976b7452";
+}</programlisting>
+      </example>
+
+      <example>
+        <title>Fetching a tag</title>
+        <programlisting>builtins.fetchGit {
+  url = "https://github.com/nixos/nix.git";
+  ref = "tags/1.9";
+}</programlisting>
+        <note><para>Due to a bug (<link
+        xlink:href="https://github.com/NixOS/nix/issues/2385">#2385</link>),
+        only non-annotated tags can be fetched.</para></note>
+      </example>
+
+      <example>
+        <title>Fetching the latest version of a remote branch</title>
+        <para>
+          <function>builtins.fetchGit</function> can behave impurely
+          and fetch the latest version of a remote branch.
+        </para>
+        <note><para>Nix will refetch the branch in accordance with
+        <option>tarball-ttl</option>.</para></note>
+        <note><para>This behavior is disabled in
+        <emphasis>Pure evaluation mode</emphasis>.</para></note>
+        <programlisting>builtins.fetchGit {
+  url = "ssh://git@github.com/nixos/nix.git";
+  ref = "master";
+}</programlisting>
+      </example>
     </listitem>
   </varlistentry>
 
@@ -420,7 +537,9 @@ stdenv.mkDerivation {
       <literal>"unknown"</literal> (for other kinds of files such as
       device nodes or fifos — but note that those cannot be copied to
       the Nix store, so if the predicate returns
-      <literal>true</literal> for them, the copy will fail).</para>
+      <literal>true</literal> for them, the copy will fail). If you
+      exclude a directory, the entire corresponding subtree of
+      <replaceable>e2</replaceable> will be excluded.</para>
 
     </listitem>
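
For instance (an illustrative predicate), excluding a .git directory this way
excludes everything beneath it:

    builtins.filterSource
      (path: type: type != "directory" || baseNameOf path != ".git")
      ./.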
 
@@ -468,8 +587,7 @@ builtins.fromJSON ''{"x": [1, 2, 3], "y": null}''
 </programlisting>
 
     returns the value <literal>{ x = [ 1 2 3 ]; y = null;
-    }</literal>. Floating point numbers are not
-    supported.</para></listitem>
+    }</literal>.</para></listitem>
 
   </varlistentry>
 
@@ -1204,8 +1322,8 @@ in foo</programlisting>
     This is not allowed because it would cause a cyclic dependency in
     the computation of the cryptographic hashes for
     <varname>foo</varname> and <varname>bar</varname>.</para>
-    <para>It is also not possible to reference the result of a derivation. 
-    If you are using Nixpkgs, the <literal>writeTextFile</literal> function is able to 
+    <para>It is also not possible to reference the result of a derivation.
+    If you are using Nixpkgs, the <literal>writeTextFile</literal> function is able to
     do that.</para></listitem>
 
   </varlistentry>
diff --git a/doc/manual/expressions/language-constructs.xml b/doc/manual/expressions/language-constructs.xml
index 47d95f8a13e3..f961ed921bca 100644
--- a/doc/manual/expressions/language-constructs.xml
+++ b/doc/manual/expressions/language-constructs.xml
@@ -41,7 +41,7 @@ encountered</quote>).</para></footnote>.</para>
 </simplesect>
 
 
-<simplesect><title>Let-expressions</title>
+<simplesect xml:id="sect-let-expressions"><title>Let-expressions</title>
 
 <para>A let-expression allows you to define local variables for an
 expression.  For instance,
diff --git a/doc/manual/glossary/glossary.xml b/doc/manual/glossary/glossary.xml
index 4977825578f1..e3162ed8d469 100644
--- a/doc/manual/glossary/glossary.xml
+++ b/doc/manual/glossary/glossary.xml
@@ -1,5 +1,6 @@
 <appendix xmlns="http://docbook.org/ns/docbook"
-          xmlns:xlink="http://www.w3.org/1999/xlink">
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xml:id="part-glossary">
 
 <title>Glossary</title>
 
diff --git a/doc/manual/hacking.xml b/doc/manual/hacking.xml
index 183aed7adff2..b671811d3a30 100644
--- a/doc/manual/hacking.xml
+++ b/doc/manual/hacking.xml
@@ -30,7 +30,7 @@ To build Nix itself in this shell:
 [nix-shell]$ configurePhase
 [nix-shell]$ make
 </screen>
-To install it in <literal>$(pwd)/nix</literal> and test it:
+To install it in <literal>$(pwd)/inst</literal> and test it:
 <screen>
 [nix-shell]$ make install
 [nix-shell]$ make installcheck
diff --git a/doc/manual/installation/env-variables.xml b/doc/manual/installation/env-variables.xml
index fc39cdd9dfef..91ecd114f6d4 100644
--- a/doc/manual/installation/env-variables.xml
+++ b/doc/manual/installation/env-variables.xml
@@ -21,4 +21,51 @@ in your <filename>~/.profile</filename> (or similar), like this:</para>
 <screen>
 source <replaceable>prefix</replaceable>/etc/profile.d/nix.sh</screen>
 
-</chapter>
\ No newline at end of file
+<section xml:id="sec-nix-ssl-cert-file">
+
+<title><envar>NIX_SSL_CERT_FILE</envar></title>
+
+<para>If you need to specify a custom certificate bundle to account
+for an HTTPS-intercepting man-in-the-middle proxy, you must specify
+the path to the certificate bundle in the environment variable
+<envar>NIX_SSL_CERT_FILE</envar>.</para>
+
+
+<para>If you don't specify a <envar>NIX_SSL_CERT_FILE</envar>
+manually, Nix will install and use its own certificate
+bundle.</para>
+
+<procedure>
+  <step><para>Set the environment variable and install Nix</para>
+    <screen>
+$ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
+$ curl https://nixos.org/nix/install | sh
+</screen></step>
+
+  <step><para>In the shell profile and rc files (for example,
+  <filename>/etc/bashrc</filename>, <filename>/etc/zshrc</filename>),
+  add the following line:</para>
+<programlisting>
+export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
+</programlisting>
+</step>
+</procedure>
+
+<note><para>You must not add the export and then do the install, as
+the Nix installer will detect the presence of Nix configuration, and
+abort.</para></note>
+
+<section xml:id="sec-nix-ssl-cert-file-with-nix-daemon-and-macos">
+<title><envar>NIX_SSL_CERT_FILE</envar> with macOS and the Nix daemon</title>
+
+<para>On macOS you must specify the environment variable for the Nix
+daemon service, then restart it:</para>
+
+<screen>
+$ sudo launchctl setenv NIX_SSL_CERT_FILE /etc/ssl/my-certificate-bundle.crt
+$ sudo launchctl kickstart -k system/org.nixos.nix-daemon
+</screen>
+</section>
+
+</section>
+</chapter>
diff --git a/doc/manual/packages/s3-substituter.xml b/doc/manual/packages/s3-substituter.xml
new file mode 100644
index 000000000000..bcd91cfdbccd
--- /dev/null
+++ b/doc/manual/packages/s3-substituter.xml
@@ -0,0 +1,159 @@
+<?xml version="1.0" encoding="utf-8"?>
+<section xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="ssec-s3-substituter">
+
+<title>Serving a Nix store via AWS S3 or S3-compatible Service</title>
+
+<para>Nix has built-in support for storing and fetching store paths
+from Amazon S3 and S3 compatible services. This uses the same
+<emphasis>binary</emphasis> cache mechanism that Nix usually uses to
+fetch prebuilt binaries from <uri>cache.nixos.org</uri>.</para>
+
+<para>In this example we will use the bucket named
+<literal>example-bucket</literal>.</para>
+
+<section xml:id="ssec-s3-substituter-anonymous-reads">
+  <title>Anonymous Reads to your S3-compatible binary cache</title>
+
+  <para>If your binary cache is publicly accessible and does not
+  require authentication, the simplest and easiest way to use Nix with
+  your S3 compatible binary cache is to use the HTTP URL for that
+  cache.</para>
+
+  <para>For AWS S3 the binary cache URL for our example bucket will be
+  exactly <uri>https://example-bucket.s3.amazonaws.com</uri>. For S3
+  compatible binary caches you have to consult your software's
+  documentation.</para>
+
+  <para>Your bucket will need the following bucket policy:</para>
+
+  <programlisting>
+<![CDATA[
+{
+  "Id": "DirectReads",
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "AlowDirectReads",
+      "Action": [
+        "s3:GetObject"
+      ],
+      "Effect": "Allow",
+      "Resource": "arn:aws:s3:::example-bucket/*",
+      "Principal": "*"
+    }
+  ]
+}
+]]>
+</programlisting>
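
To consume such a cache (an illustrative nix.conf sketch; the key name is a
placeholder and the cache must be signed with the corresponding secret key):

    substituters = https://example-bucket.s3.amazonaws.com
    trusted-public-keys = example-bucket:<base64 public key>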
+</section>
+
+<section xml:id="ssec-s3-substituter-authenticated-reads">
+  <title>Authenticated Reads to your S3 binary cache</title>
+
+  <para>For AWS S3 the binary cache URL for our example bucket will be
+  exactly <uri>s3://example-bucket</uri>.</para>
+
+  <para>Nix will use the <link
+  xlink:href="https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default.">default
+  credential provider chain</link> for authenticating requests to
+  Amazon S3.</para>
+
+  <para>Nix supports authenticated writes to S3 compatible binary
+  caches, but only supports authenticated reads from Amazon S3.
+  Additionally, the following limitations are in place for
+  authenticated reads:</para>
+
+  <itemizedlist>
+    <listitem><para>The bucket must actually be hosted by Amazon S3 and
+    <emphasis>not</emphasis> an S3 compatible
+    service.</para></listitem>
+
+    <listitem><para>The bucket must be within the
+    <literal>us-east-1</literal> region.</para></listitem>
+
+    <listitem><para>The Amazon credentials, if stored in a credential
+    profile, must be stored in the <literal>default</literal>
+    profile.</para></listitem>
+  </itemizedlist>
+
+  <para>Your bucket will need a bucket policy allowing the desired
+  users to perform the <literal>s3:GetObject</literal> action on all
+  objects in the bucket.</para>
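
A minimal sketch of such a bucket policy (the principal ARN is a placeholder):

    {
      "Version": "2012-10-17",
      "Statement": [
        {
          "Sid": "AllowAuthenticatedReads",
          "Effect": "Allow",
          "Action": [ "s3:GetObject" ],
          "Resource": "arn:aws:s3:::example-bucket/*",
          "Principal": { "AWS": "arn:aws:iam::123456789012:user/nix-reader" }
        }
      ]
    }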
+</section>
+
+
+<section xml:id="ssec-s3-substituter-authenticated-writes">
+  <title>Authenticated Writes to your S3-compatible binary cache</title>
+
+  <para>Nix fully supports writing to Amazon S3 and S3
+  compatible buckets. The binary cache URL for our example bucket will
+  be <uri>s3://example-bucket</uri>.</para>
+
+  <para>Nix will use the <link
+  xlink:href="https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default.">default
+  credential provider chain</link> for authenticating requests to
+  Amazon S3.</para>
+
+  <para>The following options can be specified as URL parameters to
+  the S3 URL:</para>
+  <variablelist>
+    <varlistentry><term><literal>profile</literal></term>
+    <listitem>
+      <para>
+        The name of the AWS configuration profile to use. By default
+        Nix will use the <literal>default</literal> profile.
+      </para>
+    </listitem>
+    </varlistentry>
+
+    <varlistentry><term><literal>region</literal></term>
+    <listitem>
+      <para>
+        The region of the S3 bucket. <literal>us-east-1</literal> by
+        default.
+      </para>
+    </listitem>
+    </varlistentry>
+
+    <varlistentry><term><literal>endpoint</literal></term>
+    <listitem>
+      <para>
+        The URL to your S3-compatible service, for when not using
+        Amazon S3. Do not specify this value if you're using Amazon
+        S3.
+      </para>
+      <note><para>This endpoint must support HTTPS and will use
+      path-based addressing instead of virtual host based
+      addressing.</para></note>
+    </listitem>
+    </varlistentry>
+  </variablelist>
+
+  <example><title>Uploading with non-default credential profile for Amazon S3</title>
+    <para><command>nix copy --to 's3://example-bucket?profile=cache-upload' nixpkgs.hello</command></para>
+  </example>
+
+  <example><title>Uploading to an S3-Compatible Binary Cache</title>
+    <para><command>nix copy --to 's3://example-bucket?profile=cache-upload&amp;endpoint=minio.example.com' nixpkgs.hello</command></para>
+  </example>
+
+  <para>The user writing to the bucket will need to perform the
+  following actions against the bucket:</para>
+
+  <itemizedlist>
+    <listitem><para><literal>s3:ListBucket</literal></para></listitem>
+    <listitem><para><literal>s3:GetBucketLocation</literal></para></listitem>
+    <listitem><para><literal>s3:ListObjects</literal></para></listitem>
+    <listitem><para><literal>s3:GetObject</literal></para></listitem>
+    <listitem><para><literal>s3:PutObject</literal></para></listitem>
+    <listitem><para><literal>s3:ListBucketMultipartUploads</literal></para></listitem>
+    <listitem><para><literal>s3:CreateMultipartUpload</literal></para></listitem>
+    <listitem><para><literal>s3:ListMultipartUploadParts</literal></para></listitem>
+    <listitem><para><literal>s3:AbortMultipartUpload</literal></para></listitem>
+  </itemizedlist>
+</section>
+</section>
diff --git a/doc/manual/packages/sharing-packages.xml b/doc/manual/packages/sharing-packages.xml
index 8465c182ee72..bb6c52b8f8c1 100644
--- a/doc/manual/packages/sharing-packages.xml
+++ b/doc/manual/packages/sharing-packages.xml
@@ -15,5 +15,6 @@ packages between machines.</para>
 <xi:include href="binary-cache-substituter.xml" />
 <xi:include href="copy-closure.xml" />
 <xi:include href="ssh-substituter.xml" />
+<xi:include href="s3-substituter.xml" />
 
 </chapter>
diff --git a/doc/manual/release-notes/release-notes.xml b/doc/manual/release-notes/release-notes.xml
index b8392a647af9..ff4085cb792d 100644
--- a/doc/manual/release-notes/release-notes.xml
+++ b/doc/manual/release-notes/release-notes.xml
@@ -12,6 +12,7 @@
 </partintro>
 -->
 
+<xi:include href="rl-2.1.xml" />
 <xi:include href="rl-2.0.xml" />
 <xi:include href="rl-1.11.10.xml" />
 <xi:include href="rl-1.11.xml" />
diff --git a/doc/manual/release-notes/rl-2.1.xml b/doc/manual/release-notes/rl-2.1.xml
new file mode 100644
index 000000000000..9a5f37f6625d
--- /dev/null
+++ b/doc/manual/release-notes/rl-2.1.xml
@@ -0,0 +1,110 @@
+<section xmlns="http://docbook.org/ns/docbook"
+      xmlns:xlink="http://www.w3.org/1999/xlink"
+      xmlns:xi="http://www.w3.org/2001/XInclude"
+      version="5.0"
+      xml:id="ssec-relnotes-2.1">
+
+<title>Release 2.1 (2018-08-31)</title>
+
+<para>This is primarily a bug fix release. It also reduces memory
+consumption in certain situations. In addition, it has the following
+new features:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>New builtin functions:
+    <literal>builtins.bitAnd</literal>,
+    <literal>builtins.bitOr</literal>,
+    <literal>builtins.bitXor</literal>,
+    <literal>builtins.fromTOML</literal>,
+    <literal>builtins.concatMap</literal>,
+    <literal>builtins.mapAttrs</literal>.
+    </para>
+  </listitem>
+
+  <listitem>
+    <para>The S3 binary cache store now supports uploading NARs larger
+    than 5 GiB.</para>
+  </listitem>
+
+  <listitem>
+    <para>The S3 binary cache store now supports uploading to
+    S3-compatible services with the <literal>endpoint</literal>
+    option.</para>
+  </listitem>
+
+  <listitem>
+    <para>The flag <option>--fallback</option> is no longer required
+    to recover from disappeared NARs in binary caches.</para>
+  </listitem>
+
+  <listitem>
+    <para><command>nix-daemon</command> now respects
+    <option>--store</option>.</para>
+  </listitem>
+
+  <listitem>
+    <para><command>nix run</command> now respects
+    <varname>nix-support/propagated-user-env-packages</varname>.</para>
+  </listitem>
+
+</itemizedlist>
+
+<para>This release has contributions from
+
+Adrien Devresse,
+Aleksandr Pashkov,
+Alexandre Esteves,
+Amine Chikhaoui,
+Andrew Dunham,
+Asad Saeeduddin,
+aszlig,
+Ben Challenor,
+Ben Gamari,
+Benjamin Hipple,
+Bogdan Seniuc,
+Corey O'Connor,
+Daiderd Jordan,
+Daniel Peebles,
+Daniel Poelzleithner,
+Danylo Hlynskyi,
+Dmitry Kalinkin,
+Domen Kožar,
+Doug Beardsley,
+Eelco Dolstra,
+Erik Arvstedt,
+Félix Baylac-Jacqué,
+Gleb Peregud,
+Graham Christensen,
+Guillaume Maudoux,
+Ivan Kozik,
+John Arnold,
+Justin Humm,
+Linus Heckemann,
+Lorenzo Manacorda,
+Matthew Justin Bauer,
+Matthew O'Gorman,
+Maximilian Bosch,
+Michael Bishop,
+Michael Fiano,
+Michael Mercier,
+Michael Raskin,
+Michael Weiss,
+Nicolas Dudebout,
+Peter Simons,
+Ryan Trinkle,
+Samuel Dionne-Riel,
+Sean Seefried,
+Shea Levy,
+Symphorien Gibol,
+Tim Engler,
+Tim Sears,
+Tuomas Tynkkynen,
+volth,
+Will Dietz,
+Yorick van Pelt and
+zimbatm.
+</para>
+
+</section>
diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl
index aa7633a709c1..8432c95960ca 100755
--- a/maintainers/upload-release.pl
+++ b/maintainers/upload-release.pl
@@ -76,15 +76,22 @@ sub downloadFile {
 
     write_file("$dstFile.sha256", $sha256_expected);
 
+    if (! -e "$dstFile.asc") {
+        system("gpg2 --detach-sign --armor $dstFile") == 0 or die "unable to sign $dstFile\n";
+    }
+
     return ($dstFile, $sha256_expected);
 }
 
 downloadFile("tarball", "2"); # .tar.bz2
 my ($tarball, $tarballHash) = downloadFile("tarball", "3"); # .tar.xz
-my ($tarball_i686_linux, $tarball_i686_linux_hash) = downloadFile("binaryTarball.i686-linux", "1");
-my ($tarball_x86_64_linux, $tarball_x86_64_linux_hash) = downloadFile("binaryTarball.x86_64-linux", "1");
-my ($tarball_aarch64_linux, $tarball_aarch64_linux_hash) = downloadFile("binaryTarball.aarch64-linux", "1");
-my ($tarball_x86_64_darwin, $tarball_x86_64_darwin_hash) = downloadFile("binaryTarball.x86_64-darwin", "1");
+downloadFile("binaryTarball.i686-linux", "1");
+downloadFile("binaryTarball.x86_64-linux", "1");
+downloadFile("binaryTarball.aarch64-linux", "1");
+downloadFile("binaryTarball.x86_64-darwin", "1");
+downloadFile("installerScript", "1");
+
+exit if $version =~ /pre/;
 
 # Update Nixpkgs in a very hacky way.
 system("cd $nixpkgsDir && git pull") == 0 or die;
@@ -144,17 +151,6 @@ system("cd $siteDir && git pull") == 0 or die;
 write_file("$siteDir/nix-release.tt",
            "[%-\n" .
            "latestNixVersion = \"$version\"\n" .
-           "nix_hash_i686_linux = \"$tarball_i686_linux_hash\"\n" .
-           "nix_hash_x86_64_linux = \"$tarball_x86_64_linux_hash\"\n" .
-           "nix_hash_aarch64_linux = \"$tarball_aarch64_linux_hash\"\n" .
-           "nix_hash_x86_64_darwin = \"$tarball_x86_64_darwin_hash\"\n" .
            "-%]\n");
 
-system("cd $siteDir && nix-shell --run 'make nix/install nix/install.sig'") == 0 or die;
-
-copy("$siteDir/nix/install", "$siteDir/nix/install-$version") or die;
-copy("$siteDir/nix/install.sig", "$siteDir/nix/install-$version.sig") or die;
-
-system("cd $siteDir && git add nix/install-$version nix/install-$version.sig") == 0 or die;
-
 system("cd $siteDir && git commit -a -m 'Nix $version released'") == 0 or die;
diff --git a/misc/docker/Dockerfile b/misc/docker/Dockerfile
deleted file mode 100644
index 0f69d02df25f..000000000000
--- a/misc/docker/Dockerfile
+++ /dev/null
@@ -1,28 +0,0 @@
-FROM alpine
-
-# Enable HTTPS support in wget.
-RUN apk add --update openssl
-
-# Download Nix and install it into the system.
-RUN wget https://nixos.org/releases/nix/nix-2.0.2/nix-2.0.2-x86_64-linux.tar.bz2 \
-  && echo "d0c2492d7d8f824e3b1ace15a1a58f64a0a8faacc59936ebedfe18905d982d7c  nix-2.0.2-x86_64-linux.tar.bz2" | sha256sum -c \
-  && tar xjf nix-*-x86_64-linux.tar.bz2 \
-  && addgroup -g 30000 -S nixbld \
-  && for i in $(seq 1 30); do adduser -S -D -h /var/empty -g "Nix build user $i" -u $((30000 + i)) -G nixbld nixbld$i ; done \
-  && mkdir -m 0755 /nix && USER=root sh nix-*-x86_64-linux/install \
-  && ln -s /nix/var/nix/profiles/default/etc/profile.d/nix.sh /etc/profile.d/ \
-  && rm -r /nix-*-x86_64-linux \
-  && rm -r /var/cache/apk/*
-
-ONBUILD ENV \
-    ENV=/etc/profile \
-    PATH=/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:/bin:/sbin:/usr/bin:/usr/sbin \
-    GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt \
-    NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
-
-ENV \
-    ENV=/etc/profile \
-    PATH=/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:/bin:/sbin:/usr/bin:/usr/sbin \
-    GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt \
-    NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt \
-    NIX_PATH=/nix/var/nix/profiles/per-user/root/channels
diff --git a/misc/docker/README.md b/misc/docker/README.md
deleted file mode 100644
index 491be7408964..000000000000
--- a/misc/docker/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-To update https://hub.docker.com/r/nixos/nix/
-
-    $ docker build . -t nixos/nix:2.0
-    $ docker tag nixos/nix:2.0 nixos/nix:latest
-    $ docker push nixos/nix:latest
-    $ docker push nixos/nix:2.0
-
-Write access: @domenkozar
diff --git a/nix.spec.in b/nix.spec.in
index cd053dbfce5c..477768c6a68c 100644
--- a/nix.spec.in
+++ b/nix.spec.in
@@ -152,11 +152,11 @@ systemctl start  nix-daemon.socket
 %{_prefix}/lib/systemd/system/nix-daemon.service
 %endif
 %{_datadir}/nix
-%if ! %{without docgen}
-%{_mandir}/man1/*.1*
-%{_mandir}/man5/*.5*
-%{_mandir}/man8/*.8*
-%endif
+#%if ! %{without docgen}
+#%{_mandir}/man1/*.1*
+#%{_mandir}/man5/*.5*
+#%{_mandir}/man8/*.8*
+#%endif
 %config(noreplace) %{_sysconfdir}/profile.d/nix.sh
 %config(noreplace) %{_sysconfdir}/profile.d/nix-daemon.sh
 /nix
@@ -166,8 +166,8 @@ systemctl start  nix-daemon.socket
 %{_prefix}/lib/pkgconfig/*.pc
 
 
-%if ! %{without docgen}
-%files doc
-%docdir %{_defaultdocdir}/%{name}-doc-%{version}
-%{_defaultdocdir}/%{name}-doc-%{version}
-%endif
+#%if ! %{without docgen}
+#%files doc
+#%docdir %{_defaultdocdir}/%{name}-doc-%{version}
+#%{_defaultdocdir}/%{name}-doc-%{version}
+#%endif
diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs
index bbfb2934315b..ce553bb53ebc 100644
--- a/perl/lib/Nix/Store.xs
+++ b/perl/lib/Nix/Store.xs
@@ -27,7 +27,7 @@ static ref<Store> store()
     static std::shared_ptr<Store> _store;
     if (!_store) {
         try {
-            settings.loadConfFile();
+            loadConfFile();
             settings.lockCPU = false;
             _store = openStore();
         } catch (Error & e) {
diff --git a/release-common.nix b/release-common.nix
index d7fb8125f25e..ace2a4f9b91f 100644
--- a/release-common.nix
+++ b/release-common.nix
@@ -30,7 +30,7 @@ rec {
   });
 
   configureFlags =
-    [ "--disable-init-state"
+    [
       "--enable-gc"
     ] ++ lib.optionals stdenv.isLinux [
       "--with-sandbox-shell=${sh}/bin/busybox"
@@ -57,13 +57,18 @@ rec {
       git
       mercurial
     ]
-    ++ lib.optional stdenv.isLinux libseccomp
+    ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal]
     ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
     ++ lib.optional (stdenv.isLinux || stdenv.isDarwin)
-      (aws-sdk-cpp.override {
+      ((aws-sdk-cpp.override {
         apis = ["s3" "transfer"];
         customMemoryManagement = false;
-      });
+      }).overrideDerivation (args: {
+        patches = args.patches or [] ++ [ (fetchpatch {
+          url = https://github.com/edolstra/aws-sdk-cpp/commit/3e07e1f1aae41b4c8b340735ff9e8c735f0c063f.patch;
+          sha256 = "1pij0v449p166f9l29x7ppzk8j7g9k9mp15ilh5qxp29c7fnvxy2";
+        }) ];
+      }));
 
   perlDeps =
     [ perl
diff --git a/release.nix b/release.nix
index 37deb8e7ee38..e359ebcb2ce3 100644
--- a/release.nix
+++ b/release.nix
@@ -67,6 +67,14 @@ let
 
         buildInputs = buildDeps;
 
+        preConfigure =
+          # Copy libboost_context so we don't get all of Boost in our closure.
+          # https://github.com/NixOS/nixpkgs/issues/45462
+          ''
+            mkdir -p $out/lib
+            cp ${boost}/lib/libboost_context* $out/lib
+          '';
+
         configureFlags = configureFlags ++
           [ "--sysconfdir=/etc" ];
 
@@ -74,8 +82,6 @@ let
 
         makeFlags = "profiledir=$(out)/etc/profile.d";
 
-        preBuild = "unset NIX_INDENT_MAKE";
-
         installFlags = "sysconfdir=$(out)/etc";
 
         doInstallCheck = true;
@@ -103,8 +109,6 @@ let
         enableParallelBuilding = true;
 
         postUnpack = "sourceRoot=$sourceRoot/perl";
-
-        preBuild = "unset NIX_INDENT_MAKE";
       });
 
 
@@ -189,10 +193,6 @@ let
 
         buildInputs = buildDeps;
 
-        configureFlags = ''
-          --disable-init-state
-        '';
-
         dontInstall = false;
 
         doInstallCheck = true;
@@ -206,14 +206,14 @@ let
       };
 
 
-    rpm_fedora27x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora27x86_64) [ ];
+    #rpm_fedora27x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora27x86_64) [ ];
 
 
     #deb_debian8i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian8i386) [ "libsodium-dev" ] [ "libsodium13" ];
     #deb_debian8x86_64 = makeDeb_x86_64 (diskImageFunsFun: diskImageFunsFun.debian8x86_64) [ "libsodium-dev" ] [ "libsodium13" ];
 
-    deb_ubuntu1710i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1710i386) [ ] [ "libsodium18" ];
-    deb_ubuntu1710x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1710x86_64) [ ] [ "libsodium18" "libboost-context1.62.0" ];
+    #deb_ubuntu1710i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1710i386) [ ] [ "libsodium18" ];
+    #deb_ubuntu1710x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1710x86_64) [ ] [ "libsodium18" "libboost-context1.62.0" ];
 
 
     # System tests.
@@ -241,6 +241,7 @@ let
         { diskImage = vmTools.diskImages.ubuntu1204x86_64;
         }
         ''
+          set -x
           useradd -m alice
           su - alice -c 'tar xf ${binaryTarball.x86_64-linux}/*.tar.*'
           mkdir /dest-nix
@@ -249,6 +250,17 @@ let
           su - alice -c '_NIX_INSTALLER_TEST=1 ./nix-*/install'
           su - alice -c 'nix-store --verify'
           su - alice -c 'PAGER= nix-store -qR ${build.x86_64-linux}'
+
+          # Check whether 'nix upgrade-nix' works.
+          cat > /tmp/paths.nix <<EOF
+          {
+            x86_64-linux = "${build.x86_64-linux}";
+          }
+          EOF
+          su - alice -c 'nix upgrade-nix -vvv --nix-store-paths-url file:///tmp/paths.nix'
+          (! [ -L /home/alice/.profile-1-link ])
+          su - alice -c 'PAGER= nix-store -qR ${build.x86_64-linux}'
+
           mkdir -p $out/nix-support
           touch $out/nix-support/hydra-build-products
           umount /nix
@@ -275,6 +287,24 @@ let
         '';
 
 
+    installerScript =
+      pkgs.runCommand "installer-script"
+        { buildInputs = [ build.x86_64-linux ];
+        }
+        ''
+          mkdir -p $out/nix-support
+
+          substitute ${./scripts/install.in} $out/install \
+            ${pkgs.lib.concatMapStrings
+              (system: "--replace '@binaryTarball_${system}@' $(nix hash-file --type sha256 ${binaryTarball.${system}}/*.tar.bz2) ")
+              [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]
+            } \
+            --replace '@nixVersion@' ${build.x86_64-linux.src.version}
+
+          echo "file installer $out/install" >> $out/nix-support/hydra-build-products
+        '';
+
+
     # Aggregate job containing the release-critical jobs.
     release = pkgs.releaseTools.aggregate {
       name = "nix-${tarball.version}";
@@ -284,14 +314,17 @@ let
           build.i686-linux
           build.x86_64-darwin
           build.x86_64-linux
+          build.aarch64-linux
           binaryTarball.i686-linux
           binaryTarball.x86_64-darwin
           binaryTarball.x86_64-linux
+          binaryTarball.aarch64-linux
           tests.remoteBuilds
           tests.nix-copy-closure
           tests.binaryTarball
           tests.evalNixpkgs
           tests.evalNixOS
+          installerScript
         ];
     };
 
@@ -311,7 +344,7 @@ let
       src = jobs.tarball;
       diskImage = (diskImageFun vmTools.diskImageFuns)
         { extraPackages =
-            [ "sqlite" "sqlite-devel" "bzip2-devel" "libcurl-devel" "openssl-devel" "xz-devel" "libseccomp-devel" "libsodium-devel" "boost-devel" ]
+            [ "sqlite" "sqlite-devel" "bzip2-devel" "libcurl-devel" "openssl-devel" "xz-devel" "libseccomp-devel" "libsodium-devel" "boost-devel" "bison" "flex" ]
             ++ extraPackages; };
       # At most 2047MB can be simulated in qemu-system-i386
       memSize = 2047;
diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh
index 5f6542355e0c..6ee8dd48582e 100644
--- a/scripts/install-multi-user.sh
+++ b/scripts/install-multi-user.sh
@@ -727,11 +727,17 @@ setup_default_profile() {
     _sudo "to installing a bootstrapping Nix in to the default Profile" \
           HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX"
 
-    _sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \
-          HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
+    if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ]; then
+        _sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \
+              HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
+        export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
+    fi
 
+    # Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
+    # otherwise it will be lost in environments where sudo doesn't pass
+    # all the environment variables by default.
     _sudo "to update the default channel in the default profile" \
-          HOME="$ROOT_HOME" NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs
+          HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs
 }
 
 
diff --git a/scripts/install.in b/scripts/install.in
new file mode 100644
index 000000000000..26ab85ba0992
--- /dev/null
+++ b/scripts/install.in
@@ -0,0 +1,67 @@
+#!/bin/sh
+
+# This script installs the Nix package manager on your system by
+# downloading a binary distribution and running its installer script
+# (which in turn creates and populates /nix).
+
+{ # Prevent execution if this script was only partially downloaded
+oops() {
+    echo "$0:" "$@" >&2
+    exit 1
+}
+
+tmpDir="$(mktemp -d -t nix-binary-tarball-unpack.XXXXXXXXXX || \
+          oops "Can\'t create temporary directory for downloading the Nix binary tarball")"
+cleanup() {
+    rm -rf "$tmpDir"
+}
+trap cleanup EXIT INT QUIT TERM
+
+require_util() {
+    type "$1" > /dev/null 2>&1 || which "$1" > /dev/null 2>&1 ||
+        oops "you do not have '$1' installed, which I need to $2"
+}
+
+case "$(uname -s).$(uname -m)" in
+    Linux.x86_64) system=x86_64-linux; hash=@binaryTarball_x86_64-linux@;;
+    Linux.i?86) system=i686-linux; hash=@binaryTarball_i686-linux@;;
+    Linux.aarch64) system=aarch64-linux; hash=@binaryTarball_aarch64-linux@;;
+    Darwin.x86_64) system=x86_64-darwin; hash=@binaryTarball_x86_64-darwin@;;
+    *) oops "sorry, there is no binary distribution of Nix for your platform";;
+esac
+
+url="https://nixos.org/releases/nix/nix-@nixVersion@/nix-@nixVersion@-$system.tar.bz2"
+
+tarball="$tmpDir/$(basename "$tmpDir/nix-@nixVersion@-$system.tar.bz2")"
+
+require_util curl "download the binary tarball"
+require_util bzcat "decompress the binary tarball"
+require_util tar "unpack the binary tarball"
+
+echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..."
+curl -L "$url" -o "$tarball" || oops "failed to download '$url'"
+
+if type sha256sum > /dev/null 2>&1; then
+    hash2="$(sha256sum -b "$tarball" | cut -c1-64)"
+elif type shasum > /dev/null 2>&1; then
+    hash2="$(shasum -a 256 -b "$tarball" | cut -c1-64)"
+elif type openssl > /dev/null 2>&1; then
+    hash2="$(openssl dgst -r -sha256 "$tarball" | cut -c1-64)"
+else
+    oops "cannot verify the SHA-256 hash of '$url'; you need one of 'shasum', 'sha256sum', or 'openssl'"
+fi
+
+if [ "$hash" != "$hash2" ]; then
+    oops "SHA-256 hash mismatch in '$url'; expected $hash, got $hash2"
+fi
+
+unpack=$tmpDir/unpack
+mkdir -p "$unpack"
+< "$tarball" bzcat | tar -xf - -C "$unpack" || oops "failed to unpack '$url'"
+
+script=$(echo "$unpack"/*/install)
+
+[ -e "$script" ] || oops "installation script is missing from the binary tarball!"
+"$script" "$@"
+
+} # End of wrapping
diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in
index 43c7156062de..1be9a0755d85 100644
--- a/scripts/nix-profile-daemon.sh.in
+++ b/scripts/nix-profile-daemon.sh.in
@@ -1,5 +1,5 @@
 # Only execute this file once per shell.
-if [ -n "$__ETC_PROFILE_NIX_SOURCED" ]; then return; fi
+if [ -n "${__ETC_PROFILE_NIX_SOURCED:-}" ]; then return; fi
 __ETC_PROFILE_NIX_SOURCED=1
 
 # Set up secure multi-user builds: non-root users build through the
@@ -49,6 +49,23 @@ if test -w $HOME; then
   fi
 fi
 
-export NIX_SSL_CERT_FILE="@localstatedir@/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
-export NIX_PATH="@localstatedir@/nix/profiles/per-user/root/channels"
+
+# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work.
+if [ ! -z "${NIX_SSL_CERT_FILE:-}" ]; then
+    : # Allow users to override the NIX_SSL_CERT_FILE
+elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch
+    export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
+elif [ -e /etc/ssl/ca-bundle.pem ]; then # openSUSE Tumbleweed
+    export NIX_SSL_CERT_FILE=/etc/ssl/ca-bundle.pem
+elif [ -e /etc/ssl/certs/ca-bundle.crt ]; then # Old NixOS
+    export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt
+elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS
+    export NIX_SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt
+elif [ -e "$NIX_USER_PROFILE_DIR/etc/ssl/certs/ca-bundle.crt" ]; then # fall back to cacert in the user's Nix profile
+    export NIX_SSL_CERT_FILE=$NIX_USER_PROFILE_DIR/etc/ssl/certs/ca-bundle.crt
+elif [ -e "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt" ]; then # fall back to cacert in the default Nix profile
+    export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
+fi
+
+export NIX_PATH="nixpkgs=@localstatedir@/nix/profiles/per-user/root/channels/nixpkgs:@localstatedir@/nix/profiles/per-user/root/channels"
 export PATH="$HOME/.nix-profile/bin:$HOME/.nix-profile/lib/kde4/libexec:@localstatedir@/nix/profiles/default/bin:@localstatedir@/nix/profiles/default:@localstatedir@/nix/profiles/default/lib/kde4/libexec:$PATH"
diff --git a/src/cpptoml/LICENSE b/src/cpptoml/LICENSE
new file mode 100644
index 000000000000..8802c4fa5a36
--- /dev/null
+++ b/src/cpptoml/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2014 Chase Geigle
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/cpptoml/cpptoml.h b/src/cpptoml/cpptoml.h
new file mode 100644
index 000000000000..07de010b1520
--- /dev/null
+++ b/src/cpptoml/cpptoml.h
@@ -0,0 +1,3457 @@
+/**
+ * @file cpptoml.h
+ * @author Chase Geigle
+ * @date May 2013
+ */
+
+#ifndef _CPPTOML_H_
+#define _CPPTOML_H_
+
+#include <algorithm>
+#include <cassert>
+#include <clocale>
+#include <cstdint>
+#include <cstring>
+#include <fstream>
+#include <iomanip>
+#include <map>
+#include <memory>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#if __cplusplus > 201103L
+#define CPPTOML_DEPRECATED(reason) [[deprecated(reason)]]
+#elif defined(__clang__)
+#define CPPTOML_DEPRECATED(reason) __attribute__((deprecated(reason)))
+#elif defined(__GNUG__)
+#define CPPTOML_DEPRECATED(reason) __attribute__((deprecated))
+#elif defined(_MSC_VER)
+#if _MSC_VER < 1910
+#define CPPTOML_DEPRECATED(reason) __declspec(deprecated)
+#else
+#define CPPTOML_DEPRECATED(reason) [[deprecated(reason)]]
+#endif
+#endif
+
+namespace cpptoml
+{
+class writer; // forward declaration
+class base;   // forward declaration
+#if defined(CPPTOML_USE_MAP)
+// a std::map will ensure that entries a sorted, albeit at a slight
+// performance penalty relative to the (default) unordered_map
+using string_to_base_map = std::map<std::string, std::shared_ptr<base>>;
+#else
+// by default an unordered_map is used for best performance as the
+// toml specification does not require entries to be sorted
+using string_to_base_map
+    = std::unordered_map<std::string, std::shared_ptr<base>>;
+#endif
+
+// if defined, `base` will retain type information in form of an enum class
+// such that static_cast can be used instead of dynamic_cast
+// #define CPPTOML_NO_RTTI
+
+template <class T>
+class option
+{
+  public:
+    option() : empty_{true}
+    {
+        // nothing
+    }
+
+    option(T value) : empty_{false}, value_(std::move(value))
+    {
+        // nothing
+    }
+
+    explicit operator bool() const
+    {
+        return !empty_;
+    }
+
+    const T& operator*() const
+    {
+        return value_;
+    }
+
+    const T* operator->() const
+    {
+        return &value_;
+    }
+
+    const T& value_or(const T& alternative) const
+    {
+        if (!empty_)
+            return value_;
+        return alternative;
+    }
+
+  private:
+    bool empty_;
+    T value_;
+};
+
+struct local_date
+{
+    int year = 0;
+    int month = 0;
+    int day = 0;
+};
+
+struct local_time
+{
+    int hour = 0;
+    int minute = 0;
+    int second = 0;
+    int microsecond = 0;
+};
+
+struct zone_offset
+{
+    int hour_offset = 0;
+    int minute_offset = 0;
+};
+
+struct local_datetime : local_date, local_time
+{
+};
+
+struct offset_datetime : local_datetime, zone_offset
+{
+    static inline struct offset_datetime from_zoned(const struct tm& t)
+    {
+        offset_datetime dt;
+        dt.year = t.tm_year + 1900;
+        dt.month = t.tm_mon + 1;
+        dt.day = t.tm_mday;
+        dt.hour = t.tm_hour;
+        dt.minute = t.tm_min;
+        dt.second = t.tm_sec;
+
+        char buf[16];
+        strftime(buf, 16, "%z", &t);
+
+        int offset = std::stoi(buf);
+        dt.hour_offset = offset / 100;
+        dt.minute_offset = offset % 100;
+        return dt;
+    }
+
+    CPPTOML_DEPRECATED("from_local has been renamed to from_zoned")
+    static inline struct offset_datetime from_local(const struct tm& t)
+    {
+        return from_zoned(t);
+    }
+
+    static inline struct offset_datetime from_utc(const struct tm& t)
+    {
+        offset_datetime dt;
+        dt.year = t.tm_year + 1900;
+        dt.month = t.tm_mon + 1;
+        dt.day = t.tm_mday;
+        dt.hour = t.tm_hour;
+        dt.minute = t.tm_min;
+        dt.second = t.tm_sec;
+        return dt;
+    }
+};
+
+CPPTOML_DEPRECATED("datetime has been renamed to offset_datetime")
+typedef offset_datetime datetime;
+
+class fill_guard
+{
+  public:
+    fill_guard(std::ostream& os) : os_(os), fill_{os.fill()}
+    {
+        // nothing
+    }
+
+    ~fill_guard()
+    {
+        os_.fill(fill_);
+    }
+
+  private:
+    std::ostream& os_;
+    std::ostream::char_type fill_;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const local_date& dt)
+{
+    fill_guard g{os};
+    os.fill('0');
+
+    using std::setw;
+    os << setw(4) << dt.year << "-" << setw(2) << dt.month << "-" << setw(2)
+       << dt.day;
+
+    return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const local_time& ltime)
+{
+    fill_guard g{os};
+    os.fill('0');
+
+    using std::setw;
+    os << setw(2) << ltime.hour << ":" << setw(2) << ltime.minute << ":"
+       << setw(2) << ltime.second;
+
+    if (ltime.microsecond > 0)
+    {
+        os << ".";
+        int power = 100000;
+        for (int curr_us = ltime.microsecond; curr_us; power /= 10)
+        {
+            auto num = curr_us / power;
+            os << num;
+            curr_us -= num * power;
+        }
+    }
+
+    return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const zone_offset& zo)
+{
+    fill_guard g{os};
+    os.fill('0');
+
+    using std::setw;
+
+    if (zo.hour_offset != 0 || zo.minute_offset != 0)
+    {
+        if (zo.hour_offset > 0)
+        {
+            os << "+";
+        }
+        else
+        {
+            os << "-";
+        }
+        os << setw(2) << std::abs(zo.hour_offset) << ":" << setw(2)
+           << std::abs(zo.minute_offset);
+    }
+    else
+    {
+        os << "Z";
+    }
+
+    return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const local_datetime& dt)
+{
+    return os << static_cast<const local_date&>(dt) << "T"
+              << static_cast<const local_time&>(dt);
+}
+
+inline std::ostream& operator<<(std::ostream& os, const offset_datetime& dt)
+{
+    return os << static_cast<const local_datetime&>(dt)
+              << static_cast<const zone_offset&>(dt);
+}
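+
+// Output sketch (illustrative, assumes the caller includes <iostream>):
+// streaming an offset_datetime composes the three operators above into an
+// RFC 3339-style string; a zero offset prints as "Z".
+//
+//     cpptoml::offset_datetime dt;            // all fields default to 0
+//     dt.year = 2018; dt.month = 5; dt.day = 1;
+//     dt.hour = 10;   dt.minute = 30;
+//     dt.hour_offset = 2;
+//     std::cout << dt;                        // "2018-05-01T10:30:00+02:00"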
+
+template <class T, class... Ts>
+struct is_one_of;
+
+template <class T, class V>
+struct is_one_of<T, V> : std::is_same<T, V>
+{
+};
+
+template <class T, class V, class... Ts>
+struct is_one_of<T, V, Ts...>
+{
+    const static bool value
+        = std::is_same<T, V>::value || is_one_of<T, Ts...>::value;
+};
+
+template <class T>
+class value;
+
+template <class T>
+struct valid_value
+    : is_one_of<T, std::string, int64_t, double, bool, local_date, local_time,
+                local_datetime, offset_datetime>
+{
+};
+
+template <class T, class Enable = void>
+struct value_traits;
+
+template <class T>
+struct valid_value_or_string_convertible
+{
+
+    const static bool value = valid_value<typename std::decay<T>::type>::value
+                              || std::is_convertible<T, std::string>::value;
+};
+
+template <class T>
+struct value_traits<T, typename std::
+                           enable_if<valid_value_or_string_convertible<T>::
+                                         value>::type>
+{
+    using value_type = typename std::
+        conditional<valid_value<typename std::decay<T>::type>::value,
+                    typename std::decay<T>::type, std::string>::type;
+
+    using type = value<value_type>;
+
+    static value_type construct(T&& val)
+    {
+        return value_type(val);
+    }
+};
+
+template <class T>
+struct value_traits<T,
+                    typename std::
+                        enable_if<!valid_value_or_string_convertible<T>::value
+                                  && std::is_floating_point<
+                                         typename std::decay<T>::type>::value>::
+                            type>
+{
+    using value_type = typename std::decay<T>::type;
+
+    using type = value<double>;
+
+    static value_type construct(T&& val)
+    {
+        return value_type(val);
+    }
+};
+
+template <class T>
+struct value_traits<T,
+                    typename std::
+                        enable_if<!valid_value_or_string_convertible<T>::value
+                                  && std::is_signed<typename std::decay<T>::
+                                                        type>::value>::type>
+{
+    using value_type = int64_t;
+
+    using type = value<int64_t>;
+
+    static value_type construct(T&& val)
+    {
+        if (val < (std::numeric_limits<int64_t>::min)())
+            throw std::underflow_error{"constructed value cannot be "
+                                       "represented by a 64-bit signed "
+                                       "integer"};
+
+        if (val > (std::numeric_limits<int64_t>::max)())
+            throw std::overflow_error{"constructed value cannot be represented "
+                                      "by a 64-bit signed integer"};
+
+        return static_cast<int64_t>(val);
+    }
+};
+
+template <class T>
+struct value_traits<T,
+                    typename std::
+                        enable_if<!valid_value_or_string_convertible<T>::value
+                                  && std::is_unsigned<typename std::decay<T>::
+                                                          type>::value>::type>
+{
+    using value_type = int64_t;
+
+    using type = value<int64_t>;
+
+    static value_type construct(T&& val)
+    {
+        if (val > static_cast<uint64_t>((std::numeric_limits<int64_t>::max)()))
+            throw std::overflow_error{"constructed value cannot be represented "
+                                      "by a 64-bit signed integer"};
+
+        return static_cast<int64_t>(val);
+    }
+};
+
+class array;
+class table;
+class table_array;
+
+template <class T>
+struct array_of_trait
+{
+    using return_type = option<std::vector<T>>;
+};
+
+template <>
+struct array_of_trait<array>
+{
+    using return_type = option<std::vector<std::shared_ptr<array>>>;
+};
+
+template <class T>
+inline std::shared_ptr<typename value_traits<T>::type> make_value(T&& val);
+inline std::shared_ptr<array> make_array();
+template <class T>
+inline std::shared_ptr<T> make_element();
+inline std::shared_ptr<table> make_table();
+inline std::shared_ptr<table_array> make_table_array();
+
+#if defined(CPPTOML_NO_RTTI)
+/// Base type used to store underlying data type explicitly if RTTI is disabled
+enum class base_type
+{
+    NONE,
+    STRING,
+    LOCAL_TIME,
+    LOCAL_DATE,
+    LOCAL_DATETIME,
+    OFFSET_DATETIME,
+    INT,
+    FLOAT,
+    BOOL,
+    TABLE,
+    ARRAY,
+    TABLE_ARRAY
+};
+
+/// Type traits class to convert C++ types to enum member
+template <class T>
+struct base_type_traits;
+
+template <>
+struct base_type_traits<std::string>
+{
+    static const base_type type = base_type::STRING;
+};
+
+template <>
+struct base_type_traits<local_time>
+{
+    static const base_type type = base_type::LOCAL_TIME;
+};
+
+template <>
+struct base_type_traits<local_date>
+{
+    static const base_type type = base_type::LOCAL_DATE;
+};
+
+template <>
+struct base_type_traits<local_datetime>
+{
+    static const base_type type = base_type::LOCAL_DATETIME;
+};
+
+template <>
+struct base_type_traits<offset_datetime>
+{
+    static const base_type type = base_type::OFFSET_DATETIME;
+};
+
+template <>
+struct base_type_traits<int64_t>
+{
+    static const base_type type = base_type::INT;
+};
+
+template <>
+struct base_type_traits<double>
+{
+    static const base_type type = base_type::FLOAT;
+};
+
+template <>
+struct base_type_traits<bool>
+{
+    static const base_type type = base_type::BOOL;
+};
+
+template <>
+struct base_type_traits<table>
+{
+    static const base_type type = base_type::TABLE;
+};
+
+template <>
+struct base_type_traits<array>
+{
+    static const base_type type = base_type::ARRAY;
+};
+
+template <>
+struct base_type_traits<table_array>
+{
+    static const base_type type = base_type::TABLE_ARRAY;
+};
+#endif
+
+/**
+ * A generic base TOML value used for type erasure.
+ */
+class base : public std::enable_shared_from_this<base>
+{
+  public:
+    virtual ~base() = default;
+
+    virtual std::shared_ptr<base> clone() const = 0;
+
+    /**
+     * Determines if the given TOML element is a value.
+     */
+    virtual bool is_value() const
+    {
+        return false;
+    }
+
+    /**
+     * Determines if the given TOML element is a table.
+     */
+    virtual bool is_table() const
+    {
+        return false;
+    }
+
+    /**
+     * Converts the TOML element into a table.
+     */
+    std::shared_ptr<table> as_table()
+    {
+        if (is_table())
+            return std::static_pointer_cast<table>(shared_from_this());
+        return nullptr;
+    }
+    /**
+     * Determines if the TOML element is an array of "leaf" elements.
+     */
+    virtual bool is_array() const
+    {
+        return false;
+    }
+
+    /**
+     * Converts the TOML element to an array.
+     */
+    std::shared_ptr<array> as_array()
+    {
+        if (is_array())
+            return std::static_pointer_cast<array>(shared_from_this());
+        return nullptr;
+    }
+
+    /**
+     * Determines if the given TOML element is an array of tables.
+     */
+    virtual bool is_table_array() const
+    {
+        return false;
+    }
+
+    /**
+     * Converts the TOML element into a table array.
+     */
+    std::shared_ptr<table_array> as_table_array()
+    {
+        if (is_table_array())
+            return std::static_pointer_cast<table_array>(shared_from_this());
+        return nullptr;
+    }
+
+    /**
+     * Attempts to coerce the TOML element into a concrete TOML value
+     * of type T.
+     */
+    template <class T>
+    std::shared_ptr<value<T>> as();
+
+    template <class T>
+    std::shared_ptr<const value<T>> as() const;
+
+    template <class Visitor, class... Args>
+    void accept(Visitor&& visitor, Args&&... args) const;
+
+#if defined(CPPTOML_NO_RTTI)
+    base_type type() const
+    {
+      return type_;
+    }
+
+  protected:
+    base(const base_type t) : type_(t)
+    {
+        // nothing
+    }
+
+  private:
+    const base_type type_ = base_type::NONE;
+
+#else
+  protected:
+    base()
+    {
+        // nothing
+    }
+#endif
+};
+
+/**
+ * A concrete TOML value representing the "leaves" of the "tree".
+ */
+template <class T>
+class value : public base
+{
+    struct make_shared_enabler
+    {
+        // nothing; this is a private key accessible only to friends
+    };
+
+    template <class U>
+    friend std::shared_ptr<typename value_traits<U>::type>
+    cpptoml::make_value(U&& val);
+
+  public:
+    static_assert(valid_value<T>::value, "invalid value type");
+
+    std::shared_ptr<base> clone() const override;
+
+    value(const make_shared_enabler&, const T& val) : value(val)
+    {
+        // nothing; note that users cannot actually invoke this function
+        // because they lack access to the make_shared_enabler.
+    }
+
+    bool is_value() const override
+    {
+        return true;
+    }
+
+    /**
+     * Gets the data associated with this value.
+     */
+    T& get()
+    {
+        return data_;
+    }
+
+    /**
+     * Gets the data associated with this value. Const version.
+     */
+    const T& get() const
+    {
+        return data_;
+    }
+
+  private:
+    T data_;
+
+    /**
+     * Constructs a value from the given data.
+     */
+#if defined(CPPTOML_NO_RTTI)
+    value(const T& val) : base(base_type_traits<T>::type), data_(val)
+    {
+    }
+#else
+    value(const T& val) : data_(val)
+    {
+    }
+#endif
+
+    value(const value& val) = delete;
+    value& operator=(const value& val) = delete;
+};
+
+template <class T>
+std::shared_ptr<typename value_traits<T>::type> make_value(T&& val)
+{
+    using value_type = typename value_traits<T>::type;
+    using enabler = typename value_type::make_shared_enabler;
+    return std::make_shared<value_type>(
+        enabler{}, value_traits<T>::construct(std::forward<T>(val)));
+}
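+
+// Type-mapping sketch for make_value (illustrative): value_traits collapses
+// arithmetic inputs onto the TOML-supported types and routes anything
+// string-convertible to value<std::string>.
+//
+//     auto i = cpptoml::make_value(7);        // shared_ptr<value<int64_t>>
+//     auto f = cpptoml::make_value(2.5f);     // shared_ptr<value<double>>
+//     auto s = cpptoml::make_value("hello");  // shared_ptr<value<std::string>>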
+
+template <class T>
+inline std::shared_ptr<value<T>> base::as()
+{
+#if defined(CPPTOML_NO_RTTI)
+    if (type() == base_type_traits<T>::type)
+        return std::static_pointer_cast<value<T>>(shared_from_this());
+    else
+        return nullptr;
+#else
+    return std::dynamic_pointer_cast<value<T>>(shared_from_this());
+#endif
+}
+
+// special case value<double> to allow getting an integer parameter as a
+// double value
+template <>
+inline std::shared_ptr<value<double>> base::as()
+{
+#if defined(CPPTOML_NO_RTTI)
+    if (type() == base_type::FLOAT)
+        return std::static_pointer_cast<value<double>>(shared_from_this());
+
+    if (type() == base_type::INT)
+    {
+        auto v = std::static_pointer_cast<value<int64_t>>(shared_from_this());
+        return make_value<double>(static_cast<double>(v->get()));
+    }
+#else
+    if (auto v = std::dynamic_pointer_cast<value<double>>(shared_from_this()))
+        return v;
+
+    if (auto v = std::dynamic_pointer_cast<value<int64_t>>(shared_from_this()))
+        return make_value<double>(static_cast<double>(v->get()));
+#endif
+
+    return nullptr;
+}
+
+template <class T>
+inline std::shared_ptr<const value<T>> base::as() const
+{
+#if defined(CPPTOML_NO_RTTI)
+    if (type() == base_type_traits<T>::type)
+        return std::static_pointer_cast<const value<T>>(shared_from_this());
+    else
+        return nullptr;
+#else
+    return std::dynamic_pointer_cast<const value<T>>(shared_from_this());
+#endif
+}
+
+// special case value<double> to allow getting an integer parameter as a
+// double value
+template <>
+inline std::shared_ptr<const value<double>> base::as() const
+{
+#if defined(CPPTOML_NO_RTTI)
+    if (type() == base_type::FLOAT)
+        return std::static_pointer_cast<const value<double>>(shared_from_this());
+
+    if (type() == base_type::INT)
+    {
+        auto v = as<int64_t>();
+        // the below has to be a non-const value<double> due to a bug in
+        // libc++: https://llvm.org/bugs/show_bug.cgi?id=18843
+        return make_value<double>(static_cast<double>(v->get()));
+    }
+#else
+    if (auto v
+        = std::dynamic_pointer_cast<const value<double>>(shared_from_this()))
+        return v;
+
+    if (auto v = as<int64_t>())
+    {
+        // the below has to be a non-const value<double> due to a bug in
+        // libc++: https://llvm.org/bugs/show_bug.cgi?id=18843
+        return make_value<double>(static_cast<double>(v->get()));
+    }
+#endif
+
+    return nullptr;
+}
+
+/**
+ * Exception class for array insertion errors.
+ */
+class array_exception : public std::runtime_error
+{
+  public:
+    array_exception(const std::string& err) : std::runtime_error{err}
+    {
+    }
+};
+
+class array : public base
+{
+  public:
+    friend std::shared_ptr<array> make_array();
+
+    std::shared_ptr<base> clone() const override;
+
+    virtual bool is_array() const override
+    {
+        return true;
+    }
+
+    using size_type = std::size_t;
+
+    /**
+     * arrays can be iterated over
+     */
+    using iterator = std::vector<std::shared_ptr<base>>::iterator;
+
+    /**
+     * arrays can be iterated over.  Const version.
+     */
+    using const_iterator = std::vector<std::shared_ptr<base>>::const_iterator;
+
+    iterator begin()
+    {
+        return values_.begin();
+    }
+
+    const_iterator begin() const
+    {
+        return values_.begin();
+    }
+
+    iterator end()
+    {
+        return values_.end();
+    }
+
+    const_iterator end() const
+    {
+        return values_.end();
+    }
+
+    /**
+     * Obtains the array (vector) of base values.
+     */
+    std::vector<std::shared_ptr<base>>& get()
+    {
+        return values_;
+    }
+
+    /**
+     * Obtains the array (vector) of base values. Const version.
+     */
+    const std::vector<std::shared_ptr<base>>& get() const
+    {
+        return values_;
+    }
+
+    std::shared_ptr<base> at(size_t idx) const
+    {
+        return values_.at(idx);
+    }
+
+    /**
+     * Obtains an array of value<T>s. Note that elements may be
+     * nullptr if they cannot be converted to a value<T>.
+     */
+    template <class T>
+    std::vector<std::shared_ptr<value<T>>> array_of() const
+    {
+        std::vector<std::shared_ptr<value<T>>> result(values_.size());
+
+        std::transform(values_.begin(), values_.end(), result.begin(),
+                       [&](std::shared_ptr<base> v) { return v->as<T>(); });
+
+        return result;
+    }
+
+    /**
+     * Obtains an option<vector<T>>. The option will be empty if the array
+     * contains values that are not of type T.
+     */
+    template <class T>
+    inline typename array_of_trait<T>::return_type get_array_of() const
+    {
+        std::vector<T> result;
+        result.reserve(values_.size());
+
+        for (const auto& val : values_)
+        {
+            if (auto v = val->as<T>())
+                result.push_back(v->get());
+            else
+                return {};
+        }
+
+        return {std::move(result)};
+    }
+
+    /**
+     * Obtains an array of arrays. Note that elements may be nullptr
+     * if they cannot be converted to an array.
+     */
+    std::vector<std::shared_ptr<array>> nested_array() const
+    {
+        std::vector<std::shared_ptr<array>> result(values_.size());
+
+        std::transform(values_.begin(), values_.end(), result.begin(),
+                       [&](std::shared_ptr<base> v) -> std::shared_ptr<array> {
+                           if (v->is_array())
+                               return std::static_pointer_cast<array>(v);
+                           return std::shared_ptr<array>{};
+                       });
+
+        return result;
+    }
+
+    /**
+     * Add a value to the end of the array
+     */
+    template <class T>
+    void push_back(const std::shared_ptr<value<T>>& val)
+    {
+        if (values_.empty() || values_[0]->as<T>())
+        {
+            values_.push_back(val);
+        }
+        else
+        {
+            throw array_exception{"Arrays must be homogenous."};
+        }
+    }
+
+    /**
+     * Add an array to the end of the array
+     */
+    void push_back(const std::shared_ptr<array>& val)
+    {
+        if (values_.empty() || values_[0]->is_array())
+        {
+            values_.push_back(val);
+        }
+        else
+        {
+            throw array_exception{"Arrays must be homogenous."};
+        }
+    }
+
+    /**
+     * Convenience function for adding a simple element to the end
+     * of the array.
+     */
+    template <class T>
+    void push_back(T&& val, typename value_traits<T>::type* = 0)
+    {
+        push_back(make_value(std::forward<T>(val)));
+    }
+
+    /**
+     * Insert a value into the array
+     */
+    template <class T>
+    iterator insert(iterator position, const std::shared_ptr<value<T>>& value)
+    {
+        if (values_.empty() || values_[0]->as<T>())
+        {
+            return values_.insert(position, value);
+        }
+        else
+        {
+            throw array_exception{"Arrays must be homogenous."};
+        }
+    }
+
+    /**
+     * Insert an array into the array
+     */
+    iterator insert(iterator position, const std::shared_ptr<array>& value)
+    {
+        if (values_.empty() || values_[0]->is_array())
+        {
+            return values_.insert(position, value);
+        }
+        else
+        {
+            throw array_exception{"Arrays must be homogenous."};
+        }
+    }
+
+    /**
+     * Convenience function for inserting a simple element in the array
+     */
+    template <class T>
+    iterator insert(iterator position, T&& val,
+                    typename value_traits<T>::type* = 0)
+    {
+        return insert(position, make_value(std::forward<T>(val)));
+    }
+
+    /**
+     * Erase an element from the array
+     */
+    iterator erase(iterator position)
+    {
+        return values_.erase(position);
+    }
+
+    /**
+     * Clear the array
+     */
+    void clear()
+    {
+        values_.clear();
+    }
+
+    /**
+     * Reserve space for n values.
+     */
+    void reserve(size_type n)
+    {
+        values_.reserve(n);
+    }
+
+  private:
+#if defined(CPPTOML_NO_RTTI)
+    array() : base(base_type::ARRAY)
+    {
+        // empty
+    }
+#else
+    array() = default;
+#endif
+
+    template <class InputIterator>
+    array(InputIterator begin, InputIterator end) : values_{begin, end}
+    {
+        // nothing
+    }
+
+    array(const array& obj) = delete;
+    array& operator=(const array& obj) = delete;
+
+    std::vector<std::shared_ptr<base>> values_;
+};
+
+inline std::shared_ptr<array> make_array()
+{
+    struct make_shared_enabler : public array
+    {
+        make_shared_enabler()
+        {
+            // nothing
+        }
+    };
+
+    return std::make_shared<make_shared_enabler>();
+}
+
+template <>
+inline std::shared_ptr<array> make_element<array>()
+{
+    return make_array();
+}
+
+/**
+ * Obtains an option<vector<T>>. The option will be empty if the array
+ * contains values that are not of type T.
+ */
+template <>
+inline typename array_of_trait<array>::return_type
+array::get_array_of<array>() const
+{
+    std::vector<std::shared_ptr<array>> result;
+    result.reserve(values_.size());
+
+    for (const auto& val : values_)
+    {
+        if (auto v = val->as_array())
+            result.push_back(v);
+        else
+            return {};
+    }
+
+    return {std::move(result)};
+}
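+
+// Usage sketch (illustrative): arrays are built through make_array/push_back
+// and read back in bulk with get_array_of, which yields an empty option as
+// soon as one element has the wrong type.
+//
+//     auto arr = cpptoml::make_array();
+//     arr->push_back(int64_t{1});
+//     arr->push_back(int64_t{2});
+//     auto ints = arr->get_array_of<int64_t>();      // option holding {1, 2}
+//     auto strs = arr->get_array_of<std::string>();  // empty option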
+
+class table;
+
+class table_array : public base
+{
+    friend class table;
+    friend std::shared_ptr<table_array> make_table_array();
+
+  public:
+    std::shared_ptr<base> clone() const override;
+
+    using size_type = std::size_t;
+
+    /**
+     * arrays can be iterated over
+     */
+    using iterator = std::vector<std::shared_ptr<table>>::iterator;
+
+    /**
+     * arrays can be iterated over.  Const version.
+     */
+    using const_iterator = std::vector<std::shared_ptr<table>>::const_iterator;
+
+    iterator begin()
+    {
+        return array_.begin();
+    }
+
+    const_iterator begin() const
+    {
+        return array_.begin();
+    }
+
+    iterator end()
+    {
+        return array_.end();
+    }
+
+    const_iterator end() const
+    {
+        return array_.end();
+    }
+
+    virtual bool is_table_array() const override
+    {
+        return true;
+    }
+
+    std::vector<std::shared_ptr<table>>& get()
+    {
+        return array_;
+    }
+
+    const std::vector<std::shared_ptr<table>>& get() const
+    {
+        return array_;
+    }
+
+    /**
+     * Add a table to the end of the array
+     */
+    void push_back(const std::shared_ptr<table>& val)
+    {
+        array_.push_back(val);
+    }
+
+    /**
+     * Insert a table into the array
+     */
+    iterator insert(iterator position, const std::shared_ptr<table>& value)
+    {
+        return array_.insert(position, value);
+    }
+
+    /**
+     * Erase an element from the array
+     */
+    iterator erase(iterator position)
+    {
+        return array_.erase(position);
+    }
+
+    /**
+     * Clear the array
+     */
+    void clear()
+    {
+        array_.clear();
+    }
+
+    /**
+     * Reserve space for n tables.
+     */
+    void reserve(size_type n)
+    {
+        array_.reserve(n);
+    }
+
+  private:
+#if defined(CPPTOML_NO_RTTI)
+    table_array() : base(base_type::TABLE_ARRAY)
+    {
+        // nothing
+    }
+#else
+    table_array()
+    {
+        // nothing
+    }
+#endif
+
+    table_array(const table_array& obj) = delete;
+    table_array& operator=(const table_array& rhs) = delete;
+
+    std::vector<std::shared_ptr<table>> array_;
+};
+
+inline std::shared_ptr<table_array> make_table_array()
+{
+    struct make_shared_enabler : public table_array
+    {
+        make_shared_enabler()
+        {
+            // nothing
+        }
+    };
+
+    return std::make_shared<make_shared_enabler>();
+}
+
+template <>
+inline std::shared_ptr<table_array> make_element<table_array>()
+{
+    return make_table_array();
+}
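+
+// Usage sketch (illustrative): a table_array corresponds to repeated [[name]]
+// sections in a TOML document; each element is itself a table.
+//
+//     auto products = cpptoml::make_table_array();
+//     products->push_back(cpptoml::make_table());
+//     products->get().back()->insert("name", std::string{"hammer"});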
+
+// The below are overloads for fetching specific value types out of a value
+// where special casting behavior (like bounds checking) is desired
+
+template <class T>
+typename std::enable_if<!std::is_floating_point<T>::value
+                            && std::is_signed<T>::value,
+                        option<T>>::type
+get_impl(const std::shared_ptr<base>& elem)
+{
+    if (auto v = elem->as<int64_t>())
+    {
+        if (v->get() < (std::numeric_limits<T>::min)())
+            throw std::underflow_error{
+                "T cannot represent the value requested in get"};
+
+        if (v->get() > (std::numeric_limits<T>::max)())
+            throw std::overflow_error{
+                "T cannot represent the value requested in get"};
+
+        return {static_cast<T>(v->get())};
+    }
+    else
+    {
+        return {};
+    }
+}
+
+template <class T>
+typename std::enable_if<!std::is_same<T, bool>::value
+                            && std::is_unsigned<T>::value,
+                        option<T>>::type
+get_impl(const std::shared_ptr<base>& elem)
+{
+    if (auto v = elem->as<int64_t>())
+    {
+        if (v->get() < 0)
+            throw std::underflow_error{"T cannot store negative value in get"};
+
+        if (static_cast<uint64_t>(v->get()) > (std::numeric_limits<T>::max)())
+            throw std::overflow_error{
+                "T cannot represent the value requested in get"};
+
+        return {static_cast<T>(v->get())};
+    }
+    else
+    {
+        return {};
+    }
+}
+
+template <class T>
+typename std::enable_if<!std::is_integral<T>::value
+                            || std::is_same<T, bool>::value,
+                        option<T>>::type
+get_impl(const std::shared_ptr<base>& elem)
+{
+    if (auto v = elem->as<T>())
+    {
+        return {v->get()};
+    }
+    else
+    {
+        return {};
+    }
+}
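+
+// Behaviour sketch (illustrative): the overloads above add range checks on top
+// of the stored int64_t, so narrowing reads either succeed exactly or throw.
+// They are normally reached indirectly through table::get_as rather than
+// called directly.
+//
+//     auto v = cpptoml::make_value(int64_t{300});
+//     cpptoml::get_impl<int64_t>(v);   // option holding 300
+//     cpptoml::get_impl<uint8_t>(v);   // throws std::overflow_error (300 > 255)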
+
+/**
+ * Represents a TOML keytable.
+ */
+class table : public base
+{
+  public:
+    friend class table_array;
+    friend std::shared_ptr<table> make_table();
+
+    std::shared_ptr<base> clone() const override;
+
+    /**
+     * tables can be iterated over.
+     */
+    using iterator = string_to_base_map::iterator;
+
+    /**
+     * tables can be iterated over. Const version.
+     */
+    using const_iterator = string_to_base_map::const_iterator;
+
+    iterator begin()
+    {
+        return map_.begin();
+    }
+
+    const_iterator begin() const
+    {
+        return map_.begin();
+    }
+
+    iterator end()
+    {
+        return map_.end();
+    }
+
+    const_iterator end() const
+    {
+        return map_.end();
+    }
+
+    bool is_table() const override
+    {
+        return true;
+    }
+
+    bool empty() const
+    {
+        return map_.empty();
+    }
+
+    /**
+     * Determines if this key table contains the given key.
+     */
+    bool contains(const std::string& key) const
+    {
+        return map_.find(key) != map_.end();
+    }
+
+    /**
+     * Determines if this key table contains the given key. Will
+     * resolve "qualified keys". Qualified keys are the full access
+     * path separated with dots like "grandparent.parent.child".
+     */
+    bool contains_qualified(const std::string& key) const
+    {
+        return resolve_qualified(key);
+    }
+
+    /**
+     * Obtains the base for a given key.
+     * @throw std::out_of_range if the key does not exist
+     */
+    std::shared_ptr<base> get(const std::string& key) const
+    {
+        return map_.at(key);
+    }
+
+    /**
+     * Obtains the base for a given key. Will resolve "qualified
+     * keys". Qualified keys are the full access path separated with
+     * dots like "grandparent.parent.child".
+     *
+     * @throw std::out_of_range if the key does not exist
+     */
+    std::shared_ptr<base> get_qualified(const std::string& key) const
+    {
+        std::shared_ptr<base> p;
+        resolve_qualified(key, &p);
+        return p;
+    }
+
+    /**
+     * Obtains a table for a given key, if possible.
+     */
+    std::shared_ptr<table> get_table(const std::string& key) const
+    {
+        if (contains(key) && get(key)->is_table())
+            return std::static_pointer_cast<table>(get(key));
+        return nullptr;
+    }
+
+    /**
+     * Obtains a table for a given key, if possible. Will resolve
+     * "qualified keys".
+     */
+    std::shared_ptr<table> get_table_qualified(const std::string& key) const
+    {
+        if (contains_qualified(key) && get_qualified(key)->is_table())
+            return std::static_pointer_cast<table>(get_qualified(key));
+        return nullptr;
+    }
+
+    /**
+     * Obtains an array for a given key.
+     */
+    std::shared_ptr<array> get_array(const std::string& key) const
+    {
+        if (!contains(key))
+            return nullptr;
+        return get(key)->as_array();
+    }
+
+    /**
+     * Obtains an array for a given key. Will resolve "qualified keys".
+     */
+    std::shared_ptr<array> get_array_qualified(const std::string& key) const
+    {
+        if (!contains_qualified(key))
+            return nullptr;
+        return get_qualified(key)->as_array();
+    }
+
+    /**
+     * Obtains a table_array for a given key, if possible.
+     */
+    std::shared_ptr<table_array> get_table_array(const std::string& key) const
+    {
+        if (!contains(key))
+            return nullptr;
+        return get(key)->as_table_array();
+    }
+
+    /**
+     * Obtains a table_array for a given key, if possible. Will resolve
+     * "qualified keys".
+     */
+    std::shared_ptr<table_array>
+    get_table_array_qualified(const std::string& key) const
+    {
+        if (!contains_qualified(key))
+            return nullptr;
+        return get_qualified(key)->as_table_array();
+    }
+
+    /**
+     * Helper function that attempts to get a value corresponding
+     * to the template parameter from a given key.
+     */
+    template <class T>
+    option<T> get_as(const std::string& key) const
+    {
+        try
+        {
+            return get_impl<T>(get(key));
+        }
+        catch (const std::out_of_range&)
+        {
+            return {};
+        }
+    }
+
+    /**
+     * Helper function that attempts to get a value corresponding
+     * to the template parameter from a given key. Will resolve "qualified
+     * keys".
+     */
+    template <class T>
+    option<T> get_qualified_as(const std::string& key) const
+    {
+        try
+        {
+            return get_impl<T>(get_qualified(key));
+        }
+        catch (const std::out_of_range&)
+        {
+            return {};
+        }
+    }
+
+    /**
+     * Helper function that attempts to get an array of values of a given
+     * type corresponding to the template parameter for a given key.
+     *
+     * If the key doesn't exist, doesn't exist as an array type, or one or
+     * more keys inside the array type are not of type T, an empty option
+     * is returned. Otherwise, an option containing a vector of the values
+     * is returned.
+     */
+    template <class T>
+    inline typename array_of_trait<T>::return_type
+    get_array_of(const std::string& key) const
+    {
+        if (auto v = get_array(key))
+        {
+            std::vector<T> result;
+            result.reserve(v->get().size());
+
+            for (const auto& b : v->get())
+            {
+                if (auto val = b->as<T>())
+                    result.push_back(val->get());
+                else
+                    return {};
+            }
+            return {std::move(result)};
+        }
+
+        return {};
+    }
+
+    /**
+     * Helper function that attempts to get an array of values of a given
+     * type corresponding to the template parameter for a given key. Will
+     * resolve "qualified keys".
+     *
+     * If the key doesn't exist, doesn't exist as an array type, or one or
+     * more keys inside the array type are not of type T, an empty option
+     * is returned. Otherwise, an option containing a vector of the values
+     * is returned.
+     */
+    template <class T>
+    inline typename array_of_trait<T>::return_type
+    get_qualified_array_of(const std::string& key) const
+    {
+        if (auto v = get_array_qualified(key))
+        {
+            std::vector<T> result;
+            result.reserve(v->get().size());
+
+            for (const auto& b : v->get())
+            {
+                if (auto val = b->as<T>())
+                    result.push_back(val->get());
+                else
+                    return {};
+            }
+            return {std::move(result)};
+        }
+
+        return {};
+    }
+
+    /**
+     * Adds an element to the keytable.
+     */
+    void insert(const std::string& key, const std::shared_ptr<base>& value)
+    {
+        map_[key] = value;
+    }
+
+    /**
+     * Convenience shorthand for adding a simple element to the
+     * keytable.
+     */
+    template <class T>
+    void insert(const std::string& key, T&& val,
+                typename value_traits<T>::type* = 0)
+    {
+        insert(key, make_value(std::forward<T>(val)));
+    }
+
+    /**
+     * Removes an element from the table.
+     */
+    void erase(const std::string& key)
+    {
+        map_.erase(key);
+    }
+
+  private:
+#if defined(CPPTOML_NO_RTTI)
+    table() : base(base_type::TABLE)
+    {
+        // nothing
+    }
+#else
+    table()
+    {
+        // nothing
+    }
+#endif
+
+    table(const table& obj) = delete;
+    table& operator=(const table& rhs) = delete;
+
+    std::vector<std::string> split(const std::string& value,
+                                   char separator) const
+    {
+        std::vector<std::string> result;
+        std::string::size_type p = 0;
+        std::string::size_type q;
+        while ((q = value.find(separator, p)) != std::string::npos)
+        {
+            result.emplace_back(value, p, q - p);
+            p = q + 1;
+        }
+        result.emplace_back(value, p);
+        return result;
+    }
+
+    // If output parameter p is specified, fill it with the pointer to the
+    // specified entry and throw std::out_of_range if it couldn't be found.
+    //
+    // Otherwise, just return true if the entry could be found or false
+    // otherwise and do not throw.
+    bool resolve_qualified(const std::string& key,
+                           std::shared_ptr<base>* p = nullptr) const
+    {
+        auto parts = split(key, '.');
+        auto last_key = parts.back();
+        parts.pop_back();
+
+        auto cur_table = this;
+        for (const auto& part : parts)
+        {
+            cur_table = cur_table->get_table(part).get();
+            if (!cur_table)
+            {
+                if (!p)
+                    return false;
+
+                throw std::out_of_range{key + " is not a valid key"};
+            }
+        }
+
+        if (!p)
+            return cur_table->map_.count(last_key) != 0;
+
+        *p = cur_table->map_.at(last_key);
+        return true;
+    }
+
+    string_to_base_map map_;
+};
+
+/**
+ * Helper function that attempts to get an array of arrays for a given
+ * key.
+ *
+ * If the key doesn't exist, doesn't exist as an array type, or one or
+ * more keys inside the array type are not of type T, an empty option
+ * is returned. Otherwise, an option containing a vector of the values
+ * is returned.
+ */
+template <>
+inline typename array_of_trait<array>::return_type
+table::get_array_of<array>(const std::string& key) const
+{
+    if (auto v = get_array(key))
+    {
+        std::vector<std::shared_ptr<array>> result;
+        result.reserve(v->get().size());
+
+        for (const auto& b : v->get())
+        {
+            if (auto val = b->as_array())
+                result.push_back(val);
+            else
+                return {};
+        }
+
+        return {std::move(result)};
+    }
+
+    return {};
+}
+
+/**
+ * Helper function that attempts to get an array of arrays for a given
+ * key. Will resolve "qualified keys".
+ *
+ * If the key doesn't exist, doesn't exist as an array type, or one or
+ * more keys inside the array type are not of type T, an empty option
+ * is returned. Otherwise, an option containing a vector of the values
+ * is returned.
+ */
+template <>
+inline typename array_of_trait<array>::return_type
+table::get_qualified_array_of<array>(const std::string& key) const
+{
+    if (auto v = get_array_qualified(key))
+    {
+        std::vector<std::shared_ptr<array>> result;
+        result.reserve(v->get().size());
+
+        for (const auto& b : v->get())
+        {
+            if (auto val = b->as_array())
+                result.push_back(val);
+            else
+                return {};
+        }
+
+        return {std::move(result)};
+    }
+
+    return {};
+}
+
+inline std::shared_ptr<table> make_table()
+{
+    struct make_shared_enabler : public table
+    {
+        make_shared_enabler()
+        {
+            // nothing
+        }
+    };
+
+    return std::make_shared<make_shared_enabler>();
+}
+
+template <>
+inline std::shared_ptr<table> make_element<table>()
+{
+    return make_table();
+}
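+
+// Usage sketch (illustrative): building a nested table by hand and reading it
+// back with plain and "qualified" accessors.
+//
+//     auto root = cpptoml::make_table();
+//     auto server = cpptoml::make_table();
+//     server->insert("port", int64_t{8080});
+//     root->insert("server", server);
+//     root->get_table("server")->get_as<int64_t>("port");  // option holding 8080
+//     root->get_qualified_as<int64_t>("server.port");      // same value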
+
+template <class T>
+std::shared_ptr<base> value<T>::clone() const
+{
+    return make_value(data_);
+}
+
+inline std::shared_ptr<base> array::clone() const
+{
+    auto result = make_array();
+    result->reserve(values_.size());
+    for (const auto& ptr : values_)
+        result->values_.push_back(ptr->clone());
+    return result;
+}
+
+inline std::shared_ptr<base> table_array::clone() const
+{
+    auto result = make_table_array();
+    result->reserve(array_.size());
+    for (const auto& ptr : array_)
+        result->array_.push_back(ptr->clone()->as_table());
+    return result;
+}
+
+inline std::shared_ptr<base> table::clone() const
+{
+    auto result = make_table();
+    for (const auto& pr : map_)
+        result->insert(pr.first, pr.second->clone());
+    return result;
+}
+
+/**
+ * Exception class for all TOML parsing errors.
+ */
+class parse_exception : public std::runtime_error
+{
+  public:
+    parse_exception(const std::string& err) : std::runtime_error{err}
+    {
+    }
+
+    parse_exception(const std::string& err, std::size_t line_number)
+        : std::runtime_error{err + " at line " + std::to_string(line_number)}
+    {
+    }
+};
+
+inline bool is_number(char c)
+{
+    return c >= '0' && c <= '9';
+}
+
+/**
+ * Helper object for consuming expected characters.
+ */
+template <class OnError>
+class consumer
+{
+  public:
+    consumer(std::string::iterator& it, const std::string::iterator& end,
+             OnError&& on_error)
+        : it_(it), end_(end), on_error_(std::forward<OnError>(on_error))
+    {
+        // nothing
+    }
+
+    void operator()(char c)
+    {
+        if (it_ == end_ || *it_ != c)
+            on_error_();
+        ++it_;
+    }
+
+    template <std::size_t N>
+    void operator()(const char (&str)[N])
+    {
+        std::for_each(std::begin(str), std::end(str) - 1,
+                      [&](char c) { (*this)(c); });
+    }
+
+    int eat_digits(int len)
+    {
+        int val = 0;
+        for (int i = 0; i < len; ++i)
+        {
+            if (it_ == end_ || !is_number(*it_))
+                on_error_();
+            val = 10 * val + (*it_++ - '0');
+        }
+        return val;
+    }
+
+    void error()
+    {
+        on_error_();
+    }
+
+  private:
+    std::string::iterator& it_;
+    const std::string::iterator& end_;
+    OnError on_error_;
+};
+
+template <class OnError>
+consumer<OnError> make_consumer(std::string::iterator& it,
+                                const std::string::iterator& end,
+                                OnError&& on_error)
+{
+    return consumer<OnError>(it, end, std::forward<OnError>(on_error));
+}
+
+// replacement for std::getline to handle incorrectly line-ended files
+// https://stackoverflow.com/questions/6089231/getting-std-ifstream-to-handle-lf-cr-and-crlf
+namespace detail
+{
+inline std::istream& getline(std::istream& input, std::string& line)
+{
+    line.clear();
+
+    std::istream::sentry sentry{input, true};
+    auto sb = input.rdbuf();
+
+    while (true)
+    {
+        auto c = sb->sbumpc();
+        if (c == '\r')
+        {
+            if (sb->sgetc() == '\n')
+                c = sb->sbumpc();
+        }
+
+        if (c == '\n')
+            return input;
+
+        if (c == std::istream::traits_type::eof())
+        {
+            if (line.empty())
+                input.setstate(std::ios::eofbit);
+            return input;
+        }
+
+        line.push_back(static_cast<char>(c));
+    }
+}
+}
+
+/**
+ * The parser class.
+ */
+class parser
+{
+  public:
+    /**
+     * Parsers are constructed from streams.
+     */
+    parser(std::istream& stream) : input_(stream)
+    {
+        // nothing
+    }
+
+    parser& operator=(const parser& parser) = delete;
+
+    /**
+     * Parses the stream this parser was created on until EOF.
+     * @throw parse_exception if there are errors in parsing
+     */
+    std::shared_ptr<table> parse()
+    {
+        std::shared_ptr<table> root = make_table();
+
+        table* curr_table = root.get();
+
+        while (detail::getline(input_, line_))
+        {
+            line_number_++;
+            auto it = line_.begin();
+            auto end = line_.end();
+            consume_whitespace(it, end);
+            if (it == end || *it == '#')
+                continue;
+            if (*it == '[')
+            {
+                curr_table = root.get();
+                parse_table(it, end, curr_table);
+            }
+            else
+            {
+                parse_key_value(it, end, curr_table);
+                consume_whitespace(it, end);
+                eol_or_comment(it, end);
+            }
+        }
+        return root;
+    }
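+
+    // Usage sketch (illustrative): parsing a small document from any
+    // std::istream and reading a value out of the resulting root table.
+    //
+    //     std::istringstream in{"[server]\nport = 8080\n"};
+    //     auto root = cpptoml::parser{in}.parse();
+    //     auto port = root->get_qualified_as<int64_t>("server.port");  // 8080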
+
+  private:
+#if defined _MSC_VER
+    __declspec(noreturn)
+#elif defined __GNUC__
+    __attribute__((noreturn))
+#endif
+        void throw_parse_exception(const std::string& err)
+    {
+        throw parse_exception{err, line_number_};
+    }
+
+    void parse_table(std::string::iterator& it,
+                     const std::string::iterator& end, table*& curr_table)
+    {
+        // remove the beginning keytable marker
+        ++it;
+        if (it == end)
+            throw_parse_exception("Unexpected end of table");
+        if (*it == '[')
+            parse_table_array(it, end, curr_table);
+        else
+            parse_single_table(it, end, curr_table);
+    }
+
+    void parse_single_table(std::string::iterator& it,
+                            const std::string::iterator& end,
+                            table*& curr_table)
+    {
+        if (it == end || *it == ']')
+            throw_parse_exception("Table name cannot be empty");
+
+        std::string full_table_name;
+        bool inserted = false;
+        while (it != end && *it != ']')
+        {
+            auto part = parse_key(it, end,
+                                  [](char c) { return c == '.' || c == ']'; });
+
+            if (part.empty())
+                throw_parse_exception("Empty component of table name");
+
+            if (!full_table_name.empty())
+                full_table_name += ".";
+            full_table_name += part;
+
+            if (curr_table->contains(part))
+            {
+                auto b = curr_table->get(part);
+                if (b->is_table())
+                    curr_table = static_cast<table*>(b.get());
+                else if (b->is_table_array())
+                    curr_table = std::static_pointer_cast<table_array>(b)
+                                     ->get()
+                                     .back()
+                                     .get();
+                else
+                    throw_parse_exception("Key " + full_table_name
+                                          + " already exists as a value");
+            }
+            else
+            {
+                inserted = true;
+                curr_table->insert(part, make_table());
+                curr_table = static_cast<table*>(curr_table->get(part).get());
+            }
+            consume_whitespace(it, end);
+            if (it != end && *it == '.')
+                ++it;
+            consume_whitespace(it, end);
+        }
+
+        if (it == end)
+            throw_parse_exception(
+                "Unterminated table declaration; did you forget a ']'?");
+
+        // table already existed
+        if (!inserted)
+        {
+            auto is_value
+                = [](const std::pair<const std::string&,
+                                     const std::shared_ptr<base>&>& p) {
+                      return p.second->is_value();
+                  };
+
+            // if there are any values, we can't add values to this table
+            // since it has already been defined. If there aren't any
+            // values, then it was implicitly created by something like
+            // [a.b]
+            if (curr_table->empty() || std::any_of(curr_table->begin(),
+                                                   curr_table->end(), is_value))
+            {
+                throw_parse_exception("Redefinition of table "
+                                      + full_table_name);
+            }
+        }
+
+        ++it;
+        consume_whitespace(it, end);
+        eol_or_comment(it, end);
+    }
+
+    void parse_table_array(std::string::iterator& it,
+                           const std::string::iterator& end, table*& curr_table)
+    {
+        ++it;
+        if (it == end || *it == ']')
+            throw_parse_exception("Table array name cannot be empty");
+
+        std::string full_ta_name;
+        while (it != end && *it != ']')
+        {
+            auto part = parse_key(it, end,
+                                  [](char c) { return c == '.' || c == ']'; });
+
+            if (part.empty())
+                throw_parse_exception("Empty component of table array name");
+
+            if (!full_ta_name.empty())
+                full_ta_name += ".";
+            full_ta_name += part;
+
+            consume_whitespace(it, end);
+            if (it != end && *it == '.')
+                ++it;
+            consume_whitespace(it, end);
+
+            if (curr_table->contains(part))
+            {
+                auto b = curr_table->get(part);
+
+                // if this is the end of the table array name, add an
+                // element to the table array that we just looked up
+                if (it != end && *it == ']')
+                {
+                    if (!b->is_table_array())
+                        throw_parse_exception("Key " + full_ta_name
+                                              + " is not a table array");
+                    auto v = b->as_table_array();
+                    v->get().push_back(make_table());
+                    curr_table = v->get().back().get();
+                }
+                // otherwise, just keep traversing down the key name
+                else
+                {
+                    if (b->is_table())
+                        curr_table = static_cast<table*>(b.get());
+                    else if (b->is_table_array())
+                        curr_table = std::static_pointer_cast<table_array>(b)
+                                         ->get()
+                                         .back()
+                                         .get();
+                    else
+                        throw_parse_exception("Key " + full_ta_name
+                                              + " already exists as a value");
+                }
+            }
+            else
+            {
+                // if this is the end of the table array name, add a new
+                // table array and a new table inside that array for us to
+                // add keys to next
+                if (it != end && *it == ']')
+                {
+                    curr_table->insert(part, make_table_array());
+                    auto arr = std::static_pointer_cast<table_array>(
+                        curr_table->get(part));
+                    arr->get().push_back(make_table());
+                    curr_table = arr->get().back().get();
+                }
+                // otherwise, create the implicitly defined table and move
+                // down to it
+                else
+                {
+                    curr_table->insert(part, make_table());
+                    curr_table
+                        = static_cast<table*>(curr_table->get(part).get());
+                }
+            }
+        }
+
+        // consume the last "]]"
+        if (it == end)
+            throw_parse_exception("Unterminated table array name");
+        ++it;
+        if (it == end)
+            throw_parse_exception("Unterminated table array name");
+        ++it;
+
+        consume_whitespace(it, end);
+        eol_or_comment(it, end);
+    }
+
+    void parse_key_value(std::string::iterator& it, std::string::iterator& end,
+                         table* curr_table)
+    {
+        auto key = parse_key(it, end, [](char c) { return c == '='; });
+        if (curr_table->contains(key))
+            throw_parse_exception("Key " + key + " already present");
+        if (it == end || *it != '=')
+            throw_parse_exception("Value must follow after a '='");
+        ++it;
+        consume_whitespace(it, end);
+        curr_table->insert(key, parse_value(it, end));
+        consume_whitespace(it, end);
+    }
+
+    template <class Function>
+    std::string parse_key(std::string::iterator& it,
+                          const std::string::iterator& end, Function&& fun)
+    {
+        consume_whitespace(it, end);
+        if (*it == '"')
+        {
+            return parse_quoted_key(it, end);
+        }
+        else
+        {
+            auto bke = std::find_if(it, end, std::forward<Function>(fun));
+            return parse_bare_key(it, bke);
+        }
+    }
+
+    std::string parse_bare_key(std::string::iterator& it,
+                               const std::string::iterator& end)
+    {
+        if (it == end)
+        {
+            throw_parse_exception("Bare key missing name");
+        }
+
+        auto key_end = end;
+        --key_end;
+        consume_backwards_whitespace(key_end, it);
+        ++key_end;
+        std::string key{it, key_end};
+
+        if (std::find(it, key_end, '#') != key_end)
+        {
+            throw_parse_exception("Bare key " + key + " cannot contain #");
+        }
+
+        if (std::find_if(it, key_end,
+                         [](char c) { return c == ' ' || c == '\t'; })
+            != key_end)
+        {
+            throw_parse_exception("Bare key " + key
+                                  + " cannot contain whitespace");
+        }
+
+        if (std::find_if(it, key_end,
+                         [](char c) { return c == '[' || c == ']'; })
+            != key_end)
+        {
+            throw_parse_exception("Bare key " + key
+                                  + " cannot contain '[' or ']'");
+        }
+
+        it = end;
+        return key;
+    }
+
+    std::string parse_quoted_key(std::string::iterator& it,
+                                 const std::string::iterator& end)
+    {
+        return string_literal(it, end, '"');
+    }
+
+    enum class parse_type
+    {
+        STRING = 1,
+        LOCAL_TIME,
+        LOCAL_DATE,
+        LOCAL_DATETIME,
+        OFFSET_DATETIME,
+        INT,
+        FLOAT,
+        BOOL,
+        ARRAY,
+        INLINE_TABLE
+    };
+
+    std::shared_ptr<base> parse_value(std::string::iterator& it,
+                                      std::string::iterator& end)
+    {
+        parse_type type = determine_value_type(it, end);
+        switch (type)
+        {
+            case parse_type::STRING:
+                return parse_string(it, end);
+            case parse_type::LOCAL_TIME:
+                return parse_time(it, end);
+            case parse_type::LOCAL_DATE:
+            case parse_type::LOCAL_DATETIME:
+            case parse_type::OFFSET_DATETIME:
+                return parse_date(it, end);
+            case parse_type::INT:
+            case parse_type::FLOAT:
+                return parse_number(it, end);
+            case parse_type::BOOL:
+                return parse_bool(it, end);
+            case parse_type::ARRAY:
+                return parse_array(it, end);
+            case parse_type::INLINE_TABLE:
+                return parse_inline_table(it, end);
+            default:
+                throw_parse_exception("Failed to parse value");
+        }
+    }
+
+    parse_type determine_value_type(const std::string::iterator& it,
+                                    const std::string::iterator& end)
+    {
+        if (it == end)
+        {
+            throw_parse_exception("Failed to parse value type");
+        }
+        if (*it == '"' || *it == '\'')
+        {
+            return parse_type::STRING;
+        }
+        else if (is_time(it, end))
+        {
+            return parse_type::LOCAL_TIME;
+        }
+        else if (auto dtype = date_type(it, end))
+        {
+            return *dtype;
+        }
+        else if (is_number(*it) || *it == '-' || *it == '+')
+        {
+            return determine_number_type(it, end);
+        }
+        else if (*it == 't' || *it == 'f')
+        {
+            return parse_type::BOOL;
+        }
+        else if (*it == '[')
+        {
+            return parse_type::ARRAY;
+        }
+        else if (*it == '{')
+        {
+            return parse_type::INLINE_TABLE;
+        }
+        throw_parse_exception("Failed to parse value type");
+    }
+
+    parse_type determine_number_type(const std::string::iterator& it,
+                                     const std::string::iterator& end)
+    {
+        // determine if we are an integer or a float
+        auto check_it = it;
+        if (*check_it == '-' || *check_it == '+')
+            ++check_it;
+        while (check_it != end && is_number(*check_it))
+            ++check_it;
+        if (check_it != end && *check_it == '.')
+        {
+            ++check_it;
+            while (check_it != end && is_number(*check_it))
+                ++check_it;
+            return parse_type::FLOAT;
+        }
+        else
+        {
+            return parse_type::INT;
+        }
+    }
+
+    std::shared_ptr<value<std::string>> parse_string(std::string::iterator& it,
+                                                     std::string::iterator& end)
+    {
+        auto delim = *it;
+        assert(delim == '"' || delim == '\'');
+
+        // end is non-const here because we have to be able to potentially
+        // parse multiple lines in a string, not just one
+        auto check_it = it;
+        ++check_it;
+        if (check_it != end && *check_it == delim)
+        {
+            ++check_it;
+            if (check_it != end && *check_it == delim)
+            {
+                it = ++check_it;
+                return parse_multiline_string(it, end, delim);
+            }
+        }
+        return make_value<std::string>(string_literal(it, end, delim));
+    }
+
+    std::shared_ptr<value<std::string>>
+    parse_multiline_string(std::string::iterator& it,
+                           std::string::iterator& end, char delim)
+    {
+        std::stringstream ss;
+
+        auto is_ws = [](char c) { return c == ' ' || c == '\t'; };
+
+        bool consuming = false;
+        std::shared_ptr<value<std::string>> ret;
+
+        auto handle_line
+            = [&](std::string::iterator& local_it,
+                  std::string::iterator& local_end) {
+                  if (consuming)
+                  {
+                      local_it = std::find_if_not(local_it, local_end, is_ws);
+
+                      // whole line is whitespace
+                      if (local_it == local_end)
+                          return;
+                  }
+
+                  consuming = false;
+
+                  while (local_it != local_end)
+                  {
+                      // handle escaped characters
+                      if (delim == '"' && *local_it == '\\')
+                      {
+                          auto check = local_it;
+                          // check if this is an actual escape sequence or a
+                          // whitespace escaping backslash
+                          ++check;
+                          consume_whitespace(check, local_end);
+                          if (check == local_end)
+                          {
+                              consuming = true;
+                              break;
+                          }
+
+                          ss << parse_escape_code(local_it, local_end);
+                          continue;
+                      }
+
+                      // if we can end the string
+                      if (std::distance(local_it, local_end) >= 3)
+                      {
+                          auto check = local_it;
+                          // check for the closing """ or ''' sequence
+                          if (*check++ == delim && *check++ == delim
+                              && *check++ == delim)
+                          {
+                              local_it = check;
+                              ret = make_value<std::string>(ss.str());
+                              break;
+                          }
+                      }
+
+                      ss << *local_it++;
+                  }
+              };
+
+        // handle the remainder of the current line
+        handle_line(it, end);
+        if (ret)
+            return ret;
+
+        // start eating lines
+        while (detail::getline(input_, line_))
+        {
+            ++line_number_;
+
+            it = line_.begin();
+            end = line_.end();
+
+            handle_line(it, end);
+
+            if (ret)
+                return ret;
+
+            if (!consuming)
+                ss << std::endl;
+        }
+
+        throw_parse_exception("Unterminated multi-line basic string");
+    }
+
+    std::string string_literal(std::string::iterator& it,
+                               const std::string::iterator& end, char delim)
+    {
+        ++it;
+        std::string val;
+        while (it != end)
+        {
+            // handle escaped characters
+            if (delim == '"' && *it == '\\')
+            {
+                val += parse_escape_code(it, end);
+            }
+            else if (*it == delim)
+            {
+                ++it;
+                consume_whitespace(it, end);
+                return val;
+            }
+            else
+            {
+                val += *it++;
+            }
+        }
+        throw_parse_exception("Unterminated string literal");
+    }
+
+    std::string parse_escape_code(std::string::iterator& it,
+                                  const std::string::iterator& end)
+    {
+        ++it;
+        if (it == end)
+            throw_parse_exception("Invalid escape sequence");
+        char value;
+        if (*it == 'b')
+        {
+            value = '\b';
+        }
+        else if (*it == 't')
+        {
+            value = '\t';
+        }
+        else if (*it == 'n')
+        {
+            value = '\n';
+        }
+        else if (*it == 'f')
+        {
+            value = '\f';
+        }
+        else if (*it == 'r')
+        {
+            value = '\r';
+        }
+        else if (*it == '"')
+        {
+            value = '"';
+        }
+        else if (*it == '\\')
+        {
+            value = '\\';
+        }
+        else if (*it == 'u' || *it == 'U')
+        {
+            return parse_unicode(it, end);
+        }
+        else
+        {
+            throw_parse_exception("Invalid escape sequence");
+        }
+        ++it;
+        return std::string(1, value);
+    }
+
+    std::string parse_unicode(std::string::iterator& it,
+                              const std::string::iterator& end)
+    {
+        bool large = *it++ == 'U';
+        auto codepoint = parse_hex(it, end, large ? 0x10000000 : 0x1000);
+
+        if ((codepoint > 0xd7ff && codepoint < 0xe000) || codepoint > 0x10ffff)
+        {
+            throw_parse_exception(
+                "Unicode escape sequence is not a Unicode scalar value");
+        }
+
+        std::string result;
+        // See Table 3-6 of the Unicode standard
+        if (codepoint <= 0x7f)
+        {
+            // 1-byte codepoints: 00000000 0xxxxxxx
+            // repr: 0xxxxxxx
+            result += static_cast<char>(codepoint & 0x7f);
+        }
+        else if (codepoint <= 0x7ff)
+        {
+            // 2-byte codepoints: 00000yyy yyxxxxxx
+            // repr: 110yyyyy 10xxxxxx
+            //
+            // 0x1f = 00011111
+            // 0xc0 = 11000000
+            //
+            result += static_cast<char>(0xc0 | ((codepoint >> 6) & 0x1f));
+            //
+            // 0x80 = 10000000
+            // 0x3f = 00111111
+            //
+            result += static_cast<char>(0x80 | (codepoint & 0x3f));
+        }
+        else if (codepoint <= 0xffff)
+        {
+            // 3-byte codepoints: zzzzyyyy yyxxxxxx
+            // repr: 1110zzzz 10yyyyyy 10xxxxxx
+            //
+            // 0xe0 = 11100000
+            // 0x0f = 00001111
+            //
+            result += static_cast<char>(0xe0 | ((codepoint >> 12) & 0x0f));
+            result += static_cast<char>(0x80 | ((codepoint >> 6) & 0x3f));
+            result += static_cast<char>(0x80 | (codepoint & 0x3f));
+        }
+        else
+        {
+            // 4-byte codepoints: 000uuuuu zzzzyyyy yyxxxxxx
+            // repr: 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
+            //
+            // 0xf0 = 11110000
+            // 0x07 = 00000111
+            //
+            result += static_cast<char>(0xf0 | ((codepoint >> 18) & 0x07));
+            result += static_cast<char>(0x80 | ((codepoint >> 12) & 0x3f));
+            result += static_cast<char>(0x80 | ((codepoint >> 6) & 0x3f));
+            result += static_cast<char>(0x80 | (codepoint & 0x3f));
+        }
+        return result;
+    }
+
+    uint32_t parse_hex(std::string::iterator& it,
+                       const std::string::iterator& end, uint32_t place)
+    {
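+        // 'place' is the value of the most significant hex digit
+        // (16^3 for \uXXXX, 16^7 for \UXXXXXXXX), so the loop consumes
+        // exactly four or eight digits.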
+        uint32_t value = 0;
+        while (place > 0)
+        {
+            if (it == end)
+                throw_parse_exception("Unexpected end of unicode sequence");
+
+            if (!is_hex(*it))
+                throw_parse_exception("Invalid unicode escape sequence");
+
+            value += place * hex_to_digit(*it++);
+            place /= 16;
+        }
+        return value;
+    }
+
+    bool is_hex(char c)
+    {
+        return is_number(c) || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F');
+    }
+
+    uint32_t hex_to_digit(char c)
+    {
+        if (is_number(c))
+            return static_cast<uint32_t>(c - '0');
+        return 10 + static_cast<uint32_t>(
+                        c - ((c >= 'a' && c <= 'f') ? 'a' : 'A'));
+    }
+
+    std::shared_ptr<base> parse_number(std::string::iterator& it,
+                                       const std::string::iterator& end)
+    {
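+        // First validate the number's shape with lookahead iterators; the
+        // actual conversion is delegated to parse_int()/parse_float().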
+        auto check_it = it;
+        auto check_end = find_end_of_number(it, end);
+
+        auto eat_sign = [&]() {
+            if (check_it != end && (*check_it == '-' || *check_it == '+'))
+                ++check_it;
+        };
+
+        eat_sign();
+
+        auto eat_numbers = [&]() {
+            auto beg = check_it;
+            while (check_it != end && is_number(*check_it))
+            {
+                ++check_it;
+                if (check_it != end && *check_it == '_')
+                {
+                    ++check_it;
+                    if (check_it == end || !is_number(*check_it))
+                        throw_parse_exception("Malformed number");
+                }
+            }
+
+            if (check_it == beg)
+                throw_parse_exception("Malformed number");
+        };
+
+        auto check_no_leading_zero = [&]() {
+            if (check_it != end && *check_it == '0' && check_it + 1 != check_end
+                && check_it[1] != '.')
+            {
+                throw_parse_exception("Numbers may not have leading zeros");
+            }
+        };
+
+        check_no_leading_zero();
+        eat_numbers();
+
+        if (check_it != end
+            && (*check_it == '.' || *check_it == 'e' || *check_it == 'E'))
+        {
+            bool is_exp = *check_it == 'e' || *check_it == 'E';
+
+            ++check_it;
+            if (check_it == end)
+                throw_parse_exception("Floats must have trailing digits");
+
+            auto eat_exp = [&]() {
+                eat_sign();
+                check_no_leading_zero();
+                eat_numbers();
+            };
+
+            if (is_exp)
+                eat_exp();
+            else
+                eat_numbers();
+
+            if (!is_exp && check_it != end
+                && (*check_it == 'e' || *check_it == 'E'))
+            {
+                ++check_it;
+                eat_exp();
+            }
+
+            return parse_float(it, check_it);
+        }
+        else
+        {
+            return parse_int(it, check_it);
+        }
+    }
+
+    std::shared_ptr<value<int64_t>> parse_int(std::string::iterator& it,
+                                              const std::string::iterator& end)
+    {
+        std::string v{it, end};
+        v.erase(std::remove(v.begin(), v.end(), '_'), v.end());
+        it = end;
+        try
+        {
+            return make_value<int64_t>(std::stoll(v));
+        }
+        catch (const std::invalid_argument& ex)
+        {
+            throw_parse_exception("Malformed number (invalid argument: "
+                                  + std::string{ex.what()} + ")");
+        }
+        catch (const std::out_of_range& ex)
+        {
+            throw_parse_exception("Malformed number (out of range: "
+                                  + std::string{ex.what()} + ")");
+        }
+    }
+
+    std::shared_ptr<value<double>> parse_float(std::string::iterator& it,
+                                               const std::string::iterator& end)
+    {
+        std::string v{it, end};
+        v.erase(std::remove(v.begin(), v.end(), '_'), v.end());
+        it = end;
+        char decimal_point = std::localeconv()->decimal_point[0];
+        std::replace(v.begin(), v.end(), '.', decimal_point);
+        try
+        {
+            return make_value<double>(std::stod(v));
+        }
+        catch (const std::invalid_argument& ex)
+        {
+            throw_parse_exception("Malformed number (invalid argument: "
+                                  + std::string{ex.what()} + ")");
+        }
+        catch (const std::out_of_range& ex)
+        {
+            throw_parse_exception("Malformed number (out of range: "
+                                  + std::string{ex.what()} + ")");
+        }
+    }
+
+    std::shared_ptr<value<bool>> parse_bool(std::string::iterator& it,
+                                            const std::string::iterator& end)
+    {
+        auto eat = make_consumer(it, end, [this]() {
+            throw_parse_exception("Attempted to parse invalid boolean value");
+        });
+
+        if (*it == 't')
+        {
+            eat("true");
+            return make_value<bool>(true);
+        }
+        else if (*it == 'f')
+        {
+            eat("false");
+            return make_value<bool>(false);
+        }
+
+        eat.error();
+        return nullptr;
+    }
+
+    std::string::iterator find_end_of_number(std::string::iterator it,
+                                             std::string::iterator end)
+    {
+        return std::find_if(it, end, [](char c) {
+            return !is_number(c) && c != '_' && c != '.' && c != 'e' && c != 'E'
+                   && c != '-' && c != '+';
+        });
+    }
+
+    std::string::iterator find_end_of_date(std::string::iterator it,
+                                           std::string::iterator end)
+    {
+        return std::find_if(it, end, [](char c) {
+            return !is_number(c) && c != 'T' && c != 'Z' && c != ':' && c != '-'
+                   && c != '+' && c != '.';
+        });
+    }
+
+    std::string::iterator find_end_of_time(std::string::iterator it,
+                                           std::string::iterator end)
+    {
+        return std::find_if(it, end, [](char c) {
+            return !is_number(c) && c != ':' && c != '.';
+        });
+    }
+
+    local_time read_time(std::string::iterator& it,
+                         const std::string::iterator& end)
+    {
+        auto time_end = find_end_of_time(it, end);
+
+        auto eat = make_consumer(
+            it, time_end, [&]() { throw_parse_exception("Malformed time"); });
+
+        local_time ltime;
+
+        ltime.hour = eat.eat_digits(2);
+        eat(':');
+        ltime.minute = eat.eat_digits(2);
+        eat(':');
+        ltime.second = eat.eat_digits(2);
+
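+        // Optional fractional seconds: the first digit after '.' is worth
+        // 100000 microseconds, the next 10000, and so on.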
+        int power = 100000;
+        if (it != time_end && *it == '.')
+        {
+            ++it;
+            while (it != time_end && is_number(*it))
+            {
+                ltime.microsecond += power * (*it++ - '0');
+                power /= 10;
+            }
+        }
+
+        if (it != time_end)
+            throw_parse_exception("Malformed time");
+
+        return ltime;
+    }
+
+    std::shared_ptr<value<local_time>>
+    parse_time(std::string::iterator& it, const std::string::iterator& end)
+    {
+        return make_value(read_time(it, end));
+    }
+
+    std::shared_ptr<base> parse_date(std::string::iterator& it,
+                                     const std::string::iterator& end)
+    {
+        auto date_end = find_end_of_date(it, end);
+
+        auto eat = make_consumer(
+            it, date_end, [&]() { throw_parse_exception("Malformed date"); });
+
+        local_date ldate;
+        ldate.year = eat.eat_digits(4);
+        eat('-');
+        ldate.month = eat.eat_digits(2);
+        eat('-');
+        ldate.day = eat.eat_digits(2);
+
+        if (it == date_end)
+            return make_value(ldate);
+
+        eat('T');
+
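+        // local_datetime inherits from local_date and local_time (and
+        // offset_datetime from local_datetime), which is what the
+        // base-class assignments below rely on.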
+        local_datetime ldt;
+        static_cast<local_date&>(ldt) = ldate;
+        static_cast<local_time&>(ldt) = read_time(it, date_end);
+
+        if (it == date_end)
+            return make_value(ldt);
+
+        offset_datetime dt;
+        static_cast<local_datetime&>(dt) = ldt;
+
+        int hoff = 0;
+        int moff = 0;
+        if (*it == '+' || *it == '-')
+        {
+            auto plus = *it == '+';
+            ++it;
+
+            hoff = eat.eat_digits(2);
+            dt.hour_offset = (plus) ? hoff : -hoff;
+            eat(':');
+            moff = eat.eat_digits(2);
+            dt.minute_offset = (plus) ? moff : -moff;
+        }
+        else if (*it == 'Z')
+        {
+            ++it;
+        }
+
+        if (it != date_end)
+            throw_parse_exception("Malformed date");
+
+        return make_value(dt);
+    }
+
+    std::shared_ptr<base> parse_array(std::string::iterator& it,
+                                      std::string::iterator& end)
+    {
+        // this gets ugly because of the "homogeneity" restriction:
+        // arrays can either be of only one type, or contain arrays
+        // (each of those arrays could be of different types, though)
+        //
+        // because of the latter portion, we don't really have a choice
+        // but to represent them as arrays of base values...
+        ++it;
+
+        // ugh---have to read the first value to determine array type...
+        skip_whitespace_and_comments(it, end);
+
+        // edge case---empty array
+        if (*it == ']')
+        {
+            ++it;
+            return make_array();
+        }
+
+        auto val_end = std::find_if(
+            it, end, [](char c) { return c == ',' || c == ']' || c == '#'; });
+        parse_type type = determine_value_type(it, val_end);
+        switch (type)
+        {
+            case parse_type::STRING:
+                return parse_value_array<std::string>(it, end);
+            case parse_type::LOCAL_TIME:
+                return parse_value_array<local_time>(it, end);
+            case parse_type::LOCAL_DATE:
+                return parse_value_array<local_date>(it, end);
+            case parse_type::LOCAL_DATETIME:
+                return parse_value_array<local_datetime>(it, end);
+            case parse_type::OFFSET_DATETIME:
+                return parse_value_array<offset_datetime>(it, end);
+            case parse_type::INT:
+                return parse_value_array<int64_t>(it, end);
+            case parse_type::FLOAT:
+                return parse_value_array<double>(it, end);
+            case parse_type::BOOL:
+                return parse_value_array<bool>(it, end);
+            case parse_type::ARRAY:
+                return parse_object_array<array>(&parser::parse_array, '[', it,
+                                                 end);
+            case parse_type::INLINE_TABLE:
+                return parse_object_array<table_array>(
+                    &parser::parse_inline_table, '{', it, end);
+            default:
+                throw_parse_exception("Unable to parse array");
+        }
+    }
+
+    template <class Value>
+    std::shared_ptr<array> parse_value_array(std::string::iterator& it,
+                                             std::string::iterator& end)
+    {
+        auto arr = make_array();
+        while (it != end && *it != ']')
+        {
+            auto value = parse_value(it, end);
+            if (auto v = value->as<Value>())
+                arr->get().push_back(value);
+            else
+                throw_parse_exception("Arrays must be homogeneous");
+            skip_whitespace_and_comments(it, end);
+            if (*it != ',')
+                break;
+            ++it;
+            skip_whitespace_and_comments(it, end);
+        }
+        if (it != end)
+            ++it;
+        return arr;
+    }
+
+    template <class Object, class Function>
+    std::shared_ptr<Object> parse_object_array(Function&& fun, char delim,
+                                               std::string::iterator& it,
+                                               std::string::iterator& end)
+    {
+        auto arr = make_element<Object>();
+
+        while (it != end && *it != ']')
+        {
+            if (*it != delim)
+                throw_parse_exception("Unexpected character in array");
+
+            arr->get().push_back(((*this).*fun)(it, end));
+            skip_whitespace_and_comments(it, end);
+
+            if (*it != ',')
+                break;
+
+            ++it;
+            skip_whitespace_and_comments(it, end);
+        }
+
+        if (it == end || *it != ']')
+            throw_parse_exception("Unterminated array");
+
+        ++it;
+        return arr;
+    }
+
+    std::shared_ptr<table> parse_inline_table(std::string::iterator& it,
+                                              std::string::iterator& end)
+    {
+        auto tbl = make_table();
+        do
+        {
+            ++it;
+            if (it == end)
+                throw_parse_exception("Unterminated inline table");
+
+            consume_whitespace(it, end);
+            parse_key_value(it, end, tbl.get());
+            consume_whitespace(it, end);
+        } while (*it == ',');
+
+        if (it == end || *it != '}')
+            throw_parse_exception("Unterminated inline table");
+
+        ++it;
+        consume_whitespace(it, end);
+
+        return tbl;
+    }
+
+    void skip_whitespace_and_comments(std::string::iterator& start,
+                                      std::string::iterator& end)
+    {
+        consume_whitespace(start, end);
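+        // Array contents may span multiple lines, so keep pulling lines
+        // from the input until something other than whitespace or a
+        // comment is found.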
+        while (start == end || *start == '#')
+        {
+            if (!detail::getline(input_, line_))
+                throw_parse_exception("Unclosed array");
+            line_number_++;
+            start = line_.begin();
+            end = line_.end();
+            consume_whitespace(start, end);
+        }
+    }
+
+    void consume_whitespace(std::string::iterator& it,
+                            const std::string::iterator& end)
+    {
+        while (it != end && (*it == ' ' || *it == '\t'))
+            ++it;
+    }
+
+    void consume_backwards_whitespace(std::string::iterator& back,
+                                      const std::string::iterator& front)
+    {
+        while (back != front && (*back == ' ' || *back == '\t'))
+            --back;
+    }
+
+    void eol_or_comment(const std::string::iterator& it,
+                        const std::string::iterator& end)
+    {
+        if (it != end && *it != '#')
+            throw_parse_exception("Unidentified trailing character '"
+                                  + std::string{*it}
+                                  + "'---did you forget a '#'?");
+    }
+
+    bool is_time(const std::string::iterator& it,
+                 const std::string::iterator& end)
+    {
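+        // A time must look like HH:MM:SS, optionally followed by '.' and
+        // at least one fractional-second digit.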
+        auto time_end = find_end_of_time(it, end);
+        auto len = std::distance(it, time_end);
+
+        if (len < 8)
+            return false;
+
+        if (it[2] != ':' || it[5] != ':')
+            return false;
+
+        if (len > 8)
+            return it[8] == '.' && len > 9;
+
+        return true;
+    }
+
+    option<parse_type> date_type(const std::string::iterator& it,
+                                 const std::string::iterator& end)
+    {
+        auto date_end = find_end_of_date(it, end);
+        auto len = std::distance(it, date_end);
+
+        if (len < 10)
+            return {};
+
+        if (it[4] != '-' || it[7] != '-')
+            return {};
+
+        if (len >= 19 && it[10] == 'T' && is_time(it + 11, date_end))
+        {
+            // datetime type
+            auto time_end = find_end_of_time(it + 11, date_end);
+            if (time_end == date_end)
+                return {parse_type::LOCAL_DATETIME};
+            else
+                return {parse_type::OFFSET_DATETIME};
+        }
+        else if (len == 10)
+        {
+            // just a regular date
+            return {parse_type::LOCAL_DATE};
+        }
+
+        return {};
+    }
+
+    std::istream& input_;
+    std::string line_;
+    std::size_t line_number_ = 0;
+};
+
+/**
+ * Utility function to parse a file as a TOML file. Returns the root table.
+ * Throws a parse_exception if the file cannot be opened.
+ */
+inline std::shared_ptr<table> parse_file(const std::string& filename)
+{
+#if defined(BOOST_NOWIDE_FSTREAM_INCLUDED_HPP)
+    boost::nowide::ifstream file{filename.c_str()};
+#elif defined(NOWIDE_FSTREAM_INCLUDED_HPP)
+    nowide::ifstream file{filename.c_str()};
+#else
+    std::ifstream file{filename};
+#endif
+    if (!file.is_open())
+        throw parse_exception{filename + " could not be opened for parsing"};
+    parser p{file};
+    return p.parse();
+}
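+// Illustrative usage sketch (not part of the library): parse a file and
+// read a single value. This assumes the table accessors (get_as) declared
+// earlier in this header and a hypothetical "config.toml" containing a
+// top-level "port" key:
+//
+//     auto config = cpptoml::parse_file("config.toml");
+//     auto port = config->get_as<int64_t>("port").value_or(8080);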
+
+template <class... Ts>
+struct value_accept;
+
+template <>
+struct value_accept<>
+{
+    template <class Visitor, class... Args>
+    static void accept(const base&, Visitor&&, Args&&...)
+    {
+        // nothing
+    }
+};
+
+template <class T, class... Ts>
+struct value_accept<T, Ts...>
+{
+    template <class Visitor, class... Args>
+    static void accept(const base& b, Visitor&& visitor, Args&&... args)
+    {
+        if (auto v = b.as<T>())
+        {
+            visitor.visit(*v, std::forward<Args>(args)...);
+        }
+        else
+        {
+            value_accept<Ts...>::accept(b, std::forward<Visitor>(visitor),
+                                        std::forward<Args>(args)...);
+        }
+    }
+};
+
+/**
+ * base implementation of accept() that calls visitor.visit() on the concrete
+ * class.
+ */
+template <class Visitor, class... Args>
+void base::accept(Visitor&& visitor, Args&&... args) const
+{
+    if (is_value())
+    {
+        using value_acceptor
+            = value_accept<std::string, int64_t, double, bool, local_date,
+                           local_time, local_datetime, offset_datetime>;
+        value_acceptor::accept(*this, std::forward<Visitor>(visitor),
+                               std::forward<Args>(args)...);
+    }
+    else if (is_table())
+    {
+        visitor.visit(static_cast<const table&>(*this),
+                      std::forward<Args>(args)...);
+    }
+    else if (is_array())
+    {
+        visitor.visit(static_cast<const array&>(*this),
+                      std::forward<Args>(args)...);
+    }
+    else if (is_table_array())
+    {
+        visitor.visit(static_cast<const table_array&>(*this),
+                      std::forward<Args>(args)...);
+    }
+}
+
+/**
+ * Writer that can be passed to accept() functions of cpptoml objects and
+ * will output valid TOML to a stream.
+ */
+class toml_writer
+{
+  public:
+    /**
+     * Construct a toml_writer that will write to the given stream
+     */
+    toml_writer(std::ostream& s, const std::string& indent_space = "\t")
+        : stream_(s), indent_(indent_space), has_naked_endline_(false)
+    {
+        // nothing
+    }
+
+  public:
+    /**
+     * Output a base value of the TOML tree.
+     */
+    template <class T>
+    void visit(const value<T>& v, bool = false)
+    {
+        write(v);
+    }
+
+    /**
+     * Output a table element of the TOML tree
+     */
+    void visit(const table& t, bool in_array = false)
+    {
+        write_table_header(in_array);
+        std::vector<std::string> values;
+        std::vector<std::string> tables;
+
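+        // Partition the children: simple key/value pairs must be written
+        // before any sub-table header, or they would be attributed to that
+        // sub-table when the output is parsed back.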
+        for (const auto& i : t)
+        {
+            if (i.second->is_table() || i.second->is_table_array())
+            {
+                tables.push_back(i.first);
+            }
+            else
+            {
+                values.push_back(i.first);
+            }
+        }
+
+        for (unsigned int i = 0; i < values.size(); ++i)
+        {
+            path_.push_back(values[i]);
+
+            if (i > 0)
+                endline();
+
+            write_table_item_header(*t.get(values[i]));
+            t.get(values[i])->accept(*this, false);
+            path_.pop_back();
+        }
+
+        for (unsigned int i = 0; i < tables.size(); ++i)
+        {
+            path_.push_back(tables[i]);
+
+            if (values.size() > 0 || i > 0)
+                endline();
+
+            write_table_item_header(*t.get(tables[i]));
+            t.get(tables[i])->accept(*this, false);
+            path_.pop_back();
+        }
+
+        endline();
+    }
+
+    /**
+     * Output an array element of the TOML tree
+     */
+    void visit(const array& a, bool = false)
+    {
+        write("[");
+
+        for (unsigned int i = 0; i < a.get().size(); ++i)
+        {
+            if (i > 0)
+                write(", ");
+
+            if (a.get()[i]->is_array())
+            {
+                a.get()[i]->as_array()->accept(*this, true);
+            }
+            else
+            {
+                a.get()[i]->accept(*this, true);
+            }
+        }
+
+        write("]");
+    }
+
+    /**
+     * Output a table_array element of the TOML tree
+     */
+    void visit(const table_array& t, bool = false)
+    {
+        for (unsigned int j = 0; j < t.get().size(); ++j)
+        {
+            if (j > 0)
+                endline();
+
+            t.get()[j]->accept(*this, true);
+        }
+
+        endline();
+    }
+
+    /**
+     * Escape a string for output.
+     */
+    static std::string escape_string(const std::string& str)
+    {
+        std::string res;
+        for (auto it = str.begin(); it != str.end(); ++it)
+        {
+            if (*it == '\b')
+            {
+                res += "\\b";
+            }
+            else if (*it == '\t')
+            {
+                res += "\\t";
+            }
+            else if (*it == '\n')
+            {
+                res += "\\n";
+            }
+            else if (*it == '\f')
+            {
+                res += "\\f";
+            }
+            else if (*it == '\r')
+            {
+                res += "\\r";
+            }
+            else if (*it == '"')
+            {
+                res += "\\\"";
+            }
+            else if (*it == '\\')
+            {
+                res += "\\\\";
+            }
+            else if ((const uint32_t)*it <= 0x001f)
+            {
+                res += "\\u";
+                std::stringstream ss;
+                ss << std::hex << static_cast<uint32_t>(*it);
+                res += ss.str();
+            }
+            else
+            {
+                res += *it;
+            }
+        }
+        return res;
+    }
+
+  protected:
+    /**
+     * Write out a string.
+     */
+    void write(const value<std::string>& v)
+    {
+        write("\"");
+        write(escape_string(v.get()));
+        write("\"");
+    }
+
+    /**
+     * Write out a double.
+     */
+    void write(const value<double>& v)
+    {
+        std::ios::fmtflags flags{stream_.flags()};
+
+        stream_ << std::showpoint;
+        write(v.get());
+
+        stream_.flags(flags);
+    }
+
+    /**
+     * Write out an integer, local_date, local_time, local_datetime, or
+     * offset_datetime.
+     */
+    template <class T>
+    typename std::enable_if<is_one_of<T, int64_t, local_date, local_time,
+                                      local_datetime,
+                                      offset_datetime>::value>::type
+    write(const value<T>& v)
+    {
+        write(v.get());
+    }
+
+    /**
+     * Write out a boolean.
+     */
+    void write(const value<bool>& v)
+    {
+        write((v.get() ? "true" : "false"));
+    }
+
+    /**
+     * Write out the header of a table.
+     */
+    void write_table_header(bool in_array = false)
+    {
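+        // A table that is an element of a table array gets the [[name]]
+        // header form instead of [name].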
+        if (!path_.empty())
+        {
+            indent();
+
+            write("[");
+
+            if (in_array)
+            {
+                write("[");
+            }
+
+            for (unsigned int i = 0; i < path_.size(); ++i)
+            {
+                if (i > 0)
+                {
+                    write(".");
+                }
+
+                if (path_[i].find_first_not_of("ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
+                                               "fghijklmnopqrstuvwxyz0123456789"
+                                               "_-")
+                    == std::string::npos)
+                {
+                    write(path_[i]);
+                }
+                else
+                {
+                    write("\"");
+                    write(escape_string(path_[i]));
+                    write("\"");
+                }
+            }
+
+            if (in_array)
+            {
+                write("]");
+            }
+
+            write("]");
+            endline();
+        }
+    }
+
+    /**
+     * Write out the identifier for an item in a table.
+     */
+    void write_table_item_header(const base& b)
+    {
+        if (!b.is_table() && !b.is_table_array())
+        {
+            indent();
+
+            if (path_.back().find_first_not_of("ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
+                                               "fghijklmnopqrstuvwxyz0123456789"
+                                               "_-")
+                == std::string::npos)
+            {
+                write(path_.back());
+            }
+            else
+            {
+                write("\"");
+                write(escape_string(path_.back()));
+                write("\"");
+            }
+
+            write(" = ");
+        }
+    }
+
+  private:
+    /**
+     * Indent the proper number of tabs given the size of
+     * the path.
+     */
+    void indent()
+    {
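+        // Start at 1 so that entries directly under the root table are
+        // not indented.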
+        for (std::size_t i = 1; i < path_.size(); ++i)
+            write(indent_);
+    }
+
+    /**
+     * Write a value out to the stream.
+     */
+    template <class T>
+    void write(const T& v)
+    {
+        stream_ << v;
+        has_naked_endline_ = false;
+    }
+
+    /**
+     * Write an endline out to the stream
+     */
+    void endline()
+    {
+        if (!has_naked_endline_)
+        {
+            stream_ << "\n";
+            has_naked_endline_ = true;
+        }
+    }
+
+  private:
+    std::ostream& stream_;
+    const std::string indent_;
+    std::vector<std::string> path_;
+    bool has_naked_endline_;
+};
+
+inline std::ostream& operator<<(std::ostream& stream, const base& b)
+{
+    toml_writer writer{stream};
+    b.accept(writer);
+    return stream;
+}
+
+template <class T>
+std::ostream& operator<<(std::ostream& stream, const value<T>& v)
+{
+    toml_writer writer{stream};
+    v.accept(writer);
+    return stream;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const table& t)
+{
+    toml_writer writer{stream};
+    t.accept(writer);
+    return stream;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const table_array& t)
+{
+    toml_writer writer{stream};
+    t.accept(writer);
+    return stream;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const array& a)
+{
+    toml_writer writer{stream};
+    a.accept(writer);
+    return stream;
+}
+}
+#endif
diff --git a/src/libexpr/attr-set.cc b/src/libexpr/attr-set.cc
index 0474865c6d7d..0785897d2513 100644
--- a/src/libexpr/attr-set.cc
+++ b/src/libexpr/attr-set.cc
@@ -1,5 +1,5 @@
 #include "attr-set.hh"
-#include "eval.hh"
+#include "eval-inline.hh"
 
 #include <algorithm>
 
@@ -7,20 +7,6 @@
 namespace nix {
 
 
-/* Note: Various places expect the allocated memory to be zeroed. */
-static void * allocBytes(size_t n)
-{
-    void * p;
-#if HAVE_BOEHMGC
-    p = GC_malloc(n);
-#else
-    p = calloc(n, 1);
-#endif
-    if (!p) throw std::bad_alloc();
-    return p;
-}
-
-
 /* Allocate a new array of attributes for an attribute set with a specific
    capacity. The space is implicitly reserved after the Bindings
    structure. */
diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh
index 8cc50e561354..c27116e3b448 100644
--- a/src/libexpr/eval-inline.hh
+++ b/src/libexpr/eval-inline.hh
@@ -78,4 +78,18 @@ inline void EvalState::forceList(Value & v, const Pos & pos)
         throwTypeError("value is %1% while a list was expected, at %2%", v, pos);
 }
 
+/* Note: Various places expect the allocated memory to be zeroed. */
+inline void * allocBytes(size_t n)
+{
+    void * p;
+#if HAVE_BOEHMGC
+    p = GC_MALLOC(n);
+#else
+    p = calloc(n, 1);
+#endif
+    if (!p) throw std::bad_alloc();
+    return p;
+}
+
+
 }
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 353097f89713..f41905787f9e 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -13,17 +13,14 @@
 #include <sys/time.h>
 #include <sys/resource.h>
 
+#include <sys/time.h>
+#include <sys/resource.h>
+
 #if HAVE_BOEHMGC
 
 #include <gc/gc.h>
 #include <gc/gc_cpp.h>
 
-#define NEW new (UseGC)
-
-#else
-
-#define NEW new
-
 #endif
 
 
@@ -34,7 +31,7 @@ static char * dupString(const char * s)
 {
     char * t;
 #if HAVE_BOEHMGC
-    t = GC_strdup(s);
+    t = GC_STRDUP(s);
 #else
     t = strdup(s);
 #endif
@@ -43,20 +40,6 @@ static char * dupString(const char * s)
 }
 
 
-/* Note: Various places expect the allocated memory to be zeroed. */
-static void * allocBytes(size_t n)
-{
-    void * p;
-#if HAVE_BOEHMGC
-    p = GC_malloc(n);
-#else
-    p = calloc(n, 1);
-#endif
-    if (!p) throw std::bad_alloc();
-    return p;
-}
-
-
 static void printValue(std::ostream & str, std::set<const Value *> & active, const Value & v)
 {
     checkInterrupt();
@@ -199,8 +182,15 @@ void initGC()
 
 #if HAVE_BOEHMGC
     /* Initialise the Boehm garbage collector. */
+
+    /* Don't look for interior pointers. This reduces the odds of
+       misdetection a bit. */
     GC_set_all_interior_pointers(0);
 
+    /* We don't have any roots in data segments, so don't scan from
+       there. */
+    GC_set_no_dls(1);
+
     GC_INIT();
 
     GC_set_oom_fn(oomHandler);
@@ -307,15 +297,17 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
 
     assert(gcInitialised);
 
+    static_assert(sizeof(Env) <= 16, "environment must be <= 16 bytes");
+
     /* Initialise the Nix expression search path. */
-    if (!settings.pureEval) {
+    if (!evalSettings.pureEval) {
         Strings paths = parseNixPath(getEnv("NIX_PATH", ""));
         for (auto & i : _searchPath) addToSearchPath(i);
         for (auto & i : paths) addToSearchPath(i);
     }
     addToSearchPath("nix=" + canonPath(settings.nixDataDir + "/nix/corepkgs", true));
 
-    if (settings.restrictEval || settings.pureEval) {
+    if (evalSettings.restrictEval || evalSettings.pureEval) {
         allowedPaths = PathSet();
 
         for (auto & i : searchPath) {
@@ -344,7 +336,6 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
 
 EvalState::~EvalState()
 {
-    fileEvalCache.clear();
 }
 
 
@@ -352,25 +343,37 @@ Path EvalState::checkSourcePath(const Path & path_)
 {
     if (!allowedPaths) return path_;
 
+    auto i = resolvedPaths.find(path_);
+    if (i != resolvedPaths.end())
+        return i->second;
+
     bool found = false;
 
+    /* First canonicalize the path without symlinks, so we make sure an
+     * attacker can't append ../../... to a path that would be in allowedPaths
+     * and thus leak symlink targets.
+     */
+    Path abspath = canonPath(path_);
+
     for (auto & i : *allowedPaths) {
-        if (isDirOrInDir(path_, i)) {
+        if (isDirOrInDir(abspath, i)) {
             found = true;
             break;
         }
     }
 
     if (!found)
-        throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path_);
+        throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", abspath);
 
     /* Resolve symlinks. */
-    debug(format("checking access to '%s'") % path_);
-    Path path = canonPath(path_, true);
+    debug(format("checking access to '%s'") % abspath);
+    Path path = canonPath(abspath, true);
 
     for (auto & i : *allowedPaths) {
-        if (isDirOrInDir(path, i))
+        if (isDirOrInDir(path, i)) {
+            resolvedPaths[path_] = path;
             return path;
+        }
     }
 
     throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path);
@@ -379,13 +382,13 @@ Path EvalState::checkSourcePath(const Path & path_)
 
 void EvalState::checkURI(const std::string & uri)
 {
-    if (!settings.restrictEval) return;
+    if (!evalSettings.restrictEval) return;
 
     /* 'uri' should be equal to a prefix, or in a subdirectory of a
        prefix. Thus, the prefix https://github.co does not permit
        access to https://github.com. Note: this allows 'http://' and
        'https://' as prefixes for any http/https URI. */
-    for (auto & prefix : settings.allowedUris.get())
+    for (auto & prefix : evalSettings.allowedUris.get())
         if (uri == prefix ||
             (uri.size() > prefix.size()
             && prefix.size() > 0
@@ -443,7 +446,7 @@ Value * EvalState::addPrimOp(const string & name,
     string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
     Symbol sym = symbols.create(name2);
     v->type = tPrimOp;
-    v->primOp = NEW PrimOp(primOp, arity, sym);
+    v->primOp = new PrimOp(primOp, arity, sym);
     staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl;
     baseEnv.values[baseEnvDispl++] = v;
     baseEnv.values[0]->attrs->push_back(Attr(sym, v));
@@ -562,12 +565,12 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
     if (!var.fromWith) return env->values[var.displ];
 
     while (1) {
-        if (!env->haveWithAttrs) {
+        if (env->type == Env::HasWithExpr) {
             if (noEval) return 0;
             Value * v = allocValue();
             evalAttrs(*env->up, (Expr *) env->values[0], *v);
             env->values[0] = v;
-            env->haveWithAttrs = true;
+            env->type = Env::HasWithAttrs;
         }
         Bindings::iterator j = env->values[0]->attrs->find(var.name);
         if (j != env->values[0]->attrs->end()) {
@@ -581,10 +584,19 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
 }
 
 
+std::atomic<uint64_t> nrValuesFreed{0};
+
+void finalizeValue(void * obj, void * data)
+{
+    nrValuesFreed++;
+}
+
 Value * EvalState::allocValue()
 {
     nrValues++;
-    return (Value *) allocBytes(sizeof(Value));
+    auto v = (Value *) allocBytes(sizeof(Value));
+    //GC_register_finalizer_no_order(v, finalizeValue, nullptr, nullptr, nullptr);
+    return v;
 }
 
 
@@ -597,6 +609,7 @@ Env & EvalState::allocEnv(size_t size)
     nrValuesInEnvs += size;
     Env * env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *));
     env->size = (decltype(Env::size)) size;
+    env->type = Env::Plain;
 
     /* We assume that env->values has been cleared by the allocator; maybeThunk() and lookupVar fromWith expect this. */
 
@@ -716,7 +729,17 @@ void EvalState::evalFile(const Path & path_, Value & v)
     }
 
     printTalkative("evaluating file '%1%'", path2);
-    Expr * e = parseExprFromFile(checkSourcePath(path2));
+    Expr * e = nullptr;
+
+    auto j = fileParseCache.find(path2);
+    if (j != fileParseCache.end())
+        e = j->second;
+
+    if (!e)
+        e = parseExprFromFile(checkSourcePath(path2));
+
+    fileParseCache[path2] = e;
+
     try {
         eval(e, v);
     } catch (Error & e) {
@@ -1059,6 +1082,8 @@ void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos)
 
 void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & pos)
 {
+    forceValue(fun, pos);
+
     if (fun.type == tPrimOp || fun.type == tPrimOpApp) {
         callPrimOp(fun, arg, v, pos);
         return;
@@ -1074,10 +1099,8 @@ void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & po
         auto & fun2 = *allocValue();
         fun2 = fun;
         /* !!! Should we use the attr pos here? */
-        forceValue(*found->value, pos);
         Value v2;
         callFunction(*found->value, fun2, v2, pos);
-        forceValue(v2, pos);
         return callFunction(v2, arg, v, pos);
       }
     }
@@ -1164,7 +1187,6 @@ void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
     if (fun.type == tAttrs) {
         auto found = fun.attrs->find(sFunctor);
         if (found != fun.attrs->end()) {
-            forceValue(*found->value);
             Value * v = allocValue();
             callFunction(*found->value, fun, *v, noPos);
             forceValue(*v);
@@ -1199,7 +1221,7 @@ void ExprWith::eval(EvalState & state, Env & env, Value & v)
     Env & env2(state.allocEnv(1));
     env2.up = &env;
     env2.prevWith = prevWith;
-    env2.haveWithAttrs = false;
+    env2.type = Env::HasWithExpr;
     env2.values[0] = (Value *) attrs;
 
     body->eval(state, env2, v);
@@ -1548,7 +1570,6 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
     if (v.type == tAttrs) {
         auto i = v.attrs->find(sToString);
         if (i != v.attrs->end()) {
-            forceValue(*i->value, pos);
             Value v1;
             callFunction(*i->value, v, v1, pos);
             return coerceToString(pos, v1, context, coerceMore, copyToStore);
@@ -1857,9 +1878,10 @@ size_t valueSize(Value & v)
 
         size_t sz = sizeof(Env) + sizeof(Value *) * env.size;
 
-        for (size_t i = 0; i < env.size; ++i)
-            if (env.values[i])
-                sz += doValue(*env.values[i]);
+        if (env.type != Env::HasWithExpr)
+            for (size_t i = 0; i < env.size; ++i)
+                if (env.values[i])
+                    sz += doValue(*env.values[i]);
 
         if (env.up) sz += doEnv(*env.up);
 
@@ -1888,4 +1910,9 @@ std::ostream & operator << (std::ostream & str, const ExternalValueBase & v) {
 }
 
 
+EvalSettings evalSettings;
+
+static GlobalConfig::Register r1(&evalSettings);
+
+
 }
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
index 86e93a5ac9ce..d0f298e168e9 100644
--- a/src/libexpr/eval.hh
+++ b/src/libexpr/eval.hh
@@ -5,8 +5,10 @@
 #include "nixexpr.hh"
 #include "symbol-table.hh"
 #include "hash.hh"
+#include "config.hh"
 
 #include <map>
+#include <unordered_map>
 
 
 namespace nix {
@@ -34,8 +36,8 @@ struct Env
 {
     Env * up;
     unsigned short size; // used by ‘valueSize’
-    unsigned short prevWith:15; // nr of levels up to next `with' environment
-    unsigned short haveWithAttrs:1;
+    unsigned short prevWith:14; // nr of levels up to next `with' environment
+    enum { Plain = 0, HasWithExpr, HasWithAttrs } type:2;
     Value * values[0];
 };
 
@@ -88,6 +90,14 @@ public:
 private:
     SrcToStore srcToStore;
 
+    /* A cache from path names to parse trees. */
+#if HAVE_BOEHMGC
+    typedef std::map<Path, Expr *, std::less<Path>, traceable_allocator<std::pair<const Path, Expr *> > > FileParseCache;
+#else
+    typedef std::map<Path, Expr *> FileParseCache;
+#endif
+    FileParseCache fileParseCache;
+
     /* A cache from path names to values. */
 #if HAVE_BOEHMGC
     typedef std::map<Path, Value, std::less<Path>, traceable_allocator<std::pair<const Path, Value> > > FileEvalCache;
@@ -100,6 +110,9 @@ private:
 
     std::map<std::string, std::pair<bool, std::string>> searchPathResolved;
 
+    /* Cache used by checkSourcePath(). */
+    std::unordered_map<Path, Path> resolvedPaths;
+
 public:
 
     EvalState(const Strings & _searchPath, ref<Store> store);
@@ -316,4 +329,25 @@ struct InvalidPathError : EvalError
 #endif
 };
 
+struct EvalSettings : Config
+{
+    Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
+        "Whether builtin functions that allow executing native code should be enabled."};
+
+    Setting<bool> restrictEval{this, false, "restrict-eval",
+        "Whether to restrict file system access to paths in $NIX_PATH, "
+        "and network access to the URI prefixes listed in 'allowed-uris'."};
+
+    Setting<bool> pureEval{this, false, "pure-eval",
+        "Whether to restrict file system and network access to files specified by cryptographic hash."};
+
+    Setting<bool> enableImportFromDerivation{this, true, "allow-import-from-derivation",
+        "Whether the evaluator allows importing the result of a derivation."};
+
+    Setting<Strings> allowedUris{this, {}, "allowed-uris",
+        "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."};
+};
+
+extern EvalSettings evalSettings;
+
 }
diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc
index 8b1404595548..3f6017957782 100644
--- a/src/libexpr/json-to-value.cc
+++ b/src/libexpr/json-to-value.cc
@@ -110,7 +110,7 @@ static void parseJSON(EvalState & state, const char * & s, Value & v)
             if (number_type == tFloat)
                 mkFloat(v, stod(tmp_number));
             else
-                mkInt(v, stoi(tmp_number));
+                mkInt(v, stol(tmp_number));
         } catch (std::invalid_argument e) {
             throw JSONParseError("invalid JSON number");
         } catch (std::out_of_range e) {
diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l
index 29ca327c1e4e..a052447d3dce 100644
--- a/src/libexpr/lexer.l
+++ b/src/libexpr/lexer.l
@@ -12,6 +12,8 @@
 
 
 %{
+#include <boost/lexical_cast.hpp>
+
 #include "nixexpr.hh"
 #include "parser-tab.hh"
 
@@ -124,9 +126,11 @@ or          { return OR_KW; }
 
 {ID}        { yylval->id = strdup(yytext); return ID; }
 {INT}       { errno = 0;
-              yylval->n = strtol(yytext, 0, 10);
-              if (errno != 0)
+              try {
+                  yylval->n = boost::lexical_cast<int64_t>(yytext);
+              } catch (const boost::bad_lexical_cast &) {
                   throw ParseError(format("invalid integer '%1%'") % yytext);
+              }
               return INT;
             }
 {FLOAT}     { errno = 0;
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
index eee48887dc22..cbd576d7d126 100644
--- a/src/libexpr/parser.y
+++ b/src/libexpr/parser.y
@@ -273,11 +273,11 @@ void yyerror(YYLTYPE * loc, yyscan_t scanner, ParseData * data, const char * err
 %token IND_STRING_OPEN IND_STRING_CLOSE
 %token ELLIPSIS
 
-%nonassoc IMPL
+%right IMPL
 %left OR
 %left AND
 %nonassoc EQ NEQ
-%left '<' '>' LEQ GEQ
+%nonassoc '<' '>' LEQ GEQ
 %right UPDATE
 %left NOT
 %left '+' '-'
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index 57dc7bd1279d..6f82c6c404f2 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -73,7 +73,7 @@ void EvalState::realiseContext(const PathSet & context)
 
     if (drvs.empty()) return;
 
-    if (!settings.enableImportFromDerivation)
+    if (!evalSettings.enableImportFromDerivation)
         throw EvalError(format("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin()));
 
     /* For performance, prefetch all substitute info. */
@@ -464,7 +464,7 @@ static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Val
 static void prim_getEnv(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
     string name = state.forceStringNoCtx(*args[0], pos);
-    mkString(v, settings.restrictEval || settings.pureEval ? "" : getEnv(name));
+    mkString(v, evalSettings.restrictEval || evalSettings.pureEval ? "" : getEnv(name));
 }
 
 
@@ -1031,7 +1031,7 @@ static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Valu
 static void addPath(EvalState & state, const Pos & pos, const string & name, const Path & path_,
     Value * filterFun, bool recursive, const Hash & expectedHash, Value & v)
 {
-    const auto path = settings.pureEval && expectedHash ?
+    const auto path = evalSettings.pureEval && expectedHash ?
         path_ :
         state.checkSourcePath(path_);
     PathFilter filter = filterFun ? ([&](const Path & path) {
@@ -1356,6 +1356,24 @@ static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args
 }
 
 
+/* Apply a function to every element of an attribute set. */
+static void prim_mapAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceAttrs(*args[1], pos);
+
+    state.mkAttrs(v, args[1]->attrs->size());
+
+    for (auto & i : *args[1]->attrs) {
+        Value * vName = state.allocValue();
+        Value * vFun2 = state.allocValue();
+        mkString(*vName, i.name);
+        mkApp(*vFun2, *args[0], *vName);
+        mkApp(*state.allocAttr(v, i.name), *vFun2, *i.value);
+    }
+}
+
+
+
 /*************************************************************
  * Lists
  *************************************************************/
@@ -1410,7 +1428,6 @@ static void prim_tail(EvalState & state, const Pos & pos, Value * * args, Value
 /* Apply a function to every element of a list. */
 static void prim_map(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
-    state.forceFunction(*args[0], pos);
     state.forceList(*args[1], pos);
 
     state.mkList(v, args[1]->listSize());
@@ -1489,19 +1506,20 @@ static void prim_foldlStrict(EvalState & state, const Pos & pos, Value * * args,
     state.forceFunction(*args[0], pos);
     state.forceList(*args[2], pos);
 
-    Value * vCur = args[1];
+    if (args[2]->listSize()) {
+        Value * vCur = args[1];
 
-    if (args[2]->listSize())
         for (unsigned int n = 0; n < args[2]->listSize(); ++n) {
             Value vTmp;
             state.callFunction(*args[0], *vCur, vTmp, pos);
             vCur = n == args[2]->listSize() - 1 ? &v : state.allocValue();
             state.callFunction(vTmp, *args[2]->listElems()[n], *vCur, pos);
         }
-    else
-        v = *vCur;
-
-    state.forceValue(v);
+        state.forceValue(v);
+    } else {
+        state.forceValue(*args[1]);
+        v = *args[1];
+    }
 }
 
 
@@ -1538,7 +1556,6 @@ static void prim_all(EvalState & state, const Pos & pos, Value * * args, Value &
 
 static void prim_genList(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
-    state.forceFunction(*args[0], pos);
     auto len = state.forceInt(*args[1], pos);
 
     if (len < 0)
@@ -1627,6 +1644,35 @@ static void prim_partition(EvalState & state, const Pos & pos, Value * * args, V
 }
 
 
+/* concatMap = f: list: concatLists (map f list); */
+/* The C++ version avoids allocating intermediate `mkApp' thunks by calling `f' eagerly. */
+static void prim_concatMap(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceFunction(*args[0], pos);
+    state.forceList(*args[1], pos);
+    auto nrLists = args[1]->listSize();
+
+    Value lists[nrLists];
+    size_t len = 0;
+
+    for (unsigned int n = 0; n < nrLists; ++n) {
+        Value * vElem = args[1]->listElems()[n];
+        state.callFunction(*args[0], *vElem, lists[n], pos);
+        state.forceList(lists[n], pos);
+        len += lists[n].listSize();
+    }
+
+    state.mkList(v, len);
+    auto out = v.listElems();
+    for (unsigned int n = 0, pos = 0; n < nrLists; ++n) {
+        auto l = lists[n].listSize();
+        if (l)
+            memcpy(out + pos, lists[n].listElems(), l * sizeof(Value *));
+        pos += l;
+    }
+}
+
+
 /*************************************************************
  * Integer arithmetic
  *************************************************************/
@@ -1634,6 +1680,8 @@ static void prim_partition(EvalState & state, const Pos & pos, Value * * args, V
 
 static void prim_add(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
+    state.forceValue(*args[0], pos);
+    state.forceValue(*args[1], pos);
     if (args[0]->type == tFloat || args[1]->type == tFloat)
         mkFloat(v, state.forceFloat(*args[0], pos) + state.forceFloat(*args[1], pos));
     else
@@ -1643,6 +1691,8 @@ static void prim_add(EvalState & state, const Pos & pos, Value * * args, Value &
 
 static void prim_sub(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
+    state.forceValue(*args[0], pos);
+    state.forceValue(*args[1], pos);
     if (args[0]->type == tFloat || args[1]->type == tFloat)
         mkFloat(v, state.forceFloat(*args[0], pos) - state.forceFloat(*args[1], pos));
     else
@@ -1652,6 +1702,8 @@ static void prim_sub(EvalState & state, const Pos & pos, Value * * args, Value &
 
 static void prim_mul(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
+    state.forceValue(*args[0], pos);
+    state.forceValue(*args[1], pos);
     if (args[0]->type == tFloat || args[1]->type == tFloat)
         mkFloat(v, state.forceFloat(*args[0], pos) * state.forceFloat(*args[1], pos));
     else
@@ -1661,6 +1713,9 @@ static void prim_mul(EvalState & state, const Pos & pos, Value * * args, Value &
 
 static void prim_div(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
+    state.forceValue(*args[0], pos);
+    state.forceValue(*args[1], pos);
+
     NixFloat f2 = state.forceFloat(*args[1], pos);
     if (f2 == 0) throw EvalError(format("division by zero, at %1%") % pos);
 
@@ -1676,6 +1731,20 @@ static void prim_div(EvalState & state, const Pos & pos, Value * * args, Value &
     }
 }
 
+static void prim_bitAnd(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    mkInt(v, state.forceInt(*args[0], pos) & state.forceInt(*args[1], pos));
+}
+
+static void prim_bitOr(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    mkInt(v, state.forceInt(*args[0], pos) | state.forceInt(*args[1], pos));
+}
+
+static void prim_bitXor(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    mkInt(v, state.forceInt(*args[0], pos) ^ state.forceInt(*args[1], pos));
+}
 
 static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
@@ -2042,7 +2111,7 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
 
     state.checkURI(url);
 
-    if (settings.pureEval && !expectedHash)
+    if (evalSettings.pureEval && !expectedHash)
         throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", who);
 
     Path res = getDownloader()->downloadCached(state.store, url, unpack, name, expectedHash);
@@ -2110,12 +2179,12 @@ void EvalState::createBaseEnv()
         addConstant(name, v);
     };
 
-    if (!settings.pureEval) {
+    if (!evalSettings.pureEval) {
         mkInt(v, time(0));
         addConstant("__currentTime", v);
     }
 
-    if (!settings.pureEval) {
+    if (!evalSettings.pureEval) {
         mkString(v, settings.thisSystem);
         addConstant("__currentSystem", v);
     }
@@ -2140,7 +2209,7 @@ void EvalState::createBaseEnv()
     mkApp(v, *vScopedImport, *v2);
     forceValue(v);
     addConstant("import", v);
-    if (settings.enableNativeCode) {
+    if (evalSettings.enableNativeCode) {
         addPrimOp("__importNative", 2, prim_importNative);
         addPrimOp("__exec", 1, prim_exec);
     }
@@ -2167,7 +2236,7 @@ void EvalState::createBaseEnv()
 
     // Paths
     addPrimOp("__toPath", 1, prim_toPath);
-    if (settings.pureEval)
+    if (evalSettings.pureEval)
         addPurityError("__storePath");
     else
         addPrimOp("__storePath", 1, prim_storePath);
@@ -2198,6 +2267,7 @@ void EvalState::createBaseEnv()
     addPrimOp("__intersectAttrs", 2, prim_intersectAttrs);
     addPrimOp("__catAttrs", 2, prim_catAttrs);
     addPrimOp("__functionArgs", 1, prim_functionArgs);
+    addPrimOp("__mapAttrs", 2, prim_mapAttrs);
 
     // Lists
     addPrimOp("__isList", 1, prim_isList);
@@ -2215,12 +2285,16 @@ void EvalState::createBaseEnv()
     addPrimOp("__genList", 2, prim_genList);
     addPrimOp("__sort", 2, prim_sort);
     addPrimOp("__partition", 2, prim_partition);
+    addPrimOp("__concatMap", 2, prim_concatMap);
 
     // Integer arithmetic
     addPrimOp("__add", 2, prim_add);
     addPrimOp("__sub", 2, prim_sub);
     addPrimOp("__mul", 2, prim_mul);
     addPrimOp("__div", 2, prim_div);
+    addPrimOp("__bitAnd", 2, prim_bitAnd);
+    addPrimOp("__bitOr", 2, prim_bitOr);
+    addPrimOp("__bitXor", 2, prim_bitXor);
     addPrimOp("__lessThan", 2, prim_lessThan);
 
     // String manipulation
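
The arithmetic primops above now force both arguments before inspecting their type, and the pure-evaluation and native-code checks read from evalSettings instead of the global settings object. The definition of EvalSettings is not part of this section (it lives in src/libexpr/eval.hh, which this commit also touches); the following is a minimal sketch of the shape those references presumably assume, mirroring the Config/Setting pattern used by DownloadSettings later in this commit and collecting the eval-related settings removed from globals.hh below. Anything beyond those settings is an assumption.

    // Hedged sketch, not part of the patch.
    struct EvalSettings : Config
    {
        Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
            "Whether builtin functions that allow executing native code should be enabled."};

        Setting<bool> restrictEval{this, false, "restrict-eval",
            "Whether to restrict file system access to paths in $NIX_PATH, "
            "and network access to the URI prefixes listed in 'allowed-uris'."};

        Setting<bool> pureEval{this, false, "pure-eval",
            "Whether to restrict file system and network access to files specified by cryptographic hash."};

        Setting<Strings> allowedUris{this, {}, "allowed-uris",
            "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."};
    };

    extern EvalSettings evalSettings;
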
diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc
index 8bb74dad639e..0c6539959bf6 100644
--- a/src/libexpr/primops/fetchGit.cc
+++ b/src/libexpr/primops/fetchGit.cc
@@ -28,7 +28,7 @@ GitInfo exportGit(ref<Store> store, const std::string & uri,
     std::experimental::optional<std::string> ref, std::string rev,
     const std::string & name)
 {
-    if (settings.pureEval && rev == "")
+    if (evalSettings.pureEval && rev == "")
         throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision");
 
     if (!ref && rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.git")) {
@@ -219,8 +219,6 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va
     } else
         url = state.coerceToString(pos, *args[0], context, false, false);
 
-    if (!isUri(url)) url = absPath(url);
-
     // FIXME: git externals probably can be used to bypass the URI
     // whitelist. Ah well.
     state.checkURI(url);
diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc
index a75c5fc2ddff..97cda2458c9b 100644
--- a/src/libexpr/primops/fetchMercurial.cc
+++ b/src/libexpr/primops/fetchMercurial.cc
@@ -27,7 +27,7 @@ std::regex commitHashRegex("^[0-9a-fA-F]{40}$");
 HgInfo exportMercurial(ref<Store> store, const std::string & uri,
     std::string rev, const std::string & name)
 {
-    if (settings.pureEval && rev == "")
+    if (evalSettings.pureEval && rev == "")
         throw Error("in pure evaluation mode, 'fetchMercurial' requires a Mercurial revision");
 
     if (rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.hg")) {
@@ -184,8 +184,6 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar
     } else
         url = state.coerceToString(pos, *args[0], context, false, false);
 
-    if (!isUri(url)) url = absPath(url);
-
     // FIXME: git externals probably can be used to bypass the URI
     // whitelist. Ah well.
     state.checkURI(url);
diff --git a/src/libexpr/primops/fromTOML.cc b/src/libexpr/primops/fromTOML.cc
new file mode 100644
index 000000000000..4128de05d0cf
--- /dev/null
+++ b/src/libexpr/primops/fromTOML.cc
@@ -0,0 +1,77 @@
+#include "primops.hh"
+#include "eval-inline.hh"
+
+#include "cpptoml/cpptoml.h"
+
+namespace nix {
+
+static void prim_fromTOML(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    using namespace cpptoml;
+
+    auto toml = state.forceStringNoCtx(*args[0], pos);
+
+    std::istringstream tomlStream(toml);
+
+    std::function<void(Value &, std::shared_ptr<base>)> visit;
+
+    visit = [&](Value & v, std::shared_ptr<base> t) {
+
+        if (auto t2 = t->as_table()) {
+
+            size_t size = 0;
+            for (auto & i : *t2) { (void) i; size++; }
+
+            state.mkAttrs(v, size);
+
+            for (auto & i : *t2) {
+                auto & v2 = *state.allocAttr(v, state.symbols.create(i.first));
+
+                if (auto i2 = i.second->as_table_array()) {
+                    size_t size2 = i2->get().size();
+                    state.mkList(v2, size2);
+                    for (size_t j = 0; j < size2; ++j)
+                        visit(*(v2.listElems()[j] = state.allocValue()), i2->get()[j]);
+                }
+                else
+                    visit(v2, i.second);
+            }
+
+            v.attrs->sort();
+        }
+
+        else if (auto t2 = t->as_array()) {
+            size_t size = t2->get().size();
+
+            state.mkList(v, size);
+
+            for (size_t i = 0; i < size; ++i)
+                visit(*(v.listElems()[i] = state.allocValue()), t2->get()[i]);
+        }
+
+        else if (t->is_value()) {
+            if (auto val = t->as<int64_t>())
+                mkInt(v, val->get());
+            else if (auto val = t->as<NixFloat>())
+                mkFloat(v, val->get());
+            else if (auto val = t->as<bool>())
+                mkBool(v, val->get());
+            else if (auto val = t->as<std::string>())
+                mkString(v, val->get());
+            else
+                throw EvalError("unsupported value type in TOML");
+        }
+
+        else abort();
+    };
+
+    try {
+        visit(v, parser(tomlStream).parse());
+    } catch (std::runtime_error & e) {
+        throw EvalError("while parsing a TOML string at %s: %s", pos, e.what());
+    }
+}
+
+static RegisterPrimOp r("fromTOML", 1, prim_fromTOML);
+
+}
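
prim_fromTOML exposes the bundled cpptoml parser as builtins.fromTOML, mapping tables to attribute sets, arrays and table arrays to lists, and scalars to the corresponding Nix types. A small standalone sketch of the cpptoml calls the visitor relies on (parser(...).parse(), table iteration, as<T>()/get()); the input string is hypothetical and the program is not part of the patch.

    #include <iostream>
    #include <sstream>
    #include "cpptoml/cpptoml.h"   // bundled header added by this commit

    int main()
    {
        std::istringstream toml("port = 8080\nname = \"nix\"\n");

        std::shared_ptr<cpptoml::table> root = cpptoml::parser(toml).parse();

        for (auto & kv : *root) {
            if (auto i = kv.second->as<int64_t>())
                std::cout << kv.first << " = " << i->get() << "\n";
            else if (auto s = kv.second->as<std::string>())
                std::cout << kv.first << " = " << s->get() << "\n";
        }
    }
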
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
index 66b41a158400..e1ec87d3b84c 100644
--- a/src/libexpr/value.hh
+++ b/src/libexpr/value.hh
@@ -43,8 +43,8 @@ class XMLWriter;
 class JSONPlaceholder;
 
 
-typedef long NixInt;
-typedef float NixFloat;
+typedef int64_t NixInt;
+typedef double NixFloat;
 
 /* External values must descend from ExternalValueBase, so that
  * type-agnostic nix functions (e.g. showType) can be implemented
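
Widening NixInt from long to int64_t makes Nix integers 64-bit on every platform data model, and switching NixFloat from float to double avoids precision loss for values that previously did not round-trip. A standalone illustration, not from the patch:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        // 'long' is 32 bits on ILP32 and LLP64 targets; int64_t is 64 bits everywhere.
        std::cout << "sizeof(long) = " << sizeof(long)
                  << ", sizeof(int64_t) = " << sizeof(int64_t) << "\n";

        // float has a 24-bit significand, so integers above 2^24 stop being exact.
        std::cout.precision(17);
        std::cout << "float:  " << (float) 16777217 << "\n";   // prints 16777216
        std::cout << "double: " << (double) 16777217 << "\n";  // prints 16777217
    }
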
diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc
index bcc05c2cdad6..4c35a4199590 100644
--- a/src/libmain/common-args.cc
+++ b/src/libmain/common-args.cc
@@ -29,14 +29,14 @@ MixCommonArgs::MixCommonArgs(const string & programName)
         .arity(2)
         .handler([](std::vector<std::string> ss) {
             try {
-                settings.set(ss[0], ss[1]);
+                globalConfig.set(ss[0], ss[1]);
             } catch (UsageError & e) {
                 warn(e.what());
             }
         });
 
     std::string cat = "config";
-    settings.convertToArgs(*this, cat);
+    globalConfig.convertToArgs(*this, cat);
 
     // Backward compatibility hack: nix-env already had a --system flag.
     if (programName == "nix-env") longFlags.erase("system");
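
With every settings object registered behind globalConfig, an '--option <name> <value>' pair from any front-end reduces to a single set() call, dispatched to whichever Config object (Settings, the download settings, plugin settings) declared that name. A minimal usage sketch, not part of the patch, assuming globalConfig is declared in config.hh:

    #include "config.hh"

    // Applies one name/value pair; unknown names surface as UsageError,
    // which the handler above downgrades to a warning.
    void applyOption(const std::string & name, const std::string & value)
    {
        nix::globalConfig.set(name, value);
    }
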
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 91a4eaf922a6..4ed34e54dc55 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -109,7 +109,7 @@ void initNix()
     opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks());
     CRYPTO_set_locking_callback(opensslLockCallback);
 
-    settings.loadConfFile();
+    loadConfFile();
 
     startSignalHandlerThread();
 
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 2e9a13e564ca..4527ee6ba660 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -54,17 +54,38 @@ void BinaryCacheStore::init()
     }
 }
 
-std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
+void BinaryCacheStore::getFile(const std::string & path,
+    Callback<std::shared_ptr<std::string>> callback)
+{
+    try {
+        callback(getFile(path));
+    } catch (...) { callback.rethrow(); }
+}
+
+void BinaryCacheStore::getFile(const std::string & path, Sink & sink)
 {
     std::promise<std::shared_ptr<std::string>> promise;
     getFile(path,
-        [&](std::shared_ptr<std::string> result) {
-            promise.set_value(result);
-        },
-        [&](std::exception_ptr exc) {
-            promise.set_exception(exc);
-        });
-    return promise.get_future().get();
+        {[&](std::future<std::shared_ptr<std::string>> result) {
+            try {
+                promise.set_value(result.get());
+            } catch (...) {
+                promise.set_exception(std::current_exception());
+            }
+        }});
+    auto data = promise.get_future().get();
+    sink((unsigned char *) data->data(), data->size());
+}
+
+std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
+{
+    StringSink sink;
+    try {
+        getFile(path, sink);
+    } catch (NoSuchBinaryCacheFile &) {
+        return nullptr;
+    }
+    return sink.s;
 }
 
 Path BinaryCacheStore::narInfoFileFor(const Path & storePath)
@@ -196,30 +217,30 @@ void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
 {
     auto info = queryPathInfo(storePath).cast<const NarInfo>();
 
-    auto nar = getFile(info->url);
-
-    if (!nar) throw Error(format("file '%s' missing from binary cache") % info->url);
-
-    stats.narRead++;
-    stats.narReadCompressedBytes += nar->size();
-
     uint64_t narSize = 0;
 
-    StringSource source(*nar);
-
     LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
         sink(data, len);
         narSize += len;
     });
 
-    decompress(info->compression, source, wrapperSink);
+    auto decompressor = makeDecompressionSink(info->compression, wrapperSink);
 
+    try {
+        getFile(info->url, *decompressor);
+    } catch (NoSuchBinaryCacheFile & e) {
+        throw SubstituteGone(e.what());
+    }
+
+    decompressor->finish();
+
+    stats.narRead++;
+    //stats.narReadCompressedBytes += nar->size(); // FIXME
     stats.narReadBytes += narSize;
 }
 
 void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure)
+    Callback<std::shared_ptr<ValidPathInfo>> callback)
 {
     auto uri = getUri();
     auto act = std::make_shared<Activity>(*logger, lvlTalkative, actQueryPathInfo,
@@ -229,17 +250,22 @@ void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
     auto narInfoFile = narInfoFileFor(storePath);
 
     getFile(narInfoFile,
-        [=](std::shared_ptr<std::string> data) {
-            if (!data) return success(0);
+        {[=](std::future<std::shared_ptr<std::string>> fut) {
+            try {
+                auto data = fut.get();
 
-            stats.narInfoRead++;
+                if (!data) return callback(nullptr);
 
-            callSuccess(success, failure, (std::shared_ptr<ValidPathInfo>)
-                std::make_shared<NarInfo>(*this, *data, narInfoFile));
+                stats.narInfoRead++;
 
-            (void) act; // force Activity into this lambda to ensure it stays alive
-        },
-        failure);
+                callback((std::shared_ptr<ValidPathInfo>)
+                    std::make_shared<NarInfo>(*this, *data, narInfoFile));
+
+                (void) act; // force Activity into this lambda to ensure it stays alive
+            } catch (...) {
+                callback.rethrow();
+            }
+        }});
 }
 
 Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
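
narFromPath no longer buffers the whole compressed NAR in a string: the file is streamed through a decompression sink into the caller's sink, so memory use stays bounded. A small sketch of that push-style decompression pattern, not part of the patch; the function name is hypothetical, the calls (makeDecompressionSink, finish(), StringSink) are the ones used above.

    #include <iostream>
    #include "compression.hh"   // makeDecompressionSink
    #include "serialise.hh"     // StringSink

    // Pushes already-downloaded compressed bytes through a decompression sink;
    // finish() flushes whatever the decompressor still buffers internally.
    void decompressInto(const std::string & method, const std::string & compressed)
    {
        using namespace nix;

        StringSink out;
        auto decompressor = makeDecompressionSink(method, out);

        (*decompressor)((unsigned char *) compressed.data(), compressed.size());
        decompressor->finish();

        std::cout << "decompressed to " << out.s->size() << " bytes\n";
    }
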
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index e20b968442b7..6bc83fc50ca1 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -38,11 +38,16 @@ public:
         const std::string & data,
         const std::string & mimeType) = 0;
 
-    /* Return the contents of the specified file, or null if it
-       doesn't exist. */
+    /* Note: subclasses must implement at least one of the two
+       following getFile() methods. */
+
+    /* Dump the contents of the specified file to a sink. */
+    virtual void getFile(const std::string & path, Sink & sink);
+
+    /* Fetch the specified file and call the specified callback with
+       the result. A subclass may implement this asynchronously. */
     virtual void getFile(const std::string & path,
-        std::function<void(std::shared_ptr<std::string>)> success,
-        std::function<void(std::exception_ptr exc)> failure) = 0;
+        Callback<std::shared_ptr<std::string>> callback);
 
     std::shared_ptr<std::string> getFile(const std::string & path);
 
@@ -71,8 +76,7 @@ public:
     { unsupported(); }
 
     void queryPathInfoUncached(const Path & path,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override;
+        Callback<std::shared_ptr<ValidPathInfo>> callback) override;
 
     void queryReferrers(const Path & path,
         PathSet & referrers) override
@@ -131,4 +135,6 @@ public:
 
 };
 
+MakeError(NoSuchBinaryCacheFile, Error);
+
 }
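
The success/failure lambda pairs are replaced throughout this commit by a single Callback<T>, constructed from one function that takes a std::future<T>: invoking the callback with a value delivers a ready future, and rethrow() delivers the in-flight exception. Its real definition is in libutil (util.hh is extended by this commit) and is not shown in this section; a minimal self-contained sketch of the shape the call sites above assume:

    #include <exception>
    #include <functional>
    #include <future>

    // Sketch only; the name is changed to avoid suggesting this is the real type.
    template<typename T>
    struct CallbackSketch
    {
        std::function<void(std::future<T>)> fun;

        CallbackSketch(std::function<void(std::future<T>)> fun) : fun(fun) { }

        void operator()(T && t)
        {
            std::promise<T> promise;
            promise.set_value(std::move(t));
            fun(promise.get_future());
        }

        void rethrow(std::exception_ptr exc = std::current_exception())
        {
            std::promise<T> promise;
            promise.set_exception(exc);
            fun(promise.get_future());
        }
    };
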
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index f70ab8108fd7..cd37f7a3fc08 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -29,7 +29,9 @@
 #include <sys/utsname.h>
 #include <sys/select.h>
 #include <sys/resource.h>
+#include <sys/socket.h>
 #include <fcntl.h>
+#include <netdb.h>
 #include <unistd.h>
 #include <errno.h>
 #include <cstring>
@@ -672,8 +674,10 @@ HookInstance::HookInstance()
     toHook.readSide = -1;
 
     sink = FdSink(toHook.writeSide.get());
-    for (auto & setting : settings.getSettings())
-        sink << 1 << setting.first << setting.second;
+    std::map<std::string, Config::SettingInfo> settings;
+    globalConfig.getSettings(settings);
+    for (auto & setting : settings)
+        sink << 1 << setting.first << setting.second.value;
     sink << 0;
 }
 
@@ -731,7 +735,7 @@ private:
 
     /* Whether to retry substituting the outputs after building the
        inputs. */
-    bool retrySubstitution = false;
+    bool retrySubstitution;
 
     /* The derivation stored at drvPath. */
     std::unique_ptr<BasicDerivation> drv;
@@ -1121,6 +1125,8 @@ void DerivationGoal::haveDerivation()
 {
     trace("have derivation");
 
+    retrySubstitution = false;
+
     for (auto & i : drv->outputs)
         worker.store.addTempRoot(i.second.path);
 
@@ -1159,7 +1165,7 @@ void DerivationGoal::outputsSubstituted()
     /*  If the substitutes form an incomplete closure, then we should
         build the dependencies of this derivation, but after that, we
         can still use the substitutes for this derivation itself. */
-    if (nrIncompleteClosure > 0 && !retrySubstitution) retrySubstitution = true;
+    if (nrIncompleteClosure > 0) retrySubstitution = true;
 
     nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
 
@@ -1773,12 +1779,14 @@ static std::once_flag dns_resolve_flag;
 static void preloadNSS() {
     /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
        one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
-       been loaded in the parent. So we force a download of an invalid URL to force the NSS machinery to
+       been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
        load its lookup libraries in the parent before any child gets a chance to. */
     std::call_once(dns_resolve_flag, []() {
-        DownloadRequest request("http://this.pre-initializes.the.dns.resolvers.invalid");
-        request.tries = 1; // We only need to do it once, and this also suppresses an annoying warning
-        try { getDownloader()->download(request); } catch (...) {}
+        struct addrinfo *res = NULL;
+
+        if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", NULL, &res) != 0) {
+            if (res) freeaddrinfo(res);
+        }
     });
 }
 
@@ -1999,7 +2007,7 @@ void DerivationGoal::startBuilder()
 
         /* Create /etc/hosts with localhost entry. */
         if (!fixedOutput)
-            writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n");
+            writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n::1 localhost\n");
 
         /* Make the closure of the inputs available in the chroot,
            rather than the whole Nix store.  This prevents any access
@@ -3522,8 +3530,8 @@ private:
     /* The current substituter. */
     std::shared_ptr<Store> sub;
 
-    /* Whether any substituter can realise this path. */
-    bool hasSubstitute;
+    /* Whether a substituter failed. */
+    bool substituterFailed = false;
 
     /* Path info returned by the substituter's query info operation. */
     std::shared_ptr<const ValidPathInfo> info;
@@ -3587,7 +3595,6 @@ public:
 
 SubstitutionGoal::SubstitutionGoal(const Path & storePath, Worker & worker, RepairFlag repair)
     : Goal(worker)
-    , hasSubstitute(false)
     , repair(repair)
 {
     this->storePath = storePath;
@@ -3651,9 +3658,9 @@ void SubstitutionGoal::tryNext()
         /* Hack: don't indicate failure if there were no substituters.
            In that case the calling derivation should just do a
            build. */
-        amDone(hasSubstitute ? ecFailed : ecNoSubstituters);
+        amDone(substituterFailed ? ecFailed : ecNoSubstituters);
 
-        if (hasSubstitute) {
+        if (substituterFailed) {
             worker.failedSubstitutions++;
             worker.updateProgress();
         }
@@ -3689,8 +3696,6 @@ void SubstitutionGoal::tryNext()
 
     worker.updateProgress();
 
-    hasSubstitute = true;
-
     /* Bail out early if this substituter lacks a valid
        signature. LocalStore::addToStore() also checks for this, but
        only after we've downloaded the path. */
@@ -3805,8 +3810,19 @@ void SubstitutionGoal::finished()
         state = &SubstitutionGoal::init;
         worker.waitForAWhile(shared_from_this());
         return;
-    } catch (Error & e) {
-        printError(e.msg());
+    } catch (std::exception & e) {
+        printError(e.what());
+
+        /* Cause the parent build to fail unless --fallback is given,
+           or the substitute has disappeared. The latter case behaves
+           the same as the substitute never having existed in the
+           first place. */
+        try {
+            throw;
+        } catch (SubstituteGone &) {
+        } catch (...) {
+            substituterFailed = true;
+        }
 
         /* Try the next substitute. */
         state = &SubstitutionGoal::tryNext;
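
SubstitutionGoal::finished() now distinguishes a substitute that vanished from the cache (SubstituteGone, introduced above) from a real substituter failure by re-throwing the caught exception inside a nested try block. A standalone illustration of that idiom, not from the patch, with a stand-in exception type:

    #include <iostream>
    #include <stdexcept>

    struct SubstituteGoneDemo : std::runtime_error
    {
        using std::runtime_error::runtime_error;
    };

    int main()
    {
        bool substituterFailed = false;

        try {
            throw std::runtime_error("substituter returned HTTP 500");
        } catch (std::exception & e) {
            std::cerr << e.what() << "\n";
            try {
                throw;                        // re-throw the exception being handled
            } catch (SubstituteGoneDemo &) {  // vanished substitute: not a failure
            } catch (...) {
                substituterFailed = true;     // anything else counts as a failure
            }
        }

        std::cout << "substituterFailed = " << substituterFailed << "\n";
    }
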
diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc
index 4ca4a838e3c4..b4dcb35f951a 100644
--- a/src/libstore/builtins/fetchurl.cc
+++ b/src/libstore/builtins/fetchurl.cc
@@ -22,52 +22,55 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
         return i->second;
     };
 
-    auto fetch = [&](const string & url) {
-        /* No need to do TLS verification, because we check the hash of
-           the result anyway. */
-        DownloadRequest request(url);
-        request.verifyTLS = false;
-        request.decompress = false;
+    Path storePath = getAttr("out");
+    auto mainUrl = getAttr("url");
 
-        /* Note: have to use a fresh downloader here because we're in
-           a forked process. */
-        auto data = makeDownloader()->download(request);
-        assert(data.data);
+    /* Note: have to use a fresh downloader here because we're in
+       a forked process. */
+    auto downloader = makeDownloader();
 
-        return data.data;
-    };
+    auto fetch = [&](const std::string & url) {
+
+        auto source = sinkToSource([&](Sink & sink) {
+
+            /* No need to do TLS verification, because we check the hash of
+               the result anyway. */
+            DownloadRequest request(url);
+            request.verifyTLS = false;
+            request.decompress = false;
+
+            auto decompressor = makeDecompressionSink(
+                hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
+            downloader->download(std::move(request), *decompressor);
+            decompressor->finish();
+        });
+
+        if (get(drv.env, "unpack", "") == "1")
+            restorePath(storePath, *source);
+        else
+            writeFile(storePath, *source);
 
-    std::shared_ptr<std::string> data;
+        auto executable = drv.env.find("executable");
+        if (executable != drv.env.end() && executable->second == "1") {
+            if (chmod(storePath.c_str(), 0755) == -1)
+                throw SysError(format("making '%1%' executable") % storePath);
+        }
+    };
 
+    /* Try the hashed mirrors first. */
     if (getAttr("outputHashMode") == "flat")
         for (auto hashedMirror : settings.hashedMirrors.get())
             try {
                 if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
                 auto ht = parseHashType(getAttr("outputHashAlgo"));
-                data = fetch(hashedMirror + printHashType(ht) + "/" + Hash(getAttr("outputHash"), ht).to_string(Base16, false));
-                break;
+                fetch(hashedMirror + printHashType(ht) + "/" + Hash(getAttr("outputHash"), ht).to_string(Base16, false));
+                return;
             } catch (Error & e) {
                 debug(e.what());
             }
 
-    if (!data) data = fetch(getAttr("url"));
-
-    Path storePath = getAttr("out");
-
-    auto unpack = drv.env.find("unpack");
-    if (unpack != drv.env.end() && unpack->second == "1") {
-        if (string(*data, 0, 6) == string("\xfd" "7zXZ\0", 6))
-            data = decompress("xz", *data);
-        StringSource source(*data);
-        restorePath(storePath, source);
-    } else
-        writeFile(storePath, *data);
-
-    auto executable = drv.env.find("executable");
-    if (executable != drv.env.end() && executable->second == "1") {
-        if (chmod(storePath.c_str(), 0755) == -1)
-            throw SysError(format("making '%1%' executable") % storePath);
-    }
+    /* Otherwise try the specified URL. */
+    fetch(mainUrl);
 }
 
 }
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index 74b861281ee0..1e187ec5e954 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -342,7 +342,7 @@ Hash hashDerivationModulo(Store & store, Derivation drv)
         Hash h = drvHashes[i.first];
         if (!h) {
             assert(store.isValidPath(i.first));
-            Derivation drv2 = readDerivation(i.first);
+            Derivation drv2 = readDerivation(store.toRealPath(i.first));
             h = hashDerivationModulo(store, drv2);
             drvHashes[i.first] = h;
         }
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
index 54f4dd218007..973fca0b130f 100644
--- a/src/libstore/download.cc
+++ b/src/libstore/download.cc
@@ -7,6 +7,7 @@
 #include "s3.hh"
 #include "compression.hh"
 #include "pathlocks.hh"
+#include "finally.hh"
 
 #ifdef ENABLE_S3
 #include <aws/core/client/ClientConfiguration.h>
@@ -29,12 +30,25 @@ using namespace std::string_literals;
 
 namespace nix {
 
-double getTime()
+struct DownloadSettings : Config
 {
-    struct timeval tv;
-    gettimeofday(&tv, 0);
-    return tv.tv_sec + (tv.tv_usec / 1000000.0);
-}
+    Setting<bool> enableHttp2{this, true, "http2",
+        "Whether to enable HTTP/2 support."};
+
+    Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix",
+        "String appended to the user agent in HTTP requests."};
+
+    Setting<size_t> httpConnections{this, 25, "http-connections",
+        "Number of parallel HTTP connections.",
+        {"binary-caches-parallel-connections"}};
+
+    Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
+        "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
+};
+
+static DownloadSettings downloadSettings;
+
+static GlobalConfig::Register r1(&downloadSettings);
 
 std::string resolveUri(const std::string & uri)
 {
@@ -44,16 +58,6 @@ std::string resolveUri(const std::string & uri)
         return uri;
 }
 
-ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data)
-{
-    if (encoding == "")
-        return data;
-    else if (encoding == "br")
-        return decompress(encoding, *data);
-    else
-        throw Error("unsupported Content-Encoding '%s'", encoding);
-}
-
 struct CurlDownloader : public Downloader
 {
     CURLM * curlm = 0;
@@ -61,8 +65,6 @@ struct CurlDownloader : public Downloader
     std::random_device rd;
     std::mt19937 mt19937;
 
-    bool enableHttp2;
-
     struct DownloadItem : public std::enable_shared_from_this<DownloadItem>
     {
         CurlDownloader & downloader;
@@ -70,8 +72,7 @@ struct CurlDownloader : public Downloader
         DownloadResult result;
         Activity act;
         bool done = false; // whether either the success or failure function has been called
-        std::function<void(const DownloadResult &)> success;
-        std::function<void(std::exception_ptr exc)> failure;
+        Callback<DownloadResult> callback;
         CURL * req = 0;
         bool active = false; // whether the handle has been added to the multi object
         std::string status;
@@ -86,10 +87,21 @@ struct CurlDownloader : public Downloader
 
         std::string encoding;
 
-        DownloadItem(CurlDownloader & downloader, const DownloadRequest & request)
+        DownloadItem(CurlDownloader & downloader,
+            const DownloadRequest & request,
+            Callback<DownloadResult> callback)
             : downloader(downloader)
             , request(request)
-            , act(*logger, lvlTalkative, actDownload, fmt("downloading '%s'", request.uri), {request.uri}, request.parentAct)
+            , act(*logger, lvlTalkative, actDownload,
+                fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
+                {request.uri}, request.parentAct)
+            , callback(callback)
+            , finalSink([this](const unsigned char * data, size_t len) {
+                if (this->request.dataCallback)
+                    this->request.dataCallback((char *) data, len);
+                else
+                    this->result.data->append((char *) data, len);
+              })
         {
             if (!request.expectedETag.empty())
                 requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
@@ -113,19 +125,40 @@ struct CurlDownloader : public Downloader
             }
         }
 
-        template<class T>
-        void fail(const T & e)
+        void failEx(std::exception_ptr ex)
         {
             assert(!done);
             done = true;
-            callFailure(failure, std::make_exception_ptr(e));
+            callback.rethrow(ex);
+        }
+
+        template<class T>
+        void fail(const T & e)
+        {
+            failEx(std::make_exception_ptr(e));
         }
 
+        LambdaSink finalSink;
+        std::shared_ptr<CompressionSink> decompressionSink;
+
+        std::exception_ptr writeException;
+
         size_t writeCallback(void * contents, size_t size, size_t nmemb)
         {
-            size_t realSize = size * nmemb;
-            result.data->append((char *) contents, realSize);
-            return realSize;
+            try {
+                size_t realSize = size * nmemb;
+                result.bodySize += realSize;
+
+                if (!decompressionSink)
+                    decompressionSink = makeDecompressionSink(encoding, finalSink);
+
+                (*decompressionSink)((unsigned char *) contents, realSize);
+
+                return realSize;
+            } catch (...) {
+                writeException = std::current_exception();
+                return 0;
+            }
         }
 
         static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
@@ -143,6 +176,7 @@ struct CurlDownloader : public Downloader
                 auto ss = tokenizeString<vector<string>>(line, " ");
                 status = ss.size() >= 2 ? ss[1] : "";
                 result.data = std::make_shared<std::string>();
+                result.bodySize = 0;
                 encoding = "";
             } else {
                 auto i = line.find(':');
@@ -194,7 +228,7 @@ struct CurlDownloader : public Downloader
         }
 
         size_t readOffset = 0;
-        int readCallback(char *buffer, size_t size, size_t nitems)
+        size_t readCallback(char *buffer, size_t size, size_t nitems)
         {
             if (readOffset == request.data->length())
                 return 0;
@@ -205,7 +239,7 @@ struct CurlDownloader : public Downloader
             return count;
         }
 
-        static int readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp)
+        static size_t readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp)
         {
             return ((DownloadItem *) userp)->readCallback(buffer, size, nitems);
         }
@@ -225,15 +259,16 @@ struct CurlDownloader : public Downloader
 
             curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str());
             curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L);
+            curl_easy_setopt(req, CURLOPT_MAXREDIRS, 10);
             curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
             curl_easy_setopt(req, CURLOPT_USERAGENT,
                 ("curl/" LIBCURL_VERSION " Nix/" + nixVersion +
-                    (settings.userAgentSuffix != "" ? " " + settings.userAgentSuffix.get() : "")).c_str());
+                    (downloadSettings.userAgentSuffix != "" ? " " + downloadSettings.userAgentSuffix.get() : "")).c_str());
             #if LIBCURL_VERSION_NUM >= 0x072b00
             curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1);
             #endif
             #if LIBCURL_VERSION_NUM >= 0x072f00
-            if (downloader.enableHttp2)
+            if (downloadSettings.enableHttp2)
                 curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
             #endif
             curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, DownloadItem::writeCallbackWrapper);
@@ -265,7 +300,7 @@ struct CurlDownloader : public Downloader
                 curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
             }
 
-            curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, settings.connectTimeout.get());
+            curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, downloadSettings.connectTimeout.get());
 
             curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L);
             curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, lowSpeedTimeout);
@@ -276,6 +311,7 @@ struct CurlDownloader : public Downloader
             curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL);
 
             result.data = std::make_shared<std::string>();
+            result.bodySize = 0;
         }
 
         void finish(CURLcode code)
@@ -288,34 +324,40 @@ struct CurlDownloader : public Downloader
             if (effectiveUrlCStr)
                 result.effectiveUrl = effectiveUrlCStr;
 
-            debug(format("finished download of '%s'; curl status = %d, HTTP status = %d, body = %d bytes")
-                % request.uri % code % httpStatus % (result.data ? result.data->size() : 0));
+            debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes",
+                request.verb(), request.uri, code, httpStatus, result.bodySize);
+
+            if (decompressionSink)
+                decompressionSink->finish();
 
             if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) {
                 code = CURLE_OK;
                 httpStatus = 304;
             }
 
-            if (code == CURLE_OK &&
+            if (writeException)
+                failEx(writeException);
+
+            else if (code == CURLE_OK &&
                 (httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
             {
                 result.cached = httpStatus == 304;
                 done = true;
 
                 try {
-                    if (request.decompress)
-                        result.data = decodeContent(encoding, ref<std::string>(result.data));
-                    callSuccess(success, failure, const_cast<const DownloadResult &>(result));
                     act.progress(result.data->size(), result.data->size());
+                    callback(std::move(result));
                 } catch (...) {
                     done = true;
-                    callFailure(failure, std::current_exception());
+                    callback.rethrow();
                 }
-            } else {
+            }
+
+            else {
                 // We treat most errors as transient, but won't retry when hopeless
                 Error err = Transient;
 
-                if (httpStatus == 404 || code == CURLE_FILE_COULDNT_READ_FILE) {
+                if (httpStatus == 404 || httpStatus == 410 || code == CURLE_FILE_COULDNT_READ_FILE) {
                     // The file is definitely not there
                     err = NotFound;
                 } else if (httpStatus == 401 || httpStatus == 403 || httpStatus == 407) {
@@ -345,6 +387,8 @@ struct CurlDownloader : public Downloader
                         case CURLE_INTERFACE_FAILED:
                         case CURLE_UNKNOWN_OPTION:
                         case CURLE_SSL_CACERT_BADFILE:
+                        case CURLE_TOO_MANY_REDIRECTS:
+                        case CURLE_WRITE_ERROR:
                             err = Misc;
                             break;
                         default: // Shut up warnings
@@ -356,10 +400,16 @@ struct CurlDownloader : public Downloader
 
                 auto exc =
                     code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
-                    ? DownloadError(Interrupted, format("download of '%s' was interrupted") % request.uri)
+                    ? DownloadError(Interrupted, fmt("%s of '%s' was interrupted", request.verb(), request.uri))
                     : httpStatus != 0
-                      ? DownloadError(err, format("unable to download '%s': HTTP error %d (curl error: %s)") % request.uri % httpStatus % curl_easy_strerror(code))
-                      : DownloadError(err, format("unable to download '%s': %s (%d)") % request.uri % curl_easy_strerror(code) % code);
+                    ? DownloadError(err,
+                        fmt("unable to %s '%s': HTTP error %d",
+                            request.verb(), request.uri, httpStatus)
+                        + (code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
+                        )
+                    : DownloadError(err,
+                        fmt("unable to %s '%s': %s (%d)",
+                            request.verb(), request.uri, curl_easy_strerror(code), code));
 
                 /* If this is a transient error, then maybe retry the
                    download after a while. */
@@ -408,11 +458,9 @@ struct CurlDownloader : public Downloader
         #endif
         #if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0
         curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS,
-            settings.binaryCachesParallelConnections.get());
+            downloadSettings.httpConnections.get());
         #endif
 
-        enableHttp2 = settings.enableHttp2;
-
         wakeupPipe.create();
         fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK);
 
@@ -522,7 +570,7 @@ struct CurlDownloader : public Downloader
             }
 
             for (auto & item : incoming) {
-                debug(format("starting download of %s") % item->request.uri);
+                debug("starting %s of %s", item->request.verb(), item->request.uri);
                 item->init();
                 curl_multi_add_handle(curlm, item->req);
                 item->active = true;
@@ -551,6 +599,11 @@ struct CurlDownloader : public Downloader
 
     void enqueueItem(std::shared_ptr<DownloadItem> item)
     {
+        if (item->request.data
+            && !hasPrefix(item->request.uri, "http://")
+            && !hasPrefix(item->request.uri, "https://"))
+            throw nix::Error("uploading to '%s' is not supported", item->request.uri);
+
         {
             auto state(state_.lock());
             if (state->quit)
@@ -561,15 +614,14 @@ struct CurlDownloader : public Downloader
     }
 
     void enqueueDownload(const DownloadRequest & request,
-        std::function<void(const DownloadResult &)> success,
-        std::function<void(std::exception_ptr exc)> failure) override
+        Callback<DownloadResult> callback) override
     {
         /* Ugly hack to support s3:// URIs. */
         if (hasPrefix(request.uri, "s3://")) {
             // FIXME: do this on a worker thread
-            sync2async<DownloadResult>(success, failure, [&]() -> DownloadResult {
+            try {
 #ifdef ENABLE_S3
-                S3Helper s3Helper("", Aws::Region::US_EAST_1); // FIXME: make configurable
+                S3Helper s3Helper("", Aws::Region::US_EAST_1, ""); // FIXME: make configurable
                 auto slash = request.uri.find('/', 5);
                 if (slash == std::string::npos)
                     throw nix::Error("bad S3 URI '%s'", request.uri);
@@ -581,27 +633,22 @@ struct CurlDownloader : public Downloader
                 if (!s3Res.data)
                     throw DownloadError(NotFound, fmt("S3 object '%s' does not exist", request.uri));
                 res.data = s3Res.data;
-                return res;
+                callback(std::move(res));
 #else
                 throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri);
 #endif
-            });
+            } catch (...) { callback.rethrow(); }
             return;
         }
 
-        auto item = std::make_shared<DownloadItem>(*this, request);
-        item->success = success;
-        item->failure = failure;
-        enqueueItem(item);
+        enqueueItem(std::make_shared<DownloadItem>(*this, request, callback));
     }
 };
 
 ref<Downloader> getDownloader()
 {
-    static std::shared_ptr<Downloader> downloader;
-    static std::once_flag downloaderCreated;
-    std::call_once(downloaderCreated, [&]() { downloader = makeDownloader(); });
-    return ref<Downloader>(downloader);
+    static ref<Downloader> downloader = makeDownloader();
+    return downloader;
 }
 
 ref<Downloader> makeDownloader()
@@ -613,8 +660,13 @@ std::future<DownloadResult> Downloader::enqueueDownload(const DownloadRequest &
 {
     auto promise = std::make_shared<std::promise<DownloadResult>>();
     enqueueDownload(request,
-        [promise](const DownloadResult & result) { promise->set_value(result); },
-        [promise](std::exception_ptr exc) { promise->set_exception(exc); });
+        {[promise](std::future<DownloadResult> fut) {
+            try {
+                promise->set_value(fut.get());
+            } catch (...) {
+                promise->set_exception(std::current_exception());
+            }
+        }});
     return promise->get_future();
 }
 
@@ -623,6 +675,94 @@ DownloadResult Downloader::download(const DownloadRequest & request)
     return enqueueDownload(request).get();
 }
 
+void Downloader::download(DownloadRequest && request, Sink & sink)
+{
+    /* Note: we can't call 'sink' via request.dataCallback, because
+       that would cause the sink to execute on the downloader
+       thread. If 'sink' is a coroutine, this will fail. Also, if the
+       sink is expensive (e.g. one that does decompression and writing
+       to the Nix store), it would stall the download thread too much.
+       Therefore we use a buffer to communicate data between the
+       download thread and the calling thread. */
+
+    struct State {
+        bool quit = false;
+        std::exception_ptr exc;
+        std::string data;
+        std::condition_variable avail, request;
+    };
+
+    auto _state = std::make_shared<Sync<State>>();
+
+    /* In case of an exception, wake up the download thread. FIXME:
+       abort the download request. */
+    Finally finally([&]() {
+        auto state(_state->lock());
+        state->quit = true;
+        state->request.notify_one();
+    });
+
+    request.dataCallback = [_state](char * buf, size_t len) {
+
+        auto state(_state->lock());
+
+        if (state->quit) return;
+
+        /* If the buffer is full, then go to sleep until the calling
+           thread wakes us up (i.e. when it has removed data from the
+           buffer). Note: this does stall the download thread. */
+        while (state->data.size() > 1024 * 1024) {
+            if (state->quit) return;
+            debug("download buffer is full; going to sleep");
+            state.wait(state->request);
+        }
+
+        /* Append data to the buffer and wake up the calling
+           thread. */
+        state->data.append(buf, len);
+        state->avail.notify_one();
+    };
+
+    enqueueDownload(request,
+        {[_state](std::future<DownloadResult> fut) {
+            auto state(_state->lock());
+            state->quit = true;
+            try {
+                fut.get();
+            } catch (...) {
+                state->exc = std::current_exception();
+            }
+            state->avail.notify_one();
+            state->request.notify_one();
+        }});
+
+    auto state(_state->lock());
+
+    while (true) {
+        checkInterrupt();
+
+        /* If no data is available, then wait for the download thread
+           to wake us up. */
+        if (state->data.empty()) {
+
+            if (state->quit) {
+                if (state->exc) std::rethrow_exception(state->exc);
+                break;
+            }
+
+            state.wait(state->avail);
+        }
+
+        /* If data is available, then flush it to the sink and wake up
+           the download thread if it's blocked on a full buffer. */
+        if (!state->data.empty()) {
+            sink((unsigned char *) state->data.data(), state->data.size());
+            state->data.clear();
+            state->request.notify_one();
+        }
+    }
+}
+
 Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl, int ttl)
 {
     auto url = resolveUri(url_);
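
The new Downloader::download(DownloadRequest &&, Sink &) overload shown above hands data from the curl thread to the calling thread through a bounded buffer, so the sink (which may be a coroutine or do expensive work such as decompression) always runs on the caller. A hedged usage sketch, not part of the patch; the URL and function name are hypothetical, LambdaSink and DownloadRequest are the types used elsewhere in this diff.

    #include "download.hh"    // DownloadRequest, getDownloader()
    #include "serialise.hh"   // LambdaSink

    // Streams a body through the caller's sink instead of collecting it in a string.
    uint64_t countRemoteBytes()
    {
        using namespace nix;

        DownloadRequest request("https://example.org/big.nar.xz");

        uint64_t bytes = 0;
        LambdaSink counter([&](const unsigned char * data, size_t len) {
            bytes += len;   // a real caller would decompress or write to disk here
        });

        getDownloader()->download(std::move(request), counter);

        return bytes;
    }
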
diff --git a/src/libstore/download.hh b/src/libstore/download.hh
index 7ade756fc356..f0228f7d053a 100644
--- a/src/libstore/download.hh
+++ b/src/libstore/download.hh
@@ -21,9 +21,15 @@ struct DownloadRequest
     bool decompress = true;
     std::shared_ptr<std::string> data;
     std::string mimeType;
+    std::function<void(char *, size_t)> dataCallback;
 
     DownloadRequest(const std::string & uri)
         : uri(uri), parentAct(getCurActivity()) { }
+
+    std::string verb()
+    {
+        return data ? "upload" : "download";
+    }
 };
 
 struct DownloadResult
@@ -32,6 +38,7 @@ struct DownloadResult
     std::string etag;
     std::string effectiveUrl;
     std::shared_ptr<std::string> data;
+    uint64_t bodySize = 0;
 };
 
 class Store;
@@ -42,14 +49,17 @@ struct Downloader
        the download. The future may throw a DownloadError
        exception. */
     virtual void enqueueDownload(const DownloadRequest & request,
-        std::function<void(const DownloadResult &)> success,
-        std::function<void(std::exception_ptr exc)> failure) = 0;
+        Callback<DownloadResult> callback) = 0;
 
     std::future<DownloadResult> enqueueDownload(const DownloadRequest & request);
 
     /* Synchronously download a file. */
     DownloadResult download(const DownloadRequest & request);
 
+    /* Download a file, writing its data to a sink. The sink will be
+       invoked on the thread of the caller. */
+    void download(DownloadRequest && request, Sink & sink);
+
     /* Check if the specified file is already in ~/.cache/nix/tarballs
        and is more recent than ‘tarball-ttl’ seconds. Otherwise,
        use the recorded ETag to verify if the server has a more
@@ -78,7 +88,4 @@ public:
 
 bool isUri(const string & s);
 
-/* Decode data according to the Content-Encoding header. */
-ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data);
-
 }
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index ba49749d830a..b415d5421476 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -7,6 +7,7 @@
 #include <queue>
 #include <algorithm>
 #include <regex>
+#include <random>
 
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -365,7 +366,7 @@ try_again:
     char buf[bufsiz];
     auto res = readlink(file.c_str(), buf, bufsiz);
     if (res == -1) {
-        if (errno == ENOENT || errno == EACCES)
+        if (errno == ENOENT || errno == EACCES || errno == ESRCH)
             return;
         throw SysError("reading symlink");
     }
@@ -425,25 +426,28 @@ PathSet LocalStore::findRuntimeRoots()
                         readProcLink((format("%1%/%2%") % fdStr % fd_ent->d_name).str(), paths);
                     }
                 }
-                if (errno)
+                if (errno) {
+                    if (errno == ESRCH)
+                        continue;
                     throw SysError(format("iterating /proc/%1%/fd") % ent->d_name);
-                fdDir.reset();
-
-                auto mapLines =
-                    tokenizeString<std::vector<string>>(readFile((format("/proc/%1%/maps") % ent->d_name).str(), true), "\n");
-                for (const auto& line : mapLines) {
-                    auto match = std::smatch{};
-                    if (std::regex_match(line, match, mapRegex))
-                        paths.emplace(match[1]);
                 }
+                fdDir.reset();
 
                 try {
+                    auto mapLines =
+                        tokenizeString<std::vector<string>>(readFile((format("/proc/%1%/maps") % ent->d_name).str(), true), "\n");
+                    for (const auto& line : mapLines) {
+                        auto match = std::smatch{};
+                        if (std::regex_match(line, match, mapRegex))
+                            paths.emplace(match[1]);
+                    }
+
                     auto envString = readFile((format("/proc/%1%/environ") % ent->d_name).str(), true);
                     auto env_end = std::sregex_iterator{};
                     for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
                         paths.emplace(i->str());
                 } catch (SysError & e) {
-                    if (errno == ENOENT || errno == EACCES)
+                    if (errno == ENOENT || errno == EACCES || errno == ESRCH)
                         continue;
                     throw;
                 }
@@ -829,7 +833,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
                alphabetically first (e.g. /nix/store/000...).  This
                matters when using --max-freed etc. */
             vector<Path> entries_(entries.begin(), entries.end());
-            random_shuffle(entries_.begin(), entries_.end());
+            std::mt19937 gen(1);
+            std::shuffle(entries_.begin(), entries_.end(), gen);
 
             for (auto & i : entries_)
                 tryToDelete(state, i);
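
std::random_shuffle is deprecated in C++14 and removed in C++17, so the garbage collector now uses std::shuffle with an explicitly seeded std::mt19937. A standalone equivalent, not from the patch:

    #include <algorithm>
    #include <iostream>
    #include <random>
    #include <vector>

    int main()
    {
        std::vector<int> entries{1, 2, 3, 4, 5};

        std::mt19937 gen(1);   // same fixed seed as the patch uses
        std::shuffle(entries.begin(), entries.end(), gen);

        for (int e : entries) std::cout << e << ' ';
        std::cout << '\n';
    }
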
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 544566e0b573..d95db56726cb 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -28,9 +28,10 @@ namespace nix {
 
 Settings settings;
 
+static GlobalConfig::Register r1(&settings);
+
 Settings::Settings()
-    : Config({})
-    , nixPrefix(NIX_PREFIX)
+    : nixPrefix(NIX_PREFIX)
     , nixStore(canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))))
     , nixDataDir(canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)))
     , nixLogDir(canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)))
@@ -69,20 +70,15 @@ Settings::Settings()
     allowedImpureHostPrefixes = tokenizeString<StringSet>(DEFAULT_ALLOWED_IMPURE_PREFIXES);
 }
 
-void Settings::loadConfFile()
+void loadConfFile()
 {
-    applyConfigFile(nixConfDir + "/nix.conf");
+    globalConfig.applyConfigFile(settings.nixConfDir + "/nix.conf");
 
     /* We only want to send overrides to the daemon, i.e. stuff from
        ~/.nix/nix.conf or the command line. */
-    resetOverriden();
+    globalConfig.resetOverriden();
 
-    applyConfigFile(getConfigDir() + "/nix/nix.conf");
-}
-
-void Settings::set(const string & name, const string & value)
-{
-    Config::set(name, value);
+    globalConfig.applyConfigFile(getConfigDir() + "/nix/nix.conf");
 }
 
 unsigned int Settings::getDefaultCores()
@@ -162,23 +158,11 @@ void initPlugins()
                 throw Error("could not dynamically open plugin file '%s': %s", file, dlerror());
         }
     }
-    /* We handle settings registrations here, since plugins can add settings */
-    if (RegisterSetting::settingRegistrations) {
-        for (auto & registration : *RegisterSetting::settingRegistrations)
-            settings.addSetting(registration);
-        delete RegisterSetting::settingRegistrations;
-    }
-    settings.handleUnknownSettings();
-}
-
-RegisterSetting::SettingRegistrations * RegisterSetting::settingRegistrations;
 
-RegisterSetting::RegisterSetting(AbstractSetting * s)
-{
-    if (!settingRegistrations)
-        settingRegistrations = new SettingRegistrations;
-    settingRegistrations->emplace_back(s);
+    /* Since plugins can add settings, try to re-apply previously
+       unknown settings. */
+    globalConfig.reapplyUnknownSettings();
+    globalConfig.warnUnknownSettings();
 }
 
-
 }
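
Settings registration is no longer special-cased in initPlugins(): any component, including a dynamically loaded plugin, declares a Config subclass and registers it with GlobalConfig::Register, after which reapplyUnknownSettings() picks up values the user set before the plugin was loaded. A hedged sketch, not part of the patch; the setting name and default are hypothetical, and it assumes Config, Setting and GlobalConfig are declared in config.hh.

    #include "config.hh"

    using namespace nix;

    struct MyPluginSettings : Config
    {
        Setting<std::string> greeting{this, "hello", "plugin-greeting",
            "Example setting contributed by a plugin."};
    };

    static MyPluginSettings myPluginSettings;

    // Registration mirrors 'static GlobalConfig::Register r1(&settings)' above.
    static GlobalConfig::Register rMyPlugin(&myPluginSettings);
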
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 9360096aae8c..f589078dbb98 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -13,26 +13,6 @@ namespace nix {
 
 typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode;
 
-extern bool useCaseHack; // FIXME
-
-struct CaseHackSetting : public BaseSetting<bool>
-{
-    CaseHackSetting(Config * options,
-        const std::string & name,
-        const std::string & description,
-        const std::set<std::string> & aliases = {})
-        : BaseSetting<bool>(useCaseHack, name, description, aliases)
-    {
-        options->addSetting(this);
-    }
-
-    void set(const std::string & str) override
-    {
-        BaseSetting<bool>::set(str);
-        nix::useCaseHack = value;
-    }
-};
-
 struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
 {
     MaxBuildJobsSetting(Config * options,
@@ -56,10 +36,6 @@ public:
 
     Settings();
 
-    void loadConfFile();
-
-    void set(const string & name, const string & value);
-
     Path nixPrefix;
 
     /* The directory where we store sources and derived files. */
@@ -217,9 +193,6 @@ public:
     Setting<bool> showTrace{this, false, "show-trace",
         "Whether to show a stack trace on evaluation errors."};
 
-    Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
-        "Whether builtin functions that allow executing native code should be enabled."};
-
     Setting<SandboxMode> sandboxMode{this, smDisabled, "sandbox",
         "Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".",
         {"build-use-chroot", "build-use-sandbox"}};
@@ -232,13 +205,6 @@ public:
         "Additional paths to make available inside the build sandbox.",
         {"build-extra-chroot-dirs", "build-extra-sandbox-paths"}};
 
-    Setting<bool> restrictEval{this, false, "restrict-eval",
-        "Whether to restrict file system access to paths in $NIX_PATH, "
-        "and network access to the URI prefixes listed in 'allowed-uris'."};
-
-    Setting<bool> pureEval{this, false, "pure-eval",
-        "Whether to restrict file system and network access to files specified by cryptographic hash."};
-
     Setting<size_t> buildRepeat{this, 0, "repeat",
         "The number of times to repeat a build in order to verify determinism.",
         {"build-repeat"}};
@@ -280,13 +246,6 @@ public:
     Setting<Strings> secretKeyFiles{this, {}, "secret-key-files",
         "Secret keys with which to sign local builds."};
 
-    Setting<size_t> binaryCachesParallelConnections{this, 25, "http-connections",
-        "Number of parallel HTTP connections.",
-        {"binary-caches-parallel-connections"}};
-
-    Setting<bool> enableHttp2{this, true, "http2",
-        "Whether to enable HTTP/2 support."};
-
     Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl",
         "How soon to expire files fetched by builtins.fetchTarball and builtins.fetchurl."};
 
@@ -350,18 +309,6 @@ public:
     /* Path to the SSL CA file used */
     Path caFile;
 
-    Setting<bool> enableImportFromDerivation{this, true, "allow-import-from-derivation",
-        "Whether the evaluator allows importing the result of a derivation."};
-
-    CaseHackSetting useCaseHack{this, "use-case-hack",
-        "Whether to enable a Darwin-specific hack for dealing with file name collisions."};
-
-    Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
-        "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
-
-    Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix",
-        "String appended to the user agent in HTTP requests."};
-
 #if __linux__
     Setting<bool> filterSyscalls{this, true, "filter-syscalls",
             "Whether to prevent certain dangerous system calls, such as "
@@ -383,9 +330,6 @@ public:
     Setting<uint64_t> maxFree{this, std::numeric_limits<uint64_t>::max(), "max-free",
         "Stop deleting garbage when free disk space is above the specified amount."};
 
-    Setting<Strings> allowedUris{this, {}, "allowed-uris",
-        "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."};
-
     Setting<Paths> pluginFiles{this, {}, "plugin-files",
         "Plugins to dynamically load at nix initialization time."};
 };
@@ -398,15 +342,8 @@ extern Settings settings;
    anything else */
 void initPlugins();
 
+void loadConfFile();
 
 extern const string nixVersion;
 
-struct RegisterSetting
-{
-    typedef std::vector<AbstractSetting *> SettingRegistrations;
-    static SettingRegistrations * settingRegistrations;
-    RegisterSetting(AbstractSetting * s);
-};
-
-
 }
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index b9e9cd5daba5..ab524d523cf2 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -73,32 +73,46 @@ protected:
         try {
             getDownloader()->download(req);
         } catch (DownloadError & e) {
-            throw UploadToHTTP(format("uploading to HTTP binary cache at %1% not supported: %2%") % cacheUri % e.msg());
+            throw UploadToHTTP("while uploading to HTTP binary cache at '%s': %s", cacheUri, e.msg());
         }
     }
 
-    void getFile(const std::string & path,
-        std::function<void(std::shared_ptr<std::string>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override
+    DownloadRequest makeRequest(const std::string & path)
     {
         DownloadRequest request(cacheUri + "/" + path);
         request.tries = 8;
+        return request;
+    }
+
+    void getFile(const std::string & path, Sink & sink) override
+    {
+        auto request(makeRequest(path));
+        try {
+            getDownloader()->download(std::move(request), sink);
+        } catch (DownloadError & e) {
+            if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
+                throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
+            throw;
+        }
+    }
+
+    void getFile(const std::string & path,
+        Callback<std::shared_ptr<std::string>> callback) override
+    {
+        auto request(makeRequest(path));
 
         getDownloader()->enqueueDownload(request,
-            [success](const DownloadResult & result) {
-                success(result.data);
-            },
-            [success, failure](std::exception_ptr exc) {
+            {[callback](std::future<DownloadResult> result) {
                 try {
-                    std::rethrow_exception(exc);
+                    callback(result.get().data);
                 } catch (DownloadError & e) {
                     if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
-                        return success(0);
-                    failure(exc);
+                        return callback(std::shared_ptr<std::string>());
+                    callback.rethrow();
                 } catch (...) {
-                    failure(exc);
+                    callback.rethrow();
                 }
-            });
+            }});
     }
 
 };
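
A hedged caller-side sketch of the new streaming overload introduced above: getFile(path, Sink &) writes straight into a caller-supplied Sink and reports missing files with NoSuchBinaryCacheFile instead of a null pointer. The helper name and error handling below are illustrative only; it assumes nothing beyond the interfaces visible in these hunks plus libutil's StringSink.

    // Hypothetical helper inside a BinaryCacheStore subclass (illustrative).
    std::string tryFetchFile(const std::string & path)
    {
        StringSink sink;
        try {
            getFile(path, sink);          // streams the file into 'sink'
        } catch (NoSuchBinaryCacheFile &) {
            return "";                    // treat a missing file as empty
        }
        return *sink.s;
    }
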
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index 5dee25308f7f..88d2574e86ef 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -17,6 +17,7 @@ struct LegacySSHStore : public Store
     const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"};
     const Setting<bool> compress{this, false, "compress", "whether to compress the connection"};
     const Setting<Path> remoteProgram{this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"};
+    const Setting<std::string> remoteStore{this, "", "remote-store", "URI of the store on the remote system"};
 
     // Hack for getting remote build log output.
     const Setting<int> logFD{this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"};
@@ -27,6 +28,7 @@ struct LegacySSHStore : public Store
         FdSink to;
         FdSource from;
         int remoteVersion;
+        bool good = true;
     };
 
     std::string host;
@@ -41,7 +43,7 @@ struct LegacySSHStore : public Store
         , connections(make_ref<Pool<Connection>>(
             std::max(1, (int) maxConnections),
             [this]() { return openConnection(); },
-            [](const ref<Connection> & r) { return true; }
+            [](const ref<Connection> & r) { return r->good; }
             ))
         , master(
             host,
@@ -56,7 +58,9 @@ struct LegacySSHStore : public Store
     ref<Connection> openConnection()
     {
         auto conn = make_ref<Connection>();
-        conn->sshConn = master.startCommand(fmt("%s --serve --write", remoteProgram));
+        conn->sshConn = master.startCommand(
+            fmt("%s --serve --write", remoteProgram)
+            + (remoteStore.get() == "" ? "" : " --store " + shellEscape(remoteStore.get())));
         conn->to = FdSink(conn->sshConn->in.get());
         conn->from = FdSource(conn->sshConn->out.get());
 
@@ -84,10 +88,9 @@ struct LegacySSHStore : public Store
     }
 
     void queryPathInfoUncached(const Path & path,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override
+        Callback<std::shared_ptr<ValidPathInfo>> callback) override
     {
-        sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() -> std::shared_ptr<ValidPathInfo> {
+        try {
             auto conn(connections->get());
 
             debug("querying remote host '%s' for info on '%s'", host, path);
@@ -97,7 +100,7 @@ struct LegacySSHStore : public Store
 
             auto info = std::make_shared<ValidPathInfo>();
             conn->from >> info->path;
-            if (info->path.empty()) return nullptr;
+            if (info->path.empty()) return callback(nullptr);
             assert(path == info->path);
 
             PathSet references;
@@ -116,8 +119,8 @@ struct LegacySSHStore : public Store
             auto s = readString(conn->from);
             assert(s == "");
 
-            return info;
-        });
+            callback(std::move(info));
+        } catch (...) { callback.rethrow(); }
     }
 
     void addToStore(const ValidPathInfo & info, Source & source,
@@ -128,18 +131,48 @@ struct LegacySSHStore : public Store
 
         auto conn(connections->get());
 
-        conn->to
-            << cmdImportPaths
-            << 1;
-        copyNAR(source, conn->to);
-        conn->to
-            << exportMagic
-            << info.path
-            << info.references
-            << info.deriver
-            << 0
-            << 0;
-        conn->to.flush();
+        if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) {
+
+            conn->to
+                << cmdAddToStoreNar
+                << info.path
+                << info.deriver
+                << info.narHash.to_string(Base16, false)
+                << info.references
+                << info.registrationTime
+                << info.narSize
+                << info.ultimate
+                << info.sigs
+                << info.ca;
+            try {
+                copyNAR(source, conn->to);
+            } catch (...) {
+                conn->good = false;
+                throw;
+            }
+            conn->to.flush();
+
+        } else {
+
+            conn->to
+                << cmdImportPaths
+                << 1;
+            try {
+                copyNAR(source, conn->to);
+            } catch (...) {
+                conn->good = false;
+                throw;
+            }
+            conn->to
+                << exportMagic
+                << info.path
+                << info.references
+                << info.deriver
+                << 0
+                << 0;
+            conn->to.flush();
+
+        }
 
         if (readInt(conn->from) != 1)
             throw Error("failed to add path '%s' to remote host '%s'", info.path, host);
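
The new remote-store setting is forwarded to the remote side as a --store argument, so a non-default store on the build machine can be addressed through the store URI. A purely illustrative invocation (host and path are placeholders):

    nix copy --to 'ssh://build-machine?remote-store=/chroot/nix' /nix/store/<path>

which makes openConnection() run roughly "nix-store --serve --write --store '/chroot/nix'" on the remote host.
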
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
index 2577e90aef23..b7001795be4d 100644
--- a/src/libstore/local-binary-cache-store.cc
+++ b/src/libstore/local-binary-cache-store.cc
@@ -34,18 +34,14 @@ protected:
         const std::string & data,
         const std::string & mimeType) override;
 
-    void getFile(const std::string & path,
-        std::function<void(std::shared_ptr<std::string>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override
+    void getFile(const std::string & path, Sink & sink) override
     {
-        sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
-            try {
-                return std::make_shared<std::string>(readFile(binaryCacheDir + "/" + path));
-            } catch (SysError & e) {
-                if (e.errNo == ENOENT) return std::shared_ptr<std::string>();
-                throw;
-            }
-        });
+        try {
+            readFile(binaryCacheDir + "/" + path, sink);
+        } catch (SysError & e) {
+            if (e.errNo != ENOENT) throw;
+            throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path);
+        }
     }
 
     PathSet queryAllValidPaths() override
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index ef8c2811bd86..c91dbf241bcf 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -450,7 +450,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
     ssize_t eaSize = llistxattr(path.c_str(), nullptr, 0);
 
     if (eaSize < 0) {
-        if (errno != ENOTSUP)
+        if (errno != ENOTSUP && errno != ENODATA)
             throw SysError("querying extended attributes of '%s'", path);
     } else if (eaSize > 0) {
         std::vector<char> eaBuf(eaSize);
@@ -629,17 +629,15 @@ uint64_t LocalStore::addValidPath(State & state,
 
 
 void LocalStore::queryPathInfoUncached(const Path & path,
-    std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-    std::function<void(std::exception_ptr exc)> failure)
+    Callback<std::shared_ptr<ValidPathInfo>> callback)
 {
-    sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {
-
+    try {
         auto info = std::make_shared<ValidPathInfo>();
         info->path = path;
 
         assertStorePath(path);
 
-        return retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() {
+        callback(retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() {
             auto state(_state.lock());
 
             /* Get the path info. */
@@ -679,8 +677,9 @@ void LocalStore::queryPathInfoUncached(const Path & path,
                 info->references.insert(useQueryReferences.getStr(0));
 
             return info;
-        });
-    });
+        }));
+
+    } catch (...) { callback.rethrow(); }
 }
 
 
@@ -976,7 +975,8 @@ const PublicKeys & LocalStore::getPublicKeys()
 void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
     RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
 {
-    assert(info.narHash);
+    if (!info.narHash)
+        throw Error("cannot add path '%s' because it lacks a hash", info.path);
 
     if (requireSigs && checkSigs && !info.checkSignatures(*this, getPublicKeys()))
         throw Error("cannot add path '%s' because it lacks a valid signature", info.path);
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 1209a06356f7..746bdbeed793 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -127,8 +127,7 @@ public:
     PathSet queryAllValidPaths() override;
 
     void queryPathInfoUncached(const Path & path,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override;
+        Callback<std::shared_ptr<ValidPathInfo>> callback) override;
 
     void queryReferrers(const Path & path, PathSet & referrers) override;
 
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
index a82aa4e9cfa5..adcce026fa1d 100644
--- a/src/libstore/misc.cc
+++ b/src/libstore/misc.cc
@@ -33,9 +33,11 @@ void Store::computeFSClosure(const PathSet & startPaths,
             state->pending++;
         }
 
-        queryPathInfo(path,
-            [&, path](ref<ValidPathInfo> info) {
-                // FIXME: calls to isValidPath() should be async
+        queryPathInfo(path, {[&, path](std::future<ref<ValidPathInfo>> fut) {
+            // FIXME: calls to isValidPath() should be async
+
+            try {
+                auto info = fut.get();
 
                 if (flipDirection) {
 
@@ -75,14 +77,13 @@ void Store::computeFSClosure(const PathSet & startPaths,
                     if (!--state->pending) done.notify_one();
                 }
 
-            },
-
-            [&, path](std::exception_ptr exc) {
+            } catch (...) {
                 auto state(state_.lock());
-                if (!state->exc) state->exc = exc;
+                if (!state->exc) state->exc = std::current_exception();
                 assert(state->pending);
                 if (!--state->pending) done.notify_one();
-            });
+            }
+        }});
     };
 
     for (auto & startPath : startPaths)
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index 7840167d7772..991512f21795 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -104,8 +104,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
        *.app/Contents/Resources/\*.lproj seem to be the only paths affected. See
        https://github.com/NixOS/nix/issues/1443 for more discussion. */
 
-    if (std::regex_search(path, std::regex("\\.app/Contents/PkgInfo$")) ||
-        std::regex_search(path, std::regex("\\.app/Contents/Resources/.+\\.lproj$")))
+    if (std::regex_search(path, std::regex("\\.app/Contents/.+$")))
     {
         debug(format("'%1%' is not allowed to be linked in macOS") % path);
         return;
diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc
index 4a607b584506..4c6af567ae6f 100644
--- a/src/libstore/profiles.cc
+++ b/src/libstore/profiles.cc
@@ -157,6 +157,29 @@ void deleteGenerations(const Path & profile, const std::set<unsigned int> & gens
     }
 }
 
+void deleteGenerationsGreaterThan(const Path & profile, int max, bool dryRun)
+{
+    PathLocks lock;
+    lockProfile(lock, profile);
+
+    int curGen;
+    bool fromCurGen = false;
+    Generations gens = findGenerations(profile, curGen);
+    for (auto i = gens.rbegin(); i != gens.rend(); ++i) {
+        if (i->number == curGen) {
+            fromCurGen = true;
+            max--;
+            continue;
+        }
+        if (fromCurGen) {
+            if (max) {
+                max--;
+                continue;
+            }
+            deleteGeneration2(profile, i->number, dryRun);
+        }
+    }
+}
 
 void deleteOldGenerations(const Path & profile, bool dryRun)
 {
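
A short illustrative trace of deleteGenerationsGreaterThan() above: generations are visited newest first, the countdown starts at the current generation, and everything older than the last of the 'max' kept generations is deleted.

    // Illustrative call (the profile path is just an example): with
    // generations 1..10, current generation 10 and max = 3, the loop
    // keeps 10, 9 and 8 and deletes generations 7 down to 1.
    deleteGenerationsGreaterThan("/nix/var/nix/profiles/default", 3, false);
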
diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh
index 1d4e6d3037db..5fa1533de311 100644
--- a/src/libstore/profiles.hh
+++ b/src/libstore/profiles.hh
@@ -39,6 +39,8 @@ void deleteGeneration(const Path & profile, unsigned int gen);
 
 void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun);
 
+void deleteGenerationsGreaterThan(const Path & profile, const int max, bool dryRun);
+
 void deleteOldGenerations(const Path & profile, bool dryRun);
 
 void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun);
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 080cef93d214..ea86ef052f53 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -7,6 +7,7 @@
 #include "globals.hh"
 #include "derivations.hh"
 #include "pool.hh"
+#include "finally.hh"
 
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -187,10 +188,11 @@ void RemoteStore::setOptions(Connection & conn)
        << settings.useSubstitutes;
 
     if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
-        auto overrides = settings.getSettings(true);
+        std::map<std::string, Config::SettingInfo> overrides;
+        globalConfig.getSettings(overrides, true);
         conn.to << overrides.size();
         for (auto & i : overrides)
-            conn.to << i.first << i.second;
+            conn.to << i.first << i.second.value;
     }
 
     conn.processStderr();
@@ -293,38 +295,40 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
 
 
 void RemoteStore::queryPathInfoUncached(const Path & path,
-    std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-    std::function<void(std::exception_ptr exc)> failure)
+    Callback<std::shared_ptr<ValidPathInfo>> callback)
 {
-    sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {
-        auto conn(connections->get());
-        conn->to << wopQueryPathInfo << path;
-        try {
-            conn->processStderr();
-        } catch (Error & e) {
-            // Ugly backwards compatibility hack.
-            if (e.msg().find("is not valid") != std::string::npos)
-                throw InvalidPath(e.what());
-            throw;
-        }
-        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
-            bool valid; conn->from >> valid;
-            if (!valid) throw InvalidPath(format("path '%s' is not valid") % path);
-        }
-        auto info = std::make_shared<ValidPathInfo>();
-        info->path = path;
-        info->deriver = readString(conn->from);
-        if (info->deriver != "") assertStorePath(info->deriver);
-        info->narHash = Hash(readString(conn->from), htSHA256);
-        info->references = readStorePaths<PathSet>(*this, conn->from);
-        conn->from >> info->registrationTime >> info->narSize;
-        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
-            conn->from >> info->ultimate;
-            info->sigs = readStrings<StringSet>(conn->from);
-            conn->from >> info->ca;
+    try {
+        std::shared_ptr<ValidPathInfo> info;
+        {
+            auto conn(connections->get());
+            conn->to << wopQueryPathInfo << path;
+            try {
+                conn->processStderr();
+            } catch (Error & e) {
+                // Ugly backwards compatibility hack.
+                if (e.msg().find("is not valid") != std::string::npos)
+                    throw InvalidPath(e.what());
+                throw;
+            }
+            if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
+                bool valid; conn->from >> valid;
+                if (!valid) throw InvalidPath(format("path '%s' is not valid") % path);
+            }
+            info = std::make_shared<ValidPathInfo>();
+            info->path = path;
+            info->deriver = readString(conn->from);
+            if (info->deriver != "") assertStorePath(info->deriver);
+            info->narHash = Hash(readString(conn->from), htSHA256);
+            info->references = readStorePaths<PathSet>(*this, conn->from);
+            conn->from >> info->registrationTime >> info->narSize;
+            if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
+                conn->from >> info->ultimate;
+                info->sigs = readStrings<StringSet>(conn->from);
+                conn->from >> info->ca;
+            }
         }
-        return info;
-    });
+        callback(std::move(info));
+    } catch (...) { callback.rethrow(); }
 }
 
 
@@ -411,8 +415,9 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
                  << info.references << info.registrationTime << info.narSize
                  << info.ultimate << info.sigs << info.ca
                  << repair << !checkSigs;
-        copyNAR(source, conn->to);
-        conn->processStderr();
+        bool tunnel = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21;
+        if (!tunnel) copyNAR(source, conn->to);
+        conn->processStderr(0, tunnel ? &source : nullptr);
     }
 }
 
@@ -435,8 +440,10 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath,
         conn->to.written = 0;
         conn->to.warn = true;
         connections->incCapacity();
-        dumpPath(srcPath, conn->to, filter);
-        connections->decCapacity();
+        {
+            Finally cleanup([&]() { connections->decCapacity(); });
+            dumpPath(srcPath, conn->to, filter);
+        }
         conn->to.warn = false;
         conn->processStderr();
     } catch (SysError & e) {
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 95fa59a2069d..b488e34ce263 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -40,8 +40,7 @@ public:
     PathSet queryAllValidPaths() override;
 
     void queryPathInfoUncached(const Path & path,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override;
+        Callback<std::shared_ptr<ValidPathInfo>> callback) override;
 
     void queryReferrers(const Path & path, PathSet & referrers) override;
 
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 103f141a1a11..7711388f05a9 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -84,8 +84,8 @@ static void initAWS()
     });
 }
 
-S3Helper::S3Helper(const std::string & profile, const std::string & region)
-    : config(makeConfig(region))
+S3Helper::S3Helper(const std::string & profile, const std::string & region, const std::string & endpoint)
+    : config(makeConfig(region, endpoint))
     , client(make_ref<Aws::S3::S3Client>(
             profile == ""
             ? std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>(
@@ -99,7 +99,7 @@ S3Helper::S3Helper(const std::string & profile, const std::string & region)
 #else
             Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
 #endif
-            false))
+            endpoint.empty()))
 {
 }
 
@@ -116,11 +116,14 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy
     }
 };
 
-ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region)
+ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region, const string & endpoint)
 {
     initAWS();
     auto res = make_ref<Aws::Client::ClientConfiguration>();
     res->region = region;
+    if (!endpoint.empty()) {
+        res->endpointOverride = endpoint;
+    }
     res->requestTimeoutMs = 600 * 1000;
     res->retryStrategy = std::make_shared<RetryStrategy>();
     res->caFile = settings.caFile;
@@ -150,10 +153,8 @@ S3Helper::DownloadResult S3Helper::getObject(
         auto result = checkAws(fmt("AWS error fetching '%s'", key),
             client->GetObject(request));
 
-        res.data = decodeContent(
-            result.GetContentEncoding(),
-            make_ref<std::string>(
-                dynamic_cast<std::stringstream &>(result.GetBody()).str()));
+        res.data = decompress(result.GetContentEncoding(),
+            dynamic_cast<std::stringstream &>(result.GetBody()).str());
 
     } catch (S3Error & e) {
         if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
@@ -170,6 +171,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
 {
     const Setting<std::string> profile{this, "", "profile", "The name of the AWS configuration profile to use."};
     const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
+    const Setting<std::string> endpoint{this, "", "endpoint", "An optional override of the endpoint to use when talking to S3."};
     const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
     const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
     const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
@@ -186,7 +188,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         const Params & params, const std::string & bucketName)
         : S3BinaryCacheStore(params)
         , bucketName(bucketName)
-        , s3Helper(profile, region)
+        , s3Helper(profile, region, endpoint)
     {
         diskCache = getNarInfoDiskCache();
     }
@@ -273,6 +275,9 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         return true;
     }
 
+    std::shared_ptr<TransferManager> transferManager;
+    std::once_flag transferManagerCreated;
+
     void uploadFile(const std::string & path, const std::string & data,
         const std::string & mimeType,
         const std::string & contentEncoding)
@@ -284,61 +289,49 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor>
             executor = std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(maxThreads);
 
-        TransferManagerConfiguration transferConfig(executor.get());
-
-        transferConfig.s3Client = s3Helper.client;
-        transferConfig.bufferSize = bufferSize;
-
-        if (contentEncoding != "")
-            transferConfig.createMultipartUploadTemplate.SetContentEncoding(
-                contentEncoding);
-
-        transferConfig.uploadProgressCallback =
-            [&](const TransferManager *transferManager,
-                const std::shared_ptr<const TransferHandle>
-                    &transferHandle) {
-              //FIXME: find a way to properly abort the multipart upload.
-              checkInterrupt();
-              printTalkative("upload progress ('%s'): '%d' of '%d' bytes",
-                             path,
-                             transferHandle->GetBytesTransferred(),
-                             transferHandle->GetBytesTotalSize());
-            };
+        std::call_once(transferManagerCreated, [&]() {
 
-        transferConfig.transferStatusUpdatedCallback =
-            [&](const TransferManager *,
-                const std::shared_ptr<const TransferHandle>
-                    &transferHandle) {
-              switch (transferHandle->GetStatus()) {
-                  case TransferStatus::COMPLETED:
-                      printTalkative("upload of '%s' completed", path);
-                      stats.put++;
-                      stats.putBytes += data.size();
-                      break;
-                  case TransferStatus::IN_PROGRESS:
-                      break;
-                  case TransferStatus::FAILED:
-                      throw Error("AWS error: failed to upload 's3://%s/%s'",
-                                  bucketName, path);
-                      break;
-                  default:
-                      throw Error("AWS error: transfer status of 's3://%s/%s' "
-                                  "in unexpected state",
-                                  bucketName, path);
-              };
-            };
+            TransferManagerConfiguration transferConfig(executor.get());
+
+            transferConfig.s3Client = s3Helper.client;
+            transferConfig.bufferSize = bufferSize;
 
-        std::shared_ptr<TransferManager> transferManager =
-            TransferManager::Create(transferConfig);
+            transferConfig.uploadProgressCallback =
+                [&](const TransferManager *transferManager,
+                    const std::shared_ptr<const TransferHandle>
+                    &transferHandle)
+                {
+                    //FIXME: find a way to properly abort the multipart upload.
+                    //checkInterrupt();
+                    debug("upload progress ('%s'): '%d' of '%d' bytes",
+                        path,
+                        transferHandle->GetBytesTransferred(),
+                        transferHandle->GetBytesTotalSize());
+                };
+
+            transferManager = TransferManager::Create(transferConfig);
+        });
 
         auto now1 = std::chrono::steady_clock::now();
 
         std::shared_ptr<TransferHandle> transferHandle =
-            transferManager->UploadFile(stream, bucketName, path, mimeType,
-                                        Aws::Map<Aws::String, Aws::String>());
+            transferManager->UploadFile(
+                stream, bucketName, path, mimeType,
+                Aws::Map<Aws::String, Aws::String>(),
+                nullptr, contentEncoding);
 
         transferHandle->WaitUntilFinished();
 
+        if (transferHandle->GetStatus() == TransferStatus::FAILED)
+            throw Error("AWS error: failed to upload 's3://%s/%s': %s",
+                bucketName, path, transferHandle->GetLastError().GetMessage());
+
+        if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
+            throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state",
+                bucketName, path);
+
+        printTalkative("upload of '%s' completed", path);
+
         auto now2 = std::chrono::steady_clock::now();
 
         auto duration =
@@ -349,6 +342,8 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
                   bucketName % path % data.size() % duration);
 
         stats.putTimeMs += duration;
+        stats.putBytes += data.size();
+        stats.put++;
     }
 
     void upsertFile(const std::string & path, const std::string & data,
@@ -364,24 +359,23 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
             uploadFile(path, data, mimeType, "");
     }
 
-    void getFile(const std::string & path,
-        std::function<void(std::shared_ptr<std::string>)> success,
-        std::function<void(std::exception_ptr exc)> failure) override
+    void getFile(const std::string & path, Sink & sink) override
     {
-        sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
-            stats.get++;
+        stats.get++;
 
-            auto res = s3Helper.getObject(bucketName, path);
+        // FIXME: stream output to sink.
+        auto res = s3Helper.getObject(bucketName, path);
 
-            stats.getBytes += res.data ? res.data->size() : 0;
-            stats.getTimeMs += res.durationMs;
+        stats.getBytes += res.data ? res.data->size() : 0;
+        stats.getTimeMs += res.durationMs;
 
-            if (res.data)
-                printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms",
-                    bucketName, path, res.data->size(), res.durationMs);
+        if (res.data) {
+            printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms",
+                bucketName, path, res.data->size(), res.durationMs);
 
-            return res.data;
-        });
+            sink((unsigned char *) res.data->data(), res.data->size());
+        } else
+            throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
     }
 
     PathSet queryAllValidPaths() override
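
The new endpoint setting overrides the default S3 endpoint, which also makes it possible to point a binary cache at an S3-compatible server. The bucket name and host below are purely illustrative:

    nix copy --to 's3://my-cache?endpoint=minio.example.com' /nix/store/<path>
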
diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh
index 4f996400343c..95d612b66335 100644
--- a/src/libstore/s3.hh
+++ b/src/libstore/s3.hh
@@ -14,9 +14,9 @@ struct S3Helper
     ref<Aws::Client::ClientConfiguration> config;
     ref<Aws::S3::S3Client> client;
 
-    S3Helper(const std::string & profile, const std::string & region);
+    S3Helper(const std::string & profile, const std::string & region, const std::string & endpoint);
 
-    ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region);
+    ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region, const std::string & endpoint);
 
     struct DownloadResult
     {
diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh
index f67d1e2580a5..9fae6d5349f1 100644
--- a/src/libstore/serve-protocol.hh
+++ b/src/libstore/serve-protocol.hh
@@ -5,7 +5,7 @@ namespace nix {
 #define SERVE_MAGIC_1 0x390c9deb
 #define SERVE_MAGIC_2 0x5452eecb
 
-#define SERVE_PROTOCOL_VERSION 0x204
+#define SERVE_PROTOCOL_VERSION 0x205
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
 
@@ -18,6 +18,7 @@ typedef enum {
     cmdBuildPaths = 6,
     cmdQueryClosure = 7,
     cmdBuildDerivation = 8,
+    cmdAddToStoreNar = 9,
 } ServeCommand;
 
 }
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 42d40e71d8be..a061d64f36d8 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -10,6 +10,7 @@ namespace nix {
 [[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs)
 {
     int err = sqlite3_errcode(db);
+    int exterr = sqlite3_extended_errcode(db);
 
     auto path = sqlite3_db_filename(db, nullptr);
     if (!path) path = "(in-memory)";
@@ -21,7 +22,7 @@ namespace nix {
             : fmt("SQLite database '%s' is busy", path));
     }
     else
-        throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(err), path);
+        throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(exterr), path);
 }
 
 SQLite::SQLite(const Path & path)
diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc
index 033c580936ad..5e0e44935cca 100644
--- a/src/libstore/ssh.cc
+++ b/src/libstore/ssh.cc
@@ -4,8 +4,9 @@ namespace nix {
 
 SSHMaster::SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress, int logFD)
     : host(host)
+    , fakeSSH(host == "localhost")
     , keyFile(keyFile)
-    , useMaster(useMaster)
+    , useMaster(useMaster && !fakeSSH)
     , compress(compress)
     , logFD(logFD)
 {
@@ -45,12 +46,19 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
         if (logFD != -1 && dup2(logFD, STDERR_FILENO) == -1)
             throw SysError("duping over stderr");
 
-        Strings args = { "ssh", host.c_str(), "-x", "-a" };
-        addCommonSSHOpts(args);
-        if (socketPath != "")
-            args.insert(args.end(), {"-S", socketPath});
-        if (verbosity >= lvlChatty)
-            args.push_back("-v");
+        Strings args;
+
+        if (fakeSSH) {
+            args = { "bash", "-c" };
+        } else {
+            args = { "ssh", host.c_str(), "-x", "-a" };
+            addCommonSSHOpts(args);
+            if (socketPath != "")
+                args.insert(args.end(), {"-S", socketPath});
+            if (verbosity >= lvlChatty)
+                args.push_back("-v");
+        }
+
         args.push_back(command);
         execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
 
diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh
index 1268e6d00054..4f0f0bd29f9f 100644
--- a/src/libstore/ssh.hh
+++ b/src/libstore/ssh.hh
@@ -10,6 +10,7 @@ class SSHMaster
 private:
 
     const std::string host;
+    bool fakeSSH;
     const std::string keyFile;
     const bool useMaster;
     const bool compress;
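
With the fakeSSH shortcut declared here, a host of exactly "localhost" bypasses ssh and runs the remote command through "bash -c", which is mainly convenient for exercising ssh:// and ssh-ng:// stores without an SSH daemon. An illustrative command:

    nix copy --to ssh-ng://localhost /nix/store/<path>
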
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 1a0d12ca78c2..1f42097fccfb 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -253,6 +253,8 @@ std::string Store::getUri()
 
 bool Store::isValidPath(const Path & storePath)
 {
+    assertStorePath(storePath);
+
     auto hashPart = storePathToHash(storePath);
 
     {
@@ -303,20 +305,20 @@ ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
     std::promise<ref<ValidPathInfo>> promise;
 
     queryPathInfo(storePath,
-        [&](ref<ValidPathInfo> info) {
-            promise.set_value(info);
-        },
-        [&](std::exception_ptr exc) {
-            promise.set_exception(exc);
-        });
+        {[&](std::future<ref<ValidPathInfo>> result) {
+            try {
+                promise.set_value(result.get());
+            } catch (...) {
+                promise.set_exception(std::current_exception());
+            }
+        }});
 
     return promise.get_future().get();
 }
 
 
 void Store::queryPathInfo(const Path & storePath,
-    std::function<void(ref<ValidPathInfo>)> success,
-    std::function<void(std::exception_ptr exc)> failure)
+    Callback<ref<ValidPathInfo>> callback)
 {
     auto hashPart = storePathToHash(storePath);
 
@@ -328,7 +330,7 @@ void Store::queryPathInfo(const Path & storePath,
                 stats.narInfoReadAverted++;
                 if (!*res)
                     throw InvalidPath(format("path '%s' is not valid") % storePath);
-                return success(ref<ValidPathInfo>(*res));
+                return callback(ref<ValidPathInfo>(*res));
             }
         }
 
@@ -344,35 +346,36 @@ void Store::queryPathInfo(const Path & storePath,
                         (res.second->path != storePath && storePathToName(storePath) != ""))
                         throw InvalidPath(format("path '%s' is not valid") % storePath);
                 }
-                return success(ref<ValidPathInfo>(res.second));
+                return callback(ref<ValidPathInfo>(res.second));
             }
         }
 
-    } catch (std::exception & e) {
-        return callFailure(failure);
-    }
+    } catch (...) { return callback.rethrow(); }
 
     queryPathInfoUncached(storePath,
-        [this, storePath, hashPart, success, failure](std::shared_ptr<ValidPathInfo> info) {
+        {[this, storePath, hashPart, callback](std::future<std::shared_ptr<ValidPathInfo>> fut) {
 
-            if (diskCache)
-                diskCache->upsertNarInfo(getUri(), hashPart, info);
+            try {
+                auto info = fut.get();
 
-            {
-                auto state_(state.lock());
-                state_->pathInfoCache.upsert(hashPart, info);
-            }
+                if (diskCache)
+                    diskCache->upsertNarInfo(getUri(), hashPart, info);
 
-            if (!info
-                || (info->path != storePath && storePathToName(storePath) != ""))
-            {
-                stats.narInfoMissing++;
-                return failure(std::make_exception_ptr(InvalidPath(format("path '%s' is not valid") % storePath)));
-            }
+                {
+                    auto state_(state.lock());
+                    state_->pathInfoCache.upsert(hashPart, info);
+                }
 
-            callSuccess(success, failure, ref<ValidPathInfo>(info));
+                if (!info
+                    || (info->path != storePath && storePathToName(storePath) != ""))
+                {
+                    stats.narInfoMissing++;
+                    throw InvalidPath("path '%s' is not valid", storePath);
+                }
 
-        }, failure);
+                callback(ref<ValidPathInfo>(info));
+            } catch (...) { callback.rethrow(); }
+        }});
 }
 
 
@@ -392,26 +395,19 @@ PathSet Store::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubsti
 
     auto doQuery = [&](const Path & path ) {
         checkInterrupt();
-        queryPathInfo(path,
-            [path, &state_, &wakeup](ref<ValidPathInfo> info) {
-                auto state(state_.lock());
+        queryPathInfo(path, {[path, &state_, &wakeup](std::future<ref<ValidPathInfo>> fut) {
+            auto state(state_.lock());
+            try {
+                auto info = fut.get();
                 state->valid.insert(path);
-                assert(state->left);
-                if (!--state->left)
-                    wakeup.notify_one();
-            },
-            [path, &state_, &wakeup](std::exception_ptr exc) {
-                auto state(state_.lock());
-                try {
-                    std::rethrow_exception(exc);
-                } catch (InvalidPath &) {
-                } catch (...) {
-                    state->exc = exc;
-                }
-                assert(state->left);
-                if (!--state->left)
-                    wakeup.notify_one();
-            });
+            } catch (InvalidPath &) {
+            } catch (...) {
+                state->exc = std::current_exception();
+            }
+            assert(state->left);
+            if (!--state->left)
+                wakeup.notify_one();
+        }});
     };
 
     for (auto & path : paths)
@@ -613,6 +609,8 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
             act.progress(total, info->narSize);
         });
         srcStore->narFromPath({storePath}, wrapperSink);
+    }, [&]() {
+        throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri());
     });
 
     dstStore->addToStore(*info, *source, repair, checkSigs);
@@ -633,11 +631,12 @@ void copyPaths(ref<Store> srcStore, ref<Store> dstStore, const PathSet & storePa
     Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));
 
     std::atomic<size_t> nrDone{0};
+    std::atomic<size_t> nrFailed{0};
     std::atomic<uint64_t> bytesExpected{0};
     std::atomic<uint64_t> nrRunning{0};
 
     auto showProgress = [&]() {
-        act.progress(nrDone, missing.size(), nrRunning);
+        act.progress(nrDone, missing.size(), nrRunning, nrFailed);
     };
 
     ThreadPool pool;
@@ -666,7 +665,16 @@ void copyPaths(ref<Store> srcStore, ref<Store> dstStore, const PathSet & storePa
             if (!dstStore->isValidPath(storePath)) {
                 MaintainCount<decltype(nrRunning)> mc(nrRunning);
                 showProgress();
-                copyStorePath(srcStore, dstStore, storePath, repair, checkSigs);
+                try {
+                    copyStorePath(srcStore, dstStore, storePath, repair, checkSigs);
+                } catch (Error &e) {
+                    nrFailed++;
+                    if (!settings.keepGoing)
+                        throw e;
+                    logger->log(lvlError, format("could not copy %s: %s") % storePath % e.what());
+                    showProgress();
+                    return;
+                }
             }
 
             nrDone++;
@@ -838,8 +846,24 @@ ref<Store> openStore(const std::string & uri_,
     if (q != std::string::npos) {
         for (auto s : tokenizeString<Strings>(uri.substr(q + 1), "&")) {
             auto e = s.find('=');
-            if (e != std::string::npos)
-                params[s.substr(0, e)] = s.substr(e + 1);
+            if (e != std::string::npos) {
+                auto value = s.substr(e + 1);
+                std::string decoded;
+                for (size_t i = 0; i < value.size(); ) {
+                    if (value[i] == '%') {
+                        if (i + 2 >= value.size())
+                            throw Error("invalid URI parameter '%s'", value);
+                        try {
+                            decoded += std::stoul(std::string(value, i + 1, 2), 0, 16);
+                            i += 3;
+                        } catch (...) {
+                            throw Error("invalid URI parameter '%s'", value);
+                        }
+                    } else
+                        decoded += value[i++];
+                }
+                params[s.substr(0, e)] = decoded;
+            }
         }
         uri = uri_.substr(0, q);
     }
@@ -847,7 +871,7 @@ ref<Store> openStore(const std::string & uri_,
     for (auto fun : *RegisterStoreImplementation::implementations) {
         auto store = fun(uri, params);
         if (store) {
-            store->handleUnknownSettings();
+            store->warnUnknownSettings();
             return ref<Store>(store);
         }
     }
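
The percent-decoding added to openStore() means store URI query parameters can now contain reserved characters. For example (illustrative), a root directory containing a space:

    local?root=/tmp/nix%20test   ->   params["root"] == "/tmp/nix test"
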
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index ea259f07e8ab..7c5b495a4482 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -22,6 +22,7 @@ MakeError(SubstError, Error)
 MakeError(BuildError, Error) /* denotes a permanent build failure */
 MakeError(InvalidPath, Error)
 MakeError(Unsupported, Error)
+MakeError(SubstituteGone, Error)
 
 
 struct BasicDerivation;
@@ -355,14 +356,12 @@ public:
 
     /* Asynchronous version of queryPathInfo(). */
     void queryPathInfo(const Path & path,
-        std::function<void(ref<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure);
+        Callback<ref<ValidPathInfo>> callback);
 
 protected:
 
     virtual void queryPathInfoUncached(const Path & path,
-        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
-        std::function<void(std::exception_ptr exc)> failure) = 0;
+        Callback<std::shared_ptr<ValidPathInfo>> callback) = 0;
 
 public:
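
A hedged caller-side sketch of the Callback-based interface declared above; the function name and logging are illustrative, everything else follows the header:

    // Illustrative asynchronous caller: the callback receives a std::future
    // and unpacks either the ValidPathInfo or the stored exception.
    void showNarSize(Store & store, const Path & path)
    {
        store.queryPathInfo(path, {[path](std::future<ref<ValidPathInfo>> fut) {
            try {
                auto info = fut.get();      // rethrows on failure
                printInfo("nar size of '%s' is %d", path, info->narSize);
            } catch (InvalidPath &) {
                printInfo("'%s' is not a valid path", path);
            }
        }});
    }
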
 
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index 996e1d25355f..5ebdfaf134d6 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -6,7 +6,7 @@ namespace nix {
 #define WORKER_MAGIC_1 0x6e697863
 #define WORKER_MAGIC_2 0x6478696f
 
-#define PROTOCOL_VERSION 0x114
+#define PROTOCOL_VERSION 0x115
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
 
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index 154e2d20430c..1be8934a2eba 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -13,17 +13,25 @@
 
 #include "archive.hh"
 #include "util.hh"
-
+#include "config.hh"
 
 namespace nix {
 
+struct ArchiveSettings : Config
+{
+    Setting<bool> useCaseHack{this,
+        #if __APPLE__
+            true,
+        #else
+            false,
+        #endif
+        "use-case-hack",
+        "Whether to enable a Darwin-specific hack for dealing with file name collisions."};
+};
 
-bool useCaseHack =
-#if __APPLE__
-    true;
-#else
-    false;
-#endif
+static ArchiveSettings archiveSettings;
+
+static GlobalConfig::Register r1(&archiveSettings);
 
 const std::string narVersionMagic1 = "nix-archive-1";
 
@@ -78,7 +86,7 @@ static void dump(const Path & path, Sink & sink, PathFilter & filter)
            the case hack applied by restorePath(). */
         std::map<string, string> unhacked;
         for (auto & i : readDirectory(path))
-            if (useCaseHack) {
+            if (archiveSettings.useCaseHack) {
                 string name(i.name);
                 size_t pos = i.name.find(caseHackSuffix);
                 if (pos != string::npos) {
@@ -243,7 +251,7 @@ static void parse(ParseSink & sink, Source & source, const Path & path)
                     if (name <= prevName)
                         throw Error("NAR directory is not sorted");
                     prevName = name;
-                    if (useCaseHack) {
+                    if (archiveSettings.useCaseHack) {
                         auto i = names.find(name);
                         if (i != names.end()) {
                             debug(format("case collision between '%1%' and '%2%'") % i->first % name);
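
Because ArchiveSettings registers itself with the global configuration, the case hack becomes an ordinary nix.conf option; an illustrative line:

    use-case-hack = true
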
diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh
index 7a0e688e4201..25be426c1a4d 100644
--- a/src/libutil/archive.hh
+++ b/src/libutil/archive.hh
@@ -78,10 +78,6 @@ void restorePath(const Path & path, Source & source);
 void copyNAR(Source & source, Sink & sink);
 
 
-// FIXME: global variables are bad m'kay.
-extern bool useCaseHack;
-
-
 extern const std::string narVersionMagic1;
 
 
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
index e1782f8c4bd9..204c63cd26fc 100644
--- a/src/libutil/compression.cc
+++ b/src/libutil/compression.cc
@@ -8,246 +8,265 @@
 #include <cstdio>
 #include <cstring>
 
-#if HAVE_BROTLI
 #include <brotli/decode.h>
 #include <brotli/encode.h>
-#endif // HAVE_BROTLI
 
 #include <iostream>
 
 namespace nix {
 
-static const size_t bufSize = 32 * 1024;
-
-static void decompressNone(Source & source, Sink & sink)
+// Don't feed brotli too much at once.
+struct ChunkedCompressionSink : CompressionSink
 {
-    std::vector<unsigned char> buf(bufSize);
-    while (true) {
-        size_t n;
-        try {
-            n = source.read(buf.data(), buf.size());
-        } catch (EndOfFile &) {
-            break;
+    uint8_t outbuf[32 * 1024];
+
+    void write(const unsigned char * data, size_t len) override
+    {
+        const size_t CHUNK_SIZE = sizeof(outbuf) << 2;
+        while (len) {
+            size_t n = std::min(CHUNK_SIZE, len);
+            writeInternal(data, n);
+            data += n;
+            len -= n;
         }
-        sink(buf.data(), n);
     }
-}
 
-static void decompressXZ(Source & source, Sink & sink)
+    virtual void writeInternal(const unsigned char * data, size_t len) = 0;
+};
+
+struct NoneSink : CompressionSink
 {
-    lzma_stream strm(LZMA_STREAM_INIT);
-
-    lzma_ret ret = lzma_stream_decoder(
-        &strm, UINT64_MAX, LZMA_CONCATENATED);
-    if (ret != LZMA_OK)
-        throw CompressionError("unable to initialise lzma decoder");
-
-    Finally free([&]() { lzma_end(&strm); });
-
-    lzma_action action = LZMA_RUN;
-    std::vector<uint8_t> inbuf(bufSize), outbuf(bufSize);
-    strm.next_in = nullptr;
-    strm.avail_in = 0;
-    strm.next_out = outbuf.data();
-    strm.avail_out = outbuf.size();
-    bool eof = false;
-
-    while (true) {
-        checkInterrupt();
-
-        if (strm.avail_in == 0 && !eof) {
-            strm.next_in = inbuf.data();
-            try {
-                strm.avail_in = source.read((unsigned char *) strm.next_in, inbuf.size());
-            } catch (EndOfFile &) {
-                eof = true;
-            }
-        }
+    Sink & nextSink;
+    NoneSink(Sink & nextSink) : nextSink(nextSink) { }
+    void finish() override { flush(); }
+    void write(const unsigned char * data, size_t len) override { nextSink(data, len); }
+};
 
-        if (strm.avail_in == 0)
-            action = LZMA_FINISH;
+struct XzDecompressionSink : CompressionSink
+{
+    Sink & nextSink;
+    uint8_t outbuf[BUFSIZ];
+    lzma_stream strm = LZMA_STREAM_INIT;
+    bool finished = false;
 
-        lzma_ret ret = lzma_code(&strm, action);
+    XzDecompressionSink(Sink & nextSink) : nextSink(nextSink)
+    {
+        lzma_ret ret = lzma_stream_decoder(
+            &strm, UINT64_MAX, LZMA_CONCATENATED);
+        if (ret != LZMA_OK)
+            throw CompressionError("unable to initialise lzma decoder");
 
-        if (strm.avail_out < outbuf.size()) {
-            sink((unsigned char *) outbuf.data(), outbuf.size() - strm.avail_out);
-            strm.next_out = outbuf.data();
-            strm.avail_out = outbuf.size();
-        }
+        strm.next_out = outbuf;
+        strm.avail_out = sizeof(outbuf);
+    }
 
-        if (ret == LZMA_STREAM_END) return;
+    ~XzDecompressionSink()
+    {
+        lzma_end(&strm);
+    }
 
-        if (ret != LZMA_OK)
-            throw CompressionError("error %d while decompressing xz file", ret);
+    void finish() override
+    {
+        CompressionSink::flush();
+        write(nullptr, 0);
     }
-}
 
-static void decompressBzip2(Source & source, Sink & sink)
-{
-    bz_stream strm;
-    memset(&strm, 0, sizeof(strm));
-
-    int ret = BZ2_bzDecompressInit(&strm, 0, 0);
-    if (ret != BZ_OK)
-        throw CompressionError("unable to initialise bzip2 decoder");
-
-    Finally free([&]() { BZ2_bzDecompressEnd(&strm); });
-
-    std::vector<char> inbuf(bufSize), outbuf(bufSize);
-    strm.next_in = nullptr;
-    strm.avail_in = 0;
-    strm.next_out = outbuf.data();
-    strm.avail_out = outbuf.size();
-    bool eof = false;
-
-    while (true) {
-        checkInterrupt();
-
-        if (strm.avail_in == 0 && !eof) {
-            strm.next_in = inbuf.data();
-            try {
-                strm.avail_in = source.read((unsigned char *) strm.next_in, inbuf.size());
-            } catch (EndOfFile &) {
-                eof = true;
-            }
-        }
+    void write(const unsigned char * data, size_t len) override
+    {
+        strm.next_in = data;
+        strm.avail_in = len;
+
+        while (!finished && (!data || strm.avail_in)) {
+            checkInterrupt();
 
-        int ret = BZ2_bzDecompress(&strm);
+            lzma_ret ret = lzma_code(&strm, data ? LZMA_RUN : LZMA_FINISH);
+            if (ret != LZMA_OK && ret != LZMA_STREAM_END)
+                throw CompressionError("error %d while decompressing xz file", ret);
 
-        if (strm.avail_in == 0 && strm.avail_out == outbuf.size() && eof)
-            throw CompressionError("bzip2 data ends prematurely");
+            finished = ret == LZMA_STREAM_END;
 
-        if (strm.avail_out < outbuf.size()) {
-            sink((unsigned char *) outbuf.data(), outbuf.size() - strm.avail_out);
-            strm.next_out = outbuf.data();
-            strm.avail_out = outbuf.size();
+            if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) {
+                nextSink(outbuf, sizeof(outbuf) - strm.avail_out);
+                strm.next_out = outbuf;
+                strm.avail_out = sizeof(outbuf);
+            }
         }
+    }
+};
 
-        if (ret == BZ_STREAM_END) return;
+struct BzipDecompressionSink : ChunkedCompressionSink
+{
+    Sink & nextSink;
+    bz_stream strm;
+    bool finished = false;
 
+    BzipDecompressionSink(Sink & nextSink) : nextSink(nextSink)
+    {
+        memset(&strm, 0, sizeof(strm));
+        int ret = BZ2_bzDecompressInit(&strm, 0, 0);
         if (ret != BZ_OK)
-            throw CompressionError("error while decompressing bzip2 file");
+            throw CompressionError("unable to initialise bzip2 decoder");
+
+        strm.next_out = (char *) outbuf;
+        strm.avail_out = sizeof(outbuf);
     }
-}
 
-static void decompressBrotli(Source & source, Sink & sink)
-{
-#if !HAVE_BROTLI
-    RunOptions options(BROTLI, {"-d"});
-    options.standardIn = &source;
-    options.standardOut = &sink;
-    runProgram2(options);
-#else
-    auto *s = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr);
-    if (!s)
-        throw CompressionError("unable to initialize brotli decoder");
-
-    Finally free([s]() { BrotliDecoderDestroyInstance(s); });
-
-    std::vector<uint8_t> inbuf(bufSize), outbuf(bufSize);
-    const uint8_t * next_in = nullptr;
-    size_t avail_in = 0;
-    bool eof = false;
-
-    while (true) {
-        checkInterrupt();
-
-        if (avail_in == 0 && !eof) {
-            next_in = inbuf.data();
-            try {
-                avail_in = source.read((unsigned char *) next_in, inbuf.size());
-            } catch (EndOfFile &) {
-                eof = true;
+    ~BzipDecompressionSink()
+    {
+        BZ2_bzDecompressEnd(&strm);
+    }
+
+    void finish() override
+    {
+        flush();
+        write(nullptr, 0);
+    }
+
+    void writeInternal(const unsigned char * data, size_t len) override
+    {
+        assert(len <= std::numeric_limits<decltype(strm.avail_in)>::max());
+
+        strm.next_in = (char *) data;
+        strm.avail_in = len;
+
+        while (strm.avail_in) {
+            checkInterrupt();
+
+            int ret = BZ2_bzDecompress(&strm);
+            if (ret != BZ_OK && ret != BZ_STREAM_END)
+                throw CompressionError("error while decompressing bzip2 file");
+
+            finished = ret == BZ_STREAM_END;
+
+            if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) {
+                nextSink(outbuf, sizeof(outbuf) - strm.avail_out);
+                strm.next_out = (char *) outbuf;
+                strm.avail_out = sizeof(outbuf);
             }
         }
+    }
+};
 
-        uint8_t * next_out = outbuf.data();
-        size_t avail_out = outbuf.size();
-
-        auto ret = BrotliDecoderDecompressStream(s,
-                &avail_in, &next_in,
-                &avail_out, &next_out,
-                nullptr);
-
-        switch (ret) {
-        case BROTLI_DECODER_RESULT_ERROR:
-            throw CompressionError("error while decompressing brotli file");
-        case BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT:
-            if (eof)
-                throw CompressionError("incomplete or corrupt brotli file");
-            break;
-        case BROTLI_DECODER_RESULT_SUCCESS:
-            if (avail_in != 0)
-                throw CompressionError("unexpected input after brotli decompression");
-            break;
-        case BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT:
-            // I'm not sure if this can happen, but abort if this happens with empty buffer
-            if (avail_out == outbuf.size())
-                throw CompressionError("brotli decompression requires larger buffer");
-            break;
-        }
+struct BrotliDecompressionSink : ChunkedCompressionSink
+{
+    Sink & nextSink;
+    BrotliDecoderState * state;
+    bool finished = false;
 
-        // Always ensure we have full buffer for next invocation
-        if (avail_out < outbuf.size())
-            sink((unsigned char *) outbuf.data(), outbuf.size() - avail_out);
+    BrotliDecompressionSink(Sink & nextSink) : nextSink(nextSink)
+    {
+        state = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr);
+        if (!state)
+            throw CompressionError("unable to initialize brotli decoder");
+    }
 
-        if (ret == BROTLI_DECODER_RESULT_SUCCESS) return;
+    ~BrotliDecompressionSink()
+    {
+        BrotliDecoderDestroyInstance(state);
     }
-#endif // HAVE_BROTLI
-}
+
+    void finish() override
+    {
+        flush();
+        writeInternal(nullptr, 0);
+    }
+
+    void writeInternal(const unsigned char * data, size_t len) override
+    {
+        const uint8_t * next_in = data;
+        size_t avail_in = len;
+        uint8_t * next_out = outbuf;
+        size_t avail_out = sizeof(outbuf);
+
+        while (!finished && (!data || avail_in)) {
+            checkInterrupt();
+
+            if (!BrotliDecoderDecompressStream(state,
+                    &avail_in, &next_in,
+                    &avail_out, &next_out,
+                    nullptr))
+                throw CompressionError("error while decompressing brotli file");
+
+            if (avail_out < sizeof(outbuf) || avail_in == 0) {
+                nextSink(outbuf, sizeof(outbuf) - avail_out);
+                next_out = outbuf;
+                avail_out = sizeof(outbuf);
+            }
+
+            finished = BrotliDecoderIsFinished(state);
+        }
+    }
+};
 
 ref<std::string> decompress(const std::string & method, const std::string & in)
 {
-    StringSource source(in);
-    StringSink sink;
-    decompress(method, source, sink);
-    return sink.s;
+    StringSink ssink;
+    auto sink = makeDecompressionSink(method, ssink);
+    (*sink)(in);
+    sink->finish();
+    return ssink.s;
 }
 
-void decompress(const std::string & method, Source & source, Sink & sink)
+ref<CompressionSink> makeDecompressionSink(const std::string & method, Sink & nextSink)
 {
-    if (method == "none")
-        return decompressNone(source, sink);
+    if (method == "none" || method == "")
+        return make_ref<NoneSink>(nextSink);
     else if (method == "xz")
-        return decompressXZ(source, sink);
+        return make_ref<XzDecompressionSink>(nextSink);
     else if (method == "bzip2")
-        return decompressBzip2(source, sink);
+        return make_ref<BzipDecompressionSink>(nextSink);
     else if (method == "br")
-        return decompressBrotli(source, sink);
+        return make_ref<BrotliDecompressionSink>(nextSink);
     else
         throw UnknownCompressionMethod("unknown compression method '%s'", method);
 }
 
-struct NoneSink : CompressionSink
-{
-    Sink & nextSink;
-    NoneSink(Sink & nextSink) : nextSink(nextSink) { }
-    void finish() override { flush(); }
-    void write(const unsigned char * data, size_t len) override { nextSink(data, len); }
-};
-
-struct XzSink : CompressionSink
+struct XzCompressionSink : CompressionSink
 {
     Sink & nextSink;
     uint8_t outbuf[BUFSIZ];
     lzma_stream strm = LZMA_STREAM_INIT;
     bool finished = false;
 
-    template <typename F>
-    XzSink(Sink & nextSink, F&& initEncoder) : nextSink(nextSink) {
-        lzma_ret ret = initEncoder();
+    XzCompressionSink(Sink & nextSink, bool parallel) : nextSink(nextSink)
+    {
+        lzma_ret ret;
+        bool done = false;
+
+        if (parallel) {
+#ifdef HAVE_LZMA_MT
+            lzma_mt mt_options = {};
+            mt_options.flags = 0;
+            mt_options.timeout = 300; // Using the same setting as the xz cmd line
+            mt_options.preset = LZMA_PRESET_DEFAULT;
+            mt_options.filters = NULL;
+            mt_options.check = LZMA_CHECK_CRC64;
+            mt_options.threads = lzma_cputhreads();
+            mt_options.block_size = 0;
+            if (mt_options.threads == 0)
+                mt_options.threads = 1;
+            // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the
+            // number of threads.
+            ret = lzma_stream_encoder_mt(&strm, &mt_options);
+            done = true;
+#else
+            printMsg(lvlError, "warning: parallel XZ compression requested but not supported, falling back to single-threaded compression");
+#endif
+        }
+
+        if (!done)
+            ret = lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64);
+
         if (ret != LZMA_OK)
             throw CompressionError("unable to initialise lzma encoder");
+
         // FIXME: apply the x86 BCJ filter?
 
         strm.next_out = outbuf;
         strm.avail_out = sizeof(outbuf);
     }
-    XzSink(Sink & nextSink) : XzSink(nextSink, [this]() {
-        return lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64);
-    }) {}
 
-    ~XzSink()
+    ~XzCompressionSink()
     {
         lzma_end(&strm);
     }
@@ -255,43 +274,25 @@ struct XzSink : CompressionSink
     void finish() override
     {
         CompressionSink::flush();
-
-        assert(!finished);
-        finished = true;
-
-        while (true) {
-            checkInterrupt();
-
-            lzma_ret ret = lzma_code(&strm, LZMA_FINISH);
-            if (ret != LZMA_OK && ret != LZMA_STREAM_END)
-                throw CompressionError("error while flushing xz file");
-
-            if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
-                nextSink(outbuf, sizeof(outbuf) - strm.avail_out);
-                strm.next_out = outbuf;
-                strm.avail_out = sizeof(outbuf);
-            }
-
-            if (ret == LZMA_STREAM_END) break;
-        }
+        write(nullptr, 0);
     }
 
     void write(const unsigned char * data, size_t len) override
     {
-        assert(!finished);
-
         strm.next_in = data;
         strm.avail_in = len;
 
-        while (strm.avail_in) {
+        while (!finished && (!data || strm.avail_in)) {
             checkInterrupt();
 
-            lzma_ret ret = lzma_code(&strm, LZMA_RUN);
-            if (ret != LZMA_OK)
-                throw CompressionError("error while compressing xz file");
+            lzma_ret ret = lzma_code(&strm, data ? LZMA_RUN : LZMA_FINISH);
+            if (ret != LZMA_OK && ret != LZMA_STREAM_END)
+                throw CompressionError("error %d while compressing xz file", ret);
+
+            finished = ret == LZMA_STREAM_END;
 
-            if (strm.avail_out == 0) {
-                nextSink(outbuf, sizeof(outbuf));
+            if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) {
+                nextSink(outbuf, sizeof(outbuf) - strm.avail_out);
                 strm.next_out = outbuf;
                 strm.avail_out = sizeof(outbuf);
             }
@@ -299,46 +300,24 @@ struct XzSink : CompressionSink
     }
 };
 
-#ifdef HAVE_LZMA_MT
-struct ParallelXzSink : public XzSink
-{
-  ParallelXzSink(Sink &nextSink) : XzSink(nextSink, [this]() {
-        lzma_mt mt_options = {};
-        mt_options.flags = 0;
-        mt_options.timeout = 300; // Using the same setting as the xz cmd line
-        mt_options.preset = LZMA_PRESET_DEFAULT;
-        mt_options.filters = NULL;
-        mt_options.check = LZMA_CHECK_CRC64;
-        mt_options.threads = lzma_cputhreads();
-        mt_options.block_size = 0;
-        if (mt_options.threads == 0)
-            mt_options.threads = 1;
-        // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the
-        // number of threads.
-        return lzma_stream_encoder_mt(&strm, &mt_options);
-  }) {}
-};
-#endif
-
-struct BzipSink : CompressionSink
+struct BzipCompressionSink : ChunkedCompressionSink
 {
     Sink & nextSink;
-    char outbuf[BUFSIZ];
     bz_stream strm;
     bool finished = false;
 
-    BzipSink(Sink & nextSink) : nextSink(nextSink)
+    BzipCompressionSink(Sink & nextSink) : nextSink(nextSink)
     {
         memset(&strm, 0, sizeof(strm));
         int ret = BZ2_bzCompressInit(&strm, 9, 0, 30);
         if (ret != BZ_OK)
             throw CompressionError("unable to initialise bzip2 encoder");
 
-        strm.next_out = outbuf;
+        strm.next_out = (char *) outbuf;
         strm.avail_out = sizeof(outbuf);
     }
 
-    ~BzipSink()
+    ~BzipCompressionSink()
     {
         BZ2_bzCompressEnd(&strm);
     }
@@ -346,114 +325,49 @@ struct BzipSink : CompressionSink
     void finish() override
     {
         flush();
-
-        assert(!finished);
-        finished = true;
-
-        while (true) {
-            checkInterrupt();
-
-            int ret = BZ2_bzCompress(&strm, BZ_FINISH);
-            if (ret != BZ_FINISH_OK && ret != BZ_STREAM_END)
-                throw CompressionError("error while flushing bzip2 file");
-
-            if (strm.avail_out == 0 || ret == BZ_STREAM_END) {
-                nextSink((unsigned char *) outbuf, sizeof(outbuf) - strm.avail_out);
-                strm.next_out = outbuf;
-                strm.avail_out = sizeof(outbuf);
-            }
-
-            if (ret == BZ_STREAM_END) break;
-        }
-    }
-
-    void write(const unsigned char * data, size_t len) override
-    {
-        /* Bzip2's 'avail_in' parameter is an unsigned int, so we need
-           to split the input into chunks of at most 4 GiB. */
-        while (len) {
-            auto n = std::min((size_t) std::numeric_limits<decltype(strm.avail_in)>::max(), len);
-            writeInternal(data, n);
-            data += n;
-            len -= n;
-        }
+        writeInternal(nullptr, 0);
     }
 
-    void writeInternal(const unsigned char * data, size_t len)
+    void writeInternal(const unsigned char * data, size_t len) override
     {
-        assert(!finished);
         assert(len <= std::numeric_limits<decltype(strm.avail_in)>::max());
 
         strm.next_in = (char *) data;
         strm.avail_in = len;
 
-        while (strm.avail_in) {
+        while (!finished && (!data || strm.avail_in)) {
             checkInterrupt();
 
-            int ret = BZ2_bzCompress(&strm, BZ_RUN);
-            if (ret != BZ_OK)
-                CompressionError("error while compressing bzip2 file");
+            int ret = BZ2_bzCompress(&strm, data ? BZ_RUN : BZ_FINISH);
+            if (ret != BZ_RUN_OK && ret != BZ_FINISH_OK && ret != BZ_STREAM_END)
+                throw CompressionError("error %d while compressing bzip2 file", ret);
 
-            if (strm.avail_out == 0) {
-                nextSink((unsigned char *) outbuf, sizeof(outbuf));
-                strm.next_out = outbuf;
+            finished = ret == BZ_STREAM_END;
+
+            if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) {
+                nextSink(outbuf, sizeof(outbuf) - strm.avail_out);
+                strm.next_out = (char *) outbuf;
                 strm.avail_out = sizeof(outbuf);
             }
         }
     }
 };
 
-struct LambdaCompressionSink : CompressionSink
-{
-    Sink & nextSink;
-    std::string data;
-    using CompressFnTy = std::function<std::string(const std::string&)>;
-    CompressFnTy compressFn;
-    LambdaCompressionSink(Sink& nextSink, CompressFnTy compressFn)
-        : nextSink(nextSink)
-        , compressFn(std::move(compressFn))
-    {
-    };
-
-    void finish() override
-    {
-        flush();
-        nextSink(compressFn(data));
-    }
-
-    void write(const unsigned char * data, size_t len) override
-    {
-        checkInterrupt();
-        this->data.append((const char *) data, len);
-    }
-};
-
-struct BrotliCmdSink : LambdaCompressionSink
-{
-    BrotliCmdSink(Sink& nextSink)
-        : LambdaCompressionSink(nextSink, [](const std::string& data) {
-            return runProgram(BROTLI, true, {}, data);
-        })
-    {
-    }
-};
-
-#if HAVE_BROTLI
-struct BrotliSink : CompressionSink
+struct BrotliCompressionSink : ChunkedCompressionSink
 {
     Sink & nextSink;
     uint8_t outbuf[BUFSIZ];
     BrotliEncoderState *state;
     bool finished = false;
 
-    BrotliSink(Sink & nextSink) : nextSink(nextSink)
+    BrotliCompressionSink(Sink & nextSink) : nextSink(nextSink)
     {
         state = BrotliEncoderCreateInstance(nullptr, nullptr, nullptr);
         if (!state)
             throw CompressionError("unable to initialise brotli encoder");
     }
 
-    ~BrotliSink()
+    ~BrotliCompressionSink()
     {
         BrotliEncoderDestroyInstance(state);
     }
@@ -461,94 +375,47 @@ struct BrotliSink : CompressionSink
     void finish() override
     {
         flush();
-        assert(!finished);
-
-        const uint8_t *next_in = nullptr;
-        size_t avail_in = 0;
-        uint8_t *next_out = outbuf;
-        size_t avail_out = sizeof(outbuf);
-        while (!finished) {
-            checkInterrupt();
-
-            if (!BrotliEncoderCompressStream(state,
-                        BROTLI_OPERATION_FINISH,
-                        &avail_in, &next_in,
-                        &avail_out, &next_out,
-                        nullptr))
-                throw CompressionError("error while finishing brotli file");
-
-            finished = BrotliEncoderIsFinished(state);
-            if (avail_out == 0 || finished) {
-                nextSink(outbuf, sizeof(outbuf) - avail_out);
-                next_out = outbuf;
-                avail_out = sizeof(outbuf);
-            }
-        }
+        writeInternal(nullptr, 0);
     }
 
-    void write(const unsigned char * data, size_t len) override
+    void writeInternal(const unsigned char * data, size_t len) override
     {
-        // Don't feed brotli too much at once
-        const size_t CHUNK_SIZE = sizeof(outbuf) << 2;
-        while (len) {
-          size_t n = std::min(CHUNK_SIZE, len);
-          writeInternal(data, n);
-          data += n;
-          len -= n;
-        }
-    }
-
-    void writeInternal(const unsigned char * data, size_t len)
-    {
-        assert(!finished);
-
-        const uint8_t *next_in = data;
+        const uint8_t * next_in = data;
         size_t avail_in = len;
-        uint8_t *next_out = outbuf;
+        uint8_t * next_out = outbuf;
         size_t avail_out = sizeof(outbuf);
 
-        while (avail_in > 0) {
+        while (!finished && (!data || avail_in)) {
             checkInterrupt();
 
             if (!BrotliEncoderCompressStream(state,
-                      BROTLI_OPERATION_PROCESS,
-                      &avail_in, &next_in,
-                      &avail_out, &next_out,
-                      nullptr))
-                throw CompressionError("error while compressing brotli file");
+                    data ? BROTLI_OPERATION_PROCESS : BROTLI_OPERATION_FINISH,
+                    &avail_in, &next_in,
+                    &avail_out, &next_out,
+                    nullptr))
+                throw CompressionError("error while compressing brotli compression");
 
             if (avail_out < sizeof(outbuf) || avail_in == 0) {
                 nextSink(outbuf, sizeof(outbuf) - avail_out);
                 next_out = outbuf;
                 avail_out = sizeof(outbuf);
             }
+
+            finished = BrotliEncoderIsFinished(state);
         }
     }
 };
-#endif // HAVE_BROTLI
 
 ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel)
 {
-    if (parallel) {
-#ifdef HAVE_LZMA_MT
-        if (method == "xz")
-            return make_ref<ParallelXzSink>(nextSink);
-#endif
-        printMsg(lvlError, format("Warning: parallel compression requested but not supported for method '%1%', falling back to single-threaded compression") % method);
-    }
-
     if (method == "none")
         return make_ref<NoneSink>(nextSink);
     else if (method == "xz")
-        return make_ref<XzSink>(nextSink);
+        return make_ref<XzCompressionSink>(nextSink, parallel);
     else if (method == "bzip2")
-        return make_ref<BzipSink>(nextSink);
+        return make_ref<BzipCompressionSink>(nextSink);
     else if (method == "br")
-#if HAVE_BROTLI
-        return make_ref<BrotliSink>(nextSink);
-#else
-        return make_ref<BrotliCmdSink>(nextSink);
-#endif
+        return make_ref<BrotliCompressionSink>(nextSink);
     else
         throw UnknownCompressionMethod(format("unknown compression method '%s'") % method);
 }
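
For reference, a minimal sketch of how a caller drives the new sink interface, requesting the multi-threaded xz encoder where liblzma supports it (it mirrors the decompress() helper above; compressParallelXz is an illustrative name, not part of this patch):

    // Compress a whole buffer through XzCompressionSink; with parallel = true
    // the encoder uses lzma_stream_encoder_mt() when HAVE_LZMA_MT is defined,
    // otherwise it falls back to the single-threaded lzma_easy_encoder().
    ref<std::string> compressParallelXz(const std::string & in)
    {
        StringSink out;
        auto sink = makeCompressionSink("xz", out, /* parallel = */ true);
        (*sink)(in);      // feed the input; Sink has a std::string overload
        sink->finish();   // flush buffered data and emit the final xz block
        return out.s;
    }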
diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh
index f7a3e3fbd32e..dd666a4e19fd 100644
--- a/src/libutil/compression.hh
+++ b/src/libutil/compression.hh
@@ -8,17 +8,17 @@
 
 namespace nix {
 
-ref<std::string> decompress(const std::string & method, const std::string & in);
-
-void decompress(const std::string & method, Source & source, Sink & sink);
-
-ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel = false);
-
 struct CompressionSink : BufferedSink
 {
     virtual void finish() = 0;
 };
 
+ref<std::string> decompress(const std::string & method, const std::string & in);
+
+ref<CompressionSink> makeDecompressionSink(const std::string & method, Sink & nextSink);
+
+ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel = false);
+
 ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel = false);
 
 MakeError(UnknownCompressionMethod, Error);
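
A minimal sketch of streaming decompression through the new makeDecompressionSink() API, assuming the LambdaSink helper from serialise.hh; the compressed chunks are illustrative placeholders:

    // Decompressed output is pushed into 'consumer' as it becomes available,
    // so the full result never has to be materialised in memory.
    LambdaSink consumer([](const unsigned char * data, size_t len) {
        /* handle 'len' decompressed bytes starting at 'data' */
    });

    auto sink = makeDecompressionSink("xz", consumer);
    (*sink)(compressedChunk1);   // feed compressed data as it arrives
    (*sink)(compressedChunk2);
    sink->finish();              // signal end of input and flush remaining output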
diff --git a/src/libutil/config.cc b/src/libutil/config.cc
index ce6858f0d65a..9023cb1bb6de 100644
--- a/src/libutil/config.cc
+++ b/src/libutil/config.cc
@@ -4,15 +4,13 @@
 
 namespace nix {
 
-void Config::set(const std::string & name, const std::string & value)
+bool Config::set(const std::string & name, const std::string & value)
 {
     auto i = _settings.find(name);
-    if (i == _settings.end()) {
-        extras.emplace(name, value);
-    } else {
-        i->second.setting->set(value);
-        i->second.setting->overriden = true;
-    }
+    if (i == _settings.end()) return false;
+    i->second.setting->set(value);
+    i->second.setting->overriden = true;
+    return true;
 }
 
 void Config::addSetting(AbstractSetting * setting)
@@ -23,46 +21,51 @@ void Config::addSetting(AbstractSetting * setting)
 
     bool set = false;
 
-    auto i = extras.find(setting->name);
-    if (i != extras.end()) {
+    auto i = unknownSettings.find(setting->name);
+    if (i != unknownSettings.end()) {
         setting->set(i->second);
         setting->overriden = true;
-        extras.erase(i);
+        unknownSettings.erase(i);
         set = true;
     }
 
     for (auto & alias : setting->aliases) {
-        auto i = extras.find(alias);
-        if (i != extras.end()) {
+        auto i = unknownSettings.find(alias);
+        if (i != unknownSettings.end()) {
             if (set)
                 warn("setting '%s' is set, but it's an alias of '%s' which is also set",
                     alias, setting->name);
             else {
                 setting->set(i->second);
                 setting->overriden = true;
-                extras.erase(i);
+                unknownSettings.erase(i);
                 set = true;
             }
         }
     }
 }
 
-void Config::handleUnknownSettings()
+void AbstractConfig::warnUnknownSettings()
 {
-    for (auto & s : extras)
+    for (auto & s : unknownSettings)
         warn("unknown setting '%s'", s.first);
 }
 
-StringMap Config::getSettings(bool overridenOnly)
+void AbstractConfig::reapplyUnknownSettings()
+{
+    auto unknownSettings2 = std::move(unknownSettings);
+    for (auto & s : unknownSettings2)
+        set(s.first, s.second);
+}
+
+void Config::getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly)
 {
-    StringMap res;
     for (auto & opt : _settings)
         if (!opt.second.isAlias && (!overridenOnly || opt.second.setting->overriden))
-            res.emplace(opt.first, opt.second.setting->to_string());
-    return res;
+            res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), opt.second.setting->description});
 }
 
-void Config::applyConfigFile(const Path & path)
+void AbstractConfig::applyConfigFile(const Path & path)
 {
     try {
         string contents = readFile(path);
@@ -287,4 +290,49 @@ void PathSetting::set(const std::string & str)
         value = canonPath(str);
 }
 
+bool GlobalConfig::set(const std::string & name, const std::string & value)
+{
+    for (auto & config : *configRegistrations)
+        if (config->set(name, value)) return true;
+
+    unknownSettings.emplace(name, value);
+
+    return false;
+}
+
+void GlobalConfig::getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly)
+{
+    for (auto & config : *configRegistrations)
+        config->getSettings(res, overridenOnly);
+}
+
+void GlobalConfig::resetOverriden()
+{
+    for (auto & config : *configRegistrations)
+        config->resetOverriden();
+}
+
+void GlobalConfig::toJSON(JSONObject & out)
+{
+    for (auto & config : *configRegistrations)
+        config->toJSON(out);
+}
+
+void GlobalConfig::convertToArgs(Args & args, const std::string & category)
+{
+    for (auto & config : *configRegistrations)
+        config->convertToArgs(args, category);
+}
+
+GlobalConfig globalConfig;
+
+GlobalConfig::ConfigRegistrations * GlobalConfig::configRegistrations;
+
+GlobalConfig::Register::Register(Config * config)
+{
+    if (!configRegistrations)
+        configRegistrations = new ConfigRegistrations;
+    configRegistrations->emplace_back(config);
+}
+
 }
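
A minimal sketch of how a settings object hooks into globalConfig via the new Register helper; MySettings and mySettings are illustrative names, not part of this patch:

    struct MySettings : Config
    {
        Setting<bool> verboseBuild{this, true, "verbose-build",
            "Whether to print the full build log."};
    };

    MySettings mySettings;

    // After registration, globalConfig.set()/getSettings()/toJSON() dispatch
    // to mySettings along with every other registered Config.
    static GlobalConfig::Register registerMySettings(&mySettings);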
diff --git a/src/libutil/config.hh b/src/libutil/config.hh
index d2e7faf17434..d86c65ff033a 100644
--- a/src/libutil/config.hh
+++ b/src/libutil/config.hh
@@ -12,6 +12,40 @@ class AbstractSetting;
 class JSONPlaceholder;
 class JSONObject;
 
+class AbstractConfig
+{
+protected:
+    StringMap unknownSettings;
+
+    AbstractConfig(const StringMap & initials = {})
+        : unknownSettings(initials)
+    { }
+
+public:
+
+    virtual bool set(const std::string & name, const std::string & value) = 0;
+
+    struct SettingInfo
+    {
+        std::string value;
+        std::string description;
+    };
+
+    virtual void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) = 0;
+
+    void applyConfigFile(const Path & path);
+
+    virtual void resetOverriden() = 0;
+
+    virtual void toJSON(JSONObject & out) = 0;
+
+    virtual void convertToArgs(Args & args, const std::string & category) = 0;
+
+    void warnUnknownSettings();
+
+    void reapplyUnknownSettings();
+};
+
 /* A class to simplify providing configuration settings. The typical
    use is to inherit Config and add Setting<T> members:
 
@@ -27,7 +61,7 @@ class JSONObject;
    };
 */
 
-class Config
+class Config : public AbstractConfig
 {
     friend class AbstractSetting;
 
@@ -48,31 +82,23 @@ private:
 
     Settings _settings;
 
-    StringMap extras;
-
 public:
 
-    Config(const StringMap & initials)
-        : extras(initials)
+    Config(const StringMap & initials = {})
+        : AbstractConfig(initials)
     { }
 
-    void set(const std::string & name, const std::string & value);
+    bool set(const std::string & name, const std::string & value) override;
 
     void addSetting(AbstractSetting * setting);
 
-    void handleUnknownSettings();
-
-    StringMap getSettings(bool overridenOnly = false);
+    void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) override;
 
-    const Settings & _getSettings() { return _settings; }
-
-    void applyConfigFile(const Path & path);
+    void resetOverriden() override;
 
-    void resetOverriden();
+    void toJSON(JSONObject & out) override;
 
-    void toJSON(JSONObject & out);
-
-    void convertToArgs(Args & args, const std::string & category);
+    void convertToArgs(Args & args, const std::string & category) override;
 };
 
 class AbstractSetting
@@ -209,4 +235,27 @@ public:
     void operator =(const Path & v) { this->assign(v); }
 };
 
+struct GlobalConfig : public AbstractConfig
+{
+    typedef std::vector<Config*> ConfigRegistrations;
+    static ConfigRegistrations * configRegistrations;
+
+    bool set(const std::string & name, const std::string & value) override;
+
+    void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) override;
+
+    void resetOverriden() override;
+
+    void toJSON(JSONObject & out) override;
+
+    void convertToArgs(Args & args, const std::string & category) override;
+
+    struct Register
+    {
+        Register(Config * config);
+    };
+};
+
+extern GlobalConfig globalConfig;
+
 }
diff --git a/src/libutil/json.cc b/src/libutil/json.cc
index 813b257016e4..0a6fb65f0605 100644
--- a/src/libutil/json.cc
+++ b/src/libutil/json.cc
@@ -31,6 +31,7 @@ template<> void toJSON<unsigned long>(std::ostream & str, const unsigned long &
 template<> void toJSON<long long>(std::ostream & str, const long long & n) { str << n; }
 template<> void toJSON<unsigned long long>(std::ostream & str, const unsigned long long & n) { str << n; }
 template<> void toJSON<float>(std::ostream & str, const float & n) { str << n; }
+template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
 
 template<> void toJSON<std::string>(std::ostream & str, const std::string & s)
 {
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index 33ae1ea389d7..17448f70efb6 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -133,7 +133,7 @@ size_t FdSource::readUnbuffered(unsigned char * data, size_t len)
     ssize_t n;
     do {
         checkInterrupt();
-        n = ::read(fd, (char *) data, bufSize);
+        n = ::read(fd, (char *) data, len);
     } while (n == -1 && errno == EINTR);
     if (n == -1) { _good = false; throw SysError("reading from file"); }
     if (n == 0) { _good = false; throw EndOfFile("unexpected end-of-file"); }
@@ -157,16 +157,24 @@ size_t StringSource::read(unsigned char * data, size_t len)
 }
 
 
-std::unique_ptr<Source> sinkToSource(std::function<void(Sink &)> fun)
+#if BOOST_VERSION >= 106300 && BOOST_VERSION < 106600
+#error Coroutines are broken in this version of Boost!
+#endif
+
+std::unique_ptr<Source> sinkToSource(
+    std::function<void(Sink &)> fun,
+    std::function<void()> eof)
 {
     struct SinkToSource : Source
     {
         typedef boost::coroutines2::coroutine<std::string> coro_t;
 
+        std::function<void()> eof;
         coro_t::pull_type coro;
 
-        SinkToSource(std::function<void(Sink &)> fun)
-            : coro([&](coro_t::push_type & yield) {
+        SinkToSource(std::function<void(Sink &)> fun, std::function<void()> eof)
+            : eof(eof)
+            , coro([&](coro_t::push_type & yield) {
                 LambdaSink sink([&](const unsigned char * data, size_t len) {
                     if (len) yield(std::string((const char *) data, len));
                 });
@@ -180,8 +188,7 @@ std::unique_ptr<Source> sinkToSource(std::function<void(Sink &)> fun)
 
         size_t read(unsigned char * data, size_t len) override
         {
-            if (!coro)
-                throw EndOfFile("coroutine has finished");
+            if (!coro) { eof(); abort(); }
 
             if (pos == cur.size()) {
                 if (!cur.empty()) coro();
@@ -197,7 +204,7 @@ std::unique_ptr<Source> sinkToSource(std::function<void(Sink &)> fun)
         }
     };
 
-    return std::make_unique<SinkToSource>(fun);
+    return std::make_unique<SinkToSource>(fun, eof);
 }
 
 
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index 6e703c52a1e3..4b6ad5da5b9c 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -77,10 +77,12 @@ struct BufferedSource : Source
 
     size_t read(unsigned char * data, size_t len) override;
 
-    /* Underlying read call, to be overridden. */
-    virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0;
 
     bool hasData();
+
+protected:
+    /* Underlying read call, to be overridden. */
+    virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0;
 };
 
 
@@ -134,8 +136,9 @@ struct FdSource : BufferedSource
         return *this;
     }
 
-    size_t readUnbuffered(unsigned char * data, size_t len) override;
     bool good() override;
+protected:
+    size_t readUnbuffered(unsigned char * data, size_t len) override;
 private:
     bool _good = true;
 };
@@ -211,7 +214,11 @@ struct LambdaSource : Source
 
 /* Convert a function that feeds data into a Sink into a Source. The
    Source executes the function as a coroutine. */
-std::unique_ptr<Source> sinkToSource(std::function<void(Sink &)> fun);
+std::unique_ptr<Source> sinkToSource(
+    std::function<void(Sink &)> fun,
+    std::function<void()> eof = []() {
+        throw EndOfFile("coroutine has finished");
+    });
 
 
 void writePadding(size_t len, Sink & sink);
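
A minimal sketch of the new eof hook on sinkToSource(): the caller decides what happens when the producer coroutine is exhausted. Note that the hook is expected to throw; if it returns, read() falls through to abort().

    auto source = sinkToSource(
        [](Sink & sink) {
            sink("hello world");   // producer: push bytes into the sink
        },
        []() {
            // custom end-of-data behaviour; must not return normally
            throw EndOfFile("producer finished");
        });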
diff --git a/src/libutil/sync.hh b/src/libutil/sync.hh
index 3b2710f6fd30..e1d591d77a84 100644
--- a/src/libutil/sync.hh
+++ b/src/libutil/sync.hh
@@ -57,11 +57,11 @@ public:
         }
 
         template<class Rep, class Period>
-        void wait_for(std::condition_variable & cv,
+        std::cv_status wait_for(std::condition_variable & cv,
             const std::chrono::duration<Rep, Period> & duration)
         {
             assert(s);
-            cv.wait_for(lk, duration);
+            return cv.wait_for(lk, duration);
         }
 
         template<class Rep, class Period, class Predicate>
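
A minimal sketch of why returning std::cv_status matters: a waiter can now distinguish a timeout from a notification. Here state_ is a Sync<...> and wakeup a std::condition_variable, both illustrative names:

    auto state(state_.lock());
    if (state.wait_for(wakeup, std::chrono::seconds(10)) == std::cv_status::timeout)
        printMsg(lvlError, "timed out waiting for wakeup");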
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 15962236ec65..6bc64ae75a42 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -311,6 +311,14 @@ string readFile(const Path & path, bool drain)
 }
 
 
+void readFile(const Path & path, Sink & sink)
+{
+    AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
+    if (!fd) throw SysError("opening file '%s'", path);
+    drainFD(fd.get(), sink);
+}
+
+
 void writeFile(const Path & path, const string & s, mode_t mode)
 {
     AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode);
@@ -320,6 +328,23 @@ void writeFile(const Path & path, const string & s, mode_t mode)
 }
 
 
+void writeFile(const Path & path, Source & source, mode_t mode)
+{
+    AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode);
+    if (!fd)
+        throw SysError(format("opening file '%1%'") % path);
+
+    std::vector<unsigned char> buf(64 * 1024);
+
+    while (true) {
+        try {
+            auto n = source.read(buf.data(), buf.size());
+            writeFull(fd.get(), (unsigned char *) buf.data(), n);
+        } catch (EndOfFile &) { break; }
+    }
+}
+
+
 string readLine(int fd)
 {
     string s;
@@ -593,7 +618,7 @@ void drainFD(int fd, Sink & sink, bool block)
             throw SysError("making file descriptor non-blocking");
     }
 
-    std::vector<unsigned char> buf(4096);
+    std::vector<unsigned char> buf(64 * 1024);
     while (1) {
         checkInterrupt();
         ssize_t rd = read(fd, buf.data(), buf.size());
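
A minimal sketch of the new streaming readFile() overload, assuming the LambdaSink helper from serialise.hh and a hypothetical path; the file is drained through the sink instead of being read into a single string:

    size_t total = 0;
    LambdaSink counter([&](const unsigned char *, size_t len) {
        total += len;   // e.g. just count the bytes
    });
    readFile("/var/log/some-large-file.log", counter);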
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index 743d238611fc..fc25d27758c7 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -15,6 +15,7 @@
 #include <map>
 #include <sstream>
 #include <experimental/optional>
+#include <future>
 
 #ifndef HAVE_STRUCT_DIRENT_D_TYPE
 #define DT_UNKNOWN 0
@@ -97,10 +98,13 @@ unsigned char getFileType(const Path & path);
 /* Read the contents of a file into a string. */
 string readFile(int fd);
 string readFile(const Path & path, bool drain = false);
+void readFile(const Path & path, Sink & sink);
 
 /* Write a string to a file. */
 void writeFile(const Path & path, const string & s, mode_t mode = 0666);
 
+void writeFile(const Path & path, Source & source, mode_t mode = 0666);
+
 /* Read a line from a file descriptor. */
 string readLine(int fd);
 
@@ -424,44 +428,30 @@ string get(const T & map, const string & key, const string & def = "")
 }
 
 
-/* Call ‘failure’ with the current exception as argument. If ‘failure’
-   throws an exception, abort the program. */
-void callFailure(const std::function<void(std::exception_ptr exc)> & failure,
-    std::exception_ptr exc = std::current_exception());
+/* A callback is a wrapper around a lambda that accepts a value of
+   type T or an exception. (We abuse std::future<T> to pass the value or
+   exception.) */
+template<typename T>
+struct Callback
+{
+    std::function<void(std::future<T>)> fun;
 
+    Callback(std::function<void(std::future<T>)> fun) : fun(fun) { }
 
-/* Evaluate the function ‘f’. If it returns a value, call ‘success’
-   with that value as its argument. If it or ‘success’ throws an
-   exception, call ‘failure’. If ‘failure’ throws an exception, abort
-   the program. */
-template<class T>
-void sync2async(
-    const std::function<void(T)> & success,
-    const std::function<void(std::exception_ptr exc)> & failure,
-    const std::function<T()> & f)
-{
-    try {
-        success(f());
-    } catch (...) {
-        callFailure(failure);
+    void operator()(T && t) const
+    {
+        std::promise<T> promise;
+        promise.set_value(std::move(t));
+        fun(promise.get_future());
     }
-}
 
-
-/* Call the function ‘success’. If it throws an exception, call
-   ‘failure’. If that throws an exception, abort the program. */
-template<class T>
-void callSuccess(
-    const std::function<void(T)> & success,
-    const std::function<void(std::exception_ptr exc)> & failure,
-    T && arg)
-{
-    try {
-        success(arg);
-    } catch (...) {
-        callFailure(failure);
+    void rethrow(const std::exception_ptr & exc = std::current_exception()) const
+    {
+        std::promise<T> promise;
+        promise.set_exception(exc);
+        fun(promise.get_future());
     }
-}
+};
 
 
 /* Start a thread that handles various signals. Also block those signals
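
A minimal sketch of consuming the new Callback<T> wrapper: the std::future either yields a value or rethrows the producer's exception on the consumer side:

    Callback<int> callback{[](std::future<int> result) {
        try {
            printMsg(lvlInfo, "got %d", result.get());
        } catch (std::exception & e) {
            printMsg(lvlError, "operation failed: %s", e.what());
        }
    }};

    callback(42);           // deliver a value
    // callback.rethrow();  // or, inside a catch block, deliver the current exception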
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
index 21d99878a518..94d3a27560fe 100755
--- a/src/nix-build/nix-build.cc
+++ b/src/nix-build/nix-build.cc
@@ -85,7 +85,6 @@ void mainWrapped(int argc, char * * argv)
     BuildMode buildMode = bmNormal;
     bool readStdin = false;
 
-    auto shell = getEnv("SHELL", "/bin/sh");
     std::string envCommand; // interactive shell
     Strings envExclude;
 
@@ -99,6 +98,9 @@ void mainWrapped(int argc, char * * argv)
 
     std::string outLink = "./result";
 
+    // List of environment variables kept for --pure
+    std::set<string> keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL"};
+
     Strings args;
     for (int i = 1; i < argc; ++i)
         args.push_back(argv[i]);
@@ -218,6 +220,9 @@ void mainWrapped(int argc, char * * argv)
             }
         }
 
+        else if (*arg == "--keep")
+            keepVars.insert(getArg(*arg, arg, end));
+
         else if (*arg == "-")
             readStdin = true;
 
@@ -239,10 +244,10 @@ void mainWrapped(int argc, char * * argv)
 
     auto store = openStore();
 
-    EvalState state(myArgs.searchPath, store);
-    state.repair = repair;
+    auto state = std::make_unique<EvalState>(myArgs.searchPath, store);
+    state->repair = repair;
 
-    Bindings & autoArgs = *myArgs.getAutoArgs(state);
+    Bindings & autoArgs = *myArgs.getAutoArgs(*state);
 
     if (packages) {
         std::ostringstream joined;
@@ -268,7 +273,7 @@ void mainWrapped(int argc, char * * argv)
     std::vector<Expr *> exprs;
 
     if (readStdin)
-        exprs = {state.parseStdin()};
+        exprs = {state->parseStdin()};
     else
         for (auto i : left) {
             auto absolute = i;
@@ -276,13 +281,13 @@ void mainWrapped(int argc, char * * argv)
                 absolute = canonPath(absPath(i), true);
             } catch (Error e) {};
             if (fromArgs)
-                exprs.push_back(state.parseExprFromString(i, absPath(".")));
+                exprs.push_back(state->parseExprFromString(i, absPath(".")));
             else if (store->isStorePath(absolute) && std::regex_match(absolute, std::regex(".*\\.drv(!.*)?")))
-                drvs.push_back(DrvInfo(state, store, absolute));
+                drvs.push_back(DrvInfo(*state, store, absolute));
             else
                 /* If we're in a #! script, interpret filenames
                    relative to the script. */
-                exprs.push_back(state.parseExprFromFile(resolveExprPath(state.checkSourcePath(lookupFileArg(state,
+                exprs.push_back(state->parseExprFromFile(resolveExprPath(state->checkSourcePath(lookupFileArg(*state,
                     inShebang && !packages ? absPath(i, absPath(dirOf(script))) : i)))));
         }
 
@@ -291,15 +296,17 @@ void mainWrapped(int argc, char * * argv)
 
     for (auto e : exprs) {
         Value vRoot;
-        state.eval(e, vRoot);
+        state->eval(e, vRoot);
 
         for (auto & i : attrPaths) {
-            Value & v(*findAlongAttrPath(state, i, autoArgs, vRoot));
-            state.forceValue(v);
-            getDerivations(state, v, "", autoArgs, drvs, false);
+            Value & v(*findAlongAttrPath(*state, i, autoArgs, vRoot));
+            state->forceValue(v);
+            getDerivations(*state, v, "", autoArgs, drvs, false);
         }
     }
 
+    state->printStats();
+
     auto buildPaths = [&](const PathSet & paths) {
         /* Note: we do this even when !printMissing to efficiently
            fetch binary cache data. */
@@ -332,12 +339,12 @@ void mainWrapped(int argc, char * * argv)
         if (shell == "") {
 
             try {
-                auto expr = state.parseExprFromString("(import <nixpkgs> {}).bashInteractive", absPath("."));
+                auto expr = state->parseExprFromString("(import <nixpkgs> {}).bashInteractive", absPath("."));
 
                 Value v;
-                state.eval(expr, v);
+                state->eval(expr, v);
 
-                auto drv = getDerivation(state, v, false);
+                auto drv = getDerivation(*state, v, false);
                 if (!drv)
                     throw Error("the 'bashInteractive' attribute in <nixpkgs> did not evaluate to a derivation");
 
@@ -368,7 +375,6 @@ void mainWrapped(int argc, char * * argv)
         auto tmp = getEnv("TMPDIR", getEnv("XDG_RUNTIME_DIR", "/tmp"));
 
         if (pure) {
-            std::set<string> keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL"};
             decltype(env) newEnv;
             for (auto & i : env)
                 if (keepVars.count(i.first))
@@ -415,7 +421,6 @@ void mainWrapped(int argc, char * * argv)
                 R"s([ -n "$PS1" ] && PS1='\n\[\033[1;32m\][nix-shell:\w]\$\[\033[0m\] '; )s"
                 "if [ \"$(type -t runHook)\" = function ]; then runHook shellHook; fi; "
                 "unset NIX_ENFORCE_PURITY; "
-                "unset NIX_INDENT_MAKE; "
                 "shopt -u nullglob; "
                 "unset TZ; %4%"
                 "%5%",
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
index 35603af7082a..644fa6681de3 100644
--- a/src/nix-daemon/nix-daemon.cc
+++ b/src/nix-daemon/nix-daemon.cc
@@ -120,8 +120,6 @@ struct TunnelLogger : public Logger
       want to send out stderr to the client. */
     void startWork()
     {
-        std::vector<std::string> pendingMsgs;
-
         auto state(state_.lock());
         state->canSendStderr = true;
 
@@ -197,7 +195,8 @@ struct TunnelSource : BufferedSource
 {
     Source & from;
     TunnelSource(Source & from) : from(from) { }
-    size_t readUnbuffered(unsigned char * data, size_t len)
+protected:
+    size_t readUnbuffered(unsigned char * data, size_t len) override
     {
         to << STDERR_READ << len;
         to.flush();
@@ -234,7 +233,7 @@ struct RetrieveRegularNARSink : ParseSink
 };
 
 
-static void performOp(TunnelLogger * logger, ref<LocalStore> store,
+static void performOp(TunnelLogger * logger, ref<Store> store,
     bool trusted, unsigned int clientVersion,
     Source & from, Sink & to, unsigned int op)
 {
@@ -363,7 +362,11 @@ static void performOp(TunnelLogger * logger, ref<LocalStore> store,
 
         logger->startWork();
         if (!savedRegular.regular) throw Error("regular file expected");
-        Path path = store->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo);
+
+        auto store2 = store.dynamic_pointer_cast<LocalStore>();
+        if (!store2) throw Error("operation is only supported by LocalStore");
+
+        Path path = store2->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo);
         logger->stopWork();
 
         to << path;
@@ -554,7 +557,7 @@ static void performOp(TunnelLogger * logger, ref<LocalStore> store,
                     ;
                 else if (trusted
                     || name == settings.buildTimeout.name
-                    || name == settings.connectTimeout.name)
+                    || name == "connect-timeout")
                     settings.set(name, value);
                 else if (setSubstituters(settings.substituters))
                     ;
@@ -691,12 +694,23 @@ static void performOp(TunnelLogger * logger, ref<LocalStore> store,
         if (!trusted)
             info.ultimate = false;
 
-        TeeSink tee(from);
-        parseDump(tee, tee.source);
+        std::string saved;
+        std::unique_ptr<Source> source;
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 21)
+            source = std::make_unique<TunnelSource>(from);
+        else {
+            TeeSink tee(from);
+            parseDump(tee, tee.source);
+            saved = std::move(*tee.source.data);
+            source = std::make_unique<StringSource>(saved);
+        }
 
         logger->startWork();
-        store.cast<Store>()->addToStore(info, tee.source.data, (RepairFlag) repair,
+
+        // FIXME: race if addToStore doesn't read source?
+        store.cast<Store>()->addToStore(info, *source, (RepairFlag) repair,
             dontCheckSigs ? NoCheckSigs : CheckSigs, nullptr);
+
         logger->stopWork();
         break;
     }
@@ -767,7 +781,7 @@ static void processConnection(bool trusted)
         Store::Params params; // FIXME: get params from somewhere
         // Disable caching since the client already does that.
         params["path-info-cache-size"] = "0";
-        auto store = make_ref<LocalStore>(params);
+        auto store = openStore(settings.storeUri, params);
 
         tunnelLogger->stopWork();
         to.flush();
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index f60ff9e07182..a43b103f6ec6 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -1284,6 +1284,14 @@ static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opAr
         deleteOldGenerations(globals.profile, globals.dryRun);
     } else if (opArgs.size() == 1 && opArgs.front().find('d') != string::npos) {
         deleteGenerationsOlderThan(globals.profile, opArgs.front(), globals.dryRun);
+    } else if (opArgs.size() == 1 && opArgs.front().find('+') != string::npos) {
+        if (opArgs.front().size() < 2)
+            throw Error(format("invalid number of generations ‘%1%’") % opArgs.front());
+        string str_max = string(opArgs.front(), 1, opArgs.front().size());
+        int max;
+        if (!string2Int(str_max, max) || max == 0)
+            throw Error(format("invalid number of generations to keep ‘%1%’") % opArgs.front());
+        deleteGenerationsGreaterThan(globals.profile, max, globals.dryRun);
     } else {
         std::set<unsigned int> gens;
         for (auto & i : opArgs) {
diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc
index 5049460c7544..eb6d34dd8219 100644
--- a/src/nix-instantiate/nix-instantiate.cc
+++ b/src/nix-instantiate/nix-instantiate.cc
@@ -158,16 +158,16 @@ int main(int argc, char * * argv)
 
         auto store = openStore();
 
-        EvalState state(myArgs.searchPath, store);
-        state.repair = repair;
+        auto state = std::make_unique<EvalState>(myArgs.searchPath, store);
+        state->repair = repair;
 
-        Bindings & autoArgs = *myArgs.getAutoArgs(state);
+        Bindings & autoArgs = *myArgs.getAutoArgs(*state);
 
         if (attrPaths.empty()) attrPaths = {""};
 
         if (findFile) {
             for (auto & i : files) {
-                Path p = state.findFile(i);
+                Path p = state->findFile(i);
                 if (p == "") throw Error(format("unable to find '%1%'") % i);
                 std::cout << p << std::endl;
             }
@@ -175,20 +175,20 @@ int main(int argc, char * * argv)
         }
 
         if (readStdin) {
-            Expr * e = state.parseStdin();
-            processExpr(state, attrPaths, parseOnly, strict, autoArgs,
+            Expr * e = state->parseStdin();
+            processExpr(*state, attrPaths, parseOnly, strict, autoArgs,
                 evalOnly, outputKind, xmlOutputSourceLocation, e);
         } else if (files.empty() && !fromArgs)
             files.push_back("./default.nix");
 
         for (auto & i : files) {
             Expr * e = fromArgs
-                ? state.parseExprFromString(i, absPath("."))
-                : state.parseExprFromFile(resolveExprPath(state.checkSourcePath(lookupFileArg(state, i))));
-            processExpr(state, attrPaths, parseOnly, strict, autoArgs,
+                ? state->parseExprFromString(i, absPath("."))
+                : state->parseExprFromFile(resolveExprPath(state->checkSourcePath(lookupFileArg(*state, i))));
+            processExpr(*state, attrPaths, parseOnly, strict, autoArgs,
                 evalOnly, outputKind, xmlOutputSourceLocation, e);
         }
 
-        state.printStats();
+        state->printStats();
     });
 }
diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc
index fa7ee254500c..a3b025723cf1 100644
--- a/src/nix-prefetch-url/nix-prefetch-url.cc
+++ b/src/nix-prefetch-url/nix-prefetch-url.cc
@@ -9,6 +9,10 @@
 
 #include <iostream>
 
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
 using namespace nix;
 
 
@@ -95,9 +99,9 @@ int main(int argc, char * * argv)
             throw UsageError("too many arguments");
 
         auto store = openStore();
-        EvalState state(myArgs.searchPath, store);
+        auto state = std::make_unique<EvalState>(myArgs.searchPath, store);
 
-        Bindings & autoArgs = *myArgs.getAutoArgs(state);
+        Bindings & autoArgs = *myArgs.getAutoArgs(*state);
 
         /* If -A is given, get the URI from the specified Nix
            expression. */
@@ -107,33 +111,33 @@ int main(int argc, char * * argv)
                 throw UsageError("you must specify a URI");
             uri = args[0];
         } else {
-            Path path = resolveExprPath(lookupFileArg(state, args.empty() ? "." : args[0]));
+            Path path = resolveExprPath(lookupFileArg(*state, args.empty() ? "." : args[0]));
             Value vRoot;
-            state.evalFile(path, vRoot);
-            Value & v(*findAlongAttrPath(state, attrPath, autoArgs, vRoot));
-            state.forceAttrs(v);
+            state->evalFile(path, vRoot);
+            Value & v(*findAlongAttrPath(*state, attrPath, autoArgs, vRoot));
+            state->forceAttrs(v);
 
             /* Extract the URI. */
-            auto attr = v.attrs->find(state.symbols.create("urls"));
+            auto attr = v.attrs->find(state->symbols.create("urls"));
             if (attr == v.attrs->end())
                 throw Error("attribute set does not contain a 'urls' attribute");
-            state.forceList(*attr->value);
+            state->forceList(*attr->value);
             if (attr->value->listSize() < 1)
                 throw Error("'urls' list is empty");
-            uri = state.forceString(*attr->value->listElems()[0]);
+            uri = state->forceString(*attr->value->listElems()[0]);
 
             /* Extract the hash mode. */
-            attr = v.attrs->find(state.symbols.create("outputHashMode"));
+            attr = v.attrs->find(state->symbols.create("outputHashMode"));
             if (attr == v.attrs->end())
                 printInfo("warning: this does not look like a fetchurl call");
             else
-                unpack = state.forceString(*attr->value) == "recursive";
+                unpack = state->forceString(*attr->value) == "recursive";
 
             /* Extract the name. */
             if (name.empty()) {
-                attr = v.attrs->find(state.symbols.create("name"));
+                attr = v.attrs->find(state->symbols.create("name"));
                 if (attr != v.attrs->end())
-                    name = state.forceString(*attr->value);
+                    name = state->forceString(*attr->value);
             }
         }
 
@@ -158,16 +162,22 @@ int main(int argc, char * * argv)
 
         if (storePath.empty()) {
 
-            auto actualUri = resolveMirrorUri(state, uri);
-
-            /* Download the file. */
-            DownloadRequest req(actualUri);
-            req.decompress = false;
-            auto result = getDownloader()->download(req);
+            auto actualUri = resolveMirrorUri(*state, uri);
 
             AutoDelete tmpDir(createTempDir(), true);
             Path tmpFile = (Path) tmpDir + "/tmp";
-            writeFile(tmpFile, *result.data);
+
+            /* Download the file. */
+            {
+                AutoCloseFD fd = open(tmpFile.c_str(), O_WRONLY | O_CREAT | O_EXCL, 0600);
+                if (!fd) throw SysError("creating temporary file '%s'", tmpFile);
+
+                FdSink sink(fd.get());
+
+                DownloadRequest req(actualUri);
+                req.decompress = false;
+                getDownloader()->download(std::move(req), sink);
+            }
 
             /* Optionally unpack the file. */
             if (unpack) {
@@ -191,7 +201,7 @@ int main(int argc, char * * argv)
 
             /* FIXME: inefficient; addToStore() will also hash
                this. */
-            hash = unpack ? hashPath(ht, tmpFile).first : hashString(ht, *result.data);
+            hash = unpack ? hashPath(ht, tmpFile).first : hashFile(ht, tmpFile);
 
             if (expectedHash != Hash(ht) && expectedHash != hash)
                 throw Error(format("hash mismatch for '%1%'") % uri);
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index e1e27ceef94d..fe68f681ae28 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -860,7 +860,7 @@ static void opServe(Strings opFlags, Strings opArgs)
             }
 
             case cmdDumpStorePath:
-                dumpPath(readStorePath(*store, in), out);
+                store->narFromPath(readStorePath(*store, in), out);
                 break;
 
             case cmdImportPaths: {
@@ -924,6 +924,28 @@ static void opServe(Strings opFlags, Strings opArgs)
                 break;
             }
 
+            case cmdAddToStoreNar: {
+                if (!writeAllowed) throw Error("importing paths is not allowed");
+
+                ValidPathInfo info;
+                info.path = readStorePath(*store, in);
+                in >> info.deriver;
+                if (!info.deriver.empty())
+                    store->assertStorePath(info.deriver);
+                info.narHash = Hash(readString(in), htSHA256);
+                info.references = readStorePaths<PathSet>(*store, in);
+                in >> info.registrationTime >> info.narSize >> info.ultimate;
+                info.sigs = readStrings<StringSet>(in);
+                in >> info.ca;
+
+                // FIXME: race if addToStore doesn't read source?
+                store->addToStore(info, in, NoRepair, NoCheckSigs);
+
+                out << 1; // indicate success
+
+                break;
+            }
+
             default:
                 throw Error(format("unknown serve command %1%") % cmd);
         }
diff --git a/src/nix/copy.cc b/src/nix/copy.cc
index e4e6c3e303ed..91711c8b46da 100644
--- a/src/nix/copy.cc
+++ b/src/nix/copy.cc
@@ -72,6 +72,10 @@ struct CmdCopy : StorePathsCommand
                 "To populate the current folder build output to a S3 binary cache:",
                 "nix copy --to s3://my-bucket?region=eu-west-1"
             },
+            Example{
+                "To populate the current folder build output to an S3-compatible binary cache:",
+                "nix copy --to s3://my-bucket?region=eu-west-1&endpoint=example.com"
+            },
 #endif
         };
     }
diff --git a/src/nix/installables.cc b/src/nix/installables.cc
index a3fdd8a2808d..0c1ad3ab3db0 100644
--- a/src/nix/installables.cc
+++ b/src/nix/installables.cc
@@ -86,22 +86,6 @@ Buildable Installable::toBuildable()
     return std::move(buildables[0]);
 }
 
-struct InstallableStoreDrv : Installable
-{
-    Path drvPath;
-
-    InstallableStoreDrv(const Path & drvPath) : drvPath(drvPath) { }
-
-    std::string what() override { return drvPath; }
-
-    Buildables toBuildables() override
-    {
-        Buildable b = {drvPath};
-        // FIXME: add outputs?
-        return {b};
-    }
-};
-
 struct InstallableStorePath : Installable
 {
     Path storePath;
@@ -112,7 +96,7 @@ struct InstallableStorePath : Installable
 
     Buildables toBuildables() override
     {
-        return {{"", {{"out", storePath}}}};
+        return {{isDerivation(storePath) ? storePath : "", {{"out", storePath}}}};
     }
 };
 
@@ -226,12 +210,8 @@ static std::vector<std::shared_ptr<Installable>> parseInstallables(
 
             auto path = store->toStorePath(store->followLinksToStore(s));
 
-            if (store->isStorePath(path)) {
-                if (isDerivation(path))
-                    result.push_back(std::make_shared<InstallableStoreDrv>(path));
-                else
-                    result.push_back(std::make_shared<InstallableStorePath>(path));
-            }
+            if (store->isStorePath(path))
+                result.push_back(std::make_shared<InstallableStorePath>(path));
         }
 
         else if (s == "" || std::regex_match(s, attrPathRegex))
diff --git a/src/nix/main.cc b/src/nix/main.cc
index bb107ec7d3f6..69791e223c22 100644
--- a/src/nix/main.cc
+++ b/src/nix/main.cc
@@ -24,7 +24,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
     {
         mkFlag()
             .longName("help")
-            .shortName('h')
             .description("show usage information")
             .handler([&]() { showHelpAndExit(); });
 
@@ -34,9 +33,10 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
             .handler([&]() {
                 std::cout << "The following configuration options are available:\n\n";
                 Table2 tbl;
-                for (const auto & s : settings._getSettings())
-                    if (!s.second.isAlias)
-                        tbl.emplace_back(s.first, s.second.setting->description);
+                std::map<std::string, Config::SettingInfo> settings;
+                globalConfig.getSettings(settings);
+                for (const auto & s : settings)
+                    tbl.emplace_back(s.first, s.second.description);
                 printTable(std::cout, tbl);
                 throw Exit();
             });
diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc
index 47caa401d3c9..dea5f0557b81 100644
--- a/src/nix/path-info.cc
+++ b/src/nix/path-info.cc
@@ -4,8 +4,8 @@
 #include "json.hh"
 #include "common-args.hh"
 
-#include <iomanip>
 #include <algorithm>
+#include <array>
 
 using namespace nix;
 
@@ -13,12 +13,14 @@ struct CmdPathInfo : StorePathsCommand, MixJSON
 {
     bool showSize = false;
     bool showClosureSize = false;
+    bool humanReadable = false;
     bool showSigs = false;
 
     CmdPathInfo()
     {
         mkFlag('s', "size", "print size of the NAR dump of each path", &showSize);
         mkFlag('S', "closure-size", "print sum size of the NAR dumps of the closure of each path", &showClosureSize);
+        mkFlag('h', "human-readable", "with -s and -S, print sizes like 1K 234M 5.67G etc.", &humanReadable);
         mkFlag(0, "sigs", "show signatures", &showSigs);
     }
 
@@ -40,6 +42,10 @@ struct CmdPathInfo : StorePathsCommand, MixJSON
                 "nix path-info -rS /run/current-system | sort -nk2"
             },
             Example{
+                "To show a package's closure size and all its dependencies with human readable sizes:",
+                "nix path-info -rsSh nixpkgs.rust"
+            },
+            Example{
                 "To check the existence of a path in a binary cache:",
                 "nix path-info -r /nix/store/7qvk5c91...-geeqie-1.1 --store https://cache.nixos.org/"
             },
@@ -58,6 +64,25 @@ struct CmdPathInfo : StorePathsCommand, MixJSON
         };
     }
 
+    void printSize(unsigned long long value)
+    {
+        if (!humanReadable) {
+            std::cout << fmt("\t%11d", value);
+            return;
+        }
+
+        static const std::array<char, 9> idents{{
+            ' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'
+        }};
+        size_t power = 0;
+        double res = value;
+        while (res > 1024 && power < idents.size()) {
+            ++power;
+            res /= 1024;
+        }
+        std::cout << fmt("\t%6.1f%c", res, idents.at(power));
+    }
+
     void run(ref<Store> store, Paths storePaths) override
     {
         size_t pathLen = 0;
@@ -78,13 +103,16 @@ struct CmdPathInfo : StorePathsCommand, MixJSON
                 auto info = store->queryPathInfo(storePath);
                 storePath = info->path; // FIXME: screws up padding
 
-                std::cout << storePath << std::string(std::max(0, (int) pathLen - (int) storePath.size()), ' ');
+                std::cout << storePath;
+
+                if (showSize || showClosureSize || showSigs)
+                    std::cout << std::string(std::max(0, (int) pathLen - (int) storePath.size()), ' ');
 
                 if (showSize)
-                    std::cout << '\t' << std::setw(11) << info->narSize;
+                    printSize(info->narSize);
 
                 if (showClosureSize)
-                    std::cout << '\t' << std::setw(11) << store->getClosureSize(storePath).first;
+                    printSize(store->getClosureSize(storePath).first);
 
                 if (showSigs) {
                     std::cout << '\t';
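
As a worked example of the -h formatting above: a narSize of 123456789 bytes is divided by 1024 twice (to about 117.7) and printed as " 117.7M", while without -h it is printed right-aligned as the raw byte count.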
diff --git a/src/nix/repl.cc b/src/nix/repl.cc
index f84774a53367..b71e6f905f23 100644
--- a/src/nix/repl.cc
+++ b/src/nix/repl.cc
@@ -173,9 +173,14 @@ void NixRepl::mainLoop(const std::vector<std::string> & files)
             printMsg(lvlError, format(error + "%1%%2%") % (settings.showTrace ? e.prefix() : "") % e.msg());
         }
 
+        if (input.size() > 0) {
+            // Remove trailing newline before adding to history
+            input.erase(input.size() - 1);
+            linenoiseHistoryAdd(input.c_str());
+        }
+
         // We handled the current input fully, so we should clear it
         // and read brand new input.
-        linenoiseHistoryAdd(input.c_str());
         input.clear();
         std::cout << std::endl;
     }
@@ -385,7 +390,7 @@ bool NixRepl::processLine(string line)
             /* We could do the build in this process using buildPaths(),
                but doing it in a child makes it easier to recover from
                problems / SIGINT. */
-            if (runProgram(settings.nixBinDir + "/nix-store", Strings{"-r", drvPath}) == 0) {
+            if (runProgram(settings.nixBinDir + "/nix", Strings{"build", drvPath}) == 0) {
                 Derivation drv = readDerivation(drvPath);
                 std::cout << std::endl << "this derivation produced the following outputs:" << std::endl;
                 for (auto & i : drv.outputs)
@@ -693,8 +698,8 @@ struct CmdRepl : StoreCommand, MixEvalArgs
 
     void run(ref<Store> store) override
     {
-        NixRepl repl(searchPath, openStore());
-        repl.mainLoop(files);
+        auto repl = std::make_unique<NixRepl>(searchPath, openStore());
+        repl->mainLoop(files);
     }
 };
 
diff --git a/src/nix/run.cc b/src/nix/run.cc
index d04e106e037b..35b763345872 100644
--- a/src/nix/run.cc
+++ b/src/nix/run.cc
@@ -7,11 +7,14 @@
 #include "finally.hh"
 #include "fs-accessor.hh"
 #include "progress-bar.hh"
+#include "affinity.hh"
 
 #if __linux__
 #include <sys/mount.h>
 #endif
 
+#include <queue>
+
 using namespace nix;
 
 std::string chrootHelperName = "__run_in_chroot";
@@ -121,10 +124,27 @@ struct CmdRun : InstallablesCommand
                 unsetenv(var.c_str());
         }
 
+        std::unordered_set<Path> done;
+        std::queue<Path> todo;
+        for (auto & path : outPaths) todo.push(path);
+
         auto unixPath = tokenizeString<Strings>(getEnv("PATH"), ":");
-        for (auto & path : outPaths)
-            if (accessor->stat(path + "/bin").type != FSAccessor::tMissing)
+
+        while (!todo.empty()) {
+            Path path = todo.front();
+            todo.pop();
+            if (!done.insert(path).second) continue;
+
+            if (true)
                 unixPath.push_front(path + "/bin");
+
+            auto propPath = path + "/nix-support/propagated-user-env-packages";
+            if (accessor->stat(propPath).type == FSAccessor::tRegular) {
+                for (auto & p : tokenizeString<Paths>(readFile(propPath)))
+                    todo.push(p);
+            }
+        }
+
         setenv("PATH", concatStringsSep(":", unixPath).c_str(), 1);
 
         std::string cmd = *command.begin();
@@ -135,6 +155,8 @@ struct CmdRun : InstallablesCommand
 
         restoreSignals();
 
+        restoreAffinity();
+
         /* If this is a diverted store (i.e. its "logical" location
            (typically /nix/store) differs from its "physical" location
            (e.g. /home/eelco/nix/store), then run the command in a
diff --git a/src/nix/search.cc b/src/nix/search.cc
index 539676698086..4cb1efa7955b 100644
--- a/src/nix/search.cc
+++ b/src/nix/search.cc
@@ -7,19 +7,25 @@
 #include "common-args.hh"
 #include "json.hh"
 #include "json-to-value.hh"
+#include "shared.hh"
 
 #include <regex>
 #include <fstream>
 
 using namespace nix;
 
-std::string hilite(const std::string & s, const std::smatch & m)
+std::string wrap(std::string prefix, std::string s)
+{
+    return prefix + s + ANSI_NORMAL;
+}
+
+std::string hilite(const std::string & s, const std::smatch & m, std::string postfix)
 {
     return
         m.empty()
         ? s
         : std::string(m.prefix())
-          + ANSI_RED + std::string(m.str()) + ANSI_NORMAL
+          + ANSI_RED + std::string(m.str()) + postfix
           + std::string(m.suffix());
 }
 
@@ -75,6 +81,10 @@ struct CmdSearch : SourceExprCommand, MixJSON
                 "To search for git and frontend or gui:",
                 "nix search git 'frontend|gui'"
             },
+            Example{
+                "To display the description of the found packages:",
+                "nix search git --verbose"
+            }
         };
     }
 
@@ -164,14 +174,10 @@ struct CmdSearch : SourceExprCommand, MixJSON
 
                         } else {
                             results[attrPath] = fmt(
-                                "Attribute name: %s\n"
-                                "Package name: %s\n"
-                                "Version: %s\n"
-                                "Description: %s\n",
-                                hilite(attrPath, attrPathMatch),
-                                hilite(name, nameMatch),
-                                parsed.version,
-                                hilite(description, descriptionMatch));
+                                "* %s (%s)\n  %s\n",
+                                wrap("\e[0;1m", hilite(attrPath, attrPathMatch, "\e[0;1m")),
+                                wrap("\e[0;2m", hilite(parsed.fullName, nameMatch, "\e[0;2m")),
+                                hilite(description, descriptionMatch, ANSI_NORMAL));
                         }
                     }
 
@@ -263,6 +269,10 @@ struct CmdSearch : SourceExprCommand, MixJSON
                 throw SysError("cannot rename '%s' to '%s'", tmpFile, jsonCacheFileName);
         }
 
+        if (results.size() == 0)
+            throw Error("no results for the given search term(s)!");
+
+        RunPager pager;
         for (auto el : results) std::cout << el.second << "\n";
 
     }
diff --git a/src/nix/show-config.cc b/src/nix/show-config.cc
index c64b12c8dd62..86638b50d2c6 100644
--- a/src/nix/show-config.cc
+++ b/src/nix/show-config.cc
@@ -27,10 +27,12 @@ struct CmdShowConfig : Command, MixJSON
         if (json) {
             // FIXME: use appropriate JSON types (bool, ints, etc).
             JSONObject jsonObj(std::cout);
-            settings.toJSON(jsonObj);
+            globalConfig.toJSON(jsonObj);
         } else {
-            for (auto & s : settings.getSettings())
-                std::cout << s.first + " = " + s.second + "\n";
+            std::map<std::string, Config::SettingInfo> settings;
+            globalConfig.getSettings(settings);
+            for (auto & s : settings)
+                std::cout << s.first + " = " + s.second.value + "\n";
         }
     }
 };
diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc
index 758bbbc688bc..35c44a70cf52 100644
--- a/src/nix/upgrade-nix.cc
+++ b/src/nix/upgrade-nix.cc
@@ -1,14 +1,18 @@
 #include "command.hh"
+#include "common-args.hh"
 #include "store-api.hh"
 #include "download.hh"
 #include "eval.hh"
 #include "attr-path.hh"
+#include "names.hh"
+#include "progress-bar.hh"
 
 using namespace nix;
 
-struct CmdUpgradeNix : StoreCommand
+struct CmdUpgradeNix : MixDryRun, StoreCommand
 {
     Path profileDir;
+    std::string storePathsUrl = "https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix";
 
     CmdUpgradeNix()
     {
@@ -18,6 +22,12 @@ struct CmdUpgradeNix : StoreCommand
             .labels({"profile-dir"})
             .description("the Nix profile to upgrade")
             .dest(&profileDir);
+
+        mkFlag()
+            .longName("nix-store-paths-url")
+            .labels({"url"})
+            .description("URL of the file that contains the store paths of the latest Nix release")
+            .dest(&storePathsUrl);
     }
 
     std::string name() override
@@ -46,7 +56,7 @@ struct CmdUpgradeNix : StoreCommand
 
     void run(ref<Store> store) override
     {
-        settings.pureEval = true;
+        evalSettings.pureEval = true;
 
         if (profileDir == "")
             profileDir = getProfileDir(store);
@@ -59,6 +69,14 @@ struct CmdUpgradeNix : StoreCommand
             storePath = getLatestNix(store);
         }
 
+        auto version = DrvName(storePathToName(storePath)).version;
+
+        if (dryRun) {
+            stopProgressBar();
+            printError("would upgrade to version %s", version);
+            return;
+        }
+
         {
             Activity act(*logger, lvlInfo, actUnknown, fmt("downloading '%s'...", storePath));
             store->ensurePath(storePath);
@@ -72,11 +90,15 @@ struct CmdUpgradeNix : StoreCommand
                 throw Error("could not verify that '%s' works", program);
         }
 
+        stopProgressBar();
+
         {
             Activity act(*logger, lvlInfo, actUnknown, fmt("installing '%s' into profile '%s'...", storePath, profileDir));
             runProgram(settings.nixBinDir + "/nix-env", false,
                 {"--profile", profileDir, "-i", storePath, "--no-sandbox"});
         }
+
+        printError(ANSI_GREEN "upgrade to version %s done" ANSI_NORMAL, version);
     }
 
     /* Return the profile in which Nix is installed. */
@@ -98,11 +120,18 @@ struct CmdUpgradeNix : StoreCommand
         if (hasPrefix(where, "/run/current-system"))
             throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'");
 
-        Path profileDir;
-        Path userEnv;
+        Path profileDir = dirOf(where);
+
+        // Resolve profile to /nix/var/nix/profiles/<name> link.
+        while (canonPath(profileDir).find("/profiles/") == std::string::npos && isLink(profileDir))
+            profileDir = readLink(profileDir);
+
+        printInfo("found profile '%s'", profileDir);
+
+        Path userEnv = canonPath(profileDir, true);
 
         if (baseNameOf(where) != "bin" ||
-            !hasSuffix(userEnv = canonPath(profileDir = dirOf(where), true), "user-environment"))
+            !hasSuffix(userEnv, "user-environment"))
             throw Error("directory '%s' does not appear to be part of a Nix profile", where);
 
         if (!store->isValidPath(userEnv))
@@ -115,16 +144,16 @@ struct CmdUpgradeNix : StoreCommand
     Path getLatestNix(ref<Store> store)
     {
         // FIXME: use nixos.org?
-        auto req = DownloadRequest("https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix");
+        auto req = DownloadRequest(storePathsUrl);
         auto res = getDownloader()->download(req);
 
-        EvalState state(Strings(), store);
-        auto v = state.allocValue();
-        state.eval(state.parseExprFromString(*res.data, "/no-such-path"), *v);
-        Bindings & bindings(*state.allocBindings(0));
-        auto v2 = findAlongAttrPath(state, settings.thisSystem, bindings, *v);
+        auto state = std::make_unique<EvalState>(Strings(), store);
+        auto v = state->allocValue();
+        state->eval(state->parseExprFromString(*res.data, "/no-such-path"), *v);
+        Bindings & bindings(*state->allocBindings(0));
+        auto v2 = findAlongAttrPath(*state, settings.thisSystem, bindings, *v);
 
-        return state.forceString(*v2);
+        return state->forceString(*v2);
     }
 };
 
diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc
index 17e0595ae887..325a2be0a793 100644
--- a/src/nix/why-depends.cc
+++ b/src/nix/why-depends.cc
@@ -2,6 +2,7 @@
 #include "store-api.hh"
 #include "progress-bar.hh"
 #include "fs-accessor.hh"
+#include "shared.hh"
 
 #include <queue>
 
@@ -237,6 +238,7 @@ struct CmdWhyDepends : SourceExprCommand
 
             visitPath(node.path);
 
+            RunPager pager;
             for (auto & ref : refs) {
                 auto hash = storePathToHash(ref.second->path);
 
diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh
index 7365a550e57f..eb58ae7c12a8 100644
--- a/tests/binary-cache.sh
+++ b/tests/binary-cache.sh
@@ -52,9 +52,6 @@ export _NIX_FORCE_HTTP_BINARY_CACHE_STORE=1
 basicTests
 
 
-unset _NIX_FORCE_HTTP_BINARY_CACHE_STORE
-
-
 # Test whether Nix notices if the NAR doesn't match the hash in the NAR info.
 clearStore
 
@@ -79,18 +76,29 @@ if nix-store --substituters "file://$cacheDir" -r $outPath; then
 fi
 
 
-# Test whether fallback works if we have cached info but the
-# corresponding NAR has disappeared.
+# Test whether fallback works if a NAR has disappeared. This does not require --fallback.
 clearStore
 
-nix-build --substituters "file://$cacheDir" dependencies.nix --dry-run # get info
+mv $cacheDir/nar $cacheDir/nar2
+
+nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result
 
-mkdir $cacheDir/tmp
-mv $cacheDir/*.nar* $cacheDir/tmp/
+mv $cacheDir/nar2 $cacheDir/nar
 
-NIX_DEBUG_SUBST=1 nix-build --substituters "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result --fallback
 
-mv $cacheDir/tmp/* $cacheDir/
+# Test whether fallback works if a NAR is corrupted. This does require --fallback.
+clearStore
+
+mv $cacheDir/nar $cacheDir/nar2
+mkdir $cacheDir/nar
+for i in $(cd $cacheDir/nar2 && echo *); do touch $cacheDir/nar/$i; done
+
+(! nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result)
+
+nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result --fallback
+
+rm -rf $cacheDir/nar
+mv $cacheDir/nar2 $cacheDir/nar
 
 
 # Test whether building works if the binary cache contains an
@@ -107,6 +115,7 @@ if [ -n "$HAVE_SODIUM" ]; then
 
 # Create a signed binary cache.
 clearCache
+clearCacheCache
 
 declare -a res=($(nix-store --generate-binary-cache-key test.nixos.org-1 $TEST_ROOT/sk1 $TEST_ROOT/pk1 ))
 publicKey="$(cat $TEST_ROOT/pk1)"
@@ -117,7 +126,7 @@ badKey="$(cat $TEST_ROOT/pk2)"
 res=($(nix-store --generate-binary-cache-key foo.nixos.org-1 $TEST_ROOT/sk3 $TEST_ROOT/pk3))
 otherKey="$(cat $TEST_ROOT/pk3)"
 
-nix copy --to file://$cacheDir?secret-key=$TEST_ROOT/sk1 $outPath
+_NIX_FORCE_HTTP_BINARY_CACHE_STORE= nix copy --to file://$cacheDir?secret-key=$TEST_ROOT/sk1 $outPath
 
 
 # Downloading should fail if we don't provide a key.
diff --git a/tests/brotli.sh b/tests/brotli.sh
index 645dd4214ec6..a3c6e55a8fad 100644
--- a/tests/brotli.sh
+++ b/tests/brotli.sh
@@ -1,10 +1,5 @@
 source common.sh
 
-
-# Only test if we found brotli libraries
-# (CLI tool is likely unavailable if libraries are missing)
-if [ -n "$HAVE_BROTLI" ]; then
-
 clearStore
 clearCache
 
@@ -24,5 +19,3 @@ nix copy --from $cacheURI $outPath --no-check-sigs
 HASH2=$(nix hash-path $outPath)
 
 [[ $HASH = $HASH2 ]]
-
-fi # HAVE_BROTLI
diff --git a/tests/common.sh.in b/tests/common.sh.in
index 195205988afb..2ee2f589dae4 100644
--- a/tests/common.sh.in
+++ b/tests/common.sh.in
@@ -31,7 +31,6 @@ export xmllint="@xmllint@"
 export SHELL="@bash@"
 export PAGER=cat
 export HAVE_SODIUM="@HAVE_SODIUM@"
-export HAVE_BROTLI="@HAVE_BROTLI@"
 
 export version=@PACKAGE_VERSION@
 export system=@system@
@@ -94,11 +93,9 @@ canUseSandbox() {
         return 1
     fi
 
-    if [ -e /proc/sys/kernel/unprivileged_userns_clone ]; then
-        if [ "$(cat /proc/sys/kernel/unprivileged_userns_clone)" != 1 ]; then
-            echo "Unprivileged user namespaces disabled by sysctl, skipping this test..."
-            return 1
-        fi
+    if ! unshare --user true ; then
+        echo "Unprivileged user namespaces disabled by sysctl, skipping this test..."
+        return 1
     fi
 
     return 0
diff --git a/tests/lang/eval-okay-arithmetic.exp b/tests/lang/eval-okay-arithmetic.exp
index b195055b7a09..5c54d10b7b47 100644
--- a/tests/lang/eval-okay-arithmetic.exp
+++ b/tests/lang/eval-okay-arithmetic.exp
@@ -1 +1 @@
-2188
+2216
diff --git a/tests/lang/eval-okay-arithmetic.nix b/tests/lang/eval-okay-arithmetic.nix
index bbbbc4691d75..7e9e6a0b666e 100644
--- a/tests/lang/eval-okay-arithmetic.nix
+++ b/tests/lang/eval-okay-arithmetic.nix
@@ -26,6 +26,10 @@ let {
       (56088 / 123 / 2)
       (3 + 4 * const 5 0 - 6 / id 2)
 
+      (builtins.bitAnd 12 10) # 0b1100 & 0b1010 =  8
+      (builtins.bitOr  12 10) # 0b1100 | 0b1010 = 14
+      (builtins.bitXor 12 10) # 0b1100 ^ 0b1010 =  6
+
       (if 3 < 7 then 1 else err)
       (if 7 < 3 then err else 1)
       (if 3 < 3 then err else 1)
diff --git a/tests/lang/eval-okay-concatmap.exp b/tests/lang/eval-okay-concatmap.exp
new file mode 100644
index 000000000000..3b8be7739deb
--- /dev/null
+++ b/tests/lang/eval-okay-concatmap.exp
@@ -0,0 +1 @@
+[ [ 1 3 5 7 9 ] [ "a" "z" "b" "z" ] ]
diff --git a/tests/lang/eval-okay-concatmap.nix b/tests/lang/eval-okay-concatmap.nix
new file mode 100644
index 000000000000..97da5d37a412
--- /dev/null
+++ b/tests/lang/eval-okay-concatmap.nix
@@ -0,0 +1,5 @@
+with import ./lib.nix;
+
+[ (builtins.concatMap (x: if x / 2 * 2 == x then [] else [ x ]) (range 0 10))
+  (builtins.concatMap (x: [x] ++ ["z"]) ["a" "b"])
+]
diff --git a/tests/lang/eval-okay-float.exp b/tests/lang/eval-okay-float.exp
new file mode 100644
index 000000000000..3c50a8adce86
--- /dev/null
+++ b/tests/lang/eval-okay-float.exp
@@ -0,0 +1 @@
+[ 3.4 3.5 2.5 1.5 ]
diff --git a/tests/lang/eval-okay-float.nix b/tests/lang/eval-okay-float.nix
new file mode 100644
index 000000000000..b2702c7b1668
--- /dev/null
+++ b/tests/lang/eval-okay-float.nix
@@ -0,0 +1,6 @@
+[
+  (1.1 + 2.3)
+  (builtins.add (0.5 + 0.5) (2.0 + 0.5))
+  ((0.5 + 0.5) * (2.0 + 0.5))
+  ((1.5 + 1.5) / (0.5 * 4.0))
+]
diff --git a/tests/lang/eval-okay-fromTOML.exp b/tests/lang/eval-okay-fromTOML.exp
new file mode 100644
index 000000000000..5b9d47122cc5
--- /dev/null
+++ b/tests/lang/eval-okay-fromTOML.exp
@@ -0,0 +1 @@
+[ { clients = { data = [ [ "gamma" "delta" ] [ 1 2 ] ]; hosts = [ "alpha" "omega" ]; }; database = { connection_max = 5000; enabled = true; ports = [ 8001 8001 8002 ]; server = "192.168.1.1"; }; owner = { name = "Tom Preston-Werner"; }; servers = { alpha = { dc = "eqdc10"; ip = "10.0.0.1"; }; beta = { dc = "eqdc10"; ip = "10.0.0.2"; }; }; title = "TOML Example"; } { "'key2'" = "value"; "1234" = "value"; "127.0.0.1" = "value"; a = { b = { c = { }; }; }; arr1 = [ 1 2 3 ]; arr2 = [ "red" "yellow" "green" ]; arr3 = [ [ 1 2 ] [ 3 4 5 ] ]; arr4 = [ "all" "strings" "are the same" "type" ]; arr5 = [ [ 1 2 ] [ "a" "b" "c" ] ]; arr7 = [ 1 2 3 ]; arr8 = [ 1 2 ]; bare-key = "value"; bare_key = "value"; bool1 = true; bool2 = false; "character encoding" = "value"; d = { e = { f = { }; }; }; flt1 = 1; flt2 = 3.1415; flt3 = -0.01; flt4 = 5e+22; flt5 = 1e+06; flt6 = -0.02; flt7 = 6.626e-34; flt8 = 9.22462e+06; g = { h = { i = { }; }; }; int1 = 99; int2 = 42; int3 = 0; int4 = -17; int5 = 1000; int6 = 5349221; int7 = 12345; j = { "ʞ" = { "'l'" = { }; }; }; key = "value"; name = "Orange"; products = [ { name = "Hammer"; sku = 738594937; } { } { color = "gray"; name = "Nail"; sku = 284758393; } ]; str = "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."; table-1 = { key1 = "some string"; key2 = 123; }; table-2 = { key1 = "another string"; key2 = 456; }; x = { y = { z = { w = { name = { first = "Tom"; last = "Preston-Werner"; }; point = { x = 1; y = 2; }; }; }; }; }; "ʎǝʞ" = "value"; } { metadata = { "checksum aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d6531d44de723825aa81398a6415283229725a00fa30713812ab9323faa82fc4"; "checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"; "checksum ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "23ac7c30002a5accbf7e8987d0632fa6de155b7c3d39d0067317a391e00a2ef6"; "checksum arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a1e964f9e24d588183fcb43503abda40d288c8657dfc27311516ce2f05675aef"; }; package = [ { dependencies = [ "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" ]; name = "aho-corasick"; source = "registry+https://github.com/rust-lang/crates.io-index"; version = "0.6.4"; } { name = "ansi_term"; source = "registry+https://github.com/rust-lang/crates.io-index"; version = "0.9.0"; } { dependencies = [ "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)" "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" ]; name = "atty"; source = "registry+https://github.com/rust-lang/crates.io-index"; version = "0.2.10"; } ]; } ]
diff --git a/tests/lang/eval-okay-fromTOML.nix b/tests/lang/eval-okay-fromTOML.nix
new file mode 100644
index 000000000000..8e7cbd1c61e9
--- /dev/null
+++ b/tests/lang/eval-okay-fromTOML.nix
@@ -0,0 +1,184 @@
+[
+
+  (builtins.fromTOML ''
+    # This is a TOML document.
+
+    title = "TOML Example"
+
+    [owner]
+    name = "Tom Preston-Werner"
+    #dob = 1979-05-27T07:32:00-08:00 # First class dates
+
+    [database]
+    server = "192.168.1.1"
+    ports = [ 8001, 8001, 8002 ]
+    connection_max = 5000
+    enabled = true
+
+    [servers]
+
+      # Indentation (tabs and/or spaces) is allowed but not required
+      [servers.alpha]
+      ip = "10.0.0.1"
+      dc = "eqdc10"
+
+      [servers.beta]
+      ip = "10.0.0.2"
+      dc = "eqdc10"
+
+    [clients]
+    data = [ ["gamma", "delta"], [1, 2] ]
+
+    # Line breaks are OK when inside arrays
+    hosts = [
+      "alpha",
+      "omega"
+    ]
+  '')
+
+  (builtins.fromTOML ''
+    key = "value"
+    bare_key = "value"
+    bare-key = "value"
+    1234 = "value"
+
+    "127.0.0.1" = "value"
+    "character encoding" = "value"
+    "ʎǝʞ" = "value"
+    'key2' = "value"
+    #'quoted "value"' = "value"
+
+    name = "Orange"
+
+    # FIXME: cpptoml doesn't handle dotted keys properly yet.
+    #physical.color = "orange"
+    #physical.shape = "round"
+    #site."google.com" = true
+
+    #a.b.c = 1
+    #a.d = 2
+
+    str = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."
+
+    int1 = +99
+    int2 = 42
+    int3 = 0
+    int4 = -17
+    int5 = 1_000
+    int6 = 5_349_221
+    int7 = 1_2_3_4_5
+
+    # FIXME: cpptoml doesn't support these yet:
+
+    #hex1 = 0xDEADBEEF
+    #hex2 = 0xdeadbeef
+    #hex3 = 0xdead_beef
+
+    #oct1 = 0o01234567
+    #oct2 = 0o755
+
+    #bin1 = 0b11010110
+
+    flt1 = +1.0
+    flt2 = 3.1415
+    flt3 = -0.01
+    flt4 = 5e+22
+    flt5 = 1e6
+    flt6 = -2E-2
+    flt7 = 6.626e-34
+    flt8 = 9_224_617.445_991_228_313
+
+    bool1 = true
+    bool2 = false
+
+    # FIXME: not supported because Nix doesn't have a date/time type.
+    #odt1 = 1979-05-27T07:32:00Z
+    #odt2 = 1979-05-27T00:32:00-07:00
+    #odt3 = 1979-05-27T00:32:00.999999-07:00
+    #odt4 = 1979-05-27 07:32:00Z
+    #ldt1 = 1979-05-27T07:32:00
+    #ldt2 = 1979-05-27T00:32:00.999999
+    #ld1 = 1979-05-27
+    #lt1 = 07:32:00
+    #lt2 = 00:32:00.999999
+
+    arr1 = [ 1, 2, 3 ]
+    arr2 = [ "red", "yellow", "green" ]
+    arr3 = [ [ 1, 2 ], [3, 4, 5] ]
+    arr4 = [ "all", 'strings', """are the same""", ''''type'''']
+    arr5 = [ [ 1, 2 ], ["a", "b", "c"] ]
+
+    arr7 = [
+      1, 2, 3
+    ]
+
+    arr8 = [
+      1,
+      2, # this is ok
+    ]
+
+    [table-1]
+    key1 = "some string"
+    key2 = 123
+
+
+    [table-2]
+    key1 = "another string"
+    key2 = 456
+
+    #[dog."tater.man"]
+    #type.name = "pug"
+
+    [a.b.c]
+    [ d.e.f ]
+    [ g .  h  . i ]
+    [ j . "ʞ" . 'l' ]
+    [x.y.z.w]
+
+    name = { first = "Tom", last = "Preston-Werner" }
+    point = { x = 1, y = 2 }
+    #animal = { type.name = "pug" }
+
+    [[products]]
+    name = "Hammer"
+    sku = 738594937
+
+    [[products]]
+
+    [[products]]
+    name = "Nail"
+    sku = 284758393
+    color = "gray"
+  '')
+
+  (builtins.fromTOML ''
+    [[package]]
+    name = "aho-corasick"
+    version = "0.6.4"
+    source = "registry+https://github.com/rust-lang/crates.io-index"
+    dependencies = [
+     "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+    ]
+
+    [[package]]
+    name = "ansi_term"
+    version = "0.9.0"
+    source = "registry+https://github.com/rust-lang/crates.io-index"
+
+    [[package]]
+    name = "atty"
+    version = "0.2.10"
+    source = "registry+https://github.com/rust-lang/crates.io-index"
+    dependencies = [
+     "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)",
+     "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+     "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+    ]
+
+    [metadata]
+    "checksum aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d6531d44de723825aa81398a6415283229725a00fa30713812ab9323faa82fc4"
+    "checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
+    "checksum ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "23ac7c30002a5accbf7e8987d0632fa6de155b7c3d39d0067317a391e00a2ef6"
+    "checksum arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a1e964f9e24d588183fcb43503abda40d288c8657dfc27311516ce2f05675aef"
+  '')
+]
diff --git a/tests/lang/eval-okay-mapattrs.exp b/tests/lang/eval-okay-mapattrs.exp
new file mode 100644
index 000000000000..3f113f17bab1
--- /dev/null
+++ b/tests/lang/eval-okay-mapattrs.exp
@@ -0,0 +1 @@
+{ x = "x-foo"; y = "y-bar"; }
diff --git a/tests/lang/eval-okay-mapattrs.nix b/tests/lang/eval-okay-mapattrs.nix
new file mode 100644
index 000000000000..f075b6275e5a
--- /dev/null
+++ b/tests/lang/eval-okay-mapattrs.nix
@@ -0,0 +1,3 @@
+with import ./lib.nix;
+
+builtins.mapAttrs (name: value: name + "-" + value) { x = "foo"; y = "bar"; }
diff --git a/tests/local.mk b/tests/local.mk
index 9df0adf1bfd8..1ff68348b3c7 100644
--- a/tests/local.mk
+++ b/tests/local.mk
@@ -25,7 +25,8 @@ nix_tests = \
   pure-eval.sh \
   check.sh \
   plugins.sh \
-  search.sh
+  search.sh \
+  nix-copy-ssh.sh
   # parallel.sh
 
 install-tests += $(foreach x, $(nix_tests), tests/$(x))
diff --git a/tests/nix-copy-ssh.sh b/tests/nix-copy-ssh.sh
new file mode 100644
index 000000000000..6aba667a45a6
--- /dev/null
+++ b/tests/nix-copy-ssh.sh
@@ -0,0 +1,20 @@
+source common.sh
+
+clearStore
+clearCache
+
+remoteRoot=$TEST_ROOT/store2
+chmod -R u+w "$remoteRoot" || true
+rm -rf "$remoteRoot"
+
+outPath=$(nix-build dependencies.nix)
+
+nix copy --to "ssh://localhost?store=$NIX_STORE_DIR&remote-store=$remoteRoot%3fstore=$NIX_STORE_DIR%26real=$remoteRoot$NIX_STORE_DIR" $outPath
+
+[ -f $remoteRoot$outPath/foobar ]
+
+clearStore
+
+nix copy --no-check-sigs --from "ssh://localhost?store=$NIX_STORE_DIR&remote-store=$remoteRoot%3fstore=$NIX_STORE_DIR%26real=$remoteRoot$NIX_STORE_DIR" $outPath
+
+[ -f $outPath/foobar ]
diff --git a/tests/nix-shell.sh b/tests/nix-shell.sh
index d25c456cedfb..6024ea399750 100644
--- a/tests/nix-shell.sh
+++ b/tests/nix-shell.sh
@@ -4,12 +4,19 @@ clearStore
 
 # Test nix-shell -A
 export IMPURE_VAR=foo
+export SELECTED_IMPURE_VAR=baz
 export NIX_BUILD_SHELL=$SHELL
 output=$(nix-shell --pure shell.nix -A shellDrv --run \
     'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"')
 
 [ "$output" = " - foo - bar" ]
 
+# Test --keep
+output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR shell.nix -A shellDrv --run \
+    'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $SELECTED_IMPURE_VAR"')
+
+[ "$output" = " - foo - bar - baz" ]
+
 # Test nix-shell on a .drv
 [[ $(nix-shell --pure $(nix-instantiate shell.nix -A shellDrv) --run \
     'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]]
diff --git a/tests/plugins/plugintest.cc b/tests/plugins/plugintest.cc
index 8da15ebabd7d..c085d33295be 100644
--- a/tests/plugins/plugintest.cc
+++ b/tests/plugins/plugintest.cc
@@ -1,16 +1,21 @@
-#include "globals.hh"
+#include "config.hh"
 #include "primops.hh"
 
 using namespace nix;
 
-static BaseSetting<bool> settingSet{false, "setting-set",
+struct MySettings : Config
+{
+    Setting<bool> settingSet{this, false, "setting-set",
         "Whether the plugin-defined setting was set"};
+};
+
+MySettings mySettings;
 
-static RegisterSetting rs(&settingSet);
+static GlobalConfig::Register rs(&mySettings);
 
 static void prim_anotherNull (EvalState & state, const Pos & pos, Value ** args, Value & v)
 {
-    if (settingSet)
+    if (mySettings.settingSet)
         mkNull(v);
     else
         mkBool(v, false);
diff --git a/tests/restricted.sh b/tests/restricted.sh
index a87d8ec2c940..e02becc60e38 100644
--- a/tests/restricted.sh
+++ b/tests/restricted.sh
@@ -38,3 +38,14 @@ ln -sfn $(pwd)/restricted.nix $TEST_ROOT/restricted.nix
 nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I $TEST_ROOT -I .
 
 [[ $(nix eval --raw --restrict-eval -I . '(builtins.readFile "${import ./simple.nix}/hello")') == 'Hello World!' ]]
+
+# Check whether we can leak symlink information through directory traversal.
+traverseDir="$(pwd)/restricted-traverse-me"
+ln -sfn "$(pwd)/restricted-secret" "$(pwd)/restricted-innocent"
+mkdir -p "$traverseDir"
+goUp="..$(echo "$traverseDir" | sed -e 's,[^/]\+,..,g')"
+output="$(nix eval --raw --restrict-eval -I "$traverseDir" \
+    "(builtins.readFile \"$traverseDir/$goUp$(pwd)/restricted-innocent\")" \
+    2>&1 || :)"
+echo "$output" | grep "is forbidden"
+! echo "$output" | grep -F restricted-secret
diff --git a/tests/search.sh b/tests/search.sh
index 0b26a125120f..14da3127b0d5 100644
--- a/tests/search.sh
+++ b/tests/search.sh
@@ -30,7 +30,7 @@ clearCache
 (( $(nix search nosuchpackageexists | wc -l) == 0 ))
 
 # Search for multiple arguments
-(( $(nix search hello empty | wc -l) == 5 ))
+(( $(nix search hello empty | wc -l) == 3 ))
 
 # Multiple arguments will not exist
 (( $(nix search hello broken | wc -l) == 0 ))