-rw-r--r--  aterm-gc.supp                            |   32
-rwxr-xr-x  bootstrap.sh                             |    1
-rw-r--r--  configure.ac                             |   66
-rw-r--r--  corepkgs/buildenv/default.nix            |    4
-rw-r--r--  corepkgs/nar/nar.sh.in                   |    2
-rw-r--r--  doc/manual/conf-file.xml                 |    2
-rw-r--r--  doc/manual/nix-store.xml                 |   19
-rw-r--r--  doc/manual/quick-start.xml               |    2
-rw-r--r--  externals/Makefile.am                    |   54
-rw-r--r--  release.nix                              |   15
-rwxr-xr-x  scripts/build-remote.pl.in               |  305
-rw-r--r--  scripts/copy-from-other-stores.pl.in     |   65
-rw-r--r--  scripts/download-using-manifests.pl.in   |  311
-rw-r--r--  scripts/nix-build.in                     |    7
-rw-r--r--  scripts/nix-push.in                      |   17
-rw-r--r--  scripts/readmanifest.pm.in               |   29
-rw-r--r--  scripts/ssh.pm                           |   11
-rw-r--r--  src/bin2c/bin2c.c                        |    4
-rw-r--r--  src/libmain/shared.cc                    |   23
-rw-r--r--  src/libstore/Makefile.am                 |   11
-rw-r--r--  src/libstore/build.cc                    |  539
-rw-r--r--  src/libstore/derivations.hh              |    4
-rw-r--r--  src/libstore/gc.cc                       |   88
-rw-r--r--  src/libstore/globals.cc                  |    2
-rw-r--r--  src/libstore/local-store.cc              | 1124
-rw-r--r--  src/libstore/local-store.hh              |  108
-rw-r--r--  src/libstore/misc.cc                     |   13
-rw-r--r--  src/libstore/misc.hh                     |    2
-rw-r--r--  src/libstore/optimise-store.cc           |    2
-rw-r--r--  src/libstore/references.cc               |    2
-rw-r--r--  src/libstore/references.hh               |    2
-rw-r--r--  src/libstore/remote-store.cc             |   64
-rw-r--r--  src/libstore/remote-store.hh             |    8
-rw-r--r--  src/libstore/schema.sql                  |   44
-rw-r--r--  src/libstore/store-api.cc                |   36
-rw-r--r--  src/libstore/store-api.hh                |   74
-rw-r--r--  src/libstore/worker-protocol.hh          |    7
-rw-r--r--  src/libutil/Makefile.am                  |    2
-rw-r--r--  src/libutil/archive.cc                   |    8
-rw-r--r--  src/libutil/archive.hh                   |    1
-rw-r--r--  src/libutil/hash.cc                      |   17
-rw-r--r--  src/libutil/hash.hh                      |   10
-rw-r--r--  src/libutil/types.hh                     |    2
-rw-r--r--  src/libutil/util.cc                      |   14
-rw-r--r--  src/libutil/util.hh                      |    7
-rw-r--r--  src/nix-env/Makefile.am                  |    2
-rw-r--r--  src/nix-env/nix-env.cc                   |    2
-rw-r--r--  src/nix-hash/Makefile.am                 |    2
-rw-r--r--  src/nix-hash/nix-hash.cc                 |    4
-rw-r--r--  src/nix-instantiate/Makefile.am          |    2
-rw-r--r--  src/nix-instantiate/nix-instantiate.cc   |    2
-rw-r--r--  src/nix-log2xml/log2xml.cc               |   14
-rw-r--r--  src/nix-setuid-helper/Makefile.am        |    3
-rw-r--r--  src/nix-store/Makefile.am                |    2
-rw-r--r--  src/nix-store/help.txt                   |    6
-rw-r--r--  src/nix-store/nix-store.cc               |   54
-rw-r--r--  src/nix-worker/Makefile.am               |    2
-rw-r--r--  src/nix-worker/nix-worker.cc             |   48
-rw-r--r--  substitute.mk                            |    1
-rw-r--r--  tests/Makefile.am                        |    4
-rw-r--r--  tests/binary-patching.nix                |   18
-rw-r--r--  tests/binary-patching.sh                 |   58
-rwxr-xr-x  tests/build-hook.hook.sh                 |   28
-rw-r--r--  tests/common.sh.in                       |    1
-rw-r--r--  tests/init.sh                            |   10
-rw-r--r--  tests/nix-push.sh                        |    2
-rw-r--r--  tests/referrers.sh                       |   38
-rwxr-xr-x  tests/substituter.sh                     |    3
-rwxr-xr-x  tests/substituter2.sh                    |    1
-rw-r--r--  tests/substitutes.sh                     |    2
70 files changed, 2092 insertions, 1377 deletions
diff --git a/aterm-gc.supp b/aterm-gc.supp
index dcd6371fe5aa..21b9a2372856 100644
--- a/aterm-gc.supp
+++ b/aterm-gc.supp
@@ -115,3 +115,35 @@
    fun:*
    fun:AT_collect
 }
+
+{
+   ATerm library conservatively scans for GC roots
+   Memcheck:Value4
+   fun:*
+   fun:*
+   fun:mark_phase
+}
+
+{
+   ATerm library conservatively scans for GC roots
+   Memcheck:Cond
+   fun:*
+   fun:*
+   fun:mark_phase
+}
+
+{
+   ATerm library conservatively scans for GC roots
+   Memcheck:Value4
+   fun:*
+   fun:*
+   fun:mark_phase_young
+}
+
+{
+   ATerm library conservatively scans for GC roots
+   Memcheck:Cond
+   fun:*
+   fun:*
+   fun:mark_phase_young
+}
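
The ATerm garbage collector scans the stack and registers conservatively, so Memcheck reports spurious `Value4' and `Cond' errors from `mark_phase' and `mark_phase_young'; the entries above silence them. A hypothetical wrapper showing how the suppression file would be used (the Nix binary and its arguments are placeholders, not part of this commit):

    #! /usr/bin/perl -w
    # Run a Nix binary under Valgrind with the ATerm suppressions applied,
    # so conservative-GC false positives don't drown out real errors.
    exec "valgrind", "--suppressions=aterm-gc.supp",
        "nix-instantiate", "--eval-only", "default.nix"
        or die "cannot exec valgrind: $!";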
diff --git a/bootstrap.sh b/bootstrap.sh
index f007c713bf54..2547f5dc88db 100755
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -1,4 +1,5 @@
 #! /bin/sh -e
+rm -f aclocal.m4
 mkdir -p config
 libtoolize --copy
 aclocal
diff --git a/configure.ac b/configure.ac
index f108c53beee4..c2599a75f1ba 100644
--- a/configure.ac
+++ b/configure.ac
@@ -50,39 +50,24 @@ AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')])
 test "$localstatedir" = '${prefix}/var' && localstatedir=/nix/var
 
 
-# Whether to produce a statically linked binary.  On Cygwin, this is
-# the default: dynamically linking against the ATerm DLL does work,
-# except that it requires the ATerm "lib" directory to be in $PATH, as
-# Windows doesn't have anything like an RPATH embedded in executable.
-# Since this is kind of annoying, we use static libraries for now.
-
-AC_ARG_ENABLE(static-nix, AC_HELP_STRING([--enable-static-nix],
-  [produce statically linked binaries]),
-  static_nix=$enableval, static_nix=no)
-
-if test "$sys_name" = cygwin; then
-   static_nix=yes
-fi
-
-if test "$static_nix" = yes; then
+# Windows-specific stuff.  On Cygwin, dynamically linking against the
+# ATerm DLL works, except that it requires the ATerm "lib" directory
+# to be in $PATH, as Windows doesn't have anything like an RPATH
+# embedded in executables.  Since this is kind of annoying, we use
+# static libraries for now.
+if test "$sys_name" = "cygwin"; then
     AC_DISABLE_SHARED
     AC_ENABLE_STATIC
 fi
 
 
-# Windows-specific stuff.
-if test "$sys_name" = "cygwin"; then
-    # We cannot delete open files.
-    AC_DEFINE(CANNOT_DELETE_OPEN_FILES, 1, [Whether it is impossible to delete open files.])
-fi
-
 # Solaris-specific stuff.
 if test "$sys_name" = "sunos"; then
     # Solaris requires -lsocket -lnsl for network functions
-    ADDITIONAL_NETWORK_LIBS="-lsocket -lnsl"
-    AC_SUBST(ADDITIONAL_NETWORK_LIBS)
+    LIBS="-lsocket -lnsl $LIBS"
 fi
 
+
 AC_PROG_CC
 AC_PROG_CXX
 
@@ -101,6 +86,13 @@ AC_DISABLE_STATIC
 AC_ENABLE_SHARED
 AC_PROG_LIBTOOL
 
+if test "$enable_shared" = yes; then
+   SUB_CONFIGURE_FLAGS="--enable-shared --disable-static"
+else
+   SUB_CONFIGURE_FLAGS="--enable-static --disable-shared"
+fi
+AC_SUBST(SUB_CONFIGURE_FLAGS)
+
 
 # Use 64-bit file system calls so that we can support files > 2 GiB.
 AC_SYS_LARGEFILE
@@ -229,6 +221,8 @@ AC_ARG_WITH(bzip2, AC_HELP_STRING([--with-bzip2=PATH],
   [prefix of bzip2]),
   bzip2=$withval, bzip2=)
 AM_CONDITIONAL(HAVE_BZIP2, test -n "$bzip2")
+ATERM_VERSION=2.5
+AC_SUBST(ATERM_VERSION)
 if test -z "$bzip2"; then
   # Headers and libraries will be used from the temporary installation
   # in externals/inst-bzip2.
@@ -249,6 +243,24 @@ AC_SUBST(bzip2_include)
 AC_SUBST(bzip2_bin)
 AC_SUBST(bzip2_bin_test)
 
+AC_ARG_WITH(sqlite, AC_HELP_STRING([--with-sqlite=PATH],
+  [prefix of SQLite]),
+  sqlite=$withval, sqlite=)
+AM_CONDITIONAL(HAVE_SQLITE, test -n "$sqlite")
+SQLITE_VERSION=3.7.2
+AC_SUBST(SQLITE_VERSION)
+if test -z "$sqlite"; then
+  sqlite_lib='${top_builddir}/externals/sqlite-$(SQLITE_VERSION)/libsqlite3.la'
+  sqlite_include='-I${top_builddir}/externals/sqlite-$(SQLITE_VERSION)'
+  sqlite_bin='${top_builddir}/externals/sqlite-$(SQLITE_VERSION)'
+else
+  sqlite_lib="-L$sqlite/lib -lsqlite3"
+  sqlite_include="-I$sqlite/include"
+  sqlite_bin="$sqlite/bin"
+fi
+AC_SUBST(sqlite_lib)
+AC_SUBST(sqlite_include)
+AC_SUBST(sqlite_bin)
 
 # Whether to use the Boehm garbage collector.
 AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc],
@@ -285,14 +297,6 @@ if test "$(uname)" = "Darwin"; then
 fi
 
 
-if test "$static_nix" = yes; then
-    # `-all-static' has to be added at the end of configure, because
-    # the C compiler doesn't know about -all-static (it's filtered out
-    # by libtool, but configure doesn't use libtool).
-    LDFLAGS="-all-static $LDFLAGS"
-fi
-
-
 AM_CONFIG_HEADER([config.h])
 AC_CONFIG_FILES([Makefile
    externals/Makefile
diff --git a/corepkgs/buildenv/default.nix b/corepkgs/buildenv/default.nix
index 36dd9d0c6a0a..d76f5274099a 100644
--- a/corepkgs/buildenv/default.nix
+++ b/corepkgs/buildenv/default.nix
@@ -11,4 +11,8 @@ derivation {
   paths = derivations;
   active = map (x: if x ? meta && x.meta ? active then x.meta.active else "true") derivations;
   priority = map (x: if x ? meta && x.meta ? priority then x.meta.priority else "5") derivations;
+
+  # Building user environments remotely just causes huge amounts of
+  # network traffic, so don't do that.
+  preferLocalBuild = true;
 }
diff --git a/corepkgs/nar/nar.sh.in b/corepkgs/nar/nar.sh.in
index 67933ac67f0b..1369d3a21fb2 100644
--- a/corepkgs/nar/nar.sh.in
+++ b/corepkgs/nar/nar.sh.in
@@ -7,8 +7,6 @@ dst=$out/tmp.nar.bz2
 
 @bzip2@ < tmp > $dst
 
-@bindir@/nix-hash -vvvvv --flat --type $hashAlgo --base32 tmp > $out/nar-hash
-
 @bindir@/nix-hash --flat --type $hashAlgo --base32 $dst > $out/narbz2-hash
 
 @coreutils@/mv $out/tmp.nar.bz2 $out/$(@coreutils@/cat $out/narbz2-hash).nar.bz2
diff --git a/doc/manual/conf-file.xml b/doc/manual/conf-file.xml
index ec64e8b114f7..cb47b99418b6 100644
--- a/doc/manual/conf-file.xml
+++ b/doc/manual/conf-file.xml
@@ -260,7 +260,7 @@ build-use-chroot = /dev /proc /bin</programlisting>
     Nix store metadata (in <filename>/nix/var/nix/db</filename>) are
     synchronously flushed to disk.  This improves robustness in case
     of system crashes, but reduces performance.  The default is
-    <literal>false</literal>.</para></listitem>
+    <literal>true</literal>.</para></listitem>
 
   </varlistentry>
     
diff --git a/doc/manual/nix-store.xml b/doc/manual/nix-store.xml
index 10bb3eda5774..a32559c033f4 100644
--- a/doc/manual/nix-store.xml
+++ b/doc/manual/nix-store.xml
@@ -404,6 +404,7 @@ error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4'
     <arg choice='plain'><option>--tree</option></arg>
     <arg choice='plain'><option>--binding</option> <replaceable>name</replaceable></arg>
     <arg choice='plain'><option>--hash</option></arg>
+    <arg choice='plain'><option>--size</option></arg>
     <arg choice='plain'><option>--roots</option></arg>
   </group>
   <arg><option>--use-output</option></arg>
@@ -587,9 +588,21 @@ query is applied to the target of the symlink.</para>
   <varlistentry><term><option>--hash</option></term>
   
     <listitem><para>Prints the SHA-256 hash of the contents of the
-    store paths <replaceable>paths</replaceable>.  Since the hash is
-    stored in the Nix database, this is a fast
-    operation.</para></listitem>
+    store paths <replaceable>paths</replaceable> (that is, the hash of
+    the output of <command>nix-store --dump</command> on the given
+    paths).  Since the hash is stored in the Nix database, this is a
+    fast operation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--size</option></term>
+  
+    <listitem><para>Prints the size in bytes of the contents of the
+    store paths <replaceable>paths</replaceable> (to be precise, the
+    size of the output of <command>nix-store --dump</command> on the
+    given paths).  Note that the actual disk space required by the
+    store paths may be higher, especially on filesystems with large
+    cluster sizes.</para></listitem>
 
   </varlistentry>
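
Since both `--hash' and `--size' are now answered from the Nix database, they can be cross-checked against a fresh serialisation. A minimal sketch (the store path is a placeholder):

    #! /usr/bin/perl -w
    # Cross-check the NAR size stored in the database against an actual dump.
    my $path = "/nix/store/...-hello-2.1.1";            # hypothetical path
    my $narSize = `nix-store --query --size $path`; chomp $narSize;
    my $dumped  = length `nix-store --dump $path`;      # bytes actually serialised
    print "database: $narSize bytes, dump: $dumped bytes\n";   # should agree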
 
diff --git a/doc/manual/quick-start.xml b/doc/manual/quick-start.xml
index 6d96cb5ac0bc..d2431151f360 100644
--- a/doc/manual/quick-start.xml
+++ b/doc/manual/quick-start.xml
@@ -60,7 +60,7 @@ available remotely.</para></listitem>
 in the channel:
 
 <screen>
-$ nix-env -qa '*' <lineannotation>(mind the quotes!)</lineannotation>
+$ nix-env -qa \*
 docbook-xml-4.2
 firefox-1.0pre-PR-0.10.1
 hello-2.1.1
diff --git a/externals/Makefile.am b/externals/Makefile.am
index 63150b1f5e71..bdc29fcb5251 100644
--- a/externals/Makefile.am
+++ b/externals/Makefile.am
@@ -12,30 +12,56 @@ $(BZIP2).tar.gz:
 $(BZIP2): $(BZIP2).tar.gz
 	gunzip < $(srcdir)/$(BZIP2).tar.gz | tar xvf -
 
-have-bzip2:
-	$(MAKE) $(BZIP2)
-	touch have-bzip2
-
 if HAVE_BZIP2
 build-bzip2:
 else
-build-bzip2: have-bzip2
-	(pfx=`pwd` && \
-	cd $(BZIP2) && \
+build-bzip2: $(BZIP2)
+	(cd $(BZIP2) && \
 	$(MAKE) && \
-	$(MAKE) install PREFIX=$$pfx/inst-bzip2)
+	$(MAKE) install PREFIX=$(abs_builddir)/inst-bzip2)
 	touch build-bzip2
 
-install:
+install-exec-local:: build-bzip2
 	mkdir -p $(DESTDIR)${bzip2_bin}
 	$(INSTALL_PROGRAM) $(bzip2_bin_test)/bzip2 $(bzip2_bin_test)/bunzip2 $(DESTDIR)${bzip2_bin}
 endif
 
 
-all: build-bzip2
+# SQLite
+
+SQLITE = sqlite-$(SQLITE_VERSION)
+SQLITE_TAR = sqlite-amalgamation-$(SQLITE_VERSION).tar.gz
+
+$(SQLITE_TAR):
+	@echo "Nix requires the SQLite library to build."
+	@echo "Please download version $(SQLITE_VERSION) from"
+	@echo "  http://www.sqlite.org/$(SQLITE_TAR)"
+	@echo "and place it in the externals/ directory."
+	false
+
+$(SQLITE): $(SQLITE_TAR)
+	gzip -d < $(srcdir)/$(SQLITE_TAR) | tar xvf -
+
+if HAVE_SQLITE
+build-sqlite:
+else
+build-sqlite: $(SQLITE)
+	(cd $(SQLITE) && \
+	CC="$(CC)" CFLAGS="-DSQLITE_ENABLE_COLUMN_METADATA=1" ./configure --disable-static --prefix=$(pkglibdir)/dummy --libdir=${pkglibdir} $(SUB_CONFIGURE_FLAGS) && \
+	$(MAKE) )
+	touch build-sqlite
+
+install-exec-local:: build-sqlite
+	cd $(SQLITE) && make install
+	rm -rf "$(DESTDIR)/$(pkglibdir)/dummy"
+endif
+
+
+all: build-bzip2 build-sqlite
 
-EXTRA_DIST = $(BZIP2).tar.gz
+EXTRA_DIST = $(BZIP2).tar.gz $(SQLITE_TAR)
 
-ext-clean:
-	$(RM) -f have-bzip2 build-bzip2
-	$(RM) -rf $(BZIP2)
+clean:
+	$(RM) -f build-bzip2 build-sqlite
+	$(RM) -rf $(BZIP2) $(SQLITE)
+	$(RM) -rf inst-bzip2
diff --git a/release.nix b/release.nix
index c89d79a7d4c9..1fc9405bdb28 100644
--- a/release.nix
+++ b/release.nix
@@ -33,6 +33,9 @@ let
           stripHash ${bzip2.src}
           cp -pv ${bzip2.src} externals/$strippedName
 
+          stripHash ${sqlite.src}
+          cp -pv ${sqlite.src} externals/$strippedName
+
           # TeX needs a writable font cache.
           export VARTEXFONTS=$TMPDIR/texfonts
         '';
@@ -71,7 +74,7 @@ let
 
         configureFlags = ''
           --disable-init-state
-          --with-bzip2=${bzip2}
+          --with-bzip2=${bzip2} --with-sqlite=${sqlite}
           --enable-gc
         '';
       };
@@ -92,10 +95,10 @@ let
 
         configureFlags = ''
           --disable-init-state --disable-shared
-          --with-bzip2=${bzip2}
+          --with-bzip2=${bzip2} --with-sqlite=${sqlite}
         '';
 
-        lcovFilter = ["*/boost/*" "*-tab.*"];
+        lcovFilter = [ "*/boost/*" "*-tab.*" ];
 
         # We call `dot', and even though we just use it to
         # syntax-check generated dot files, it still requires some
@@ -144,11 +147,11 @@ let
     with import nixpkgs { inherit system; };
 
     releaseTools.rpmBuild rec {
-      name = "nix-rpm";
+      name = "nix-rpm-${diskImage.name}";
       src = jobs.tarball;
       diskImage = diskImageFun vmTools.diskImages;
       memSize = 1024;
-      meta = { schedulingPriority = prio; };
+      meta.schedulingPriority = prio;
     };
 
 
@@ -165,7 +168,7 @@ let
       src = jobs.tarball;
       diskImage = diskImageFun vmTools.diskImages;
       memSize = 1024;
-      meta = { schedulingPriority = prio; };
+      meta.schedulingPriority = prio;
       configureFlags = "--sysconfdir=/etc";
       debRequires = [ "curl" ];
     };
diff --git a/scripts/build-remote.pl.in b/scripts/build-remote.pl.in
index c440b6a0f142..f9bff9c41d72 100755
--- a/scripts/build-remote.pl.in
+++ b/scripts/build-remote.pl.in
@@ -4,6 +4,7 @@ use Fcntl ':flock';
 use English '-no_match_vars';
 use IO::Handle;
 use ssh qw/sshOpts openSSHConnection/;
+no warnings('once');
 
 
 # General operation:
@@ -31,57 +32,22 @@ $ENV{"DISPLAY"} = "";
 $ENV{"SSH_ASKPASS"} = "";
 
 
-my $loadIncreased = 0;
-
-my ($amWilling, $localSystem, $neededSystem, $drvPath, $maxSilentTime) = @ARGV;
-$maxSilentTime = 0 unless defined $maxSilentTime;
-
 sub sendReply {
     my $reply = shift;
     print STDERR "# $reply\n";
 }
 
-sub decline {
-    sendReply "decline";
-    exit 0;
-}
-
-my $currentLoad = $ENV{"NIX_CURRENT_LOAD"};
-decline unless defined $currentLoad;
-mkdir $currentLoad, 0777 or die unless -d $currentLoad;
+sub all { $_ || return 0 for @_; 1 }
 
-my $conf = $ENV{"NIX_REMOTE_SYSTEMS"};
-decline if !defined $conf || ! -e $conf;
-
-my $canBuildLocally = $amWilling && ($localSystem eq $neededSystem);
-
-
-# Read the list of machines.
-my @machines;
-open CONF, "< $conf" or die;
-
-while (<CONF>) {
-    chomp;
-    s/\#.*$//g;
-    next if /^\s*$/;
-    /^\s*(\S+)\s+(\S+)\s+(\S+)\s+(\d+)(\s+([0-9\.]+))?\s*$/ or die;
-    push @machines,
-        { hostName => $1
-        , systemTypes => [split(/,/, $2)]
-        , sshKeys => $3
-        , maxJobs => $4
-        , speedFactor => 1.0 * ($6 || 1)
-        , enabled => 1
-        };
-}
 
-close CONF;
+# Initialisation.
+my $loadIncreased = 0;
 
+my ($localSystem, $maxSilentTime, $printBuildTrace) = @ARGV;
+$maxSilentTime = 0 unless defined $maxSilentTime;
 
-# Acquire the exclusive lock on $currentLoad/main-lock.
-my $mainLock = "$currentLoad/main-lock";
-open MAINLOCK, ">>$mainLock" or die;
-flock(MAINLOCK, LOCK_EX) or die;
+my $currentLoad = $ENV{"NIX_CURRENT_LOAD"};
+my $conf = $ENV{"NIX_REMOTE_SYSTEMS"};
 
 
 sub openSlotLock {
@@ -91,134 +57,189 @@ sub openSlotLock {
     open $slotLock, ">>$slotLockFn" or die;
     return $slotLock;
 }
-    
 
-my $hostName;
-my $slotLock;
 
-while (1) {
+# Read the list of machines.
+my @machines;
+if (defined $conf && -e $conf) {
+    open CONF, "< $conf" or die;
+    while (<CONF>) {
+        chomp;
+        s/\#.*$//g;
+        next if /^\s*$/;
+        my @tokens = split /\s/, $_;
+        push @machines,
+            { hostName => $tokens[0]
+            , systemTypes => [ split(/,/, $tokens[1]) ]
+            , sshKeys => $tokens[2]
+            , maxJobs => int($tokens[3])
+            , speedFactor => 1.0 * (defined $tokens[4] ? int($tokens[4]) : 1)
+            , features => [ split(/,/, $tokens[5] || "") ]
+            , enabled => 1
+            };
+    }
+    close CONF;
+}
+
+
+
+# Wait for the calling process to ask us whether we can build some derivation.
+my ($drvPath, $hostName, $slotLock);
+
+REQ: while (1) {
+    $_ = <STDIN> || exit 0;
+    my ($amWilling, $neededSystem);
+    ($amWilling, $neededSystem, $drvPath, $requiredFeatures) = split;
+    my @requiredFeatures = split /,/, $requiredFeatures;
+
+    my $canBuildLocally = $amWilling && ($localSystem eq $neededSystem);
+
+    if (!defined $currentLoad) {
+        sendReply "decline";
+        next;
+    }
     
-    # Find all machine that can execute this build, i.e., that support
-    # builds for the given platform and are not at their job limit.
-    my $rightType = 0;
-    my @available = ();
-    LOOP: foreach my $cur (@machines) {
-        if ($cur->{enabled} && grep { $neededSystem eq $_ } @{$cur->{systemTypes}}) {
-            $rightType = 1;
-
-            # We have a machine of the right type.  Determine the load on
-            # the machine.
-            my $slot = 0;
-            my $load = 0;
-            my $free;
-            while ($slot < $cur->{maxJobs}) {
-                my $slotLock = openSlotLock($cur, $slot);
-                if (flock($slotLock, LOCK_EX | LOCK_NB)) {
-                    $free = $slot unless defined $free;
-                    flock($slotLock, LOCK_UN) or die;
-                } else {
-                    $load++;
+    # Acquire the exclusive lock on $currentLoad/main-lock.
+    mkdir $currentLoad, 0777 or die unless -d $currentLoad;
+    my $mainLock = "$currentLoad/main-lock";
+    open MAINLOCK, ">>$mainLock" or die;
+    flock(MAINLOCK, LOCK_EX) or die;
+    
+    
+    while (1) {
+        # Find all machines that can execute this build, i.e., that
+        # support builds for the given platform and features, and are
+        # not at their job limit.
+        my $rightType = 0;
+        my @available = ();
+        LOOP: foreach my $cur (@machines) {
+            if ($cur->{enabled}
+                && (grep { $neededSystem eq $_ } @{$cur->{systemTypes}})
+                && all(map { my $f = $_; 0 != grep { $f eq $_ } @{$cur->{features}} } @requiredFeatures))
+            {
+                $rightType = 1;
+
+                # We have a machine of the right type.  Determine the load on
+                # the machine.
+                my $slot = 0;
+                my $load = 0;
+                my $free;
+                while ($slot < $cur->{maxJobs}) {
+                    my $slotLock = openSlotLock($cur, $slot);
+                    if (flock($slotLock, LOCK_EX | LOCK_NB)) {
+                        $free = $slot unless defined $free;
+                        flock($slotLock, LOCK_UN) or die;
+                    } else {
+                        $load++;
+                    }
+                    close $slotLock;
+                    $slot++;
                 }
-                close $slotLock;
-                $slot++;
+                
+                push @available, { machine => $cur, load => $load, free => $free }
+                if $load < $cur->{maxJobs};
             }
-
-            push @available, { machine => $cur, load => $load, free => $free }
-            if $load < $cur->{maxJobs};
         }
-    }
 
-    if (defined $ENV{NIX_DEBUG_HOOK}) {
-        print STDERR "load on " . $_->{machine}->{hostName} . " = " . $_->{load} . "\n"
-            foreach @available;
-    }
+        if (defined $ENV{NIX_DEBUG_HOOK}) {
+            print STDERR "load on " . $_->{machine}->{hostName} . " = " . $_->{load} . "\n"
+                foreach @available;
+        }
 
 
-    # Didn't find any available machine?  Then decline or postpone.
-    if (scalar @available == 0) {
-        # Postpone if we have a machine of the right type, except if the
-        # local system can and wants to do the build.
-        if ($rightType && !$canBuildLocally) {
-            sendReply "postpone";
-            exit 0;
-        } else {
-            decline;
+        # Didn't find any available machine?  Then decline or postpone.
+        if (scalar @available == 0) {
+            # Postpone if we have a machine of the right type, except
+            # if the local system can and wants to do the build.
+            if ($rightType && !$canBuildLocally) {
+                sendReply "postpone";
+            } else {
+                sendReply "decline";                
+            }
+            close MAINLOCK;
+            next REQ;
         }
-    }
-
 
-    # Prioritise the available machines as follows:
-    # - First by load divided by speed factor, rounded to the nearest
-    #   integer.  This causes fast machines to be preferred over slow
-    #   machines with similar loads.
-    # - Then by speed factor.
-    # - Finally by load.
-    sub lf { my $x = shift; return int($x->{load} / $x->{machine}->{speedFactor} + 0.4999); }
-    @available = sort
-        { lf($a) <=> lf($b)
-              || $b->{machine}->{speedFactor} <=> $a->{machine}->{speedFactor}
-              || $a->{load} <=> $b->{load}
-        } @available;
 
+        # Prioritise the available machines as follows:
+        # - First by load divided by speed factor, rounded to the nearest
+        #   integer.  This causes fast machines to be preferred over slow
+        #   machines with similar loads.
+        # - Then by speed factor.
+        # - Finally by load.
+        sub lf { my $x = shift; return int($x->{load} / $x->{machine}->{speedFactor} + 0.4999); }
+        @available = sort
+            { lf($a) <=> lf($b)
+                  || $b->{machine}->{speedFactor} <=> $a->{machine}->{speedFactor}
+                  || $a->{load} <=> $b->{load}
+            } @available;
 
-    # Select the best available machine and lock a free slot.
-    my $selected = $available[0]; 
-    my $machine = $selected->{machine};
 
-    $slotLock = openSlotLock($machine, $selected->{free});
-    flock($slotLock, LOCK_EX | LOCK_NB) or die;
-    utime undef, undef, $slotLock;
+        # Select the best available machine and lock a free slot.
+        my $selected = $available[0]; 
+        my $machine = $selected->{machine};
+        
+        $slotLock = openSlotLock($machine, $selected->{free});
+        flock($slotLock, LOCK_EX | LOCK_NB) or die;
+        utime undef, undef, $slotLock;
 
-    close MAINLOCK;
+        close MAINLOCK;
 
 
-    # Connect to the selected machine.
-    @sshOpts = ("-i", $machine->{sshKeys}, "-x");
-    $hostName = $machine->{hostName};
-    last if openSSHConnection $hostName;
+        # Connect to the selected machine.
+        @sshOpts = ("-i", $machine->{sshKeys}, "-x");
+        $hostName = $machine->{hostName};
+        last REQ if openSSHConnection $hostName;
     
-    warn "unable to open SSH connection to $hostName, trying other available machines...\n";
-    $machine->{enabled} = 0;
+        warn "unable to open SSH connection to $hostName, trying other available machines...\n";
+        $machine->{enabled} = 0;
+    }
 }
 
 
 # Tell Nix we've accepted the build.
 sendReply "accept";
-my $x = <STDIN>;
-chomp $x;
-
-if ($x ne "okay") {
-    exit 0;
-}
+my @inputs = split /\s/, readline(STDIN);
+my @outputs = split /\s/, readline(STDIN);
 
 
-# Do the actual build.
 print STDERR "building `$drvPath' on `$hostName'\n";
+print STDERR "@ build-remote $drvPath $hostName\n" if $printBuildTrace;
 
-my $inputs = `cat inputs`; die if ($? != 0);
-$inputs =~ s/\n/ /g;
-
-my $outputs = `cat outputs`; die if ($? != 0);
-$outputs =~ s/\n/ /g;
-
-print "copying inputs...\n";
 
 my $maybeSign = "";
 $maybeSign = "--sign" if -e "/nix/etc/nix/signing-key.sec";
 
-system("NIX_SSHOPTS=\"@sshOpts\" @bindir@/nix-copy-closure $hostName $maybeSign $drvPath $inputs") == 0
+
+# Register the derivation as a temporary GC root.  Note that $PPID is
+# the PID of the remote SSH process, which, due to the use of a
+# persistent SSH connection, should be the same across all remote
+# command invocations for this session.
+my $rootsDir = "@localstatedir@/nix/gcroots/tmp";
+system("ssh $hostName @sshOpts 'mkdir -m 1777 -p $rootsDir; ln -sfn $drvPath $rootsDir/\$PPID.drv'");
+
+sub removeRoots {
+    system("ssh $hostName @sshOpts 'rm -f $rootsDir/\$PPID.drv $rootsDir/\$PPID.out'");
+}
+
+
+# Copy the derivation and its dependencies to the build machine.
+system("NIX_SSHOPTS=\"@sshOpts\" @bindir@/nix-copy-closure $hostName $maybeSign $drvPath @inputs") == 0
     or die "cannot copy inputs to $hostName: $?";
 
-print "building...\n";
 
-my $buildFlags = "--max-silent-time $maxSilentTime --fallback";
+# Perform the build.
+my $buildFlags = "--max-silent-time $maxSilentTime --fallback --add-root $rootsDir/\$PPID.out --option verbosity 0";
 
-# `-tt' forces allocation of a pseudo-terminal.  This is required to
-# make the remote nix-store process receive a signal when the
-# connection dies.  Without it, the remote process might continue to
-# run indefinitely (that is, until it next tries to write to
-# stdout/stderr).
-if (system("ssh $hostName @sshOpts -tt 'nix-store -r $drvPath $buildFlags > /dev/null'") != 0) {
+# We let the remote side kill its process group when the connection is
+# closed unexpectedly.  This is necessary to ensure that no processes
+# are left running on the remote system if the local Nix process is
+# killed.  (SSH itself doesn't kill child processes if the connection
+# is interrupted unless the `-tt' flag is used to force a pseudo-tty,
+# in which case every child receives SIGHUP; however, `-tt' doesn't
+# work on some platforms when connection sharing is used.)
+pipe STDIN, DUMMY; # make sure we have a readable STDIN
+if (system("ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) {
     # If we couldn't run ssh or there was an ssh problem (indicated by
     # exit code 255), then we return exit code 1; otherwise we assume
     # that the builder failed, which we indicate to Nix using exit
@@ -226,15 +247,23 @@ if (system("ssh $hostName @sshOpts -tt 'nix-store -r $drvPath $buildFlags > /dev
     # the first is a transient failure and the latter is permanent.
     my $res = $? == -1 || ($? >> 8) == 255 ? 1 : 100;
     print STDERR "build of `$drvPath' on `$hostName' failed with exit code $?\n";
+    removeRoots;
     exit $res;
 }
 
-print "build of `$drvPath' on `$hostName' succeeded\n";
+#print "build of `$drvPath' on `$hostName' succeeded\n";
 
-foreach my $output (split '\n', $outputs) {
+
+# Copy the output from the build machine.
+foreach my $output (@outputs) {
     my $maybeSignRemote = "";
     $maybeSignRemote = "--sign" if $UID != 0;
     
-    system("ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output' | @bindir@/nix-store --import > /dev/null") == 0
+    system("ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output'" .
+           "| NIX_HELD_LOCKS=$output @bindir@/nix-store --import > /dev/null") == 0
 	or die "cannot copy $output from $hostName: $?";
 }
+
+
+# Get rid of the temporary GC roots.
+removeRoots;
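
The machines file parsed above has one machine per line: host name, comma-separated system types, SSH identity file, maximum jobs, an optional speed factor, and an optional comma-separated feature list, e.g. (a made-up entry) `nix@build1.example.org x86_64-linux /home/nix/.ssh/id_rsa 4 2 kvm'. A self-contained sketch of the new feature/system matching, reusing the `all' helper from the script:

    # Feature and system-type matching in isolation (the machine is made up).
    sub all { $_ || return 0 for @_; 1 }
    my $machine = { systemTypes => [ "i686-linux", "x86_64-linux" ],
                    features    => [ "kvm", "big-parallel" ] };
    my $neededSystem     = "x86_64-linux";
    my @requiredFeatures = ("kvm");
    my $ok = (grep { $neededSystem eq $_ } @{$machine->{systemTypes}})
        && all(map { my $f = $_; 0 != grep { $f eq $_ } @{$machine->{features}} } @requiredFeatures);
    print $ok ? "match\n" : "no match\n";               # prints "match"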
diff --git a/scripts/copy-from-other-stores.pl.in b/scripts/copy-from-other-stores.pl.in
index 8f0ff4ca8df8..10130c0893ea 100644
--- a/scripts/copy-from-other-stores.pl.in
+++ b/scripts/copy-from-other-stores.pl.in
@@ -17,25 +17,19 @@ foreach my $dir (@remoteStoresAll) {
 }
 
 
+$ENV{"NIX_REMOTE"} = "";
+
+
 sub findStorePath {
     my $storePath = shift;
-    
-    my $storePathName = basename $storePath;
-    
     foreach my $store (@remoteStores) {
-        # Determine whether $storePath exists by looking for the
-        # existence of the info file, and if so, get store path info
-        # from that file.  This rather breaks abstraction: we should
-        # be using `nix-store' for that.  But right now there is no
-        # good way to tell nix-store to access a store mounted under a
-        # different location (there's $NIX_STORE, but that only works
-        # if the remote store is mounted under its "real" location).
-        my $infoFile = "$store/var/nix/db/info/$storePathName";
-        my $storePath2 = "$store/store/$storePathName";
-        if (-f $infoFile && -e $storePath2) {
-            return ($infoFile, $storePath2);
-        }
+        my $sourcePath = "$store/store/" . basename $storePath;
+        next unless -e $sourcePath || -l $sourcePath;
+        $ENV{"NIX_DB_DIR"} = "$store/var/nix/db";
+        return ($store, $sourcePath) if
+            system("@bindir@/nix-store --check-validity $storePath") == 0;
     }
+    return undef;
 }
 
 
@@ -46,37 +40,38 @@ if ($ARGV[0] eq "--query") {
 
         if ($cmd eq "have") {
             my $storePath = <STDIN>; chomp $storePath;
-            (my $infoFile) = findStorePath $storePath;
-            print STDOUT ($infoFile ? "1\n" : "0\n");
+            print STDOUT (defined findStorePath($storePath) ? "1\n" : "0\n");
         }
 
         elsif ($cmd eq "info") {
             my $storePath = <STDIN>; chomp $storePath;
-            (my $infoFile) = findStorePath $storePath;
-            if (!$infoFile) {
+            my ($store, $sourcePath) = findStorePath($storePath);
+            if (!defined $store) {
                 print "0\n";
                 next; # not an error
             }
             print "1\n";
 
-            my $deriver = "";
-            my @references = ();
-
-            open INFO, "<$infoFile" or die "cannot read info file $infoFile\n";
-            while (<INFO>) {
-                chomp;
-                /^([\w-]+): (.*)$/ or die "bad info file";
-                my $key = $1;
-                my $value = $2;
-                if ($key eq "Deriver") { $deriver = $value; }
-                elsif ($key eq "References") { @references = split ' ', $value; }
-            }
-            close INFO;
+            $ENV{"NIX_DB_DIR"} = "$store/var/nix/db";
+            
+            my $deriver = `@bindir@/nix-store --query --deriver $storePath`;
+            die "cannot query deriver of `$storePath'" if $? != 0;
+            chomp $deriver;
+            $deriver = "" if $deriver eq "unknown-deriver";
+
+            my @references = split "\n",
+                `@bindir@/nix-store --query --references $storePath`;
+            die "cannot query references of `$storePath'" if $? != 0;
+
+            my $narSize = `@bindir@/nix-store --query --size $storePath`;
+            die "cannot query size of `$storePath'" if $? != 0;
+            chomp $narSize;
 
             print "$deriver\n";
             print scalar @references, "\n";
             print "$_\n" foreach @references;
-            print "0\n"; # !!! showing size not supported (yet)
+            print "$narSize\n";
+            print "$narSize\n";
         }
 
         else { die "unknown command `$cmd'"; }
@@ -87,8 +82,8 @@ if ($ARGV[0] eq "--query") {
 elsif ($ARGV[0] eq "--substitute") {
     die unless scalar @ARGV == 2;
     my $storePath = $ARGV[1];
-    (my $infoFile, my $sourcePath) = findStorePath $storePath;
-    die unless $infoFile;
+    my ($store, $sourcePath) = findStorePath $storePath;
+    die unless $store;
     print "\n*** Copying `$storePath' from `$sourcePath'\n\n";
     system("$binDir/nix-store --dump $sourcePath | $binDir/nix-store --restore $storePath") == 0
         or die "cannot copy `$sourcePath' to `$storePath'";
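
This script (like download-using-manifests below) implements Nix's substituter interface: invoked with `--query', it reads a command (`have' or `info') on one line and a store path on the next, and answers on stdout. A sketch of driving that protocol by hand; the script location and store path are placeholders:

    #! /usr/bin/perl -w
    # Exercise a substituter's --query protocol manually.
    use IPC::Open2;
    my $pid = open2(my $from, my $to, "./copy-from-other-stores.pl", "--query");
    print $to "have\n/nix/store/...-hello-2.1.1\n";     # hypothetical path
    close $to;                                          # EOF ends the query loop
    my $reply = <$from>;                                # "1\n" if present, else "0\n"
    print "substituter replied: $reply";
    waitpid $pid, 0;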
diff --git a/scripts/download-using-manifests.pl.in b/scripts/download-using-manifests.pl.in
index d48c7dd4bc3f..c50f540f3464 100644
--- a/scripts/download-using-manifests.pl.in
+++ b/scripts/download-using-manifests.pl.in
@@ -31,6 +31,148 @@ for my $manifest (glob "$manifestDir/*.nixmanifest") {
 }
 
 
+sub isValidPath {
+    my $p = shift;
+    return system("$binDir/nix-store --check-validity '$p' 2> /dev/null") == 0;
+}
+
+
+sub parseHash {
+    my $hash = shift;
+    if ($hash =~ /^(.+):(.+)$/) {
+        return ($1, $2);
+    } else {
+        return ("md5", $hash);
+    }
+}
+
+
+# Compute the most efficient sequence of downloads to produce the
+# given path.
+sub computeSmallestDownload {
+    my $targetPath = shift;
+    my $fast = shift;
+    
+    # Build a graph of all store paths that might contribute to the
+    # construction of $targetPath, and the special node "start".  The
+    # edges are either patch operations, or downloads of full NAR
+    # files.  The latter edges only occur between "start" and a store
+    # path.
+    my %graph;
+
+    $graph{"start"} = {d => 0, pred => undef, edges => []};
+
+    my @queue = ();
+    my $queueFront = 0;
+    my %done;
+
+    sub addNode {
+        my $graph = shift;
+        my $u = shift;
+        $$graph{$u} = {d => 999999999999, pred => undef, edges => []}
+            unless defined $$graph{$u};
+    }
+
+    sub addEdge {
+        my $graph = shift;
+        my $u = shift;
+        my $v = shift;
+        my $w = shift;
+        my $type = shift;
+        my $info = shift;
+        addNode $graph, $u;
+        push @{$$graph{$u}->{edges}},
+            {weight => $w, start => $u, end => $v, type => $type, info => $info};
+        my $n = scalar @{$$graph{$u}->{edges}};
+    }
+
+    push @queue, $targetPath;
+
+    while ($queueFront < scalar @queue) {
+        my $u = $queue[$queueFront++];
+        next if defined $done{$u};
+        $done{$u} = 1;
+
+        addNode \%graph, $u;
+
+        # If the path already exists, it has distance 0 from the
+        # "start" node.
+        if (isValidPath($u)) {
+            addEdge \%graph, "start", $u, 0, "present", undef;
+        }
+
+        else {
+
+            # Add patch edges.
+            my $patchList = $patches{$u};
+            foreach my $patch (@{$patchList}) {
+                if (isValidPath($patch->{basePath})) {
+                    # !!! this should be cached
+                    my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash};
+                    my $format = "--base32";
+                    $format = "" if $baseHashAlgo eq "md5";
+                    my $hash = $fast && $baseHashAlgo eq "sha256"
+                        ? `$binDir/nix-store -q --hash "$patch->{basePath}"`
+                        : `$binDir/nix-hash --type '$baseHashAlgo' $format "$patch->{basePath}"`;
+                    chomp $hash;
+                    $hash =~ s/.*://;
+                    next if $hash ne $baseHash;
+                }
+                push @queue, $patch->{basePath};
+                addEdge \%graph, $patch->{basePath}, $u, $patch->{size}, "patch", $patch;
+            }
+
+            # Add NAR file edges to the start node.
+            my $narFileList = $narFiles{$u};
+            foreach my $narFile (@{$narFileList}) {
+                # !!! how to handle files whose size is not known in advance?
+                # For now, assume some arbitrary size (1 MB).
+                addEdge \%graph, "start", $u, ($narFile->{size} || 1000000), "narfile", $narFile;
+            }
+        }
+    }
+
+
+    # Run Dijkstra's shortest path algorithm to determine the shortest
+    # sequence of download and/or patch actions that will produce
+    # $targetPath.
+
+    my @todo = keys %graph;
+
+    while (scalar @todo > 0) {
+
+        # Remove the closest element from the todo list.
+        # !!! inefficient, use a priority queue
+        @todo = sort { -($graph{$a}->{d} <=> $graph{$b}->{d}) } @todo;
+        my $u = pop @todo;
+
+        my $u_ = $graph{$u};
+
+        foreach my $edge (@{$u_->{edges}}) {
+            my $v_ = $graph{$edge->{end}};
+            if ($v_->{d} > $u_->{d} + $edge->{weight}) {
+                $v_->{d} = $u_->{d} + $edge->{weight};
+                # Store the edge; note that edge->start is the
+                # predecessor on the shortest path.
+                $v_->{pred} = $edge; 
+            }
+        }
+    }
+
+
+    # Retrieve the shortest path from "start" to $targetPath.
+    my @path = ();
+    my $cur = $targetPath;
+    return () unless defined $graph{$targetPath}->{pred};
+    while ($cur ne "start") {
+        push @path, $graph{$cur}->{pred};
+        $cur = $graph{$cur}->{pred}->{start};
+    }
+
+    return @path;
+}
+
+
 # Parse the arguments.
 
 if ($ARGV[0] eq "--query") {
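
computeSmallestDownload treats substitution as a shortest-path problem: distances are bytes to download, an already-valid path is reachable from "start" at cost 0, a patch edge costs the patch size, and a full NAR edge costs the NAR size. A toy instance with made-up sizes, using the same relaxation loop as above, where reaching P via an already-present base B plus a 50,000-byte patch beats a 1,000,000-byte full download:

    #! /usr/bin/perl -w
    # Tiny Dijkstra run over the two routes described above.
    my %graph = (
        start => { d => 0,            edges => [ { end => "B", weight => 0 },
                                                 { end => "P", weight => 1000000 } ] },
        B     => { d => 999999999999, edges => [ { end => "P", weight => 50000 } ] },
        P     => { d => 999999999999, edges => [] },
    );
    my @todo = keys %graph;
    while (@todo) {
        @todo = sort { $graph{$b}->{d} <=> $graph{$a}->{d} } @todo;  # closest last
        my $u = pop @todo;
        foreach my $edge (@{$graph{$u}->{edges}}) {
            my $v = $graph{$edge->{end}};
            $v->{d} = $graph{$u}->{d} + $edge->{weight}
                if $v->{d} > $graph{$u}->{d} + $edge->{weight};
        }
    }
    print "cheapest way to P costs $graph{P}->{d} bytes\n";   # prints 50000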
@@ -46,6 +188,7 @@ if ($ARGV[0] eq "--query") {
 
         elsif ($cmd eq "info") {
             my $storePath = <STDIN>; chomp $storePath;
+
             my $info;
             if (defined $narFiles{$storePath}) {
                 $info = @{$narFiles{$storePath}}[0];
@@ -57,13 +200,32 @@ if ($ARGV[0] eq "--query") {
                 print "0\n";
                 next; # not an error
             }
+
             print "1\n";
             print "$info->{deriver}\n";
             my @references = split " ", $info->{references};
             print scalar @references, "\n";
             print "$_\n" foreach @references;
-            my $size = $info->{size} || 0;
-            print "$size\n";
+
+            my @path = computeSmallestDownload $storePath, 1;
+            
+            my $downloadSize = 0;
+            while (scalar @path > 0) {
+                my $edge = pop @path;
+                my $u = $edge->{start};
+                my $v = $edge->{end};
+                if ($edge->{type} eq "patch") {
+                    $downloadSize += $edge->{info}->{size};
+                }
+                elsif ($edge->{type} eq "narfile") {
+                    $downloadSize += $edge->{info}->{size};
+                }
+            }
+                
+            print "$downloadSize\n";
+            
+            my $narSize = $info->{narSize} || 0;
+            print "$narSize\n";
         }
         
         else { die "unknown command `$cmd'"; }
@@ -110,148 +272,9 @@ foreach my $localPath (@{$localPathList}) {
 }
 
 
-# Build a graph of all store paths that might contribute to the
-# construction of $targetPath, and the special node "start".  The
-# edges are either patch operations, or downloads of full NAR files.
-# The latter edges only occur between "start" and a store path.
-
-my %graph;
-
-$graph{"start"} = {d => 0, pred => undef, edges => []};
-
-my @queue = ();
-my $queueFront = 0;
-my %done;
-
-sub addToQueue {
-    my $v = shift;
-    return if defined $done{$v};
-    $done{$v} = 1;
-    push @queue, $v;
-}
-
-sub addNode {
-    my $u = shift;
-    $graph{$u} = {d => 999999999999, pred => undef, edges => []}
-        unless defined $graph{$u};
-}
-
-sub addEdge {
-    my $u = shift;
-    my $v = shift;
-    my $w = shift;
-    my $type = shift;
-    my $info = shift;
-    addNode $u;
-    push @{$graph{$u}->{edges}},
-        {weight => $w, start => $u, end => $v, type => $type, info => $info};
-    my $n = scalar @{$graph{$u}->{edges}};
-}
-
-addToQueue $targetPath;
-
-sub isValidPath {
-    my $p = shift;
-    return system("$binDir/nix-store --check-validity '$p' 2> /dev/null") == 0;
-}
-
-sub parseHash {
-    my $hash = shift;
-    if ($hash =~ /^(.+):(.+)$/) {
-        return ($1, $2);
-    } else {
-        return ("md5", $hash);
-    }
-}
-
-while ($queueFront < scalar @queue) {
-    my $u = $queue[$queueFront++];
-#    print "$u\n";
-
-    addNode $u;
-
-    # If the path already exists, it has distance 0 from the "start"
-    # node.
-    if (isValidPath($u)) {
-        addEdge "start", $u, 0, "present", undef;
-    }
-
-    else {
-
-        # Add patch edges.
-        my $patchList = $patches{$u};
-        foreach my $patch (@{$patchList}) {
-            if (isValidPath($patch->{basePath})) {
-                # !!! this should be cached
-                my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash};
-                my $format = "--base32";
-                $format = "" if $baseHashAlgo eq "md5";
-                my $hash = `$binDir/nix-hash --type '$baseHashAlgo' $format "$patch->{basePath}"`;
-                chomp $hash;
-                if ($hash ne $baseHash) {
-                    print LOGFILE "$$ rejecting $patch->{basePath}\n";
-                    next;
-                }
-            }
-            addToQueue $patch->{basePath};
-            addEdge $patch->{basePath}, $u, $patch->{size}, "patch", $patch;
-        }
-
-        # Add NAR file edges to the start node.
-        my $narFileList = $narFiles{$u};
-        foreach my $narFile (@{$narFileList}) {
-            # !!! how to handle files whose size is not known in advance?
-            # For now, assume some arbitrary size (1 MB).
-            addEdge "start", $u, ($narFile->{size} || 1000000), "narfile", $narFile;
-            if ($u eq $targetPath) {
-                my $size = $narFile->{size} || -1;
-                print LOGFILE "$$ full-download-would-be $size\n";
-            }
-        }
-
-    }
-}
-
-
-# Run Dijkstra's shortest path algorithm to determine the shortest
-# sequence of download and/or patch actions that will produce
-# $targetPath.
-
-sub byDistance { # sort by distance, reversed
-    return -($graph{$a}->{d} <=> $graph{$b}->{d});
-}
-
-my @todo = keys %graph;
-
-while (scalar @todo > 0) {
-
-    # Remove the closest element from the todo list.
-    @todo = sort byDistance @todo;
-    my $u = pop @todo;
-
-    my $u_ = $graph{$u};
-
-    foreach my $edge (@{$u_->{edges}}) {
-        my $v_ = $graph{$edge->{end}};
-        if ($v_->{d} > $u_->{d} + $edge->{weight}) {
-            $v_->{d} = $u_->{d} + $edge->{weight};
-            # Store the edge; to edge->start is actually the
-            # predecessor.
-            $v_->{pred} = $edge; 
-        }
-    }
-}
-
-
-# Retrieve the shortest path from "start" to $targetPath.
-my @path = ();
-my $cur = $targetPath;
-die "don't know how to produce $targetPath\n"
-    unless defined $graph{$targetPath}->{pred};
-while ($cur ne "start") {
-    push @path, $graph{$cur}->{pred};
-    $cur = $graph{$cur}->{pred}->{start};
-}
+# Compute the shortest path.
+my @path = computeSmallestDownload $targetPath, 0;
+die "don't know how to produce $targetPath\n" if scalar @path == 0;
 
 
 # Traverse the shortest path, perform the actions described by the
diff --git a/scripts/nix-build.in b/scripts/nix-build.in
index ed85d5712128..f9d81b36c7a0 100644
--- a/scripts/nix-build.in
+++ b/scripts/nix-build.in
@@ -123,6 +123,11 @@ EOF
         $verbose = 1;
     }
     
+    elsif ($arg eq "--quiet") {
+        push @buildArgs, $arg;
+        push @instArgs, $arg;
+    }
+    
     elsif (substr($arg, 0, 1) eq "-") {
         push @buildArgs, $arg;
     }
@@ -165,7 +170,7 @@ foreach my $expr (@exprs) {
 
     # Build.
     my @outPaths;
-    $pid = open(OUTPATHS, "-|") || exec "$binDir/nix-store", "--add-root", $outLink, "--indirect", "-rv",
+    $pid = open(OUTPATHS, "-|") || exec "$binDir/nix-store", "--add-root", $outLink, "--indirect", "-r",
         @buildArgs, @drvPaths;
     while (<OUTPATHS>) {chomp; push @outPaths, $_;}
     if (!close OUTPATHS) {
diff --git a/scripts/nix-push.in b/scripts/nix-push.in
index 38097f740162..6760baa7e937 100644
--- a/scripts/nix-push.in
+++ b/scripts/nix-push.in
@@ -172,12 +172,6 @@ for (my $n = 0; $n < scalar @storePaths; $n++) {
     $narbz2Hash =~ /^[0-9a-z]+$/ or die "invalid hash";
     close HASH;
 
-    open HASH, "$narDir/nar-hash" or die "cannot open nar-hash";
-    my $narHash = <HASH>;
-    chomp $narHash;
-    $narHash =~ /^[0-9a-z]+$/ or die "invalid hash";
-    close HASH;
-    
     my $narName = "$narbz2Hash.nar.bz2";
 
     my $narFile = "$narDir/$narName";
@@ -195,6 +189,14 @@ for (my $n = 0; $n < scalar @storePaths; $n++) {
     chomp $deriver;
     $deriver = "" if $deriver eq "unknown-deriver";
 
+    my $narHash = `$binDir/nix-store --query --hash '$storePath'`;
+    die "cannot query hash for `$storePath'" if $? != 0;
+    chomp $narHash;
+
+    my $narSize = `$binDir/nix-store --query --size '$storePath'`;
+    die "cannot query size for `$storePath'" if $? != 0;
+    chomp $narSize;
+
     my $url;
     if ($localCopy) {
         $url = "$targetArchivesUrl/$narName";
@@ -205,7 +207,8 @@ for (my $n = 0; $n < scalar @storePaths; $n++) {
         { url => $url
         , hash => "$hashAlgo:$narbz2Hash"
         , size => $narbz2Size
-        , narHash => "$hashAlgo:$narHash"
+        , narHash => "$narHash"
+        , narSize => $narSize
         , references => $references
         , deriver => $deriver
         }
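
Instead of having the NAR builder hash the dump (the nar-hash file removed above), nix-push now asks the database directly; `--hash' already returns an algorithm-prefixed string, which is why the manifest entry no longer prepends `$hashAlgo'. A sketch of the two queries, with a placeholder path and example outputs:

    my $storePath = "/nix/store/...-hello-2.1.1";            # hypothetical
    my $narHash = `nix-store --query --hash '$storePath'`;   # e.g. "sha256:1arx..."
    die "cannot query hash" if $? != 0; chomp $narHash;
    my $narSize = `nix-store --query --size '$storePath'`;   # unpacked size in bytes
    die "cannot query size" if $? != 0; chomp $narSize;
    print "NarHash: $narHash\nNarSize: $narSize\n";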
diff --git a/scripts/readmanifest.pm.in b/scripts/readmanifest.pm.in
index 7244984ead6a..c2c4be89b271 100644
--- a/scripts/readmanifest.pm.in
+++ b/scripts/readmanifest.pm.in
@@ -33,18 +33,8 @@ sub readManifest {
 
     my $manifestVersion = 2;
 
-    my $storePath;
-    my $url;
-    my $hash;
-    my $size;
-    my $basePath;
-    my $baseHash;
-    my $patchType;
-    my $narHash;
-    my $references;
-    my $deriver;
-    my $hashAlgo;
-    my $copyFrom;
+    my ($storePath, $url, $hash, $size, $basePath, $baseHash, $patchType);
+    my ($narHash, $narSize, $references, $deriver, $hashAlgo, $copyFrom);
 
     while (<MANIFEST>) {
         chomp;
@@ -62,6 +52,7 @@ sub readManifest {
                 undef $hash;
                 undef $size;
                 undef $narHash;
+                undef $narSize;
                 undef $basePath;
                 undef $baseHash;
                 undef $patchType;
@@ -89,7 +80,8 @@ sub readManifest {
                     if (!$found) {
                         push @{$narFileList},
                             { url => $url, hash => $hash, size => $size
-                            , narHash => $narHash, references => $references
+                            , narHash => $narHash, narSize => $narSize
+                            , references => $references
                             , deriver => $deriver, hashAlgo => $hashAlgo
                             };
                     }
@@ -100,8 +92,8 @@ sub readManifest {
                     addPatch $patches, $storePath,
                         { url => $url, hash => $hash, size => $size
                         , basePath => $basePath, baseHash => $baseHash
-                        , narHash => $narHash, patchType => $patchType
-                        , hashAlgo => $hashAlgo
+                        , narHash => $narHash, narSize => $narSize
+                        , patchType => $patchType, hashAlgo => $hashAlgo
                         };
                 }
 
@@ -132,6 +124,7 @@ sub readManifest {
             elsif (/^\s*BaseHash:\s*(\S+)\s*$/) { $baseHash = $1; }
             elsif (/^\s*Type:\s*(\S+)\s*$/) { $patchType = $1; }
             elsif (/^\s*NarHash:\s*(\S+)\s*$/) { $narHash = $1; }
+            elsif (/^\s*NarSize:\s*(\d+)\s*$/) { $narSize = $1; }
             elsif (/^\s*References:\s*(.*)\s*$/) { $references = $1; }
             elsif (/^\s*Deriver:\s*(\S+)\s*$/) { $deriver = $1; }
             elsif (/^\s*ManifestVersion:\s*(\d+)\s*$/) { $manifestVersion = $1; }
@@ -165,8 +158,9 @@ sub writeManifest {
             print MANIFEST "  StorePath: $storePath\n";
             print MANIFEST "  NarURL: $narFile->{url}\n";
             print MANIFEST "  Hash: $narFile->{hash}\n" if defined $narFile->{hash};
-            print MANIFEST "  NarHash: $narFile->{narHash}\n";
             print MANIFEST "  Size: $narFile->{size}\n" if defined $narFile->{size};
+            print MANIFEST "  NarHash: $narFile->{narHash}\n";
+            print MANIFEST "  NarSize: $narFile->{narSize}\n" if $narFile->{narSize};
             print MANIFEST "  References: $narFile->{references}\n"
                 if defined $narFile->{references} && $narFile->{references} ne "";
             print MANIFEST "  Deriver: $narFile->{deriver}\n"
@@ -182,8 +176,9 @@ sub writeManifest {
             print MANIFEST "  StorePath: $storePath\n";
             print MANIFEST "  NarURL: $patch->{url}\n";
             print MANIFEST "  Hash: $patch->{hash}\n";
-            print MANIFEST "  NarHash: $patch->{narHash}\n";
             print MANIFEST "  Size: $patch->{size}\n";
+            print MANIFEST "  NarHash: $patch->{narHash}\n";
+            print MANIFEST "  NarSize: $patch->{narSize}\n" if $patch->{narSize};
             print MANIFEST "  BasePath: $patch->{basePath}\n";
             print MANIFEST "  BaseHash: $patch->{baseHash}\n";
             print MANIFEST "  Type: $patch->{patchType}\n";
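
For reference, a sketch of parsing the new field out of a made-up manifest fragment, using the same regexes as readManifest above:

    # Parse NarHash/NarSize from manifest lines (the values are made up).
    my @stanza = ("  NarHash: sha256:1arx...", "  NarSize: 130048");
    my ($narHash, $narSize);
    foreach (@stanza) {
        if (/^\s*NarHash:\s*(\S+)\s*$/) { $narHash = $1; }
        elsif (/^\s*NarSize:\s*(\d+)\s*$/) { $narSize = $1; }
    }
    print "hash = $narHash, size = $narSize bytes\n";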
diff --git a/scripts/ssh.pm b/scripts/ssh.pm
index c6d667a65d90..44a0e6f31b1c 100644
--- a/scripts/ssh.pm
+++ b/scripts/ssh.pm
@@ -25,13 +25,16 @@ sub openSSHConnection {
     # print "started" when it has established the connection, and wait
     # until we see that.
     open SSH, "ssh $sshHost @sshOpts -M -N -o LocalCommand='echo started' -o PermitLocalCommand=yes |" or die;
+
     while (<SSH>) {
         chomp;
-        last if /started/;
+        if ($_ eq "started") {
+            $sshStarted = 1;
+            return 1;
+        }
     }
-    
-    $sshStarted = 1;
-    return 1;
+
+    return 0;
 }
 
 # Tell the master SSH client to exit.
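
The fix makes the handshake explicit: success is reported only when the literal "started" line, emitted by the LocalCommand once the master connection is up, actually arrives; EOF now yields failure instead of falling through and claiming success. Callers can then guard on the return value (the host name below is a placeholder):

    use ssh qw/sshOpts openSSHConnection/;
    @sshOpts = ("-x");                         # plus identity-file options etc.
    openSSHConnection 'nix@build1.example.org' # hypothetical host
        or die "could not open SSH connection";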
diff --git a/src/bin2c/bin2c.c b/src/bin2c/bin2c.c
index 18bf81d69e25..5ed8a57082fd 100644
--- a/src/bin2c/bin2c.c
+++ b/src/bin2c/bin2c.c
@@ -14,10 +14,10 @@ int main(int argc, char * * argv)
 {
     int c;
     if (argc != 2) abort();
-    print("static unsigned char %s[] = {", argv[1]);
+    print("static unsigned char %s[] = { ", argv[1]);
     while ((c = getchar()) != EOF) {
         print("0x%02x, ", (unsigned char) c);
     }
-    print("};\n");
+    print("0 };\n");
     return 0;
 }
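
The trailing `0' matters because the generated array is also used as a NUL-terminated C string (the embedded SQL schema, built via bin2c in the libstore Makefile below, is handed to SQLite as text). A rough Perl rendition of the fixed behaviour, for illustration:

    #! /usr/bin/perl -w
    # Rough equivalent of the fixed bin2c: the final `0' NUL-terminates
    # the array so it can double as a C string.
    my $name = shift or die "usage: bin2c.pl name < input\n";
    local $/; my $data = <STDIN>;
    print "static unsigned char ${name}[] = { ";
    printf "0x%02x, ", ord($_) for split //, $data;
    print "0 };\n";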
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 29fc13e33627..7c2d9203063e 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -54,25 +54,26 @@ void printGCWarning()
 
 void printMissing(const PathSet & paths)
 {
-    unsigned long long downloadSize;
+    unsigned long long downloadSize, narSize;
     PathSet willBuild, willSubstitute, unknown;
-    queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize);
+    queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, narSize);
 
     if (!willBuild.empty()) {
-        printMsg(lvlInfo, format("the following derivations will be built:"));
+        printMsg(lvlInfo, format("these derivations will be built:"));
         foreach (PathSet::iterator, i, willBuild)
             printMsg(lvlInfo, format("  %1%") % *i);
     }
 
     if (!willSubstitute.empty()) {
-        printMsg(lvlInfo, format("the following paths will be downloaded/copied (%.2f MiB):") %
-            (downloadSize / (1024.0 * 1024.0)));
+        printMsg(lvlInfo, format("these paths will be downloaded/copied (%.2f MiB download, %.2f MiB unpacked):")
+            % (downloadSize / (1024.0 * 1024.0))
+            % (narSize / (1024.0 * 1024.0)));
         foreach (PathSet::iterator, i, willSubstitute)
             printMsg(lvlInfo, format("  %1%") % *i);
     }
 
     if (!unknown.empty()) {
-        printMsg(lvlInfo, format("don't know how to build the following paths%1%:")
+        printMsg(lvlInfo, format("don't know how to build these paths%1%:")
             % (readOnlyMode ? " (may be caused by read-only store access)" : ""));
         foreach (PathSet::iterator, i, unknown)
             printMsg(lvlInfo, format("  %1%") % *i);
@@ -200,17 +201,16 @@ static void initAndRun(int argc, char * * argv)
     remaining.clear();
 
     /* Process default options. */
+    int verbosityDelta = 0;
     for (Strings::iterator i = args.begin(); i != args.end(); ++i) {
         string arg = *i;
-        if (arg == "--verbose" || arg == "-v")
-            verbosity = (Verbosity) ((int) verbosity + 1);
+        if (arg == "--verbose" || arg == "-v") verbosityDelta++;
+        else if (arg == "--quiet") verbosityDelta--;
         else if (arg == "--log-type") {
             ++i;
             if (i == args.end()) throw UsageError("`--log-type' requires an argument");
             setLogType(*i);
         }
-        else if (arg == "--build-output" || arg == "-B")
-            ; /* !!! obsolete - remove eventually */
         else if (arg == "--no-build-output" || arg == "-Q")
             buildVerbosity = lvlVomit;
         else if (arg == "--print-build-trace")
@@ -251,6 +251,9 @@ static void initAndRun(int argc, char * * argv)
         else remaining.push_back(arg);
     }
 
+    verbosityDelta += queryIntSetting("verbosity", lvlInfo);
+    verbosity = (Verbosity) (verbosityDelta < 0 ? 0 : verbosityDelta);
+    
     /* Automatically clean up the temporary roots file when we
        exit. */
     RemoveTempRoots removeTempRoots __attribute__((unused));
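
The net effect of the verbosity change: each `-v' adds one level and each `--quiet' removes one, applied on top of the `verbosity' setting (which defaults to lvlInfo) and clamped at zero. In Perl, for illustration only; the numeric value of lvlInfo here is an assumption:

    my $lvlInfo = 1;                    # assumed numeric value of lvlInfo
    my $delta   = 1 - 2;                # one -v, two --quiet on the command line
    my $verbosity = $lvlInfo + $delta;  # queryIntSetting("verbosity", lvlInfo) + delta
    $verbosity = 0 if $verbosity < 0;   # never below the lowest level
    print "verbosity = $verbosity\n";   # 0: quieter than the default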
diff --git a/src/libstore/Makefile.am b/src/libstore/Makefile.am
index 9accc3005fc3..e19256b925ea 100644
--- a/src/libstore/Makefile.am
+++ b/src/libstore/Makefile.am
@@ -10,7 +10,14 @@ pkginclude_HEADERS = \
   globals.hh references.hh pathlocks.hh \
   worker-protocol.hh
 
-libstore_la_LIBADD = ../libutil/libutil.la ../boost/format/libformat.la @ADDITIONAL_NETWORK_LIBS@
+libstore_la_LIBADD = ../libutil/libutil.la ../boost/format/libformat.la ${aterm_lib} ${sqlite_lib}
+
+EXTRA_DIST = schema.sql
 
 AM_CXXFLAGS = -Wall \
- -I$(srcdir)/.. -I$(srcdir)/../libutil
+ ${sqlite_include} -I$(srcdir)/.. -I$(srcdir)/../libutil
+
+local-store.lo: schema.sql.hh
+
+%.sql.hh: %.sql
+	../bin2c/bin2c schema < $< > $@ || (rm $@ && exit 1)
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index ef2f7adf3194..8b8be3e80dde 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -64,6 +64,7 @@ static const uid_t rootUserId = 0;
 
 /* Forward definition. */
 class Worker;
+class HookInstance;
 
 
 /* A pointer to a goal. */
@@ -215,6 +216,8 @@ public:
 
     LocalStore & store;
 
+    boost::shared_ptr<HookInstance> hook;
+    
     Worker(LocalStore & store);
     ~Worker();
 
@@ -615,6 +618,107 @@ void deletePathWrapped(const Path & path)
 //////////////////////////////////////////////////////////////////////
 
 
+struct HookInstance
+{
+    /* Pipes for talking to the build hook. */
+    Pipe toHook;
+
+    /* Pipe for the hook's standard output/error. */
+    Pipe fromHook;
+
+    /* Pipe for the builder's standard output/error. */
+    Pipe builderOut;
+    
+    /* The process ID of the hook. */
+    Pid pid;
+
+    HookInstance();
+
+    ~HookInstance();
+};
+
+
+HookInstance::HookInstance()
+{
+    debug("starting build hook");
+    
+    Path buildHook = absPath(getEnv("NIX_BUILD_HOOK"));
+    
+    /* Create a pipe to get the output of the child. */
+    fromHook.create();
+    
+    /* Create the communication pipes. */
+    toHook.create();
+
+    /* Create a pipe to get the output of the builder. */
+    builderOut.create();
+
+    /* Fork the hook. */
+    pid = fork();
+    switch (pid) {
+        
+    case -1:
+        throw SysError("unable to fork");
+
+    case 0:
+        try { /* child */
+
+            commonChildInit(fromHook);
+
+            if (chdir("/") == -1) throw SysError("changing into `/'");
+            
+            /* Dup the communication pipes. */
+            toHook.writeSide.close();
+            if (dup2(toHook.readSide, STDIN_FILENO) == -1)
+                throw SysError("dupping to-hook read side");
+
+            /* Use fd 4 for the builder's stdout/stderr. */
+            builderOut.readSide.close();
+            if (dup2(builderOut.writeSide, 4) == -1)
+                throw SysError("dupping builder's stdout/stderr");
+            
+            execl(buildHook.c_str(), buildHook.c_str(), thisSystem.c_str(),
+                (format("%1%") % maxSilentTime).str().c_str(),
+                (format("%1%") % printBuildTrace).str().c_str(),
+                NULL);
+            
+            throw SysError(format("executing `%1%'") % buildHook);
+            
+        } catch (std::exception & e) {
+            std::cerr << format("build hook error: %1%") % e.what() << std::endl;
+        }
+        quickExit(1);
+    }
+    
+    /* parent */
+    pid.setSeparatePG(true);
+    pid.setKillSignal(SIGTERM);
+    fromHook.writeSide.close();
+    toHook.readSide.close();
+}
+
+
+HookInstance::~HookInstance()
+{
+    try {
+        /* Cleanly shut down the hook by closing its stdin if it's not
+           already building.  Otherwise pid's destructor will kill
+           it. */
+        if (pid != -1 && toHook.writeSide != -1) {
+            toHook.writeSide.close();
+            pid.wait(true);
+        }
+    } catch (...) {
+        ignoreException();
+    }
+}
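
Note: the descriptor layout the constructor establishes for the hook process is worth spelling out: stdin carries control messages from the daemon, stdout/stderr flow back over `fromHook` (presumably set up by `commonChildInit`), and fd 4 is reserved for the remote builder's log. A hedged sketch of how a hook would use fd 4 (constant and helper names are made up):

    #include <unistd.h>
    #include <string>

    // Hypothetical constants documenting the layout described above.
    const int hookControlFd = STDIN_FILENO;  // requests from the daemon
    const int hookReplyFd   = STDERR_FILENO; // "# accept" etc. back to the daemon
    const int builderLogFd  = 4;             // the remote builder's log

    // Sketch: forward a chunk of remote build output to fd 4 so the
    // daemon can log it separately from the hook's own chatter.
    ssize_t forwardBuildOutput(const std::string & chunk)
    {
        return write(builderLogFd, chunk.data(), chunk.size());
    }

    int main()
    {
        forwardBuildOutput("hello from the remote builder\n");
        return 0;
    }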
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
+
 class DerivationGoal : public Goal
 {
 private:
@@ -649,14 +753,11 @@ private:
     AutoCloseFD fdLogFile;
 
     /* Pipe for the builder's standard output/error. */
-    Pipe logPipe;
-
-    /* Whether we're building using a build hook. */
-    bool usingBuildHook;
-
-    /* Pipes for talking to the build hook (if any). */
-    Pipe toHook;
+    Pipe builderOut;
 
+    /* The build hook. */
+    boost::shared_ptr<HookInstance> hook;
+    
     /* Whether we're currently doing a chroot build. */
     bool useChroot;
     
@@ -694,12 +795,8 @@ private:
     void buildDone();
 
     /* Is the build hook willing to perform the build? */
-    typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
     HookReply tryBuildHook();
 
-    /* Synchronously wait for a build hook to finish. */
-    void terminateBuildHook(bool kill = false);
-
     /* Start building a derivation. */
     void startBuilder();
 
@@ -711,10 +808,6 @@ private:
     /* Open a log file and a pipe to it. */
     Path openLogFile();
 
-    /* Common initialisation to be performed in child processes (i.e.,
-       both in builders and in build hooks). */
-    void initChild();
-    
     /* Delete the temporary directory, if we have one. */
     void deleteTmpDir(bool force);
 
@@ -742,6 +835,7 @@ DerivationGoal::DerivationGoal(const Path & drvPath, Worker & worker)
     trace("created");
 }
 
+
 DerivationGoal::~DerivationGoal()
 {
     /* Careful: we should never ever throw an exception from a
@@ -754,6 +848,7 @@ DerivationGoal::~DerivationGoal()
     }
 }
 
+
 void DerivationGoal::killChild()
 {
     if (pid != -1) {
@@ -778,6 +873,8 @@ void DerivationGoal::killChild()
         
         assert(pid == -1);
     }
+
+    hook.reset();
 }
 
 
@@ -887,7 +984,10 @@ void DerivationGoal::outputsSubstituted()
     foreach (PathSet::iterator, i, drv.inputSrcs)
         addWaitee(worker.makeSubstitutionGoal(*i));
 
-    state = &DerivationGoal::inputsRealised;
+    if (waitees.empty()) /* to prevent hang (no wake-up event) */
+        inputsRealised();
+    else
+        state = &DerivationGoal::inputsRealised;
 }
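
Note: the `waitees.empty()` check fixes a classic state-machine pitfall: a goal that registers no waitees will never receive a wake-up event, so its continuation must be invoked directly. A distilled sketch of the pattern (hypothetical, self-contained names):

    #include <set>
    #include <iostream>

    struct Goal
    {
        std::set<Goal *> waitees;
        void (Goal::* state)();

        Goal() : state(0) { }

        void outputsSubstituted()
        {
            // addWaitee() calls would go here...
            if (waitees.empty())
                inputsRealised();              // no wake-up will ever come
            else
                state = &Goal::inputsRealised; // run when the last waitee finishes
        }

        void inputsRealised()
        {
            std::cout << "proceeding to build" << std::endl;
        }
    };

    int main()
    {
        Goal g;
        g.outputsSubstituted(); // prints immediately: no waitees were registered
    }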
 
 
@@ -961,6 +1061,16 @@ PathSet outputPaths(const DerivationOutputs & outputs)
 }
 
 
+static bool canBuildLocally(const string & platform)
+{
+    return platform == thisSystem 
+#ifdef CAN_DO_LINUX32_BUILDS
+        || (platform == "i686-linux" && thisSystem == "x86_64-linux")
+#endif
+        ;
+}
+
+
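
Note: the CAN_DO_LINUX32_BUILDS branch lets an x86_64-linux host accept i686-linux derivations; Nix implements the impersonation in the child with the Linux personality(2) call (the call itself is not visible in the hunks shown here). A Linux-only sketch of that mechanism:

    #include <sys/personality.h>
    #include <sys/utsname.h>
    #include <cstdio>

    int main()
    {
        // Adopt a 32-bit Linux personality before exec'ing the builder,
        // so that `uname -m' inside the build reports i686.
        if (personality(PER_LINUX32) == -1)
            std::perror("personality");
        struct utsname u;
        uname(&u);
        std::printf("machine: %s\n", u.machine);
        return 0;
    }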
 void DerivationGoal::tryToBuild()
 {
     trace("trying to build");
@@ -1028,28 +1138,36 @@ void DerivationGoal::tryToBuild()
     foreach (DerivationOutputs::iterator, i, drv.outputs)
         if (pathFailed(i->second.path)) return;
 
+    /* Don't do a remote build if the derivation has the attribute
+       `preferLocalBuild' set. */
+    bool preferLocalBuild =
+        drv.env["preferLocalBuild"] == "1" && canBuildLocally(drv.platform);
+
     /* Is the build hook willing to accept this job? */
-    usingBuildHook = true;
-    switch (tryBuildHook()) {
-        case rpAccept:
-            /* Yes, it has started doing so.  Wait until we get EOF
-               from the hook. */
-            state = &DerivationGoal::buildDone;
-            return;
-        case rpPostpone:
-            /* Not now; wait until at least one child finishes. */
-            worker.waitForAWhile(shared_from_this());
-            outputLocks.unlock();
-            return;
-        case rpDecline:
-            /* We should do it ourselves. */
-            break;
+    if (!preferLocalBuild) {
+        switch (tryBuildHook()) {
+            case rpAccept:
+                /* Yes, it has started doing so.  Wait until we get
+                   EOF from the hook. */
+                state = &DerivationGoal::buildDone;
+                return;
+            case rpPostpone:
+                /* Not now; wait until at least one child finishes or
+                   the wake-up timeout expires. */
+                worker.waitForAWhile(shared_from_this());
+                outputLocks.unlock();
+                return;
+            case rpDecline:
+                /* We should do it ourselves. */
+                break;
+        }
     }
-
-    usingBuildHook = false;
-
-    /* Make sure that we are allowed to start a build. */
-    if (worker.getNrLocalBuilds() >= maxBuildJobs) {
+    
+    /* Make sure that we are allowed to start a build.  If this
+       derivation prefers to be done locally, do it even if
+       maxBuildJobs is 0. */
+    unsigned int curBuilds = worker.getNrLocalBuilds();
+    if (curBuilds >= maxBuildJobs && !(preferLocalBuild && curBuilds == 0)) {
         worker.waitForBuildSlot(shared_from_this());
         outputLocks.unlock();
         return;
@@ -1085,18 +1203,29 @@ void DerivationGoal::buildDone()
        to have terminated.  In fact, the builder could also have
        simply closed its end of the pipe --- just don't do that
        :-) */
-    /* !!! this could block! security problem! solution: kill the
-       child */
-    pid_t savedPid = pid;
-    int status = pid.wait(true);
+    int status;
+    pid_t savedPid;
+    if (hook) {
+        savedPid = hook->pid;
+        status = hook->pid.wait(true);
+    } else {
+        /* !!! this could block! security problem! solution: kill the
+           child */
+        savedPid = pid;
+        status = pid.wait(true);
+    }
 
     debug(format("builder process for `%1%' finished") % drvPath);
 
     /* So the child is gone now. */
     worker.childTerminated(savedPid);
-
+    
     /* Close the read side of the logger pipe. */
-    logPipe.readSide.close();
+    if (hook) {
+        hook->builderOut.readSide.close();
+        hook->fromHook.readSide.close();
+    }
+    else builderOut.readSide.close();
 
     /* Close the log file. */
     fdLogFile.close();
@@ -1169,11 +1298,11 @@ void DerivationGoal::buildDone()
         /* When using a build hook, the hook will return a remote
            build failure using exit code 100.  Anything else is a hook
            problem. */
-        bool hookError = usingBuildHook &&
+        bool hookError = hook &&
             (!WIFEXITED(status) || WEXITSTATUS(status) != 100);
         
         if (printBuildTrace) {
-            if (usingBuildHook && hookError)
+            if (hook && hookError)
                 printMsg(lvlError, format("@ hook-failed %1% %2% %3% %4%")
                     % drvPath % drv.outputs["out"].path % status % e.msg());
             else
@@ -1208,162 +1337,85 @@ void DerivationGoal::buildDone()
 }
 
 
-DerivationGoal::HookReply DerivationGoal::tryBuildHook()
+HookReply DerivationGoal::tryBuildHook()
 {
-    if (!useBuildHook) return rpDecline;
-    Path buildHook = getEnv("NIX_BUILD_HOOK");
-    if (buildHook == "") return rpDecline;
-    buildHook = absPath(buildHook);
+    if (!useBuildHook || getEnv("NIX_BUILD_HOOK") == "") return rpDecline;
 
-    /* Create a directory where we will store files used for
-       communication between us and the build hook. */
-    tmpDir = createTempDir();
-    
-    /* Create the log file and pipe. */
-    Path logFile = openLogFile();
+    if (!worker.hook)
+        worker.hook = boost::shared_ptr<HookInstance>(new HookInstance);
 
-    /* Create the communication pipes. */
-    toHook.create();
+    /* Tell the hook about system features (beyond the system type)
+       required from the build machine.  (The hook could parse the
+       drv file itself, but this is easier.) */
+    Strings features = tokenizeString(drv.env["requiredSystemFeatures"]);
+    foreach (Strings::iterator, i, features) checkStoreName(*i); /* !!! abuse */
 
-    /* Fork the hook. */
-    pid = fork();
-    switch (pid) {
-        
-    case -1:
-        throw SysError("unable to fork");
-
-    case 0:
-        try { /* child */
-
-            initChild();
-
-            string s;
-            foreach (DerivationOutputs::const_iterator, i, drv.outputs)
-                s += i->second.path + " ";
-            if (setenv("NIX_HELD_LOCKS", s.c_str(), 1))
-                throw SysError("setting an environment variable");
-
-            execl(buildHook.c_str(), buildHook.c_str(),
-                (worker.getNrLocalBuilds() < maxBuildJobs ? (string) "1" : "0").c_str(),
-                thisSystem.c_str(),
-                drv.platform.c_str(),
-                drvPath.c_str(),
-                (format("%1%") % maxSilentTime).str().c_str(),
-                NULL);
-            
-            throw SysError(format("executing `%1%'") % buildHook);
-            
-        } catch (std::exception & e) {
-            std::cerr << format("build hook error: %1%") % e.what() << std::endl;
-        }
-        quickExit(1);
-    }
-    
-    /* parent */
-    pid.setSeparatePG(true);
-    pid.setKillSignal(SIGTERM);
-    logPipe.writeSide.close();
-    worker.childStarted(shared_from_this(),
-        pid, singleton<set<int> >(logPipe.readSide), false, false);
-
-    toHook.readSide.close();
+    /* Send the request to the hook. */
+    writeLine(worker.hook->toHook.writeSide, (format("%1% %2% %3% %4%")
+        % (worker.getNrLocalBuilds() < maxBuildJobs ? "1" : "0")
+        % drv.platform % drvPath % concatStringsSep(",", features)).str());
 
     /* Read the first line of input, which should be a word indicating
        whether the hook wishes to perform the build. */
     string reply;
-    try {
-        while (true) {
-            string s = readLine(logPipe.readSide);
-            if (string(s, 0, 2) == "# ") {
-                reply = string(s, 2);
-                break;
-            }
-            handleChildOutput(logPipe.readSide, s + "\n");
+    while (true) {
+        string s = readLine(worker.hook->fromHook.readSide);
+        if (string(s, 0, 2) == "# ") {
+            reply = string(s, 2);
+            break;
         }
-    } catch (Error & e) {
-        terminateBuildHook(true);
-        throw;
+        s += "\n";
+        writeToStderr((unsigned char *) s.c_str(), s.size());
     }
 
     debug(format("hook reply is `%1%'") % reply);
 
-    if (reply == "decline" || reply == "postpone") {
-        /* Clean up the child.  !!! hacky / should verify */
-        terminateBuildHook();
+    if (reply == "decline" || reply == "postpone")
         return reply == "decline" ? rpDecline : rpPostpone;
-    }
+    else if (reply != "accept")
+        throw Error(format("bad hook reply `%1%'") % reply);
 
-    else if (reply == "accept") {
+    printMsg(lvlTalkative, format("using hook to build path(s) %1%")
+        % showPaths(outputPaths(drv.outputs)));
 
-        printMsg(lvlInfo, format("using hook to build path(s) %1%")
-            % showPaths(outputPaths(drv.outputs)));
+    hook = worker.hook;
+    worker.hook.reset();
         
-        /* Write the information that the hook needs to perform the
-           build, i.e., the set of input paths, the set of output
-           paths, and the references (pointer graph) in the input
-           paths. */
-        
-        Path inputListFN = tmpDir + "/inputs";
-        Path outputListFN = tmpDir + "/outputs";
-        Path referencesFN = tmpDir + "/references";
-
-        /* The `inputs' file lists all inputs that have to be copied
-           to the remote system.  This unfortunately has to contain
-           the entire derivation closure to ensure that the validity
-           invariant holds on the remote system.  (I.e., it's
-           unfortunate that we have to list it since the remote system
-           *probably* already has it.) */
-        PathSet allInputs;
-        allInputs.insert(inputPaths.begin(), inputPaths.end());
-        computeFSClosure(drvPath, allInputs);
+    /* Tell the hook all the inputs that have to be copied to the
+       remote system.  This unfortunately has to contain the entire
+       derivation closure to ensure that the validity invariant holds
+       on the remote system.  (I.e., it's unfortunate that we have to
+       list it since the remote system *probably* already has it.) */
+    PathSet allInputs;
+    allInputs.insert(inputPaths.begin(), inputPaths.end());
+    computeFSClosure(drvPath, allInputs);
         
-        string s;
-        foreach (PathSet::iterator, i, allInputs) s += *i + "\n";
+    string s;
+    foreach (PathSet::iterator, i, allInputs) s += *i + " ";
+    writeLine(hook->toHook.writeSide, s);
         
-        writeFile(inputListFN, s);
-
-        /* The `outputs' file lists all outputs that have to be copied
-           from the remote system. */
-        s = "";
-        foreach (DerivationOutputs::iterator, i, drv.outputs)
-            s += i->second.path + "\n";
-        writeFile(outputListFN, s);
-
-        /* The `references' file has exactly the format accepted by
-           `nix-store --register-validity'. */
-        writeFile(referencesFN,
-            makeValidityRegistration(allInputs, true, false));
+    /* Tell the hook the outputs that have to be copied back from the
+       remote system. */
+    s = "";
+    foreach (DerivationOutputs::iterator, i, drv.outputs)
+        s += i->second.path + " ";
+    writeLine(hook->toHook.writeSide, s);
+    
+    hook->toHook.writeSide.close();
 
-        /* Tell the hook to proceed. */
-        writeLine(toHook.writeSide, "okay");
-        toHook.writeSide.close();
+    /* Create the log file and pipe. */
+    Path logFile = openLogFile();
 
-        if (printBuildTrace)
-            printMsg(lvlError, format("@ build-started %1% %2% %3% %4%")
-                % drvPath % drv.outputs["out"].path % drv.platform % logFile);
+    set<int> fds;
+    fds.insert(hook->fromHook.readSide);
+    fds.insert(hook->builderOut.readSide);
+    worker.childStarted(shared_from_this(), hook->pid, fds, false, false);
+    
+    if (printBuildTrace)
+        printMsg(lvlError, format("@ build-started %1% %2% %3% %4%")
+            % drvPath % drv.outputs["out"].path % drv.platform % logFile);
         
-        return rpAccept;
-    }
-
-    else throw Error(format("bad hook reply `%1%'") % reply);
-}
-
-
-void DerivationGoal::terminateBuildHook(bool kill)
-{
-    debug("terminating build hook");
-    pid_t savedPid = pid;
-    if (kill)
-        pid.kill();
-    else
-        pid.wait(true);
-    /* `false' means don't wake up waiting goals, since we want to
-       keep this build slot ourselves. */
-    worker.childTerminated(savedPid, false);
-    toHook.writeSide.close();
-    fdLogFile.close();
-    logPipe.readSide.close();
-    deleteTmpDir(true); /* get rid of the hook's temporary directory */
+    return rpAccept;    
 }
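
Note: the hook side of this protocol is simple: read one request line per job from stdin and answer with a line starting with `# `. A minimal hook that declines everything could look like this (a hypothetical C++ stand-in for the real build-remote.pl):

    #include <iostream>
    #include <string>

    // Each job arrives on stdin as one line, as written above:
    //   "<slot-free> <platform> <drvPath> <feature1,feature2,...>"
    // and the daemon scans the hook's output for a line starting "# ".
    int main()
    {
        std::string request;
        while (std::getline(std::cin, request)) {
            // Always decline, forcing a local build.  A real hook would
            // inspect the platform and features and might answer
            // "# accept" or "# postpone" instead.
            std::cerr << "# decline" << std::endl;
        }
        return 0;
    }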
 
 
@@ -1380,11 +1432,7 @@ void DerivationGoal::startBuilder()
         format("building path(s) %1%") % showPaths(outputPaths(drv.outputs)))
     
     /* Right platform? */
-    if (drv.platform != thisSystem 
-#ifdef CAN_DO_LINUX32_BUILDS
-        && !(drv.platform == "i686-linux" && thisSystem == "x86_64-linux")
-#endif
-        )
+    if (!canBuildLocally(drv.platform))
         throw Error(
             format("a `%1%' is required to build `%3%', but I am a `%2%'")
             % drv.platform % thisSystem % drvPath);
@@ -1499,7 +1547,7 @@ void DerivationGoal::startBuilder()
 
         /* Write closure info to `fileName'. */
         writeFile(tmpDir + "/" + fileName,
-            makeValidityRegistration(paths, false, false));
+            worker.store.makeValidityRegistration(paths, false, false));
     }
 
     
@@ -1549,6 +1597,9 @@ void DerivationGoal::startBuilder()
 
     if (fixedOutput) useChroot = false;
 
+    /* Hack to allow derivations to disable chroot builds. */
+    if (drv.env["__noChroot"] == "1") useChroot = false;
+
     if (useChroot) {
 #if CHROOT_ENABLED
         /* Create a temporary directory in which we set up the chroot
@@ -1572,7 +1623,7 @@ void DerivationGoal::startBuilder()
 
         /* Create a /etc/passwd with entries for the build user and the
            nobody account.  The latter is kind of a hack to support
-           Samba-in-QEMU.  */
+           Samba-in-QEMU. */
         createDirs(chrootRootDir + "/etc");
 
         writeFile(chrootRootDir + "/etc/passwd",
@@ -1580,13 +1631,13 @@ void DerivationGoal::startBuilder()
                 "nixbld:x:%1%:%2%:Nix build user:/:/noshell\n"
                 "nobody:x:65534:65534:Nobody:/:/noshell\n")
                 % (buildUser.enabled() ? buildUser.getUID() : getuid())
-	        % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
+                % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
 
 	/* Declare the build user's group so that programs get a consistent
-	   view of the system (e.g., "id -gn").  */
-	writeFile(chrootRootDir + "/etc/group",
-		  (format("nixbld:!:%1%:\n")
-		   % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
+	   view of the system (e.g., "id -gn"). */
+        writeFile(chrootRootDir + "/etc/group",
+            (format("nixbld:!:%1%:\n")
+                % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
 
         /* Bind-mount a user-configurable set of directories from the
            host file system.  The `/dev/pts' directory must be mounted
@@ -1645,9 +1696,12 @@ void DerivationGoal::startBuilder()
     printMsg(lvlChatty, format("executing builder `%1%'") %
         drv.builder);
 
-    /* Create the log file and pipe. */
+    /* Create the log file. */
     Path logFile = openLogFile();
     
+    /* Create a pipe to get the output of the builder. */
+    builderOut.create();
+
     /* Fork a child to build the package.  Note that while we
        currently use forks to run and wait for the children, it
        shouldn't be hard to use threads for this on systems where
@@ -1688,18 +1742,23 @@ void DerivationGoal::startBuilder()
                         throw SysError(format("bind mount from `%1%' to `%2%' failed") % source % target);
                 }
                     
-                /* Do the chroot().  initChild() will do a chdir() to
-                   the temporary build directory to make sure the
-                   current directory is in the chroot.  (Actually the
-                   order doesn't matter, since due to the bind mount
-                   tmpDir and tmpRootDit/tmpDir are the same
-                   directories.) */
+                /* Do the chroot().  Below we do a chdir() to the
+                   temporary build directory to make sure the current
+                   directory is in the chroot.  (Actually the order
+                   doesn't matter, since due to the bind mount tmpDir
+                   and tmpRootDir/tmpDir are the same directories.) */
                 if (chroot(chrootRootDir.c_str()) == -1)
                     throw SysError(format("cannot change root directory to `%1%'") % chrootRootDir);
             }
 #endif
             
-            initChild();
+            commonChildInit(builderOut);
+    
+            if (chdir(tmpDir.c_str()) == -1)
+                throw SysError(format("changing into `%1%'") % tmpDir);
+
+            /* Close all other file descriptors. */
+            closeMostFDs(set<int>());
 
 #ifdef CAN_DO_LINUX32_BUILDS
             if (drv.platform == "i686-linux" && thisSystem == "x86_64-linux") {
@@ -1720,10 +1779,10 @@ void DerivationGoal::startBuilder()
             
             /* If we are running in `build-users' mode, then switch to
                the user we allocated above.  Make sure that we drop
-               all root privileges.  Note that initChild() above has
-               closed all file descriptors except std*, so that's
-               safe.  Also note that setuid() when run as root sets
-               the real, effective and saved UIDs. */
+               all root privileges.  Note that above we have closed
+               all file descriptors except std*, so that's safe.  Also
+               note that setuid() when run as root sets the real,
+               effective and saved UIDs. */
             if (buildUser.enabled()) {
                 printMsg(lvlChatty, format("switching to user `%1%'") % buildUser.getUser());
 
@@ -1777,9 +1836,9 @@ void DerivationGoal::startBuilder()
     
     /* parent */
     pid.setSeparatePG(true);
-    logPipe.writeSide.close();
+    builderOut.writeSide.close();
     worker.childStarted(shared_from_this(), pid,
-        singleton<set<int> >(logPipe.readSide), true, true);
+        singleton<set<int> >(builderOut.readSide), true, true);
 
     if (printBuildTrace) {
         printMsg(lvlError, format("@ build-started %1% %2% %3% %4%")
@@ -1811,12 +1870,12 @@ PathSet parseReferenceSpecifiers(const Derivation & drv, string attr)
 void DerivationGoal::computeClosure()
 {
     map<Path, PathSet> allReferences;
-    map<Path, Hash> contentHashes;
+    map<Path, HashResult> contentHashes;
 
     /* When using a build hook, the build hook can register the output
        as valid (by doing `nix-store --import').  If so we don't have
        to do anything here. */
-    if (usingBuildHook) {
+    if (hook) {
         bool allValid = true;
         foreach (DerivationOutputs::iterator, i, drv.outputs)
             if (!worker.store.isValidPath(i->second.path)) allValid = false;
@@ -1868,7 +1927,7 @@ void DerivationGoal::computeClosure()
             if (ht == htUnknown)
                 throw BuildError(format("unknown hash algorithm `%1%'") % algo);
             Hash h = parseHash(ht, i->second.hash);
-            Hash h2 = recursive ? hashPath(ht, path) : hashFile(ht, path);
+            Hash h2 = recursive ? hashPath(ht, path).first : hashFile(ht, path);
             if (h != h2)
                 throw BuildError(
                     format("output path `%1%' should have %2% hash `%3%', instead has `%4%'")
@@ -1882,7 +1941,7 @@ void DerivationGoal::computeClosure()
 	   contained in it.  Compute the SHA-256 NAR hash at the same
 	   time.  The hash is stored in the database so that we can
 	   verify later on whether nobody has messed with the store. */
-        Hash hash;
+        HashResult hash;
         PathSet references = scanForReferences(path, allPaths, hash);
         contentHashes[path] = hash;
 
@@ -1911,14 +1970,18 @@ void DerivationGoal::computeClosure()
     }
 
     /* Register each output path as valid, and register the sets of
-       paths referenced by each of them.  !!! this should be
-       atomic so that either all paths are registered as valid, or
-       none are. */
-    foreach (DerivationOutputs::iterator, i, drv.outputs)
-        worker.store.registerValidPath(i->second.path,
-            contentHashes[i->second.path],
-            allReferences[i->second.path],
-            drvPath);
+       paths referenced by each of them. */
+    ValidPathInfos infos;
+    foreach (DerivationOutputs::iterator, i, drv.outputs) {
+        ValidPathInfo info;
+        info.path = i->second.path;
+        info.hash = contentHashes[i->second.path].first;
+        info.narSize = contentHashes[i->second.path].second;
+        info.references = allReferences[i->second.path];
+        info.deriver = drvPath;
+        infos.push_back(info);
+    }
+    worker.store.registerValidPaths(infos);
 
     /* It is now safe to delete the lock files, since all future
        lockers will see that the output paths are valid; they will not
@@ -1944,32 +2007,10 @@ Path DerivationGoal::openLogFile()
     if (fdLogFile == -1)
         throw SysError(format("creating log file `%1%'") % logFileName);
 
-    /* Create a pipe to get the output of the child. */
-    logPipe.create();
-
     return logFileName;
 }
 
 
-void DerivationGoal::initChild()
-{
-    commonChildInit(logPipe);
-    
-    if (chdir(tmpDir.c_str()) == -1)
-        throw SysError(format("changing into `%1%'") % tmpDir);
-
-    /* When running a hook, dup the communication pipes. */
-    if (usingBuildHook) {
-        toHook.writeSide.close();
-        if (dup2(toHook.readSide, STDIN_FILENO) == -1)
-            throw SysError("dupping to-hook read side");
-    }
-
-    /* Close all other file descriptors. */
-    closeMostFDs(set<int>());
-}
-
-
 void DerivationGoal::deleteTmpDir(bool force)
 {
     if (tmpDir != "") {
@@ -1989,19 +2030,22 @@ void DerivationGoal::deleteTmpDir(bool force)
 
 void DerivationGoal::handleChildOutput(int fd, const string & data)
 {
-    if (fd == logPipe.readSide) {
+    if ((hook && fd == hook->builderOut.readSide) ||
+        (!hook && fd == builderOut.readSide))
+    {
         if (verbosity >= buildVerbosity)
             writeToStderr((unsigned char *) data.c_str(), data.size());
         writeFull(fdLogFile, (unsigned char *) data.c_str(), data.size());
     }
 
-    else abort();
+    if (hook && fd == hook->fromHook.readSide)
+        writeToStderr((unsigned char *) data.c_str(), data.size());
 }
 
 
 void DerivationGoal::handleEOF(int fd)
 {
-    if (fd == logPipe.readSide) worker.wakeUp(shared_from_this());
+    worker.wakeUp(shared_from_this());
 }
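
Note: handleChildOutput now demultiplexes two descriptors per hook: builder output goes to the log file, the hook's own messages go straight to stderr. A reduced sketch of that dispatch (types and names are hypothetical):

    #include <iostream>
    #include <string>

    // Data read from the builder's fd goes to the build log; data from
    // the hook's own fd goes straight to the user.
    void dispatchChildOutput(int fd, int builderFd, int hookFd,
        const std::string & data, std::ostream & log)
    {
        if (fd == builderFd) log << data;
        else if (fd == hookFd) std::cerr << data;
    }

    int main()
    {
        dispatchChildOutput(4, 4, 5, "compiling foo.c\n", std::cout);
        return 0;
    }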
 
 
@@ -2345,10 +2389,15 @@ void SubstitutionGoal::finished()
 
     canonicalisePathMetaData(storePath);
 
-    Hash contentHash = hashPath(htSHA256, storePath);
-
-    worker.store.registerValidPath(storePath, contentHash,
-        info.references, info.deriver);
+    HashResult hash = hashPath(htSHA256, storePath);
+    
+    ValidPathInfo info2;
+    info2.path = storePath;
+    info2.hash = hash.first;
+    info2.narSize = hash.second;
+    info2.references = info.references;
+    info2.deriver = info.deriver;
+    worker.store.registerValidPath(info2);
 
     outputLock->setDeletion(true);
     
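Note: the substituter path now goes through the same `ValidPathInfo` record as the builder path, with `hashPath()` returning both the SHA-256 and the NAR size in one pass (the `HashResult` pair). Schematically (the struct body below is a guessed subset of the real one in store-api.hh, with the hash simplified to a string):

    #include <iostream>
    #include <set>
    #include <string>
    #include <utility>

    typedef std::string Path;
    typedef std::set<Path> PathSet;

    // Guessed subset of the real ValidPathInfo; the real `hash' field
    // is a Hash, not a string.
    struct ValidPathInfo
    {
        Path path;
        std::string hash;
        PathSet references;
        Path deriver;
        unsigned long long narSize;
        ValidPathInfo() : narSize(0) { }
    };

    // In the patch, hashPath() returns the hash plus the size of the
    // NAR serialisation, computed in the same pass.
    typedef std::pair<std::string, unsigned long long> HashResult;

    ValidPathInfo makeInfo(const Path & storePath, const HashResult & h)
    {
        ValidPathInfo info;
        info.path = storePath;
        info.hash = h.first;
        info.narSize = h.second;
        return info;
    }

    int main()
    {
        ValidPathInfo info = makeInfo("/nix/store/example",
            HashResult("sha256:0000", 1234));
        std::cout << info.path << " " << info.narSize << std::endl;
    }
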
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index 95e49d42c9ea..c14be48afb8e 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -1,10 +1,10 @@
 #ifndef __DERIVATIONS_H
 #define __DERIVATIONS_H
 
-#include "hash.hh"
-
 #include <map>
 
+#include "types.hh"
+
 
 namespace nix {
 
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index f58f691c99dd..b8395bfc4352 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -1,6 +1,5 @@
 #include "globals.hh"
 #include "misc.hh"
-#include "pathlocks.hh"
 #include "local-store.hh"
 
 #include <boost/shared_ptr.hpp>
@@ -31,7 +30,7 @@ static const int defaultGcLevel = 1000;
    read.  To be precise: when they try to create a new temporary root
    file, they will block until the garbage collector has finished /
    yielded the GC lock. */
-static int openGCLock(LockType lockType)
+int LocalStore::openGCLock(LockType lockType)
 {
     Path fnGCLock = (format("%1%/%2%")
         % nixStateDir % gcLockName).str();
@@ -127,7 +126,7 @@ Path addPermRoot(const Path & _storePath, const Path & _gcRoot,
        Instead of reading all the roots, it would be more efficient to
        check if the root is in a directory in or linked from the
        gcroots directory. */
-    if (queryBoolSetting("gc-check-reachability", true)) {
+    if (queryBoolSetting("gc-check-reachability", false)) {
         Roots roots = store->findRoots();
         if (roots.find(gcRoot) == roots.end())
             printMsg(lvlError, 
@@ -136,7 +135,7 @@ Path addPermRoot(const Path & _storePath, const Path & _gcRoot,
                     "therefore, `%2%' might be removed by the garbage collector")
                 % gcRoot % storePath);
     }
-        
+
     /* Grab the global GC root, causing us to block while a GC is in
        progress.  This prevents the set of permanent roots from
        increasing while a GC is in progress. */
@@ -416,18 +415,13 @@ struct LocalStore::GCState
     PathSet busy;
     bool gcKeepOutputs;
     bool gcKeepDerivations;
-
-    bool drvsIndexed;
-    typedef std::multimap<string, Path> DrvsByName;
-    DrvsByName drvsByName; // derivation paths hashed by name attribute
-
-    GCState(GCResults & results_) : results(results_), drvsIndexed(false)
+    GCState(GCResults & results_) : results(results_)
     {
     }
 };
 
 
-static bool doDelete(GCOptions::GCAction action)
+static bool shouldDelete(GCOptions::GCAction action)
 {
     return action == GCOptions::gcDeleteDead
         || action == GCOptions::gcDeleteSpecific;
@@ -441,45 +435,11 @@ bool LocalStore::isActiveTempFile(const GCState & state,
         && state.tempRoots.find(string(path, 0, path.size() - suffix.size())) != state.tempRoots.end();
 }
 
-
-/* Return all the derivations in the Nix store that have `path' as an
-   output.  This function assumes that derivations have the same name
-   as their outputs. */
-PathSet LocalStore::findDerivers(GCState & state, const Path & path)
-{
-    PathSet derivers;
-
-    Path deriver = queryDeriver(path);
-    if (deriver != "") derivers.insert(deriver);
-
-    if (!state.drvsIndexed) {
-        Paths entries = readDirectory(nixStore);
-        foreach (Paths::iterator, i, entries)
-            if (isDerivation(*i))
-                state.drvsByName.insert(std::pair<string, Path>(
-                        getNameOfStorePath(*i), nixStore + "/" + *i));
-        state.drvsIndexed = true;
-    }
-    
-    string name = getNameOfStorePath(path);
-
-    // Urgh, I should have used Haskell...
-    std::pair<GCState::DrvsByName::iterator, GCState::DrvsByName::iterator> range =
-        state.drvsByName.equal_range(name);
-
-    for (GCState::DrvsByName::iterator i = range.first; i != range.second; ++i)
-        if (isValidPath(i->second)) {
-            Derivation drv = derivationFromPath(i->second);
-            foreach (DerivationOutputs::iterator, j, drv.outputs)
-                if (j->second.path == path) derivers.insert(i->second);
-        }
-
-    return derivers;
-}
-
     
 bool LocalStore::tryToDelete(GCState & state, const Path & path)
 {
+    checkInterrupt();
+    
     if (!pathExists(path)) return true;
     if (state.deleted.find(path) != state.deleted.end()) return true;
     if (state.live.find(path) != state.live.end()) return false;
@@ -508,10 +468,10 @@ bool LocalStore::tryToDelete(GCState & state, const Path & path)
            then don't delete the derivation if any of the outputs are
            live. */
         if (state.gcKeepDerivations && isDerivation(path)) {
-            Derivation drv = derivationFromPath(path);
-            foreach (DerivationOutputs::iterator, i, drv.outputs)
-                if (!tryToDelete(state, i->second.path)) {
-                    printMsg(lvlDebug, format("cannot delete derivation `%1%' because its output is alive") % path);
+            PathSet outputs = queryDerivationOutputs(path);
+            foreach (PathSet::iterator, i, outputs)
+                if (!tryToDelete(state, *i)) {
+                    printMsg(lvlDebug, format("cannot delete derivation `%1%' because its output `%2%' is alive") % path % *i);
                     goto isLive;
                 }
         }
@@ -522,18 +482,9 @@ bool LocalStore::tryToDelete(GCState & state, const Path & path)
         if (!pathExists(path)) return true;
 
         /* If gc-keep-outputs is set, then don't delete this path if
-           its deriver is not garbage.  !!! Nix does not reliably
-           store derivers, so we have to look at all derivations to
-           determine which of them derive `path'.  Since this makes
-           the garbage collector very slow to start on large Nix
-           stores, here we just look for all derivations that have the
-           same name as `path' (where the name is the part of the
-           filename after the hash, i.e. the `name' attribute of the
-           derivation).  This is somewhat hacky: currently, the
-           deriver of a path always has the same name as the output,
-           but this might change in the future. */
+           there are derivers of this path that are not garbage. */
         if (state.gcKeepOutputs) {
-            PathSet derivers = findDerivers(state, path);
+            PathSet derivers = queryValidDerivers(path);
             foreach (PathSet::iterator, deriver, derivers) {
                 /* Break an infinite recursion if gc-keep-derivations
                    and gc-keep-outputs are both set by tentatively
@@ -567,7 +518,7 @@ bool LocalStore::tryToDelete(GCState & state, const Path & path)
     }
 
     /* The path is garbage, so delete it. */
-    if (doDelete(state.options.action)) {
+    if (shouldDelete(state.options.action)) {
         printMsg(lvlInfo, format("deleting `%1%'") % path);
 
         unsigned long long bytesFreed, blocksFreed;
@@ -613,6 +564,15 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
     
     state.gcKeepOutputs = queryBoolSetting("gc-keep-outputs", false);
     state.gcKeepDerivations = queryBoolSetting("gc-keep-derivations", true);
+
+    /* Using `--ignore-liveness' with `--delete' can have unintended
+       consequences if `gc-keep-outputs' or `gc-keep-derivations' is
+       true (the garbage collector will recurse into deleting the
+       outputs or derivers, respectively).  So disable them. */
+    if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
+        state.gcKeepOutputs = false;
+        state.gcKeepDerivations = false;
+    }
     
     /* Acquire the global GC root.  This prevents
        a) New roots from being added.
@@ -667,7 +627,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
         vector<Path> entries_(entries.begin(), entries.end());
         random_shuffle(entries_.begin(), entries_.end());
 
-        if (doDelete(state.options.action))
+        if (shouldDelete(state.options.action))
             printMsg(lvlError, format("deleting garbage..."));
         else
             printMsg(lvlError, format("determining live/dead paths..."));
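
Note: with derivers and derivation outputs stored in the database, the `gc-keep-outputs` check reduces to query-and-recurse over `queryValidDerivers()`. A condensed sketch of the liveness rule (the store interface here is a hypothetical reduction of the real one):

    #include <set>
    #include <string>

    typedef std::string Path;
    typedef std::set<Path> PathSet;

    // Hypothetical minimal interface over the new SQLite-backed queries.
    struct Store
    {
        virtual PathSet queryValidDerivers(const Path & path) = 0;
        virtual bool tryToDelete(const Path & path) = 0; // true = was garbage
        virtual ~Store() { }
    };

    // gc-keep-outputs: keep `path' if at least one of its derivers
    // turns out to be live (i.e. cannot be deleted).
    bool keptByLiveDeriver(Store & store, const Path & path)
    {
        PathSet derivers = store.queryValidDerivers(path);
        for (PathSet::iterator i = derivers.begin(); i != derivers.end(); ++i)
            if (!store.tryToDelete(*i)) return true;
        return false;
    }

    struct DummyStore : Store
    {
        PathSet queryValidDerivers(const Path &) { return PathSet(); }
        bool tryToDelete(const Path &) { return true; }
    };

    int main()
    {
        DummyStore s;
        return keptByLiveDeriver(s, "/nix/store/example") ? 1 : 0;
    }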
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 75d2f69c2b72..7069d104aae4 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -20,7 +20,7 @@ string nixBinDir = "/UNINIT";
 bool keepFailed = false;
 bool keepGoing = false;
 bool tryFallback = false;
-Verbosity buildVerbosity = lvlInfo;
+Verbosity buildVerbosity = lvlError;
 unsigned int maxBuildJobs = 1;
 unsigned int buildCores = 1;
 bool readOnlyMode = false;
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index f430492fd407..37bbbfdad886 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -4,6 +4,7 @@
 #include "archive.hh"
 #include "pathlocks.hh"
 #include "worker-protocol.hh"
+#include "derivations.hh"
     
 #include <iostream>
 #include <algorithm>
@@ -16,10 +17,144 @@
 #include <errno.h>
 #include <stdio.h>
 
+#include <sqlite3.h>
+
 
 namespace nix {
 
     
+class SQLiteError : public Error
+{
+public:
+    SQLiteError(sqlite3 * db, const format & f)
+        : Error(format("%1%: %2%") % f.str() % sqlite3_errmsg(db))
+    {
+    }
+};
+
+
+SQLite::~SQLite()
+{
+    try {
+        if (db && sqlite3_close(db) != SQLITE_OK)
+            throw SQLiteError(db, "closing database");
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+
+void SQLiteStmt::create(sqlite3 * db, const string & s)
+{
+    checkInterrupt();
+    assert(!stmt);
+    if (sqlite3_prepare_v2(db, s.c_str(), -1, &stmt, 0) != SQLITE_OK)
+        throw SQLiteError(db, "creating statement");
+    this->db = db;
+}
+
+
+void SQLiteStmt::reset()
+{
+    assert(stmt);
+    if (sqlite3_reset(stmt) != SQLITE_OK)
+        throw SQLiteError(db, "resetting statement");
+    curArg = 1;
+}
+
+
+SQLiteStmt::~SQLiteStmt()
+{
+    try {
+        if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
+            throw SQLiteError(db, "finalizing statement");
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+
+void SQLiteStmt::bind(const string & value)
+{
+    if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
+        throw SQLiteError(db, "binding argument");
+}
+
+
+void SQLiteStmt::bind(int value)
+{
+    if (sqlite3_bind_int(stmt, curArg++, value) != SQLITE_OK)
+        throw SQLiteError(db, "binding argument");
+}
+
+
+void SQLiteStmt::bind64(long long value)
+{
+    if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK)
+        throw SQLiteError(db, "binding argument");
+}
+
+
+void SQLiteStmt::bind()
+{
+    if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK)
+        throw SQLiteError(db, "binding argument");
+}
+
+
+/* Helper class to ensure that prepared statements are reset when
+   leaving the scope that uses them.  Unfinished prepared statements
+   prevent transactions from being aborted, and can cause locks to be
+   kept when they should be released. */
+struct SQLiteStmtUse
+{
+    SQLiteStmt & stmt;
+    SQLiteStmtUse(SQLiteStmt & stmt) : stmt(stmt)
+    {
+        stmt.reset();
+    }
+    ~SQLiteStmtUse()
+    {
+        try {
+            stmt.reset();
+        } catch (...) {
+            ignoreException();
+        }
+    }
+};
+
+
+struct SQLiteTxn 
+{
+    bool active;
+    sqlite3 * db;
+    
+    SQLiteTxn(sqlite3 * db) : active(false) {
+        this->db = db;
+        if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
+            throw SQLiteError(db, "starting transaction");
+        active = true;
+    }
+
+    void commit() 
+    {
+        if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
+            throw SQLiteError(db, "committing transaction");
+        active = false;
+    }
+    
+    ~SQLiteTxn() 
+    {
+        try {
+            if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
+                throw SQLiteError(db, "aborting transaction");
+        } catch (...) {
+            ignoreException();
+        }
+    }
+};
+
+
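
Note: these helpers are used in the RAII style throughout the rest of the file: `SQLiteStmtUse` guarantees a reset even on exceptions, and `SQLiteTxn` rolls back unless `commit()` is reached. A standalone sketch of the same transaction idea against the plain sqlite3 C API:

    #include <sqlite3.h>
    #include <stdexcept>

    struct Txn
    {
        sqlite3 * db;
        bool active;
        Txn(sqlite3 * db) : db(db), active(true)
        {
            if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
                throw std::runtime_error(sqlite3_errmsg(db));
        }
        void commit()
        {
            if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
                throw std::runtime_error(sqlite3_errmsg(db));
            active = false;
        }
        ~Txn()
        {
            // Roll back if commit() was never reached (e.g. an exception).
            if (active) sqlite3_exec(db, "rollback;", 0, 0, 0);
        }
    };

    int main()
    {
        sqlite3 * db;
        if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;
        sqlite3_exec(db, "create table t (x integer);", 0, 0, 0);
        {
            Txn txn(db);
            sqlite3_exec(db, "insert into t values (1);", 0, 0, 0);
            txn.commit(); // without this the insert would be rolled back
        }
        sqlite3_close(db);
        return 0;
    }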
 void checkStoreNotSymlink()
 {
     if (getEnv("NIX_IGNORE_SYMLINK_STORE") == "1") return;
@@ -44,13 +179,13 @@ LocalStore::LocalStore()
     
     schemaPath = nixDBPath + "/schema";
     
-    if (readOnlyMode) return;
+    if (readOnlyMode) {
+        openDB(false);
+        return;
+    }
 
     /* Create missing state directories if they don't already exist. */
     createDirs(nixStore);
-    createDirs(nixDBPath + "/info");
-    createDirs(nixDBPath + "/referrer");
-    createDirs(nixDBPath + "/failed");
     Path profilesDir = nixStateDir + "/profiles";
     createDirs(nixStateDir + "/profiles");
     createDirs(nixStateDir + "/temproots");
@@ -63,12 +198,15 @@ LocalStore::LocalStore()
   
     checkStoreNotSymlink();
 
+    /* Acquire the big fat lock in shared mode to make sure that no
+       schema upgrade is in progress. */
     try {
         Path globalLockPath = nixDBPath + "/big-lock";
         globalLock = openLockFile(globalLockPath.c_str(), true);
     } catch (SysError & e) {
         if (e.errNo != EACCES) throw;
         readOnlyMode = true;
+        openDB(false);
         return;
     }
     
@@ -76,33 +214,55 @@ LocalStore::LocalStore()
         printMsg(lvlError, "waiting for the big Nix store lock...");
         lockFile(globalLock, ltRead, true);
     }
-    
+
+    /* Check the current database schema and if necessary do an
+       upgrade.  */
     int curSchema = getSchema();
     if (curSchema > nixSchemaVersion)
         throw Error(format("current Nix store schema is version %1%, but I only support %2%")
             % curSchema % nixSchemaVersion);
-    if (curSchema == 0) { /* new store */
-        curSchema = nixSchemaVersion; 
+    
+    else if (curSchema == 0) { /* new store */
+        curSchema = nixSchemaVersion;
+        openDB(true);
         writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
     }
-    if (curSchema == 1) throw Error("your Nix store is no longer supported");
-    if (curSchema < nixSchemaVersion) upgradeStore12();
+    
+    else if (curSchema < nixSchemaVersion) {
+        if (curSchema < 5)
+            throw Error(
+                "Your Nix store has a database in Berkeley DB format,\n"
+                "which is no longer supported. To convert to the new format,\n"
+                "please upgrade Nix to version 0.12 first.");
+        
+        if (!lockFile(globalLock, ltWrite, false)) {
+            printMsg(lvlError, "waiting for exclusive access to the Nix store...");
+            lockFile(globalLock, ltWrite, true);
+        }
+
+        /* Get the schema version again, because another process may
+           have performed the upgrade already. */
+        curSchema = getSchema();
 
-    doFsync = queryBoolSetting("fsync-metadata", false);
+        if (curSchema < 6) upgradeStore6();
+
+        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
+
+        lockFile(globalLock, ltRead, true);
+    }
+    
+    else openDB(false);
 }
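
Note: the constructor's upgrade dance is worth spelling out: hold the big lock shared for normal operation, escalate to an exclusive lock only for a schema upgrade, and re-read the version afterwards because another process may have upgraded while we waited. A sketch with a hypothetical, stubbed lock API standing in for lockFile()/getSchema():

    #include <iostream>

    enum LockType { ltRead, ltWrite };
    bool tryLock(int fd, LockType t) { return true; } // stub: non-blocking
    void lock(int fd, LockType t) { }                 // stub: blocking
    int getSchema() { return 6; }                     // stub

    void openStore(int globalLock, int supportedSchema)
    {
        lock(globalLock, ltRead);            // shared: no upgrade in progress
        int curSchema = getSchema();
        if (curSchema < supportedSchema) {
            if (!tryLock(globalLock, ltWrite)) {
                std::cerr << "waiting for exclusive access..." << std::endl;
                lock(globalLock, ltWrite);
            }
            // Re-check: another process may have upgraded meanwhile.
            curSchema = getSchema();
            if (curSchema < supportedSchema) { /* perform the upgrade */ }
            lock(globalLock, ltRead);        // drop back to shared mode
        }
    }

    int main() { openStore(0, 6); }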
 
 
 LocalStore::~LocalStore()
 {
     try {
-        flushDelayedUpdates();
-
         foreach (RunningSubstituters::iterator, i, runningSubstituters) {
             i->second.to.close();
             i->second.from.close();
             i->second.pid.wait(true);
         }
-                
     } catch (...) {
         ignoreException();
     }
@@ -121,6 +281,96 @@ int LocalStore::getSchema()
 }
 
 
+void LocalStore::openDB(bool create)
+{
+    /* Open the Nix database. */
+    if (sqlite3_open_v2((nixDBPath + "/db.sqlite").c_str(), &db.db,
+            SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
+        throw Error("cannot open SQLite database");
+
+    if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
+        throw SQLiteError(db, "setting timeout");
+
+    if (sqlite3_exec(db, "pragma foreign_keys = 1;", 0, 0, 0) != SQLITE_OK)
+        throw SQLiteError(db, "enabling foreign keys");
+
+    /* !!! check whether sqlite has been built with foreign key
+       support */
+    
+    /* Whether SQLite should fsync().  "Normal" synchronous mode
+       should be safe enough.  If the user asks for it, don't sync at
+       all.  This can cause database corruption if the system
+       crashes. */
+    string syncMode = queryBoolSetting("fsync-metadata", true) ? "normal" : "off";
+    if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
+        throw SQLiteError(db, "setting synchronous mode");
+
+    /* Set the SQLite journal mode.  WAL mode is fastest, but doesn't
+       seem entirely stable at the moment (Oct. 2010).  Thus, use
+       truncate mode by default. */
+    string mode = queryBoolSetting("use-sqlite-wal", false) ? "wal" : "truncate";
+    string prevMode;
+    {
+        SQLiteStmt stmt;
+        stmt.create(db, "pragma main.journal_mode;");
+        if (sqlite3_step(stmt) != SQLITE_ROW)
+            throw SQLiteError(db, "querying journal mode");
+        prevMode = string((const char *) sqlite3_column_text(stmt, 0));
+    }
+    if (prevMode != mode &&
+        sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
+        throw SQLiteError(db, "setting journal mode");
+
+    /* Increase the auto-checkpoint interval to 8192 pages.  This
+       seems enough to ensure that instantiating the NixOS system
+       derivation is done in a single fsync(). */
+    if (sqlite3_exec(db, "pragma wal_autocheckpoint = 8192;", 0, 0, 0) != SQLITE_OK)
+        throw SQLiteError(db, "setting autocheckpoint interval");
+    
+    /* Initialise the database schema, if necessary. */
+    if (create) {
+#include "schema.sql.hh"
+        if (sqlite3_exec(db, (const char *) schema, 0, 0, 0) != SQLITE_OK)
+            throw SQLiteError(db, "initialising database schema");
+    }
+
+    /* Backwards compatibility with old (pre-release) databases.  Can
+       remove this eventually. */
+    if (sqlite3_table_column_metadata(db, 0, "ValidPaths", "narSize", 0, 0, 0, 0, 0) != SQLITE_OK) {
+        if (sqlite3_exec(db, "alter table ValidPaths add column narSize integer" , 0, 0, 0) != SQLITE_OK)
+            throw SQLiteError(db, "adding column narSize");
+    }
+
+    /* Prepare SQL statements. */
+    stmtRegisterValidPath.create(db,
+        "insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);");
+    stmtAddReference.create(db,
+        "insert or replace into Refs (referrer, reference) values (?, ?);");
+    stmtQueryPathInfo.create(db,
+        "select id, hash, registrationTime, deriver, narSize from ValidPaths where path = ?;");
+    stmtQueryReferences.create(db,
+        "select path from Refs join ValidPaths on reference = id where referrer = ?;");
+    stmtQueryReferrers.create(db,
+        "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
+    stmtInvalidatePath.create(db,
+        "delete from ValidPaths where path = ?;");
+    stmtRegisterFailedPath.create(db,
+        "insert into FailedPaths (path, time) values (?, ?);");
+    stmtHasPathFailed.create(db,
+        "select time from FailedPaths where path = ?;");
+    stmtQueryFailedPaths.create(db,
+        "select path from FailedPaths;");
+    stmtClearFailedPath.create(db,
+        "delete from FailedPaths where ?1 = '*' or path = ?1;");
+    stmtAddDerivationOutput.create(db,
+        "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
+    stmtQueryValidDerivers.create(db,
+        "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where d.path = ?;");
+    stmtQueryDerivationOutputs.create(db,
+        "select id, path from DerivationOutputs where drv = ?;");
+}
+
+
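
Note: the open/prepare/bind/step pattern used throughout openDB() and the statement wrappers, reduced to a self-contained example using only standard sqlite3 C API calls (sqlite3_open_v2, sqlite3_busy_timeout, sqlite3_prepare_v2, sqlite3_step); table and path names are placeholders:

    #include <sqlite3.h>
    #include <cstdio>

    int main()
    {
        sqlite3 * db;
        if (sqlite3_open_v2(":memory:", &db,
                SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0) != SQLITE_OK)
            return 1;

        // Wait up to an hour on a locked database, as the patch does.
        sqlite3_busy_timeout(db, 60 * 60 * 1000);

        sqlite3_exec(db, "create table ValidPaths (path text);", 0, 0, 0);

        sqlite3_stmt * stmt;
        sqlite3_prepare_v2(db, "select path from ValidPaths where path = ?;",
            -1, &stmt, 0);
        sqlite3_bind_text(stmt, 1, "/nix/store/example", -1, SQLITE_TRANSIENT);

        // SQLITE_ROW per result row, SQLITE_DONE when exhausted.
        while (sqlite3_step(stmt) == SQLITE_ROW)
            std::printf("%s\n", (const char *) sqlite3_column_text(stmt, 0));

        sqlite3_finalize(stmt);
        sqlite3_close(db);
        return 0;
    }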
 void canonicalisePathMetaData(const Path & path, bool recurse)
 {
     checkInterrupt();
@@ -195,181 +445,124 @@ void canonicalisePathMetaData(const Path & path)
 }
 
 
-static Path infoFileFor(const Path & path)
-{
-    string baseName = baseNameOf(path);
-    return (format("%1%/info/%2%") % nixDBPath % baseName).str();
-}
-
-
-static Path referrersFileFor(const Path & path)
+unsigned long long LocalStore::addValidPath(const ValidPathInfo & info)
 {
-    string baseName = baseNameOf(path);
-    return (format("%1%/referrer/%2%") % nixDBPath % baseName).str();
-}
-
-
-static Path failedFileFor(const Path & path)
-{
-    string baseName = baseNameOf(path);
-    return (format("%1%/failed/%2%") % nixDBPath % baseName).str();
-}
-
+    SQLiteStmtUse use(stmtRegisterValidPath);
+    stmtRegisterValidPath.bind(info.path);
+    stmtRegisterValidPath.bind("sha256:" + printHash(info.hash));
+    stmtRegisterValidPath.bind(info.registrationTime);
+    if (info.deriver != "")
+        stmtRegisterValidPath.bind(info.deriver);
+    else
+        stmtRegisterValidPath.bind(); // null
+    if (info.narSize != 0)
+        stmtRegisterValidPath.bind64(info.narSize);
+    else
+        stmtRegisterValidPath.bind(); // null
+    if (sqlite3_step(stmtRegisterValidPath) != SQLITE_DONE)
+        throw SQLiteError(db, format("registering valid path `%1%' in database") % info.path);
+    unsigned long long id = sqlite3_last_insert_rowid(db);
+
+    /* If this is a derivation, then store the derivation outputs in
+       the database.  This is useful for the garbage collector: it can
+       efficiently query whether a path is an output of some
+       derivation. */
+    if (isDerivation(info.path)) {
+        Derivation drv = parseDerivation(readFile(info.path));
+        foreach (DerivationOutputs::iterator, i, drv.outputs) {
+            SQLiteStmtUse use(stmtAddDerivationOutput);
+            stmtAddDerivationOutput.bind(id);
+            stmtAddDerivationOutput.bind(i->first);
+            stmtAddDerivationOutput.bind(i->second.path);
+            if (sqlite3_step(stmtAddDerivationOutput) != SQLITE_DONE)
+                throw SQLiteError(db, format("adding derivation output for `%1%' in database") % info.path);
+        }
+    }
 
-static Path tmpFileForAtomicUpdate(const Path & path)
-{
-    return (format("%1%/.%2%.%3%") % dirOf(path) % getpid() % baseNameOf(path)).str();
+    return id;
 }
 
 
-void LocalStore::appendReferrer(const Path & from, const Path & to, bool lock)
+void LocalStore::addReference(unsigned long long referrer, unsigned long long reference)
 {
-    Path referrersFile = referrersFileFor(from);
-    
-    PathLocks referrersLock;
-    if (lock) {
-        referrersLock.lockPaths(singleton<PathSet, Path>(referrersFile));
-        referrersLock.setDeletion(true);
-    }
-
-    AutoCloseFD fd = open(referrersFile.c_str(), O_WRONLY | O_APPEND | O_CREAT, 0666);
-    if (fd == -1) throw SysError(format("opening file `%1%'") % referrersFile);
-    
-    string s = " " + to;
-    writeFull(fd, (const unsigned char *) s.c_str(), s.size());
-
-    if (doFsync) fsync(fd);
+    SQLiteStmtUse use(stmtAddReference);
+    stmtAddReference.bind(referrer);
+    stmtAddReference.bind(reference);
+    if (sqlite3_step(stmtAddReference) != SQLITE_DONE)
+        throw SQLiteError(db, "adding reference to database");
 }
 
 
-/* Atomically update the referrers file.  If `purge' is true, the set
-   of referrers is set to `referrers'.  Otherwise, the current set of
-   referrers is purged of invalid paths. */
-void LocalStore::rewriteReferrers(const Path & path, bool purge, PathSet referrers)
+void LocalStore::registerValidPath(const ValidPathInfo & info)
 {
-    Path referrersFile = referrersFileFor(path);
-    
-    PathLocks referrersLock(singleton<PathSet, Path>(referrersFile));
-    referrersLock.setDeletion(true);
-
-    if (purge)
-        /* queryReferrers() purges invalid paths, so that's all we
-           need. */
-        queryReferrers(path, referrers);
-
-    Path tmpFile = tmpFileForAtomicUpdate(referrersFile);
-    
-    AutoCloseFD fd = open(tmpFile.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 0666);
-    if (fd == -1) throw SysError(format("opening file `%1%'") % referrersFile);
-    
-    string s;
-    foreach (PathSet::const_iterator, i, referrers) {
-        s += " "; s += *i;
-    }
+    assert(info.hash.type == htSHA256);
+    ValidPathInfo info2(info);
+    if (info2.registrationTime == 0) info2.registrationTime = time(0);
     
-    writeFull(fd, (const unsigned char *) s.c_str(), s.size());
-
-    if (doFsync) fsync(fd);
+    SQLiteTxn txn(db);
     
-    fd.close(); /* for Windows; can't rename open file */
+    unsigned long long id = addValidPath(info2);
 
-    if (rename(tmpFile.c_str(), referrersFile.c_str()) == -1)
-        throw SysError(format("cannot rename `%1%' to `%2%'") % tmpFile % referrersFile);
+    foreach (PathSet::const_iterator, i, info2.references)
+        addReference(id, queryValidPathId(*i));
+        
+    txn.commit();
 }
 
 
-void LocalStore::flushDelayedUpdates()
+void LocalStore::registerFailedPath(const Path & path)
 {
-    foreach (PathSet::iterator, i, delayedUpdates) {
-        rewriteReferrers(*i, true, PathSet());
-    }
-    delayedUpdates.clear();
+    if (hasPathFailed(path)) return;
+    SQLiteStmtUse use(stmtRegisterFailedPath);
+    stmtRegisterFailedPath.bind(path);
+    stmtRegisterFailedPath.bind(time(0));
+    if (sqlite3_step(stmtRegisterFailedPath) != SQLITE_DONE)
+        throw SQLiteError(db, format("registering failed path `%1%'") % path);
 }
 
 
-void LocalStore::registerValidPath(const Path & path,
-    const Hash & hash, const PathSet & references,
-    const Path & deriver)
+bool LocalStore::hasPathFailed(const Path & path)
 {
-    ValidPathInfo info;
-    info.path = path;
-    info.hash = hash;
-    info.references = references;
-    info.deriver = deriver;
-    registerValidPath(info);
+    SQLiteStmtUse use(stmtHasPathFailed);
+    stmtHasPathFailed.bind(path);
+    int res = sqlite3_step(stmtHasPathFailed);
+    if (res != SQLITE_DONE && res != SQLITE_ROW)
+        throw SQLiteError(db, "querying whether path failed");
+    return res == SQLITE_ROW;
 }
 
 
-void LocalStore::registerValidPath(const ValidPathInfo & info, bool ignoreValidity)
+PathSet LocalStore::queryFailedPaths()
 {
-    Path infoFile = infoFileFor(info.path);
-
-    ValidPathInfo oldInfo;
-    if (pathExists(infoFile)) oldInfo = queryPathInfo(info.path);
-
-    /* Note that it's possible for infoFile to already exist. */
-
-    /* Acquire a lock on each referrer file.  This prevents those
-       paths from being invalidated.  (It would be a violation of the
-       store invariants if we registered info.path as valid while some
-       of its references are invalid.)  NB: there can be no deadlock
-       here since we're acquiring the locks in sorted order. */
-    PathSet lockNames;
-    foreach (PathSet::const_iterator, i, info.references)
-        if (*i != info.path) lockNames.insert(referrersFileFor(*i));
-    PathLocks referrerLocks(lockNames);
-    referrerLocks.setDeletion(true);
-        
-    string refs;
-    foreach (PathSet::const_iterator, i, info.references) {
-        if (!refs.empty()) refs += " ";
-        refs += *i;
-
-        if (!ignoreValidity && *i != info.path && !isValidPath(*i))
-            throw Error(format("cannot register `%1%' as valid, because its reference `%2%' isn't valid")
-                % info.path % *i);
-
-        /* Update the referrer mapping for *i.  This must be done
-           before the info file is written to maintain the invariant
-           that if `path' is a valid path, then all its references
-           have referrer mappings back to `path'.  A " " is prefixed
-           to separate it from the previous entry.  It's not suffixed
-           to deal with interrupted partial writes to this file. */
-        if (oldInfo.references.find(*i) == oldInfo.references.end())
-            appendReferrer(*i, info.path, false);
+    SQLiteStmtUse use(stmtQueryFailedPaths);
+
+    PathSet res;
+    int r;
+    while ((r = sqlite3_step(stmtQueryFailedPaths)) == SQLITE_ROW) {
+        const char * s = (const char *) sqlite3_column_text(stmtQueryFailedPaths, 0);
+        assert(s);
+        res.insert(s);
     }
 
-    assert(info.hash.type == htSHA256);
+    if (r != SQLITE_DONE)
+        throw SQLiteError(db, "error querying failed paths");
 
-    string s = (format(
-        "Hash: sha256:%1%\n"
-        "References: %2%\n"
-        "Deriver: %3%\n"
-        "Registered-At: %4%\n")
-        % printHash(info.hash) % refs % info.deriver %
-        (oldInfo.registrationTime ? oldInfo.registrationTime : time(0))).str();
-
-    /* Atomically rewrite the info file. */
-    Path tmpFile = tmpFileForAtomicUpdate(infoFile);
-    writeFile(tmpFile, s, doFsync);
-    if (rename(tmpFile.c_str(), infoFile.c_str()) == -1)
-        throw SysError(format("cannot rename `%1%' to `%2%'") % tmpFile % infoFile);
-
-    pathInfoCache[info.path] = info;
+    return res;
 }
 
 
-void LocalStore::registerFailedPath(const Path & path)
+void LocalStore::clearFailedPaths(const PathSet & paths)
 {
-    /* Write an empty file in the .../failed directory to denote the
-       failure of the builder for `path'. */
-    writeFile(failedFileFor(path), "");
-}
+    SQLiteTxn txn(db);
 
+    foreach (PathSet::const_iterator, i, paths) {
+        SQLiteStmtUse use(stmtClearFailedPath);
+        stmtClearFailedPath.bind(*i);
+        if (sqlite3_step(stmtClearFailedPath) != SQLITE_DONE)
+            throw SQLiteError(db, format("clearing failed path `%1%' in database") % *i);
+    }
 
-bool LocalStore::hasPathFailed(const Path & path)
-{
-    return pathExists(failedFileFor(path));
+    txn.commit();
 }
 
 
@@ -387,91 +580,94 @@ Hash parseHashField(const Path & path, const string & s)
 }
 
 
-ValidPathInfo LocalStore::queryPathInfo(const Path & path, bool ignoreErrors)
+ValidPathInfo LocalStore::queryPathInfo(const Path & path)
 {
-    ValidPathInfo res;
-    res.path = path;
+    ValidPathInfo info;
+    info.path = path;
 
     assertStorePath(path);
 
-    if (!isValidPath(path))
-        throw Error(format("path `%1%' is not valid") % path);
+    /* Get the path info. */
+    SQLiteStmtUse use1(stmtQueryPathInfo);
 
-    std::map<Path, ValidPathInfo>::iterator lookup = pathInfoCache.find(path);
-    if (lookup != pathInfoCache.end()) return lookup->second;
+    stmtQueryPathInfo.bind(path);
     
-    /* Read the info file. */
-    Path infoFile = infoFileFor(path);
-    if (!pathExists(infoFile))
-        throw Error(format("path `%1%' is not valid") % path);
-    string info = readFile(infoFile);
+    int r = sqlite3_step(stmtQueryPathInfo);
+    if (r == SQLITE_DONE) throw Error(format("path `%1%' is not valid") % path);
+    if (r != SQLITE_ROW) throw SQLiteError(db, "querying path in database");
 
-    /* Parse it. */
-    Strings lines = tokenizeString(info, "\n");
+    info.id = sqlite3_column_int(stmtQueryPathInfo, 0);
 
-    foreach (Strings::iterator, i, lines) {
-        string::size_type p = i->find(':');
-        if (p == string::npos) {
-            if (!ignoreErrors)
-                throw Error(format("corrupt line in `%1%': %2%") % infoFile % *i);
-            continue; /* bad line */
-        }
-        string name(*i, 0, p);
-        string value(*i, p + 2);
-        if (name == "References") {
-            Strings refs = tokenizeString(value, " ");
-            res.references = PathSet(refs.begin(), refs.end());
-        } else if (name == "Deriver") {
-            res.deriver = value;
-        } else if (name == "Hash") {
-            try {
-                res.hash = parseHashField(path, value);
-            } catch (Error & e) {
-                if (!ignoreErrors) throw;
-                printMsg(lvlError, format("cannot parse hash field in `%1%': %2%") % infoFile % e.msg());
-            }
-        } else if (name == "Registered-At") {
-            int n = 0;
-            string2Int(value, n);
-            res.registrationTime = n;
-        }
+    const char * s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 1);
+    assert(s);
+    info.hash = parseHashField(path, s);
+    
+    info.registrationTime = sqlite3_column_int(stmtQueryPathInfo, 2);
+
+    s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 3);
+    if (s) info.deriver = s;
+
+    /* Note that narSize = NULL yields 0. */
+    info.narSize = sqlite3_column_int64(stmtQueryPathInfo, 4);
+
+    /* Get the references. */
+    SQLiteStmtUse use2(stmtQueryReferences);
+
+    stmtQueryReferences.bind(info.id);
+
+    while ((r = sqlite3_step(stmtQueryReferences)) == SQLITE_ROW) {
+        s = (const char *) sqlite3_column_text(stmtQueryReferences, 0);
+        assert(s);
+        info.references.insert(s);
     }
 
-    return pathInfoCache[path] = res;
+    if (r != SQLITE_DONE)
+        throw SQLiteError(db, format("error getting references of `%1%'") % path);
+
+    return info;
 }
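The column indices hard-coded above (0 = id, 1 = hash, 2 = registrationTime, 3 = deriver, 4 = narSize) pin down the select order of stmtQueryPathInfo. The statement itself is prepared outside this excerpt; a sketch consistent with those indices and with the ValidPaths table in schema.sql would be:

    // Assumed SQL behind stmtQueryPathInfo (a reconstruction, not
    // quoted from the commit):
    stmtQueryPathInfo.create(db,
        "select id, hash, registrationTime, deriver, narSize "
        "from ValidPaths where path = ?;");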
 
 
-bool LocalStore::isValidPath(const Path & path)
+unsigned long long LocalStore::queryValidPathId(const Path & path)
 {
-    /* Files in the info directory starting with a `.' are temporary
-       files. */
-    if (baseNameOf(path).at(0) == '.') return false;
+    SQLiteStmtUse use(stmtQueryPathInfo);
+    stmtQueryPathInfo.bind(path);
+    int res = sqlite3_step(stmtQueryPathInfo);
+    if (res == SQLITE_ROW) return sqlite3_column_int(stmtQueryPathInfo, 0);
+    if (res == SQLITE_DONE) throw Error(format("path `%1%' is not valid") % path);
+    throw SQLiteError(db, "querying path in database");
+}
 
-    /* A path is valid if its info file exists and has a non-zero
-       size.  (The non-zero size restriction is to be robust to
-       certain kinds of filesystem corruption, particularly with
-       ext4.) */
-    Path infoFile = infoFileFor(path);
 
-    struct stat st;
-    if (lstat(infoFile.c_str(), &st)) {
-        if (errno == ENOENT) return false;
-        throw SysError(format("getting status of `%1%'") % infoFile);
-    }
-
-    if (st.st_size == 0) return false;
-    
-    return true;
+bool LocalStore::isValidPath(const Path & path)
+{
+    SQLiteStmtUse use(stmtQueryPathInfo);
+    stmtQueryPathInfo.bind(path);
+    int res = sqlite3_step(stmtQueryPathInfo);
+    if (res != SQLITE_DONE && res != SQLITE_ROW)
+        throw SQLiteError(db, "querying path in database");
+    return res == SQLITE_ROW;
 }
 
 
 PathSet LocalStore::queryValidPaths()
 {
-    PathSet paths;
-    Strings entries = readDirectory(nixDBPath + "/info");
-    foreach (Strings::iterator, i, entries)
-        if (i->at(0) != '.') paths.insert(nixStore + "/" + *i);
-    return paths;
+    SQLiteStmt stmt;
+    stmt.create(db, "select path from ValidPaths");
+    
+    PathSet res;
+    
+    int r;
+    while ((r = sqlite3_step(stmt)) == SQLITE_ROW) {
+        const char * s = (const char *) sqlite3_column_text(stmt, 0);
+        assert(s);
+        res.insert(s);
+    }
+
+    if (r != SQLITE_DONE)
+        throw SQLiteError(db, "error getting valid paths");
+
+    return res;
 }
 
 
@@ -483,45 +679,73 @@ void LocalStore::queryReferences(const Path & path,
 }
 
 
-bool LocalStore::queryReferrersInternal(const Path & path, PathSet & referrers)
+void LocalStore::queryReferrers(const Path & path, PathSet & referrers)
 {
-    bool allValid = true;
-    
-    if (!isValidPath(path))
-        throw Error(format("path `%1%' is not valid") % path);
+    assertStorePath(path);
 
-    /* No locking is necessary here: updates are only done by
-       appending or by atomically replacing the file.  When appending,
-       there is a possibility that we see a partial entry, but it will
-       just be filtered out below (the partially written path will not
-       be valid, so it will be ignored). */
+    SQLiteStmtUse use(stmtQueryReferrers);
 
-    Path referrersFile = referrersFileFor(path);
-    if (!pathExists(referrersFile)) return true;
-    
-    AutoCloseFD fd = open(referrersFile.c_str(), O_RDONLY);
-    if (fd == -1) throw SysError(format("opening file `%1%'") % referrersFile);
+    stmtQueryReferrers.bind(path);
 
-    Paths refs = tokenizeString(readFile(fd), " ");
+    int r;
+    while ((r = sqlite3_step(stmtQueryReferrers)) == SQLITE_ROW) {
+        const char * s = (const char *) sqlite3_column_text(stmtQueryReferrers, 0);
+        assert(s);
+        referrers.insert(s);
+    }
 
-    foreach (Paths::iterator, i, refs)
-        /* Referrers can be invalid (see registerValidPath() for the
-           invariant), so we only return one if it is valid. */
-        if (isStorePath(*i) && isValidPath(*i)) referrers.insert(*i); else allValid = false;
+    if (r != SQLITE_DONE)
+        throw SQLiteError(db, format("error getting references of `%1%'") % path);
+}
 
-    return allValid;
+
+Path LocalStore::queryDeriver(const Path & path)
+{
+    return queryPathInfo(path).deriver;
 }
 
 
-void LocalStore::queryReferrers(const Path & path, PathSet & referrers)
+PathSet LocalStore::queryValidDerivers(const Path & path)
 {
-    queryReferrersInternal(path, referrers);
+    assertStorePath(path);
+
+    SQLiteStmtUse use(stmtQueryValidDerivers);
+    stmtQueryValidDerivers.bind(path);
+
+    PathSet derivers;
+    int r;
+    while ((r = sqlite3_step(stmtQueryValidDerivers)) == SQLITE_ROW) {
+        const char * s = (const char *) sqlite3_column_text(stmtQueryValidDerivers, 1);
+        assert(s);
+        derivers.insert(s);
+    }
+    
+    if (r != SQLITE_DONE)
+        throw SQLiteError(db, format("error getting valid derivers of `%1%'") % path);
+    
+    return derivers;
 }
 
 
-Path LocalStore::queryDeriver(const Path & path)
+PathSet LocalStore::queryDerivationOutputs(const Path & path)
 {
-    return queryPathInfo(path).deriver;
+    SQLiteTxn txn(db);
+    
+    SQLiteStmtUse use(stmtQueryDerivationOutputs);
+    stmtQueryDerivationOutputs.bind(queryValidPathId(path));
+    
+    PathSet outputs;
+    int r;
+    while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) {
+        const char * s = (const char *) sqlite3_column_text(stmtQueryDerivationOutputs, 1);
+        assert(s);
+        outputs.insert(s);
+    }
+    
+    if (r != SQLITE_DONE)
+        throw SQLiteError(db, format("error getting outputs of `%1%'") % path);
+
+    return outputs;
 }
 
 
@@ -615,6 +839,7 @@ bool LocalStore::querySubstitutablePathInfo(const Path & substituter,
         info.references.insert(p);
     }
     info.downloadSize = getIntLine<long long>(run.from);
+    info.narSize = getIntLine<long long>(run.from);
     
     return true;
 }
@@ -635,39 +860,22 @@ Hash LocalStore::queryPathHash(const Path & path)
 }
 
 
-static void dfsVisit(std::map<Path, ValidPathInfo> & infos,
-    const Path & path, PathSet & visited, Paths & sorted)
-{
-    if (visited.find(path) != visited.end()) return;
-    visited.insert(path);
-
-    ValidPathInfo & info(infos[path]);
-    
-    foreach (PathSet::iterator, i, info.references)
-        if (infos.find(*i) != infos.end())
-            dfsVisit(infos, *i, visited, sorted);
-
-    sorted.push_back(path);
-}
-
-
 void LocalStore::registerValidPaths(const ValidPathInfos & infos)
 {
-    std::map<Path, ValidPathInfo> infosMap;
+    SQLiteTxn txn(db);
     
-    /* Sort the paths topologically under the references relation, so
-       that if path A is referenced by B, then A is registered before
-       B. */
-    foreach (ValidPathInfos::const_iterator, i, infos)
-        infosMap[i->path] = *i;
-
-    PathSet visited;
-    Paths sorted;
     foreach (ValidPathInfos::const_iterator, i, infos)
-        dfsVisit(infosMap, i->path, visited, sorted);
+        /* !!! Maybe the registration info should be updated if the
+           path is already valid. */
+        if (!isValidPath(i->path)) addValidPath(*i);
+
+    foreach (ValidPathInfos::const_iterator, i, infos) {
+        unsigned long long referrer = queryValidPathId(i->path);
+        foreach (PathSet::iterator, j, i->references)
+            addReference(referrer, queryValidPathId(*j));
+    }
 
-    foreach (Paths::iterator, i, sorted)
-        registerValidPath(infosMap[*i]);
+    txn.commit();
 }
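Registration is deliberately two-pass: the first loop gives every path a row in ValidPaths, and the second wires up the Refs table. This matters because a reference may point at a path registered in the same batch, which has no row id until the first pass has finished. A hypothetical call (hashes and other fields elided; `store' stands for some LocalStore) that depends on this:

    // B references A; both are registered in one call, in "wrong" order.
    // The two-pass loop guarantees A has a row id before B's reference
    // to it is recorded.
    ValidPathInfo a, b;                    // other fields elided
    a.path = "/nix/store/aaaa-a";          // placeholder store paths
    b.path = "/nix/store/bbbb-b";
    b.references.insert(a.path);
    ValidPathInfos infos;
    infos.push_back(b);
    infos.push_back(a);
    store.registerValidPaths(infos);       // succeeds regardless of order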
 
 
@@ -677,43 +885,15 @@ void LocalStore::invalidatePath(const Path & path)
 {
     debug(format("invalidating path `%1%'") % path);
 
-    ValidPathInfo info;
+    SQLiteStmtUse use(stmtInvalidatePath);
 
-    if (pathExists(infoFileFor(path))) {
-        info = queryPathInfo(path);
+    stmtInvalidatePath.bind(path);
 
-        /* Remove the info file. */
-        Path p = infoFileFor(path);
-        if (unlink(p.c_str()) == -1)
-            throw SysError(format("unlinking `%1%'") % p);
-    }
+    if (sqlite3_step(stmtInvalidatePath) != SQLITE_DONE)
+        throw SQLiteError(db, format("invalidating path `%1%' in database") % path);
 
-    /* Remove the referrers file for `path'. */
-    Path p = referrersFileFor(path);
-    if (pathExists(p) && unlink(p.c_str()) == -1)
-        throw SysError(format("unlinking `%1%'") % p);
-
-    /* Clear `path' from the info cache. */
-    pathInfoCache.erase(path);
-    delayedUpdates.erase(path);
-
-    /* Cause the referrer files for each path referenced by this one
-       to be updated.  This has to happen after removing the info file
-       to preserve the invariant (see registerValidPath()).
-
-       The referrer files are updated lazily in flushDelayedUpdates()
-       to prevent quadratic performance in the garbage collector
-       (i.e., when N referrers to some path X are deleted, we have to
-       rewrite the referrers file for X N times, causing O(N^2) I/O).
-
-       What happens if we die before the referrer file can be updated?
-       That's not a problem, because stale (invalid) entries in the
-       referrer file are ignored by queryReferrers().  Thus a referrer
-       file is allowed to have stale entries; removing them is just an
-       optimisation.  verifyStore() gets rid of them eventually.
-    */
-    foreach (PathSet::iterator, i, info.references)
-        if (*i != path) delayedUpdates.insert(*i);
+    /* Note that the foreign key constraints on the Refs table take
+       care of deleting the references entries for `path'. */
 }
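Note that SQLite does not enforce foreign key constraints unless they are switched on per connection, so the cascade relied on here presupposes something like the following in openDB() (which is not part of this excerpt):

    /* Sketch; assumed to run once per connection in openDB(). */
    if (sqlite3_exec(db, "pragma foreign_keys = 1;", 0, 0, 0) != SQLITE_OK)
        throw SQLiteError(db, "enabling foreign keys");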
 
 
@@ -749,10 +929,18 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
                the path in the database.  We may just have computed it
                above (if called with recursive == true and hashAlgo ==
                sha256); otherwise, compute it here. */
-            registerValidPath(dstPath,
-                (recursive && hashAlgo == htSHA256) ? h :
-                (recursive ? hashString(htSHA256, dump) : hashPath(htSHA256, dstPath)),
-                PathSet(), "");
+            HashResult hash;
+            if (recursive) {
+                hash.first = hashAlgo == htSHA256 ? h : hashString(htSHA256, dump);
+                hash.second = dump.size();
+            } else
+                hash = hashPath(htSHA256, dstPath);
+            
+            ValidPathInfo info;
+            info.path = dstPath;
+            info.hash = hash.first;
+            info.narSize = hash.second;
+            registerValidPath(info);
         }
 
         outputLock.setDeletion(true);
@@ -799,9 +987,15 @@ Path LocalStore::addTextToStore(const string & name, const string & s,
             writeFile(dstPath, s);
 
             canonicalisePathMetaData(dstPath);
+
+            HashResult hash = hashPath(htSHA256, dstPath);
             
-            registerValidPath(dstPath,
-                hashPath(htSHA256, dstPath), references, "");
+            ValidPathInfo info;
+            info.path = dstPath;
+            info.hash = hash.first;
+            info.narSize = hash.second;
+            info.references = references;
+            registerValidPath(info);
         }
 
         outputLock.setDeletion(true);
@@ -815,16 +1009,19 @@ struct HashAndWriteSink : Sink
 {
     Sink & writeSink;
     HashSink hashSink;
-    bool hashing;
     HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
     {
-        hashing = true;
     }
     virtual void operator ()
         (const unsigned char * data, unsigned int len)
     {
         writeSink(data, len);
-        if (hashing) hashSink(data, len);
+        hashSink(data, len);
+    }
+    Hash currentHash()
+    {
+        HashSink hashSinkClone(hashSink);
+        return hashSinkClone.finish().first;
     }
 };
 
@@ -855,6 +1052,15 @@ void LocalStore::exportPath(const Path & path, bool sign,
     
     dumpPath(path, hashAndWriteSink);
 
+    /* Refuse to export paths that have changed.  This prevents
+       filesystem corruption from spreading to other machines.
+       Don't complain if the stored hash is zero (unknown). */
+    Hash hash = hashAndWriteSink.currentHash();
+    Hash storedHash = queryPathHash(path);
+    if (hash != storedHash && storedHash != Hash(storedHash.type))
+        throw Error(format("hash of path `%1%' has changed from `%2%' to `%3%'!") % path
+            % printHash(storedHash) % printHash(hash));
+
     writeInt(EXPORT_MAGIC, hashAndWriteSink);
 
     writeString(path, hashAndWriteSink);
@@ -867,14 +1073,11 @@ void LocalStore::exportPath(const Path & path, bool sign,
     writeString(deriver, hashAndWriteSink);
 
     if (sign) {
-        Hash hash = hashAndWriteSink.hashSink.finish();
-        hashAndWriteSink.hashing = false;
-
+        Hash hash = hashAndWriteSink.currentHash();
+ 
         writeInt(1, hashAndWriteSink);
         
         Path tmpDir = createTempDir();
-        PathLocks tmpDirLock(singleton<PathSet, Path>(tmpDir));
-        tmpDirLock.setDeletion(true);
         AutoDelete delTmp(tmpDir);
         Path hashFile = tmpDir + "/hash";
         writeFile(hashFile, printHash(hash));
@@ -916,6 +1119,22 @@ struct HashAndReadSource : Source
 };
 
 
+/* Create a temporary directory in the store that won't be
+   garbage-collected. */
+Path LocalStore::createTempDirInStore()
+{
+    Path tmpDir;
+    do {
+        /* There is a slight possibility that `tmpDir' gets deleted by
+           the GC between createTempDir() and addTempRoot(), so repeat
+           until `tmpDir' exists. */
+        tmpDir = createTempDir(nixStore);
+        addTempRoot(tmpDir);
+    } while (!pathExists(tmpDir));
+    return tmpDir;
+}
+
+
 Path LocalStore::importPath(bool requireSignature, Source & source)
 {
     HashAndReadSource hashAndReadSource(source);
@@ -923,10 +1142,8 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
     /* We don't yet know what store path this archive contains (the
        store path follows the archive data proper), and besides, we
        don't know yet whether the signature is valid. */
-    Path tmpDir = createTempDir(nixStore);
-    PathLocks tmpDirLock(singleton<PathSet, Path>(tmpDir));
-    tmpDirLock.setDeletion(true);
-    AutoDelete delTmp(tmpDir); /* !!! could be GC'ed! */
+    Path tmpDir = createTempDirInStore();
+    AutoDelete delTmp(tmpDir);
     Path unpacked = tmpDir + "/unpacked";
 
     restorePath(unpacked, hashAndReadSource);
@@ -942,7 +1159,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
     Path deriver = readString(hashAndReadSource);
     if (deriver != "") assertStorePath(deriver);
 
-    Hash hash = hashAndReadSource.hashSink.finish();
+    Hash hash = hashAndReadSource.hashSink.finish().first;
     hashAndReadSource.hashing = false;
 
     bool haveSignature = readInt(hashAndReadSource) == 1;
@@ -1006,9 +1223,15 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
             
             /* !!! if we were clever, we could prevent the hashPath()
                here. */
-            if (deriver != "" && !isValidPath(deriver)) deriver = "";
-            registerValidPath(dstPath,
-                hashPath(htSHA256, dstPath), references, deriver);
+            HashResult hash = hashPath(htSHA256, dstPath);
+            
+            ValidPathInfo info;
+            info.path = dstPath;
+            info.hash = hash.first;
+            info.narSize = hash.second;
+            info.references = references;
+            info.deriver = deriver != "" && isValidPath(deriver) ? deriver : "";
+            registerValidPath(info);
         }
         
         outputLock.setDeletion(true);
@@ -1025,12 +1248,9 @@ void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFr
 
     assertStorePath(path);
 
+    SQLiteTxn txn(db);
+
     if (isValidPath(path)) {
-        /* Acquire a lock on the referrers file to prevent new
-           referrers to this path from appearing while we're deleting
-           it. */
-        PathLocks referrersLock(singleton<PathSet, Path>(referrersFileFor(path)));
-        referrersLock.setDeletion(true);
         PathSet referrers; queryReferrers(path, referrers);
         referrers.erase(path); /* ignore self-references */
         if (!referrers.empty())
@@ -1040,148 +1260,170 @@ void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFr
     }
 
     deletePathWrapped(path, bytesFreed, blocksFreed);
+
+    txn.commit();
 }
 
 
 void LocalStore::verifyStore(bool checkContents)
 {
-    /* Check whether all valid paths actually exist. */
-    printMsg(lvlInfo, "checking path existence");
+    printMsg(lvlError, format("reading the Nix store..."));
 
-    PathSet validPaths2 = queryValidPaths(), validPaths;
+    /* Acquire the global GC lock to prevent a garbage collection. */
+    AutoCloseFD fdGCLock = openGCLock(ltWrite);
     
-    foreach (PathSet::iterator, i, validPaths2) {
-        checkInterrupt();
-        if (!isStorePath(*i)) {
-            printMsg(lvlError, format("path `%1%' is not in the Nix store") % *i);
-            invalidatePath(*i);
-        } else if (!pathExists(*i)) {
-            printMsg(lvlError, format("path `%1%' disappeared") % *i);
-            invalidatePath(*i);
-        } else {
-            Path infoFile = infoFileFor(*i);
-            struct stat st;
-            if (lstat(infoFile.c_str(), &st))
-                throw SysError(format("getting status of `%1%'") % infoFile);
-            if (st.st_size == 0) {
-                printMsg(lvlError, format("removing corrupt info file `%1%'") % infoFile);
-                if (unlink(infoFile.c_str()) == -1)
-                    throw SysError(format("unlinking `%1%'") % infoFile);
-            }
-            else validPaths.insert(*i);
-        }
-    }
+    Paths entries = readDirectory(nixStore);
+    PathSet store(entries.begin(), entries.end());
 
+    /* Check whether all valid paths actually exist. */
+    printMsg(lvlInfo, "checking path existence...");
 
-    /* Check the store path meta-information. */
-    printMsg(lvlInfo, "checking path meta-information");
+    PathSet validPaths2 = queryValidPaths(), validPaths, done;
 
-    std::map<Path, PathSet> referrersCache;
-    
-    foreach (PathSet::iterator, i, validPaths) {
-        bool update = false;
-        ValidPathInfo info = queryPathInfo(*i, true);
-
-        /* Check the references: each reference should be valid, and
-           it should have a matching referrer. */
-        foreach (PathSet::iterator, j, info.references) {
-            if (validPaths.find(*j) == validPaths.end()) {
-                printMsg(lvlError, format("incomplete closure: `%1%' needs missing `%2%'")
-                    % *i % *j);
-                /* nothing we can do about it... */
-            } else {
-                if (referrersCache.find(*j) == referrersCache.end())
-                    queryReferrers(*j, referrersCache[*j]);
-                if (referrersCache[*j].find(*i) == referrersCache[*j].end()) {
-                    printMsg(lvlError, format("adding missing referrer mapping from `%1%' to `%2%'")
-                        % *j % *i);
-                    appendReferrer(*j, *i, true);
-                }
-            }
-        }
+    foreach (PathSet::iterator, i, validPaths2)
+        verifyPath(*i, store, done, validPaths);
 
-        /* Check the deriver.  (Note that the deriver doesn't have to
-           be a valid path.) */
-        if (!info.deriver.empty() && !isStorePath(info.deriver)) {
-            info.deriver = "";
-            update = true;
-        }
+    /* Optionally, check the content hashes (slow). */
+    if (checkContents) {
+        printMsg(lvlInfo, "checking hashes...");
 
-        /* Check the content hash (optionally - slow). */
-        if (info.hash.hashSize == 0) {
-            printMsg(lvlError, format("re-hashing `%1%'") % *i);
-            info.hash = hashPath(htSHA256, *i);
-            update = true;
-        } else if (checkContents) {
-            debug(format("checking contents of `%1%'") % *i);
-            Hash current = hashPath(info.hash.type, *i);
+        foreach (PathSet::iterator, i, validPaths) {
+            ValidPathInfo info = queryPathInfo(*i);
+
+            /* Check the content hash. */
+            printMsg(lvlTalkative, format("checking contents of `%1%'") % *i);
+            Hash current = hashPath(info.hash.type, *i).first;
             if (current != info.hash) {
                 printMsg(lvlError, format("path `%1%' was modified! "
                         "expected hash `%2%', got `%3%'")
                     % *i % printHash(info.hash) % printHash(current));
             }
+            
+            /* !!! Check info.narSize */
         }
-
-        if (update) registerValidPath(info);
     }
+}
+
 
-    referrersCache.clear();
+void LocalStore::verifyPath(const Path & path, const PathSet & store,
+    PathSet & done, PathSet & validPaths)
+{
+    checkInterrupt();
     
+    if (done.find(path) != done.end()) return;
+    done.insert(path);
 
-    /* Check the referrers. */
-    printMsg(lvlInfo, "checking referrers");
+    if (!isStorePath(path)) {
+        printMsg(lvlError, format("path `%1%' is not in the Nix store") % path);
+        invalidatePath(path);
+        return;
+    }
 
-    std::map<Path, PathSet> referencesCache;
-    
-    Strings entries = readDirectory(nixDBPath + "/referrer");
-    foreach (Strings::iterator, i, entries) {
-        Path from = nixStore + "/" + *i;
+    if (store.find(baseNameOf(path)) == store.end()) {
+        /* Check any referrers first.  If we can invalidate them
+           first, then we can invalidate this path as well. */
+        bool canInvalidate = true;
+        PathSet referrers; queryReferrers(path, referrers);
+        foreach (PathSet::iterator, i, referrers)
+            if (*i != path) {
+                verifyPath(*i, store, done, validPaths);
+                if (validPaths.find(*i) != validPaths.end())
+                    canInvalidate = false;
+            }
+
+        if (canInvalidate) {
+            printMsg(lvlError, format("path `%1%' disappeared, removing from database...") % path);
+            invalidatePath(path);
+        } else
+            printMsg(lvlError, format("path `%1%' disappeared, but it still has valid referrers!") % path);
         
-        if (validPaths.find(from) == validPaths.end()) {
-            /* !!! This removes lock files as well.  Need to check
-               whether that's okay. */
-            printMsg(lvlError, format("removing referrers file for invalid `%1%'") % from);
-            Path p = referrersFileFor(from);
-            if (unlink(p.c_str()) == -1)
-                throw SysError(format("unlinking `%1%'") % p);
-            continue;
-        }
+        return;
+    }
+    
+    validPaths.insert(path);
+}
 
-        PathSet referrers;
-        bool allValid = queryReferrersInternal(from, referrers);
-        bool update = false;
 
-        if (!allValid) {
-            printMsg(lvlError, format("removing some stale referrers for `%1%'") % from);
-            update = true;
-        }
+/* Functions for upgrading from the pre-SQLite database. */
+
+PathSet LocalStore::queryValidPathsOld()
+{
+    PathSet paths;
+    Strings entries = readDirectory(nixDBPath + "/info");
+    foreach (Strings::iterator, i, entries)
+        if (i->at(0) != '.') paths.insert(nixStore + "/" + *i);
+    return paths;
+}
 
-        /* Each referrer should have a matching reference. */
-        PathSet referrersNew;
-        foreach (PathSet::iterator, j, referrers) {
-            if (referencesCache.find(*j) == referencesCache.end())
-                queryReferences(*j, referencesCache[*j]);
-            if (referencesCache[*j].find(from) == referencesCache[*j].end()) {
-                printMsg(lvlError, format("removing unexpected referrer mapping from `%1%' to `%2%'")
-                    % from % *j);
-                update = true;
-            } else referrersNew.insert(*j);
-        }
 
-        if (update) rewriteReferrers(from, false, referrersNew);
+ValidPathInfo LocalStore::queryPathInfoOld(const Path & path)
+{
+    ValidPathInfo res;
+    res.path = path;
+
+    /* Read the info file. */
+    string baseName = baseNameOf(path);
+    Path infoFile = (format("%1%/info/%2%") % nixDBPath % baseName).str();
+    if (!pathExists(infoFile))
+        throw Error(format("path `%1%' is not valid") % path);
+    string info = readFile(infoFile);
+
+    /* Parse it. */
+    Strings lines = tokenizeString(info, "\n");
+
+    foreach (Strings::iterator, i, lines) {
+        string::size_type p = i->find(':');
+        if (p == string::npos)
+            throw Error(format("corrupt line in `%1%': %2%") % infoFile % *i);
+        string name(*i, 0, p);
+        string value(*i, p + 2);
+        if (name == "References") {
+            Strings refs = tokenizeString(value, " ");
+            res.references = PathSet(refs.begin(), refs.end());
+        } else if (name == "Deriver") {
+            res.deriver = value;
+        } else if (name == "Hash") {
+            res.hash = parseHashField(path, value);
+        } else if (name == "Registered-At") {
+            int n = 0;
+            string2Int(value, n);
+            res.registrationTime = n;
+        }
     }
+
+    return res;
 }
 
 
-/* Upgrade from schema 4 (Nix 0.11) to schema 5 (Nix >= 0.12).  The
-   old schema uses Berkeley DB, the new one stores store path
-   meta-information in files. */
-void LocalStore::upgradeStore12()
+/* Upgrade from schema 5 (Nix 0.12-0.16) to schema 6 (Nix >= 1.0). */
+void LocalStore::upgradeStore6()
 {
-    throw Error(
-        "Your Nix store has a database in Berkeley DB format,\n"
-        "which is no longer supported. To convert to the new format,\n"
-        "please upgrade Nix to version 0.12 first.");
+    printMsg(lvlError, "upgrading Nix store to new schema (this may take a while)...");
+
+    openDB(true);
+
+    PathSet validPaths = queryValidPathsOld();
+
+    SQLiteTxn txn(db);
+    
+    foreach (PathSet::iterator, i, validPaths) {
+        addValidPath(queryPathInfoOld(*i));
+        std::cerr << ".";
+    }
+
+    std::cerr << "|";
+    
+    foreach (PathSet::iterator, i, validPaths) {
+        ValidPathInfo info = queryPathInfoOld(*i);
+        unsigned long long referrer = queryValidPathId(*i);
+        foreach (PathSet::iterator, j, info.references)
+            addReference(referrer, queryValidPathId(*j));
+        std::cerr << ".";
+    }
+
+    std::cerr << "\n";
+
+    txn.commit();
 }
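The whole migration runs inside a single transaction, so an interrupted upgrade leaves the old flat-file database authoritative rather than a half-filled SQLite one. The RAII contract assumed of SQLiteTxn (defined elsewhere in this commit):

    {
        SQLiteTxn txn(db);   // issues "begin"
        // ... bulk addValidPath() / addReference() calls ...
        txn.commit();        // issues "commit"; skipped on an exception,
    }                        // in which case the destructor rolls back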
 
 
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 31f8d91096a3..4076e595748a 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -5,6 +5,11 @@
 
 #include "store-api.hh"
 #include "util.hh"
+#include "pathlocks.hh"
+
+
+class sqlite3;
+class sqlite3_stmt;
 
 
 namespace nix {
@@ -12,8 +17,9 @@ namespace nix {
 
 /* Nix store and database schema version.  Version 1 (or 0) was Nix <=
    0.7.  Version 2 was Nix 0.8 and 0.9.  Version 3 is Nix 0.10.
-   Version 4 is Nix 0.11.  Version 5 is Nix 0.12*/
-const int nixSchemaVersion = 5;
+   Version 4 is Nix 0.11.  Version 5 is Nix 0.12-0.16.  Version 6 is
+   Nix 1.0. */
+const int nixSchemaVersion = 6;
 
 
 extern string drvsLogDir;
@@ -41,6 +47,34 @@ struct RunningSubstituter
 };
 
 
+/* Wrapper object to close the SQLite database automatically. */
+struct SQLite
+{
+    sqlite3 * db;
+    SQLite() { db = 0; }
+    ~SQLite();
+    operator sqlite3 * () { return db; }
+};
+
+
+/* Wrapper object to create and destroy SQLite prepared statements. */
+struct SQLiteStmt
+{
+    sqlite3 * db;
+    sqlite3_stmt * stmt;
+    unsigned int curArg;
+    SQLiteStmt() { stmt = 0; }
+    void create(sqlite3 * db, const string & s);
+    void reset();
+    ~SQLiteStmt();
+    operator sqlite3_stmt * () { return stmt; }
+    void bind(const string & value);
+    void bind(int value);
+    void bind64(long long value);
+    void bind();
+};
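Only the declarations appear here; the definitions live in local-store.cc. A sketch of what the string overload of bind() presumably looks like, noting that SQLite parameter indices are 1-based, so curArg would start at 1 (reset in create() and reset()):

    void SQLiteStmt::bind(const string & value)
    {
        if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1,
                SQLITE_TRANSIENT) != SQLITE_OK)
            throw SQLiteError(db, "binding argument");
    }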
+    
+
 class LocalStore : public StoreAPI
 {
 private:
@@ -64,6 +98,8 @@ public:
 
     PathSet queryValidPaths();
     
+    ValidPathInfo queryPathInfo(const Path & path);
+
     Hash queryPathHash(const Path & path);
 
     void queryReferences(const Path & path, PathSet & references);
@@ -71,6 +107,14 @@ public:
     void queryReferrers(const Path & path, PathSet & referrers);
 
     Path queryDeriver(const Path & path);
+
+    /* Return all currently valid derivations that have `path' as an
+       output.  (Note that the result of `queryDeriver()' is the
+       derivation that was actually used to produce `path', which may
+       not exist anymore.) */
+    PathSet queryValidDerivers(const Path & path);
+
+    PathSet queryDerivationOutputs(const Path & path);
     
     PathSet querySubstitutablePaths();
     
@@ -132,8 +176,7 @@ public:
        execution of the derivation (or something equivalent).  Also
        register the hash of the file system contents of the path.  The
        hash must be a SHA-256 hash. */
-    void registerValidPath(const Path & path,
-        const Hash & hash, const PathSet & references, const Path & deriver);
+    void registerValidPath(const ValidPathInfo & info);
 
     void registerValidPaths(const ValidPathInfos & infos);
 
@@ -144,6 +187,10 @@ public:
     /* Query whether `path' previously failed to build. */
     bool hasPathFailed(const Path & path);
 
+    PathSet queryFailedPaths();
+
+    void clearFailedPaths(const PathSet & paths);
+
 private:
 
     Path schemaPath;
@@ -151,45 +198,60 @@ private:
     /* Lock file used for upgrading. */
     AutoCloseFD globalLock;
 
-    /* !!! The cache can grow very big.  Maybe it should be pruned
-       every once in a while. */
-    std::map<Path, ValidPathInfo> pathInfoCache;
-
-    /* Store paths for which the referrers file must be purged. */
-    PathSet delayedUpdates;
-
-    /* Whether to do an fsync() after writing Nix metadata. */
-    bool doFsync;
+    /* The SQLite database object. */
+    SQLite db;
+
+    /* Some precompiled SQLite statements. */
+    SQLiteStmt stmtRegisterValidPath;
+    SQLiteStmt stmtAddReference;
+    SQLiteStmt stmtQueryPathInfo;
+    SQLiteStmt stmtQueryReferences;
+    SQLiteStmt stmtQueryReferrers;
+    SQLiteStmt stmtInvalidatePath;
+    SQLiteStmt stmtRegisterFailedPath;
+    SQLiteStmt stmtHasPathFailed;
+    SQLiteStmt stmtQueryFailedPaths;
+    SQLiteStmt stmtClearFailedPath;
+    SQLiteStmt stmtAddDerivationOutput;
+    SQLiteStmt stmtQueryValidDerivers;
+    SQLiteStmt stmtQueryDerivationOutputs;
 
     int getSchema();
 
-    void registerValidPath(const ValidPathInfo & info, bool ignoreValidity = false);
+    void openDB(bool create);
 
-    ValidPathInfo queryPathInfo(const Path & path, bool ignoreErrors = false);
+    unsigned long long queryValidPathId(const Path & path);
 
+    unsigned long long addValidPath(const ValidPathInfo & info);
+        
+    void addReference(unsigned long long referrer, unsigned long long reference);
+    
     void appendReferrer(const Path & from, const Path & to, bool lock);
     
     void rewriteReferrers(const Path & path, bool purge, PathSet referrers);
 
-    void flushDelayedUpdates();
-    
-    bool queryReferrersInternal(const Path & path, PathSet & referrers);
-    
     void invalidatePath(const Path & path);
-    
-    void upgradeStore12();
+
+    void verifyPath(const Path & path, const PathSet & store,
+        PathSet & done, PathSet & validPaths);
+
+    void upgradeStore6();
+    PathSet queryValidPathsOld();
+    ValidPathInfo queryPathInfoOld(const Path & path);
 
     struct GCState;
 
     bool tryToDelete(GCState & state, const Path & path);
     
-    PathSet findDerivers(GCState & state, const Path & path);
-    
     bool isActiveTempFile(const GCState & state,
         const Path & path, const string & suffix);
         
+    int openGCLock(LockType lockType);
+    
     void startSubstituter(const Path & substituter,
         RunningSubstituter & runningSubstituter);
+
+    Path createTempDirInStore();
 };
 
 
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
index f2cc20626915..01d6a97ae6fb 100644
--- a/src/libstore/misc.cc
+++ b/src/libstore/misc.cc
@@ -27,10 +27,10 @@ void computeFSClosure(const Path & storePath,
         store->queryReferences(storePath, references);
 
     if (includeOutputs && isDerivation(storePath)) {
-        Derivation drv = derivationFromPath(storePath);
-        foreach (DerivationOutputs::iterator, i, drv.outputs)
-            if (store->isValidPath(i->second.path))
-                computeFSClosure(i->second.path, paths, flipDirection, true);
+        PathSet outputs = store->queryDerivationOutputs(storePath);
+        foreach (PathSet::iterator, i, outputs)
+            if (store->isValidPath(*i))
+                computeFSClosure(*i, paths, flipDirection, true);
     }
 
     foreach (PathSet::iterator, i, references)
@@ -48,9 +48,9 @@ Path findOutput(const Derivation & drv, string id)
 
 void queryMissing(const PathSet & targets,
     PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
-    unsigned long long & downloadSize)
+    unsigned long long & downloadSize, unsigned long long & narSize)
 {
-    downloadSize = 0;
+    downloadSize = narSize = 0;
     
     PathSet todo(targets.begin(), targets.end()), done;
 
@@ -88,6 +88,7 @@ void queryMissing(const PathSet & targets,
             if (store->querySubstitutablePathInfo(p, info)) {
                 willSubstitute.insert(p);
                 downloadSize += info.downloadSize;
+                narSize += info.narSize;
                 todo.insert(info.references.begin(), info.references.end());
             } else
                 unknown.insert(p);
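With the extra output parameter, callers can report both the compressed download size and the uncompressed NAR size. A hypothetical caller (`targets' assumed to be a PathSet):

    PathSet willBuild, willSubstitute, unknown;
    unsigned long long downloadSize, narSize;
    queryMissing(targets, willBuild, willSubstitute, unknown,
        downloadSize, narSize);
    printMsg(lvlInfo, format("%1% bytes to download, %2% bytes unpacked")
        % downloadSize % narSize);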
diff --git a/src/libstore/misc.hh b/src/libstore/misc.hh
index 0bc9a2ee0d9c..abef6237e7f6 100644
--- a/src/libstore/misc.hh
+++ b/src/libstore/misc.hh
@@ -31,7 +31,7 @@ Path findOutput(const Derivation & drv, string id);
    will be substituted. */
 void queryMissing(const PathSet & targets,
     PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
-    unsigned long long & downloadSize);
+    unsigned long long & downloadSize, unsigned long long & narSize);
 
 
 }
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index 3ed54e24d773..89be6ac6529a 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -68,7 +68,7 @@ static void hashAndLink(bool dryRun, HashToPath & hashToPath,
            the contents of the symlink (i.e. the result of
            readlink()), not the contents of the target (which may not
            even exist). */
-        Hash hash = hashPath(htSHA256, path);
+        Hash hash = hashPath(htSHA256, path).first;
         stats.totalFiles++;
         printMsg(lvlDebug, format("`%1%' has hash `%2%'") % path % printHash(hash));
 
diff --git a/src/libstore/references.cc b/src/libstore/references.cc
index a6f6e85fc84b..ade9c9aa20e3 100644
--- a/src/libstore/references.cc
+++ b/src/libstore/references.cc
@@ -81,7 +81,7 @@ void RefScanSink::operator () (const unsigned char * data, unsigned int len)
 
 
 PathSet scanForReferences(const string & path,
-    const PathSet & refs, Hash & hash)
+    const PathSet & refs, HashResult & hash)
 {
     RefScanSink sink;
     std::map<string, Path> backMap;
diff --git a/src/libstore/references.hh b/src/libstore/references.hh
index 7d068eb51700..158c08a77646 100644
--- a/src/libstore/references.hh
+++ b/src/libstore/references.hh
@@ -7,7 +7,7 @@
 namespace nix {
 
 PathSet scanForReferences(const Path & path, const PathSet & refs,
-    Hash & hash);
+    HashResult & hash);
     
 }
 
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 3d8b2a0c4263..26093a5d3abc 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -97,10 +97,6 @@ void RemoteStore::forkSlave()
     if (worker == "")
         worker = nixBinDir + "/nix-worker";
 
-    string verbosityArg = "-";
-    for (int i = 1; i < verbosity; ++i)
-        verbosityArg += "v";
-
     child = fork();
     
     switch (child) {
@@ -120,10 +116,7 @@ void RemoteStore::forkSlave()
             close(fdSocket);
             close(fdChild);
 
-            execlp(worker.c_str(), worker.c_str(), "--slave",
-                /* hacky - must be at the end */
-                verbosityArg == "-" ? NULL : verbosityArg.c_str(),
-                NULL);
+            execlp(worker.c_str(), worker.c_str(), "--slave", NULL);
 
             throw SysError(format("executing `%1%'") % worker);
             
@@ -198,9 +191,8 @@ void RemoteStore::setOptions()
         writeInt(logType, to);
         writeInt(printBuildTrace, to);
     }
-    if (GET_PROTOCOL_MINOR(daemonVersion) >= 6) {
+    if (GET_PROTOCOL_MINOR(daemonVersion) >= 6)
         writeInt(buildCores, to);
-    }
     processStderr();
 }
 
@@ -219,7 +211,9 @@ bool RemoteStore::isValidPath(const Path & path)
 PathSet RemoteStore::queryValidPaths()
 {
     openConnection();
-    throw Error("not implemented");
+    writeInt(wopQueryValidPaths, to);
+    processStderr();
+    return readStorePaths(from);
 }
 
 
@@ -248,10 +242,29 @@ bool RemoteStore::querySubstitutablePathInfo(const Path & path,
     if (info.deriver != "") assertStorePath(info.deriver);
     info.references = readStorePaths(from);
     info.downloadSize = readLongLong(from);
+    info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0;
     return true;
 }
 
 
+ValidPathInfo RemoteStore::queryPathInfo(const Path & path)
+{
+    openConnection();
+    writeInt(wopQueryPathInfo, to);
+    writeString(path, to);
+    processStderr();
+    ValidPathInfo info;
+    info.path = path;
+    info.deriver = readString(from);
+    if (info.deriver != "") assertStorePath(info.deriver);
+    info.hash = parseHash(htSHA256, readString(from));
+    info.references = readStorePaths(from);
+    info.registrationTime = readInt(from);
+    info.narSize = readLongLong(from);
+    return info;
+}
+
+
 Hash RemoteStore::queryPathHash(const Path & path)
 {
     openConnection();
@@ -299,6 +312,16 @@ Path RemoteStore::queryDeriver(const Path & path)
 }
 
 
+PathSet RemoteStore::queryDerivationOutputs(const Path & path)
+{
+    openConnection();
+    writeInt(wopQueryDerivationOutputs, to);
+    writeString(path, to);
+    processStderr();
+    return readStorePaths(from);
+}
+
+
 Path RemoteStore::addToStore(const Path & _srcPath,
     bool recursive, HashType hashAlgo, PathFilter & filter)
 {
@@ -444,6 +467,25 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
 }
 
 
+PathSet RemoteStore::queryFailedPaths()
+{
+    openConnection();
+    writeInt(wopQueryFailedPaths, to);
+    processStderr();
+    return readStorePaths(from);
+}
+
+
+void RemoteStore::clearFailedPaths(const PathSet & paths)
+{
+    openConnection();
+    writeInt(wopClearFailedPaths, to);
+    writeStringSet(paths, to);
+    processStderr();
+    readInt(from);
+}
+
+
 void RemoteStore::processStderr(Sink * sink, Source * source)
 {
     unsigned int msg;
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 3d55d23d958e..519f46fd1a6d 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -29,6 +29,8 @@ public:
 
     PathSet queryValidPaths();
     
+    ValidPathInfo queryPathInfo(const Path & path);
+
     Hash queryPathHash(const Path & path);
 
     void queryReferences(const Path & path, PathSet & references);
@@ -37,6 +39,8 @@ public:
 
     Path queryDeriver(const Path & path);
     
+    PathSet queryDerivationOutputs(const Path & path);
+    
     bool hasSubstitutes(const Path & path);
     
     bool querySubstitutablePathInfo(const Path & path,
@@ -68,6 +72,10 @@ public:
 
     void collectGarbage(const GCOptions & options, GCResults & results);
     
+    PathSet queryFailedPaths();
+
+    void clearFailedPaths(const PathSet & paths);
+    
 private:
     AutoCloseFD fdSocket;
     FdSink to;
diff --git a/src/libstore/schema.sql b/src/libstore/schema.sql
new file mode 100644
index 000000000000..c1b4a689afcb
--- /dev/null
+++ b/src/libstore/schema.sql
@@ -0,0 +1,44 @@
+create table if not exists ValidPaths (
+    id               integer primary key autoincrement not null,
+    path             text unique not null,
+    hash             text not null,
+    registrationTime integer not null,
+    deriver          text,
+    narSize          integer
+);
+
+create table if not exists Refs (
+    referrer  integer not null,
+    reference integer not null,
+    primary key (referrer, reference),
+    foreign key (referrer) references ValidPaths(id) on delete cascade,
+    foreign key (reference) references ValidPaths(id) on delete restrict
+);
+
+create index if not exists IndexReferrer on Refs(referrer);
+create index if not exists IndexReference on Refs(reference);
+
+-- Paths can refer to themselves, producing a tuple (N, N) in the Refs
+-- table.  Deleting the corresponding row in ValidPaths would then
+-- violate the foreign key constraint (due to `on delete restrict' on
+-- the `reference' column).  Therefore, explicitly get rid of
+-- self-references first.
+create trigger if not exists DeleteSelfRefs before delete on ValidPaths
+  begin
+    delete from Refs where referrer = old.id and reference = old.id;
+  end;
+
+create table if not exists DerivationOutputs (
+    drv  integer not null,
+    id   text not null, -- symbolic output id, usually "out"
+    path text not null,
+    primary key (drv, id),
+    foreign key (drv) references ValidPaths(id) on delete cascade
+);
+
+create index if not exists IndexDerivationOutputs on DerivationOutputs(path);
+
+create table if not exists FailedPaths (
+    path text primary key not null,
+    time integer not null
+);
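To see why the trigger is needed, consider a path whose row id is N and that refers to itself, so that Refs contains (N, N). Without DeleteSelfRefs, `delete from ValidPaths where id = N' would be rejected by the `on delete restrict' constraint on Refs.reference; with it, the self-reference row is removed first and the `on delete cascade' on Refs.referrer cleans up the rest. Sketch with a hypothetical row id:

    /* Fires DeleteSelfRefs first, then cascades the remaining
       referrer rows. */
    sqlite3_exec(db, "delete from ValidPaths where id = 123;", 0, 0, 0);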
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index f0abe61ad1e1..4b04f5751ce8 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -1,7 +1,6 @@
 #include "store-api.hh"
 #include "globals.hh"
 #include "util.hh"
-#include "derivations.hh"
 
 #include <limits.h>
 
@@ -53,18 +52,6 @@ Path toStorePath(const Path & path)
 }
 
 
-string getNameOfStorePath(const Path & path)
-{
-    Path::size_type slash = path.rfind('/');
-    string p = slash == Path::npos ? path : string(path, slash + 1);
-    Path::size_type dash = p.find('-');
-    assert(dash != Path::npos);
-    string p2 = string(p, dash + 1);
-    if (isDerivation(p2)) p2 = string(p2, 0, p2.size() - 4);
-    return p2;
-}
-
-
 Path followLinksToStore(const Path & _path)
 {
     Path path = absPath(_path);
@@ -203,7 +190,7 @@ std::pair<Path, Hash> computeStorePathForPath(const Path & srcPath,
     bool recursive, HashType hashAlgo, PathFilter & filter)
 {
     HashType ht(hashAlgo);
-    Hash h = recursive ? hashPath(ht, srcPath, filter) : hashFile(ht, srcPath);
+    Hash h = recursive ? hashPath(ht, srcPath, filter).first : hashFile(ht, srcPath);
     string name = baseNameOf(srcPath);
     Path dstPath = makeFixedOutputPath(recursive, hashAlgo, h, name);
     return std::pair<Path, Hash>(dstPath, h);
@@ -229,7 +216,7 @@ Path computeStorePathForText(const string & name, const string & s,
 /* Return a string accepted by decodeValidPathInfo() that
    registers the specified paths as valid.  Note: it's the
    responsibility of the caller to provide a closure. */
-string makeValidityRegistration(const PathSet & paths,
+string StoreAPI::makeValidityRegistration(const PathSet & paths,
     bool showDerivers, bool showHash)
 {
     string s = "";
@@ -237,18 +224,19 @@ string makeValidityRegistration(const PathSet & paths,
     foreach (PathSet::iterator, i, paths) {
         s += *i + "\n";
 
-        if (showHash)
-            s += printHash(store->queryPathHash(*i)) + "\n";
+        ValidPathInfo info = queryPathInfo(*i);
 
-        Path deriver = showDerivers ? store->queryDeriver(*i) : "";
+        if (showHash) {
+            s += printHash(info.hash) + "\n";
+            s += (format("%1%\n") % info.narSize).str();
+        }
+
+        Path deriver = showDerivers ? info.deriver : "";
         s += deriver + "\n";
 
-        PathSet references;
-        store->queryReferences(*i, references);
+        s += (format("%1%\n") % info.references.size()).str();
 
-        s += (format("%1%\n") % references.size()).str();
-            
-        foreach (PathSet::iterator, j, references)
+        foreach (PathSet::iterator, j, info.references)
             s += *j + "\n";
     }
 
@@ -265,6 +253,8 @@ ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven)
         string s;
         getline(str, s);
         info.hash = parseHash(htSHA256, s);
+        getline(str, s);
+        if (!string2Int(s, info.narSize)) throw Error("number expected");
     }
     getline(str, info.deriver);
     string s; int n;
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 6fc0689ba1af..40ac887140d7 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -87,9 +87,25 @@ struct SubstitutablePathInfo
     Path deriver;
     PathSet references;
     unsigned long long downloadSize; /* 0 = unknown or inapplicable */
+    unsigned long long narSize; /* 0 = unknown */
 };
 
 
+struct ValidPathInfo 
+{
+    Path path;
+    Path deriver;
+    Hash hash;
+    PathSet references;
+    time_t registrationTime;
+    unsigned long long narSize; // 0 = unknown
+    unsigned long long id; // internal use only
+    ValidPathInfo() : registrationTime(0), narSize(0), id(0) { }
+};
+
+typedef list<ValidPathInfo> ValidPathInfos;
+
+
 class StoreAPI 
 {
 public:
@@ -102,6 +118,9 @@ public:
     /* Query the set of valid paths. */
     virtual PathSet queryValidPaths() = 0;
 
+    /* Query information about a valid path. */
+    virtual ValidPathInfo queryPathInfo(const Path & path) = 0;
+
     /* Queries the hash of a valid path. */ 
     virtual Hash queryPathHash(const Path & path) = 0;
 
@@ -110,33 +129,18 @@ public:
     virtual void queryReferences(const Path & path,
         PathSet & references) = 0;
 
-    /* Like queryReferences, but with self-references filtered out. */
-    PathSet queryReferencesNoSelf(const Path & path)
-    {
-        PathSet res;
-        queryReferences(path, res);
-        res.erase(path);
-        return res;
-    }
-
     /* Queries the set of incoming FS references for a store path.
        The result is not cleared. */
     virtual void queryReferrers(const Path & path,
         PathSet & referrers) = 0;
 
-    /* Like queryReferrers, but with self-references filtered out. */
-    PathSet queryReferrersNoSelf(const Path & path)
-    {
-        PathSet res;
-        queryReferrers(path, res);
-        res.erase(path);
-        return res;
-    }
-
     /* Query the deriver of a store path.  Return the empty string if
        no deriver has been set. */
     virtual Path queryDeriver(const Path & path) = 0;
 
+    /* Query the outputs of the derivation denoted by `path'. */
+    virtual PathSet queryDerivationOutputs(const Path & path) = 0;
+    
     /* Query whether a path has substitutes. */
     virtual bool hasSubstitutes(const Path & path) = 0;
 
@@ -222,6 +226,19 @@ public:
 
     /* Perform a garbage collection. */
     virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
+
+    /* Return the set of paths that have failed to build. */
+    virtual PathSet queryFailedPaths() = 0;
+
+    /* Clear the "failed" status of the given paths.  The special
+       value `*' causes all failed paths to be cleared. */
+    virtual void clearFailedPaths(const PathSet & paths) = 0;
+
+    /* Return a string representing information about the path that
+       can be loaded into the database using `nix-store --load-db' or
+       `nix-store --register-validity'. */
+    string makeValidityRegistration(const PathSet & paths,
+        bool showDerivers, bool showHash);
 };
 
 
@@ -241,12 +258,6 @@ void checkStoreName(const string & name);
 Path toStorePath(const Path & path);
 
 
-/* Get the "name" part of a store path, that is, the part after the
-   hash and the dash, and with any ".drv" suffix removed
-   (e.g. /nix/store/<hash>-foo-1.2.3.drv => foo-1.2.3). */
-string getNameOfStorePath(const Path & path);
-
-
 /* Follow symlinks until we end up with a path in the Nix store. */
 Path followLinksToStore(const Path & path);
 
@@ -321,21 +332,6 @@ boost::shared_ptr<StoreAPI> openStore();
 string showPaths(const PathSet & paths);
 
 
-string makeValidityRegistration(const PathSet & paths,
-    bool showDerivers, bool showHash);
-    
-struct ValidPathInfo 
-{
-    Path path;
-    Path deriver;
-    Hash hash;
-    PathSet references;
-    time_t registrationTime;
-    ValidPathInfo() : registrationTime(0) { }
-};
-
-typedef list<ValidPathInfo> ValidPathInfos;
-
 ValidPathInfo decodeValidPathInfo(std::istream & str,
     bool hashGiven = false);
 
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index 5504773b8f91..d77049bc7188 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -8,7 +8,7 @@ namespace nix {
 #define WORKER_MAGIC_1 0x6e697863
 #define WORKER_MAGIC_2 0x6478696f
 
-#define PROTOCOL_VERSION 0x106
+#define PROTOCOL_VERSION 0x107
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
 
@@ -34,6 +34,11 @@ typedef enum {
     wopSetOptions = 19,
     wopCollectGarbage = 20,
     wopQuerySubstitutablePathInfo = 21,
+    wopQueryDerivationOutputs = 22,
+    wopQueryValidPaths = 23,
+    wopQueryFailedPaths = 24,
+    wopClearFailedPaths = 25,
+    wopQueryPathInfo = 26,
 } WorkerOp;
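Since only the minor byte changes (0x106 to 0x107), old clients continue to work against a new daemon; features added in this commit are gated on the negotiated minor version. For example, remote-store.cc above only reads the narSize field when the daemon is new enough:

    if (GET_PROTOCOL_MINOR(daemonVersion) >= 7)
        info.narSize = readLongLong(from);   // field added in 0x107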
 
 
diff --git a/src/libutil/Makefile.am b/src/libutil/Makefile.am
index aa862208c6f9..98f32633b894 100644
--- a/src/libutil/Makefile.am
+++ b/src/libutil/Makefile.am
@@ -3,7 +3,7 @@ pkglib_LTLIBRARIES = libutil.la
 libutil_la_SOURCES = util.cc hash.cc serialise.cc \
   archive.cc xml-writer.cc
 
-libutil_la_LIBADD = ../boost/format/libformat.la
+libutil_la_LIBADD = ../boost/format/libformat.la ${aterm_lib} ${sqlite_lib}
 
 pkginclude_HEADERS = util.hh hash.hh serialise.hh \
   archive.hh xml-writer.hh types.hh
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index 8fde4328c47e..999b17cd2f19 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -181,8 +181,6 @@ static void parseContents(ParseSink & sink, Source & source, const Path & path)
         left -= n;
     }
 
-    sink.finalizeContents(size);
-
     readPadding(size, source);
 }
 
@@ -317,12 +315,6 @@ struct RestoreSink : ParseSink
         writeFull(fd, data, len);
     }
 
-    void finalizeContents(unsigned long long size)
-    {
-        errno = ftruncate(fd, size);
-        if (errno) throw SysError(format("truncating file to its allocated length of %1% bytes") % size);
-    }
-
     void createSymlink(const Path & path, const string & target)
     {
         Path p = dstPath + path;
diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh
index f358a2a6be17..fff62031397c 100644
--- a/src/libutil/archive.hh
+++ b/src/libutil/archive.hh
@@ -64,7 +64,6 @@ struct ParseSink
     virtual void isExecutable() { };
     virtual void preallocateContents(unsigned long long size) { };
     virtual void receiveContents(unsigned char * data, unsigned int len) { };
-    virtual void finalizeContents(unsigned long long size) { };
 
     virtual void createSymlink(const Path & path, const string & target) { };
 };
diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc
index eef01fe4d609..b9e7846992f6 100644
--- a/src/libutil/hash.cc
+++ b/src/libutil/hash.cc
@@ -286,9 +286,18 @@ Hash hashFile(HashType ht, const Path & path)
 HashSink::HashSink(HashType ht) : ht(ht)
 {
     ctx = new Ctx;
+    bytes = 0;
     start(ht, *ctx);
 }
     
+HashSink::HashSink(const HashSink & h)
+{
+    ht = h.ht;
+    bytes = h.bytes;
+    ctx = new Ctx;
+    *ctx = *h.ctx;
+}
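The copy constructor duplicates the digest context, which is what makes HashAndWriteSink::currentHash() in local-store.cc possible: an intermediate hash can be taken by finalizing a clone while the original keeps accumulating. Usage sketch (`data' and `len' are placeholders):

    HashSink sink(htSHA256);
    sink(data, len);                       // feed some bytes
    HashSink snapshot(sink);               // clone the context
    Hash soFar = snapshot.finish().first;  // finalizes only the clone
    sink(data, len);                       // original is undisturbed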
+    
 HashSink::~HashSink()
 {
     delete ctx;
@@ -297,18 +306,20 @@ HashSink::~HashSink()
 void HashSink::operator ()
     (const unsigned char * data, unsigned int len)
 {
+    bytes += len;
     update(ht, *ctx, data, len);
 }
 
-Hash HashSink::finish()
+HashResult HashSink::finish()
 {
     Hash hash(ht);
     nix::finish(ht, *ctx, hash.hash);
-    return hash;
+    return HashResult(hash, bytes);
 }
 
 
-Hash hashPath(HashType ht, const Path & path, PathFilter & filter)
+HashResult hashPath(
+    HashType ht, const Path & path, PathFilter & filter)
 {
     HashSink sink(ht);
     dumpPath(path, sink, filter);
diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh
index 062d97254bfc..13740954d625 100644
--- a/src/libutil/hash.hh
+++ b/src/libutil/hash.hh
@@ -40,7 +40,6 @@ struct Hash
 
     /* For sorting. */
     bool operator < (const Hash & h) const;
-
 };
 
 
@@ -72,7 +71,8 @@ Hash hashFile(HashType ht, const Path & path);
    (essentially) hashString(ht, dumpPath(path)). */
 struct PathFilter;
 extern PathFilter defaultPathFilter;
-Hash hashPath(HashType ht, const Path & path,
+typedef std::pair<Hash, unsigned long long> HashResult;
+HashResult hashPath(HashType ht, const Path & path,
     PathFilter & filter = defaultPathFilter);
 
 /* Compress a hash to the specified number of bytes by cyclically
@@ -93,16 +93,18 @@ class HashSink : public Sink
 private:
     HashType ht;
     Ctx * ctx;
+    unsigned long long bytes;
 
 public:
     HashSink(HashType ht);
+    HashSink(const HashSink & h);
     ~HashSink();
     virtual void operator () (const unsigned char * data, unsigned int len);
-    Hash finish();
+    HashResult finish();
 };
 
 
 }
 
-    
+
 #endif /* !__HASH_H */
diff --git a/src/libutil/types.hh b/src/libutil/types.hh
index fd61746e3f04..854a0f689a14 100644
--- a/src/libutil/types.hh
+++ b/src/libutil/types.hh
@@ -65,7 +65,7 @@ typedef set<Path> PathSet;
 
  
 typedef enum { 
-    lvlError,
+    lvlError = 0,
     lvlInfo,
     lvlTalkative,
     lvlChatty,
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 32c8fce9a1b1..e7c0700cf41d 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -228,13 +228,12 @@ string readFile(const Path & path)
 }
 
 
-void writeFile(const Path & path, const string & s, bool doFsync)
+void writeFile(const Path & path, const string & s)
 {
     AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 0666);
     if (fd == -1)
         throw SysError(format("opening file `%1%'") % path);
     writeFull(fd, (unsigned char *) s.c_str(), s.size());
-    if (doFsync) fsync(fd);
 }
 
 
@@ -976,6 +975,17 @@ Strings tokenizeString(const string & s, const string & separators)
 }
 
 
+string concatStringsSep(const string & sep, const Strings & ss)
+{
+    string s;
+    foreach (Strings::const_iterator, i, ss) {
+        if (s.size() != 0) s += sep;
+        s += *i;
+    }
+    return s;
+}
+
+
 string statusToString(int status)
 {
     if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index ff710077ceaa..27ad46904958 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -60,7 +60,7 @@ string readFile(int fd);
 string readFile(const Path & path);
 
 /* Write a string to a file. */
-void writeFile(const Path & path, const string & s, bool doFsync = false);
+void writeFile(const Path & path, const string & s);
 
 /* Read a line from a file descriptor. */
 string readLine(int fd);
@@ -280,6 +280,11 @@ MakeError(Interrupted, BaseError)
 Strings tokenizeString(const string & s, const string & separators = " \t\n\r");
 
 
+/* Concatenate the given strings with a separator between the
+   elements. */
+string concatStringsSep(const string & sep, const Strings & ss);
+
+
 /* Convert the exit status of a child as returned by wait() into an
    error string. */
 string statusToString(int status);
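
Note: concatStringsSep() inserts the separator only between elements, so an
empty list yields an empty string. For example:

    #include "util.hh"

    using namespace nix;

    void example()
    {
        Strings ss;
        ss.push_back("foo");
        ss.push_back("bar");
        string s = concatStringsSep(", ", ss);        // "foo, bar"
        string e = concatStringsSep(", ", Strings()); // "" (no leading sep)
    }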
diff --git a/src/nix-env/Makefile.am b/src/nix-env/Makefile.am
index 7dfa7425a062..113baabc41cd 100644
--- a/src/nix-env/Makefile.am
+++ b/src/nix-env/Makefile.am
@@ -4,7 +4,7 @@ nix_env_SOURCES = nix-env.cc profiles.cc profiles.hh user-env.cc user-env.hh hel
 
 nix_env_LDADD = ../libmain/libmain.la ../libexpr/libexpr.la \
  ../libstore/libstore.la ../libutil/libutil.la \
- ../boost/format/libformat.la @ADDITIONAL_NETWORK_LIBS@
+ ../boost/format/libformat.la
 
 nix-env.o: help.txt.hh
 
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index 58bc9af12598..bfcc25872715 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -69,7 +69,7 @@ typedef void (* Operation) (Globals & globals,
 
 void printHelp()
 {
-    cout << string((char *) helpText, sizeof helpText);
+    cout << string((char *) helpText);
 }
 
 
diff --git a/src/nix-hash/Makefile.am b/src/nix-hash/Makefile.am
index 5f84eb34d6da..a4fdb324629d 100644
--- a/src/nix-hash/Makefile.am
+++ b/src/nix-hash/Makefile.am
@@ -2,7 +2,7 @@ bin_PROGRAMS = nix-hash
 
 nix_hash_SOURCES = nix-hash.cc help.txt
 nix_hash_LDADD = ../libmain/libmain.la ../libstore/libstore.la ../libutil/libutil.la \
- ../boost/format/libformat.la @ADDITIONAL_NETWORK_LIBS@
+ ../boost/format/libformat.la
 
 nix-hash.o: help.txt.hh
 
diff --git a/src/nix-hash/nix-hash.cc b/src/nix-hash/nix-hash.cc
index f268e4986701..4867234bffc1 100644
--- a/src/nix-hash/nix-hash.cc
+++ b/src/nix-hash/nix-hash.cc
@@ -10,7 +10,7 @@ using namespace nix;
 
 void printHelp()
 {
-    std::cout << string((char *) helpText, sizeof helpText);
+    std::cout << string((char *) helpText);
 }
 
 
@@ -44,7 +44,7 @@ void run(Strings args)
 
     if (op == opHash) {
         for (Strings::iterator i = ss.begin(); i != ss.end(); ++i) {
-            Hash h = flat ? hashFile(ht, *i) : hashPath(ht, *i);
+            Hash h = flat ? hashFile(ht, *i) : hashPath(ht, *i).first;
             if (truncate && h.hashSize > 20) h = compressHash(h, 20);
             std::cout << format("%1%\n") %
                 (base32 ? printHash32(h) : printHash(h));
diff --git a/src/nix-instantiate/Makefile.am b/src/nix-instantiate/Makefile.am
index a65907a8d40a..b48dbd9d410f 100644
--- a/src/nix-instantiate/Makefile.am
+++ b/src/nix-instantiate/Makefile.am
@@ -3,7 +3,7 @@ bin_PROGRAMS = nix-instantiate
 nix_instantiate_SOURCES = nix-instantiate.cc help.txt
 nix_instantiate_LDADD = ../libmain/libmain.la ../libexpr/libexpr.la \
  ../libstore/libstore.la ../libutil/libutil.la \
- ../boost/format/libformat.la @ADDITIONAL_NETWORK_LIBS@
+ ../boost/format/libformat.la
 
 nix-instantiate.o: help.txt.hh
 
diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc
index b330f0cf11f8..3d3b643cfd6e 100644
--- a/src/nix-instantiate/nix-instantiate.cc
+++ b/src/nix-instantiate/nix-instantiate.cc
@@ -19,7 +19,7 @@ using namespace nix;
 
 void printHelp()
 {
-    std::cout << string((char *) helpText, sizeof helpText);
+    std::cout << string((char *) helpText);
 }
 
 
diff --git a/src/nix-log2xml/log2xml.cc b/src/nix-log2xml/log2xml.cc
index b2a25eefa269..6645dc500fed 100644
--- a/src/nix-log2xml/log2xml.cc
+++ b/src/nix-log2xml/log2xml.cc
@@ -18,6 +18,8 @@ struct Decoder
     int priority;
     bool ignoreLF;
     int lineNo, charNo;
+    bool warning;
+    bool error;
 
     Decoder()
     {
@@ -29,6 +31,8 @@ struct Decoder
         ignoreLF = false;
         lineNo = 1;
         charNo = 0;
+        warning = false;
+        error = false;
     }
 
     void pushChar(char c);
@@ -95,6 +99,12 @@ void Decoder::pushChar(char c)
                     case 'b':
                         ignoreLF = false;
                         break;
+                    case 'e':
+                        error = true;
+                        break;
+                    case 'w':
+                        warning = true;
+                        break;
                 }
             } else if (c >= '0' && c <= '9') {
                 int n = 0;
@@ -118,6 +128,8 @@ void Decoder::finishLine()
     string tag = inHeader ? "head" : "line";
     cout << "<" << tag;
     if (priority != 1) cout << " priority='" << priority << "'";
+    if (warning) cout << " warning='1'";
+    if (error) cout << " error='1'";
     cout << ">";
 
     for (unsigned int i = 0; i < line.size(); i++) {
@@ -158,6 +170,8 @@ void Decoder::finishLine()
     line = "";
     inHeader = false;
     priority = 1;
+    warning = false;
+    error = false;
 }
 
 
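Note: the decoder now recognises two more escape codes in builder logs, 'e'
and 'w', and forwards them as error/warning attributes on the emitted
element; both flags reset at the end of each line. A hedged illustration of
a producer (the ESC-'['-plus-code framing is assumed from the switch above):

    // Hypothetical builder output for nix-log2xml.
    #include <cstdio>

    int main()
    {
        /* should be rendered as <line warning='1'>...</line> */
        std::printf("\033[wconfigure: something looks suspicious\n");
        /* should be rendered as <line error='1'>...</line> */
        std::printf("\033[ebuilder failed\n");
        return 0;
    }
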
diff --git a/src/nix-setuid-helper/Makefile.am b/src/nix-setuid-helper/Makefile.am
index 35528458c5f6..a04701636d84 100644
--- a/src/nix-setuid-helper/Makefile.am
+++ b/src/nix-setuid-helper/Makefile.am
@@ -4,5 +4,4 @@ nix_setuid_helper_SOURCES = nix-setuid-helper.cc
 nix_setuid_helper_LDADD = ../libutil/libutil.la \
  ../boost/format/libformat.la
 
-AM_CXXFLAGS = \
- -I$(srcdir)/.. -I$(srcdir)/../libutil
+AM_CXXFLAGS = -I$(srcdir)/.. -I$(srcdir)/../libutil
diff --git a/src/nix-store/Makefile.am b/src/nix-store/Makefile.am
index b6dd37a6114a..44ff54674ade 100644
--- a/src/nix-store/Makefile.am
+++ b/src/nix-store/Makefile.am
@@ -5,7 +5,7 @@ nix_store_SOURCES =				\
   xmlgraph.cc xmlgraph.hh
 
 nix_store_LDADD = ../libmain/libmain.la ../libstore/libstore.la ../libutil/libutil.la \
- ../boost/format/libformat.la @ADDITIONAL_NETWORK_LIBS@
+ ../boost/format/libformat.la
 
 nix-store.o: help.txt.hh
 
diff --git a/src/nix-store/help.txt b/src/nix-store/help.txt
index 071934b2000f..4782518517dd 100644
--- a/src/nix-store/help.txt
+++ b/src/nix-store/help.txt
@@ -16,7 +16,7 @@ Operations:
 
   --gc: run the garbage collector
 
-  --dump: dump a path as a Nix archive, forgetting dependencies
+  --dump: dump a path as a Nix archive (NAR), forgetting dependencies
   --restore: restore a path from a Nix archive, without
       registering validity
 
@@ -27,6 +27,9 @@ Operations:
   --verify: verify Nix structures
   --optimise: optimise the Nix store by hard-linking identical files
 
+  --query-failed-paths: list paths that failed to build (if enabled)
+  --clear-failed-paths: clear the failed status of the given paths
+
   --version: output version information
   --help: display help
 
@@ -41,6 +44,7 @@ Query flags:
   --graph: print a dot graph rooted at given path
   --xml: emit an XML representation of the graph rooted at the given path
   --hash: print the SHA-256 hash of the contents of the path
+  --size: print the size of the NAR dump of the path
   --roots: print the garbage collector roots that point to the path
 
 Query switches (not applicable to all queries):
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index ff6538137a8a..49a705585ebf 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -22,7 +22,7 @@ typedef void (* Operation) (Strings opFlags, Strings opArgs);
 
 void printHelp()
 {
-    cout << string((char *) helpText, sizeof helpText);
+    cout << string((char *) helpText);
 }
 
 
@@ -34,7 +34,7 @@ static bool indirectRoot = false;
 LocalStore & ensureLocalStore()
 {
     LocalStore * store2(dynamic_cast<LocalStore *>(store.get()));
-    if (!store2) throw Error("you don't have sufficient rights to use --verify");
+    if (!store2) throw Error("you don't have sufficient rights to use this command");
     return *store2;
 }
 
@@ -226,7 +226,7 @@ static void printTree(const Path & path,
 static void opQuery(Strings opFlags, Strings opArgs)
 {
     enum { qOutputs, qRequisites, qReferences, qReferrers
-         , qReferrersClosure, qDeriver, qBinding, qHash
+         , qReferrersClosure, qDeriver, qBinding, qHash, qSize
          , qTree, qGraph, qXml, qResolve, qRoots } query = qOutputs;
     bool useOutput = false;
     bool includeOutputs = false;
@@ -248,6 +248,7 @@ static void opQuery(Strings opFlags, Strings opArgs)
             query = qBinding;
         }
         else if (*i == "--hash") query = qHash;
+        else if (*i == "--size") query = qSize;
         else if (*i == "--tree") query = qTree;
         else if (*i == "--graph") query = qGraph;
         else if (*i == "--xml") query = qXml;
@@ -310,11 +311,15 @@ static void opQuery(Strings opFlags, Strings opArgs)
             break;
 
         case qHash:
+        case qSize:
             foreach (Strings::iterator, i, opArgs) {
                 Path path = maybeUseOutput(followLinksToStorePath(*i), useOutput, forceRealise);
-                Hash hash = store->queryPathHash(path);
-                assert(hash.type == htSHA256);
-                cout << format("sha256:%1%\n") % printHash32(hash);
+                ValidPathInfo info = store->queryPathInfo(path);
+                if (query == qHash) {
+                    assert(info.hash.type == htSHA256);
+                    cout << format("sha256:%1%\n") % printHash32(info.hash);
+                } else if (query == qSize) 
+                    cout << format("%1%\n") % info.narSize;
             }
             break;
 
@@ -393,9 +398,8 @@ static void opDumpDB(Strings opFlags, Strings opArgs)
     if (!opArgs.empty())
         throw UsageError("no arguments expected");
     PathSet validPaths = store->queryValidPaths();
-    foreach (PathSet::iterator, i, validPaths) {
-        cout << makeValidityRegistration(singleton<PathSet>(*i), true, true);
-    }
+    foreach (PathSet::iterator, i, validPaths)
+        cout << store->makeValidityRegistration(singleton<PathSet>(*i), true, true);
 }
 
 
@@ -410,8 +414,11 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise)
             /* !!! races */
             if (canonicalise)
                 canonicalisePathMetaData(info.path);
-            if (!hashGiven)
-                info.hash = hashPath(htSHA256, info.path);
+            if (!hashGiven) {
+                HashResult hash = hashPath(htSHA256, info.path);
+                info.hash = hash.first;
+                info.narSize = hash.second;
+            }
             infos.push_back(info);
         }
     }
@@ -661,8 +668,7 @@ static void opOptimise(Strings opFlags, Strings opArgs)
 
     bool dryRun = false;
 
-    for (Strings::iterator i = opFlags.begin();
-         i != opFlags.end(); ++i)
+    foreach (Strings::iterator, i, opFlags)
         if (*i == "--dry-run") dryRun = true;
         else throw UsageError(format("unknown flag `%1%'") % *i);
 
@@ -677,6 +683,24 @@ static void opOptimise(Strings opFlags, Strings opArgs)
 }
 
 
+static void opQueryFailedPaths(Strings opFlags, Strings opArgs)
+{
+    if (!opArgs.empty() || !opFlags.empty())
+        throw UsageError("no arguments expected");
+    PathSet failed = store->queryFailedPaths();
+    foreach (PathSet::iterator, i, failed)
+        cout << format("%1%\n") % *i;
+}
+
+
+static void opClearFailedPaths(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty())
+        throw UsageError("no flags expected");
+    store->clearFailedPaths(PathSet(opArgs.begin(), opArgs.end()));
+}
+
+
 /* Scan the arguments; find the operation, set global flags, put all
    other flags in a list, and put all other arguments in another
    list. */
@@ -728,6 +752,10 @@ void run(Strings args)
             op = opVerify;
         else if (arg == "--optimise")
             op = opOptimise;
+        else if (arg == "--query-failed-paths")
+            op = opQueryFailedPaths;
+        else if (arg == "--clear-failed-paths")
+            op = opClearFailedPaths;
         else if (arg == "--add-root") {
             if (i == args.end())
                throw UsageError("`--add-root' requires an argument");
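
Note: the new operations map directly onto the store API. Roughly, assuming
the usual initialised global `store' as used elsewhere in nix-store.cc:

    #include "store-api.hh"

    using namespace nix;

    void example(const Path & path)
    {
        /* nix-store -q --size PATH */
        ValidPathInfo info = store->queryPathInfo(path);
        unsigned long long narSize = info.narSize;

        /* nix-store --query-failed-paths */
        PathSet failed = store->queryFailedPaths();

        /* nix-store --clear-failed-paths PATH... */
        store->clearFailedPaths(failed);
    }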
diff --git a/src/nix-worker/Makefile.am b/src/nix-worker/Makefile.am
index 50c8ae36d2e8..b6094a2a038c 100644
--- a/src/nix-worker/Makefile.am
+++ b/src/nix-worker/Makefile.am
@@ -2,7 +2,7 @@ bin_PROGRAMS = nix-worker
 
 nix_worker_SOURCES = nix-worker.cc help.txt
 nix_worker_LDADD = ../libmain/libmain.la ../libstore/libstore.la ../libutil/libutil.la \
- ../boost/format/libformat.la @ADDITIONAL_NETWORK_LIBS@
+ ../boost/format/libformat.la
 
 nix-worker.o: help.txt.hh
 
diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc
index 13cecf654e7a..081b5b2fd9f4 100644
--- a/src/nix-worker/nix-worker.cc
+++ b/src/nix-worker/nix-worker.cc
@@ -315,14 +315,16 @@ static void performOp(unsigned int clientVersion,
     }
 
     case wopQueryReferences:
-    case wopQueryReferrers: {
+    case wopQueryReferrers:
+    case wopQueryDerivationOutputs: {
         Path path = readStorePath(from);
         startWork();
         PathSet paths;
         if (op == wopQueryReferences)
             store->queryReferences(path, paths);
-        else
+        else if (op == wopQueryReferrers)
             store->queryReferrers(path, paths);
+        else paths = store->queryDerivationOutputs(path);
         stopWork();
         writeStringSet(paths, to);
         break;
@@ -519,10 +521,50 @@ static void performOp(unsigned int clientVersion,
             writeString(info.deriver, to);
             writeStringSet(info.references, to);
             writeLongLong(info.downloadSize, to);
+            if (GET_PROTOCOL_MINOR(clientVersion) >= 7)
+                writeLongLong(info.narSize, to);
         }
         break;
     }
             
+    case wopQueryValidPaths: {
+        startWork();
+        PathSet paths = store->queryValidPaths();
+        stopWork();
+        writeStringSet(paths, to);
+        break;
+    }
+
+    case wopQueryFailedPaths: {
+        startWork();
+        PathSet paths = store->queryFailedPaths();
+        stopWork();
+        writeStringSet(paths, to);
+        break;
+    }
+
+    case wopClearFailedPaths: {
+        PathSet paths = readStringSet(from);
+        startWork();
+        store->clearFailedPaths(paths);
+        stopWork();
+        writeInt(1, to);
+        break;
+    }
+
+    case wopQueryPathInfo: {
+        Path path = readStorePath(from);
+        startWork();
+        ValidPathInfo info = store->queryPathInfo(path);
+        stopWork();
+        writeString(info.deriver, to);
+        writeString(printHash(info.hash), to);
+        writeStringSet(info.references, to);
+        writeInt(info.registrationTime, to);
+        writeLongLong(info.narSize, to);
+        break;
+    }
+
     default:
         throw Error(format("invalid operation %1%") % op);
     }
@@ -770,7 +812,7 @@ void run(Strings args)
 
 void printHelp()
 {
-    std::cout << string((char *) helpText, sizeof helpText);
+    std::cout << string((char *) helpText);
 }
 
 
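Note: a client of wopQueryPathInfo has to read the reply fields back in
exactly the order performOp() writes them. A sketch of the matching read
side (helper names mirror the writeString/writeStringSet/writeInt/
writeLongLong calls above; the surrounding RemoteStore plumbing is assumed):

    #include "serialise.hh"
    #include "store-api.hh"
    #include "hash.hh"

    using namespace nix;

    ValidPathInfo readQueryPathInfoReply(Source & from, const Path & path)
    {
        ValidPathInfo info;
        info.path = path;
        info.deriver = readString(from);
        info.hash = parseHash(htSHA256, readString(from));
        info.references = readStringSet(from);
        info.registrationTime = readInt(from);
        info.narSize = readLongLong(from);
        return info;
    }
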
diff --git a/substitute.mk b/substitute.mk
index f502b942d66e..d83399cfde78 100644
--- a/substitute.mk
+++ b/substitute.mk
@@ -24,6 +24,7 @@
 	 -e "s^@xmllint\@^$(xmllint)^g" \
 	 -e "s^@xmlflags\@^$(xmlflags)^g" \
 	 -e "s^@xsltproc\@^$(xsltproc)^g" \
+	 -e "s^@sqlite_bin\@^$(sqlite_bin)^g" \
 	 -e "s^@version\@^$(VERSION)^g" \
 	 -e "s^@testPath\@^$(coreutils):$$(dirname $$(type -p expr))^g" \
 	 < $< > $@ || rm $@
diff --git a/tests/Makefile.am b/tests/Makefile.am
index beb7852db31c..fb4a2285c1d5 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -7,7 +7,8 @@ TESTS = init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \
   fallback.sh nix-push.sh gc.sh gc-concurrent.sh verify.sh nix-pull.sh \
   referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
   gc-runtime.sh install-package.sh check-refs.sh filter-source.sh \
-  remote-store.sh export.sh export-graph.sh negative-caching.sh
+  remote-store.sh export.sh export-graph.sh negative-caching.sh \
+  binary-patching.sh
 
 XFAIL_TESTS =
 
@@ -31,5 +32,6 @@ EXTRA_DIST = $(TESTS) \
   filter-source.nix \
   export-graph.nix \
   negative-caching.nix \
+  binary-patching.nix \
   $(wildcard lang/*.nix) $(wildcard lang/*.exp) $(wildcard lang/*.exp.xml) $(wildcard lang/*.flags) \
   common.sh.in
diff --git a/tests/binary-patching.nix b/tests/binary-patching.nix
new file mode 100644
index 000000000000..0de3028bd64d
--- /dev/null
+++ b/tests/binary-patching.nix
@@ -0,0 +1,18 @@
+{ version }:
+
+with import ./config.nix;
+
+mkDerivation {
+  name = "foo-${toString version}";
+  builder = builtins.toFile "builder.sh"
+    ''
+      mkdir $out
+      seq 1 1000000 > $out/foo
+      ${if version != 1 then ''
+        seq 1000000 1010000 >> $out/foo
+      '' else ""}
+      ${if version == 3 then ''
+        echo foobar >> $out/foo
+      '' else ""}
+    '';
+}
diff --git a/tests/binary-patching.sh b/tests/binary-patching.sh
new file mode 100644
index 000000000000..c320dccc7901
--- /dev/null
+++ b/tests/binary-patching.sh
@@ -0,0 +1,58 @@
+source common.sh
+
+clearManifests
+
+mkdir -p $TEST_ROOT/cache2 $TEST_ROOT/patches
+
+RESULT=$TEST_ROOT/result
+
+# Build versions 1, 2 and 3 of the "foo" package.
+$NIX_BIN_DIR/nix-push --copy $TEST_ROOT/cache2 $TEST_ROOT/manifest1 \
+    $($nixbuild -o $RESULT binary-patching.nix --arg version 1)
+
+out2=$($nixbuild -o $RESULT binary-patching.nix --arg version 2)
+$NIX_BIN_DIR/nix-push --copy $TEST_ROOT/cache2 $TEST_ROOT/manifest2 $out2
+    
+out3=$($nixbuild -o $RESULT binary-patching.nix --arg version 3)
+$NIX_BIN_DIR/nix-push --copy $TEST_ROOT/cache2 $TEST_ROOT/manifest3 $out3
+
+rm $RESULT
+
+# Generate binary patches.
+$NIX_BIN_DIR/generate-patches.pl $TEST_ROOT/cache2 $TEST_ROOT/patches \
+    file://$TEST_ROOT/patches $TEST_ROOT/manifest1 $TEST_ROOT/manifest2
+
+$NIX_BIN_DIR/generate-patches.pl $TEST_ROOT/cache2 $TEST_ROOT/patches \
+    file://$TEST_ROOT/patches $TEST_ROOT/manifest2 $TEST_ROOT/manifest3
+
+grep -q "patch {" $TEST_ROOT/manifest3
+
+# Get rid of versions 2 and 3.
+$nixstore --delete $out2 $out3
+
+# Pull the manifest containing the patches.
+clearManifests
+$NIX_BIN_DIR/nix-pull file://$TEST_ROOT/manifest3
+
+# Make sure that the download size prediction uses the patches rather
+# than the full download.
+$nixbuild -o $RESULT binary-patching.nix --arg version 3 --dry-run 2>&1 | grep -q "0.01 MiB"
+
+# Now rebuild it.  This should use the two patches generated above.
+rm -f $TEST_ROOT/var/log/nix/downloads
+$nixbuild -o $RESULT binary-patching.nix --arg version 3
+rm $RESULT
+[ "$(grep ' patch ' $TEST_ROOT/var/log/nix/downloads | wc -l)" -eq 2 ]
+
+# Add a patch from version 1 directly to version 3.
+$NIX_BIN_DIR/generate-patches.pl $TEST_ROOT/cache2 $TEST_ROOT/patches \
+    file://$TEST_ROOT/patches $TEST_ROOT/manifest1 $TEST_ROOT/manifest3
+
+# Rebuild version 3.  This should use the direct patch rather than the
+# sequence of two patches.
+$nixstore --delete $out2 $out3
+clearManifests
+rm $TEST_ROOT/var/log/nix/downloads
+$NIX_BIN_DIR/nix-pull file://$TEST_ROOT/manifest3
+$nixbuild -o $RESULT binary-patching.nix --arg version 3
+[ "$(grep ' patch ' $TEST_ROOT/var/log/nix/downloads | wc -l)" -eq 1 ]
diff --git a/tests/build-hook.hook.sh b/tests/build-hook.hook.sh
index 83fa3bf78757..ff709985d3af 100755
--- a/tests/build-hook.hook.sh
+++ b/tests/build-hook.hook.sh
@@ -2,20 +2,22 @@
 
 #set -x
 
-drv=$4
+while read x y drv rest; do
 
-echo "HOOK for $drv" >&2
+    echo "HOOK for $drv" >&2
 
-outPath=`sed 's/Derive(\[("out",\"\([^\"]*\)\".*/\1/' $drv`
+    outPath=`sed 's/Derive(\[("out",\"\([^\"]*\)\".*/\1/' $drv`
 
-echo "output path is $outPath" >&2
+    echo "output path is $outPath" >&2
 
-if `echo $outPath | grep -q input-1`; then
-    echo "# accept" >&2
-    read x
-    echo "got $x"
-    mkdir $outPath
-    echo "BAR" > $outPath/foo
-else
-    echo "# decline" >&2
-fi
+    if echo $outPath | grep -q input-1; then
+        echo "# accept" >&2
+        read inputs
+        read outputs
+        mkdir $outPath
+        echo "BAR" > $outPath/foo
+    else
+        echo "# decline" >&2
+    fi
+
+done
\ No newline at end of file
diff --git a/tests/common.sh.in b/tests/common.sh.in
index b30bc44b5cc0..2c47d75eea23 100644
--- a/tests/common.sh.in
+++ b/tests/common.sh.in
@@ -38,6 +38,7 @@ export dot=@dot@
 export xmllint="@xmllint@"
 export xmlflags="@xmlflags@"
 export xsltproc="@xsltproc@"
+export sqlite3="@sqlite_bin@/bin/sqlite3"
 export SHELL="@shell@"
 
 export version=@version@
diff --git a/tests/init.sh b/tests/init.sh
index 0639a70662cd..cdc7aca834e8 100644
--- a/tests/init.sh
+++ b/tests/init.sh
@@ -23,6 +23,8 @@ ln -s $nixinstantiate $NIX_BIN_DIR/
 ln -s $nixhash $NIX_BIN_DIR/
 ln -s $nixenv $NIX_BIN_DIR/
 ln -s $nixworker $NIX_BIN_DIR/
+ln -s $TOP/src/bsdiff-*/bsdiff $NIX_BIN_DIR/
+ln -s $TOP/src/bsdiff-*/bspatch $NIX_BIN_DIR/
 ln -s $TOP/scripts/nix-prefetch-url $NIX_BIN_DIR/
 ln -s $TOP/scripts/nix-collect-garbage $NIX_BIN_DIR/
 ln -s $TOP/scripts/nix-build $NIX_BIN_DIR/
@@ -34,17 +36,19 @@ ln -s $bzip2_bin_test/bzip2 $NIX_BIN_DIR/nix/
 ln -s $bzip2_bin_test/bunzip2 $NIX_BIN_DIR/nix/
 ln -s $TOP/scripts/copy-from-other-stores.pl $NIX_BIN_DIR/nix/
 ln -s $TOP/scripts/download-using-manifests.pl $NIX_BIN_DIR/nix/
+ln -s $TOP/scripts/generate-patches.pl $NIX_BIN_DIR/
 ln -s $TOP/scripts/readmanifest.pm $NIX_BIN_DIR/nix/
 
 cat > "$NIX_CONF_DIR"/nix.conf <<EOF
 gc-keep-outputs = false
 gc-keep-derivations = false
 env-keep-derivations = false
+fsync-metadata = false
 EOF
 
 mkdir $NIX_DATA_DIR/nix
 cp -pr $TOP/corepkgs $NIX_DATA_DIR/nix/
-# Bah, script has the prefix hard-coded.  This is really messy stuff
+# Bah, scripts have the prefix hard-coded.  This is really messy stuff
 # (and likely to fail).
 for i in \
     $NIX_DATA_DIR/nix/corepkgs/nar/nar.sh \
@@ -57,6 +61,7 @@ for i in \
     $NIX_BIN_DIR/nix-push \
     $NIX_BIN_DIR/nix-pull \
     $NIX_BIN_DIR/nix/readmanifest.pm \
+    $NIX_BIN_DIR/generate-patches.pl \
     ; do
     sed < $i > $i.tmp \
         -e "s^$REAL_BIN_DIR/nix-store^$NIX_BIN_DIR/nix-store^" \
@@ -96,7 +101,6 @@ mv $NIX_BIN_DIR/nix/download-using-manifests.pl $NIX_BIN_DIR/nix/substituters/do
 $nixstore --init
 
 # Did anything happen?
-test -e "$NIX_DB_DIR"/info
-test -e "$NIX_DB_DIR"/referrer
+test -e "$NIX_DB_DIR"/db.sqlite
 
 echo 'Hello World' > ./dummy
diff --git a/tests/nix-push.sh b/tests/nix-push.sh
index a9a2f5f9c06a..0a35e3b97ab7 100644
--- a/tests/nix-push.sh
+++ b/tests/nix-push.sh
@@ -5,7 +5,7 @@ outPath=$($nixstore -r $drvPath)
 
 echo "pushing $drvPath"
 
-mkdir $TEST_ROOT/cache
+mkdir -p $TEST_ROOT/cache
 
 $NIX_BIN_DIR/nix-push \
     --copy $TEST_ROOT/cache $TEST_ROOT/manifest $drvPath
diff --git a/tests/referrers.sh b/tests/referrers.sh
index aa5edb555139..a0c195d5ab6c 100644
--- a/tests/referrers.sh
+++ b/tests/referrers.sh
@@ -1,9 +1,8 @@
 source common.sh
 
-# This takes way to long on Cygwin (because process creation is so slow...).
-if test "$system" = i686-cygwin; then exit 0; fi
+clearStore
 
-max=1000
+max=500
 
 reference=$NIX_STORE_DIR/abcdef
 touch $reference
@@ -13,46 +12,23 @@ echo "making registration..."
 
 for ((n = 0; n < $max; n++)); do
     storePath=$NIX_STORE_DIR/$n
-    touch $storePath
+    echo -n > $storePath
     ref2=$NIX_STORE_DIR/$((n+1))
     if test $((n+1)) = $max; then
         ref2=$reference
     fi
-    (echo $storePath && echo && echo 2 && echo $reference && echo $ref2)
+    echo $storePath; echo; echo 2; echo $reference; echo $ref2
 done > $TEST_ROOT/reg_info
 
 echo "registering..."
 
-time $nixstore --register-validity < $TEST_ROOT/reg_info
-
-oldTime=$(cat test-tmp/db/info/1 | grep Registered-At)
-
-echo "sleeping..."
-
-sleep 2
-
-echo "reregistering..."
-
-time $nixstore --register-validity --reregister < $TEST_ROOT/reg_info
-
-newTime=$(cat test-tmp/db/info/1 | grep Registered-At)
-
-if test "$newTime" != "$oldTime"; then
-    echo "reregistration changed original registration time"
-    exit 1
-fi
-
-if test "$(cat test-tmp/db/referrer/1 | wc -w)" -ne 1; then
-    echo "reregistration duplicated referrers"
-    exit 1
-fi
+$nixstore --register-validity < $TEST_ROOT/reg_info
 
 echo "collecting garbage..."
 ln -sfn $reference "$NIX_STATE_DIR"/gcroots/ref
-time $nixstore --gc
+$nixstore --gc
 
-if test "$(cat test-tmp/db/referrer/abcdef | wc -w)" -ne 0; then
+if test "$(sqlite3 ./test-tmp/db/db.sqlite 'select count(*) from Refs')" -ne 0; then
     echo "referrers not cleaned up"
     exit 1
 fi
-
diff --git a/tests/substituter.sh b/tests/substituter.sh
index 94ae26472501..65c4fa1856d0 100755
--- a/tests/substituter.sh
+++ b/tests/substituter.sh
@@ -16,7 +16,8 @@ if test $1 = "--query"; then
             echo 1
             echo "" # deriver
             echo 0 # nr of refs
-            echo 0 # download size
+            echo $((1 * 1024 * 1024)) # download size
+            echo $((2 * 1024 * 1024)) # nar size
         else
             echo "bad command $cmd"
             exit 1
diff --git a/tests/substituter2.sh b/tests/substituter2.sh
index 70f7d0943b98..c56a1bc47b5c 100755
--- a/tests/substituter2.sh
+++ b/tests/substituter2.sh
@@ -16,6 +16,7 @@ if test $1 = "--query"; then
             echo "" # deriver
             echo 0 # nr of refs
             echo 0 # download size
+            echo 0 # nar size
         else
             echo "bad command $cmd"
             exit 1
diff --git a/tests/substitutes.sh b/tests/substitutes.sh
index 042827314bd9..b48576c8c302 100644
--- a/tests/substitutes.sh
+++ b/tests/substitutes.sh
@@ -14,6 +14,8 @@ echo $outPath > $TEST_ROOT/sub-paths
 
 export NIX_SUBSTITUTERS=$(pwd)/substituter.sh
 
+$nixstore -r "$drvPath" --dry-run 2>&1 | grep -q "1.00 MiB.*2.00 MiB"
+
 $nixstore -rvv "$drvPath"
 
 text=$(cat "$outPath"/hello)