-rw-r--r--  .gitignore | 124
-rw-r--r--  COPYING | 504
-rw-r--r--  INSTALL | 229
-rw-r--r--  Makefile | 29
-rw-r--r--  Makefile.config.in | 35
-rw-r--r--  README | 10
-rwxr-xr-x  bootstrap.sh | 4
-rwxr-xr-x  config/config.guess | 1537
-rwxr-xr-x  config/config.sub | 1786
-rwxr-xr-x  config/install-sh | 527
-rw-r--r--  configure.ac | 329
-rw-r--r--  corepkgs/buildenv.nix | 28
-rw-r--r--  corepkgs/buildenv.pl | 168
-rw-r--r--  corepkgs/config.nix.in | 17
-rw-r--r--  corepkgs/derivation.nix | 27
-rw-r--r--  corepkgs/fetchurl.nix | 45
-rw-r--r--  corepkgs/imported-drv-to-derivation.nix | 21
-rw-r--r--  corepkgs/local.mk | 5
-rw-r--r--  corepkgs/nar.nix | 49
-rw-r--r--  corepkgs/unpack-channel.nix | 42
-rwxr-xr-x  dev-shell | 18
-rw-r--r--  doc/manual/bugs.xml | 39
-rw-r--r--  doc/manual/build-farm.xml | 113
-rw-r--r--  doc/manual/builtins.xml | 949
-rw-r--r--  doc/manual/conf-file.xml | 538
-rw-r--r--  doc/manual/env-common.xml | 338
-rw-r--r--  doc/manual/figures/user-environments.png | bin 0 -> 85031 bytes
-rw-r--r--  doc/manual/figures/user-environments.sxd | bin 0 -> 8412 bytes
-rw-r--r--  doc/manual/glossary.xml | 179
-rw-r--r--  doc/manual/hacking.xml | 41
-rw-r--r--  doc/manual/images/callouts/1.gif | bin 0 -> 889 bytes
-rw-r--r--  doc/manual/images/callouts/10.gif | bin 0 -> 929 bytes
-rw-r--r--  doc/manual/images/callouts/11.gif | bin 0 -> 202 bytes
-rw-r--r--  doc/manual/images/callouts/12.gif | bin 0 -> 210 bytes
-rw-r--r--  doc/manual/images/callouts/13.gif | bin 0 -> 209 bytes
-rw-r--r--  doc/manual/images/callouts/14.gif | bin 0 -> 205 bytes
-rw-r--r--  doc/manual/images/callouts/15.gif | bin 0 -> 210 bytes
-rw-r--r--  doc/manual/images/callouts/2.gif | bin 0 -> 907 bytes
-rw-r--r--  doc/manual/images/callouts/3.gif | bin 0 -> 914 bytes
-rw-r--r--  doc/manual/images/callouts/4.gif | bin 0 -> 907 bytes
-rw-r--r--  doc/manual/images/callouts/5.gif | bin 0 -> 916 bytes
-rw-r--r--  doc/manual/images/callouts/6.gif | bin 0 -> 218 bytes
-rw-r--r--  doc/manual/images/callouts/7.gif | bin 0 -> 907 bytes
-rw-r--r--  doc/manual/images/callouts/8.gif | bin 0 -> 918 bytes
-rw-r--r--  doc/manual/images/callouts/9.gif | bin 0 -> 923 bytes
-rw-r--r--  doc/manual/installation.xml | 447
-rw-r--r--  doc/manual/introduction.xml | 273
-rw-r--r--  doc/manual/local.mk | 113
-rw-r--r--  doc/manual/manual.xml | 84
-rw-r--r--  doc/manual/nix-build.xml | 189
-rw-r--r--  doc/manual/nix-channel.xml | 112
-rw-r--r--  doc/manual/nix-collect-garbage.xml | 67
-rw-r--r--  doc/manual/nix-copy-closure.xml | 187
-rw-r--r--  doc/manual/nix-daemon.xml | 34
-rw-r--r--  doc/manual/nix-env.xml | 1300
-rw-r--r--  doc/manual/nix-hash.xml | 164
-rw-r--r--  doc/manual/nix-install-package.xml | 198
-rw-r--r--  doc/manual/nix-instantiate.xml | 262
-rw-r--r--  doc/manual/nix-lang-ref.xml | 182
-rw-r--r--  doc/manual/nix-prefetch-url.xml | 93
-rw-r--r--  doc/manual/nix-pull.xml | 50
-rw-r--r--  doc/manual/nix-push.xml | 398
-rw-r--r--  doc/manual/nix-shell.xml | 200
-rw-r--r--  doc/manual/nix-store.xml | 1351
-rw-r--r--  doc/manual/opt-common-syn.xml | 47
-rw-r--r--  doc/manual/opt-common.xml | 389
-rw-r--r--  doc/manual/opt-inst-syn.xml | 22
-rw-r--r--  doc/manual/package-management.xml | 591
-rw-r--r--  doc/manual/quick-start.xml | 110
-rw-r--r--  doc/manual/quote-literals.xsl | 40
-rw-r--r--  doc/manual/release-notes.xml | 2521
-rw-r--r--  doc/manual/schemas.xml | 4
-rw-r--r--  doc/manual/style.css | 255
-rw-r--r--  doc/manual/troubleshooting.xml | 92
-rw-r--r--  doc/manual/writing-nix-expressions.xml | 1900
-rw-r--r--  doc/signing.txt | 24
-rw-r--r--  local.mk | 13
-rw-r--r--  misc/emacs/README | 10
-rw-r--r--  misc/emacs/local.mk | 1
-rw-r--r--  misc/emacs/nix-mode.el | 95
-rw-r--r--  misc/systemd/local.mk | 5
-rw-r--r--  misc/systemd/nix-daemon.service.in | 9
-rw-r--r--  misc/systemd/nix-daemon.socket.in | 11
-rw-r--r--  misc/upstart/local.mk | 5
-rw-r--r--  misc/upstart/nix-daemon.conf.in | 5
-rw-r--r--  misc/vim/syntax/nix.vim | 37
-rw-r--r--  mk/README.md (renamed from README.md) | 0
-rw-r--r--  mk/clean.mk (renamed from clean.mk) | 0
-rw-r--r--  mk/dist.mk (renamed from dist.mk) | 0
-rw-r--r--  mk/functions.mk (renamed from functions.mk) | 0
-rw-r--r--  mk/install.mk (renamed from install.mk) | 0
-rw-r--r--  mk/jars.mk (renamed from jars.mk) | 0
-rw-r--r--  mk/lib.mk (renamed from lib.mk) | 0
-rw-r--r--  mk/libraries.mk (renamed from libraries.mk) | 0
-rw-r--r--  mk/patterns.mk (renamed from patterns.mk) | 0
-rw-r--r--  mk/programs.mk (renamed from programs.mk) | 0
-rw-r--r--  mk/templates.mk (renamed from templates.mk) | 0
-rw-r--r--  mk/tests.mk (renamed from tests.mk) | 0
-rw-r--r--  mk/tracing.mk (renamed from tracing.mk) | 0
-rw-r--r--  nix.spec.in | 194
-rw-r--r--  perl/MANIFEST | 7
-rw-r--r--  perl/lib/Nix/Config.pm.in | 42
-rw-r--r--  perl/lib/Nix/CopyClosure.pm | 115
-rw-r--r--  perl/lib/Nix/Crypto.pm | 42
-rw-r--r--  perl/lib/Nix/GeneratePatches.pm | 340
-rw-r--r--  perl/lib/Nix/Manifest.pm | 468
-rw-r--r--  perl/lib/Nix/SSH.pm | 103
-rw-r--r--  perl/lib/Nix/Store.pm | 92
-rw-r--r--  perl/lib/Nix/Store.xs | 292
-rw-r--r--  perl/lib/Nix/Utils.pm | 47
-rw-r--r--  perl/local.mk | 40
-rw-r--r--  release.nix | 301
-rwxr-xr-x  scripts/build-remote.pl.in | 275
-rwxr-xr-x  scripts/copy-from-other-stores.pl.in | 102
-rw-r--r--  scripts/download-from-binary-cache.pl.in | 609
-rwxr-xr-x  scripts/download-using-manifests.pl.in | 376
-rwxr-xr-x  scripts/find-runtime-roots.pl.in | 79
-rw-r--r--  scripts/install-nix-from-closure.sh | 123
-rw-r--r--  scripts/local.mk | 37
-rwxr-xr-x  scripts/nix-build.in | 286
-rwxr-xr-x  scripts/nix-channel.in | 209
-rwxr-xr-x  scripts/nix-collect-garbage.in | 65
-rwxr-xr-x  scripts/nix-copy-closure.in | 99
-rwxr-xr-x  scripts/nix-generate-patches.in | 51
-rwxr-xr-x  scripts/nix-http-export.cgi.in | 51
-rwxr-xr-x  scripts/nix-install-package.in | 136
-rwxr-xr-x  scripts/nix-prefetch-url.in | 128
-rw-r--r--  scripts/nix-profile.sh.in | 31
-rwxr-xr-x  scripts/nix-pull.in | 99
-rwxr-xr-x  scripts/nix-push.in | 291
-rwxr-xr-x  scripts/nix-reduce-build.in | 171
-rwxr-xr-x  scripts/show-duplication.pl | 73
-rw-r--r--  src/boost/assert.hpp | 38
-rw-r--r--  src/boost/format.hpp | 64
-rw-r--r--  src/boost/format/exceptions.hpp | 96
-rw-r--r--  src/boost/format/feed_args.hpp | 247
-rw-r--r--  src/boost/format/format_class.hpp | 135
-rw-r--r--  src/boost/format/format_fwd.hpp | 49
-rw-r--r--  src/boost/format/format_implementation.cc | 256
-rw-r--r--  src/boost/format/free_funcs.cc | 71
-rw-r--r--  src/boost/format/group.hpp | 680
-rw-r--r--  src/boost/format/internals.hpp | 167
-rw-r--r--  src/boost/format/internals_fwd.hpp | 65
-rw-r--r--  src/boost/format/local.mk | 7
-rw-r--r--  src/boost/format/macros_default.hpp | 48
-rw-r--r--  src/boost/format/parsing.cc | 454
-rw-r--r--  src/boost/throw_exception.hpp | 47
-rw-r--r--  src/bsdiff-4.3/bsdiff.1 | 63
-rw-r--r--  src/bsdiff-4.3/bsdiff.c | 405
-rw-r--r--  src/bsdiff-4.3/bspatch.1 | 59
-rw-r--r--  src/bsdiff-4.3/bspatch.c | 224
-rw-r--r--  src/bsdiff-4.3/compat-include/err.h | 12
-rw-r--r--  src/bsdiff-4.3/local.mk | 11
-rw-r--r--  src/download-via-ssh/download-via-ssh.cc | 141
-rw-r--r--  src/download-via-ssh/local.mk | 11
-rw-r--r--  src/libexpr/attr-path.cc | 97
-rw-r--r--  src/libexpr/attr-path.hh | 13
-rw-r--r--  src/libexpr/common-opts.cc | 62
-rw-r--r--  src/libexpr/common-opts.hh | 18
-rw-r--r--  src/libexpr/eval-inline.hh | 82
-rw-r--r--  src/libexpr/eval.cc | 1467
-rw-r--r--  src/libexpr/eval.hh | 299
-rw-r--r--  src/libexpr/get-drvs.cc | 302
-rw-r--r--  src/libexpr/get-drvs.hh | 91
-rw-r--r--  src/libexpr/json-to-value.cc | 144
-rw-r--r--  src/libexpr/json-to-value.hh | 13
-rw-r--r--  src/libexpr/lexer.l | 193
-rw-r--r--  src/libexpr/local.mk | 30
-rw-r--r--  src/libexpr/names.cc | 104
-rw-r--r--  src/libexpr/names.hh | 24
-rw-r--r--  src/libexpr/nixexpr.cc | 383
-rw-r--r--  src/libexpr/nixexpr.hh | 332
-rw-r--r--  src/libexpr/parser.y | 656
-rw-r--r--  src/libexpr/primops.cc | 1483
-rw-r--r--  src/libexpr/symbol-table.hh | 88
-rw-r--r--  src/libexpr/value-to-json.cc | 86
-rw-r--r--  src/libexpr/value-to-json.hh | 64
-rw-r--r--  src/libexpr/value-to-xml.cc | 163
-rw-r--r--  src/libexpr/value-to-xml.hh | 14
-rw-r--r--  src/libexpr/value.hh | 162
-rw-r--r--  src/libmain/local.mk | 11
-rw-r--r--  src/libmain/shared.cc | 327
-rw-r--r--  src/libmain/shared.hh | 86
-rw-r--r--  src/libmain/stack.cc | 72
-rw-r--r--  src/libstore/build.cc | 3327
-rw-r--r--  src/libstore/derivations.cc | 288
-rw-r--r--  src/libstore/derivations.hh | 93
-rw-r--r--  src/libstore/gc.cc | 760
-rw-r--r--  src/libstore/globals.cc | 266
-rw-r--r--  src/libstore/globals.hh | 224
-rw-r--r--  src/libstore/local-store.cc | 2019
-rw-r--r--  src/libstore/local-store.hh | 336
-rw-r--r--  src/libstore/local.mk | 32
-rw-r--r--  src/libstore/misc.cc | 220
-rw-r--r--  src/libstore/misc.hh | 38
-rw-r--r--  src/libstore/optimise-store.cc | 238
-rw-r--r--  src/libstore/pathlocks.cc | 199
-rw-r--r--  src/libstore/pathlocks.hh | 45
-rw-r--r--  src/libstore/references.cc | 122
-rw-r--r--  src/libstore/references.hh | 11
-rw-r--r--  src/libstore/remote-store.cc | 617
-rw-r--r--  src/libstore/remote-store.hh | 103
-rw-r--r--  src/libstore/schema.sql | 44
-rw-r--r--  src/libstore/store-api.cc | 331
-rw-r--r--  src/libstore/store-api.hh | 366
-rw-r--r--  src/libstore/worker-protocol.hh | 59
-rw-r--r--  src/libutil/affinity.cc | 55
-rw-r--r--  src/libutil/affinity.hh | 9
-rw-r--r--  src/libutil/archive.cc | 362
-rw-r--r--  src/libutil/archive.hh | 79
-rw-r--r--  src/libutil/hash.cc | 382
-rw-r--r--  src/libutil/hash.hh | 113
-rw-r--r--  src/libutil/local.mk | 15
-rw-r--r--  src/libutil/md32_common.h | 620
-rw-r--r--  src/libutil/md5.c | 365
-rw-r--r--  src/libutil/md5.h | 82
-rw-r--r--  src/libutil/monitor-fd.hh | 42
-rw-r--r--  src/libutil/serialise.cc | 286
-rw-r--r--  src/libutil/serialise.hh | 132
-rw-r--r--  src/libutil/sha1.c | 369
-rw-r--r--  src/libutil/sha1.h | 28
-rw-r--r--  src/libutil/sha256.c | 238
-rw-r--r--  src/libutil/sha256.h | 35
-rw-r--r--  src/libutil/types.hh | 86
-rw-r--r--  src/libutil/util.cc | 1144
-rw-r--r--  src/libutil/util.hh | 384
-rw-r--r--  src/libutil/xml-writer.cc | 94
-rw-r--r--  src/libutil/xml-writer.hh | 69
-rw-r--r--  src/nix-daemon/local.mk | 15
-rw-r--r--  src/nix-daemon/nix-daemon.cc | 820
-rw-r--r--  src/nix-env/local.mk | 7
-rw-r--r--  src/nix-env/nix-env.cc | 1439
-rw-r--r--  src/nix-env/profiles.cc | 146
-rw-r--r--  src/nix-env/profiles.hh | 55
-rw-r--r--  src/nix-env/user-env.cc | 151
-rw-r--r--  src/nix-env/user-env.hh | 13
-rw-r--r--  src/nix-hash/local.mk | 7
-rw-r--r--  src/nix-hash/nix-hash.cc | 63
-rw-r--r--  src/nix-instantiate/local.mk | 7
-rw-r--r--  src/nix-instantiate/nix-instantiate.cc | 196
-rw-r--r--  src/nix-log2xml/local.mk | 5
-rw-r--r--  src/nix-log2xml/log2xml.cc | 201
-rw-r--r--  src/nix-log2xml/logfile.css | 86
-rw-r--r--  src/nix-store/dotgraph.cc | 162
-rw-r--r--  src/nix-store/dotgraph.hh | 9
-rw-r--r--  src/nix-store/local.mk | 11
-rw-r--r--  src/nix-store/nix-store.cc | 1112
-rw-r--r--  src/nix-store/serve-protocol.hh | 22
-rw-r--r--  src/nix-store/xmlgraph.cc | 71
-rw-r--r--  src/nix-store/xmlgraph.hh | 9
-rw-r--r--  tests/add.sh | 28
-rw-r--r--  tests/binary-cache.sh | 89
-rw-r--r--  tests/binary-patching.nix | 18
-rw-r--r--  tests/binary-patching.sh | 61
-rwxr-xr-x  tests/build-hook.hook.sh | 23
-rw-r--r--  tests/build-hook.nix | 22
-rw-r--r--  tests/build-hook.sh | 10
-rw-r--r--  tests/case-hack.sh | 19
-rw-r--r--  tests/case.nar | bin 0 -> 2416 bytes
-rw-r--r--  tests/check-refs.nix | 58
-rw-r--r--  tests/check-refs.sh | 36
-rw-r--r--  tests/common.sh.in | 88
-rw-r--r--  tests/config.nix | 20
-rw-r--r--  tests/dependencies.builder0.sh | 16
-rw-r--r--  tests/dependencies.builder1.sh | 2
-rw-r--r--  tests/dependencies.builder2.sh | 2
-rw-r--r--  tests/dependencies.nix | 23
-rw-r--r--  tests/dependencies.sh | 52
-rw-r--r--  tests/dump-db.sh | 20
-rw-r--r--  tests/export-graph.nix | 29
-rw-r--r--  tests/export-graph.sh | 30
-rw-r--r--  tests/export.sh | 31
-rw-r--r--  tests/fallback.sh | 20
-rw-r--r--  tests/fetchurl.nix | 6
-rw-r--r--  tests/fetchurl.sh | 9
-rw-r--r--  tests/filter-source.nix | 12
-rw-r--r--  tests/filter-source.sh | 20
-rw-r--r--  tests/fixed.builder1.sh | 3
-rw-r--r--  tests/fixed.builder2.sh | 6
-rw-r--r--  tests/fixed.nix | 50
-rw-r--r--  tests/fixed.sh | 52
-rw-r--r--  tests/gc-concurrent.builder.sh | 13
-rw-r--r--  tests/gc-concurrent.nix | 27
-rw-r--r--  tests/gc-concurrent.sh | 58
-rw-r--r--  tests/gc-concurrent2.builder.sh | 7
-rw-r--r--  tests/gc-runtime.nix | 17
-rw-r--r--  tests/gc-runtime.sh | 38
-rw-r--r--  tests/gc.sh | 40
-rw-r--r--  tests/hash-check.nix | 29
-rw-r--r--  tests/hash.sh | 62
-rw-r--r--  tests/import-derivation.nix | 23
-rw-r--r--  tests/import-derivation.sh | 12
-rw-r--r--  tests/init.sh | 33
-rw-r--r--  tests/install-package.sh | 21
-rw-r--r--  tests/lang.sh | 70
-rw-r--r--  tests/lang/dir1/a.nix | 1
-rw-r--r--  tests/lang/dir2/a.nix | 1
-rw-r--r--  tests/lang/dir2/b.nix | 1
-rw-r--r--  tests/lang/dir3/a.nix | 1
-rw-r--r--  tests/lang/dir3/b.nix | 1
-rw-r--r--  tests/lang/dir3/c.nix | 1
-rw-r--r--  tests/lang/dir4/a.nix | 1
-rw-r--r--  tests/lang/dir4/c.nix | 1
-rw-r--r--  tests/lang/eval-fail-abort.nix | 1
-rw-r--r--  tests/lang/eval-fail-antiquoted-path.nix | 4
-rw-r--r--  tests/lang/eval-fail-assert.nix | 5
-rw-r--r--  tests/lang/eval-fail-bad-antiquote-1.nix | 1
-rw-r--r--  tests/lang/eval-fail-bad-antiquote-2.nix | 1
-rw-r--r--  tests/lang/eval-fail-bad-antiquote-3.nix | 1
-rw-r--r--  tests/lang/eval-fail-blackhole.nix | 5
-rw-r--r--  tests/lang/eval-fail-missing-arg.nix | 1
-rw-r--r--  tests/lang/eval-fail-remove.nix | 5
-rw-r--r--  tests/lang/eval-fail-scope-5.nix | 10
-rw-r--r--  tests/lang/eval-fail-substring.nix | 1
-rw-r--r--  tests/lang/eval-fail-to-path.nix | 1
-rw-r--r--  tests/lang/eval-fail-undeclared-arg.nix | 1
-rw-r--r--  tests/lang/eval-okay-arithmetic.exp | 1
-rw-r--r--  tests/lang/eval-okay-arithmetic.nix | 55
-rw-r--r--  tests/lang/eval-okay-attrnames.exp | 1
-rw-r--r--  tests/lang/eval-okay-attrnames.nix | 11
-rw-r--r--  tests/lang/eval-okay-attrs.exp | 1
-rw-r--r--  tests/lang/eval-okay-attrs.nix | 5
-rw-r--r--  tests/lang/eval-okay-attrs2.exp | 1
-rw-r--r--  tests/lang/eval-okay-attrs2.nix | 10
-rw-r--r--  tests/lang/eval-okay-attrs3.exp | 1
-rw-r--r--  tests/lang/eval-okay-attrs3.nix | 22
-rw-r--r--  tests/lang/eval-okay-attrs4.exp | 1
-rw-r--r--  tests/lang/eval-okay-attrs4.nix | 7
-rw-r--r--  tests/lang/eval-okay-attrs5.exp | 1
-rw-r--r--  tests/lang/eval-okay-attrs5.nix | 21
-rw-r--r--  tests/lang/eval-okay-autoargs.exp | 1
-rw-r--r--  tests/lang/eval-okay-autoargs.flags | 1
-rw-r--r--  tests/lang/eval-okay-autoargs.nix | 15
-rw-r--r--  tests/lang/eval-okay-builtins.exp | 1
-rw-r--r--  tests/lang/eval-okay-builtins.nix | 12
-rw-r--r--  tests/lang/eval-okay-closure.exp.xml | 343
-rw-r--r--  tests/lang/eval-okay-closure.nix | 13
-rw-r--r--  tests/lang/eval-okay-concat.exp | 1
-rw-r--r--  tests/lang/eval-okay-concat.nix | 1
-rw-r--r--  tests/lang/eval-okay-context.exp | 1
-rw-r--r--  tests/lang/eval-okay-context.nix | 6
-rw-r--r--  tests/lang/eval-okay-curpos.exp | 1
-rw-r--r--  tests/lang/eval-okay-curpos.nix | 5
-rw-r--r--  tests/lang/eval-okay-delayed-with-inherit.exp | 1
-rw-r--r--  tests/lang/eval-okay-delayed-with-inherit.nix | 24
-rw-r--r--  tests/lang/eval-okay-delayed-with.exp | 1
-rw-r--r--  tests/lang/eval-okay-delayed-with.nix | 29
-rw-r--r--  tests/lang/eval-okay-dynamic-attrs-2.exp | 1
-rw-r--r--  tests/lang/eval-okay-dynamic-attrs-2.nix | 1
-rw-r--r--  tests/lang/eval-okay-dynamic-attrs-bare.exp | 1
-rw-r--r--  tests/lang/eval-okay-dynamic-attrs-bare.nix | 17
-rw-r--r--  tests/lang/eval-okay-dynamic-attrs.exp | 1
-rw-r--r--  tests/lang/eval-okay-dynamic-attrs.nix | 17
-rw-r--r--  tests/lang/eval-okay-elem.exp | 1
-rw-r--r--  tests/lang/eval-okay-elem.nix | 6
-rw-r--r--  tests/lang/eval-okay-empty-args.exp | 1
-rw-r--r--  tests/lang/eval-okay-empty-args.nix | 1
-rw-r--r--  tests/lang/eval-okay-eq-derivations.exp | 1
-rw-r--r--  tests/lang/eval-okay-eq-derivations.nix | 10
-rw-r--r--  tests/lang/eval-okay-eq.exp.disabled | 1
-rw-r--r--  tests/lang/eval-okay-eq.nix | 3
-rw-r--r--  tests/lang/eval-okay-filter.exp | 1
-rw-r--r--  tests/lang/eval-okay-filter.nix | 5
-rw-r--r--  tests/lang/eval-okay-flatten.exp | 1
-rw-r--r--  tests/lang/eval-okay-flatten.nix | 8
-rw-r--r--  tests/lang/eval-okay-fromjson.exp | 1
-rw-r--r--  tests/lang/eval-okay-fromjson.nix | 32
-rw-r--r--  tests/lang/eval-okay-functionargs.exp.xml | 15
-rw-r--r--  tests/lang/eval-okay-functionargs.nix | 80
-rw-r--r--  tests/lang/eval-okay-getattrpos.exp | 1
-rw-r--r--  tests/lang/eval-okay-getattrpos.nix | 6
-rw-r--r--  tests/lang/eval-okay-getenv.exp | 1
-rw-r--r--  tests/lang/eval-okay-getenv.nix | 1
-rw-r--r--  tests/lang/eval-okay-hash.exp | 1
-rw-r--r--  tests/lang/eval-okay-hash.nix | 7
-rw-r--r--  tests/lang/eval-okay-if.exp | 1
-rw-r--r--  tests/lang/eval-okay-if.nix | 1
-rw-r--r--  tests/lang/eval-okay-import.exp | 1
-rw-r--r--  tests/lang/eval-okay-import.nix | 11
-rw-r--r--  tests/lang/eval-okay-ind-string.exp | 1
-rw-r--r--  tests/lang/eval-okay-ind-string.nix | 120
-rw-r--r--  tests/lang/eval-okay-let.exp | 1
-rw-r--r--  tests/lang/eval-okay-let.nix | 5
-rw-r--r--  tests/lang/eval-okay-list.exp | 1
-rw-r--r--  tests/lang/eval-okay-list.nix | 7
-rw-r--r--  tests/lang/eval-okay-listtoattrs.exp | 1
-rw-r--r--  tests/lang/eval-okay-listtoattrs.nix | 11
-rw-r--r--  tests/lang/eval-okay-logic.exp | 1
-rw-r--r--  tests/lang/eval-okay-logic.nix | 1
-rw-r--r--  tests/lang/eval-okay-map.exp | 1
-rw-r--r--  tests/lang/eval-okay-map.nix | 3
-rw-r--r--  tests/lang/eval-okay-new-let.exp | 1
-rw-r--r--  tests/lang/eval-okay-new-let.nix | 14
-rw-r--r--  tests/lang/eval-okay-null-dynamic-attrs.exp | 1
-rw-r--r--  tests/lang/eval-okay-null-dynamic-attrs.nix | 1
-rw-r--r--  tests/lang/eval-okay-overrides.exp | 1
-rw-r--r--  tests/lang/eval-okay-overrides.nix | 9
-rw-r--r--  tests/lang/eval-okay-pathexists.exp | 1
-rw-r--r--  tests/lang/eval-okay-pathexists.nix | 5
-rw-r--r--  tests/lang/eval-okay-patterns.exp | 1
-rw-r--r--  tests/lang/eval-okay-patterns.nix | 16
-rw-r--r--  tests/lang/eval-okay-readfile.exp | 1
-rw-r--r--  tests/lang/eval-okay-readfile.nix | 1
-rw-r--r--  tests/lang/eval-okay-redefine-builtin.exp | 1
-rw-r--r--  tests/lang/eval-okay-redefine-builtin.nix | 3
-rw-r--r--  tests/lang/eval-okay-remove.exp | 1
-rw-r--r--  tests/lang/eval-okay-remove.nix | 5
-rw-r--r--  tests/lang/eval-okay-scope-1.exp | 1
-rw-r--r--  tests/lang/eval-okay-scope-1.nix | 6
-rw-r--r--  tests/lang/eval-okay-scope-2.exp | 1
-rw-r--r--  tests/lang/eval-okay-scope-2.nix | 6
-rw-r--r--  tests/lang/eval-okay-scope-3.exp | 1
-rw-r--r--  tests/lang/eval-okay-scope-3.nix | 6
-rw-r--r--  tests/lang/eval-okay-scope-4.exp | 1
-rw-r--r--  tests/lang/eval-okay-scope-4.nix | 10
-rw-r--r--  tests/lang/eval-okay-scope-6.exp | 1
-rw-r--r--  tests/lang/eval-okay-scope-6.nix | 7
-rw-r--r--  tests/lang/eval-okay-scope-7.exp | 1
-rw-r--r--  tests/lang/eval-okay-scope-7.nix | 6
-rw-r--r--  tests/lang/eval-okay-search-path.exp | 1
-rw-r--r--  tests/lang/eval-okay-search-path.flags | 1
-rw-r--r--  tests/lang/eval-okay-search-path.nix | 11
-rw-r--r--  tests/lang/eval-okay-string.exp | 1
-rw-r--r--  tests/lang/eval-okay-string.nix | 10
-rw-r--r--  tests/lang/eval-okay-strings-as-attrs-names.exp | 1
-rw-r--r--  tests/lang/eval-okay-strings-as-attrs-names.nix | 20
-rw-r--r--  tests/lang/eval-okay-substring.exp | 1
-rw-r--r--  tests/lang/eval-okay-substring.nix | 21
-rw-r--r--  tests/lang/eval-okay-tail-call-1.exp-disabled | 1
-rw-r--r--  tests/lang/eval-okay-tail-call-1.nix | 3
-rw-r--r--  tests/lang/eval-okay-tojson.exp | 1
-rw-r--r--  tests/lang/eval-okay-tojson.nix | 11
-rw-r--r--  tests/lang/eval-okay-toxml.exp | 1
-rw-r--r--  tests/lang/eval-okay-toxml.nix | 3
-rw-r--r--  tests/lang/eval-okay-toxml2.exp | 1
-rw-r--r--  tests/lang/eval-okay-toxml2.nix | 1
-rw-r--r--  tests/lang/eval-okay-tryeval.exp | 1
-rw-r--r--  tests/lang/eval-okay-tryeval.nix | 5
-rw-r--r--  tests/lang/eval-okay-types.exp | 1
-rw-r--r--  tests/lang/eval-okay-types.nix | 25
-rw-r--r--  tests/lang/eval-okay-versions.exp | 1
-rw-r--r--  tests/lang/eval-okay-versions.nix | 40
-rw-r--r--  tests/lang/eval-okay-with.exp | 1
-rw-r--r--  tests/lang/eval-okay-with.nix | 19
-rw-r--r--  tests/lang/eval-okay-xml.exp.xml | 49
-rw-r--r--  tests/lang/eval-okay-xml.nix | 19
-rw-r--r--  tests/lang/imported.nix | 3
-rw-r--r--  tests/lang/imported2.nix | 1
-rw-r--r--  tests/lang/lib.nix | 61
-rw-r--r--  tests/lang/parse-fail-dup-attrs-1.nix | 4
-rw-r--r--  tests/lang/parse-fail-dup-attrs-2.nix | 13
-rw-r--r--  tests/lang/parse-fail-dup-attrs-3.nix | 13
-rw-r--r--  tests/lang/parse-fail-dup-attrs-4.nix | 4
-rw-r--r--  tests/lang/parse-fail-dup-attrs-6.nix | 4
-rw-r--r--  tests/lang/parse-fail-dup-attrs-7.nix | 9
-rw-r--r--  tests/lang/parse-fail-dup-formals.nix | 1
-rw-r--r--  tests/lang/parse-fail-patterns-1.nix | 1
-rw-r--r--  tests/lang/parse-fail-regression-20060610.nix | 11
-rw-r--r--  tests/lang/parse-fail-undef-var-2.nix | 7
-rw-r--r--  tests/lang/parse-fail-undef-var.nix | 1
-rw-r--r--  tests/lang/parse-okay-1.nix | 1
-rw-r--r--  tests/lang/parse-okay-crlf.nix | 17
-rw-r--r--  tests/lang/parse-okay-dup-attrs-5.nix | 4
-rw-r--r--  tests/lang/parse-okay-regression-20041027.nix | 11
-rw-r--r--  tests/lang/parse-okay-subversion.nix | 43
-rw-r--r--  tests/lang/parse-okay-url.nix | 7
-rw-r--r--  tests/local.mk | 22
-rw-r--r--  tests/logging.sh | 25
-rw-r--r--  tests/misc.sh | 16
-rw-r--r--  tests/multiple-outputs.nix | 68
-rw-r--r--  tests/multiple-outputs.sh | 63
-rw-r--r--  tests/negative-caching.nix | 21
-rw-r--r--  tests/negative-caching.sh | 22
-rw-r--r--  tests/nix-build.sh | 19
-rw-r--r--  tests/nix-channel.sh | 43
-rw-r--r--  tests/nix-copy-closure.nix | 63
-rw-r--r--  tests/nix-profile.sh | 10
-rw-r--r--  tests/nix-pull.sh | 33
-rw-r--r--  tests/nix-push.sh | 12
-rw-r--r--  tests/optimise-store.sh | 43
-rw-r--r--  tests/parallel.builder.sh | 29
-rw-r--r--  tests/parallel.nix | 19
-rw-r--r--  tests/parallel.sh | 56
-rw-r--r--  tests/referrers.sh | 36
-rw-r--r--  tests/remote-builds.nix | 103
-rw-r--r--  tests/remote-store.sh | 16
-rw-r--r--  tests/repair.sh | 65
-rw-r--r--  tests/secure-drv-outputs.nix | 23
-rw-r--r--  tests/secure-drv-outputs.sh | 37
-rw-r--r--  tests/simple.builder.sh | 11
-rw-r--r--  tests/simple.nix | 8
-rw-r--r--  tests/simple.sh | 25
-rwxr-xr-x  tests/substituter.sh | 37
-rwxr-xr-x  tests/substituter2.sh | 33
-rw-r--r--  tests/substitutes.sh | 22
-rw-r--r--  tests/substitutes2.sh | 21
-rw-r--r--  tests/timeout.builder.sh | 2
-rw-r--r--  tests/timeout.nix | 6
-rw-r--r--  tests/timeout.sh | 21
-rw-r--r--  tests/user-envs.builder.sh | 5
-rw-r--r--  tests/user-envs.nix | 29
-rw-r--r--  tests/user-envs.sh | 168
-rw-r--r--  version | 1
503 files changed, 58686 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000000..0a3c979208bf
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,124 @@
+Makefile.config
+
+# /
+/aclocal.m4
+/autom4te.cache
+/config.*
+/configure
+/nix.spec
+/stamp-h1
+/svn-revision
+/NEWS
+/libtool
+
+/corepkgs/config.nix
+
+# /corepkgs/buildenv/
+/corepkgs/buildenv/builder.pl
+
+# /corepkgs/channels/
+/corepkgs/channels/unpack.sh
+
+# /corepkgs/nar/
+/corepkgs/nar/nar.sh
+/corepkgs/nar/unnar.sh
+
+# /doc/manual/
+/doc/manual/manual.html
+/doc/manual/manual.xmli
+/doc/manual/manual.pdf
+/doc/manual/manual.is-valid
+/doc/manual/*.1
+/doc/manual/*.5
+/doc/manual/*.8
+/doc/manual/version.txt
+/doc/manual/release-notes.html
+
+# /scripts/
+/scripts/nix-profile.sh
+/scripts/nix-pull
+/scripts/nix-push
+/scripts/nix-switch
+/scripts/nix-collect-garbage
+/scripts/nix-prefetch-url
+/scripts/nix-install-package
+/scripts/nix-channel
+/scripts/nix-build
+/scripts/nix-copy-closure
+/scripts/nix-generate-patches
+/scripts/NixConfig.pm
+/scripts/NixManifest.pm
+/scripts/GeneratePatches.pm
+/scripts/download-using-manifests.pl
+/scripts/copy-from-other-stores.pl
+/scripts/download-from-binary-cache.pl
+/scripts/find-runtime-roots.pl
+/scripts/build-remote.pl
+/scripts/nix-reduce-build
+/scripts/nix-http-export.cgi
+
+# /src/bsdiff-4.3/
+/src/bsdiff-4.3/bsdiff
+/src/bsdiff-4.3/bspatch
+
+# /src/libexpr/
+/src/libexpr/lexer-tab.cc
+/src/libexpr/lexer-tab.hh
+/src/libexpr/parser-tab.cc
+/src/libexpr/parser-tab.hh
+/src/libexpr/parser-tab.output
+/src/libexpr/nix.tbl
+
+# /src/libstore/
+/src/libstore/schema.sql.hh
+
+# /src/nix-env/
+/src/nix-env/nix-env
+
+# /src/nix-hash/
+/src/nix-hash/nix-hash
+
+# /src/nix-instantiate/
+/src/nix-instantiate/nix-instantiate
+
+# /src/nix-log2xml/
+/src/nix-log2xml/nix-log2xml
+
+# /src/nix-store/
+/src/nix-store/nix-store
+
+# /src/nix-daemon/
+/src/nix-daemon/nix-daemon
+
+# /src/download-via-ssh/
+/src/download-via-ssh/download-via-ssh
+
+# /tests/
+/tests/test-tmp
+/tests/common.sh
+/tests/dummy
+/tests/result*
+
+# /tests/lang/
+/tests/lang/*.out
+/tests/lang/*.out.xml
+/tests/lang/*.ast
+
+/perl/lib/Nix/Config.pm
+/perl/lib/Nix/Store.cc
+
+/misc/systemd/nix-daemon.service
+/misc/systemd/nix-daemon.socket
+/misc/upstart/nix-daemon.conf
+
+*.a
+*.o
+*.so
+*.dep
+*~
+
+# GNU Global
+GPATH
+GRTAGS
+GSYMS
+GTAGS
diff --git a/COPYING b/COPYING
new file mode 100644
index 000000000000..5ab7695ab8ca
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,504 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 000000000000..a4b34144dcff
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,229 @@
+Copyright 1994, 1995, 1996, 1999, 2000, 2001, 2002 Free Software
+Foundation, Inc.
+
+   This file is free documentation; the Free Software Foundation gives
+unlimited permission to copy, distribute and modify it.
+
+Basic Installation
+==================
+
+   These are generic installation instructions.
+
+   The `configure' shell script attempts to guess correct values for
+various system-dependent variables used during compilation.  It uses
+those values to create a `Makefile' in each directory of the package.
+It may also create one or more `.h' files containing system-dependent
+definitions.  Finally, it creates a shell script `config.status' that
+you can run in the future to recreate the current configuration, and a
+file `config.log' containing compiler output (useful mainly for
+debugging `configure').
+
+   It can also use an optional file (typically called `config.cache'
+and enabled with `--cache-file=config.cache' or simply `-C') that saves
+the results of its tests to speed up reconfiguring.  (Caching is
+disabled by default to prevent problems with accidental use of stale
+cache files.)
+
+   If you need to do unusual things to compile the package, please try
+to figure out how `configure' could check whether to do them, and mail
+diffs or instructions to the address given in the `README' so they can
+be considered for the next release.  If you are using the cache, and at
+some point `config.cache' contains results you don't want to keep, you
+may remove or edit it.
+
+   The file `configure.ac' (or `configure.in') is used to create
+`configure' by a program called `autoconf'.  You only need
+`configure.ac' if you want to change it or regenerate `configure' using
+a newer version of `autoconf'.
+
+The simplest way to compile this package is:
+
+  1. `cd' to the directory containing the package's source code and type
+     `./configure' to configure the package for your system.  If you're
+     using `csh' on an old version of System V, you might need to type
+     `sh ./configure' instead to prevent `csh' from trying to execute
+     `configure' itself.
+
+     Running `configure' takes awhile.  While running, it prints some
+     messages telling which features it is checking for.
+
+  2. Type `make' to compile the package.
+
+  3. Optionally, type `make check' to run any self-tests that come with
+     the package.
+
+  4. Type `make install' to install the programs and any data files and
+     documentation.
+
+  5. You can remove the program binaries and object files from the
+     source code directory by typing `make clean'.  To also remove the
+     files that `configure' created (so you can compile the package for
+     a different kind of computer), type `make distclean'.  There is
+     also a `make maintainer-clean' target, but that is intended mainly
+     for the package's developers.  If you use it, you may have to get
+     all sorts of other programs in order to regenerate files that came
+     with the distribution.
+
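+   Concretely, the five steps above usually reduce to the following
+command sequence, run from the top of the source tree (`make check'
+is optional but recommended):
+
+     ./configure
+     make
+     make check
+     make install
+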
+Compilers and Options
+=====================
+
+   Some systems require unusual options for compilation or linking that
+the `configure' script does not know about.  Run `./configure --help'
+for details on some of the pertinent environment variables.
+
+   You can give `configure' initial values for configuration parameters
+by setting variables in the command line or in the environment.  Here
+is an example:
+
+     ./configure CC=c89 CFLAGS=-O2 LIBS=-lposix
+
+   *Note Defining Variables::, for more details.
+
+Compiling For Multiple Architectures
+====================================
+
+   You can compile the package for more than one kind of computer at the
+same time, by placing the object files for each architecture in their
+own directory.  To do this, you must use a version of `make' that
+supports the `VPATH' variable, such as GNU `make'.  `cd' to the
+directory where you want the object files and executables to go and run
+the `configure' script.  `configure' automatically checks for the
+source code in the directory that `configure' is in and in `..'.
+
+   If you have to use a `make' that does not support the `VPATH'
+variable, you have to compile the package for one architecture at a
+time in the source code directory.  After you have installed the
+package for one architecture, use `make distclean' before reconfiguring
+for another architecture.
+
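+   For example, an out-of-tree (VPATH) build with GNU `make' might look
+like this, where `build' is an arbitrary, illustrative directory name:
+
+     mkdir build
+     cd build
+     ../configure
+     make
+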
+Installation Names
+==================
+
+   By default, `make install' will install the package's files in
+`/usr/local/bin', `/usr/local/man', etc.  You can specify an
+installation prefix other than `/usr/local' by giving `configure' the
+option `--prefix=PATH'.
+
+   You can specify separate installation prefixes for
+architecture-specific files and architecture-independent files.  If you
+give `configure' the option `--exec-prefix=PATH', the package will use
+PATH as the prefix for installing programs and libraries.
+Documentation and other data files will still use the regular prefix.
+
+   In addition, if you use an unusual directory layout you can give
+options like `--bindir=PATH' to specify different values for particular
+kinds of files.  Run `configure --help' for a list of the directories
+you can set and what kinds of files go in them.
+
+   If the package supports it, you can cause programs to be installed
+with an extra prefix or suffix on their names by giving `configure' the
+option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
+
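+   For instance, to install everything under a user-writable prefix
+(the path below is only an example):
+
+     ./configure --prefix=$HOME/local
+     make install
+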
+Optional Features
+=================
+
+   Some packages pay attention to `--enable-FEATURE' options to
+`configure', where FEATURE indicates an optional part of the package.
+They may also pay attention to `--with-PACKAGE' options, where PACKAGE
+is something like `gnu-as' or `x' (for the X Window System).  The
+`README' should mention any `--enable-' and `--with-' options that the
+package recognizes.
+
+   For packages that use the X Window System, `configure' can usually
+find the X include and library files automatically, but if it doesn't,
+you can use the `configure' options `--x-includes=DIR' and
+`--x-libraries=DIR' to specify their locations.
+
+Specifying the System Type
+==========================
+
+   There may be some features `configure' cannot figure out
+automatically, but needs to determine by the type of machine the package
+will run on.  Usually, assuming the package is built to be run on the
+_same_ architectures, `configure' can figure that out, but if it prints
+a message saying it cannot guess the machine type, give it the
+`--build=TYPE' option.  TYPE can either be a short name for the system
+type, such as `sun4', or a canonical name which has the form:
+
+     CPU-COMPANY-SYSTEM
+
+where SYSTEM can have one of these forms:
+
+     OS KERNEL-OS
+
+   See the file `config.sub' for the possible values of each field.  If
+`config.sub' isn't included in this package, then this package doesn't
+need to know the machine type.
+
+   If you are _building_ compiler tools for cross-compiling, you should
+use the `--target=TYPE' option to select the type of system they will
+produce code for.
+
+   If you want to _use_ a cross compiler, that generates code for a
+platform different from the build platform, you should specify the
+"host" platform (i.e., that on which the generated programs will
+eventually be run) with `--host=TYPE'.
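+
+   For example, a cross build might be configured like this (a sketch;
+the triplets are only illustrative):
+
+     ./configure --build=x86_64-pc-linux-gnu \
+                 --host=arm-unknown-linux-gnueabihf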
+
+Sharing Defaults
+================
+
+   If you want to set default values for `configure' scripts to share,
+you can create a site shell script called `config.site' that gives
+default values for variables like `CC', `cache_file', and `prefix'.
+`configure' looks for `PREFIX/share/config.site' if it exists, then
+`PREFIX/etc/config.site' if it exists.  Or, you can set the
+`CONFIG_SITE' environment variable to the location of the site script.
+A warning: not all `configure' scripts look for a site script.
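+
+   For example, a minimal `config.site' might look like this (a sketch;
+the values are only illustrative):
+
+     # config.site: defaults shared by all configure runs
+     CC=gcc
+     CFLAGS='-O2 -g'
+     prefix=/opt/pkg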
+
+Defining Variables
+==================
+
+   Variables not defined in a site shell script can be set in the
+environment passed to `configure'.  However, some packages may run
+configure again during the build, and the customized values of these
+variables may be lost.  In order to avoid this problem, you should set
+them in the `configure' command line, using `VAR=value'.  For example:
+
+     ./configure CC=/usr/local2/bin/gcc
+
+will cause the specified gcc to be used as the C compiler (unless it is
+overridden in the site shell script).
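+
+   Setting the variable in the environment instead, as in
+
+     CC=/usr/local2/bin/gcc ./configure
+
+also works, but as noted above the value may be lost if `configure' is
+re-run during the build, so the command-line form is preferred.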
+
+`configure' Invocation
+======================
+
+   `configure' recognizes the following options to control how it
+operates.
+
+`--help'
+`-h'
+     Print a summary of the options to `configure', and exit.
+
+`--version'
+`-V'
+     Print the version of Autoconf used to generate the `configure'
+     script, and exit.
+
+`--cache-file=FILE'
+     Enable the cache: use and save the results of the tests in FILE,
+     traditionally `config.cache'.  FILE defaults to `/dev/null' to
+     disable caching.
+
+`--config-cache'
+`-C'
+     Alias for `--cache-file=config.cache'.
+
+`--quiet'
+`--silent'
+`-q'
+     Do not print messages saying which checks are being made.  To
+     suppress all normal output, redirect it to `/dev/null' (any error
+     messages will still be shown).
+
+`--srcdir=DIR'
+     Look for the package's source code in directory DIR.  Usually
+     `configure' can determine that directory automatically.
+
+`configure' also accepts some other, not widely useful, options.  Run
+`configure --help' for more details.
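+
+   For example, to run `configure' quietly while caching the test
+results in `config.cache' (a sketch):
+
+     ./configure -C --quiet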
+
diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000000..d6c645f0f336
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,29 @@
+makefiles = \
+  local.mk \
+  src/boost/format/local.mk \
+  src/libutil/local.mk \
+  src/libstore/local.mk \
+  src/libmain/local.mk \
+  src/libexpr/local.mk \
+  src/nix-hash/local.mk \
+  src/nix-store/local.mk \
+  src/nix-instantiate/local.mk \
+  src/nix-env/local.mk \
+  src/nix-daemon/local.mk \
+  src/download-via-ssh/local.mk \
+  src/nix-log2xml/local.mk \
+  src/bsdiff-4.3/local.mk \
+  perl/local.mk \
+  scripts/local.mk \
+  corepkgs/local.mk \
+  misc/systemd/local.mk \
+  misc/upstart/local.mk \
+  misc/emacs/local.mk \
+  doc/manual/local.mk \
+  tests/local.mk
+
+GLOBAL_CXXFLAGS += -std=c++0x
+
+include Makefile.config
+
+include mk/lib.mk
diff --git a/Makefile.config.in b/Makefile.config.in
new file mode 100644
index 000000000000..53bdbbf61072
--- /dev/null
+++ b/Makefile.config.in
@@ -0,0 +1,35 @@
+BDW_GC_LIBS = @BDW_GC_LIBS@
+CC = @CC@
+CFLAGS = @CFLAGS@
+CXX = @CXX@
+CXXFLAGS = @CXXFLAGS@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+OPENSSL_LIBS = @OPENSSL_LIBS@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+bash = @bash@
+bindir = @bindir@
+bsddiff_compat_include = @bsddiff_compat_include@
+curl = @curl@
+datadir = @datadir@
+datarootdir = @datarootdir@
+dblatex = @dblatex@
+docbookrng = @docbookrng@
+docbookxsl = @docbookxsl@
+docdir = @docdir@
+exec_prefix = @exec_prefix@
+includedir = @includedir@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+perl = @perl@
+perlbindings = @perlbindings@
+perllibdir = @perllibdir@
+pkglibdir = $(libdir)/$(PACKAGE_NAME)
+prefix = @prefix@
+storedir = @storedir@
+sysconfdir = @sysconfdir@
+w3m = @w3m@
+xmllint = @xmllint@
+xsltproc = @xsltproc@
diff --git a/README b/README
new file mode 100644
index 000000000000..35a1f6cc08c9
--- /dev/null
+++ b/README
@@ -0,0 +1,10 @@
+Nix is a purely functional package manager.  For installation and
+usage instructions, please read the manual, which can be found in
+`doc/manual/manual.html' and on the Nix website at
+<http://nixos.org/>.
+
+
+Acknowledgments
+
+This product includes software developed by the OpenSSL Project for
+use in the OpenSSL Toolkit (http://www.OpenSSL.org/).
diff --git a/bootstrap.sh b/bootstrap.sh
new file mode 100755
index 000000000000..e3e259351675
--- /dev/null
+++ b/bootstrap.sh
@@ -0,0 +1,4 @@
+#! /bin/sh -e
+rm -f aclocal.m4
+mkdir -p config
+exec autoreconf -vfi
diff --git a/config/config.guess b/config/config.guess
new file mode 100755
index 000000000000..137bedf2e28b
--- /dev/null
+++ b/config/config.guess
@@ -0,0 +1,1537 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+#   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+#   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+#   2011, 2012 Free Software Foundation, Inc.
+
+timestamp='2012-08-14'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Originally written by Per Bothner.  Please send patches (context
+# diff format) to <config-patches@gnu.org> and include a ChangeLog
+# entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub.  If it succeeds, it prints the system name on stdout, and
+# exits with 0.  Otherwise, it exits with 1.
+#
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+  -h, --help         print this help, then exit
+  -t, --time-stamp   print date of last modification, then exit
+  -v, --version      print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+  case $1 in
+    --time-stamp | --time* | -t )
+       echo "$timestamp" ; exit ;;
+    --version | -v )
+       echo "$version" ; exit ;;
+    --help | --h* | -h )
+       echo "$usage"; exit ;;
+    -- )     # Stop option processing
+       shift; break ;;
+    - )	# Use stdin as input.
+       break ;;
+    -* )
+       echo "$me: invalid option $1$help" >&2
+       exit 1 ;;
+    * )
+       break ;;
+  esac
+done
+
+if test $# != 0; then
+  echo "$me: too many arguments$help" >&2
+  exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,)    echo "int x;" > $dummy.c ;
+	for c in cc gcc c89 c99 ; do
+	  if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+	     CC_FOR_BUILD="$c"; break ;
+	  fi ;
+	done ;
+	if test x"$CC_FOR_BUILD" = x ; then
+	  CC_FOR_BUILD=no_compiler_found ;
+	fi
+	;;
+ ,,*)   CC_FOR_BUILD=$CC ;;
+ ,*,*)  CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+	PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null`  || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+    *:NetBSD:*:*)
+	# NetBSD (nbsd) targets should (where applicable) match one or
+	# more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+	# *-*-netbsdecoff* and *-*-netbsd*.  For targets that recently
+	# switched to ELF, *-*-netbsd* would select the old
+	# object file format.  This provides both forward
+	# compatibility and a consistent mechanism for selecting the
+	# object file format.
+	#
+	# Note: NetBSD doesn't particularly care about the vendor
+	# portion of the name.  We always set it to "unknown".
+	sysctl="sysctl -n hw.machine_arch"
+	UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+	    /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+	case "${UNAME_MACHINE_ARCH}" in
+	    armeb) machine=armeb-unknown ;;
+	    arm*) machine=arm-unknown ;;
+	    sh3el) machine=shl-unknown ;;
+	    sh3eb) machine=sh-unknown ;;
+	    sh5el) machine=sh5le-unknown ;;
+	    *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+	esac
+	# The Operating System including object format, if it has switched
+	# to ELF recently, or will in the future.
+	case "${UNAME_MACHINE_ARCH}" in
+	    arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+		eval $set_cc_for_build
+		if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+			| grep -q __ELF__
+		then
+		    # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+		    # Return netbsd for either.  FIX?
+		    os=netbsd
+		else
+		    os=netbsdelf
+		fi
+		;;
+	    *)
+		os=netbsd
+		;;
+	esac
+	# The OS release
+	# Debian GNU/NetBSD machines have a different userland, and
+	# thus, need a distinct triplet. However, they do not need
+	# kernel version information, so it can be replaced with a
+	# suitable tag, in the style of linux-gnu.
+	case "${UNAME_VERSION}" in
+	    Debian*)
+		release='-gnu'
+		;;
+	    *)
+		release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+		;;
+	esac
+	# Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+	# contains redundant information, the shorter form:
+	# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+	echo "${machine}-${os}${release}"
+	exit ;;
+    *:Bitrig:*:*)
+	UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+	echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+	exit ;;
+    *:OpenBSD:*:*)
+	UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+	echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+	exit ;;
+    *:ekkoBSD:*:*)
+	echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+	exit ;;
+    *:SolidBSD:*:*)
+	echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+	exit ;;
+    macppc:MirBSD:*:*)
+	echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+	exit ;;
+    *:MirBSD:*:*)
+	echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+	exit ;;
+    alpha:OSF1:*:*)
+	case $UNAME_RELEASE in
+	*4.0)
+		UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+		;;
+	*5.*)
+		UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+		;;
+	esac
+	# According to Compaq, /usr/sbin/psrinfo has been available on
+	# OSF/1 and Tru64 systems produced since 1995.  I hope that
+	# covers most systems running today.  This code pipes the CPU
+	# types through head -n 1, so we only detect the type of CPU 0.
+	ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^  The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+	case "$ALPHA_CPU_TYPE" in
+	    "EV4 (21064)")
+		UNAME_MACHINE="alpha" ;;
+	    "EV4.5 (21064)")
+		UNAME_MACHINE="alpha" ;;
+	    "LCA4 (21066/21068)")
+		UNAME_MACHINE="alpha" ;;
+	    "EV5 (21164)")
+		UNAME_MACHINE="alphaev5" ;;
+	    "EV5.6 (21164A)")
+		UNAME_MACHINE="alphaev56" ;;
+	    "EV5.6 (21164PC)")
+		UNAME_MACHINE="alphapca56" ;;
+	    "EV5.7 (21164PC)")
+		UNAME_MACHINE="alphapca57" ;;
+	    "EV6 (21264)")
+		UNAME_MACHINE="alphaev6" ;;
+	    "EV6.7 (21264A)")
+		UNAME_MACHINE="alphaev67" ;;
+	    "EV6.8CB (21264C)")
+		UNAME_MACHINE="alphaev68" ;;
+	    "EV6.8AL (21264B)")
+		UNAME_MACHINE="alphaev68" ;;
+	    "EV6.8CX (21264D)")
+		UNAME_MACHINE="alphaev68" ;;
+	    "EV6.9A (21264/EV69A)")
+		UNAME_MACHINE="alphaev69" ;;
+	    "EV7 (21364)")
+		UNAME_MACHINE="alphaev7" ;;
+	    "EV7.9 (21364A)")
+		UNAME_MACHINE="alphaev79" ;;
+	esac
+	# A Pn.n version is a patched version.
+	# A Vn.n version is a released version.
+	# A Tn.n version is a released field test version.
+	# A Xn.n version is an unreleased experimental baselevel.
+	# 1.2 uses "1.2" for uname -r.
+	echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+	# Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+	exitcode=$?
+	trap '' 0
+	exit $exitcode ;;
+    Alpha\ *:Windows_NT*:*)
+	# How do we know it's Interix rather than the generic POSIX subsystem?
+	# Should we change UNAME_MACHINE based on the output of uname instead
+	# of the specific Alpha model?
+	echo alpha-pc-interix
+	exit ;;
+    21064:Windows_NT:50:3)
+	echo alpha-dec-winnt3.5
+	exit ;;
+    Amiga*:UNIX_System_V:4.0:*)
+	echo m68k-unknown-sysv4
+	exit ;;
+    *:[Aa]miga[Oo][Ss]:*:*)
+	echo ${UNAME_MACHINE}-unknown-amigaos
+	exit ;;
+    *:[Mm]orph[Oo][Ss]:*:*)
+	echo ${UNAME_MACHINE}-unknown-morphos
+	exit ;;
+    *:OS/390:*:*)
+	echo i370-ibm-openedition
+	exit ;;
+    *:z/VM:*:*)
+	echo s390-ibm-zvmoe
+	exit ;;
+    *:OS400:*:*)
+	echo powerpc-ibm-os400
+	exit ;;
+    arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+	echo arm-acorn-riscix${UNAME_RELEASE}
+	exit ;;
+    arm:riscos:*:*|arm:RISCOS:*:*)
+	echo arm-unknown-riscos
+	exit ;;
+    SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+	echo hppa1.1-hitachi-hiuxmpp
+	exit ;;
+    Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+	# akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+	if test "`(/bin/universe) 2>/dev/null`" = att ; then
+		echo pyramid-pyramid-sysv3
+	else
+		echo pyramid-pyramid-bsd
+	fi
+	exit ;;
+    NILE*:*:*:dcosx)
+	echo pyramid-pyramid-svr4
+	exit ;;
+    DRS?6000:unix:4.0:6*)
+	echo sparc-icl-nx6
+	exit ;;
+    DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+	case `/usr/bin/uname -p` in
+	    sparc) echo sparc-icl-nx7; exit ;;
+	esac ;;
+    s390x:SunOS:*:*)
+	echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4H:SunOS:5.*:*)
+	echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+	echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+	echo i386-pc-auroraux${UNAME_RELEASE}
+	exit ;;
+    i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+	eval $set_cc_for_build
+	SUN_ARCH="i386"
+	# If there is a compiler, see if it is configured for 64-bit objects.
+	# Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+	# This test works for both compilers.
+	if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+	    if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+		(CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+		grep IS_64BIT_ARCH >/dev/null
+	    then
+		SUN_ARCH="x86_64"
+	    fi
+	fi
+	echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4*:SunOS:6*:*)
+	# According to config.sub, this is the proper way to canonicalize
+	# SunOS6.  Hard to guess exactly what SunOS6 will be like, but
+	# it's likely to be more like Solaris than SunOS4.
+	echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4*:SunOS:*:*)
+	case "`/usr/bin/arch -k`" in
+	    Series*|S4*)
+		UNAME_RELEASE=`uname -v`
+		;;
+	esac
+	# Japanese Language versions have a version number like `4.1.3-JL'.
+	echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+	exit ;;
+    sun3*:SunOS:*:*)
+	echo m68k-sun-sunos${UNAME_RELEASE}
+	exit ;;
+    sun*:*:4.2BSD:*)
+	UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+	test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+	case "`/bin/arch`" in
+	    sun3)
+		echo m68k-sun-sunos${UNAME_RELEASE}
+		;;
+	    sun4)
+		echo sparc-sun-sunos${UNAME_RELEASE}
+		;;
+	esac
+	exit ;;
+    aushp:SunOS:*:*)
+	echo sparc-auspex-sunos${UNAME_RELEASE}
+	exit ;;
+    # The situation for MiNT is a little confusing.  The machine name
+    # can be virtually everything (everything which is not
+    # "atarist" or "atariste" at least should have a processor
+    # > m68000).  The system name ranges from "MiNT" over "FreeMiNT"
+    # to the lowercase version "mint" (or "freemint").  Finally
+    # the system name "TOS" denotes a system which is actually not
+    # MiNT.  But MiNT is downward compatible to TOS, so this should
+    # be no problem.
+    atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+	echo m68k-atari-mint${UNAME_RELEASE}
+	exit ;;
+    atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+	echo m68k-atari-mint${UNAME_RELEASE}
+	exit ;;
+    *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+	echo m68k-atari-mint${UNAME_RELEASE}
+	exit ;;
+    milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+	echo m68k-milan-mint${UNAME_RELEASE}
+	exit ;;
+    hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+	echo m68k-hades-mint${UNAME_RELEASE}
+	exit ;;
+    *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+	echo m68k-unknown-mint${UNAME_RELEASE}
+	exit ;;
+    m68k:machten:*:*)
+	echo m68k-apple-machten${UNAME_RELEASE}
+	exit ;;
+    powerpc:machten:*:*)
+	echo powerpc-apple-machten${UNAME_RELEASE}
+	exit ;;
+    RISC*:Mach:*:*)
+	echo mips-dec-mach_bsd4.3
+	exit ;;
+    RISC*:ULTRIX:*:*)
+	echo mips-dec-ultrix${UNAME_RELEASE}
+	exit ;;
+    VAX*:ULTRIX*:*:*)
+	echo vax-dec-ultrix${UNAME_RELEASE}
+	exit ;;
+    2020:CLIX:*:* | 2430:CLIX:*:*)
+	echo clipper-intergraph-clix${UNAME_RELEASE}
+	exit ;;
+    mips:*:*:UMIPS | mips:*:*:RISCos)
+	eval $set_cc_for_build
+	sed 's/^	//' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h>  /* for printf() prototype */
+	int main (int argc, char *argv[]) {
+#else
+	int main (argc, argv) int argc; char *argv[]; {
+#endif
+	#if defined (host_mips) && defined (MIPSEB)
+	#if defined (SYSTYPE_SYSV)
+	  printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+	#endif
+	#if defined (SYSTYPE_SVR4)
+	  printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+	#endif
+	#if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+	  printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+	#endif
+	#endif
+	  exit (-1);
+	}
+EOF
+	$CC_FOR_BUILD -o $dummy $dummy.c &&
+	  dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+	  SYSTEM_NAME=`$dummy $dummyarg` &&
+	    { echo "$SYSTEM_NAME"; exit; }
+	echo mips-mips-riscos${UNAME_RELEASE}
+	exit ;;
+    Motorola:PowerMAX_OS:*:*)
+	echo powerpc-motorola-powermax
+	exit ;;
+    Motorola:*:4.3:PL8-*)
+	echo powerpc-harris-powermax
+	exit ;;
+    Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+	echo powerpc-harris-powermax
+	exit ;;
+    Night_Hawk:Power_UNIX:*:*)
+	echo powerpc-harris-powerunix
+	exit ;;
+    m88k:CX/UX:7*:*)
+	echo m88k-harris-cxux7
+	exit ;;
+    m88k:*:4*:R4*)
+	echo m88k-motorola-sysv4
+	exit ;;
+    m88k:*:3*:R3*)
+	echo m88k-motorola-sysv3
+	exit ;;
+    AViiON:dgux:*:*)
+	# DG/UX returns AViiON for all architectures
+	UNAME_PROCESSOR=`/usr/bin/uname -p`
+	if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+	then
+	    if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+	       [ ${TARGET_BINARY_INTERFACE}x = x ]
+	    then
+		echo m88k-dg-dgux${UNAME_RELEASE}
+	    else
+		echo m88k-dg-dguxbcs${UNAME_RELEASE}
+	    fi
+	else
+	    echo i586-dg-dgux${UNAME_RELEASE}
+	fi
+	exit ;;
+    M88*:DolphinOS:*:*)	# DolphinOS (SVR3)
+	echo m88k-dolphin-sysv3
+	exit ;;
+    M88*:*:R3*:*)
+	# Delta 88k system running SVR3
+	echo m88k-motorola-sysv3
+	exit ;;
+    XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+	echo m88k-tektronix-sysv3
+	exit ;;
+    Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+	echo m68k-tektronix-bsd
+	exit ;;
+    *:IRIX*:*:*)
+	echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+	exit ;;
+    ????????:AIX?:[12].1:2)   # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+	echo romp-ibm-aix     # uname -m gives an 8 hex-code CPU id
+	exit ;;               # Note that: echo "'`uname -s`'" gives 'AIX '
+    i*86:AIX:*:*)
+	echo i386-ibm-aix
+	exit ;;
+    ia64:AIX:*:*)
+	if [ -x /usr/bin/oslevel ] ; then
+		IBM_REV=`/usr/bin/oslevel`
+	else
+		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+	fi
+	echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+	exit ;;
+    *:AIX:2:3)
+	if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+		eval $set_cc_for_build
+		sed 's/^		//' << EOF >$dummy.c
+		#include <sys/systemcfg.h>
+
+		main()
+			{
+			if (!__power_pc())
+				exit(1);
+			puts("powerpc-ibm-aix3.2.5");
+			exit(0);
+			}
+EOF
+		if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+		then
+			echo "$SYSTEM_NAME"
+		else
+			echo rs6000-ibm-aix3.2.5
+		fi
+	elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+		echo rs6000-ibm-aix3.2.4
+	else
+		echo rs6000-ibm-aix3.2
+	fi
+	exit ;;
+    *:AIX:*:[4567])
+	IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+	if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+		IBM_ARCH=rs6000
+	else
+		IBM_ARCH=powerpc
+	fi
+	if [ -x /usr/bin/oslevel ] ; then
+		IBM_REV=`/usr/bin/oslevel`
+	else
+		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+	fi
+	echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+	exit ;;
+    *:AIX:*:*)
+	echo rs6000-ibm-aix
+	exit ;;
+    ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+	echo romp-ibm-bsd4.4
+	exit ;;
+    ibmrt:*BSD:*|romp-ibm:BSD:*)            # covers RT/PC BSD and
+	echo romp-ibm-bsd${UNAME_RELEASE}   # 4.3 with uname added to
+	exit ;;                             # report: romp-ibm BSD 4.3
+    *:BOSX:*:*)
+	echo rs6000-bull-bosx
+	exit ;;
+    DPX/2?00:B.O.S.:*:*)
+	echo m68k-bull-sysv3
+	exit ;;
+    9000/[34]??:4.3bsd:1.*:*)
+	echo m68k-hp-bsd
+	exit ;;
+    hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+	echo m68k-hp-bsd4.4
+	exit ;;
+    9000/[34678]??:HP-UX:*:*)
+	HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+	case "${UNAME_MACHINE}" in
+	    9000/31? )            HP_ARCH=m68000 ;;
+	    9000/[34]?? )         HP_ARCH=m68k ;;
+	    9000/[678][0-9][0-9])
+		if [ -x /usr/bin/getconf ]; then
+		    sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+		    sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+		    case "${sc_cpu_version}" in
+		      523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+		      528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+		      532)                      # CPU_PA_RISC2_0
+			case "${sc_kernel_bits}" in
+			  32) HP_ARCH="hppa2.0n" ;;
+			  64) HP_ARCH="hppa2.0w" ;;
+			  '') HP_ARCH="hppa2.0" ;;   # HP-UX 10.20
+			esac ;;
+		    esac
+		fi
+		if [ "${HP_ARCH}" = "" ]; then
+		    eval $set_cc_for_build
+		    sed 's/^		//' << EOF >$dummy.c
+
+		#define _HPUX_SOURCE
+		#include <stdlib.h>
+		#include <unistd.h>
+
+		int main ()
+		{
+		#if defined(_SC_KERNEL_BITS)
+		    long bits = sysconf(_SC_KERNEL_BITS);
+		#endif
+		    long cpu  = sysconf (_SC_CPU_VERSION);
+
+		    switch (cpu)
+			{
+			case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+			case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+			case CPU_PA_RISC2_0:
+		#if defined(_SC_KERNEL_BITS)
+			    switch (bits)
+				{
+				case 64: puts ("hppa2.0w"); break;
+				case 32: puts ("hppa2.0n"); break;
+				default: puts ("hppa2.0"); break;
+				} break;
+		#else  /* !defined(_SC_KERNEL_BITS) */
+			    puts ("hppa2.0"); break;
+		#endif
+			default: puts ("hppa1.0"); break;
+			}
+		    exit (0);
+		}
+EOF
+		    (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+		    test -z "$HP_ARCH" && HP_ARCH=hppa
+		fi ;;
+	esac
+	if [ ${HP_ARCH} = "hppa2.0w" ]
+	then
+	    eval $set_cc_for_build
+
+	    # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+	    # 32-bit code.  hppa64-hp-hpux* has the same kernel and a compiler
+	    # generating 64-bit code.  GNU and HP use different nomenclature:
+	    #
+	    # $ CC_FOR_BUILD=cc ./config.guess
+	    # => hppa2.0w-hp-hpux11.23
+	    # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+	    # => hppa64-hp-hpux11.23
+
+	    if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+		grep -q __LP64__
+	    then
+		HP_ARCH="hppa2.0w"
+	    else
+		HP_ARCH="hppa64"
+	    fi
+	fi
+	echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+	exit ;;
+    ia64:HP-UX:*:*)
+	HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+	echo ia64-hp-hpux${HPUX_REV}
+	exit ;;
+    3050*:HI-UX:*:*)
+	eval $set_cc_for_build
+	sed 's/^	//' << EOF >$dummy.c
+	#include <unistd.h>
+	int
+	main ()
+	{
+	  long cpu = sysconf (_SC_CPU_VERSION);
+	  /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+	     true for CPU_PA_RISC1_0.  CPU_IS_PA_RISC returns correct
+	     results, however.  */
+	  if (CPU_IS_PA_RISC (cpu))
+	    {
+	      switch (cpu)
+		{
+		  case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+		  case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+		  case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+		  default: puts ("hppa-hitachi-hiuxwe2"); break;
+		}
+	    }
+	  else if (CPU_IS_HP_MC68K (cpu))
+	    puts ("m68k-hitachi-hiuxwe2");
+	  else puts ("unknown-hitachi-hiuxwe2");
+	  exit (0);
+	}
+EOF
+	$CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+		{ echo "$SYSTEM_NAME"; exit; }
+	echo unknown-hitachi-hiuxwe2
+	exit ;;
+    9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+	echo hppa1.1-hp-bsd
+	exit ;;
+    9000/8??:4.3bsd:*:*)
+	echo hppa1.0-hp-bsd
+	exit ;;
+    *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+	echo hppa1.0-hp-mpeix
+	exit ;;
+    hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+	echo hppa1.1-hp-osf
+	exit ;;
+    hp8??:OSF1:*:*)
+	echo hppa1.0-hp-osf
+	exit ;;
+    i*86:OSF1:*:*)
+	if [ -x /usr/sbin/sysversion ] ; then
+	    echo ${UNAME_MACHINE}-unknown-osf1mk
+	else
+	    echo ${UNAME_MACHINE}-unknown-osf1
+	fi
+	exit ;;
+    parisc*:Lites*:*:*)
+	echo hppa1.1-hp-lites
+	exit ;;
+    C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+	echo c1-convex-bsd
+	exit ;;
+    C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+	if getsysinfo -f scalar_acc
+	then echo c32-convex-bsd
+	else echo c2-convex-bsd
+	fi
+	exit ;;
+    C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+	echo c34-convex-bsd
+	exit ;;
+    C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+	echo c38-convex-bsd
+	exit ;;
+    C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+	echo c4-convex-bsd
+	exit ;;
+    CRAY*Y-MP:*:*:*)
+	echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*[A-Z]90:*:*:*)
+	echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+	| sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+	      -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+	      -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*TS:*:*:*)
+	echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*T3E:*:*:*)
+	echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*SV1:*:*:*)
+	echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    *:UNICOS/mp:*:*)
+	echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+	FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+	FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+	FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+	echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+	exit ;;
+    5000:UNIX_System_V:4.*:*)
+	FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+	FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+	echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+	exit ;;
+    i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+	echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+	exit ;;
+    sparc*:BSD/OS:*:*)
+	echo sparc-unknown-bsdi${UNAME_RELEASE}
+	exit ;;
+    *:BSD/OS:*:*)
+	echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+	exit ;;
+    *:FreeBSD:*:*)
+	UNAME_PROCESSOR=`/usr/bin/uname -p`
+	case ${UNAME_PROCESSOR} in
+	    amd64)
+		echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+	    *)
+		echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+	esac
+	exit ;;
+    i*:CYGWIN*:*)
+	echo ${UNAME_MACHINE}-pc-cygwin
+	exit ;;
+    *:MINGW64*:*)
+	echo ${UNAME_MACHINE}-pc-mingw64
+	exit ;;
+    *:MINGW*:*)
+	echo ${UNAME_MACHINE}-pc-mingw32
+	exit ;;
+    i*:MSYS*:*)
+	echo ${UNAME_MACHINE}-pc-msys
+	exit ;;
+    i*:windows32*:*)
+	# uname -m includes "-pc" on this system.
+	echo ${UNAME_MACHINE}-mingw32
+	exit ;;
+    i*:PW*:*)
+	echo ${UNAME_MACHINE}-pc-pw32
+	exit ;;
+    *:Interix*:*)
+	case ${UNAME_MACHINE} in
+	    x86)
+		echo i586-pc-interix${UNAME_RELEASE}
+		exit ;;
+	    authenticamd | genuineintel | EM64T)
+		echo x86_64-unknown-interix${UNAME_RELEASE}
+		exit ;;
+	    IA64)
+		echo ia64-unknown-interix${UNAME_RELEASE}
+		exit ;;
+	esac ;;
+    [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+	echo i${UNAME_MACHINE}-pc-mks
+	exit ;;
+    8664:Windows_NT:*)
+	echo x86_64-pc-mks
+	exit ;;
+    i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+	# How do we know it's Interix rather than the generic POSIX subsystem?
+	# It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+	# UNAME_MACHINE based on the output of uname instead of i386?
+	echo i586-pc-interix
+	exit ;;
+    i*:UWIN*:*)
+	echo ${UNAME_MACHINE}-pc-uwin
+	exit ;;
+    amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+	echo x86_64-unknown-cygwin
+	exit ;;
+    p*:CYGWIN*:*)
+	echo powerpcle-unknown-cygwin
+	exit ;;
+    prep*:SunOS:5.*:*)
+	echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    *:GNU:*:*)
+	# the GNU system
+	echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+	exit ;;
+    *:GNU/*:*:*)
+	# other systems with GNU libc and userland
+	echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
+	exit ;;
+    i*86:Minix:*:*)
+	echo ${UNAME_MACHINE}-pc-minix
+	exit ;;
+    aarch64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    aarch64_be:Linux:*:*)
+	UNAME_MACHINE=aarch64_be
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    alpha:Linux:*:*)
+	case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+	  EV5)   UNAME_MACHINE=alphaev5 ;;
+	  EV56)  UNAME_MACHINE=alphaev56 ;;
+	  PCA56) UNAME_MACHINE=alphapca56 ;;
+	  PCA57) UNAME_MACHINE=alphapca56 ;;
+	  EV6)   UNAME_MACHINE=alphaev6 ;;
+	  EV67)  UNAME_MACHINE=alphaev67 ;;
+	  EV68*) UNAME_MACHINE=alphaev68 ;;
+	esac
+	objdump --private-headers /bin/sh | grep -q ld.so.1
+	if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+	echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+	exit ;;
+    arm*:Linux:*:*)
+	eval $set_cc_for_build
+	if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+	    | grep -q __ARM_EABI__
+	then
+	    echo ${UNAME_MACHINE}-unknown-linux-gnu
+	else
+	    if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+		| grep -q __ARM_PCS_VFP
+	    then
+		echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+	    else
+		echo ${UNAME_MACHINE}-unknown-linux-gnueabihf
+	    fi
+	fi
+	exit ;;
+    avr32*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    cris:Linux:*:*)
+	echo ${UNAME_MACHINE}-axis-linux-gnu
+	exit ;;
+    crisv32:Linux:*:*)
+	echo ${UNAME_MACHINE}-axis-linux-gnu
+	exit ;;
+    frv:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    hexagon:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    i*86:Linux:*:*)
+	LIBC=gnu
+	eval $set_cc_for_build
+	sed 's/^	//' << EOF >$dummy.c
+	#ifdef __dietlibc__
+	LIBC=dietlibc
+	#endif
+EOF
+	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
+	echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+	exit ;;
+    ia64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    m32r*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    m68*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    mips:Linux:*:* | mips64:Linux:*:*)
+	eval $set_cc_for_build
+	sed 's/^	//' << EOF >$dummy.c
+	#undef CPU
+	#undef ${UNAME_MACHINE}
+	#undef ${UNAME_MACHINE}el
+	#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+	CPU=${UNAME_MACHINE}el
+	#else
+	#if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+	CPU=${UNAME_MACHINE}
+	#else
+	CPU=
+	#endif
+	#endif
+EOF
+	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+	test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+	;;
+    or32:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    padre:Linux:*:*)
+	echo sparc-unknown-linux-gnu
+	exit ;;
+    parisc64:Linux:*:* | hppa64:Linux:*:*)
+	echo hppa64-unknown-linux-gnu
+	exit ;;
+    parisc:Linux:*:* | hppa:Linux:*:*)
+	# Look for CPU level
+	case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+	  PA7*) echo hppa1.1-unknown-linux-gnu ;;
+	  PA8*) echo hppa2.0-unknown-linux-gnu ;;
+	  *)    echo hppa-unknown-linux-gnu ;;
+	esac
+	exit ;;
+    ppc64:Linux:*:*)
+	echo powerpc64-unknown-linux-gnu
+	exit ;;
+    ppc:Linux:*:*)
+	echo powerpc-unknown-linux-gnu
+	exit ;;
+    s390:Linux:*:* | s390x:Linux:*:*)
+	echo ${UNAME_MACHINE}-ibm-linux
+	exit ;;
+    sh64*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    sh*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    sparc:Linux:*:* | sparc64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    tile*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    vax:Linux:*:*)
+	echo ${UNAME_MACHINE}-dec-linux-gnu
+	exit ;;
+    x86_64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    xtensa*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    i*86:DYNIX/ptx:4*:*)
+	# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+	# earlier versions are messed up and put the nodename in both
+	# sysname and nodename.
+	echo i386-sequent-sysv4
+	exit ;;
+    i*86:UNIX_SV:4.2MP:2.*)
+	# Unixware is an offshoot of SVR4, but it has its own version
+	# number series starting with 2...
+	# I am not positive that other SVR4 systems won't match this,
+	# I just have to hope.  -- rms.
+	# Use sysv4.2uw... so that sysv4* matches it.
+	echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+	exit ;;
+    i*86:OS/2:*:*)
+	# If we were able to find `uname', then EMX Unix compatibility
+	# is probably installed.
+	echo ${UNAME_MACHINE}-pc-os2-emx
+	exit ;;
+    i*86:XTS-300:*:STOP)
+	echo ${UNAME_MACHINE}-unknown-stop
+	exit ;;
+    i*86:atheos:*:*)
+	echo ${UNAME_MACHINE}-unknown-atheos
+	exit ;;
+    i*86:syllable:*:*)
+	echo ${UNAME_MACHINE}-pc-syllable
+	exit ;;
+    i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+	echo i386-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    i*86:*DOS:*:*)
+	echo ${UNAME_MACHINE}-pc-msdosdjgpp
+	exit ;;
+    i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+	UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+	if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+		echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+	else
+		echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+	fi
+	exit ;;
+    i*86:*:5:[678]*)
+	# UnixWare 7.x, OpenUNIX and OpenServer 6.
+	case `/bin/uname -X | grep "^Machine"` in
+	    *486*)	     UNAME_MACHINE=i486 ;;
+	    *Pentium)	     UNAME_MACHINE=i586 ;;
+	    *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+	esac
+	echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+	exit ;;
+    i*86:*:3.2:*)
+	if test -f /usr/options/cb.name; then
+		UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+		echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+	elif /bin/uname -X 2>/dev/null >/dev/null ; then
+		UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+		(/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+		(/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+			&& UNAME_MACHINE=i586
+		(/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+			&& UNAME_MACHINE=i686
+		(/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+			&& UNAME_MACHINE=i686
+		echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+	else
+		echo ${UNAME_MACHINE}-pc-sysv32
+	fi
+	exit ;;
+    pc:*:*:*)
+	# Left here for compatibility:
+	# uname -m prints for DJGPP always 'pc', but it prints nothing about
+	# the processor, so we play safe by assuming i586.
+	# Note: whatever this is, it MUST be the same as what config.sub
+	# prints for the "djgpp" host, or else GDB configury will decide that
+	# this is a cross-build.
+	echo i586-pc-msdosdjgpp
+	exit ;;
+    Intel:Mach:3*:*)
+	echo i386-pc-mach3
+	exit ;;
+    paragon:*:*:*)
+	echo i860-intel-osf1
+	exit ;;
+    i860:*:4.*:*) # i860-SVR4
+	if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+	  echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+	else # Add other i860-SVR4 vendors below as they are discovered.
+	  echo i860-unknown-sysv${UNAME_RELEASE}  # Unknown i860-SVR4
+	fi
+	exit ;;
+    mini*:CTIX:SYS*5:*)
+	# "miniframe"
+	echo m68010-convergent-sysv
+	exit ;;
+    mc68k:UNIX:SYSTEM5:3.51m)
+	echo m68k-convergent-sysv
+	exit ;;
+    M680?0:D-NIX:5.3:*)
+	echo m68k-diab-dnix
+	exit ;;
+    M68*:*:R3V[5678]*:*)
+	test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+    3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+	OS_REL=''
+	test -r /etc/.relid \
+	&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+	  && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+	  && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+    3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+	  && { echo i486-ncr-sysv4; exit; } ;;
+    NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+	OS_REL='.3'
+	test -r /etc/.relid \
+	    && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+	    && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+	    && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+	/bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+	    && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+    m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+	echo m68k-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    mc68030:UNIX_System_V:4.*:*)
+	echo m68k-atari-sysv4
+	exit ;;
+    TSUNAMI:LynxOS:2.*:*)
+	echo sparc-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    rs6000:LynxOS:2.*:*)
+	echo rs6000-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+	echo powerpc-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    SM[BE]S:UNIX_SV:*:*)
+	echo mips-dde-sysv${UNAME_RELEASE}
+	exit ;;
+    RM*:ReliantUNIX-*:*:*)
+	echo mips-sni-sysv4
+	exit ;;
+    RM*:SINIX-*:*:*)
+	echo mips-sni-sysv4
+	exit ;;
+    *:SINIX-*:*:*)
+	if uname -p 2>/dev/null >/dev/null ; then
+		UNAME_MACHINE=`(uname -p) 2>/dev/null`
+		echo ${UNAME_MACHINE}-sni-sysv4
+	else
+		echo ns32k-sni-sysv
+	fi
+	exit ;;
+    PENTIUM:*:4.0*:*)	# Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+			# says <Richard.M.Bartel@ccMail.Census.GOV>
+	echo i586-unisys-sysv4
+	exit ;;
+    *:UNIX_System_V:4*:FTX*)
+	# From Gerald Hewes <hewes@openmarket.com>.
+	# How about differentiating between stratus architectures? -djm
+	echo hppa1.1-stratus-sysv4
+	exit ;;
+    *:*:*:FTX*)
+	# From seanf@swdc.stratus.com.
+	echo i860-stratus-sysv4
+	exit ;;
+    i*86:VOS:*:*)
+	# From Paul.Green@stratus.com.
+	echo ${UNAME_MACHINE}-stratus-vos
+	exit ;;
+    *:VOS:*:*)
+	# From Paul.Green@stratus.com.
+	echo hppa1.1-stratus-vos
+	exit ;;
+    mc68*:A/UX:*:*)
+	echo m68k-apple-aux${UNAME_RELEASE}
+	exit ;;
+    news*:NEWS-OS:6*:*)
+	echo mips-sony-newsos6
+	exit ;;
+    R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+	if [ -d /usr/nec ]; then
+		echo mips-nec-sysv${UNAME_RELEASE}
+	else
+		echo mips-unknown-sysv${UNAME_RELEASE}
+	fi
+	exit ;;
+    BeBox:BeOS:*:*)	# BeOS running on hardware made by Be, PPC only.
+	echo powerpc-be-beos
+	exit ;;
+    BeMac:BeOS:*:*)	# BeOS running on Mac or Mac clone, PPC only.
+	echo powerpc-apple-beos
+	exit ;;
+    BePC:BeOS:*:*)	# BeOS running on Intel PC compatible.
+	echo i586-pc-beos
+	exit ;;
+    BePC:Haiku:*:*)	# Haiku running on Intel PC compatible.
+	echo i586-pc-haiku
+	exit ;;
+    x86_64:Haiku:*:*)
+	echo x86_64-unknown-haiku
+	exit ;;
+    SX-4:SUPER-UX:*:*)
+	echo sx4-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-5:SUPER-UX:*:*)
+	echo sx5-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-6:SUPER-UX:*:*)
+	echo sx6-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-7:SUPER-UX:*:*)
+	echo sx7-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-8:SUPER-UX:*:*)
+	echo sx8-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-8R:SUPER-UX:*:*)
+	echo sx8r-nec-superux${UNAME_RELEASE}
+	exit ;;
+    Power*:Rhapsody:*:*)
+	echo powerpc-apple-rhapsody${UNAME_RELEASE}
+	exit ;;
+    *:Rhapsody:*:*)
+	echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+	exit ;;
+    *:Darwin:*:*)
+	UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+	case $UNAME_PROCESSOR in
+	    i386)
+		eval $set_cc_for_build
+		if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+		  if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+		      (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+		      grep IS_64BIT_ARCH >/dev/null
+		  then
+		      UNAME_PROCESSOR="x86_64"
+		  fi
+		fi ;;
+	    unknown) UNAME_PROCESSOR=powerpc ;;
+	esac
+	echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+	exit ;;
+    *:procnto*:*:* | *:QNX:[0123456789]*:*)
+	UNAME_PROCESSOR=`uname -p`
+	if test "$UNAME_PROCESSOR" = "x86"; then
+		UNAME_PROCESSOR=i386
+		UNAME_MACHINE=pc
+	fi
+	echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+	exit ;;
+    *:QNX:*:4*)
+	echo i386-pc-qnx
+	exit ;;
+    NEO-?:NONSTOP_KERNEL:*:*)
+	echo neo-tandem-nsk${UNAME_RELEASE}
+	exit ;;
+    NSE-*:NONSTOP_KERNEL:*:*)
+	echo nse-tandem-nsk${UNAME_RELEASE}
+	exit ;;
+    NSR-?:NONSTOP_KERNEL:*:*)
+	echo nsr-tandem-nsk${UNAME_RELEASE}
+	exit ;;
+    *:NonStop-UX:*:*)
+	echo mips-compaq-nonstopux
+	exit ;;
+    BS2000:POSIX*:*:*)
+	echo bs2000-siemens-sysv
+	exit ;;
+    DS/*:UNIX_System_V:*:*)
+	echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+	exit ;;
+    *:Plan9:*:*)
+	# "uname -m" is not consistent, so use $cputype instead. 386
+	# is converted to i386 for consistency with other x86
+	# operating systems.
+	if test "$cputype" = "386"; then
+	    UNAME_MACHINE=i386
+	else
+	    UNAME_MACHINE="$cputype"
+	fi
+	echo ${UNAME_MACHINE}-unknown-plan9
+	exit ;;
+    *:TOPS-10:*:*)
+	echo pdp10-unknown-tops10
+	exit ;;
+    *:TENEX:*:*)
+	echo pdp10-unknown-tenex
+	exit ;;
+    KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+	echo pdp10-dec-tops20
+	exit ;;
+    XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+	echo pdp10-xkl-tops20
+	exit ;;
+    *:TOPS-20:*:*)
+	echo pdp10-unknown-tops20
+	exit ;;
+    *:ITS:*:*)
+	echo pdp10-unknown-its
+	exit ;;
+    SEI:*:*:SEIUX)
+	echo mips-sei-seiux${UNAME_RELEASE}
+	exit ;;
+    *:DragonFly:*:*)
+	echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+	exit ;;
+    *:*VMS:*:*)
+	UNAME_MACHINE=`(uname -p) 2>/dev/null`
+	case "${UNAME_MACHINE}" in
+	    A*) echo alpha-dec-vms ; exit ;;
+	    I*) echo ia64-dec-vms ; exit ;;
+	    V*) echo vax-dec-vms ; exit ;;
+	esac ;;
+    *:XENIX:*:SysV)
+	echo i386-pc-xenix
+	exit ;;
+    i*86:skyos:*:*)
+	echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+	exit ;;
+    i*86:rdos:*:*)
+	echo ${UNAME_MACHINE}-pc-rdos
+	exit ;;
+    i*86:AROS:*:*)
+	echo ${UNAME_MACHINE}-pc-aros
+	exit ;;
+    x86_64:VMkernel:*:*)
+	echo ${UNAME_MACHINE}-unknown-esx
+	exit ;;
+esac
+
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+  /* BFD wants "bsd" instead of "newsos".  Perhaps BFD should be changed,
+     I don't know....  */
+  printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+  printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+	"4"
+#else
+	""
+#endif
+	); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+  printf ("arm-acorn-riscix\n"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+  printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+  int version;
+  version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+  if (version < 4)
+    printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+  else
+    printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+  exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+  printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+  printf ("ns32k-encore-mach\n"); exit (0);
+#else
+  printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+  printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+  printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+  printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+    struct utsname un;
+
+    uname(&un);
+
+    if (strncmp(un.version, "V2", 2) == 0) {
+	printf ("i386-sequent-ptx2\n"); exit (0);
+    }
+    if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+	printf ("i386-sequent-ptx1\n"); exit (0);
+    }
+    printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+#  include <sys/param.h>
+#  if defined (BSD)
+#   if BSD == 43
+      printf ("vax-dec-bsd4.3\n"); exit (0);
+#   else
+#    if BSD == 199006
+      printf ("vax-dec-bsd4.3reno\n"); exit (0);
+#    else
+      printf ("vax-dec-bsd\n"); exit (0);
+#    endif
+#   endif
+#  else
+    printf ("vax-dec-bsd\n"); exit (0);
+#  endif
+# else
+    printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+  printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+  exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
+	{ echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+    case `getsysinfo -f cpu_type` in
+    c1*)
+	echo c1-convex-bsd
+	exit ;;
+    c2*)
+	if getsysinfo -f scalar_acc
+	then echo c32-convex-bsd
+	else echo c2-convex-bsd
+	fi
+	exit ;;
+    c34*)
+	echo c34-convex-bsd
+	exit ;;
+    c38*)
+	echo c38-convex-bsd
+	exit ;;
+    c4*)
+	echo c4-convex-bsd
+	exit ;;
+    esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+and
+  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X     = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo               = `(hostinfo) 2>/dev/null`
+/bin/universe          = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch              = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM  = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/config/config.sub b/config/config.sub
new file mode 100755
index 000000000000..bdda9e4a32c1
--- /dev/null
+++ b/config/config.sub
@@ -0,0 +1,1786 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+#   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+#   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+#   2011, 2012 Free Software Foundation, Inc.
+
+timestamp='2012-08-18'
+
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine.  It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Please send patches to <config-patches@gnu.org>.  Submit a context
+# diff and a properly formatted GNU ChangeLog entry.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support.  The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+#	CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+#	CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+       $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+  -h, --help         print this help, then exit
+  -t, --time-stamp   print date of last modification, then exit
+  -v, --version      print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+  case $1 in
+    --time-stamp | --time* | -t )
+       echo "$timestamp" ; exit ;;
+    --version | -v )
+       echo "$version" ; exit ;;
+    --help | --h* | -h )
+       echo "$usage"; exit ;;
+    -- )     # Stop option processing
+       shift; break ;;
+    - )	# Use stdin as input.
+       break ;;
+    -* )
+       echo "$me: invalid option $1$help"
+       exit 1 ;;
+
+    *local*)
+       # First pass through any local machine types.
+       echo $1
+       exit ;;
+
+    * )
+       break ;;
+  esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+    exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+    exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+  nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+  linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+  knetbsd*-gnu* | netbsd*-gnu* | \
+  kopensolaris*-gnu* | \
+  storm-chaos* | os2-emx* | rtmk-nova*)
+    os=-$maybe_os
+    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+    ;;
+  android-linux)
+    os=-linux-android
+    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+    ;;
+  *)
+    basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+    if [ $basic_machine != $1 ]
+    then os=`echo $1 | sed 's/.*-/-/'`
+    else os=; fi
+    ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work.  We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+	-sun*os*)
+		# Prevent following clause from handling this invalid input.
+		;;
+	-dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+	-att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+	-unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+	-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+	-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+	-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+	-apple | -axis | -knuth | -cray | -microblaze)
+		os=
+		basic_machine=$1
+		;;
+	-bluegene*)
+		os=-cnk
+		;;
+	-sim | -cisco | -oki | -wec | -winbond)
+		os=
+		basic_machine=$1
+		;;
+	-scout)
+		;;
+	-wrs)
+		os=-vxworks
+		basic_machine=$1
+		;;
+	-chorusos*)
+		os=-chorusos
+		basic_machine=$1
+		;;
+	-chorusrdb)
+		os=-chorusrdb
+		basic_machine=$1
+		;;
+	-hiux*)
+		os=-hiuxwe2
+		;;
+	-sco6)
+		os=-sco5v6
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco5)
+		os=-sco3.2v5
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco4)
+		os=-sco3.2v4
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco3.2.[4-9]*)
+		os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco3.2v[4-9]*)
+		# Don't forget version if it is 3.2v4 or newer.
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco5v6*)
+		# Don't forget version if it is 3.2v4 or newer.
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco*)
+		os=-sco3.2v2
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-udk*)
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-isc)
+		os=-isc2.2
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-clix*)
+		basic_machine=clipper-intergraph
+		;;
+	-isc*)
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-lynx*178)
+		os=-lynxos178
+		;;
+	-lynx*5)
+		os=-lynxos5
+		;;
+	-lynx*)
+		os=-lynxos
+		;;
+	-ptx*)
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+		;;
+	-windowsnt*)
+		os=`echo $os | sed -e 's/windowsnt/winnt/'`
+		;;
+	-psos*)
+		os=-psos
+		;;
+	-mint | -mint[0-9]*)
+		basic_machine=m68k-atari
+		os=-mint
+		;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+	# Recognize the basic CPU types without company name.
+	# Some are omitted here because they have special meanings below.
+	1750a | 580 \
+	| a29k \
+	| aarch64 | aarch64_be \
+	| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+	| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+	| am33_2.0 \
+	| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
+        | be32 | be64 \
+	| bfin \
+	| c4x | clipper \
+	| d10v | d30v | dlx | dsp16xx \
+	| epiphany \
+	| fido | fr30 | frv \
+	| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+	| hexagon \
+	| i370 | i860 | i960 | ia64 \
+	| ip2k | iq2000 \
+	| le32 | le64 \
+	| lm32 \
+	| m32c | m32r | m32rle | m68000 | m68k | m88k \
+	| maxq | mb | microblaze | mcore | mep | metag \
+	| mips | mipsbe | mipseb | mipsel | mipsle \
+	| mips16 \
+	| mips64 | mips64el \
+	| mips64octeon | mips64octeonel \
+	| mips64orion | mips64orionel \
+	| mips64r5900 | mips64r5900el \
+	| mips64vr | mips64vrel \
+	| mips64vr4100 | mips64vr4100el \
+	| mips64vr4300 | mips64vr4300el \
+	| mips64vr5000 | mips64vr5000el \
+	| mips64vr5900 | mips64vr5900el \
+	| mipsisa32 | mipsisa32el \
+	| mipsisa32r2 | mipsisa32r2el \
+	| mipsisa64 | mipsisa64el \
+	| mipsisa64r2 | mipsisa64r2el \
+	| mipsisa64sb1 | mipsisa64sb1el \
+	| mipsisa64sr71k | mipsisa64sr71kel \
+	| mipstx39 | mipstx39el \
+	| mn10200 | mn10300 \
+	| moxie \
+	| mt \
+	| msp430 \
+	| nds32 | nds32le | nds32be \
+	| nios | nios2 \
+	| ns16k | ns32k \
+	| open8 \
+	| or32 \
+	| pdp10 | pdp11 | pj | pjl \
+	| powerpc | powerpc64 | powerpc64le | powerpcle \
+	| pyramid \
+	| rl78 | rx \
+	| score \
+	| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+	| sh64 | sh64le \
+	| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+	| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+	| spu \
+	| tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+	| ubicom32 \
+	| v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+	| we32k \
+	| x86 | xc16x | xstormy16 | xtensa \
+	| z8k | z80)
+		basic_machine=$basic_machine-unknown
+		;;
+	c54x)
+		basic_machine=tic54x-unknown
+		;;
+	c55x)
+		basic_machine=tic55x-unknown
+		;;
+	c6x)
+		basic_machine=tic6x-unknown
+		;;
+	m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip)
+		basic_machine=$basic_machine-unknown
+		os=-none
+		;;
+	m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+		;;
+	ms1)
+		basic_machine=mt-unknown
+		;;
+
+	strongarm | thumb | xscale)
+		basic_machine=arm-unknown
+		;;
+	xgate)
+		basic_machine=$basic_machine-unknown
+		os=-none
+		;;
+	xscaleeb)
+		basic_machine=armeb-unknown
+		;;
+
+	xscaleel)
+		basic_machine=armel-unknown
+		;;
+
+	# We use `pc' rather than `unknown'
+	# because (1) that's what they normally are, and
+	# (2) the word "unknown" tends to confuse beginning users.
+	i*86 | x86_64)
+	  basic_machine=$basic_machine-pc
+	  ;;
+	# Object if more than one company name word.
+	*-*-*)
+		echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+		exit 1
+		;;
+	# Recognize the basic CPU types with company name.
+	580-* \
+	| a29k-* \
+	| aarch64-* | aarch64_be-* \
+	| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+	| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+	| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+	| arm-*  | armbe-* | armle-* | armeb-* | armv*-* \
+	| avr-* | avr32-* \
+	| be32-* | be64-* \
+	| bfin-* | bs2000-* \
+	| c[123]* | c30-* | [cjt]90-* | c4x-* \
+	| clipper-* | craynv-* | cydra-* \
+	| d10v-* | d30v-* | dlx-* \
+	| elxsi-* \
+	| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+	| h8300-* | h8500-* \
+	| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+	| hexagon-* \
+	| i*86-* | i860-* | i960-* | ia64-* \
+	| ip2k-* | iq2000-* \
+	| le32-* | le64-* \
+	| lm32-* \
+	| m32c-* | m32r-* | m32rle-* \
+	| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+	| m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
+	| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+	| mips16-* \
+	| mips64-* | mips64el-* \
+	| mips64octeon-* | mips64octeonel-* \
+	| mips64orion-* | mips64orionel-* \
+	| mips64r5900-* | mips64r5900el-* \
+	| mips64vr-* | mips64vrel-* \
+	| mips64vr4100-* | mips64vr4100el-* \
+	| mips64vr4300-* | mips64vr4300el-* \
+	| mips64vr5000-* | mips64vr5000el-* \
+	| mips64vr5900-* | mips64vr5900el-* \
+	| mipsisa32-* | mipsisa32el-* \
+	| mipsisa32r2-* | mipsisa32r2el-* \
+	| mipsisa64-* | mipsisa64el-* \
+	| mipsisa64r2-* | mipsisa64r2el-* \
+	| mipsisa64sb1-* | mipsisa64sb1el-* \
+	| mipsisa64sr71k-* | mipsisa64sr71kel-* \
+	| mipstx39-* | mipstx39el-* \
+	| mmix-* \
+	| mt-* \
+	| msp430-* \
+	| nds32-* | nds32le-* | nds32be-* \
+	| nios-* | nios2-* \
+	| none-* | np1-* | ns16k-* | ns32k-* \
+	| open8-* \
+	| orion-* \
+	| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+	| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+	| pyramid-* \
+	| rl78-* | romp-* | rs6000-* | rx-* \
+	| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+	| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+	| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+	| sparclite-* \
+	| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+	| tahoe-* \
+	| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+	| tile*-* \
+	| tron-* \
+	| ubicom32-* \
+	| v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+	| vax-* \
+	| we32k-* \
+	| x86-* | x86_64-* | xc16x-* | xps100-* \
+	| xstormy16-* | xtensa*-* \
+	| ymp-* \
+	| z8k-* | z80-*)
+		;;
+	# Recognize the basic CPU types without company name, with glob match.
+	xtensa*)
+		basic_machine=$basic_machine-unknown
+		;;
+	# Recognize the various machine names and aliases which stand
+	# for a CPU type and a company and sometimes even an OS.
+	386bsd)
+		basic_machine=i386-unknown
+		os=-bsd
+		;;
+	3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+		basic_machine=m68000-att
+		;;
+	3b*)
+		basic_machine=we32k-att
+		;;
+	a29khif)
+		basic_machine=a29k-amd
+		os=-udi
+		;;
+	abacus)
+		basic_machine=abacus-unknown
+		;;
+	adobe68k)
+		basic_machine=m68010-adobe
+		os=-scout
+		;;
+	alliant | fx80)
+		basic_machine=fx80-alliant
+		;;
+	altos | altos3068)
+		basic_machine=m68k-altos
+		;;
+	am29k)
+		basic_machine=a29k-none
+		os=-bsd
+		;;
+	amd64)
+		basic_machine=x86_64-pc
+		;;
+	amd64-*)
+		basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	amdahl)
+		basic_machine=580-amdahl
+		os=-sysv
+		;;
+	amiga | amiga-*)
+		basic_machine=m68k-unknown
+		;;
+	amigaos | amigados)
+		basic_machine=m68k-unknown
+		os=-amigaos
+		;;
+	amigaunix | amix)
+		basic_machine=m68k-unknown
+		os=-sysv4
+		;;
+	apollo68)
+		basic_machine=m68k-apollo
+		os=-sysv
+		;;
+	apollo68bsd)
+		basic_machine=m68k-apollo
+		os=-bsd
+		;;
+	aros)
+		basic_machine=i386-pc
+		os=-aros
+		;;
+	aux)
+		basic_machine=m68k-apple
+		os=-aux
+		;;
+	balance)
+		basic_machine=ns32k-sequent
+		os=-dynix
+		;;
+	blackfin)
+		basic_machine=bfin-unknown
+		os=-linux
+		;;
+	blackfin-*)
+		basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
+	bluegene*)
+		basic_machine=powerpc-ibm
+		os=-cnk
+		;;
+	c54x-*)
+		basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c55x-*)
+		basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c6x-*)
+		basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c90)
+		basic_machine=c90-cray
+		os=-unicos
+		;;
+	cegcc)
+		basic_machine=arm-unknown
+		os=-cegcc
+		;;
+	convex-c1)
+		basic_machine=c1-convex
+		os=-bsd
+		;;
+	convex-c2)
+		basic_machine=c2-convex
+		os=-bsd
+		;;
+	convex-c32)
+		basic_machine=c32-convex
+		os=-bsd
+		;;
+	convex-c34)
+		basic_machine=c34-convex
+		os=-bsd
+		;;
+	convex-c38)
+		basic_machine=c38-convex
+		os=-bsd
+		;;
+	cray | j90)
+		basic_machine=j90-cray
+		os=-unicos
+		;;
+	craynv)
+		basic_machine=craynv-cray
+		os=-unicosmp
+		;;
+	cr16 | cr16-*)
+		basic_machine=cr16-unknown
+		os=-elf
+		;;
+	crds | unos)
+		basic_machine=m68k-crds
+		;;
+	crisv32 | crisv32-* | etraxfs*)
+		basic_machine=crisv32-axis
+		;;
+	cris | cris-* | etrax*)
+		basic_machine=cris-axis
+		;;
+	crx)
+		basic_machine=crx-unknown
+		os=-elf
+		;;
+	da30 | da30-*)
+		basic_machine=m68k-da30
+		;;
+	decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+		basic_machine=mips-dec
+		;;
+	decsystem10* | dec10*)
+		basic_machine=pdp10-dec
+		os=-tops10
+		;;
+	decsystem20* | dec20*)
+		basic_machine=pdp10-dec
+		os=-tops20
+		;;
+	delta | 3300 | motorola-3300 | motorola-delta \
+	      | 3300-motorola | delta-motorola)
+		basic_machine=m68k-motorola
+		;;
+	delta88)
+		basic_machine=m88k-motorola
+		os=-sysv3
+		;;
+	dicos)
+		basic_machine=i686-pc
+		os=-dicos
+		;;
+	djgpp)
+		basic_machine=i586-pc
+		os=-msdosdjgpp
+		;;
+	dpx20 | dpx20-*)
+		basic_machine=rs6000-bull
+		os=-bosx
+		;;
+	dpx2* | dpx2*-bull)
+		basic_machine=m68k-bull
+		os=-sysv3
+		;;
+	ebmon29k)
+		basic_machine=a29k-amd
+		os=-ebmon
+		;;
+	elxsi)
+		basic_machine=elxsi-elxsi
+		os=-bsd
+		;;
+	encore | umax | mmax)
+		basic_machine=ns32k-encore
+		;;
+	es1800 | OSE68k | ose68k | ose | OSE)
+		basic_machine=m68k-ericsson
+		os=-ose
+		;;
+	fx2800)
+		basic_machine=i860-alliant
+		;;
+	genix)
+		basic_machine=ns32k-ns
+		;;
+	gmicro)
+		basic_machine=tron-gmicro
+		os=-sysv
+		;;
+	go32)
+		basic_machine=i386-pc
+		os=-go32
+		;;
+	h3050r* | hiux*)
+		basic_machine=hppa1.1-hitachi
+		os=-hiuxwe2
+		;;
+	h8300hms)
+		basic_machine=h8300-hitachi
+		os=-hms
+		;;
+	h8300xray)
+		basic_machine=h8300-hitachi
+		os=-xray
+		;;
+	h8500hms)
+		basic_machine=h8500-hitachi
+		os=-hms
+		;;
+	harris)
+		basic_machine=m88k-harris
+		os=-sysv3
+		;;
+	hp300-*)
+		basic_machine=m68k-hp
+		;;
+	hp300bsd)
+		basic_machine=m68k-hp
+		os=-bsd
+		;;
+	hp300hpux)
+		basic_machine=m68k-hp
+		os=-hpux
+		;;
+	hp3k9[0-9][0-9] | hp9[0-9][0-9])
+		basic_machine=hppa1.0-hp
+		;;
+	hp9k2[0-9][0-9] | hp9k31[0-9])
+		basic_machine=m68000-hp
+		;;
+	hp9k3[2-9][0-9])
+		basic_machine=m68k-hp
+		;;
+	hp9k6[0-9][0-9] | hp6[0-9][0-9])
+		basic_machine=hppa1.0-hp
+		;;
+	hp9k7[0-79][0-9] | hp7[0-79][0-9])
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k78[0-9] | hp78[0-9])
+		# FIXME: really hppa2.0-hp
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+		# FIXME: really hppa2.0-hp
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k8[0-9][13679] | hp8[0-9][13679])
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k8[0-9][0-9] | hp8[0-9][0-9])
+		basic_machine=hppa1.0-hp
+		;;
+	hppa-next)
+		os=-nextstep3
+		;;
+	hppaosf)
+		basic_machine=hppa1.1-hp
+		os=-osf
+		;;
+	hppro)
+		basic_machine=hppa1.1-hp
+		os=-proelf
+		;;
+	i370-ibm* | ibm*)
+		basic_machine=i370-ibm
+		;;
+	i*86v32)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-sysv32
+		;;
+	i*86v4*)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-sysv4
+		;;
+	i*86v)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-sysv
+		;;
+	i*86sol2)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-solaris2
+		;;
+	i386mach)
+		basic_machine=i386-mach
+		os=-mach
+		;;
+	i386-vsta | vsta)
+		basic_machine=i386-unknown
+		os=-vsta
+		;;
+	iris | iris4d)
+		basic_machine=mips-sgi
+		case $os in
+		    -irix*)
+			;;
+		    *)
+			os=-irix4
+			;;
+		esac
+		;;
+	isi68 | isi)
+		basic_machine=m68k-isi
+		os=-sysv
+		;;
+	m68knommu)
+		basic_machine=m68k-unknown
+		os=-linux
+		;;
+	m68knommu-*)
+		basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
+	m88k-omron*)
+		basic_machine=m88k-omron
+		;;
+	magnum | m3230)
+		basic_machine=mips-mips
+		os=-sysv
+		;;
+	merlin)
+		basic_machine=ns32k-utek
+		os=-sysv
+		;;
+	microblaze)
+		basic_machine=microblaze-xilinx
+		;;
+	mingw64)
+		basic_machine=x86_64-pc
+		os=-mingw64
+		;;
+	mingw32)
+		basic_machine=i386-pc
+		os=-mingw32
+		;;
+	mingw32ce)
+		basic_machine=arm-unknown
+		os=-mingw32ce
+		;;
+	miniframe)
+		basic_machine=m68000-convergent
+		;;
+	*mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+		basic_machine=m68k-atari
+		os=-mint
+		;;
+	mips3*-*)
+		basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+		;;
+	mips3*)
+		basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+		;;
+	monitor)
+		basic_machine=m68k-rom68k
+		os=-coff
+		;;
+	morphos)
+		basic_machine=powerpc-unknown
+		os=-morphos
+		;;
+	msdos)
+		basic_machine=i386-pc
+		os=-msdos
+		;;
+	ms1-*)
+		basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+		;;
+	msys)
+		basic_machine=i386-pc
+		os=-msys
+		;;
+	mvs)
+		basic_machine=i370-ibm
+		os=-mvs
+		;;
+	nacl)
+		basic_machine=le32-unknown
+		os=-nacl
+		;;
+	ncr3000)
+		basic_machine=i486-ncr
+		os=-sysv4
+		;;
+	netbsd386)
+		basic_machine=i386-unknown
+		os=-netbsd
+		;;
+	netwinder)
+		basic_machine=armv4l-rebel
+		os=-linux
+		;;
+	news | news700 | news800 | news900)
+		basic_machine=m68k-sony
+		os=-newsos
+		;;
+	news1000)
+		basic_machine=m68030-sony
+		os=-newsos
+		;;
+	news-3600 | risc-news)
+		basic_machine=mips-sony
+		os=-newsos
+		;;
+	necv70)
+		basic_machine=v70-nec
+		os=-sysv
+		;;
+	next | m*-next )
+		basic_machine=m68k-next
+		case $os in
+		    -nextstep* )
+			;;
+		    -ns2*)
+		      os=-nextstep2
+			;;
+		    *)
+		      os=-nextstep3
+			;;
+		esac
+		;;
+	nh3000)
+		basic_machine=m68k-harris
+		os=-cxux
+		;;
+	nh[45]000)
+		basic_machine=m88k-harris
+		os=-cxux
+		;;
+	nindy960)
+		basic_machine=i960-intel
+		os=-nindy
+		;;
+	mon960)
+		basic_machine=i960-intel
+		os=-mon960
+		;;
+	nonstopux)
+		basic_machine=mips-compaq
+		os=-nonstopux
+		;;
+	np1)
+		basic_machine=np1-gould
+		;;
+	neo-tandem)
+		basic_machine=neo-tandem
+		;;
+	nse-tandem)
+		basic_machine=nse-tandem
+		;;
+	nsr-tandem)
+		basic_machine=nsr-tandem
+		;;
+	op50n-* | op60c-*)
+		basic_machine=hppa1.1-oki
+		os=-proelf
+		;;
+	openrisc | openrisc-*)
+		basic_machine=or32-unknown
+		;;
+	os400)
+		basic_machine=powerpc-ibm
+		os=-os400
+		;;
+	OSE68000 | ose68000)
+		basic_machine=m68000-ericsson
+		os=-ose
+		;;
+	os68k)
+		basic_machine=m68k-none
+		os=-os68k
+		;;
+	pa-hitachi)
+		basic_machine=hppa1.1-hitachi
+		os=-hiuxwe2
+		;;
+	paragon)
+		basic_machine=i860-intel
+		os=-osf
+		;;
+	parisc)
+		basic_machine=hppa-unknown
+		os=-linux
+		;;
+	parisc-*)
+		basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
+	pbd)
+		basic_machine=sparc-tti
+		;;
+	pbb)
+		basic_machine=m68k-tti
+		;;
+	pc532 | pc532-*)
+		basic_machine=ns32k-pc532
+		;;
+	pc98)
+		basic_machine=i386-pc
+		;;
+	pc98-*)
+		basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentium | p5 | k5 | k6 | nexgen | viac3)
+		basic_machine=i586-pc
+		;;
+	pentiumpro | p6 | 6x86 | athlon | athlon_*)
+		basic_machine=i686-pc
+		;;
+	pentiumii | pentium2 | pentiumiii | pentium3)
+		basic_machine=i686-pc
+		;;
+	pentium4)
+		basic_machine=i786-pc
+		;;
+	pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+		basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentiumpro-* | p6-* | 6x86-* | athlon-*)
+		basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+		basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentium4-*)
+		basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pn)
+		basic_machine=pn-gould
+		;;
+	power)	basic_machine=power-ibm
+		;;
+	ppc | ppcbe)	basic_machine=powerpc-unknown
+		;;
+	ppc-* | ppcbe-*)
+		basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ppcle | powerpclittle | ppc-le | powerpc-little)
+		basic_machine=powerpcle-unknown
+		;;
+	ppcle-* | powerpclittle-*)
+		basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ppc64)	basic_machine=powerpc64-unknown
+		;;
+	ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+		basic_machine=powerpc64le-unknown
+		;;
+	ppc64le-* | powerpc64little-*)
+		basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ps2)
+		basic_machine=i386-ibm
+		;;
+	pw32)
+		basic_machine=i586-unknown
+		os=-pw32
+		;;
+	rdos)
+		basic_machine=i386-pc
+		os=-rdos
+		;;
+	rom68k)
+		basic_machine=m68k-rom68k
+		os=-coff
+		;;
+	rm[46]00)
+		basic_machine=mips-siemens
+		;;
+	rtpc | rtpc-*)
+		basic_machine=romp-ibm
+		;;
+	s390 | s390-*)
+		basic_machine=s390-ibm
+		;;
+	s390x | s390x-*)
+		basic_machine=s390x-ibm
+		;;
+	sa29200)
+		basic_machine=a29k-amd
+		os=-udi
+		;;
+	sb1)
+		basic_machine=mipsisa64sb1-unknown
+		;;
+	sb1el)
+		basic_machine=mipsisa64sb1el-unknown
+		;;
+	sde)
+		basic_machine=mipsisa32-sde
+		os=-elf
+		;;
+	sei)
+		basic_machine=mips-sei
+		os=-seiux
+		;;
+	sequent)
+		basic_machine=i386-sequent
+		;;
+	sh)
+		basic_machine=sh-hitachi
+		os=-hms
+		;;
+	sh5el)
+		basic_machine=sh5le-unknown
+		;;
+	sh64)
+		basic_machine=sh64-unknown
+		;;
+	sparclite-wrs | simso-wrs)
+		basic_machine=sparclite-wrs
+		os=-vxworks
+		;;
+	sps7)
+		basic_machine=m68k-bull
+		os=-sysv2
+		;;
+	spur)
+		basic_machine=spur-unknown
+		;;
+	st2000)
+		basic_machine=m68k-tandem
+		;;
+	stratus)
+		basic_machine=i860-stratus
+		os=-sysv4
+		;;
+	strongarm-* | thumb-*)
+		basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	sun2)
+		basic_machine=m68000-sun
+		;;
+	sun2os3)
+		basic_machine=m68000-sun
+		os=-sunos3
+		;;
+	sun2os4)
+		basic_machine=m68000-sun
+		os=-sunos4
+		;;
+	sun3os3)
+		basic_machine=m68k-sun
+		os=-sunos3
+		;;
+	sun3os4)
+		basic_machine=m68k-sun
+		os=-sunos4
+		;;
+	sun4os3)
+		basic_machine=sparc-sun
+		os=-sunos3
+		;;
+	sun4os4)
+		basic_machine=sparc-sun
+		os=-sunos4
+		;;
+	sun4sol2)
+		basic_machine=sparc-sun
+		os=-solaris2
+		;;
+	sun3 | sun3-*)
+		basic_machine=m68k-sun
+		;;
+	sun4)
+		basic_machine=sparc-sun
+		;;
+	sun386 | sun386i | roadrunner)
+		basic_machine=i386-sun
+		;;
+	sv1)
+		basic_machine=sv1-cray
+		os=-unicos
+		;;
+	symmetry)
+		basic_machine=i386-sequent
+		os=-dynix
+		;;
+	t3e)
+		basic_machine=alphaev5-cray
+		os=-unicos
+		;;
+	t90)
+		basic_machine=t90-cray
+		os=-unicos
+		;;
+	tile*)
+		basic_machine=$basic_machine-unknown
+		os=-linux-gnu
+		;;
+	tx39)
+		basic_machine=mipstx39-unknown
+		;;
+	tx39el)
+		basic_machine=mipstx39el-unknown
+		;;
+	toad1)
+		basic_machine=pdp10-xkl
+		os=-tops20
+		;;
+	tower | tower-32)
+		basic_machine=m68k-ncr
+		;;
+	tpf)
+		basic_machine=s390x-ibm
+		os=-tpf
+		;;
+	udi29k)
+		basic_machine=a29k-amd
+		os=-udi
+		;;
+	ultra3)
+		basic_machine=a29k-nyu
+		os=-sym1
+		;;
+	v810 | necv810)
+		basic_machine=v810-nec
+		os=-none
+		;;
+	vaxv)
+		basic_machine=vax-dec
+		os=-sysv
+		;;
+	vms)
+		basic_machine=vax-dec
+		os=-vms
+		;;
+	vpp*|vx|vx-*)
+		basic_machine=f301-fujitsu
+		;;
+	vxworks960)
+		basic_machine=i960-wrs
+		os=-vxworks
+		;;
+	vxworks68)
+		basic_machine=m68k-wrs
+		os=-vxworks
+		;;
+	vxworks29k)
+		basic_machine=a29k-wrs
+		os=-vxworks
+		;;
+	w65*)
+		basic_machine=w65-wdc
+		os=-none
+		;;
+	w89k-*)
+		basic_machine=hppa1.1-winbond
+		os=-proelf
+		;;
+	xbox)
+		basic_machine=i686-pc
+		os=-mingw32
+		;;
+	xps | xps100)
+		basic_machine=xps100-honeywell
+		;;
+	xscale-* | xscalee[bl]-*)
+		basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+		;;
+	ymp)
+		basic_machine=ymp-cray
+		os=-unicos
+		;;
+	z8k-*-coff)
+		basic_machine=z8k-unknown
+		os=-sim
+		;;
+	z80-*-coff)
+		basic_machine=z80-unknown
+		os=-sim
+		;;
+	none)
+		basic_machine=none-none
+		os=-none
+		;;
+
+# Here we handle the default manufacturer of certain CPU types.  It is in
+# some cases the only manufacturer, in others, it is the most popular.
+	w89k)
+		basic_machine=hppa1.1-winbond
+		;;
+	op50n)
+		basic_machine=hppa1.1-oki
+		;;
+	op60c)
+		basic_machine=hppa1.1-oki
+		;;
+	romp)
+		basic_machine=romp-ibm
+		;;
+	mmix)
+		basic_machine=mmix-knuth
+		;;
+	rs6000)
+		basic_machine=rs6000-ibm
+		;;
+	vax)
+		basic_machine=vax-dec
+		;;
+	pdp10)
+		# there are many clones, so DEC is not a safe bet
+		basic_machine=pdp10-unknown
+		;;
+	pdp11)
+		basic_machine=pdp11-dec
+		;;
+	we32k)
+		basic_machine=we32k-att
+		;;
+	sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+		basic_machine=sh-unknown
+		;;
+	sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+		basic_machine=sparc-sun
+		;;
+	cydra)
+		basic_machine=cydra-cydrome
+		;;
+	orion)
+		basic_machine=orion-highlevel
+		;;
+	orion105)
+		basic_machine=clipper-highlevel
+		;;
+	mac | mpw | mac-mpw)
+		basic_machine=m68k-apple
+		;;
+	pmac | pmac-mpw)
+		basic_machine=powerpc-apple
+		;;
+	*-unknown)
+		# Make sure to match an already-canonicalized machine name.
+		;;
+	*)
+		echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+		exit 1
+		;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+	*-digital*)
+		basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+		;;
+	*-commodore*)
+		basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+		;;
+	*)
+		;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+	# First match some system type aliases
+	# that might get confused with valid system types.
+	# -solaris* is a basic system type, with this one exception.
+	-auroraux)
+		os=-auroraux
+		;;
+	-solaris1 | -solaris1.*)
+		os=`echo $os | sed -e 's|solaris1|sunos4|'`
+		;;
+	-solaris)
+		os=-solaris2
+		;;
+	-svr4*)
+		os=-sysv4
+		;;
+	-unixware*)
+		os=-sysv4.2uw
+		;;
+	-gnu/linux*)
+		os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+		;;
+	# First accept the basic system types.
+	# The portable systems come first.
+	# Each alternative MUST END IN A *, to match a version number.
+	# -sysv* is not here because it comes later, after sysvr4.
+	-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+	      | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+	      | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+	      | -sym* | -kopensolaris* \
+	      | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+	      | -aos* | -aros* \
+	      | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+	      | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+	      | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+	      | -bitrig* | -openbsd* | -solidbsd* \
+	      | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+	      | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+	      | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+	      | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+	      | -chorusos* | -chorusrdb* | -cegcc* \
+	      | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+	      | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+	      | -linux-newlib* | -linux-musl* | -linux-uclibc* \
+	      | -uxpv* | -beos* | -mpeix* | -udk* \
+	      | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+	      | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+	      | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+	      | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+	      | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+	      | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+	      | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*)
+	# Remember, each alternative MUST END IN *, to match a version number.
+		;;
+	-qnx*)
+		case $basic_machine in
+		    x86-* | i*86-*)
+			;;
+		    *)
+			os=-nto$os
+			;;
+		esac
+		;;
+	-nto-qnx*)
+		;;
+	-nto*)
+		os=`echo $os | sed -e 's|nto|nto-qnx|'`
+		;;
+	-sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+	      | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+	      | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+		;;
+	-mac*)
+		os=`echo $os | sed -e 's|mac|macos|'`
+		;;
+	-linux-dietlibc)
+		os=-linux-dietlibc
+		;;
+	-linux*)
+		os=`echo $os | sed -e 's|linux|linux-gnu|'`
+		;;
+	-sunos5*)
+		os=`echo $os | sed -e 's|sunos5|solaris2|'`
+		;;
+	-sunos6*)
+		os=`echo $os | sed -e 's|sunos6|solaris3|'`
+		;;
+	-opened*)
+		os=-openedition
+		;;
+	-os400*)
+		os=-os400
+		;;
+	-wince*)
+		os=-wince
+		;;
+	-osfrose*)
+		os=-osfrose
+		;;
+	-osf*)
+		os=-osf
+		;;
+	-utek*)
+		os=-bsd
+		;;
+	-dynix*)
+		os=-bsd
+		;;
+	-acis*)
+		os=-aos
+		;;
+	-atheos*)
+		os=-atheos
+		;;
+	-syllable*)
+		os=-syllable
+		;;
+	-386bsd)
+		os=-bsd
+		;;
+	-ctix* | -uts*)
+		os=-sysv
+		;;
+	-nova*)
+		os=-rtmk-nova
+		;;
+	-ns2 )
+		os=-nextstep2
+		;;
+	-nsk*)
+		os=-nsk
+		;;
+	# Preserve the version number of sinix5.
+	-sinix5.*)
+		os=`echo $os | sed -e 's|sinix|sysv|'`
+		;;
+	-sinix*)
+		os=-sysv4
+		;;
+	-tpf*)
+		os=-tpf
+		;;
+	-triton*)
+		os=-sysv3
+		;;
+	-oss*)
+		os=-sysv3
+		;;
+	-svr4)
+		os=-sysv4
+		;;
+	-svr3)
+		os=-sysv3
+		;;
+	-sysvr4)
+		os=-sysv4
+		;;
+	# This must come after -sysvr4.
+	-sysv*)
+		;;
+	-ose*)
+		os=-ose
+		;;
+	-es1800*)
+		os=-ose
+		;;
+	-xenix)
+		os=-xenix
+		;;
+	-*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+		os=-mint
+		;;
+	-aros*)
+		os=-aros
+		;;
+	-kaos*)
+		os=-kaos
+		;;
+	-zvmoe)
+		os=-zvmoe
+		;;
+	-dicos*)
+		os=-dicos
+		;;
+	-nacl*)
+		;;
+	-none)
+		;;
+	*)
+		# Get rid of the `-' at the beginning of $os.
+		os=`echo $os | sed 's/[^-]*-//'`
+		echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+		exit 1
+		;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine or put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system.  Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+	score-*)
+		os=-elf
+		;;
+	spu-*)
+		os=-elf
+		;;
+	*-acorn)
+		os=-riscix1.2
+		;;
+	arm*-rebel)
+		os=-linux
+		;;
+	arm*-semi)
+		os=-aout
+		;;
+	c4x-* | tic4x-*)
+		os=-coff
+		;;
+	hexagon-*)
+		os=-elf
+		;;
+	tic54x-*)
+		os=-coff
+		;;
+	tic55x-*)
+		os=-coff
+		;;
+	tic6x-*)
+		os=-coff
+		;;
+	# This must come before the *-dec entry.
+	pdp10-*)
+		os=-tops20
+		;;
+	pdp11-*)
+		os=-none
+		;;
+	*-dec | vax-*)
+		os=-ultrix4.2
+		;;
+	m68*-apollo)
+		os=-domain
+		;;
+	i386-sun)
+		os=-sunos4.0.2
+		;;
+	m68000-sun)
+		os=-sunos3
+		;;
+	m68*-cisco)
+		os=-aout
+		;;
+	mep-*)
+		os=-elf
+		;;
+	mips*-cisco)
+		os=-elf
+		;;
+	mips*-*)
+		os=-elf
+		;;
+	or32-*)
+		os=-coff
+		;;
+	*-tti)	# must be before sparc entry or we get the wrong os.
+		os=-sysv3
+		;;
+	sparc-* | *-sun)
+		os=-sunos4.1.1
+		;;
+	*-be)
+		os=-beos
+		;;
+	*-haiku)
+		os=-haiku
+		;;
+	*-ibm)
+		os=-aix
+		;;
+	*-knuth)
+		os=-mmixware
+		;;
+	*-wec)
+		os=-proelf
+		;;
+	*-winbond)
+		os=-proelf
+		;;
+	*-oki)
+		os=-proelf
+		;;
+	*-hp)
+		os=-hpux
+		;;
+	*-hitachi)
+		os=-hiux
+		;;
+	i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+		os=-sysv
+		;;
+	*-cbm)
+		os=-amigaos
+		;;
+	*-dg)
+		os=-dgux
+		;;
+	*-dolphin)
+		os=-sysv3
+		;;
+	m68k-ccur)
+		os=-rtu
+		;;
+	m88k-omron*)
+		os=-luna
+		;;
+	*-next )
+		os=-nextstep
+		;;
+	*-sequent)
+		os=-ptx
+		;;
+	*-crds)
+		os=-unos
+		;;
+	*-ns)
+		os=-genix
+		;;
+	i370-*)
+		os=-mvs
+		;;
+	*-next)
+		os=-nextstep3
+		;;
+	*-gould)
+		os=-sysv
+		;;
+	*-highlevel)
+		os=-bsd
+		;;
+	*-encore)
+		os=-bsd
+		;;
+	*-sgi)
+		os=-irix
+		;;
+	*-siemens)
+		os=-sysv4
+		;;
+	*-masscomp)
+		os=-rtu
+		;;
+	f30[01]-fujitsu | f700-fujitsu)
+		os=-uxpv
+		;;
+	*-rom68k)
+		os=-coff
+		;;
+	*-*bug)
+		os=-coff
+		;;
+	*-apple)
+		os=-macos
+		;;
+	*-atari*)
+		os=-mint
+		;;
+	*)
+		os=-none
+		;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer.  We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+	*-unknown)
+		case $os in
+			-riscix*)
+				vendor=acorn
+				;;
+			-sunos*)
+				vendor=sun
+				;;
+			-cnk*|-aix*)
+				vendor=ibm
+				;;
+			-beos*)
+				vendor=be
+				;;
+			-hpux*)
+				vendor=hp
+				;;
+			-mpeix*)
+				vendor=hp
+				;;
+			-hiux*)
+				vendor=hitachi
+				;;
+			-unos*)
+				vendor=crds
+				;;
+			-dgux*)
+				vendor=dg
+				;;
+			-luna*)
+				vendor=omron
+				;;
+			-genix*)
+				vendor=ns
+				;;
+			-mvs* | -opened*)
+				vendor=ibm
+				;;
+			-os400*)
+				vendor=ibm
+				;;
+			-ptx*)
+				vendor=sequent
+				;;
+			-tpf*)
+				vendor=ibm
+				;;
+			-vxsim* | -vxworks* | -windiss*)
+				vendor=wrs
+				;;
+			-aux*)
+				vendor=apple
+				;;
+			-hms*)
+				vendor=hitachi
+				;;
+			-mpw* | -macos*)
+				vendor=apple
+				;;
+			-*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+				vendor=atari
+				;;
+			-vos*)
+				vendor=stratus
+				;;
+		esac
+		basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+		;;
+esac
+
+echo $basic_machine$os
+exit
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
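
For illustration, the canonicalization implemented above can be exercised directly. Based on the case logic in this copy of the script, the following invocations (input aliases chosen arbitrarily) should resolve as shown:

    $ sh config/config.sub amd64-linux
    x86_64-pc-linux-gnu
    $ sh config/config.sub sun4
    sparc-sun-sunos4.1.1

The first shows an alias rewrite (amd64 -> x86_64-pc, -linux -> -linux-gnu); the second shows a bare machine name picking up both its default vendor and default operating system.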
diff --git a/config/install-sh b/config/install-sh
new file mode 100755
index 000000000000..377bb8687ffe
--- /dev/null
+++ b/config/install-sh
@@ -0,0 +1,527 @@
+#!/bin/sh
+# install - install a program, script, or datafile
+
+scriptversion=2011-11-20.07; # UTC
+
+# This originates from X11R5 (mit/util/scripts/install.sh), which was
+# later released in X11R6 (xc/config/util/install.sh) with the
+# following copyright and license.
+#
+# Copyright (C) 1994 X Consortium
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
+# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+# Except as contained in this notice, the name of the X Consortium shall not
+# be used in advertising or otherwise to promote the sale, use or other deal-
+# ings in this Software without prior written authorization from the X Consor-
+# tium.
+#
+#
+# FSF changes to this file are in the public domain.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# 'make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.
+
+nl='
+'
+IFS=" ""	$nl"
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit=${DOITPROG-}
+if test -z "$doit"; then
+  doit_exec=exec
+else
+  doit_exec=$doit
+fi
+
+# Put in absolute file names if you don't have them in your path;
+# or use environment vars.
+
+chgrpprog=${CHGRPPROG-chgrp}
+chmodprog=${CHMODPROG-chmod}
+chownprog=${CHOWNPROG-chown}
+cmpprog=${CMPPROG-cmp}
+cpprog=${CPPROG-cp}
+mkdirprog=${MKDIRPROG-mkdir}
+mvprog=${MVPROG-mv}
+rmprog=${RMPROG-rm}
+stripprog=${STRIPPROG-strip}
+
+posix_glob='?'
+initialize_posix_glob='
+  test "$posix_glob" != "?" || {
+    if (set -f) 2>/dev/null; then
+      posix_glob=
+    else
+      posix_glob=:
+    fi
+  }
+'
+
+posix_mkdir=
+
+# Desired mode of installed file.
+mode=0755
+
+chgrpcmd=
+chmodcmd=$chmodprog
+chowncmd=
+mvcmd=$mvprog
+rmcmd="$rmprog -f"
+stripcmd=
+
+src=
+dst=
+dir_arg=
+dst_arg=
+
+copy_on_change=false
+no_target_directory=
+
+usage="\
+Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
+   or: $0 [OPTION]... SRCFILES... DIRECTORY
+   or: $0 [OPTION]... -t DIRECTORY SRCFILES...
+   or: $0 [OPTION]... -d DIRECTORIES...
+
+In the 1st form, copy SRCFILE to DSTFILE.
+In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
+In the 4th, create DIRECTORIES.
+
+Options:
+     --help     display this help and exit.
+     --version  display version info and exit.
+
+  -c            (ignored)
+  -C            install only if different (preserve the last data modification time)
+  -d            create directories instead of installing files.
+  -g GROUP      $chgrpprog installed files to GROUP.
+  -m MODE       $chmodprog installed files to MODE.
+  -o USER       $chownprog installed files to USER.
+  -s            $stripprog installed files.
+  -t DIRECTORY  install into DIRECTORY.
+  -T            report an error if DSTFILE is a directory.
+
+Environment variables override the default commands:
+  CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
+  RMPROG STRIPPROG
+"
+
+while test $# -ne 0; do
+  case $1 in
+    -c) ;;
+
+    -C) copy_on_change=true;;
+
+    -d) dir_arg=true;;
+
+    -g) chgrpcmd="$chgrpprog $2"
+	shift;;
+
+    --help) echo "$usage"; exit $?;;
+
+    -m) mode=$2
+	case $mode in
+	  *' '* | *'	'* | *'
+'*	  | *'*'* | *'?'* | *'['*)
+	    echo "$0: invalid mode: $mode" >&2
+	    exit 1;;
+	esac
+	shift;;
+
+    -o) chowncmd="$chownprog $2"
+	shift;;
+
+    -s) stripcmd=$stripprog;;
+
+    -t) dst_arg=$2
+	# Protect names problematic for 'test' and other utilities.
+	case $dst_arg in
+	  -* | [=\(\)!]) dst_arg=./$dst_arg;;
+	esac
+	shift;;
+
+    -T) no_target_directory=true;;
+
+    --version) echo "$0 $scriptversion"; exit $?;;
+
+    --)	shift
+	break;;
+
+    -*)	echo "$0: invalid option: $1" >&2
+	exit 1;;
+
+    *)  break;;
+  esac
+  shift
+done
+
+if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
+  # When -d is used, all remaining arguments are directories to create.
+  # When -t is used, the destination is already specified.
+  # Otherwise, the last argument is the destination.  Remove it from $@.
+  for arg
+  do
+    if test -n "$dst_arg"; then
+      # $@ is not empty: it contains at least $arg.
+      set fnord "$@" "$dst_arg"
+      shift # fnord
+    fi
+    shift # arg
+    dst_arg=$arg
+    # Protect names problematic for 'test' and other utilities.
+    case $dst_arg in
+      -* | [=\(\)!]) dst_arg=./$dst_arg;;
+    esac
+  done
+fi
+
+if test $# -eq 0; then
+  if test -z "$dir_arg"; then
+    echo "$0: no input file specified." >&2
+    exit 1
+  fi
+  # It's OK to call 'install-sh -d' without argument.
+  # This can happen when creating conditional directories.
+  exit 0
+fi
+
+if test -z "$dir_arg"; then
+  do_exit='(exit $ret); exit $ret'
+  trap "ret=129; $do_exit" 1
+  trap "ret=130; $do_exit" 2
+  trap "ret=141; $do_exit" 13
+  trap "ret=143; $do_exit" 15
+
+  # Set umask so as not to create temps with too-generous modes.
+  # However, 'strip' requires both read and write access to temps.
+  case $mode in
+    # Optimize common cases.
+    *644) cp_umask=133;;
+    *755) cp_umask=22;;
+
+    *[0-7])
+      if test -z "$stripcmd"; then
+	u_plus_rw=
+      else
+	u_plus_rw='% 200'
+      fi
+      cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
+    *)
+      if test -z "$stripcmd"; then
+	u_plus_rw=
+      else
+	u_plus_rw=,u+rw
+      fi
+      cp_umask=$mode$u_plus_rw;;
+  esac
+fi
+
+for src
+do
+  # Protect names problematic for 'test' and other utilities.
+  case $src in
+    -* | [=\(\)!]) src=./$src;;
+  esac
+
+  if test -n "$dir_arg"; then
+    dst=$src
+    dstdir=$dst
+    test -d "$dstdir"
+    dstdir_status=$?
+  else
+
+    # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
+    # might cause directories to be created, which would be especially bad
+    # if $src (and thus $dsttmp) contains '*'.
+    if test ! -f "$src" && test ! -d "$src"; then
+      echo "$0: $src does not exist." >&2
+      exit 1
+    fi
+
+    if test -z "$dst_arg"; then
+      echo "$0: no destination specified." >&2
+      exit 1
+    fi
+    dst=$dst_arg
+
+    # If destination is a directory, append the input filename; won't work
+    # if double slashes aren't ignored.
+    if test -d "$dst"; then
+      if test -n "$no_target_directory"; then
+	echo "$0: $dst_arg: Is a directory" >&2
+	exit 1
+      fi
+      dstdir=$dst
+      dst=$dstdir/`basename "$src"`
+      dstdir_status=0
+    else
+      # Prefer dirname, but fall back on a substitute if dirname fails.
+      dstdir=`
+	(dirname "$dst") 2>/dev/null ||
+	expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	     X"$dst" : 'X\(//\)[^/]' \| \
+	     X"$dst" : 'X\(//\)$' \| \
+	     X"$dst" : 'X\(/\)' \| . 2>/dev/null ||
+	echo X"$dst" |
+	    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+		   s//\1/
+		   q
+		 }
+		 /^X\(\/\/\)[^/].*/{
+		   s//\1/
+		   q
+		 }
+		 /^X\(\/\/\)$/{
+		   s//\1/
+		   q
+		 }
+		 /^X\(\/\).*/{
+		   s//\1/
+		   q
+		 }
+		 s/.*/./; q'
+      `
+
+      test -d "$dstdir"
+      dstdir_status=$?
+    fi
+  fi
+
+  obsolete_mkdir_used=false
+
+  if test $dstdir_status != 0; then
+    case $posix_mkdir in
+      '')
+	# Create intermediate dirs using mode 755 as modified by the umask.
+	# This is like FreeBSD 'install' as of 1997-10-28.
+	umask=`umask`
+	case $stripcmd.$umask in
+	  # Optimize common cases.
+	  *[2367][2367]) mkdir_umask=$umask;;
+	  .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
+
+	  *[0-7])
+	    mkdir_umask=`expr $umask + 22 \
+	      - $umask % 100 % 40 + $umask % 20 \
+	      - $umask % 10 % 4 + $umask % 2
+	    `;;
+	  *) mkdir_umask=$umask,go-w;;
+	esac
+
+	# With -d, create the new directory with the user-specified mode.
+	# Otherwise, rely on $mkdir_umask.
+	if test -n "$dir_arg"; then
+	  mkdir_mode=-m$mode
+	else
+	  mkdir_mode=
+	fi
+
+	posix_mkdir=false
+	case $umask in
+	  *[123567][0-7][0-7])
+	    # POSIX mkdir -p sets u+wx bits regardless of umask, which
+	    # is incompatible with FreeBSD 'install' when (umask & 300) != 0.
+	    ;;
+	  *)
+	    tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
+	    trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
+
+	    if (umask $mkdir_umask &&
+		exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
+	    then
+	      if test -z "$dir_arg" || {
+		   # Check for POSIX incompatibilities with -m.
+		   # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
+		   # other-writable bit of parent directory when it shouldn't.
+		   # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
+		   ls_ld_tmpdir=`ls -ld "$tmpdir"`
+		   case $ls_ld_tmpdir in
+		     d????-?r-*) different_mode=700;;
+		     d????-?--*) different_mode=755;;
+		     *) false;;
+		   esac &&
+		   $mkdirprog -m$different_mode -p -- "$tmpdir" && {
+		     ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
+		     test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
+		   }
+		 }
+	      then posix_mkdir=:
+	      fi
+	      rmdir "$tmpdir/d" "$tmpdir"
+	    else
+	      # Remove any dirs left behind by ancient mkdir implementations.
+	      rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
+	    fi
+	    trap '' 0;;
+	esac;;
+    esac
+
+    if
+      $posix_mkdir && (
+	umask $mkdir_umask &&
+	$doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
+      )
+    then :
+    else
+
+      # The umask is ridiculous, or mkdir does not conform to POSIX,
+      # or it failed possibly due to a race condition.  Create the
+      # directory the slow way, step by step, checking for races as we go.
+
+      case $dstdir in
+	/*) prefix='/';;
+	[-=\(\)!]*) prefix='./';;
+	*)  prefix='';;
+      esac
+
+      eval "$initialize_posix_glob"
+
+      oIFS=$IFS
+      IFS=/
+      $posix_glob set -f
+      set fnord $dstdir
+      shift
+      $posix_glob set +f
+      IFS=$oIFS
+
+      prefixes=
+
+      for d
+      do
+	test X"$d" = X && continue
+
+	prefix=$prefix$d
+	if test -d "$prefix"; then
+	  prefixes=
+	else
+	  if $posix_mkdir; then
+	    (umask=$mkdir_umask &&
+	     $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
+	    # Don't fail if two instances are running concurrently.
+	    test -d "$prefix" || exit 1
+	  else
+	    case $prefix in
+	      *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
+	      *) qprefix=$prefix;;
+	    esac
+	    prefixes="$prefixes '$qprefix'"
+	  fi
+	fi
+	prefix=$prefix/
+      done
+
+      if test -n "$prefixes"; then
+	# Don't fail if two instances are running concurrently.
+	(umask $mkdir_umask &&
+	 eval "\$doit_exec \$mkdirprog $prefixes") ||
+	  test -d "$dstdir" || exit 1
+	obsolete_mkdir_used=true
+      fi
+    fi
+  fi
+
+  if test -n "$dir_arg"; then
+    { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
+    { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
+    { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
+      test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
+  else
+
+    # Make a couple of temp file names in the proper directory.
+    dsttmp=$dstdir/_inst.$$_
+    rmtmp=$dstdir/_rm.$$_
+
+    # Trap to clean up those temp files at exit.
+    trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
+
+    # Copy the file name to the temp name.
+    (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
+
+    # and set any options; do chmod last to preserve setuid bits.
+    #
+    # If any of these fail, we abort the whole thing.  If we want to
+    # ignore errors from any of these, just make sure not to ignore
+    # errors from the above "$doit $cpprog $src $dsttmp" command.
+    #
+    { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
+    { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
+    { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
+    { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
+
+    # If -C, don't bother to copy if it wouldn't change the file.
+    if $copy_on_change &&
+       old=`LC_ALL=C ls -dlL "$dst"	2>/dev/null` &&
+       new=`LC_ALL=C ls -dlL "$dsttmp"	2>/dev/null` &&
+
+       eval "$initialize_posix_glob" &&
+       $posix_glob set -f &&
+       set X $old && old=:$2:$4:$5:$6 &&
+       set X $new && new=:$2:$4:$5:$6 &&
+       $posix_glob set +f &&
+
+       test "$old" = "$new" &&
+       $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
+    then
+      rm -f "$dsttmp"
+    else
+      # Rename the file to the real destination.
+      $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
+
+      # The rename failed, perhaps because mv can't rename something else
+      # to itself, or perhaps because mv is so ancient that it does not
+      # support -f.
+      {
+	# Now remove or move aside any old file at destination location.
+	# We try this two ways since rm can't unlink itself on some
+	# systems and the destination file might be busy for other
+	# reasons.  In this case, the final cleanup might fail but the new
+	# file should still install successfully.
+	{
+	  test ! -f "$dst" ||
+	  $doit $rmcmd -f "$dst" 2>/dev/null ||
+	  { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
+	    { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
+	  } ||
+	  { echo "$0: cannot unlink or rename $dst" >&2
+	    (exit 1); exit 1
+	  }
+	} &&
+
+	# Now rename the file to the real destination.
+	$doit $mvcmd "$dsttmp" "$dst"
+      }
+    fi || exit 1
+
+    trap '' 0
+  fi
+done
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
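
The option parser above accepts a small subset of the BSD install interface. A minimal usage sketch, with placeholder file names and paths:

    $ sh config/install-sh -d /tmp/demo/bin
    $ sh config/install-sh -c -m 0755 myprog /tmp/demo/bin/myprog
    $ sh config/install-sh -C -m 0644 -t /tmp/demo/share mydoc.txt

Here -c is accepted but ignored, -C skips the copy when the destination is already identical, and -t installs the remaining arguments into the named directory.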
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 000000000000..00c1d495d445
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,329 @@
+AC_INIT(nix, m4_esyscmd([bash -c "echo -n $(cat ./version)$VERSION_SUFFIX"]))
+AC_CONFIG_SRCDIR(README)
+AC_CONFIG_AUX_DIR(config)
+
+AC_PROG_SED
+
+# Construct a Nix system name (like "i686-linux").
+AC_CANONICAL_HOST
+AC_MSG_CHECKING([for the canonical Nix system name])
+
+AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
+  [Platform identifier (e.g., `i686-linux').]),
+  [system=$withval],
+  [case "$host_cpu" in
+     i*86)
+        machine_name="i686";;
+     amd64)
+        machine_name="x86_64";;
+     *)
+        machine_name="$host_cpu";;
+   esac
+
+   case "$host_os" in
+     linux-gnu*)
+        # For backward compatibility, strip the `-gnu' part.
+        system="$machine_name-linux";;
+     *)
+        # Strip the version number from names such as `gnu0.3',
+        # `darwin10.2.0', etc.
+        system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
+   esac])
+
+sys_name=$(uname -s | tr 'A-Z ' 'a-z_')
+
+case $sys_name in
+    cygwin*)
+        sys_name=cygwin
+        ;;
+esac
+
+AC_MSG_RESULT($system)
+AC_SUBST(system)
+AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')])
+
+
+# State should be stored in /nix/var, unless the user overrides it explicitly.
+test "$localstatedir" = '${prefix}/var' && localstatedir=/nix/var
+
+
+# Solaris-specific stuff.
+if test "$sys_name" = sunos; then
+    # Solaris requires -lsocket -lnsl for network functions
+    LIBS="-lsocket -lnsl $LIBS"
+fi
+
+
+CFLAGS=${CFLAGS:--g -O3 -Wall}
+CXXFLAGS=${CXXFLAGS:--g -O3 -Wall}
+AC_PROG_CC
+AC_PROG_CXX
+
+
+# Use 64-bit file system calls so that we can support files > 2 GiB.
+AC_SYS_LARGEFILE
+
+
+# Check for pubsetbuf.
+AC_MSG_CHECKING([for pubsetbuf])
+AC_LANG_PUSH(C++)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <iostream>
+using namespace std;
+static char buf[1024];]],
+    [[cerr.rdbuf()->pubsetbuf(buf, sizeof(buf));]])],
+    [AC_MSG_RESULT(yes) AC_DEFINE(HAVE_PUBSETBUF, 1, [Whether pubsetbuf is available.])],
+    AC_MSG_RESULT(no))
+AC_LANG_POP(C++)
+
+
+# Check for chroot support (requires chroot() and bind mounts).
+AC_CHECK_FUNCS([chroot])
+AC_CHECK_FUNCS([unshare])
+AC_CHECK_FUNCS([statvfs])
+AC_CHECK_HEADERS([sched.h])
+AC_CHECK_HEADERS([sys/param.h])
+AC_CHECK_HEADERS([sys/mount.h], [], [],
+[#ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+# endif
+])
+
+
+# Check for lutimes, optionally used for changing the mtime of
+# symlinks.
+AC_CHECK_FUNCS([lutimes])
+
+
+# Check for sched_setaffinity.
+AC_CHECK_FUNCS([sched_setaffinity])
+
+
+# Check whether the store optimiser can optimise symlinks.
+AC_MSG_CHECKING([whether it is possible to create a link to a symlink])
+ln -s bla tmp_link
+if ln tmp_link tmp_link2 2> /dev/null; then
+    AC_MSG_RESULT(yes)
+    AC_DEFINE(CAN_LINK_SYMLINK, 1, [Whether link() works on symlinks.])
+else
+    AC_MSG_RESULT(no)
+fi
+rm -f tmp_link tmp_link2
+
+
+# Check for <locale>.
+AC_LANG_PUSH(C++)
+AC_CHECK_HEADERS([locale])
+AC_LANG_POP(C++)
+
+
+# Check for <err.h>.
+AC_CHECK_HEADER([err.h], [], [bsddiff_compat_include="-Icompat-include"])
+AC_SUBST([bsddiff_compat_include])
+
+
+# Check whether we have the personality() syscall, which allows us to
+# do i686-linux builds on x86_64-linux machines.
+AC_CHECK_HEADERS([sys/personality.h])
+
+
+# Check for <linux/fs.h> (for immutable file support).
+AC_CHECK_HEADERS([linux/fs.h])
+
+
+AC_DEFUN([NEED_PROG],
+[
+AC_PATH_PROG($1, $2)
+if test -z "$$1"; then
+    AC_MSG_ERROR([$2 is required])
+fi
+])
+
+NEED_PROG(curl, curl)
+NEED_PROG(bash, bash)
+NEED_PROG(patch, patch)
+AC_PATH_PROG(xmllint, xmllint, false)
+AC_PATH_PROG(xsltproc, xsltproc, false)
+AC_PATH_PROG(w3m, w3m, false)
+AC_PATH_PROG(flex, flex, false)
+AC_PATH_PROG(bison, bison, false)
+NEED_PROG(perl, perl)
+NEED_PROG(sed, sed)
+NEED_PROG(tar, tar)
+NEED_PROG(bzip2, bzip2)
+NEED_PROG(gzip, gzip)
+NEED_PROG(xz, xz)
+AC_PATH_PROG(dot, dot)
+AC_PATH_PROG(dblatex, dblatex)
+AC_PATH_PROG(pv, pv, pv)
+
+
+# Test that Perl has the open/fork feature (Perl 5.8.0 and beyond).
+AC_MSG_CHECKING([whether Perl is recent enough])
+if ! $perl -e 'open(FOO, "-|", "true"); while (<FOO>) { print; }; close FOO or die;'; then
+    AC_MSG_RESULT(no)
+    AC_MSG_ERROR([Your Perl version is too old.  Nix requires Perl 5.8.0 or newer.])
+fi
+AC_MSG_RESULT(yes)
+
+
+# Figure out where to install Perl modules.
+AC_MSG_CHECKING([for the Perl installation prefix])
+perlversion=$($perl -e 'use Config; print $Config{version};')
+perlarchname=$($perl -e 'use Config; print $Config{archname};')
+AC_SUBST(perllibdir, [${libdir}/perl5/site_perl/$perlversion/$perlarchname])
+AC_MSG_RESULT($perllibdir)
+
+
+NEED_PROG(cat, cat)
+NEED_PROG(tr, tr)
+AC_ARG_WITH(coreutils-bin, AC_HELP_STRING([--with-coreutils-bin=PATH],
+  [path of cat, mkdir, etc.]),
+  coreutils=$withval, coreutils=$(dirname $cat))
+AC_SUBST(coreutils)
+
+
+AC_ARG_WITH(docbook-rng, AC_HELP_STRING([--with-docbook-rng=PATH],
+  [path of the DocBook RelaxNG schema]),
+  docbookrng=$withval, docbookrng=/docbook-rng-missing)
+AC_SUBST(docbookrng)
+
+AC_ARG_WITH(docbook-xsl, AC_HELP_STRING([--with-docbook-xsl=PATH],
+  [path of the DocBook XSL stylesheets]),
+  docbookxsl=$withval, docbookxsl=/docbook-xsl-missing)
+AC_SUBST(docbookxsl)
+
+
+AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
+  [path of the Nix store (defaults to /nix/store)]),
+  storedir=$withval, storedir='/nix/store')
+AC_SUBST(storedir)
+
+
+# Look for OpenSSL, an optional dependency.
+AC_PATH_PROG(openssl, openssl, openssl) # if not found, call openssl in $PATH
+AC_SUBST(openssl)
+AC_DEFINE_UNQUOTED(OPENSSL_PATH, ["$openssl"], [Path of the OpenSSL binary])
+
+PKG_CHECK_MODULES([OPENSSL], [libcrypto],
+  [AC_DEFINE([HAVE_OPENSSL], [1], [Whether to use OpenSSL.])
+   CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"
+   have_openssl=1], [have_openssl=])
+AC_SUBST(HAVE_OPENSSL, [$have_openssl])
+
+
+# Look for libbz2, a required dependency.
+AC_CHECK_LIB([bz2], [BZ2_bzWriteOpen], [true],
+  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2.  See http://www.bzip.org/.])])
+AC_CHECK_HEADERS([bzlib.h], [true],
+  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2.  See http://www.bzip.org/.])])
+
+
+# Look for SQLite, a required dependency.
+PKG_CHECK_MODULES([SQLITE3], [sqlite3 >= 3.6.19], [CXXFLAGS="$SQLITE3_CFLAGS $CXXFLAGS"])
+
+
+# Whether to use the Boehm garbage collector.
+AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc],
+  [enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=no]]),
+  gc=$enableval, gc=no)
+if test "$gc" = yes; then
+  PKG_CHECK_MODULES([BDW_GC], [bdw-gc])
+  CXXFLAGS="$BDW_GC_CFLAGS $CXXFLAGS"
+  AC_DEFINE(HAVE_BOEHMGC, 1, [Whether to use the Boehm garbage collector.])
+fi
+
+
+# Check for the required Perl dependencies (DBI, DBD::SQLite and WWW::Curl).
+perlFlags="-I$perllibdir"
+
+AC_ARG_WITH(dbi, AC_HELP_STRING([--with-dbi=PATH],
+  [prefix of the Perl DBI library]),
+  perlFlags="$perlFlags -I$withval")
+
+AC_ARG_WITH(dbd-sqlite, AC_HELP_STRING([--with-dbd-sqlite=PATH],
+  [prefix of the Perl DBD::SQLite library]),
+  perlFlags="$perlFlags -I$withval")
+
+AC_ARG_WITH(www-curl, AC_HELP_STRING([--with-www-curl=PATH],
+  [prefix of the Perl WWW::Curl library]),
+  perlFlags="$perlFlags -I$withval")
+
+AC_MSG_CHECKING([whether DBD::SQLite works])
+if ! $perl $perlFlags -e 'use DBI; use DBD::SQLite;' 2>&5; then
+    AC_MSG_RESULT(no)
+    AC_MSG_FAILURE([The Perl modules DBI and/or DBD::SQLite are missing.])
+fi
+AC_MSG_RESULT(yes)
+
+AC_MSG_CHECKING([whether WWW::Curl works])
+if ! $perl $perlFlags -e 'use WWW::Curl;' 2>&5; then
+    AC_MSG_RESULT(no)
+    AC_MSG_FAILURE([The Perl module WWW::Curl is missing.])
+fi
+AC_MSG_RESULT(yes)
+
+AC_SUBST(perlFlags)
+
+
+# Whether to build the Perl bindings
+AC_MSG_CHECKING([whether to build the Perl bindings])
+AC_ARG_ENABLE(perl-bindings, AC_HELP_STRING([--enable-perl-bindings],
+  [whether to build the Perl bindings (recommended) [default=yes]]),
+  perlbindings=$enableval, perlbindings=yes)
+if test "$enable_shared" = no; then
+   # Perl bindings require shared libraries.
+   perlbindings=no
+fi
+AC_SUBST(perlbindings)
+AC_MSG_RESULT($perlbindings)
+
+
+AC_ARG_ENABLE(init-state, AC_HELP_STRING([--disable-init-state],
+  [do not initialise DB etc. in `make install']),
+  init_state=$enableval, init_state=yes)
+#AM_CONDITIONAL(INIT_STATE, test "$init_state" = "yes")
+
+
+# Setuid installations.
+AC_CHECK_FUNCS([setresuid setreuid lchown])
+
+
+# Nice to have, but not essential.
+AC_CHECK_FUNCS([strsignal posix_fallocate nanosleep sysconf])
+
+
+# This is needed if bzip2 is a static library, and the Nix libraries
+# are dynamic.
+if test "$(uname)" = "Darwin"; then
+    LDFLAGS="-all_load $LDFLAGS"
+fi
+
+
+# Figure out the extension of dynamic libraries.
+eval dynlib_suffix=$shrext_cmds
+AC_SUBST(dynlib_suffix)
+
+
+# Do we have GNU tar?
+AC_MSG_CHECKING([if you have a recent GNU tar])
+if $tar --version 2> /dev/null | grep -q GNU && tar cvf /dev/null --warning=no-timestamp ./config.log > /dev/null; then
+    AC_MSG_RESULT(yes)
+    tarFlags="--warning=no-timestamp"
+else
+    AC_MSG_RESULT(no)
+fi
+AC_SUBST(tarFlags)
+
+
+# Expand all variables in config.status.
+test "$prefix" = NONE && prefix=$ac_default_prefix
+test "$exec_prefix" = NONE && exec_prefix='${prefix}'
+for name in $ac_subst_vars; do
+    declare $name="$(eval echo "${!name}")"
+    declare $name="$(eval echo "${!name}")"
+    declare $name="$(eval echo "${!name}")"
+done
+
+AC_CONFIG_HEADER([config.h])
+AC_CONFIG_FILES([])
+AC_OUTPUT
diff --git a/corepkgs/buildenv.nix b/corepkgs/buildenv.nix
new file mode 100644
index 000000000000..c52a0ea93d35
--- /dev/null
+++ b/corepkgs/buildenv.nix
@@ -0,0 +1,28 @@
+with import <nix/config.nix>;
+
+{ derivations, manifest }:
+
+derivation {
+  name = "user-environment";
+  system = builtins.currentSystem;
+  builder = perl;
+  args = [ "-w" ./buildenv.pl ];
+
+  manifest = manifest;
+
+  # !!! grmbl, need structured data for passing this in a clean way.
+  derivations =
+    map (d:
+      [ (d.meta.active or "true")
+        (d.meta.priority or 5)
+        (builtins.length d.outputs)
+      ] ++ map (output: builtins.getAttr output d) d.outputs)
+      derivations;
+
+  # Building user environments remotely just causes huge amounts of
+  # network traffic, so don't do that.
+  preferLocalBuild = true;
+
+  # Don't build in a chroot because Nix's dependencies may not be there.
+  __noChroot = true;
+}
diff --git a/corepkgs/buildenv.pl b/corepkgs/buildenv.pl
new file mode 100644
index 000000000000..264442104320
--- /dev/null
+++ b/corepkgs/buildenv.pl
@@ -0,0 +1,168 @@
+use strict;
+use Cwd;
+use IO::Handle;
+use utf8;
+
+STDOUT->autoflush(1);
+
+my $out = $ENV{"out"};
+mkdir "$out", 0755 or die "error creating $out: $!";
+
+
+my $symlinks = 0;
+
+my %priorities;
+
+
+# For each activated package, create symlinks.
+
+sub createLinks {
+    my $srcDir = shift;
+    my $dstDir = shift;
+    my $priority = shift;
+
+    my @srcFiles = glob("$srcDir/*");
+
+    foreach my $srcFile (@srcFiles) {
+        my $baseName = $srcFile;
+        $baseName =~ s/^.*\///g; # strip directory
+        my $dstFile = "$dstDir/$baseName";
+
+        # The files below are special-cased so that they don't show up
+        # in user profiles, either because they are useless, or
+        # because they would cause pointless collisions (e.g., each
+        # Python package brings its own
+        # `$out/lib/pythonX.Y/site-packages/easy-install.pth'.)
+        # Urgh, hacky...
+        if ($srcFile =~ /\/propagated-build-inputs$/ ||
+            $srcFile =~ /\/nix-support$/ ||
+            $srcFile =~ /\/perllocal.pod$/ ||
+            $srcFile =~ /\/info\/dir$/ ||
+            $srcFile =~ /\/log$/)
+        {
+            # Do nothing.
+        }
+
+        elsif (-d $srcFile) {
+
+            lstat $dstFile;
+
+            if (-d _) {
+                createLinks($srcFile, $dstFile, $priority);
+            }
+
+            elsif (-l _) {
+                my $target = readlink $dstFile or die;
+                if (!-d $target) {
+                    die "collision between directory ‘$srcFile’ and non-directory ‘$target’";
+                }
+                unlink $dstFile or die "error unlinking ‘$dstFile’: $!";
+                mkdir $dstFile, 0755 or
+                    die "error creating directory ‘$dstFile’: $!";
+                createLinks($target, $dstFile, $priorities{$dstFile});
+                createLinks($srcFile, $dstFile, $priority);
+            }
+
+            else {
+                symlink($srcFile, $dstFile) ||
+                    die "error creating link ‘$dstFile’: $!";
+                $priorities{$dstFile} = $priority;
+                $symlinks++;
+            }
+        }
+
+        else {
+
+            if (-l $dstFile) {
+                my $target = readlink $dstFile;
+                my $prevPriority = $priorities{$dstFile};
+                die("collision between ‘$srcFile’ and ‘$target’; " .
+                    "use ‘nix-env --set-flag priority NUMBER PKGNAME’ " .
+                    "to change the priority of one of the conflicting packages\n")
+                    if $prevPriority == $priority;
+                next if $prevPriority < $priority;
+                unlink $dstFile or die;
+            }
+
+            symlink($srcFile, $dstFile) ||
+                die "error creating link ‘$dstFile’: $!";
+            $priorities{$dstFile} = $priority;
+            $symlinks++;
+        }
+    }
+}
+
+
+my %done;
+my %postponed;
+
+sub addPkg;
+sub addPkg {
+    my $pkgDir = shift;
+    my $priority = shift;
+
+    return if (defined $done{$pkgDir});
+    $done{$pkgDir} = 1;
+
+#    print "symlinking $pkgDir\n";
+    createLinks("$pkgDir", "$out", $priority);
+
+    my $propagatedFN = "$pkgDir/nix-support/propagated-user-env-packages";
+    if (-e $propagatedFN) {
+        open PROP, "<$propagatedFN" or die;
+        my $propagated = <PROP>;
+        close PROP;
+        my @propagated = split ' ', $propagated;
+        foreach my $p (@propagated) {
+            $postponed{$p} = 1 unless defined $done{$p};
+        }
+    }
+}
+
+
+# Convert the stuff we get from the environment back into a coherent
+# data type.
+my @pkgs;
+my @derivations = split ' ', $ENV{"derivations"};
+while (scalar @derivations) {
+    my $active = shift @derivations;
+    my $priority = shift @derivations;
+    my $outputs = shift @derivations;
+    for (my $n = 0; $n < $outputs; $n++) {
+        my $path = shift @derivations;
+        push @pkgs,
+            { path => $path
+            , active => $active ne "false"
+            , priority => int($priority) };
+    }
+}
+
+
+# Symlink to the packages that have been installed explicitly by the
+# user.  Process in priority order to reduce unnecessary
+# symlink/unlink steps.
+@pkgs = sort { $a->{priority} <=> $b->{priority} || $a->{path} cmp $b->{path} } @pkgs;
+foreach my $pkg (@pkgs) {
+    #print $pkg, " ", $pkgs{$pkg}->{priority}, "\n";
+    addPkg($pkg->{path}, $pkg->{priority}) if $pkg->{active};
+}
+
+
+# Symlink to the packages that have been "propagated" by packages
+# installed by the user (i.e., package X declares that it wants Y
+# installed as well).  We do these later because they have a lower
+# priority in case of collisions.
+my $priorityCounter = 1000; # don't care about collisions
+while (scalar(keys %postponed) > 0) {
+    my @pkgDirs = keys %postponed;
+    %postponed = ();
+    foreach my $pkgDir (sort @pkgDirs) {
+        addPkg($pkgDir, $priorityCounter++);
+    }
+}
+
+
+print STDERR "created $symlinks symlinks in user environment\n";
+
+
+symlink($ENV{"manifest"}, "$out/manifest.nix") or die "cannot create manifest";
diff --git a/corepkgs/config.nix.in b/corepkgs/config.nix.in
new file mode 100644
index 000000000000..a5ec83b9ea0c
--- /dev/null
+++ b/corepkgs/config.nix.in
@@ -0,0 +1,17 @@
+let
+  fromEnv = var: def:
+    let val = builtins.getEnv var; in
+    if val != "" then val else def;
+in {
+  perl = "@perl@";
+  shell = "@bash@";
+  coreutils = "@coreutils@";
+  bzip2 = "@bzip2@";
+  gzip = "@gzip@";
+  xz = "@xz@";
+  tar = "@tar@";
+  tarFlags = "@tarFlags@";
+  tr = "@tr@";
+  curl = "@curl@";
+  nixBinDir = fromEnv "NIX_BIN_DIR" "@bindir@";
+}
diff --git a/corepkgs/derivation.nix b/corepkgs/derivation.nix
new file mode 100644
index 000000000000..c0fbe8082cd3
--- /dev/null
+++ b/corepkgs/derivation.nix
@@ -0,0 +1,27 @@
+/* This is the implementation of the ‘derivation’ builtin function.
+   It's actually a wrapper around the ‘derivationStrict’ primop. */
+
+drvAttrs @ { outputs ? [ "out" ], ... }:
+
+let
+
+  strict = derivationStrict drvAttrs;
+
+  commonAttrs = drvAttrs // (builtins.listToAttrs outputsList) //
+    { all = map (x: x.value) outputsList;
+      inherit drvAttrs;
+    };
+
+  outputToAttrListElement = outputName:
+    { name = outputName;
+      value = commonAttrs // {
+        outPath = builtins.getAttr outputName strict;
+        drvPath = strict.drvPath;
+        type = "derivation";
+        inherit outputName;
+      };
+    };
+
+  outputsList = map outputToAttrListElement outputs;
+
+in (builtins.head outputsList).value
diff --git a/corepkgs/fetchurl.nix b/corepkgs/fetchurl.nix
new file mode 100644
index 000000000000..4faedb14062a
--- /dev/null
+++ b/corepkgs/fetchurl.nix
@@ -0,0 +1,45 @@
+with import <nix/config.nix>;
+
+{system ? builtins.currentSystem, url, outputHash ? "", outputHashAlgo ? "", md5 ? "", sha1 ? "", sha256 ? "", executable ? false}:
+
+assert (outputHash != "" && outputHashAlgo != "")
+    || md5 != "" || sha1 != "" || sha256 != "";
+
+let
+
+  builder = builtins.toFile "fetchurl.sh"
+    (''
+      echo "downloading $url into $out"
+      ${curl} --fail --location --max-redirs 20 --insecure "$url" > "$out"
+    '' + (if executable then "${coreutils}/chmod +x $out" else ""));
+
+in
+    
+derivation {
+  name = baseNameOf (toString url);
+  builder = shell;
+  args = [ "-e" builder ];
+
+  # New-style output content requirements.
+  outputHashAlgo = if outputHashAlgo != "" then outputHashAlgo else
+      if sha256 != "" then "sha256" else if sha1 != "" then "sha1" else "md5";
+  outputHash = if outputHash != "" then outputHash else
+      if sha256 != "" then sha256 else if sha1 != "" then sha1 else md5;
+  outputHashMode = if executable then "recursive" else "flat";
+  
+  inherit system url;
+
+  # No need to double the amount of network traffic
+  preferLocalBuild = true;
+
+  # Don't build in a chroot because Nix's dependencies may not be there.
+  __noChroot = true;
+
+  impureEnvVars = [
+    # We borrow these environment variables from the caller to allow
+    # easy proxy configuration.  This is impure, but a fixed-output
+    # derivation like fetchurl is allowed to do so since its result is
+    # by definition pure.
+    "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
+  ];
+}
diff --git a/corepkgs/imported-drv-to-derivation.nix b/corepkgs/imported-drv-to-derivation.nix
new file mode 100644
index 000000000000..bdb60169860a
--- /dev/null
+++ b/corepkgs/imported-drv-to-derivation.nix
@@ -0,0 +1,21 @@
+attrs @ { drvPath, outputs, ... }:
+
+let
+
+  commonAttrs = (builtins.listToAttrs outputsList) //
+    { all = map (x: x.value) outputsList;
+      inherit drvPath;
+      type = "derivation";
+    };
+
+  outputToAttrListElement = outputName:
+    { name = outputName;
+      value = commonAttrs // {
+        outPath = builtins.getAttr outputName attrs;
+        inherit outputName;
+      };
+    };
+    
+  outputsList = map outputToAttrListElement outputs;
+    
+in (builtins.head outputsList).value
diff --git a/corepkgs/local.mk b/corepkgs/local.mk
new file mode 100644
index 000000000000..19c1d06962c0
--- /dev/null
+++ b/corepkgs/local.mk
@@ -0,0 +1,5 @@
+corepkgs_FILES = nar.nix buildenv.nix buildenv.pl unpack-channel.nix derivation.nix fetchurl.nix imported-drv-to-derivation.nix
+
+$(foreach file,config.nix $(corepkgs_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/corepkgs)))
+
+template-files += $(d)/config.nix
diff --git a/corepkgs/nar.nix b/corepkgs/nar.nix
new file mode 100644
index 000000000000..04be17fb0ce2
--- /dev/null
+++ b/corepkgs/nar.nix
@@ -0,0 +1,49 @@
+with import <nix/config.nix>;
+
+let
+
+  builder = builtins.toFile "nar.sh"
+    ''
+      export PATH=${nixBinDir}:${coreutils}
+
+      if [ $compressionType = xz ]; then
+        ext=.xz
+        compressor="| ${xz} -7"
+      elif [ $compressionType = bzip2 ]; then
+        ext=.bz2
+        compressor="| ${bzip2}"
+      else
+        ext=
+        compressor=
+      fi
+
+      echo "packing ‘$storePath’..."
+      mkdir $out
+      dst=$out/tmp.nar$ext
+
+      set -o pipefail
+      eval "nix-store --dump \"$storePath\" $compressor > $dst"
+
+      hash=$(nix-hash --flat --type $hashAlgo --base32 $dst)
+      echo -n $hash > $out/nar-compressed-hash
+
+      mv $dst $out/$hash.nar$ext
+    '';
+
+in
+
+{ storePath, hashAlgo, compressionType }:
+
+derivation {
+  name = "nar";
+  system = builtins.currentSystem;
+  builder = shell;
+  args = [ "-e" builder ];
+  inherit storePath hashAlgo compressionType;
+
+  # Don't build in a chroot because Nix's dependencies may not be there.
+  __noChroot = true;
+
+  # Remote machines may not have ${nixBinDir} or ${coreutils} in the same prefixes
+  preferLocalBuild = true;
+}
diff --git a/corepkgs/unpack-channel.nix b/corepkgs/unpack-channel.nix
new file mode 100644
index 000000000000..f7c521035428
--- /dev/null
+++ b/corepkgs/unpack-channel.nix
@@ -0,0 +1,42 @@
+with import <nix/config.nix>;
+
+let
+
+  builder = builtins.toFile "unpack-channel.sh"
+    ''
+      mkdir $out
+      cd $out
+      xzpat="\.xz\$"
+      gzpat="\.gz\$"
+      if [[ "$src" =~ $xzpat ]]; then
+        ${xz} -d < $src | ${tar} xf - ${tarFlags}
+      elif [[ "$src" =~ $gzpat ]]; then
+        ${gzip} -d < $src | ${tar} xf - ${tarFlags}
+      else
+        ${bzip2} -d < $src | ${tar} xf - ${tarFlags}
+      fi
+      mv * $out/$channelName
+      if [ -n "$binaryCacheURL" ]; then
+        mkdir $out/binary-caches
+        echo -n "$binaryCacheURL" > $out/binary-caches/$channelName
+      fi
+    '';
+
+in
+
+{ name, channelName, src, binaryCacheURL ? "" }:
+
+derivation {
+  system = builtins.currentSystem;
+  builder = shell;
+  args = [ "-e" builder ];
+  inherit name channelName src binaryCacheURL;
+
+  PATH = "${nixBinDir}:${coreutils}";
+
+  # No point in doing this remotely.
+  preferLocalBuild = true;
+
+  # Don't build in a chroot because Nix's dependencies may not be there.
+  __noChroot = true;
+}
diff --git a/dev-shell b/dev-shell
new file mode 100755
index 000000000000..eae9246f4710
--- /dev/null
+++ b/dev-shell
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+if [ -e tests/test-tmp ]; then
+    chmod -R u+w tests/test-tmp
+    rm -rf tests/test-tmp
+fi
+
+s=$(type -p nix-shell)
+exec $s release.nix -A tarball --command "
+    unset http_proxy
+    export NIX_REMOTE=$NIX_REMOTE
+    export NIX_PATH='$NIX_PATH'
+    export NIX_BUILD_SHELL=$(type -p bash)
+    export c=\$configureFlags
+    exec $s release.nix -A build.$(if [ $(uname -s) = Darwin ]; then echo x86_64-darwin; else echo x86_64-linux; fi) --exclude tarball --command '
+        configureFlags+=\" \$c --prefix=$(pwd)/inst --sysconfdir=$(pwd)/inst/etc\"
+        return
+    '" \
+    "$@"
diff --git a/doc/manual/bugs.xml b/doc/manual/bugs.xml
new file mode 100644
index 000000000000..aa87e4b57048
--- /dev/null
+++ b/doc/manual/bugs.xml
@@ -0,0 +1,39 @@
+<appendix xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink">
+
+<title>Bugs / To-Do</title>
+
+
+<itemizedlist>
+
+<listitem><para>The man-pages generated from the DocBook documentation
+are ugly.</para></listitem>
+
+<listitem><para>Generations ought to form a tree.  E.g., if after
+switching to generation 39, we perform an installation action, a
+generation 43 is created which is a descendant of 39, not 42.  So a
+rollback from 43 ought to go back to 39.  This is not currently
+implemented; generations form a linear sequence.</para></listitem>
+
+<listitem><para>For security, <command>nix-push</command> manifests
+should be digitally signed, and <command>nix-pull</command> should
+verify the signatures.  The actual NAR archives in the cache do not
+need to be signed, since the manifest contains cryptographic hashes of
+these files (and <filename>fetchurl.nix</filename> checks
+them).</para></listitem>
+
+<listitem><para>It would be useful to have an option in
+<command>nix-env --delete-generations</command> to remove non-current
+generations older than a certain age.</para></listitem>
+
+<listitem><para>There should be a flexible way to change the user
+environment builder.  Currently, you have to replace
+<filename><replaceable>prefix</replaceable>/share/nix/corepkgs/buildenv/builder.pl</filename>,
+which is hard-coded into <command>nix-env</command>.  Also, the
+default builder should be more powerful.  For instance, there should
+be some way to specify priorities to resolve
+collisions.</para></listitem>
+
+</itemizedlist>
+
+</appendix>
diff --git a/doc/manual/build-farm.xml b/doc/manual/build-farm.xml
new file mode 100644
index 000000000000..2e0d86b89f09
--- /dev/null
+++ b/doc/manual/build-farm.xml
@@ -0,0 +1,113 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xml:id='chap-distributed-builds'>
+
+<title>Setting Up Distributed Builds</title>
+
+<para>Nix supports distributed builds: a local Nix installation can
+forward Nix builds to other machines over the network.  This allows
+multiple builds to be performed in parallel (thus improving
+performance) and allows Nix to perform multi-platform builds in a
+semi-transparent way.  For instance, if you perform a build for a
+<literal>powerpc-darwin</literal> on an <literal>i686-linux</literal>
+machine, Nix can automatically forward the build to a
+<literal>powerpc-darwin</literal> machine, if available.</para>
+
+<para>You can enable distributed builds by setting the environment
+variable <envar>NIX_BUILD_HOOK</envar> to point to a program that Nix
+will call whenever it wants to build a derivation.  The build hook
+(typically a shell or Perl script) can decline the build, in which case Nix
+will perform it in the usual way if possible, or it can accept it, in
+which case it is responsible for somehow getting the inputs of the
+build to another machine, doing the build there, and getting the
+results back.  The details of the build hook protocol are described in
+the documentation of the <link
+linkend="envar-build-hook"><envar>NIX_BUILD_HOOK</envar>
+variable</link>.</para>
+
+<example xml:id='ex-remote-systems'><title>Remote machine configuration:
+<filename>remote-systems.conf</filename></title>
+<programlisting>
+nix@mcflurry.labs.cs.uu.nl  powerpc-darwin  /home/nix/.ssh/id_quarterpounder_auto  2
+nix@scratchy.labs.cs.uu.nl  i686-linux      /home/nix/.ssh/id_scratchy_auto        8 1 kvm
+nix@itchy.labs.cs.uu.nl     i686-linux      /home/nix/.ssh/id_scratchy_auto        8 2
+nix@poochie.labs.cs.uu.nl   i686-linux      /home/nix/.ssh/id_scratchy_auto        8 2 kvm perf
+</programlisting>
+</example>
+
+<para>Nix ships with a build hook that should be suitable for most
+purposes.  It uses <command>ssh</command> and
+<command>nix-copy-closure</command> to copy the build inputs and
+outputs and perform the remote build.  To use it, you should set
+<envar>NIX_BUILD_HOOK</envar> to
+<filename><replaceable>prefix</replaceable>/libexec/nix/build-remote.pl</filename>.
+You should also define a list of available build machines and point
+the environment variable <envar>NIX_REMOTE_SYSTEMS</envar> to it.  An
+example configuration is shown in <xref linkend='ex-remote-systems'
+/>.  Each line in the file specifies a machine, with the following
+bits of information:
+
+<orderedlist>
+  
+  <listitem><para>The name of the remote machine, optionally preceded by
+  the user under which the remote build should be performed.  This is
+  actually passed as an argument to <command>ssh</command>, so it can
+  be an alias defined in your
+  <filename>~/.ssh/config</filename>.</para></listitem>
+
+  <listitem><para>A comma-separated list of Nix platform type
+  identifiers, such as <literal>powerpc-darwin</literal>.  It is
+  possible for a machine to support multiple platform types, e.g.,
+  <literal>i686-linux,x86_64-linux</literal>.</para></listitem>
+
+  <listitem><para>The SSH private key to be used to log in to the
+  remote machine.  Since builds should be non-interactive, this key
+  should not have a passphrase!</para></listitem>
+
+  <listitem><para>The maximum number of builds that
+  <filename>build-remote.pl</filename> will execute in parallel on the
+  machine.  Typically this should be equal to the number of CPU cores.
+  For instance, the machine <literal>itchy</literal> in the example
+  will execute up to 8 builds in parallel.</para></listitem>
+
+  <listitem><para>The “speed factor”, indicating the relative speed of
+  the machine.  If there are multiple machines of the right type, Nix
+  will prefer the fastest, taking load into account.</para></listitem>
+
+  <listitem><para>A comma-separated list of <emphasis>supported
+  features</emphasis>.  If a derivation has the
+  <varname>requiredSystemFeatures</varname> attribute, then
+  <filename>build-remote.pl</filename> will only perform the
+  derivation on a machine that has the specified features.  For
+  instance, the attribute
+  
+<programlisting>
+requiredSystemFeatures = [ "kvm" ];  
+</programlisting>
+
+  will cause the build to be performed on a machine that has the
+  <literal>kvm</literal> feature (i.e., <literal>scratchy</literal> in
+  the example above).</para></listitem>
+
+  <listitem><para>A comma-separated list of <emphasis>mandatory
+  features</emphasis>.  A machine will only be used to build a
+  derivation if all of the machine’s mandatory features appear in the
+  derivation’s <varname>requiredSystemFeatures</varname> attribute.
+  Thus, in the example, the machine <literal>poochie</literal> will
+  only do derivations that have
+  <varname>requiredSystemFeatures</varname> set to <literal>["kvm"
+  "perf"]</literal> or <literal>["perf"]</literal>.</para></listitem>
+
+</orderedlist>
+
+You should also set up the environment variable
+<envar>NIX_CURRENT_LOAD</envar> to point at a directory (e.g.,
+<filename>/var/run/nix/current-load</filename>) that
+<filename>build-remote.pl</filename> uses to remember how many builds
+it is currently executing remotely.  It doesn't look at the actual
+load on the remote machine, so if you have multiple instances of Nix
+running, they should use the same <envar>NIX_CURRENT_LOAD</envar>
+file.  Maybe in the future <filename>build-remote.pl</filename> will
+look at the actual remote load.</para>
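+
+<para>For illustration only, the resulting environment set-up might look
+like this (the paths are hypothetical and depend on the installation
+prefix and local conventions):
+
+<programlisting>
+export NIX_BUILD_HOOK=/usr/local/libexec/nix/build-remote.pl
+export NIX_REMOTE_SYSTEMS=/etc/nix/remote-systems.conf
+export NIX_CURRENT_LOAD=/var/run/nix/current-load
+mkdir -p /var/run/nix/current-load
+</programlisting>
+
+</para>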
+  
+</chapter>
diff --git a/doc/manual/builtins.xml b/doc/manual/builtins.xml
new file mode 100644
index 000000000000..b289c6f0ed4b
--- /dev/null
+++ b/doc/manual/builtins.xml
@@ -0,0 +1,949 @@
+<section xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xml:id='ssec-builtins'>
+
+<title>Built-in functions</title>
+
+
+<para>This section lists the functions and constants built into the
+Nix expression evaluator.  (The built-in function
+<function>derivation</function> is discussed above.)  Some built-ins,
+such as <function>derivation</function>, are always in scope of every
+Nix expression; you can just access them right away.  But to prevent
+polluting the namespace too much, most built-ins are not in scope.
+Instead, you can access them through the <varname>builtins</varname>
+built-in value, which is a set that contains all built-in functions
+and values.  For instance, <function>derivation</function> is also
+available as <function>builtins.derivation</function>.</para>
+
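+<para>For example, <function>map</function> can be used directly, whereas
+<function>filter</function> must be accessed through
+<varname>builtins</varname>:
+
+<programlisting>
+map (x: x + 1) [ 1 2 3 ]
+builtins.filter (x: x != 2) [ 1 2 3 ]</programlisting>
+
+</para>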
+
+<variablelist>
+
+
+  <varlistentry><term><function>abort</function> <replaceable>s</replaceable></term>
+
+    <listitem><para>Abort Nix expression evaluation and print the error
+    message <replaceable>s</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.add</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return the sum of the integers
+    <replaceable>e1</replaceable> and
+    <replaceable>e2</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.attrNames</function>
+  <replaceable>set</replaceable></term>
+
+    <listitem><para>Return the names of the attributes in the set
+    <replaceable>set</replaceable> in a sorted list.  For instance,
+    <literal>builtins.attrNames { y = 1; x = "foo"; }</literal>
+    evaluates to <literal>[ "x" "y" ]</literal>.  There is no built-in
+    function <function>attrValues</function>, but you can easily
+    define it yourself:
+
+<programlisting>
+attrValues = set: map (name: builtins.getAttr name set) (builtins.attrNames set);</programlisting>
+
+    </para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>baseNameOf</function> <replaceable>s</replaceable></term>
+
+    <listitem><para>Return the <emphasis>base name</emphasis> of the
+    string <replaceable>s</replaceable>, that is, everything following
+    the final slash in the string.  This is similar to the GNU
+    <command>basename</command> command.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><varname>builtins</varname></term>
+
+    <listitem><para>The set <varname>builtins</varname> contains all
+    the built-in functions and values.  You can use
+    <varname>builtins</varname> to test for the availability of
+    features in the Nix installation, e.g.,
+
+<programlisting>
+if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
+
+    This allows a Nix expression to fall back gracefully on older Nix
+    installations that don’t have the desired built-in
+    function.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.compareVersions</function>
+  <replaceable>s1</replaceable> <replaceable>s2</replaceable></term>
+
+    <listitem><para>Compare two strings representing versions and
+    return <literal>-1</literal> if version
+    <replaceable>s1</replaceable> is older than version
+    <replaceable>s2</replaceable>, <literal>0</literal> if they are
+    the same, and <literal>1</literal> if
+    <replaceable>s1</replaceable> is newer than
+    <replaceable>s2</replaceable>.  The version comparison algorithm
+    is the same as the one used by <link
+    linkend="ssec-version-comparisons"><command>nix-env
+    -u</command></link>.  For instance,
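+
+<programlisting>
+builtins.compareVersions "2.1" "2.10"</programlisting>
+
+    evaluates to <literal>-1</literal>, since version
+    <literal>2.10</literal> is considered newer than version
+    <literal>2.1</literal>.</para></listitem>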
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.concatLists</function>
+  <replaceable>lists</replaceable></term>
+
+    <listitem><para>Concatenate a list of lists into a single
+    list.  For instance,
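+
+<programlisting>
+builtins.concatLists [ [ 1 2 ] [ 3 ] [ ] [ 4 5 ] ]</programlisting>
+
+    evaluates to <literal>[ 1 2 3 4 5 ]</literal>.</para></listitem>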
+
+  </varlistentry>
+
+
+  <varlistentry
+  xml:id='builtin-currentSystem'><term><varname>builtins.currentSystem</varname></term>
+
+    <listitem><para>The built-in value <varname>currentSystem</varname>
+    evaluates to the Nix platform identifier for the Nix installation
+    on which the expression is being evaluated, such as
+    <literal>"i686-linux"</literal> or
+    <literal>"powerpc-darwin"</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <!--
+  <varlistentry><term><function>currentTime</function></term>
+
+    <listitem><para>The built-in value <varname>currentTime</varname>
+    returns the current system time in seconds since 00:00:00 1/1/1970
+    UTC.  Due to the evaluation model of Nix expressions
+    (<emphasis>maximal laziness</emphasis>), it always yields the same
+    value within an execution of Nix.</para></listitem>
+
+  </varlistentry>
+  -->
+
+
+  <!--
+  <varlistentry><term><function>dependencyClosure</function></term>
+
+    <listitem><para>TODO</para></listitem>
+
+  </varlistentry>
+  -->
+
+
+  <varlistentry><term><function>derivation</function>
+  <replaceable>attrs</replaceable></term>
+
+    <listitem><para><function>derivation</function> is described in
+    <xref linkend='ssec-derivation' />.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>dirOf</function> <replaceable>s</replaceable></term>
+
+    <listitem><para>Return the directory part of the string
+    <replaceable>s</replaceable>, that is, everything before the final
+    slash in the string.  This is similar to the GNU
+    <command>dirname</command> command.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.div</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return the quotient of the integers
+    <replaceable>e1</replaceable> and
+    <replaceable>e2</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.elem</function>
+  <replaceable>x</replaceable> <replaceable>xs</replaceable></term>
+
+    <listitem><para>Return <literal>true</literal> if a value equal to
+    <replaceable>x</replaceable> occurs in the list
+    <replaceable>xs</replaceable>, and <literal>false</literal>
+    otherwise.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.elemAt</function>
+  <replaceable>xs</replaceable> <replaceable>n</replaceable></term>
+
+    <listitem><para>Return element <replaceable>n</replaceable> from
+    the list <replaceable>xs</replaceable>.  Elements are counted
+    starting from 0.  A fatal error occurs if the index is out of
+    bounds.  For example,
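+
+<programlisting>
+builtins.elemAt [ "a" "b" "c" ] 1</programlisting>
+
+    evaluates to <literal>"b"</literal>.</para></listitem>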
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.filter</function>
+  <replaceable>f</replaceable> <replaceable>xs</replaceable></term>
+
+    <listitem><para>Return a list consisting of the elements of
+    <replaceable>xs</replaceable> for which the function
+    <replaceable>f</replaceable> returns
+    <literal>true</literal>.  For example,
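+
+<programlisting>
+builtins.filter (x: x != 2) [ 1 2 3 2 ]</programlisting>
+
+    evaluates to <literal>[ 1 3 ]</literal>.</para></listitem>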
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.filterSource</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem>
+
+      <para>This function allows you to copy sources into the Nix
+      store while filtering certain files.  For instance, suppose that
+      you want to use the directory <filename>source-dir</filename> as
+      an input to a Nix expression, e.g.
+
+<programlisting>
+stdenv.mkDerivation {
+  ...
+  src = ./source-dir;
+}
+</programlisting>
+
+      However, if <filename>source-dir</filename> is a Subversion
+      working copy, then all those annoying <filename>.svn</filename>
+      subdirectories will also be copied to the store.  Worse, the
+      contents of those directories may change a lot, causing lots of
+      spurious rebuilds.  With <function>filterSource</function> you
+      can filter out the <filename>.svn</filename> directories:
+
+<programlisting>
+  src = builtins.filterSource
+    (path: type: type != "directory" || baseNameOf path != ".svn")
+    ./source-dir;
+</programlisting>
+
+      </para>
+
+      <para>Thus, the first argument <replaceable>e1</replaceable>
+      must be a predicate function that is called for each regular
+      file, directory or symlink in the source tree
+      <replaceable>e2</replaceable>.  If the function returns
+      <literal>true</literal>, the file is copied to the Nix store,
+      otherwise it is omitted.  The function is called with two
+      arguments.  The first is the full path of the file.  The second
+      is a string that identifies the type of the file, which is
+      either <literal>"regular"</literal>,
+      <literal>"directory"</literal>, <literal>"symlink"</literal> or
+      <literal>"unknown"</literal> (for other kinds of files such as
+      device nodes or fifos — but note that those cannot be copied to
+      the Nix store, so if the predicate returns
+      <literal>true</literal> for them, the copy will fail).</para>
+
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.fromJSON</function> <replaceable>e</replaceable></term>
+
+    <listitem><para>Convert a JSON string to a Nix
+    value. For example,
+
+<programlisting>
+builtins.fromJSON ''{"x": [1, 2, 3], "y": null}''
+</programlisting>
+
+    returns the value <literal>{ x = [ 1 2 3 ]; y = null;
+    }</literal>. Floating point numbers are not
+    supported.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.getAttr</function>
+  <replaceable>s</replaceable> <replaceable>set</replaceable></term>
+
+    <listitem><para><function>getAttr</function> returns the attribute
+    named <replaceable>s</replaceable> from
+    <replaceable>set</replaceable>.  Evaluation aborts if the
+    attribute doesn’t exist.  This is a dynamic version of the
+    <literal>.</literal> operator, since <replaceable>s</replaceable>
+    is an expression rather than an identifier.  For example,
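+
+<programlisting>
+builtins.getAttr "x" { x = 123; y = 456; }</programlisting>
+
+    evaluates to <literal>123</literal>.</para></listitem>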
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.getEnv</function>
+  <replaceable>s</replaceable></term>
+
+    <listitem><para><function>getEnv</function> returns the value of
+    the environment variable <replaceable>s</replaceable>, or an empty
+    string if the variable doesn’t exist.  This function should be
+    used with care, as it can introduce all sorts of nasty environment
+    dependencies in your Nix expression.</para>
+
+    <para><function>getEnv</function> is used in Nix Packages to
+    locate the file <filename>~/.nixpkgs/config.nix</filename>, which
+    contains user-local settings for Nix Packages.  (That is, it does
+    a <literal>getEnv "HOME"</literal> to locate the user’s home
+    directory.)</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.hasAttr</function>
+  <replaceable>s</replaceable> <replaceable>set</replaceable></term>
+
+    <listitem><para><function>hasAttr</function> returns
+    <literal>true</literal> if <replaceable>set</replaceable> has an
+    attribute named <replaceable>s</replaceable>, and
+    <literal>false</literal> otherwise.  This is a dynamic version of
+    the <literal>?</literal>  operator, since
+    <replaceable>s</replaceable> is an expression rather than an
+    identifier.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.hashString</function>
+  <replaceable>type</replaceable> <replaceable>s</replaceable></term>
+
+    <listitem><para>Return a base-16 representation of the
+    cryptographic hash of string <replaceable>s</replaceable>.  The
+    hash algorithm specified by <replaceable>type</replaceable> must
+    be one of <literal>"md5"</literal>, <literal>"sha1"</literal> or
+    <literal>"sha256"</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.head</function>
+  <replaceable>list</replaceable></term>
+
+    <listitem><para>Return the first element of a list; abort
+    evaluation if the argument isn’t a list or is an empty list.  You
+    can test whether a list is empty by comparing it with
+    <literal>[]</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>import</function>
+  <replaceable>path</replaceable></term>
+
+    <listitem><para>Load, parse and return the Nix expression in the
+    file <replaceable>path</replaceable>.  If <replaceable>path
+    </replaceable> is a directory, the file <filename>default.nix
+    </filename> in that directory is loaded.  Evaluation aborts if the
+    file doesn’t exist or contains an incorrect Nix expression.
+    <function>import</function> implements Nix’s module system: you
+    can put any Nix expression (such as a set or a function) in a
+    separate file, and use it from Nix expressions in other
+    files.</para>
+
+    <para>A Nix expression loaded by <function>import</function> must
+    not contain any <emphasis>free variables</emphasis> (identifiers
+    that are not defined in the Nix expression itself and are not
+    built-in).  Therefore, it cannot refer to variables that are in
+    scope at the call site.  For instance, if you have a calling
+    expression
+
+<programlisting>
+rec {
+  x = 123;
+  y = import ./foo.nix;
+}</programlisting>
+
+    then the following <filename>foo.nix</filename> will give an
+    error:
+
+<programlisting>
+x + 456</programlisting>
+
+    since <varname>x</varname> is not in scope in
+    <filename>foo.nix</filename>.  If you want <varname>x</varname>
+    to be available in <filename>foo.nix</filename>, you should pass
+    it as a function argument:
+
+<programlisting>
+rec {
+  x = 123;
+  y = import ./foo.nix x;
+}</programlisting>
+
+    and
+
+<programlisting>
+x: x + 456</programlisting>
+
+    (The function argument doesn’t have to be called
+    <varname>x</varname> in <filename>foo.nix</filename>; any name
+    would work.)</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.intersectAttrs</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return a set consisting of the attributes in the
+    set <replaceable>e2</replaceable> that also exist in the set
+    <replaceable>e1</replaceable>.  For example,
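+
+<programlisting>
+builtins.intersectAttrs { a = 1; b = 2; } { b = 10; c = 20; }</programlisting>
+
+    evaluates to <literal>{ b = 10; }</literal>, i.e., the values are
+    taken from the second set.</para></listitem>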
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.isAttrs</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Return <literal>true</literal> if
+    <replaceable>e</replaceable> evaluates to a set, and
+    <literal>false</literal> otherwise.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.isList</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Return <literal>true</literal> if
+    <replaceable>e</replaceable> evaluates to a list, and
+    <literal>false</literal> otherwise.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.isFunction</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Return <literal>true</literal> if
+    <replaceable>e</replaceable> evaluates to a function, and
+    <literal>false</literal> otherwise.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.isString</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Return <literal>true</literal> if
+    <replaceable>e</replaceable> evaluates to a string, and
+    <literal>false</literal> otherwise.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.isInt</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Return <literal>true</literal> if
+    <replaceable>e</replaceable> evaluates to an int, and
+    <literal>false</literal> otherwise.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.isBool</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Return <literal>true</literal> if
+    <replaceable>e</replaceable> evaluates to a bool, and
+    <literal>false</literal> otherwise.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>isNull</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Return <literal>true</literal> if
+    <replaceable>e</replaceable> evaluates to <literal>null</literal>,
+    and <literal>false</literal> otherwise.</para>
+
+    <warning><para>This function is <emphasis>deprecated</emphasis>;
+    just write <literal>e == null</literal> instead.</para></warning>
+
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.length</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Return the length of the list
+    <replaceable>e</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.lessThan</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return <literal>true</literal> if the integer
+    <replaceable>e1</replaceable> is less than the integer
+    <replaceable>e2</replaceable>, and <literal>false</literal>
+    otherwise.  Evaluation aborts if either
+    <replaceable>e1</replaceable> or <replaceable>e2</replaceable>
+    does not evaluate to an integer.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.listToAttrs</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Construct a set from a list specifying the names
+    and values of each attribute.  Each element of the list should be
+    a set consisting of a string-valued attribute
+    <varname>name</varname> specifying the name of the attribute, and
+    an attribute <varname>value</varname> specifying its value.
+    Example:
+
+<programlisting>
+builtins.listToAttrs
+  [ { name = "foo"; value = 123; }
+    { name = "bar"; value = 456; }
+  ]
+</programlisting>
+
+    evaluates to
+
+<programlisting>
+{ foo = 123; bar = 456; }
+</programlisting>
+
+    </para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><function>map</function>
+  <replaceable>f</replaceable> <replaceable>list</replaceable></term>
+
+    <listitem><para>Apply the function <replaceable>f</replaceable> to
+    each element in the list <replaceable>list</replaceable>.  For
+    example,
+
+<programlisting>
+map (x: "foo" + x) [ "bar" "bla" "abc" ]</programlisting>
+
+    evaluates to <literal>[ "foobar" "foobla" "fooabc"
+    ]</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.mul</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return the product of the integers
+    <replaceable>e1</replaceable> and
+    <replaceable>e2</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.parseDrvName</function>
+  <replaceable>s</replaceable></term>
+
+    <listitem><para>Split the string <replaceable>s</replaceable> into
+    a package name and version.  The package name is everything up to
+    but not including the first dash followed by a digit, and the
+    version is everything following that dash.  The result is returned
+    in a set <literal>{ name, version }</literal>.  Thus,
+    <literal>builtins.parseDrvName "nix-0.12pre12876"</literal>
+    returns <literal>{ name = "nix"; version = "0.12pre12876";
+    }</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.pathExists</function>
+  <replaceable>path</replaceable></term>
+
+    <listitem><para>Return <literal>true</literal> if the path
+    <replaceable>path</replaceable> exists, and
+    <literal>false</literal> otherwise.  One application of this
+    function is to conditionally include a Nix expression containing
+    user configuration:
+
+<programlisting>
+let
+  fileName = builtins.getEnv "CONFIG_FILE";
+  config =
+    if fileName != "" &amp;&amp; builtins.pathExists (builtins.toPath fileName)
+    then import (builtins.toPath fileName)
+    else { someSetting = false; }; <lineannotation># default configuration</lineannotation>
+in config.someSetting</programlisting>
+
+    (Note that <envar>CONFIG_FILE</envar> must be an absolute path for
+    this to work.)</para></listitem>
+
+  </varlistentry>
+
+
+  <!--
+  <varlistentry><term><function>relativise</function></term>
+
+    <listitem><para>TODO</para></listitem>
+
+  </varlistentry>
+  -->
+
+
+  <varlistentry><term><function>builtins.readFile</function>
+  <replaceable>path</replaceable></term>
+
+    <listitem><para>Return the contents of the file
+    <replaceable>path</replaceable> as a string.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>removeAttrs</function>
+  <replaceable>set</replaceable> <replaceable>list</replaceable></term>
+
+    <listitem><para>Remove the attributes listed in
+    <replaceable>list</replaceable> from
+    <replaceable>set</replaceable>.  The attributes don’t have to
+    exist in <replaceable>set</replaceable>. For instance,
+
+<screen>
+removeAttrs { x = 1; y = 2; z = 3; } [ "a" "x" "z" ]</screen>
+
+    evaluates to <literal>{ y = 2; }</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.stringLength</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Return the length of the string
+    <replaceable>e</replaceable>.  If <replaceable>e</replaceable> is
+    not a string, evaluation is aborted.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.sub</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Return the difference between the integers
+    <replaceable>e1</replaceable> and
+    <replaceable>e2</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.substring</function>
+  <replaceable>start</replaceable> <replaceable>len</replaceable>
+  <replaceable>s</replaceable></term>
+
+    <listitem><para>Return the substring of
+    <replaceable>s</replaceable> from character position
+    <replaceable>start</replaceable> (zero-based) up to but not
+    including <replaceable>start + len</replaceable>.  If
+    <replaceable>start</replaceable> is greater than the length of the
+    string, an empty string is returned, and if <replaceable>start +
+    len</replaceable> lies beyond the end of the string, only the
+    substring up to the end of the string is returned.
+    <replaceable>start</replaceable> must be
+    non-negative.  For example,
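+
+<programlisting>
+builtins.substring 0 3 "nixos"</programlisting>
+
+    evaluates to <literal>"nix"</literal>.</para></listitem>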
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.tail</function>
+  <replaceable>list</replaceable></term>
+
+    <listitem><para>Return the list without its first element, i.e.,
+    the second up to the last element; abort evaluation if the
+    argument isn’t a list or is an empty
+    list.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>throw</function>
+  <replaceable>s</replaceable></term>
+
+    <listitem><para>Throw an error message
+    <replaceable>s</replaceable>.  This usually aborts Nix expression
+    evaluation, but in <command>nix-env -qa</command> and other
+    commands that try to evaluate a set of derivations to get
+    information about those derivations, a derivation that throws an
+    error is silently skipped (which is not the case for
+    <function>abort</function>).</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry
+  xml:id='builtin-toFile'><term><function>builtins.toFile</function>
+  <replaceable>name</replaceable> <replaceable>s</replaceable></term>
+
+    <listitem><para>Store the string <replaceable>s</replaceable> in a
+    file in the Nix store and return its path.  The file has suffix
+    <replaceable>name</replaceable>.  This file can be used as an
+    input to derivations.  One application is to write builders
+    “inline”.  For instance, the following Nix expression combines
+    <xref linkend='ex-hello-nix' /> and <xref
+    linkend='ex-hello-builder' /> into one file:
+
+<programlisting>
+{ stdenv, fetchurl, perl }:
+
+stdenv.mkDerivation {
+  name = "hello-2.1.1";
+
+  builder = builtins.toFile "builder.sh" "
+    source $stdenv/setup
+
+    PATH=$perl/bin:$PATH
+
+    tar xvfz $src
+    cd hello-*
+    ./configure --prefix=$out
+    make
+    make install
+  ";
+
+  src = fetchurl {
+    url = http://nix.cs.uu.nl/dist/tarballs/hello-2.1.1.tar.gz;
+    md5 = "70c9ccf9fac07f762c24f2df2290784d";
+  };
+  inherit perl;
+}</programlisting>
+
+    </para>
+
+    <para>It is even possible for one file to refer to another, e.g.,
+
+<programlisting>
+  builder = let
+    configFile = builtins.toFile "foo.conf" "
+      # This is some dummy configuration file.
+      <replaceable>...</replaceable>
+    ";
+  in builtins.toFile "builder.sh" "
+    source $stdenv/setup
+    <replaceable>...</replaceable>
+    cp ${configFile} $out/etc/foo.conf
+  ";</programlisting>
+
+    Note that <literal>${configFile}</literal> is an antiquotation
+    (see <xref linkend='ssec-values' />), so the result of the
+    expression <literal>configFile</literal> (i.e., a path like
+    <filename>/nix/store/m7p7jfny445k...-foo.conf</filename>) will be
+    spliced into the resulting string.</para>
+
+    <para>It is however <emphasis>not</emphasis> allowed to have files
+    mutually referring to each other, like so:
+
+<programlisting>
+let
+  foo = builtins.toFile "foo" "...${bar}...";
+  bar = builtins.toFile "bar" "...${foo}...";
+in foo</programlisting>
+
+    This is not allowed because it would cause a cyclic dependency in
+    the computation of the cryptographic hashes for
+    <varname>foo</varname> and <varname>bar</varname>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.toJSON</function> <replaceable>e</replaceable></term>
+
+    <listitem><para>Return a string containing a JSON representation
+    of <replaceable>e</replaceable>.  Strings, integers, booleans,
+    nulls and lists are mapped to their JSON equivalents.  Sets
+    (except derivations) are represented as objects.  Derivations are
+    translated to a JSON string containing the derivation’s output
+    path.  Paths are copied to the store and represented as a JSON
+    string of the resulting store path.  For example,
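+
+<programlisting>
+builtins.toJSON { x = [ 1 2 3 ]; y = null; }</programlisting>
+
+    evaluates to a string containing the JSON text
+    <literal>{"x":[1,2,3],"y":null}</literal> (whitespace
+    aside).</para></listitem>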
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.toPath</function> <replaceable>s</replaceable></term>
+
+    <listitem><para>Convert the string value
+    <replaceable>s</replaceable> into a path value.  The string
+    <replaceable>s</replaceable> must represent an absolute path
+    (i.e., must start with <literal>/</literal>).  The path need not
+    exist.  The resulting path is canonicalised, e.g.,
+    <literal>builtins.toPath "//foo/xyzzy/../bar/"</literal> returns
+    <literal>/foo/bar</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>toString</function> <replaceable>e</replaceable></term>
+
+    <listitem><para>Convert the expression
+    <replaceable>e</replaceable> to a string.
+    <replaceable>e</replaceable> can be a string (in which case
+    <function>toString</function> is a no-op) or a path (e.g.,
+    <literal>toString /foo/bar</literal> yields
+    <literal>"/foo/bar"</literal>).</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id='builtin-toXML'><term><function>builtins.toXML</function> <replaceable>e</replaceable></term>
+
+    <listitem><para>Return a string containing an XML representation
+    of <replaceable>e</replaceable>.  The main application for
+    <function>toXML</function> is to communicate information with the
+    builder in a more structured format than plain environment
+    variables.</para>
+
+    <!-- TODO: more formally describe the schema of the XML
+    representation -->
+
+    <para><xref linkend='ex-toxml' /> shows an example where this is
+    the case.  The builder is supposed to generate the configuration
+    file for a <link xlink:href='http://jetty.mortbay.org/'>Jetty
+    servlet container</link>.  A servlet container contains a number
+    of servlets (<filename>*.war</filename> files) each exported under
+    a specific URI prefix.  So the servlet configuration is a list of
+    sets containing the <varname>path</varname> and
+    <varname>war</varname> of the servlet (<xref
+    linkend='ex-toxml-co-servlets' />).  This kind of information is
+    difficult to communicate with the normal method of passing
+    information through an environment variable, which just
+    concatenates everything together into a string (which might just
+    work in this case, but wouldn’t work if fields are optional or
+    contain lists themselves).  Instead the Nix expression is
+    converted to an XML representation with
+    <function>toXML</function>, which is unambiguous and can easily be
+    processed with the appropriate tools.  For instance, in the
+    example an XSLT stylesheet (<xref linkend='ex-toxml-co-stylesheet'
+    />) is applied to it (<xref linkend='ex-toxml-co-apply' />) to
+    generate the XML configuration file for the Jetty server.  The XML
+    representation produced from <xref linkend='ex-toxml-co-servlets'
+    /> by <function>toXML</function> is shown in <xref
+    linkend='ex-toxml-result' />.</para>
+
+    <para>Note that <xref linkend='ex-toxml' /> uses the <function
+    linkend='builtin-toFile'>toFile</function> built-in to write the
+    builder and the stylesheet “inline” in the Nix expression.  The
+    path of the stylesheet is spliced into the builder at
+    <literal>xsltproc ${stylesheet}
+    <replaceable>...</replaceable></literal>.</para>
+
+    <example xml:id='ex-toxml'><title>Passing information to a builder
+    using <function>toXML</function></title>
+
+<programlisting><![CDATA[
+{ stdenv, fetchurl, libxslt, jira, uberwiki }:
+
+stdenv.mkDerivation (rec {
+  name = "web-server";
+
+  buildInputs = [ libxslt ];
+
+  builder = builtins.toFile "builder.sh" "
+    source $stdenv/setup
+    mkdir $out
+    echo $servlets | xsltproc ${stylesheet} - > $out/server-conf.xml]]> <co xml:id='ex-toxml-co-apply' /> <![CDATA[
+  ";
+
+  stylesheet = builtins.toFile "stylesheet.xsl"]]> <co xml:id='ex-toxml-co-stylesheet' /> <![CDATA[
+   "<?xml version='1.0' encoding='UTF-8'?>
+    <xsl:stylesheet xmlns:xsl='http://www.w3.org/1999/XSL/Transform' version='1.0'>
+      <xsl:template match='/'>
+        <Configure>
+          <xsl:for-each select='/expr/list/attrs'>
+            <Call name='addWebApplication'>
+              <Arg><xsl:value-of select=\"attr[@name = 'path']/string/@value\" /></Arg>
+              <Arg><xsl:value-of select=\"attr[@name = 'war']/path/@value\" /></Arg>
+            </Call>
+          </xsl:for-each>
+        </Configure>
+      </xsl:template>
+    </xsl:stylesheet>
+  ";
+
+  servlets = builtins.toXML []]> <co xml:id='ex-toxml-co-servlets' /> <![CDATA[
+    { path = "/bugtracker"; war = jira + "/lib/atlassian-jira.war"; }
+    { path = "/wiki"; war = uberwiki + "/uberwiki.war"; }
+  ];
+})]]></programlisting>
+
+    </example>
+
+    <example xml:id='ex-toxml-result'><title>XML representation produced by
+    <function>toXML</function></title>
+
+<programlisting><![CDATA[<?xml version='1.0' encoding='utf-8'?>
+<expr>
+  <list>
+    <attrs>
+      <attr name="path">
+        <string value="/bugtracker" />
+      </attr>
+      <attr name="war">
+        <path value="/nix/store/d1jh9pasa7k2...-jira/lib/atlassian-jira.war" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="path">
+        <string value="/wiki" />
+      </attr>
+      <attr name="war">
+        <path value="/nix/store/y6423b1yi4sx...-uberwiki/uberwiki.war" />
+      </attr>
+    </attrs>
+  </list>
+</expr>]]></programlisting>
+
+    </example>
+
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.trace</function>
+  <replaceable>e1</replaceable> <replaceable>e2</replaceable></term>
+
+    <listitem><para>Evaluate <replaceable>e1</replaceable> and print its
+    abstract syntax representation on standard error.  Then return
+    <replaceable>e2</replaceable>.  This function is useful for
+    debugging.  For example,
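+
+<programlisting>
+builtins.trace "computing the answer" (40 + 2)</programlisting>
+
+    prints the first argument on standard error and evaluates to
+    <literal>42</literal>.</para></listitem>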
+
+  </varlistentry>
+
+
+  <varlistentry><term><function>builtins.typeOf</function>
+  <replaceable>e</replaceable></term>
+
+    <listitem><para>Return a string representing the type of the value
+    <replaceable>e</replaceable>, namely <literal>"int"</literal>,
+    <literal>"bool"</literal>, <literal>"string"</literal>,
+    <literal>"path"</literal>, <literal>"null"</literal>,
+    <literal>"set"</literal>, <literal>"list"</literal> or
+    <literal>"lambda"</literal>.  For example,
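+
+<programlisting>
+builtins.typeOf [ 1 2 ]</programlisting>
+
+    evaluates to <literal>"list"</literal>.</para></listitem>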
+
+  </varlistentry>
+
+
+</variablelist>
+
+
+</section>
diff --git a/doc/manual/conf-file.xml b/doc/manual/conf-file.xml
new file mode 100644
index 000000000000..e420d1ed6c34
--- /dev/null
+++ b/doc/manual/conf-file.xml
@@ -0,0 +1,538 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-conf-file">
+
+<refmeta>
+  <refentrytitle>nix.conf</refentrytitle>
+  <manvolnum>5</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix.conf</refname>
+  <refpurpose>Nix configuration file</refpurpose>
+</refnamediv>
+
+<refsection><title>Description</title>
+
+<para>A number of persistent settings of Nix are stored in the file
+<filename><replaceable>sysconfdir</replaceable>/nix/nix.conf</filename>.
+This file is a list of <literal><replaceable>name</replaceable> =
+<replaceable>value</replaceable></literal> pairs, one per line.
+Comments start with a <literal>#</literal> character.  Here is an example
+configuration file:</para>
+
+<programlisting>
+gc-keep-outputs = true       # Nice for developers
+gc-keep-derivations = true   # Idem
+env-keep-derivations = false
+</programlisting>
+
+<para>You can override settings using the <option>--option</option>
+flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
+
+<para>The following settings are currently available:
+
+<variablelist>
+
+
+  <varlistentry xml:id="conf-gc-keep-outputs"><term><literal>gc-keep-outputs</literal></term>
+
+    <listitem><para>If <literal>true</literal>, the garbage collector
+    will keep the outputs of non-garbage derivations.  If
+    <literal>false</literal> (default), outputs will be deleted unless
+    they are GC roots themselves (or reachable from other roots).</para>
+
+    <para>In general, outputs must be registered as roots separately.
+    However, even if the output of a derivation is registered as a
+    root, the collector will still delete store paths that are used
+    only at build time (e.g., the C compiler, or source tarballs
+    downloaded from the network).  To prevent it from doing so, set
+    this option to <literal>true</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-gc-keep-derivations"><term><literal>gc-keep-derivations</literal></term>
+
+    <listitem><para>If <literal>true</literal> (default), the garbage
+    collector will keep the derivations from which non-garbage store
+    paths were built.  If <literal>false</literal>, they will be
+    deleted unless explicitly registered as a root (or reachable from
+    other roots).</para>
+
+    <para>Keeping derivations around is useful for querying and
+    traceability (e.g., it allows you to ask with what dependencies or
+    options a store path was built), so by default this option is on.
+    Turn it off to save a bit of disk space (or a lot if
+    <literal>gc-keep-outputs</literal> is also turned on).</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>env-keep-derivations</literal></term>
+
+    <listitem><para>If <literal>false</literal> (default), derivations
+    are not stored in Nix user environments.  That is, the derivation
+    and any build-time-only dependencies may be garbage-collected.</para>
+
+    <para>If <literal>true</literal>, when you add a Nix derivation to
+    a user environment, the path of the derivation is stored in the
+    user environment.  Thus, the derivation will not be
+    garbage-collected until the user environment generation is deleted
+    (<command>nix-env --delete-generations</command>).  To prevent
+    build-time-only dependencies from being collected, you should also
+    turn on <literal>gc-keep-outputs</literal>.</para>
+
+    <para>The difference between this option and
+    <literal>gc-keep-derivations</literal> is that this one is
+    “sticky”: it applies to any user environment created while this
+    option was enabled, while <literal>gc-keep-derivations</literal>
+    only applies at the moment the garbage collector is
+    run.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-build-max-jobs"><term><literal>build-max-jobs</literal></term>
+
+    <listitem><para>This option defines the maximum number of jobs
+    that Nix will try to build in parallel.  The default is
+    <literal>1</literal>.  You should generally set it to the number
+    of CPUs in your system (e.g., <literal>2</literal> on an Athlon 64
+    X2).  It can be overridden using the <option
+    linkend='opt-max-jobs'>--max-jobs</option> (<option>-j</option>)
+    command line switch.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-build-cores"><term><literal>build-cores</literal></term>
+
+    <listitem><para>Sets the value of the
+    <envar>NIX_BUILD_CORES</envar> environment variable in the
+    invocation of builders.  Builders can use this variable at their
+    discretion to control the maximum amount of parallelism.  For
+    instance, in Nixpkgs, if the derivation attribute
+    <varname>enableParallelBuilding</varname> is set to
+    <literal>true</literal>, the builder passes the
+    <option>-j<replaceable>N</replaceable></option> flag to GNU Make.
+    It can be overridden using the <option
+    linkend='opt-cores'>--cores</option> command line switch and
+    defaults to <literal>1</literal>.  The value <literal>0</literal>
+    means that the builder should use all available CPU cores in the
+    system.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-build-max-silent-time"><term><literal>build-max-silent-time</literal></term>
+
+    <listitem>
+
+      <para>This option defines the maximum number of seconds that a
+      builder can go without producing any data on standard output or
+      standard error.  This is useful (for instance in an automated
+      build system) to catch builds that are stuck in an infinite
+      loop, or to catch remote builds that are hanging due to network
+      problems.  It can be overridden using the <option
+      linkend="opt-max-silent-time">--max-silent-time</option> command
+      line switch.</para>
+
+      <para>The value <literal>0</literal> means that there is no
+      timeout.  This is also the default.</para>
+
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-build-timeout"><term><literal>build-timeout</literal></term>
+
+    <listitem>
+
+      <para>This option defines the maximum number of seconds that a
+      builder can run.  This is useful (for instance in an automated
+      build system) to catch builds that are stuck in an infinite loop
+      but keep writing to their standard output or standard error.  It
+      can be overridden using the <option
+      linkend="opt-timeout">--timeout</option> command line
+      switch.</para>
+
+      <para>The value <literal>0</literal> means that there is no
+      timeout.  This is also the default.</para>
+
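+      <para>For example, to abort builds that produce no output for an
+      hour, or that run for more than four hours in total, you could use
+      settings along these lines (illustrative values):</para>
+
+<programlisting>
+build-max-silent-time = 3600
+build-timeout = 14400
+</programlisting>
+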
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-build-max-log-size"><term><literal>build-max-log-size</literal></term>
+
+    <listitem>
+
+      <para>This option defines the maximum number of bytes that a
+      builder can write to its stdout/stderr.  If the builder exceeds
+      this limit, it’s killed.  A value of <literal>0</literal> (the
+      default) means that there is no limit.</para>
+
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-build-users-group"><term><literal>build-users-group</literal></term>
+
+    <listitem><para>This option specifies the Unix group containing
+    the Nix build user accounts.  In multi-user Nix installations,
+    builds should not be performed by the Nix account since that would
+    allow users to arbitrarily modify the Nix store and database by
+    supplying specially crafted builders; and they cannot be performed
+    by the calling user since that would allow him/her to influence
+    the build result.</para>
+
+    <para>Therefore, if this option is non-empty and specifies a valid
+    group, builds will be performed under the user accounts that are a
+    member of the group specified here (as listed in
+    <filename>/etc/group</filename>).  Those user accounts should not
+    be used for any other purpose!</para>
+
+    <para>Nix will never run two builds under the same user account at
+    the same time.  This is to prevent an obvious security hole: a
+    malicious user writing a Nix expression that modifies the build
+    result of a legitimate Nix expression being built by another user.
+    Therefore it is good to have as many Nix build user accounts as
+    you can spare.  (Remember: uids are cheap.)</para>
+
+    <para>The build users should have permission to create files in
+    the Nix store, but not delete them.  Therefore,
+    <filename>/nix/store</filename> should be owned by the Nix
+    account, its group should be the group specified here, and its
+    mode should be <literal>1775</literal>.</para>
+
+    <para>If the build users group is empty, builds will be performed
+    under the uid of the Nix process (that is, the uid of the caller
+    if <envar>NIX_REMOTE</envar> is empty, the uid under which the Nix
+    daemon runs if <envar>NIX_REMOTE</envar> is
+    <literal>daemon</literal>).  Obviously, this should not be used in
+    multi-user settings with untrusted users.</para>
+
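+    <para>A typical multi-user setup might therefore look like this (a
+    sketch, assuming the store is owned by <systemitem
+    class="username">root</systemitem> and the build users group is
+    called <literal>nixbld</literal>, as in the installation
+    instructions):</para>
+
+<programlisting>
+build-users-group = nixbld
+</programlisting>
+
+<screen>
+$ chown root:nixbld /nix/store
+$ chmod 1775 /nix/store</screen>
+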
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>build-use-chroot</literal></term>
+
+    <listitem><para>If set to <literal>true</literal>, builds will be
+    performed in a <emphasis>chroot environment</emphasis>, i.e., the
+    build will be isolated from the normal file system hierarchy and
+    will only see its dependencies in the Nix store, the temporary
+    build directory, private versions of <filename>/proc</filename>,
+    <filename>/dev</filename>, <filename>/dev/shm</filename> and
+    <filename>/dev/pts</filename>, and the paths configured with the
+    <link linkend='conf-build-chroot-dirs'><literal>build-chroot-dirs</literal>
+    option</link>. This is useful to prevent undeclared dependencies
+    on files in directories such as
+    <filename>/usr/bin</filename>.</para>
+
+    <para>The use of a chroot requires that Nix is run as root (so you
+    should use the <link linkend='conf-build-users-group'>“build
+    users” feature</link> to perform the actual builds under different
+    users than root).  Currently, chroot builds only work on Linux
+    because Nix uses “bind mounts” to make the Nix store and other
+    directories available inside the chroot.</para>
+
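+    <para>For instance, the following (illustrative) settings enable
+    chroot builds and bind-mount <filename>/nix-bin</filename> as
+    <filename>/bin</filename> inside the chroot, using the syntax
+    described under <literal>build-chroot-dirs</literal> below (to
+    extend rather than replace the default list, use
+    <literal>build-extra-chroot-dirs</literal> instead):</para>
+
+<programlisting>
+build-use-chroot = true
+build-chroot-dirs = /bin=/nix-bin
+</programlisting>
+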
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-build-chroot-dirs"><term><literal>build-chroot-dirs</literal></term>
+
+    <listitem><para>A list of paths bind-mounted into Nix chroot
+    environments.  Contrary to what the name suggests, the specified
+    paths do not have to be directories; you can bind-mount other
+    types of files as well.  You can use the syntax
+    <literal><replaceable>target</replaceable>=<replaceable>source</replaceable></literal>
+    to mount a path in a different location in the chroot; for
+    instance, <literal>/bin=/nix-bin</literal> will mount the path
+    <literal>/nix-bin</literal> as <literal>/bin</literal> inside the
+    chroot.</para>
+
+    <para>Depending on how Nix was built, the default value for this option
+    may be empty or provide <filename>/bin/sh</filename> as a
+    bind-mount of <command>bash</command>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-build-extra-chroot-dirs"><term><literal>build-extra-chroot-dirs</literal></term>
+
+    <listitem><para>A list of additional paths appended to
+    <option>build-chroot-dirs</option>. Useful if you want to extend
+    its default value.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>build-use-substitutes</literal></term>
+
+    <listitem><para>If set to <literal>true</literal> (default), Nix
+    will use binary substitutes if available.  This option can be
+    disabled to force building from source.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>build-fallback</literal></term>
+
+    <listitem><para>If set to <literal>true</literal>, Nix will fall
+    back to building from source if a binary substitute fails.  This
+    is equivalent to the <option>--fallback</option> flag.  The
+    default is <literal>false</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>build-cache-failures</literal></term>
+
+    <listitem><para>If set to <literal>true</literal>, Nix will
+    “cache” build failures, meaning that it will remember (in its
+    database) that a derivation previously failed.  If you then try to
+    build the derivation again, Nix will immediately fail rather than
+    perform the build again.  Failures in fixed-output derivations
+    (such as <function>fetchurl</function> calls) are never cached.
+    The “failed” status of a derivation can be cleared using
+    <command>nix-store --clear-failed-paths</command>.  By default,
+    failure caching is disabled.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>build-keep-log</literal></term>
+
+    <listitem><para>If set to <literal>true</literal> (the default),
+    Nix will write the build log of a derivation (i.e. the standard
+    output and error of its builder) to the directory
+    <filename>/nix/var/log/nix/drvs</filename>.  The build log can be
+    retrieved using the command <command>nix-store -l
+    <replaceable>path</replaceable></command>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>build-compress-log</literal></term>
+
+    <listitem><para>If set to <literal>true</literal> (the default),
+    build logs written to <filename>/nix/var/log/nix/drvs</filename>
+    will be compressed on the fly using bzip2.  Otherwise, they will
+    not be compressed.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>use-binary-caches</literal></term>
+
+    <listitem><para>If set to <literal>true</literal> (the default),
+    Nix will check the binary caches specified by
+    <option>binary-caches</option> and related options to obtain
+    binary substitutes.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>binary-caches</literal></term>
+
+    <listitem><para>A list of URLs of binary caches, separated by
+    whitespace.  The default is
+    <literal>http://cache.nixos.org</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>binary-caches-files</literal></term>
+
+    <listitem><para>A list of names of files that will be read to
+    obtain additional binary cache URLs.  The default is
+    <literal>/nix/var/nix/profiles/per-user/<replaceable>username</replaceable>/channels/binary-caches/*</literal>.
+    Note that when you’re using the Nix daemon,
+    <replaceable>username</replaceable> is always equal to
+    <literal>root</literal>, so Nix will only use the binary caches
+    provided by the channels installed by root.  Do not set this
+    option to read files created by untrusted users!</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>trusted-binary-caches</literal></term>
+
+    <listitem><para>A list of URLs of binary caches, separated by
+    whitespace.  These are not used by default, but can be enabled by
+    users of the Nix daemon by specifying <literal>--option
+    binary-caches <replaceable>urls</replaceable></literal> on the
+    command line.  Unprivileged users are only allowed to pass a
+    subset of the URLs listed in <literal>binary-caches</literal> and
+    <literal>trusted-binary-caches</literal>.</para>
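+
+    <para>For example, if the administrator has added
+    <literal>http://cache.example.org</literal> (an illustrative URL) to
+    <literal>trusted-binary-caches</literal>, an unprivileged user of
+    the Nix daemon can enable it for a single invocation like
+    this:</para>
+
+<screen>
+$ nix-build --option binary-caches "http://cache.nixos.org http://cache.example.org" release.nix</screen>
+
+    </listitem>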
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>extra-binary-caches</literal></term>
+
+    <listitem><para>Additional binary caches appended to those
+    specified in <option>binary-caches</option> and
+    <option>binary-caches-files</option>.  When used by unprivileged
+    users, untrusted binary caches (i.e. those not listed in
+    <option>trusted-binary-caches</option>) are silently
+    ignored.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>binary-caches-parallel-connections</literal></term>
+
+    <listitem><para>The maximum number of parallel HTTP connections
+    used by the binary cache substituter to get NAR info files.  This
+    number should be high to minimise latency.  It defaults to
+    150.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>force-manifest</literal></term>
+
+    <listitem><para>If this option is set to <literal>false</literal>
+    (default) and a Nix channel provides both a manifest and a binary
+    cache, only the binary cache will be used.  If set to
+    <literal>true</literal>, the manifest will be fetched as well.
+    This is useful if you want to use binary patches (which are
+    currently not supported by binary caches).</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>system</literal></term>
+
+    <listitem><para>This option specifies the canonical Nix system
+    name of the current installation, such as
+    <literal>i686-linux</literal> or
+    <literal>powerpc-darwin</literal>.  Nix can only build derivations
+    whose <literal>system</literal> attribute equals the value
+    specified here.  In general, it never makes sense to change this
+    value from its default, since doing so merely lets you ‘lie’ about
+    the platform you are building on (e.g., perform a Mac OS build on a
+    Linux machine; the result would obviously be wrong).  Changing it
+    only makes sense if the Nix binaries can run on multiple platforms,
+    e.g., ‘universal binaries’ that run on <literal>powerpc-darwin</literal> and
+    <literal>i686-darwin</literal>.</para>
+
+    <para>It defaults to the canonical Nix system name detected by
+    <filename>configure</filename> at build time.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>fsync-metadata</literal></term>
+
+    <listitem><para>If set to <literal>true</literal>, changes to the
+    Nix store metadata (in <filename>/nix/var/nix/db</filename>) are
+    synchronously flushed to disk.  This improves robustness in case
+    of system crashes, but reduces performance.  The default is
+    <literal>true</literal>.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><literal>auto-optimise-store</literal></term>
+
+    <listitem><para>If set to <literal>true</literal>, Nix
+    automatically detects files in the store that have identical
+    contents, and replaces them with hard links to a single copy.
+    This saves disk space.  If set to <literal>false</literal> (the
+    default), you can still run <command>nix-store
+    --optimise</command> to get rid of duplicate
+    files.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-connect-timeout"><term><literal>connect-timeout</literal></term>
+
+    <listitem>
+
+      <para>The timeout (in seconds) for establishing connections in
+      the binary cache substituter.  It corresponds to
+      <command>curl</command>’s <option>--connect-timeout</option>
+      option.</para>
+
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-log-servers"><term><literal>log-servers</literal></term>
+
+    <listitem>
+
+      <para>A list of URL prefixes (such as
+      <literal>http://hydra.nixos.org/log</literal>) from which
+      <command>nix-store -l</command> will try to fetch build logs if
+      they’re not available locally.</para>
+
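+      <para>For example (using the server mentioned above):</para>
+
+<programlisting>
+log-servers = http://hydra.nixos.org/log
+</programlisting>
+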
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-trusted-users"><term><literal>trusted-users</literal></term>
+
+    <listitem>
+
+      <para>A list of names of users (separated by whitespace) that
+      have additional rights when connecting to the Nix daemon, such
+      as the ability to specify additional binary caches, or to import
+      unsigned NARs. You can also specify groups by prefixing them
+      with <literal>@</literal>; for instance,
+      <literal>@wheel</literal> means all users in the
+      <literal>wheel</literal> group. The default is
+      <literal>root</literal>.</para>
+
+      <warning><para>The users listed here have the ability to
+      compromise the security of a multi-user Nix store. For instance,
+      they could install Trojan horses subsequently executed by other
+      users. So you should consider carefully whether to add users to
+      this list.</para></warning>
+
+    </listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="conf-allowed-users"><term><literal>allowed-users</literal></term>
+
+    <listitem>
+
+      <para>A list of names of users (separated by whitespace) that
+      are allowed to connect to the Nix daemon. As with the
+      <option>trusted-users</option> option, you can specify groups by
+      prefixing them with <literal>@</literal>. Also, you can allow
+      all users by specifying <literal>*</literal>. The default is
+      <literal>*</literal>.</para>
+
+      <para>Note that trusted users are always allowed to connect.</para>
+
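+      <para>For example, the following (illustrative) settings allow
+      only members of the <literal>wheel</literal> group and the user
+      <literal>alice</literal> to connect to the daemon, while giving
+      <literal>root</literal> and the <literal>wheel</literal> group the
+      additional rights described under
+      <option>trusted-users</option>:</para>
+
+<programlisting>
+trusted-users = root @wheel
+allowed-users = @wheel alice
+</programlisting>
+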
+    </listitem>
+
+  </varlistentry>
+
+
+</variablelist>
+
+</para>
+
+</refsection>
+
+</refentry>
diff --git a/doc/manual/env-common.xml b/doc/manual/env-common.xml
new file mode 100644
index 000000000000..91a3e9e32694
--- /dev/null
+++ b/doc/manual/env-common.xml
@@ -0,0 +1,338 @@
+<section xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xml:id="sec-common-env">
+
+<title>Common environment variables</title>
+
+
+<para>Most Nix commands interpret the following environment variables:</para>
+
+<variablelist xml:id="env-common">
+
+  
+<varlistentry><term><envar>NIX_PATH</envar></term>
+
+  <listitem>
+
+    <para>A colon-separated list of directories used to look up Nix
+    expressions enclosed in angle brackets (i.e.,
+    <literal>&lt;<replaceable>path</replaceable>></literal>).  For
+    instance, the value
+
+    <screen>
+/home/eelco/Dev:/etc/nixos</screen>
+
+    will cause Nix to look for paths relative to
+    <filename>/home/eelco/Dev</filename> and
+    <filename>/etc/nixos</filename>, in that order.  It is also
+    possible to match paths against a prefix.  For example, the value
+    
+    <screen>
+nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos</screen>
+
+    will cause Nix to search for
+    <literal>&lt;nixpkgs/<replaceable>path</replaceable>></literal> in
+    <filename>/home/eelco/Dev/nixpkgs-branch/<replaceable>path</replaceable></filename>
+    and
+    <filename>/etc/nixos/nixpkgs/<replaceable>path</replaceable></filename>.
+    </para>
+
+    <para>The search path can be extended using the
+    <option>-I</option> option, which takes precedence over
+    <envar>NIX_PATH</envar>.</para></listitem>
+
+</varlistentry>
+    
+
+<varlistentry><term><envar>NIX_IGNORE_SYMLINK_STORE</envar></term>
+
+  <listitem>
+
+  <para>Normally, the Nix store directory (typically
+  <filename>/nix/store</filename>) is not allowed to contain any
+  symlink components.  This is to prevent “impure” builds.  Builders
+  sometimes “canonicalise” paths by resolving all symlink components.
+  Thus, builds on different machines (with
+  <filename>/nix/store</filename> resolving to different locations)
+  could yield different results.  This is generally not a problem,
+  except when builds are deployed to machines where
+  <filename>/nix/store</filename> resolves differently.  If you are
+  sure that you’re not going to do that, you can set
+  <envar>NIX_IGNORE_SYMLINK_STORE</envar> to <literal>1</literal>.</para>
+
+  <para>Note that if you’re symlinking the Nix store so that you can
+  put it on another file system than the root file system, on Linux
+  you’re better off using <literal>bind</literal> mount points, e.g.,
+
+  <screen>
+$ mkdir /nix   
+$ mount -o bind /mnt/otherdisk/nix /nix</screen>
+
+  Consult the <citerefentry><refentrytitle>mount</refentrytitle>
+  <manvolnum>8</manvolnum></citerefentry> manual page for details.</para>
+
+  </listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><envar>NIX_STORE_DIR</envar></term>
+
+  <listitem><para>Overrides the location of the Nix store (default
+  <filename><replaceable>prefix</replaceable>/store</filename>).</para></listitem>
+  
+</varlistentry>
+
+
+<varlistentry><term><envar>NIX_DATA_DIR</envar></term>
+
+  <listitem><para>Overrides the location of the Nix static data
+  directory (default
+  <filename><replaceable>prefix</replaceable>/share</filename>).</para></listitem>
+  
+</varlistentry>
+
+
+<varlistentry><term><envar>NIX_LOG_DIR</envar></term>
+
+  <listitem><para>Overrides the location of the Nix log directory
+  (default <filename><replaceable>prefix</replaceable>/log/nix</filename>).</para></listitem>
+  
+</varlistentry>
+
+
+<varlistentry><term><envar>NIX_STATE_DIR</envar></term>
+
+  <listitem><para>Overrides the location of the Nix state directory
+  (default <filename><replaceable>prefix</replaceable>/var/nix</filename>).</para></listitem>
+  
+</varlistentry>
+
+
+<varlistentry><term><envar>NIX_DB_DIR</envar></term>
+
+  <listitem><para>Overrides the location of the Nix database (default
+  <filename><replaceable>$NIX_STATE_DIR</replaceable>/db</filename>, i.e.,
+  <filename><replaceable>prefix</replaceable>/var/nix/db</filename>).</para></listitem>
+  
+</varlistentry>
+
+
+<varlistentry><term><envar>NIX_CONF_DIR</envar></term>
+
+  <listitem><para>Overrides the location of the Nix configuration
+  directory (default
+  <filename><replaceable>prefix</replaceable>/etc/nix</filename>).</para></listitem>
+  
+</varlistentry>
+
+
+<varlistentry><term><envar>TMPDIR</envar></term>
+
+  <listitem><para>Use the specified directory to store temporary
+  files.  In particular, this includes temporary build directories;
+  these can take up substantial amounts of disk space.  The default is
+  <filename>/tmp</filename>.</para></listitem>
+  
+</varlistentry>
+
+
+<varlistentry xml:id="envar-build-hook"><term><envar>NIX_BUILD_HOOK</envar></term>
+
+  <listitem>
+
+  <para>Specifies the location of the <emphasis>build hook</emphasis>,
+  which is a program (typically some script) that Nix will call
+  whenever it wants to build a derivation.  This is used to implement
+  distributed builds<phrase condition="manual"> (see <xref
+  linkend="chap-distributed-builds" />)</phrase>.</para>
+
+  <!--
+  The protocol by
+  which the calling Nix process and the build hook communicate is as
+  follows.
+
+  <para>The build hook is called with the following command-line
+  arguments:
+
+  <orderedlist>
+
+    <listitem><para>A boolean value <literal>0</literal> or
+    <literal>1</literal> specifying whether Nix can locally execute
+    more builds, as per the <link
+    linkend="opt-max-jobs"><option>- -max-jobs</option> option</link>.
+    The purpose of this argument is to allow the hook to not have to
+    maintain bookkeeping for the local machine.</para></listitem>
+
+    <listitem><para>The Nix platform identifier for the local machine
+    (e.g., <literal>i686-linux</literal>).</para></listitem>
+
+    <listitem><para>The Nix platform identifier for the derivation,
+    i.e., its <link linkend="attr-system"><varname>system</varname>
+    attribute</link>.</para></listitem>
+
+    <listitem><para>The store path of the derivation.</para></listitem>
+
+  </orderedlist>
+
+  </para>
+
+  <para>On the basis of this information, and whatever persistent
+  state the build hook keeps about other machines and their current
+  load, it has to decide what to do with the build.  It should print
+  out on standard error one of the following responses (terminated by
+  a newline, <literal>"\n"</literal>):
+
+  <variablelist>
+
+    <varlistentry><term><literal># decline</literal></term>
+
+      <listitem><para>The build hook is not willing or able to perform
+      the build; the calling Nix process should do the build itself,
+      if possible.</para></listitem>
+
+    </varlistentry>
+
+    <varlistentry><term><literal># postpone</literal></term>
+
+      <listitem><para>The build hook cannot perform the build now, but
+      can do so in the future (e.g., because all available build slots
+      on remote machines are in use).  The calling Nix process should
+      postpone this build until at least one currently running build
+      has terminated.</para></listitem>
+
+    </varlistentry>
+
+    <varlistentry><term><literal># accept</literal></term>
+
+      <listitem><para>The build hook has accepted the
+      build.</para></listitem>
+
+    </varlistentry>
+
+  </variablelist>
+
+  </para>
+
+  <para>After sending <literal># accept</literal>, the hook should
+  read one line from standard input, which will be the string
+  <literal>okay</literal>.  It can then proceed with the build.
+  Before sending <literal>okay</literal>, Nix will store in the hook’s
+  current directory a number of text files that contain information
+  about the derivation:
+
+  <variablelist>
+
+    <varlistentry><term><filename>inputs</filename></term>
+
+      <listitem><para>The set of store paths that are inputs to the
+      build process (one per line).  These have to be copied
+      <emphasis>to</emphasis> the remote machine (in addition to the
+      store derivation itself).</para></listitem>
+
+    </varlistentry>
+  
+    <varlistentry><term><filename>outputs</filename></term>
+
+      <listitem><para>The set of store paths that are outputs of the
+      derivation (one per line).  These have to be copied
+      <emphasis>from</emphasis> the remote machine if the build
+      succeeds.</para></listitem>
+
+    </varlistentry>
+
+    <varlistentry><term><filename>references</filename></term>
+
+      <listitem><para>The reference graph of the inputs, in the format
+      accepted by the command <command>nix-store
+      - -register-validity</command>.  It is necessary to run this
+      command on the remote machine after copying the inputs to inform
+      Nix on the remote machine that the inputs are valid
+      paths.</para></listitem>
+
+    </varlistentry>
+
+  </variablelist>
+
+  </para>
+
+  <para>The hook should copy the inputs to the remote machine,
+  register the validity of the inputs, perform the remote build, and
+  copy the outputs back to the local machine.  An exit code other than
+  <literal>0</literal> indicates that the hook has failed.  An exit
+  code equal to 100 means that the remote build failed (as opposed to,
+  e.g., a network error).</para>
+  -->
+
+  </listitem>
+
+
+</varlistentry>
+
+
+<varlistentry xml:id="envar-remote"><term><envar>NIX_REMOTE</envar></term>
+
+  <listitem><para>This variable should be set to
+  <literal>daemon</literal> if you want to use the Nix daemon to
+  execute Nix operations. This is necessary in <link
+  linkend="ssec-multi-user">multi-user Nix installations</link>.
+  Otherwise, it should be left unset.</para></listitem>
+
+</varlistentry>
+
+    
+<varlistentry xml:id="envar-other-stores"><term><envar>NIX_OTHER_STORES</envar></term>
+
+  <listitem><para>This variable contains the paths of remote Nix
+  installations from which packages can be copied, separated by colons.
+  <phrase condition="manual">See <xref linkend="sec-sharing-packages"
+  /> for details.</phrase>  Each path should be the
+  <filename>/nix</filename> directory of a remote Nix installation
+  (i.e., not the <filename>/nix/store</filename> directory).  The
+  paths are subject to globbing, so you can set it to something like
+  <literal>/var/run/nix/remote-stores/*/nix</literal> and mount
+  multiple remote filesystems in
+  <literal>/var/run/nix/remote-stores</literal>.</para>
+
+  <para>Note that if you’re building through the <link
+  linkend="sec-nix-daemon">Nix daemon</link>, the only setting for
+  this variable that matters is the one that the
+  <command>nix-daemon</command> process uses.  So if you want to
+  change it, you have to restart the daemon.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><envar>NIX_SHOW_STATS</envar></term>
+
+  <listitem><para>If set to <literal>1</literal>, Nix will print some
+  evaluation statistics, such as the number of values
+  allocated.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><envar>NIX_COUNT_CALLS</envar></term>
+
+  <listitem><para>If set to <literal>1</literal>, Nix will print how
+  often functions were called during Nix expression evaluation.  This
+  is useful for profiling your Nix expressions.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><envar>GC_INITIAL_HEAP_SIZE</envar></term>
+
+  <listitem><para>If Nix has been configured to use the Boehm garbage
+  collector, this variable sets the initial size of the heap in bytes.
+  It defaults to 384 MiB.  Setting it to a low value reduces memory
+  consumption, but will increase runtime due to the overhead of
+  garbage collection.</para></listitem>
+
+</varlistentry>
+
+    
+</variablelist>
+
+
+</section>
diff --git a/doc/manual/figures/user-environments.png b/doc/manual/figures/user-environments.png
new file mode 100644
index 000000000000..1f781cf23ce5
--- /dev/null
+++ b/doc/manual/figures/user-environments.png
Binary files differ
diff --git a/doc/manual/figures/user-environments.sxd b/doc/manual/figures/user-environments.sxd
new file mode 100644
index 000000000000..bc661b6406fb
--- /dev/null
+++ b/doc/manual/figures/user-environments.sxd
Binary files differ
diff --git a/doc/manual/glossary.xml b/doc/manual/glossary.xml
new file mode 100644
index 000000000000..d74940c90b30
--- /dev/null
+++ b/doc/manual/glossary.xml
@@ -0,0 +1,179 @@
+<appendix xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink">
+
+<title>Glossary</title>
+
+
+<glosslist>
+
+
+<glossentry xml:id="gloss-derivation"><glossterm>derivation</glossterm>
+
+  <glossdef><para>A description of a build action.  The result of a
+  derivation is a store object.  Derivations are typically specified
+  in Nix expressions using the <link
+  linkend="ssec-derivation"><function>derivation</function>
+  primitive</link>.  These are translated into low-level
+  <emphasis>store derivations</emphasis> (implicitly by
+  <command>nix-env</command> and <command>nix-build</command>, or
+  explicitly by <command>nix-instantiate</command>).</para></glossdef>
+
+</glossentry>
+
+
+<glossentry><glossterm>store</glossterm>
+
+  <glossdef><para>The location in the file system where store objects
+  live.  Typically <filename>/nix/store</filename>.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry><glossterm>store path</glossterm>
+
+  <glossdef><para>The location in the file system of a store object,
+  i.e., an immediate child of the Nix store
+  directory.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry><glossterm>store object</glossterm>
+
+  <glossdef><para>A file that is an immediate child of the Nix store
+  directory.  These can be regular files, but also entire directory
+  trees.  Store objects can be sources (objects copied from outside of
+  the store), derivation outputs (objects produced by running a build
+  action), or derivations (files describing a build
+  action).</para></glossdef>
+
+</glossentry>
+
+
+<glossentry xml:id="gloss-substitute"><glossterm>substitute</glossterm>
+
+  <glossdef><para>A substitute is a command invocation stored in the
+  Nix database that describes how to build a store object, bypassing
+  normal the build mechanism (i.e., derivations).  Typically, the
+  substitute builds the store object by downloading a pre-built
+  version of the store object from some server.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry><glossterm>purity</glossterm>
+
+  <glossdef><para>The assumption that equal Nix derivations when run
+  always produce the same output.  This cannot be guaranteed in
+  general (e.g., a builder can rely on external inputs such as the
+  network or the system time) but the Nix model assumes
+  it.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry><glossterm>Nix expression</glossterm>
+
+  <glossdef><para>A high-level description of software packages and
+  compositions thereof.  Deploying software using Nix entails writing
+  Nix expressions for your packages.  Nix expressions are translated
+  to derivations that are stored in the Nix store.  These derivations
+  can then be built.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry xml:id="gloss-reference"><glossterm>reference</glossterm>
+
+  <glossdef><para>A store path <varname>P</varname> is said to have a
+  reference to a store path <varname>Q</varname> if the store object
+  at <varname>P</varname> contains the path <varname>Q</varname>
+  somewhere.  This implies that an execution involving
+  <varname>P</varname> potentially needs <varname>Q</varname> to be
+  present.  The <emphasis>references</emphasis> of a store path are
+  the set of store paths to which it has a reference.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry xml:id="gloss-closure"><glossterm>closure</glossterm>
+
+  <glossdef><para>The closure of a store path is the set of store
+  paths that are directly or indirectly “reachable” from that store
+  path; that is, it’s the closure of the path under the <link
+  linkend="gloss-reference">references</link> relation.  For instance,
+  if the store object at path <varname>P</varname> contains a
+  reference to path <varname>Q</varname>, then <varname>Q</varname> is
+  in the closure of <varname>P</varname>.  For correct deployment it
+  is necessary to deploy whole closures, since otherwise at runtime
+  files could be missing.  The command <command>nix-store
+  -qR</command> prints out closures of store paths.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry xml:id="gloss-output-path"><glossterm>output path</glossterm>
+
+  <glossdef><para>A store path produced by a derivation.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry xml:id="gloss-deriver"><glossterm>deriver</glossterm>
+
+  <glossdef><para>The deriver of an <link
+  linkend="gloss-output-path">output path</link> is the store
+  derivation that built it.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry xml:id="gloss-validity"><glossterm>validity</glossterm>
+
+  <glossdef><para>A store path is considered
+  <emphasis>valid</emphasis> if it exists in the file system, is
+  listed in the Nix database as being valid, and if all paths in its
+  closure are also valid.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry xml:id="gloss-user-env"><glossterm>user environment</glossterm>
+
+  <glossdef><para>An automatically generated store object that
+  consists of a set of symlinks to “active” applications, i.e., other
+  store paths.  These are generated automatically by <link
+  linkend="sec-nix-env"><command>nix-env</command></link>.  See <xref
+  linkend="sec-profiles" />.</para>
+
+  </glossdef>
+  
+</glossentry>
+
+
+<glossentry xml:id="gloss-profile"><glossterm>profile</glossterm>
+
+  <glossdef><para>A symlink to the current <link
+  linkend="gloss-user-env">user environment</link> of a user, e.g.,
+  <filename>/nix/var/nix/profiles/default</filename>.</para></glossdef>
+
+</glossentry>
+
+
+<glossentry xml:id="gloss-nar"><glossterm>NAR</glossterm>
+
+  <glossdef><para>A <emphasis>N</emphasis>ix
+  <emphasis>AR</emphasis>chive.  This is a serialisation of a path in
+  the Nix store.  It can contain regular files, directories and
+  symbolic links.  NARs are generated and unpacked using
+  <command>nix-store --dump</command> and <command>nix-store
+  --restore</command>.</para></glossdef>
+
+</glossentry>
+
+
+
+</glosslist>
+
+
+</appendix>
diff --git a/doc/manual/hacking.xml b/doc/manual/hacking.xml
new file mode 100644
index 000000000000..11af0998f982
--- /dev/null
+++ b/doc/manual/hacking.xml
@@ -0,0 +1,41 @@
+<appendix xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xml:id="chap-hacking">
+
+<title>Hacking</title>
+
+<para>This section provides some notes on how to hack on Nix.  To get
+the latest version of Nix from GitHub:
+<screen>
+$ git clone git://github.com/NixOS/nix.git
+$ cd nix
+</screen>
+</para>
+
+<para>To build it and its dependencies:
+<screen>
+$ nix-build release.nix -A build.x86_64-linux
+</screen>
+</para>
+
+<para>To build all dependencies and start a shell in which all
+environment variables are set up so that those dependencies can be
+found:
+<screen>
+$ ./dev-shell
+</screen>
+To build Nix itself in this shell:
+<screen>
+[nix-shell]$ ./bootstrap.sh
+[nix-shell]$ configurePhase
+[nix-shell]$ make
+</screen>
+To test it:
+<screen>
+[nix-shell]$ make install
+[nix-shell]$ make installcheck
+</screen>
+
+</para>
+
+</appendix>
diff --git a/doc/manual/images/callouts/1.gif b/doc/manual/images/callouts/1.gif
new file mode 100644
index 000000000000..9e7a87f75461
--- /dev/null
+++ b/doc/manual/images/callouts/1.gif
Binary files differ
diff --git a/doc/manual/images/callouts/10.gif b/doc/manual/images/callouts/10.gif
new file mode 100644
index 000000000000..e80f7f8e632e
--- /dev/null
+++ b/doc/manual/images/callouts/10.gif
Binary files differ
diff --git a/doc/manual/images/callouts/11.gif b/doc/manual/images/callouts/11.gif
new file mode 100644
index 000000000000..67f91a239d66
--- /dev/null
+++ b/doc/manual/images/callouts/11.gif
Binary files differ
diff --git a/doc/manual/images/callouts/12.gif b/doc/manual/images/callouts/12.gif
new file mode 100644
index 000000000000..54c4b42f1901
--- /dev/null
+++ b/doc/manual/images/callouts/12.gif
Binary files differ
diff --git a/doc/manual/images/callouts/13.gif b/doc/manual/images/callouts/13.gif
new file mode 100644
index 000000000000..dd5d7d9b6439
--- /dev/null
+++ b/doc/manual/images/callouts/13.gif
Binary files differ
diff --git a/doc/manual/images/callouts/14.gif b/doc/manual/images/callouts/14.gif
new file mode 100644
index 000000000000..3d7a952a3105
--- /dev/null
+++ b/doc/manual/images/callouts/14.gif
Binary files differ
diff --git a/doc/manual/images/callouts/15.gif b/doc/manual/images/callouts/15.gif
new file mode 100644
index 000000000000..1c9183d5bb61
--- /dev/null
+++ b/doc/manual/images/callouts/15.gif
Binary files differ
diff --git a/doc/manual/images/callouts/2.gif b/doc/manual/images/callouts/2.gif
new file mode 100644
index 000000000000..94d42a30f99b
--- /dev/null
+++ b/doc/manual/images/callouts/2.gif
Binary files differ
diff --git a/doc/manual/images/callouts/3.gif b/doc/manual/images/callouts/3.gif
new file mode 100644
index 000000000000..dd3541a1bc25
--- /dev/null
+++ b/doc/manual/images/callouts/3.gif
Binary files differ
diff --git a/doc/manual/images/callouts/4.gif b/doc/manual/images/callouts/4.gif
new file mode 100644
index 000000000000..4bcbf7e31a17
--- /dev/null
+++ b/doc/manual/images/callouts/4.gif
Binary files differ
diff --git a/doc/manual/images/callouts/5.gif b/doc/manual/images/callouts/5.gif
new file mode 100644
index 000000000000..1c62b4f92093
--- /dev/null
+++ b/doc/manual/images/callouts/5.gif
Binary files differ
diff --git a/doc/manual/images/callouts/6.gif b/doc/manual/images/callouts/6.gif
new file mode 100644
index 000000000000..23bc5555d2a4
--- /dev/null
+++ b/doc/manual/images/callouts/6.gif
Binary files differ
diff --git a/doc/manual/images/callouts/7.gif b/doc/manual/images/callouts/7.gif
new file mode 100644
index 000000000000..e55ce89585a8
--- /dev/null
+++ b/doc/manual/images/callouts/7.gif
Binary files differ
diff --git a/doc/manual/images/callouts/8.gif b/doc/manual/images/callouts/8.gif
new file mode 100644
index 000000000000..49375e09f4cc
--- /dev/null
+++ b/doc/manual/images/callouts/8.gif
Binary files differ
diff --git a/doc/manual/images/callouts/9.gif b/doc/manual/images/callouts/9.gif
new file mode 100644
index 000000000000..da12a4fe2825
--- /dev/null
+++ b/doc/manual/images/callouts/9.gif
Binary files differ
diff --git a/doc/manual/installation.xml b/doc/manual/installation.xml
new file mode 100644
index 000000000000..423bef5e22a8
--- /dev/null
+++ b/doc/manual/installation.xml
@@ -0,0 +1,447 @@
+<?xml version="1.0" encoding="utf-8"?>
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xml:id="chap-installation">
+
+<title>Installation</title>
+
+
+<section><title>Supported platforms</title>
+
+<para>Nix is currently supported on the following platforms:
+
+<itemizedlist>
+
+  <listitem><para>Linux (particularly on x86, x86_64, and
+  PowerPC).</para></listitem>
+
+  <listitem><para>Mac OS X.</para></listitem>
+
+  <listitem><para>FreeBSD (only tested on Intel).</para></listitem>
+
+  <!--
+  <listitem><para>Windows through <link
+  xlink:href="http://www.cygwin.com/">Cygwin</link>.</para>
+
+  <warning><para>On Cygwin, Nix <emphasis>must</emphasis> be installed
+  on an NTFS partition.  It will not work correctly on a FAT
+  partition.</para></warning>
+
+  </listitem>
+  -->
+
+</itemizedlist>
+
+</para>
+
+<para>Nix is pretty portable, so it should work on most other Unix
+platforms as well.</para>
+
+</section>
+
+
+<section><title>Installing a binary distribution</title>
+
+<para>The easiest way to install Nix is to run the following:
+
+<screen>
+$ bash &lt;(curl https://nixos.org/nix/install)
+</screen>
+
+This will perform a single-user installation of Nix, meaning that
+<filename>/nix</filename> is owned by the invoking user.  You should
+run this under your usual user account, <emphasis>not</emphasis> as
+root.  The script will invoke <command>sudo</command> to create
+<filename>/nix</filename> if it doesn’t already exist.  If you don’t
+have <command>sudo</command>, you should manually create
+<filename>/nix</filename> first as root:
+
+<screen>
+$ mkdir /nix
+$ chown alice /nix
+</screen>
+
+</para>
+
+<para>You can also manually download and install a binary package.
+Binary packages of the latest stable release are available for Fedora,
+Debian, Ubuntu, Mac OS X and various other systems from the <link
+xlink:href="http://nixos.org/nix/download.html">Nix homepage</link>.
+You can also get builds of the latest development release from our
+<link
+xlink:href="http://hydra.nixos.org/job/nix/master/release/latest-finished#tabs-constituents">continuous
+build system</link>.</para>
+
+<para>For Fedora, RPM packages are available.  These can be installed
+or upgraded using <command>rpm -U</command>.  For example,
+
+<screen>
+$ rpm -U nix-1.7-1.i386.rpm</screen>
+
+</para>
+
+<para>For Debian and Ubuntu, you can download a Deb package and
+install it like this:
+
+<screen>
+$ dpkg -i nix_1.7-1_amd64.deb</screen>
+
+</para>
+
+<para>For other platforms, including Mac OS X (Darwin), FreeBSD and
+other Linux distributions, you can download a binary tarball that
+contains Nix and all its dependencies.  (This is what the install
+script at <uri>https://nixos.org/nix/install</uri> uses.)  You should
+unpack it somewhere (e.g. in <filename>/tmp</filename>), and then run
+the script named <command>install</command> inside the binary tarball:
+
+<screen>
+alice$ cd /tmp
+alice$ tar xfj nix-1.7-x86_64-darwin.tar.bz2
+alice$ cd nix-1.7-x86_64-darwin
+alice$ ./install
+</screen>
+
+</para>
+
+<para>Nix can be uninstalled using <command>rpm -e nix</command> or
+<command>dpkg -r nix</command> on RPM- and Dpkg-based systems,
+respectively.  After this you should manually remove the Nix store and
+other auxiliary data, if desired:
+
+<screen>
+$ rm -rf /nix</screen>
+
+</para>
+
+</section>
+
+
+<section><title>Installing Nix from source</title>
+
+<para>If no binary package is available, you can download and compile
+a source distribution.</para>
+
+<section><title>Prerequisites</title>
+
+<itemizedlist>
+
+  <listitem><para>GNU Make.</para></listitem>
+
+  <listitem><para>A version of GCC or Clang that supports C++11.</para></listitem>
+
+  <listitem><para>Perl 5.8 or higher.</para></listitem>
+
+  <listitem><para><command>pkg-config</command> to locate
+  dependencies.  If your distribution does not provide it, you can get
+  it from <link
+  xlink:href="http://www.freedesktop.org/wiki/Software/pkg-config"
+  />.</para></listitem>
+
+  <listitem><para>The bzip2 compressor program and the
+  <literal>libbz2</literal> library.  Thus you must have bzip2
+  installed, including development headers and libraries.  If your
+  distribution does not provide these, you can obtain bzip2 from <link
+  xlink:href="http://www.bzip.org/"/>.</para></listitem>
+
+  <listitem><para>The SQLite embedded database library, version 3.6.19
+  or higher.  If your distribution does not provide it, please install
+  it from <link xlink:href="http://www.sqlite.org/" />.</para></listitem>
+
+  <listitem><para>The Perl DBI and DBD::SQLite libraries, which are
+  available from <link
+  xlink:href="http://search.cpan.org/">CPAN</link> if your
+  distribution does not provide them.</para></listitem>
+
+  <listitem><para>The <link
+  xlink:href="http://www.hpl.hp.com/personal/Hans_Boehm/gc/">Boehm
+  garbage collector</link> to reduce the evaluator’s memory
+  consumption (optional).  To enable it, install
+  <literal>pkgconfig</literal> and the Boehm garbage collector, and
+  pass the flag <option>--enable-gc</option> to
+  <command>configure</command>.</para></listitem>
+
+  <listitem><para>The <command>xmllint</command> and
+  <command>xsltproc</command> programs to build this manual and the
+  man-pages.  These are part of the <literal>libxml2</literal> and
+  <literal>libxslt</literal> packages, respectively.  You also need
+  the <link
+  xlink:href="http://docbook.sourceforge.net/projects/xsl/">DocBook
+  XSL stylesheets</link> and optionally the <link
+  xlink:href="http://www.docbook.org/schemas/5x"> DocBook 5.0 RELAX NG
+  schemas</link>.  Note that these are only required if you modify the
+  manual sources or when you are building from the Git
+  repository.</para></listitem>
+
+  <listitem><para>Recent versions of Bison and Flex to build the
+  parser.  (This is because Nix needs GLR support in Bison and
+  reentrancy support in Flex.)  For Bison, you need version 2.6, which
+  can be obtained from the <link
+  xlink:href="ftp://alpha.gnu.org/pub/gnu/bison">GNU FTP
+  server</link>.  For Flex, you need version 2.5.35, which is
+  available on <link
+  xlink:href="http://lex.sourceforge.net/">SourceForge</link>.
+  Slightly older versions may also work, but ancient versions like the
+  ubiquitous 2.5.4a won't.  Note that these are only required if you
+  modify the parser or when you are building from the Git
+  repository.</para></listitem>
+
+</itemizedlist>
+
+</section>
+
+
+<section><title>Obtaining a source distribution</title>
+
+<para>The source tarball of the most recent stable release can be
+downloaded from the <link
+xlink:href="http://nixos.org/nix/download.html">Nix homepage</link>.
+You can also grab the <link
+xlink:href="http://hydra.nixos.org/job/nix/master/release/latest-finished#tabs-constituents">most
+recent development release</link>.</para>
+
+<para>Alternatively, the most recent sources of Nix can be obtained
+from its <link
+xlink:href="https://github.com/NixOS/nix">Git
+repository</link>.  For example, the following command will check out
+the latest revision into a directory called
+<filename>nix</filename>:</para>
+
+<screen>
+$ git clone https://github.com/NixOS/nix</screen>
+
+<para>Likewise, specific releases can be obtained from the <link
+xlink:href="https://github.com/NixOS/nix/tags">tags</link> of the
+repository.</para>
+
+</section>
+
+
+<section><title>Building Nix from source</title>
+
+<para>After unpacking or checking out the Nix sources, issue the
+following commands:
+
+<screen>
+$ ./configure <replaceable>options...</replaceable>
+$ make
+$ make install</screen>
+
+Nix requires GNU Make, so on systems where <command>make</command> is
+not GNU Make you may need to invoke <command>gmake</command> instead.</para>
+
+<para>When building from the Git repository, these should be preceded
+by the command:
+
+<screen>
+$ ./bootstrap.sh</screen>
+
+</para>
+
+<para>The installation path can be specified by passing the
+<option>--prefix=<replaceable>prefix</replaceable></option> option to
+<command>configure</command>.  The default installation directory is
+<filename>/usr/local</filename>.  You can change this to any location
+you like.  You must have write permission to the
+<replaceable>prefix</replaceable> path.</para>
+
+<para>Nix keeps its <emphasis>store</emphasis> (the place where
+packages are stored) in <filename>/nix/store</filename> by default.
+This can be changed using
+<option>--with-store-dir=<replaceable>path</replaceable></option>.</para>
+
+<warning><para>It is best <emphasis>not</emphasis> to change the Nix
+store from its default, since doing so makes it impossible to use
+pre-built binaries from the standard Nixpkgs channels — that is, all
+packages will need to be built from source.</para></warning>
+
+<para>Nix keeps state (such as its database and log files) in
+<filename>/nix/var</filename> by default.  This can be changed using
+<option>--localstatedir=<replaceable>path</replaceable></option>.</para>
+
+<para>If you want to rebuild the documentation, pass the full path to
+the DocBook RELAX NG schemas and to the DocBook XSL stylesheets using
+the
+<option>--with-docbook-rng=<replaceable>path</replaceable></option>
+and
+<option>--with-docbook-xsl=<replaceable>path</replaceable></option>
+options.</para>
+
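+<para>Putting these options together, a <command>configure</command>
+invocation that makes the default locations explicit and enables the
+Boehm garbage collector might look like this (illustrative):</para>
+
+<screen>
+$ ./configure --prefix=/usr/local \
+    --with-store-dir=/nix/store \
+    --localstatedir=/nix/var \
+    --enable-gc</screen>
+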
+</section>
+
+
+</section>
+
+
+<!-- TODO: should be updated
+<section><title>Upgrading Nix through Nix</title>
+
+<para>You can install the latest stable version of Nix through Nix
+itself by subscribing to the channel <link
+xlink:href="http://nixos.org/releases/nix/channels/nix-stable" />,
+or the latest unstable version by subscribing to the channel <link
+xlink:href="http://nixos.org/releases/nix/channels/nix-unstable" />.
+You can also do a <link linkend="sec-one-click">one-click
+installation</link> by clicking on the package links at <link
+xlink:href="http://nixos.org/releases/full-index-nix.html" />.</para>
+
+</section>
+-->
+
+
+<section><title>Security</title>
+
+<para>Nix has two basic security models.  First, it can be used in
+“single-user mode”, which is similar to what most other package
+management tools do: there is a single user (typically <systemitem
+class="username">root</systemitem>) who performs all package
+management operations.  All other users can then use the installed
+packages, but they cannot perform package management operations
+themselves.</para>
+
+<para>Alternatively, you can configure Nix in “multi-user mode”.  In
+this model, all users can perform package management operations — for
+instance, every user can install software without requiring root
+privileges.  Nix ensures that this is secure.  For instance, it’s not
+possible for one user to overwrite a package used by another user with
+a Trojan horse.</para>
+
+
+<section><title>Single-user mode</title>
+
+<para>In single-user mode, all Nix operations that access the database
+in <filename><replaceable>prefix</replaceable>/var/nix/db</filename>
+or modify the Nix store in
+<filename><replaceable>prefix</replaceable>/store</filename> must be
+performed under the user ID that owns those directories.  This is
+typically <systemitem class="username">root</systemitem>.  (If you
+install from RPM packages, that’s in fact the default ownership.)
+However, on single-user machines, it is often convenient to
+<command>chown</command> those directories to your normal user account
+so that you don’t have to <command>su</command> to <systemitem
+class="username">root</systemitem> all the time.</para>
+
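+<para>For example (assuming the store and state directories are in
+their recommended locations under <filename>/nix</filename>, and a user
+named <systemitem class="username">alice</systemitem>):</para>
+
+<screen>
+$ chown -R alice /nix/store /nix/var/nix</screen>
+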
+</section>
+
+
+<section xml:id="ssec-multi-user"><title>Multi-user mode</title>
+
+<para>To allow a Nix store to be shared safely among multiple users,
+it is important that users are not able to run builders that modify
+the Nix store or database in arbitrary ways, or that interfere with
+builds started by other users.  If they could do so, they could
+install a Trojan horse in some package and compromise the accounts of
+other users.</para>
+
+<para>To prevent this, the Nix store and database are owned by some
+privileged user (usually <literal>root</literal>) and builders are
+executed under special user accounts (usually named
+<literal>nixbld1</literal>, <literal>nixbld2</literal>, etc.).  When an
+unprivileged user runs a Nix command, actions that operate on the Nix
+store (such as builds) are forwarded to a <emphasis>Nix
+daemon</emphasis> running under the owner of the Nix store/database
+that performs the operation.</para>
+
+<note><para>Multi-user mode has one important limitation: only
+<systemitem class="username">root</systemitem> can run <command
+linkend="sec-nix-pull">nix-pull</command> to register the availability
+of pre-built binaries.  However, those registrations are shared by all
+users, so they still get the benefit from <command>nix-pull</command>s
+done by <systemitem class="username">root</systemitem>.</para></note>
+
+
+<section><title>Setting up the build users</title>
+
+<para>The <emphasis>build users</emphasis> are the special UIDs under
+which builds are performed.  They should all be members of the
+<emphasis>build users group</emphasis> <literal>nixbld</literal>.
+This group should have no other members.  The build users should not
+be members of any other group. On Linux, you can create the group and
+users as follows:
+
+<screen>
+$ groupadd -r nixbld
+$ for n in $(seq 1 10); do useradd -c "Nix build user $n" \
+    -d /var/empty -g nixbld -G nixbld -M -N -r -s "$(which nologin)" \
+    nixbld$n; done
+</screen>
+
+This creates 10 build users. There can never be more concurrent builds
+than the number of build users, so you may want to increase this if
+you expect to do many builds at the same time.</para>
+
+</section>
+
+
+<section><title>Running the daemon</title>
+
+<para>The <link linkend="sec-nix-daemon">Nix daemon</link> should be
+started as follows (as <literal>root</literal>):
+
+<screen>
+$ nix-daemon</screen>
+
+You’ll want to put that line somewhere in your system’s boot
+scripts.</para>
+
+<para>To let unprivileged users use the daemon, they should set the
+<link linkend="envar-remote"><envar>NIX_REMOTE</envar> environment
+variable</link> to <literal>daemon</literal>.  So you should put a
+line like
+
+<programlisting>
+export NIX_REMOTE=daemon</programlisting>
+
+into the users’ login scripts.</para>
+
+</section>
+
+
+<section><title>Restricting access</title>
+
+<para>To limit which users can perform Nix operations, you can use the
+permissions on the directory
+<filename>/nix/var/nix/daemon-socket</filename>.  For instance, if you
+want to restrict the use of Nix to the members of a group called
+<literal>nix-users</literal>, do
+
+<screen>
+$ chgrp nix-users /nix/var/nix/daemon-socket
+$ chmod ug=rwx,o= /nix/var/nix/daemon-socket
+</screen>
+
+This way, users who are not in the <literal>nix-users</literal> group
+cannot connect to the Unix domain socket
+<filename>/nix/var/nix/daemon-socket/socket</filename>, so they cannot
+perform Nix operations.</para>
+
+</section>
+
+
+</section> <!-- end of multi-user -->
+
+
+</section> <!-- end of security -->
+
+
+<section><title>Using Nix</title>
+
+<para>To use Nix, some environment variables should be set.  In
+particular, <envar>PATH</envar> should contain the directories
+<filename><replaceable>prefix</replaceable>/bin</filename> and
+<filename>~/.nix-profile/bin</filename>.  The first directory contains
+the Nix tools themselves, while <filename>~/.nix-profile</filename> is
+a symbolic link to the current <emphasis>user environment</emphasis>
+(an automatically generated package consisting of symlinks to
+installed packages).  The simplest way to set the required environment
+variables is to include the file
+<filename><replaceable>prefix</replaceable>/etc/profile.d/nix.sh</filename>
+in your <filename>~/.profile</filename> (or similar), like this:</para>
+
+<screen>
+source <replaceable>prefix</replaceable>/etc/profile.d/nix.sh</screen>
+
+</section>
+
+
+</chapter>
diff --git a/doc/manual/introduction.xml b/doc/manual/introduction.xml
new file mode 100644
index 000000000000..e0300dc86cb9
--- /dev/null
+++ b/doc/manual/introduction.xml
@@ -0,0 +1,273 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xml:id="chap-introduction">
+
+<title>Introduction</title>
+
+
+<section><title>About Nix</title>
+
+<para>Nix is a <emphasis>purely functional package manager</emphasis>.
+This means that it treats packages like values in purely functional
+programming languages such as Haskell — they are built by functions
+that don’t have side-effects, and they never change after they have
+been built.  Nix stores packages in the <emphasis>Nix
+store</emphasis>, usually the directory
+<filename>/nix/store</filename>, where each package has its own unique
+subdirectory such as
+
+<programlisting>
+/nix/store/nlc4z5y1hm8w9s8vm6m1f5hy962xjmp5-firefox-12.0
+</programlisting>
+
+where <literal>nlc4z5…</literal> is a unique identifier for the
+package that captures all its dependencies (it’s a cryptographic hash
+of the package’s build dependency graph).  This enables many powerful
+features.</para>
+
+
+<simplesect><title>Multiple versions</title>
+
+<para>You can have multiple versions or variants of a package
+installed at the same time.  This is especially important when
+different applications have dependencies on different versions of the
+same package — it prevents the “DLL hell”.  Because of the hashing
+scheme, different versions of a package end up in different paths in
+the Nix store, so they don’t interfere with each other.</para>
+
+<para>An important consequence is that operations like upgrading or
+uninstalling an application cannot break other applications, since
+these operations never “destructively” update or delete files that are
+used by other packages.</para>
+
+</simplesect>
+
+
+<simplesect><title>Complete dependencies</title>
+
+<para>Nix helps you make sure that package dependency specifications
+are complete.  In general, when you’re making a package for a package
+management system like RPM, you have to specify for each package what
+its dependencies are, but there are no guarantees that this
+specification is complete.  If you forget a dependency, then the
+package will build and work correctly on <emphasis>your</emphasis>
+machine if you have the dependency installed, but not on the end
+user's machine if it's not there.</para>
+
+<para>Since Nix, on the other hand, doesn’t install packages in “global”
+locations like <filename>/usr/bin</filename> but in package-specific
+directories, the risk of incomplete dependencies is greatly reduced.
+This is because tools such as compilers don’t search in per-package
+directories such as
+<filename>/nix/store/5lbfaxb722zp…-openssl-0.9.8d/include</filename>,
+so if a package builds correctly on your system, this is because you
+specified the dependency explicitly.</para>
+
+<para>Runtime dependencies are found by scanning binaries for the hash
+parts of Nix store paths (such as <literal>r8vvq9kq…</literal>).  This
+sounds risky, but it works extremely well.</para>
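+
+<para>You can inspect the run-time dependencies that were detected for
+a package by querying its references (an illustrative query; the store
+path is abridged):
+
+<screen>
+$ nix-store -q --references /nix/store/nlc4z5…-firefox-12.0</screen>
+
+</para>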
+
+</simplesect>
+
+
+<simplesect><title>Multi-user support</title>
+
+<para>Nix has multi-user support.  This means that non-privileged
+users can securely install software.  Each user can have a different
+<emphasis>profile</emphasis>, a set of packages in the Nix store that
+appear in the user’s <envar>PATH</envar>.  If a user installs a
+package that another user has already installed previously, the
+package won’t be built or downloaded a second time.  At the same time,
+it is not possible for one user to inject a Trojan horse into a
+package that might be used by another user.</para>
+
+<!--
+<para>More details can be found in Section 3 of our <a
+href="docs/papers.html#securesharing">ASE 2005 paper</a>.</para>
+-->
+
+</simplesect>
+
+
+<simplesect><title>Atomic upgrades and rollbacks</title>
+
+<para>Since package management operations never overwrite packages in
+the Nix store but just add new versions in different paths, they are
+<emphasis>atomic</emphasis>.  So during a package upgrade, there is no
+time window in which the package has some files from the old version
+and some files from the new version — which would be bad because a
+program might well crash if it’s started during that period.</para>
+
+<para>And since packages aren’t overwritten, the old versions are still
+there after an upgrade.  This means that you can <emphasis>roll
+back</emphasis> to the old version:</para>
+
+<screen>
+$ nix-env --upgrade <replaceable>some-packages</replaceable>
+$ nix-env --rollback
+</screen>
+
+</simplesect>
+
+
+<simplesect><title>Garbage collection</title>
+
+<para>When you uninstall a package like this…
+
+<screen>
+$ nix-env --uninstall firefox
+</screen>
+
+the package isn’t deleted from the system right away (after all, you
+might want to do a rollback, or it might be in the profiles of other
+users).  Instead, unused packages can be deleted safely by running the
+<emphasis>garbage collector</emphasis>:
+
+<screen>
+$ nix-collect-garbage
+</screen>
+
+This deletes all packages that aren’t in use by any user profile or by
+a currently running program.</para>
+
+</simplesect>
+
+
+<simplesect><title>Functional package language</title>
+
+<para>Packages are built from <emphasis>Nix expressions</emphasis>,
+which are written in a simple functional language.  A Nix expression describes
+everything that goes into a package build action (a “derivation”):
+other packages, sources, the build script, environment variables for
+the build script, etc.  Nix tries very hard to ensure that Nix
+expressions are <emphasis>deterministic</emphasis>: building a Nix
+expression twice should yield the same result.</para>
+
+<para>Because it’s a functional language, it’s easy to support
+building variants of a package: turn the Nix expression into a
+function and call it any number of times with the appropriate
+arguments.  Due to the hashing scheme, variants don’t conflict with
+each other in the Nix store.</para>
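+
+<para>As a purely illustrative sketch (the function and its
+<literal>enableGui</literal> parameter are made up, not taken from
+Nixpkgs), such a function could look like:
+
+<programlisting>
+{ stdenv, fetchurl, enableGui ? false }:
+
+stdenv.mkDerivation {
+  name = "hello-2.1.1";
+  src = fetchurl {
+    url = "ftp://ftp.gnu.org/gnu/hello/hello-2.1.1.tar.gz";
+    sha256 = "<replaceable>hash</replaceable>";
+  };
+  configureFlags = if enableGui then "--enable-gui" else "";
+}</programlisting>
+
+Calling it once with <literal>enableGui = true</literal> and once with
+<literal>enableGui = false</literal> yields two variants that live
+side by side under different store paths.</para>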
+
+</simplesect>
+
+
+<simplesect><title>Transparent source/binary deployment</title>
+
+<para>Nix expressions generally describe how to build a package from
+source, so an installation action like
+
+<screen>
+$ nix-env --install firefox
+</screen>
+
+<emphasis>could</emphasis> cause quite a bit of build activity, as not
+only Firefox but also all its dependencies (all the way up to the C
+library and the compiler) would have to be built, at least if they are
+not already in the Nix store.  This is a <emphasis>source deployment
+model</emphasis>.  For most users, building from source is not very
+pleasant as it takes far too long.  However, Nix can automatically
+skip building from source and download a pre-built binary instead if
+it knows about it.  <emphasis>Nix channels</emphasis> provide Nix
+expressions along with pre-built binaries.</para>
+
+<!--
+<para>source deployment model (like <a
+href="http://www.gentoo.org/">Gentoo</a>) and a binary model (like
+RPM)</para>
+-->
+
+</simplesect>
+
+
+<simplesect><title>Binary patching</title>
+
+<para>In addition to downloading binaries automatically if they’re
+available, Nix can download binary deltas that patch an existing
+package in the Nix store into a new version.  This speeds up
+upgrades.</para>
+
+</simplesect>
+
+
+<simplesect><title>Nix Packages collection</title>
+
+<para>We provide a large set of Nix expressions containing hundreds of
+existing Unix packages, the <emphasis>Nix Packages
+collection</emphasis> (Nixpkgs).</para>
+
+</simplesect>
+
+
+<simplesect><title>Service deployment</title>
+
+<para>Nix can be used not only for rolling out packages, but also
+complete <emphasis>configurations</emphasis> of services.  This is
+done by treating all the static bits of a service (such as software
+packages, configuration files, control scripts, static web pages,
+etc.) as “packages” that can be built by Nix expressions.  As a
+result, all the features above apply to services as well: for
+instance, you can roll back a web server configuration if a
+configuration change turns out to be undesirable, you can easily have
+multiple instances of a service (e.g., a test and production server),
+and because the whole service is built in a purely functional way from
+a Nix expression, it is repeatable so you can easily reproduce the
+service on another machine.</para>
+
+<!--
+<para>You can read more about this in our <a
+href="docs/papers.html#servicecm">SCM-12 paper</a>.</para>
+-->
+
+</simplesect>
+
+
+<simplesect><title>Portability</title>
+
+<para>Nix should run on most Unix systems, including Linux, FreeBSD and
+Mac OS X.<!-- It is also supported on Windows using Cygwin.--></para>
+
+</simplesect>
+
+
+<simplesect><title>NixOS</title>
+
+<para>NixOS is a Linux distribution based on Nix.  It uses Nix not
+just for package management but also to manage the system
+configuration (e.g., to build configuration files in
+<filename>/etc</filename>).  This means, among other things, that it’s
+possible to easily roll back the entire configuration of the system to
+an earlier state.  Also, users can install software without root
+privileges.  For more information and downloads, see the <link
+xlink:href="http://nixos.org/">NixOS homepage</link>.</para>
+
+</simplesect>
+
+
+<!-- other features:
+
+- build farms
+- reproducibility (Nix expressions allows whole configuration to be rebuilt)
+
+-->
+
+</section>
+
+
+<section><title>License</title>
+
+<para>Nix is free software; you can redistribute it and/or modify it
+under the terms of the <link
+xlink:href="http://www.gnu.org/licenses/lgpl.html">GNU Lesser General
+Public License</link> as published by the <link
+xlink:href="http://www.fsf.org/">Free Software Foundation</link>;
+either version 2.1 of the License, or (at your option) any later
+version.  Nix is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+Lesser General Public License for more details.</para>
+
+</section>
+
+
+</chapter>
diff --git a/doc/manual/local.mk b/doc/manual/local.mk
new file mode 100644
index 000000000000..1c5a3a2ffff1
--- /dev/null
+++ b/doc/manual/local.mk
@@ -0,0 +1,113 @@
+XSLTPROC = $(xsltproc) --nonet $(xmlflags) \
+  --param section.autolabel 1 \
+  --param section.label.includes.component.label 1 \
+  --param html.stylesheet \'style.css\' \
+  --param xref.with.number.and.title 1 \
+  --param toc.section.depth 3 \
+  --param admon.style \'\' \
+  --param callout.graphics.extension \'.gif\' \
+  --param contrib.inline.enabled 0
+
+MANUAL_SRCS := $(wildcard $(d)/*.xml)
+
+
+# Do XInclude processing / RelaxNG validation
+$(d)/manual.xmli: $(d)/manual.xml $(MANUAL_SRCS) $(d)/version.txt
+	$(trace-gen) $(xmllint) --nonet --xinclude $< -o $@.tmp
+	@mv $@.tmp $@
+
+$(d)/version.txt:
+	$(trace-gen) echo -n $(PACKAGE_VERSION) > $@
+
+# Note: RelaxNG validation requires xmllint >= 2.7.4.
+$(d)/manual.is-valid: $(d)/manual.xmli
+	$(trace-gen) $(XSLTPROC) --novalid --stringparam profile.condition manual \
+	  $(docbookxsl)/profiling/profile.xsl $< 2> /dev/null | \
+	  $(xmllint) --nonet --noout --relaxng $(docbookrng)/docbook.rng -
+	@touch $@
+
+clean-files += $(d)/manual.xmli $(d)/version.txt $(d)/manual.is-valid
+
+dist-files += $(d)/manual.xmli $(d)/version.txt $(d)/manual.is-valid
+
+
+# Generate man pages.
+man-pages := $(foreach n, \
+  nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 \
+  nix-collect-garbage.1 nix-push.1 nix-pull.1 \
+  nix-prefetch-url.1 nix-channel.1 \
+  nix-install-package.1 nix-hash.1 nix-copy-closure.1 \
+  nix.conf.5 nix-daemon.8, \
+  $(d)/$(n))
+
+$(man-pages): $(d)/manual.xmli $(d)/manual.is-valid
+	$(trace-gen) $(XSLTPROC) --stringparam profile.condition manpage \
+	  $(docbookxsl)/profiling/profile.xsl $< 2> /dev/null | \
+	  (cd doc/manual && $(XSLTPROC) $(docbookxsl)/manpages/docbook.xsl -)
+
+clean-files += $(d)/*.1 $(d)/*.5 $(d)/*.8
+
+dist-files += $(man-pages)
+
+
+# Generate the HTML manual.
+$(d)/manual.html: $(d)/manual.xml $(MANUAL_SRCS) $(d)/manual.is-valid
+	$(trace-gen) $(XSLTPROC) --xinclude --stringparam profile.condition manual \
+	  $(docbookxsl)/profiling/profile.xsl $< | \
+	  $(XSLTPROC) --output $@ $(docbookxsl)/xhtml/docbook.xsl -
+
+$(foreach file, $(d)/manual.html $(d)/style.css, $(eval $(call install-data-in, $(file), $(docdir)/manual)))
+
+$(foreach file, $(wildcard $(d)/figures/*.png), $(eval $(call install-data-in, $(file), $(docdir)/manual/figures)))
+
+$(foreach file, $(wildcard $(d)/images/callouts/*.gif), $(eval $(call install-data-in, $(file), $(docdir)/manual/images/callouts)))
+
+$(eval $(call install-symlink, manual.html, $(docdir)/manual/index.html))
+
+all: $(d)/manual.html
+
+clean-files += $(d)/manual.html
+
+dist-files += $(d)/manual.html
+
+
+# Generate the PDF manual.
+$(d)/manual.pdf: $(d)/manual.xml $(MANUAL_SRCS) $(d)/manual.is-valid
+	$(trace-gen) if test "$(dblatex)" != ""; then \
+		cd doc/manual && $(XSLTPROC) --xinclude --stringparam profile.condition manual \
+		  $(docbookxsl)/profiling/profile.xsl manual.xml | \
+		  $(dblatex) -o $(notdir $@) $(dblatex_opts) -; \
+	else \
+		echo "Please install dblatex and rerun configure."; \
+		exit 1; \
+	fi
+
+clean-files += $(d)/manual.pdf
+
+
+# Generate the release notes.
+
+NEWS_OPTS = \
+ --stringparam generate.toc "article nop" \
+ --stringparam section.autolabel.max.depth 0 \
+ --stringparam header.rule 0
+
+$(d)/release-notes.html: $(d)/release-notes.xml
+	$(trace-gen) $(XSLTPROC) --xinclude --output $@ $(NEWS_OPTS) \
+	  $(docbookxsl)/xhtml/docbook.xsl $<
+
+NEWS: $(d)/release-notes.xml
+	$(trace-gen) $(XSLTPROC) --xinclude doc/manual/quote-literals.xsl $< | \
+	  $(XSLTPROC) --output $@.tmp.html $(NEWS_OPTS) \
+	  $(docbookxsl)/xhtml/docbook.xsl - && \
+	LANG=en_US.UTF-8 $(w3m) -dump $@.tmp.html > $@.tmp && \
+	sed -e 's/●/*/g' -e 's/○/-/g' -e 's/━/-/g' < $@.tmp > NEWS && \
+	rm $@.tmp $@.tmp.html
+
+dist-files += NEWS $(d)/release-notes.html
+
+clean-files += NEWS $(d)/release-notes.html
+
+all: $(d)/release-notes.html NEWS
+
+$(foreach file, $(d)/release-notes.html, $(eval $(call install-data-in, $(file), $(docdir)/manual)))
diff --git a/doc/manual/manual.xml b/doc/manual/manual.xml
new file mode 100644
index 000000000000..6593d13987ab
--- /dev/null
+++ b/doc/manual/manual.xml
@@ -0,0 +1,84 @@
+<book xmlns="http://docbook.org/ns/docbook"
+      xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <info>
+
+    <title>Nix User's Guide</title>
+
+    <edition>Version <xi:include href="version.txt" parse="text" /></edition>
+
+    <author>
+      <personname>
+        <firstname>Eelco</firstname>
+        <surname>Dolstra</surname>
+      </personname>
+      <affiliation>
+        <orgname>LogicBlox</orgname>
+      </affiliation>
+      <contrib>Author</contrib>
+    </author>
+
+    <copyright>
+      <year>2004-2014</year>
+      <holder>Eelco Dolstra</holder>
+    </copyright>
+
+    <date>April 2014</date>
+
+  </info>
+
+
+  <xi:include href="introduction.xml" />
+  <xi:include href="quick-start.xml" />
+  <xi:include href="installation.xml" />
+  <xi:include href="package-management.xml" />
+  <xi:include href="writing-nix-expressions.xml" />
+  <xi:include href="build-farm.xml" />
+
+
+  <appendix>
+    <title>Command Reference</title>
+    <xi:include href="opt-common.xml" />
+    <xi:include href="env-common.xml" />
+
+    <section>
+      <title>Main commands</title>
+      <xi:include href="nix-env.xml" />
+      <xi:include href="nix-build.xml" />
+      <xi:include href="nix-shell.xml" />
+      <xi:include href="nix-store.xml" />
+    </section>
+
+    <section>
+      <title>Utilities</title>
+      <xi:include href="nix-channel.xml" />
+      <xi:include href="nix-collect-garbage.xml" />
+      <xi:include href="nix-copy-closure.xml" />
+      <xi:include href="nix-daemon.xml" />
+      <xi:include href="nix-hash.xml" />
+      <xi:include href="nix-install-package.xml" />
+      <xi:include href="nix-instantiate.xml" />
+      <xi:include href="nix-prefetch-url.xml" />
+      <xi:include href="nix-pull.xml" />
+      <xi:include href="nix-push.xml" />
+    </section>
+
+    <section>
+      <title>Files</title>
+      <xi:include href="conf-file.xml" />
+    </section>
+
+  </appendix>
+
+  <xi:include href="troubleshooting.xml" />
+  <!-- <xi:include href="bugs.xml" /> -->
+  <xi:include href="glossary.xml" />
+  <xi:include href="hacking.xml" />
+
+  <appendix>
+    <title>Nix Release Notes</title>
+    <xi:include href="release-notes.xml"
+                xpointer="xmlns(x=http://docbook.org/ns/docbook)xpointer(x:article/x:section)" />
+  </appendix>
+
+</book>
diff --git a/doc/manual/nix-build.xml b/doc/manual/nix-build.xml
new file mode 100644
index 000000000000..3832f5fc3450
--- /dev/null
+++ b/doc/manual/nix-build.xml
@@ -0,0 +1,189 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-build">
+
+<refmeta>
+  <refentrytitle>nix-build</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-build</refname>
+  <refpurpose>build a Nix expression</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-build</command>
+    <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="opt-common-syn.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(/db:nop/*)" />
+    <arg><option>--arg</option> <replaceable>name</replaceable> <replaceable>value</replaceable></arg>
+    <arg><option>--argstr</option> <replaceable>name</replaceable> <replaceable>value</replaceable></arg>
+    <arg>
+      <group choice='req'>
+        <arg choice='plain'><option>--attr</option></arg>
+        <arg choice='plain'><option>-A</option></arg>
+      </group>
+      <replaceable>attrPath</replaceable>
+    </arg>
+    <arg><option>--drv-link</option> <replaceable>drvlink</replaceable></arg>
+    <arg><option>--add-drv-link</option></arg>
+    <arg><option>--no-out-link</option></arg>
+    <arg>
+      <group choice='req'>
+        <arg choice='plain'><option>--out-link</option></arg>
+        <arg choice='plain'><option>-o</option></arg>
+      </group>
+      <replaceable>outlink</replaceable>
+    </arg>
+    <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+<refsection><title>Description</title>
+
+<para>The <command>nix-build</command> command builds the derivations
+described by the Nix expressions in <replaceable>paths</replaceable>.
+If the build succeeds, it places a symlink to the result in the
+current directory.  The symlink is called <filename>result</filename>.
+If there are multiple Nix expressions, or the Nix expressions evaluate
+to multiple derivations, multiple sequentially numbered symlinks are
+created (<filename>result</filename>, <filename>result-2</filename>,
+and so on).</para>
+
+<para>If no <replaceable>paths</replaceable> are specified, then
+<command>nix-build</command> will use <filename>default.nix</filename>
+in the current directory, if it exists.</para>
+
+<para><command>nix-build</command> is essentially a wrapper around
+<link
+linkend="sec-nix-instantiate"><command>nix-instantiate</command></link>
+(to translate a high-level Nix expression to a low-level store
+derivation) and <link
+linkend="rsec-nix-store-realise"><command>nix-store
+--realise</command></link> (to build the store derivation).</para>
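+
+<para>Roughly speaking (a simplified sketch that ignores most
+options), running <command>nix-build</command> without arguments
+corresponds to
+
+<screen>
+$ nix-store --realise $(nix-instantiate default.nix)</screen>
+
+except that <command>nix-build</command> also creates the
+<filename>result</filename> symlink mentioned above.</para>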
+
+<warning><para>The result of the build is automatically registered as
+a root of the Nix garbage collector.  This root disappears
+automatically when the <filename>result</filename> symlink is deleted
+or renamed.  So don’t rename the symlink.</para></warning>
+
+</refsection>
+
+
+<refsection><title>Options</title>
+
+<para>All options not listed here are passed to <command>nix-store
+--realise</command>, except for <option>--arg</option> and
+<option>--attr</option> / <option>-A</option> which are passed to
+<command>nix-instantiate</command>.  <phrase condition="manual">See
+also <xref linkend="sec-common-options" />.</phrase></para>
+
+<variablelist>
+
+  <varlistentry><term><option>--drv-link</option> <replaceable>drvlink</replaceable></term>
+
+    <listitem><para>Add a symlink named
+    <replaceable>drvlink</replaceable> to the store derivation
+    produced by <command>nix-instantiate</command>.  The derivation is
+    a root of the garbage collector until the symlink is deleted or
+    renamed.  If there are multiple derivations, numbers are suffixed
+    to <replaceable>drvlink</replaceable> to distinguish between
+    them.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--add-drv-link</option></term>
+
+    <listitem><para>Shorthand for <option>--drv-link</option>
+    <filename>./derivation</filename>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--no-out-link</option></term>
+
+    <listitem><para>Do not create a symlink to the output path.  Note
+    that as a result the output does not become a root of the garbage
+    collector, and so might be deleted by <command>nix-store
+    --gc</command>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry xml:id='opt-out-link'><term><option>--out-link</option> /
+  <option>-o</option> <replaceable>outlink</replaceable></term>
+
+    <listitem><para>Change the name of the symlink to the output path
+    created from <filename>result</filename> to
+    <replaceable>outlink</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
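+
+<para>For example (an illustrative invocation of
+<option>--out-link</option>):
+
+<screen>
+$ nix-build '&lt;nixpkgs>' -A firefox -o firefox-build</screen>
+
+This creates a symlink named <filename>firefox-build</filename>
+instead of <filename>result</filename>.</para>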
+
+<para>The following common options are supported:</para>
+
+<variablelist condition="manpage">
+  <xi:include href="opt-common.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(//db:variablelist[@xml:id='opt-common']/*)" />
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-build '&lt;nixpkgs>' -A firefox
+store derivation is /nix/store/qybprl8sz2lc...-firefox-1.5.0.7.drv
+/nix/store/d18hyl92g30l...-firefox-1.5.0.7
+
+$ ls -l result
+lrwxrwxrwx  <replaceable>...</replaceable>  result -> /nix/store/d18hyl92g30l...-firefox-1.5.0.7
+
+$ ls ./result/bin/
+firefox  firefox-config</screen>
+
+<para>If a derivation has multiple outputs,
+<command>nix-build</command> will build the default (first) output.
+You can also build all outputs:
+<screen>
+$ nix-build '&lt;nixpkgs>' -A openssl.all
+</screen>
+This will create a symlink for each output named
+<filename>result-<replaceable>outputname</replaceable></filename>.
+The suffix is omitted if the output name is <literal>out</literal>.
+So if <literal>openssl</literal> has outputs <literal>out</literal>,
+<literal>bin</literal> and <literal>man</literal>,
+<command>nix-build</command> will create symlinks
+<literal>result</literal>, <literal>result-bin</literal> and
+<literal>result-man</literal>.  It’s also possible to build a specific
+output:
+<screen>
+$ nix-build '&lt;nixpkgs>' -A openssl.man
+</screen>
+This will create a symlink <literal>result-man</literal>.</para>
+
+<para>Build a Nix expression given on the command line:
+
+<screen>
+$ nix-build -E 'with import &lt;nixpkgs> { }; runCommand "foo" { } "echo bar > $out"'
+$ cat ./result
+bar
+</screen>
+
+</para>
+
+</refsection>
+
+
+<refsection condition="manpage"><title>Environment variables</title>
+
+<variablelist>
+  <xi:include href="env-common.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(//db:variablelist[@xml:id='env-common']/*)" />
+</variablelist>
+
+</refsection>
+
+
+</refentry>
diff --git a/doc/manual/nix-channel.xml b/doc/manual/nix-channel.xml
new file mode 100644
index 000000000000..2c4e1151b0d4
--- /dev/null
+++ b/doc/manual/nix-channel.xml
@@ -0,0 +1,112 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-channel">
+  
+<refmeta>
+  <refentrytitle>nix-channel</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-channel</refname>
+  <refpurpose>manage Nix channels</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-channel</command>
+    <group choice='req'>
+      <arg choice='plain'><option>--add</option> <replaceable>url</replaceable> <arg choice='opt'><replaceable>name</replaceable></arg></arg>
+      <arg choice='plain'><option>--remove</option> <replaceable>name</replaceable></arg>
+      <arg choice='plain'><option>--list</option></arg>
+      <arg choice='plain'><option>--update</option> <arg rep='repeat'><replaceable>names</replaceable></arg></arg>
+    </group>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+<refsection><title>Description</title>
+
+<para>A Nix channel is a mechanism that allows you to automatically stay
+up-to-date with a set of pre-built Nix expressions.  A Nix channel is
+just a URL that points to a place containing a set of Nix expressions
+and a <command>nix-push</command> manifest.  <phrase
+condition="manual">See also <xref linkend="sec-channels"
+/>.</phrase></para>
+
+<para>This command has the following operations:
+
+<variablelist>
+
+  <varlistentry><term><option>--add</option> <replaceable>url</replaceable> [<replaceable>name</replaceable>]</term>
+
+    <listitem><para>Adds a channel named
+    <replaceable>name</replaceable> with URL
+    <replaceable>url</replaceable> to the list of subscribed channels.
+    If <replaceable>name</replaceable> is omitted, it defaults to the
+    last component of <replaceable>url</replaceable>, with the
+    suffixes <literal>-stable</literal> or
+    <literal>-unstable</literal> removed.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--remove</option> <replaceable>name</replaceable></term>
+
+    <listitem><para>Removes the channel named
+    <replaceable>name</replaceable> from the list of subscribed
+    channels.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--list</option></term>
+
+    <listitem><para>Prints the names and URLs of all subscribed
+    channels on standard output.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--update</option> [<replaceable>names</replaceable>…]</term>
+
+    <listitem><para>Downloads the Nix expressions of all subscribed
+    channels (or only those included in
+    <replaceable>names</replaceable> if specified), makes them the
+    default for <command>nix-env</command> operations (by symlinking
+    them from the directory <filename>~/.nix-defexpr</filename>), and
+    performs a <command>nix-pull</command> on the manifests of all
+    channels to make pre-built binaries available.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</para>
+
+<para>Note that <option>--add</option> does not automatically perform
+an update.</para>
+
+<para>The list of subscribed channels is stored in
+<filename>~/.nix-channels</filename>.</para>
+
+<para>A channel consists of two elements: a bzipped Tar archive
+containing the Nix expressions, and a manifest created by
+<command>nix-push</command>.  These must be stored under
+<literal><replaceable>url</replaceable>/nixexprs.tar.bz2</literal> and
+<literal><replaceable>url</replaceable>/MANIFEST</literal>,
+respectively.</para>
+
+</refsection>
+
+<refsection><title>Examples</title>
+
+<para>To subscribe to the Nixpkgs channel and install the GNU Hello package:</para>
+
+<screen>
+$ nix-channel --add http://nixos.org/channels/nixpkgs-unstable
+$ nix-channel --update
+$ nix-env -iA nixpkgs.hello</screen>
+
+</refsection>
+
+</refentry>
diff --git a/doc/manual/nix-collect-garbage.xml b/doc/manual/nix-collect-garbage.xml
new file mode 100644
index 000000000000..cf870740f4ab
--- /dev/null
+++ b/doc/manual/nix-collect-garbage.xml
@@ -0,0 +1,67 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-collect-garbage">
+  
+<refmeta>
+  <refentrytitle>nix-collect-garbage</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-collect-garbage</refname>
+  <refpurpose>delete unreachable store paths</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-collect-garbage</command>
+    <arg><option>--delete-old</option></arg>
+    <arg><option>-d</option></arg>
+    <arg><option>--delete-older-than</option> <replaceable>period</replaceable></arg>
+    <group choice='opt'>
+      <arg choice='plain'><option>--print-roots</option></arg>
+      <arg choice='plain'><option>--print-live</option></arg>
+      <arg choice='plain'><option>--print-dead</option></arg>
+      <arg choice='plain'><option>--delete</option></arg>
+    </group>
+    <arg><option>--dry-run</option></arg>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+<refsection><title>Description</title>
+
+<para>The command <command>nix-collect-garbage</command> is mostly an
+alias of <link linkend="rsec-nix-store-gc"><command>nix-store
+--gc</command></link>, that is, it deletes all unreachable paths in
+the Nix store to clean up your system.  However, it provides two
+additional options: <option>-d</option> (<option>--delete-old</option>),
+which deletes all old generations of all profiles in
+<filename>/nix/var/nix/profiles</filename> by invoking
+<literal>nix-env --delete-generations old</literal> on all profiles
+(of course, this makes rollbacks to previous configurations
+impossible); and
+<option>--delete-older-than</option> <replaceable>period</replaceable>,
+where period is a value such as <literal>30d</literal>, which deletes
+all generations older than the specified number of days in all profiles
+in <filename>/nix/var/nix/profiles</filename> (except for the generations
+that were active at that point in time).
+</para>
+
+</refsection>
+
+<refsection><title>Example</title>
+
+<para>To delete from the Nix store everything that is not used by the
+current generations of each profile, do
+
+<screen>
+$ nix-collect-garbage -d</screen>
+
+</para>
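+
+<para>To also delete generations of profiles older than, say, 30 days
+(generations that were still active at that point are kept, as
+described above):
+
+<screen>
+$ nix-collect-garbage --delete-older-than 30d</screen>
+
+</para>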
+
+</refsection>
+
+</refentry>
diff --git a/doc/manual/nix-copy-closure.xml b/doc/manual/nix-copy-closure.xml
new file mode 100644
index 000000000000..3f8fd8017089
--- /dev/null
+++ b/doc/manual/nix-copy-closure.xml
@@ -0,0 +1,187 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-copy-closure">
+
+<refmeta>
+  <refentrytitle>nix-copy-closure</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-copy-closure</refname>
+  <refpurpose>copy a closure to or from a remote machine via SSH</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-copy-closure</command>
+    <group>
+      <arg choice='plain'><option>--to</option></arg>
+      <arg choice='plain'><option>--from</option></arg>
+    </group>
+    <arg><option>--sign</option></arg>
+    <arg><option>--gzip</option></arg>
+    <!--
+    <arg><option>- -show-progress</option></arg>
+    -->
+    <arg><option>--include-outputs</option></arg>
+    <arg><option>--use-substitutes</option></arg>
+    <arg><option>-s</option></arg>
+    <arg choice='plain'>
+      <replaceable>user@</replaceable><replaceable>machine</replaceable>
+    </arg>
+    <arg choice='plain'><replaceable>paths</replaceable></arg>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+
+<refsection><title>Description</title>
+
+<para><command>nix-copy-closure</command> gives you an easy and
+efficient way to exchange software between machines.  Given one or
+more Nix store paths <replaceable>paths</replaceable> on the local
+machine, <command>nix-copy-closure</command> computes the closure of
+those paths (i.e. all their dependencies in the Nix store), and copies
+all paths in the closure to the remote machine via the
+<command>ssh</command> (Secure Shell) command.  With the
+<option>--from</option> option, the direction is reversed:
+the closure of <replaceable>paths</replaceable> on a remote machine is
+copied to the Nix store on the local machine.</para>
+
+<para>This command is efficient because it only sends the store paths
+that are missing on the target machine.</para>
+
+<para>Since <command>nix-copy-closure</command> calls
+<command>ssh</command>, you may be asked to type in the appropriate
+password or passphrase.  In fact, you may be asked
+<emphasis>twice</emphasis> because <command>nix-copy-closure</command>
+currently connects twice to the remote machine, first to get the set
+of paths missing on the target machine, and second to send the dump of
+those paths.  If this bothers you, use
+<command>ssh-agent</command>.</para>
+
+
+<refsection><title>Options</title>
+
+<variablelist>
+
+  <varlistentry><term><option>--to</option></term>
+
+    <listitem><para>Copy the closure of
+    <replaceable>paths</replaceable> from the local Nix store to the
+    Nix store on <replaceable>machine</replaceable>.  This is the
+    default.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--from</option></term>
+
+    <listitem><para>Copy the closure of
+    <replaceable>paths</replaceable> from the Nix store on
+    <replaceable>machine</replaceable> to the local Nix
+    store.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--sign</option></term>
+
+    <listitem><para>Let the sending machine cryptographically sign the
+    dump of each path with the key in
+    <filename><replaceable>sysconfdir</replaceable>/nix/signing-key.sec</filename>.
+    If the user on the target machine does not have direct access to
+    the Nix store (i.e., if the target machine has a multi-user Nix
+    installation), then the target machine will check the dump against
+    <filename><replaceable>sysconfdir</replaceable>/nix/signing-key.pub</filename>
+    before unpacking it in its Nix store.  This allows secure sharing
+    of store paths between untrusted users on two machines, provided
+    that there is a trust relation between the Nix installations on
+    both machines (namely, they have matching public/secret
+    keys).</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--gzip</option></term>
+
+    <listitem><para>Enable compression of the SSH
+    connection.</para></listitem>
+
+  </varlistentry>
+
+  <!--
+  <varlistentry><term><option>- -show-progress</option></term>
+
+    <listitem><para>Show the progress of each path's transfer as it's made.
+    This requires the <command>pv</command> utility to be in <envar>PATH</envar>.</para></listitem>
+
+  </varlistentry>
+  -->
+
+  <varlistentry><term><option>--include-outputs</option></term>
+
+    <listitem><para>Also copy the outputs of store derivations
+    included in the closure.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--use-substitutes</option> / <option>-s</option></term>
+
+    <listitem><para>Attempt to download missing paths on the target
+    machine using Nix’s substitute mechanism.  Any paths that cannot
+    be substituted on the target are still copied normally from the
+    source.  This is useful, for instance, if the connection between
+    the source and target machine is slow, but the connection between
+    the target machine and <literal>nixos.org</literal> (the default
+    binary cache server) is fast.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Environment variables</title>
+
+<variablelist>
+
+  <varlistentry><term><envar>NIX_SSHOPTS</envar></term>
+
+    <listitem><para>Additional options to be passed to
+    <command>ssh</command> on the command line.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
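+
+<para>For example (an illustrative invocation), to make
+<command>ssh</command> use a non-standard port:
+
+<screen>
+$ NIX_SSHOPTS="-p 2222" nix-copy-closure --to alice@itchy.labs $(type -tP firefox)</screen>
+
+</para>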
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<para>Copy Firefox with all its dependencies to a remote machine:
+
+<screen>
+$ nix-copy-closure --to alice@itchy.labs $(type -tP firefox)</screen>
+
+</para>
+
+<para>Copy Subversion from a remote machine and then install it into a
+user environment:
+
+<screen>
+$ nix-copy-closure --from alice@itchy.labs \
+    /nix/store/0dj0503hjxy5mbwlafv1rsbdiyx1gkdy-subversion-1.4.4
+$ nix-env -i /nix/store/0dj0503hjxy5mbwlafv1rsbdiyx1gkdy-subversion-1.4.4
+</screen>
+
+</para>
+
+</refsection>
+
+
+</refsection>
+
+</refentry>
diff --git a/doc/manual/nix-daemon.xml b/doc/manual/nix-daemon.xml
new file mode 100644
index 000000000000..c68605fd6566
--- /dev/null
+++ b/doc/manual/nix-daemon.xml
@@ -0,0 +1,34 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-daemon">
+
+<refmeta>
+  <refentrytitle>nix-daemon</refentrytitle>
+  <manvolnum>8</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-daemon</refname>
+  <refpurpose>Nix multi-user support daemon</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-daemon</command>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+
+<refsection><title>Description</title>
+
+<para>The Nix daemon is necessary in multi-user Nix installations.  It
+performs build actions and other operations on the Nix store on behalf
+of unprivileged users.</para>
+
+
+</refsection>
+
+</refentry>
diff --git a/doc/manual/nix-env.xml b/doc/manual/nix-env.xml
new file mode 100644
index 000000000000..c44020803451
--- /dev/null
+++ b/doc/manual/nix-env.xml
@@ -0,0 +1,1300 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-env">
+
+<refmeta>
+  <refentrytitle>nix-env</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-env</refname>
+  <refpurpose>manipulate or query Nix user environments</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-env</command>
+    <xi:include href="opt-common-syn.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(/db:nop/*)" />
+    <arg><option>--arg</option> <replaceable>name</replaceable> <replaceable>value</replaceable></arg>
+    <arg><option>--argstr</option> <replaceable>name</replaceable> <replaceable>value</replaceable></arg>
+    <arg>
+      <group choice='req'>
+        <arg choice='plain'><option>--file</option></arg>
+        <arg choice='plain'><option>-f</option></arg>
+      </group>
+      <replaceable>path</replaceable>
+    </arg>
+    <arg>
+      <group choice='req'>
+        <arg choice='plain'><option>--profile</option></arg>
+        <arg choice='plain'><option>-p</option></arg>
+      </group>
+      <replaceable>path</replaceable>
+    </arg>
+    <arg>
+      <arg choice='plain'><option>--system-filter</option></arg>
+      <replaceable>system</replaceable>
+    </arg>
+    <arg><option>--dry-run</option></arg>
+    <arg choice='plain'><replaceable>operation</replaceable></arg>
+    <arg rep='repeat'><replaceable>options</replaceable></arg>
+    <arg rep='repeat'><replaceable>arguments</replaceable></arg>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+
+<refsection><title>Description</title>
+
+<para>The command <command>nix-env</command> is used to manipulate Nix
+user environments.  User environments are sets of software packages
+available to a user at some point in time.  In other words, they are a
+synthesised view of the programs available in the Nix store.  There
+may be many user environments: different users can have different
+environments, and individual users can switch between different
+environments.</para>
+
+<para><command>nix-env</command> takes exactly one
+<emphasis>operation</emphasis> flag which indicates the subcommand to
+be performed.  These are documented below.</para>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Common options</title>
+
+<para>This section lists the options that are common to all
+operations.  These options are allowed for every subcommand, though
+they may not always have an effect.  <phrase condition="manual">See
+also <xref linkend="sec-common-options" />.</phrase></para>
+
+<variablelist>
+
+  <varlistentry><term><option>--file</option></term>
+    <term><option>-f</option></term>
+
+    <listitem><para>Specifies the Nix expression (designated below as
+    the <emphasis>active Nix expression</emphasis>) used by the
+    <option>--install</option>, <option>--upgrade</option>, and
+    <option>--query --available</option> operations to obtain
+    derivations.  The default is
+    <filename>~/.nix-defexpr</filename>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--profile</option></term>
+    <term><option>-p</option></term>
+
+    <listitem><para>Specifies the profile to be used by those
+    operations that operate on a profile (designated below as the
+    <emphasis>active profile</emphasis>).  A profile is a sequence of
+    user environments called <emphasis>generations</emphasis>, one of
+    which is the <emphasis>current
+    generation</emphasis>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--dry-run</option></term>
+
+    <listitem><para>For the <option>--install</option>,
+    <option>--upgrade</option>, <option>--uninstall</option>,
+    <option>--switch-generation</option>,
+    <option>--delete-generations</option> and
+    <option>--rollback</option> operations, this flag will cause
+    <command>nix-env</command> to print what
+    <emphasis>would</emphasis> be done if this flag had not been
+    specified, without actually doing it.</para>
+
+    <para><option>--dry-run</option> also prints out which paths will
+    be <link linkend="gloss-substitute">substituted</link> (i.e.,
+    downloaded) and which paths will be built from source (because no
+    substitute is available).</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--system-filter</option> <replaceable>system</replaceable></term>
+
+    <listitem><para>By default, operations such as <option>--query
+    --available</option> show derivations matching any platform.  This
+    option allows you to use derivations for the specified platform
+    <replaceable>system</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+<variablelist condition="manpage">
+  <xi:include href="opt-common.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(//db:variablelist[@xml:id='opt-common']/*)" />
+</variablelist>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Files</title>
+
+<variablelist>
+
+  <varlistentry><term><filename>~/.nix-defexpr</filename></term>
+
+    <listitem><para>A directory that contains the default Nix
+    expressions used by the <option>--install</option>,
+    <option>--upgrade</option>, and <option>--query
+    --available</option> operations to obtain derivations.  The
+    <option>--file</option> option may be used to override this
+    default.</para>
+
+    <para>The Nix expressions in this directory are combined into a
+    single set, with each file as an attribute that has the name of
+    the file.  Thus, if <filename>~/.nix-defexpr</filename> contains
+    two files, <filename>foo</filename> and <filename>bar</filename>,
+    then the default Nix expression will essentially be
+
+<programlisting>
+{
+  foo = import ~/.nix-defexpr/foo;
+  bar = import ~/.nix-defexpr/bar;
+}</programlisting>
+
+    </para>
+
+    <para>The command <command>nix-channel</command> places symlinks
+    to the downloaded Nix expressions from each subscribed channel in
+    this directory.</para>
+
+    </listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><filename>~/.nix-profile</filename></term>
+
+    <listitem><para>A symbolic link to the user's current profile.  By
+    default, this symlink points to
+    <filename><replaceable>prefix</replaceable>/var/nix/profiles/default</filename>.
+    The <envar>PATH</envar> environment variable should include
+    <filename>~/.nix-profile/bin</filename> for the user environment
+    to be visible to the user.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
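+
+<para>A minimal way to satisfy the <envar>PATH</envar> requirement
+mentioned above is to add a line like
+
+<programlisting>
+export PATH=$HOME/.nix-profile/bin:$PATH</programlisting>
+
+to your shell’s startup file (the <filename>nix.sh</filename> profile
+script shipped with Nix does this for you).</para>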
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection xml:id="rsec-nix-env-install"><title>Operation <option>--install</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-env</command>
+  <group choice='req'>
+    <arg choice='plain'><option>--install</option></arg>
+    <arg choice='plain'><option>-i</option></arg>
+  </group>
+  <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="opt-inst-syn.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(/db:nop/*)" />
+  <group choice='opt'>
+    <arg choice='plain'><option>--preserve-installed</option></arg>
+    <arg choice='plain'><option>-P</option></arg>
+  </group>
+  <group choice='opt'>
+    <arg choice='plain'><option>--remove-all</option></arg>
+    <arg choice='plain'><option>-r</option></arg>
+  </group>
+  <arg choice='plain' rep='repeat'><replaceable>args</replaceable></arg>
+</cmdsynopsis>
+
+</refsection>
+
+
+<refsection><title>Description</title>
+
+<para>The install operation creates a new user environment, based on
+the current generation of the active profile, to which a set of store
+paths described by <replaceable>args</replaceable> is added.  The
+arguments <replaceable>args</replaceable> map to store paths in a
+number of possible ways:
+
+<itemizedlist>
+
+  <listitem><para>By default, <replaceable>args</replaceable> is a set
+  of derivation names denoting derivations in the active Nix
+  expression.  These are realised, and the resulting output paths are
+  installed.  Currently installed derivations with a name equal to the
+  name of a derivation being added are removed unless the option
+  <option>--preserve-installed</option> is
+  specified.</para>
+
+  <para>If multiple derivations match a name in
+  <replaceable>args</replaceable> (e.g.,
+  <literal>gcc-3.3.6</literal> and <literal>gcc-4.1.1</literal>), then
+  the derivation with the highest <emphasis>priority</emphasis> is
+  used.  A derivation can define a priority by declaring the
+  <varname>meta.priority</varname> attribute.  This attribute should
+  be a number, with a higher value denoting a lower priority.  The
+  default priority is <literal>0</literal>.</para>
+
+  <para>If there are multiple matching derivations with the same
+  priority, then the derivation with the highest version will be
+  installed.</para>
+
+  <para>You can force the installation of multiple derivations with
+  the same name by being specific about the versions.  For instance,
+  <literal>nix-env -i gcc-3.3.6 gcc-4.1.1</literal> will install both
+  versions of GCC (and will probably cause a user environment
+  conflict!).</para></listitem>
+
+  <listitem><para>If <link
+  linkend='opt-attr'><option>--attr</option></link>
+  (<option>-A</option>) is specified, the arguments are
+  <emphasis>attribute paths</emphasis> that select attributes from the
+  top-level Nix expression.  This is faster than using derivation
+  names and unambiguous.  To find out the attribute paths of available
+  packages, use <literal>nix-env -qaP '*'</literal>.</para></listitem>
+
+  <listitem><para>If <option>--from-profile</option>
+  <replaceable>path</replaceable> is given,
+  <replaceable>args</replaceable> is a set of names denoting installed
+  store paths in the profile <replaceable>path</replaceable>.  This is
+  an easy way to copy user environment elements from one profile to
+  another.</para></listitem>
+
+  <listitem><para>If <option>--from-expression</option> is given,
+  <replaceable>args</replaceable> are Nix <link
+  linkend="ss-functions">functions</link> that are called with the
+  active Nix expression as their single argument.  The derivations
+  returned by those function calls are installed.  This allows
+  derivations to be specified in an unambiguous way, which is necessary
+  if there are multiple derivations with the same
+  name.</para></listitem>
+
+  <listitem><para>If <replaceable>args</replaceable> are store
+  derivations, then these are <link
+  linkend="rsec-nix-store-realise">realised</link>, and the resulting
+  output paths are installed.</para></listitem>
+
+  <listitem><para>If <replaceable>args</replaceable> are store paths
+  that are not store derivations, then these are <link
+  linkend="rsec-nix-store-realise">realised</link> and
+  installed.</para></listitem>
+
+</itemizedlist>
+
+</para>
+
+</refsection>
+
+
+<refsection><title>Flags</title>
+
+<variablelist>
+
+  <varlistentry><term><option>--prebuild-only</option> / <option>-b</option></term>
+
+    <listitem><para>Use only derivations for which a substitute is
+    registered, i.e., there is a pre-built binary available that can
+    be downloaded in lieu of building the derivation.  Thus, no
+    packages will be built from source.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--preserve-installed</option></term>
+    <term><option>-P</option></term>
+
+    <listitem><para>Do not remove derivations with a name matching one
+    of the derivations being installed.  Usually, trying to have two
+    versions of the same package installed in the same generation of a
+    profile will lead to an error in building the generation, due to
+    file name clashes between the two versions.  However, this is not
+    the case for all packages.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--remove-all</option></term>
+    <term><option>-r</option></term>
+
+    <listitem><para>Remove all previously installed packages first.
+    This is equivalent to running <literal>nix-env -e '*'</literal>
+    first, except that everything happens in a single
+    transaction.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection xml:id='refsec-nix-env-install-examples'><title>Examples</title>
+
+<para>To install a specific version of <command>gcc</command> from the
+active Nix expression:
+
+<screen>
+$ nix-env --install gcc-3.3.2
+installing `gcc-3.3.2'
+uninstalling `gcc-3.1'</screen>
+
+Note that the previously installed version is removed, since
+<option>--preserve-installed</option> was not specified.</para>
+
+<para>To install an arbitrary version:
+
+<screen>
+$ nix-env --install gcc
+installing `gcc-3.3.2'</screen>
+
+</para>
+
+<para>To install using a specific attribute:
+
+<screen>
+$ nix-env -i -A gcc40mips
+$ nix-env -i -A xorg.xorgserver</screen>
+
+</para>
+
+<para>To install all derivations in the Nix expression <filename>foo.nix</filename>:
+
+<screen>
+$ nix-env -f ~/foo.nix -i '*'</screen>
+
+</para>
+
+<para>To copy the store path with symbolic name <literal>gcc</literal>
+from another profile:
+
+<screen>
+$ nix-env -i --from-profile /nix/var/nix/profiles/foo gcc</screen>
+
+</para>
+
+<para>To install a specific store derivation (typically created by
+<command>nix-instantiate</command>):
+
+<screen>
+$ nix-env -i /nix/store/fibjb1bfbpm5mrsxc4mh2d8n37sxh91i-gcc-3.4.3.drv</screen>
+
+</para>
+
+<para>To install a specific output path:
+
+<screen>
+$ nix-env -i /nix/store/y3cgx0xj1p4iv9x0pnnmdhr8iyg741vk-gcc-3.4.3</screen>
+
+</para>
+
+<para>To install from a Nix expression specified on the command-line:
+
+<screen>
+$ nix-env -f ./foo.nix -i -E \
+    'f: (f {system = "i686-linux";}).subversionWithJava'</screen>
+
+I.e., this evaluates to <literal>(f: (f {system =
+"i686-linux";}).subversionWithJava) (import ./foo.nix)</literal>, thus
+selecting the <literal>subversionWithJava</literal> attribute from the
+set returned by calling the function defined in
+<filename>./foo.nix</filename>.</para>
+
+<para>A dry-run tells you which paths will be downloaded or built from
+source:
+
+<screen>
+$ nix-env -f pkgs/top-level/all-packages.nix -i f-spot --dry-run
+(dry run; not doing anything)
+installing `f-spot-0.0.10'
+the following derivations will be built:
+  /nix/store/0g63jv9aagwbgci4nnzs2dkxqz84kdja-libgnomeprintui-2.12.1.tar.bz2.drv
+  /nix/store/0gfarvxq6sannsdw8a1ir40j1ys2mqb4-ORBit2-2.14.2.tar.bz2.drv
+  /nix/store/0i9gs5zc04668qiy60ga2rc16abkj7g8-sqlite-2.8.17.drv
+  <replaceable>...</replaceable>
+the following paths will be substituted:
+  /nix/store/8zbipvm4gp9jfqh9nnk1n3bary1a37gs-perl-XML-Parser-2.34
+  /nix/store/b8a2bg7gnyvvvjjibp4axg9x1hzkw36c-mono-1.1.4
+  <replaceable>...</replaceable></screen>
+
+</para>
+
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection xml:id="rsec-nix-env-upgrade"><title>Operation <option>--upgrade</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-env</command>
+  <group choice='req'>
+    <arg choice='plain'><option>--upgrade</option></arg>
+    <arg choice='plain'><option>-u</option></arg>
+  </group>
+  <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="opt-inst-syn.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(/db:nop/*)" />
+  <group choice='opt'>
+    <arg choice='plain'><option>--lt</option></arg>
+    <arg choice='plain'><option>--leq</option></arg>
+    <arg choice='plain'><option>--eq</option></arg>
+    <arg choice='plain'><option>--always</option></arg>
+  </group>
+  <arg choice='plain' rep='repeat'><replaceable>args</replaceable></arg>
+</cmdsynopsis>
+
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The upgrade operation creates a new user environment, based on
+the current generation of the active profile, in which all store paths
+are replaced for which there are newer versions in the set of paths
+described by <replaceable>args</replaceable>.  Paths for which there
+are no newer versions are left untouched; this is not an error.  It is
+also not an error if an element of <replaceable>args</replaceable>
+matches no installed derivations.</para>
+
+<para>For a description of how <replaceable>args</replaceable> is
+mapped to a set of store paths, see <link
+linkend="rsec-nix-env-install"><option>--install</option></link>.  If
+<replaceable>args</replaceable> describes multiple store paths with
+the same symbolic name, only the one with the highest version is
+installed.</para>
+
+</refsection>
+
+<refsection><title>Flags</title>
+
+<variablelist>
+
+  <varlistentry><term><option>--lt</option></term>
+
+    <listitem><para>Only upgrade a derivation to newer versions.  This
+    is the default.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--leq</option></term>
+
+    <listitem><para>In addition to upgrading to newer versions, also
+    “upgrade” to derivations that have the same version.  Versions are
+    not a unique identification of a derivation, so there may be many
+    derivations that have the same version.  This flag may be useful
+    to force “synchronisation” between the installed and available
+    derivations.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--eq</option></term>
+
+    <listitem><para><emphasis>Only</emphasis> “upgrade” to derivations
+    that have the same version.  This may not seem very useful, but it
+    actually is, e.g., when there is a new release of Nixpkgs and you
+    want to replace installed applications with the same versions
+    built against newer dependencies (to reduce the number of
+    dependencies floating around on your system).</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--always</option></term>
+
+    <listitem><para>In addition to upgrading to newer versions, also
+    “upgrade” to derivations that have the same or a lower version.
+    I.e., derivations may actually be downgraded depending on what is
+    available in the active Nix expression.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+<para>For the other flags, see <link
+linkend="rsec-nix-env-install"><option>--install</option></link>.</para>
+
+</refsection>
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-env --upgrade gcc
+upgrading `gcc-3.3.1' to `gcc-3.4'
+
+$ nix-env -u gcc-3.3.2 --always <lineannotation>(switch to a specific version)</lineannotation>
+upgrading `gcc-3.4' to `gcc-3.3.2'
+
+$ nix-env --upgrade pan
+<lineannotation>(no upgrades available, so nothing happens)</lineannotation>
+
+$ nix-env -u '*' <lineannotation>(try to upgrade everything)</lineannotation>
+upgrading `hello-2.1.2' to `hello-2.1.3'
+upgrading `mozilla-1.2' to `mozilla-1.4'</screen>
+
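+<para>For instance, after switching to a new Nixpkgs release, the
+following will “upgrade” every installed package to the same version
+built against the new dependencies (which packages are actually
+replaced depends on what the new release provides):
+
+<screen>
+$ nix-env -u '*' --eq</screen>
+
+</para>
+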
+</refsection>
+
+<refsection xml:id="ssec-version-comparisons"><title>Versions</title>
+
+<para>The upgrade operation determines whether a derivation
+<varname>y</varname> is an upgrade of a derivation
+<varname>x</varname> by looking at their respective
+<literal>name</literal> attributes.  The names (e.g.,
+<literal>gcc-3.3.1</literal>) are split into two parts: the package
+name (<literal>gcc</literal>), and the version
+(<literal>3.3.1</literal>).  The version part starts after the first
+dash not followed by a letter.  <varname>y</varname> is considered an
+upgrade of <varname>x</varname> if their package names match, and the
+version of <varname>y</varname> is higher than that of
+<varname>x</varname>.</para>
+
+<para>The versions are compared by splitting them into contiguous
+components of numbers and letters.  E.g., <literal>3.3.1pre5</literal>
+is split into <literal>[3, 3, 1, "pre", 5]</literal>.  These lists are
+then compared lexicographically (from left to right).  Corresponding
+components <varname>a</varname> and <varname>b</varname> are compared
+as follows.  If they are both numbers, integer comparison is used.  If
+<varname>a</varname> is an empty string and <varname>b</varname> is a
+number, <varname>a</varname> is considered less than
+<varname>b</varname>.  The special string component
+<literal>pre</literal> (for <emphasis>pre-release</emphasis>) is
+considered to be less than other components.  String components are
+considered less than number components.  Otherwise, they are compared
+lexicographically (i.e., using case-sensitive string comparison).</para>
+
+<para>This is illustrated by the following examples:
+
+<screen>
+1.0 &lt; 2.3
+2.1 &lt; 2.3
+2.3 = 2.3
+2.5 > 2.3
+3.1 > 2.3
+2.3.1 > 2.3
+2.3.1 > 2.3a
+2.3pre1 &lt; 2.3
+2.3pre3 &lt; 2.3pre12
+2.3a &lt; 2.3c
+2.3pre1 &lt; 2.3c
+2.3pre1 &lt; 2.3q</screen>
+
+</para>
+
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--uninstall</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-env</command>
+  <group choice='req'>
+    <arg choice='plain'><option>--uninstall</option></arg>
+    <arg choice='plain'><option>-e</option></arg>
+  </group>
+  <arg choice='plain' rep='repeat'><replaceable>drvnames</replaceable></arg>
+</cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The uninstall operation creates a new user environment, based on
+the current generation of the active profile, from which the store
+paths designated by the symbolic names
+<replaceable>drvnames</replaceable> are removed.</para>
+
+</refsection>
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-env --uninstall gcc
+$ nix-env -e '*' <lineannotation>(remove everything)</lineannotation></screen>
+
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection xml:id="rsec-nix-env-set-flag"><title>Operation <option>--set-flag</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-env</command>
+  <arg choice='plain'><option>--set-flag</option></arg>
+  <arg choice='plain'><replaceable>name</replaceable></arg>
+  <arg choice='plain'><replaceable>value</replaceable></arg>
+  <arg choice='plain' rep='repeat'><replaceable>drvnames</replaceable></arg>
+</cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The <option>--set-flag</option> operation allows meta attributes
+of installed packages to be modified.  There are several attributes
+that can be usefully modified, because they affect the behaviour of
+<command>nix-env</command> or the user environment build
+script:
+
+<itemizedlist>
+
+  <listitem><para><varname>priority</varname> can be changed to
+  resolve filename clashes.  The user environment build script uses
+  the <varname>meta.priority</varname> attribute of derivations to
+  resolve filename collisions between packages.  Lower priority values
+  denote a higher priority.  For instance, the GCC wrapper package and
+  the Binutils package in Nixpkgs both have a file
+  <filename>bin/ld</filename>, so previously if you tried to install
+  both you would get a collision.  Now, on the other hand, the GCC
+  wrapper declares a higher priority than Binutils, so the former’s
+  <filename>bin/ld</filename> is symlinked in the user
+  environment.</para></listitem>
+
+  <listitem><para><varname>keep</varname> can be set to
+  <literal>true</literal> to prevent the package from being upgraded
+  or replaced.  This is useful if you want to hang on to an older
+  version of a package.</para></listitem>
+
+  <listitem><para><varname>active</varname> can be set to
+  <literal>false</literal> to “disable” the package.  That is, no
+  symlinks will be generated to the files of the package, but it
+  remains part of the profile (so it won’t be garbage-collected).  It
+  can be set back to <literal>true</literal> to re-enable the
+  package.</para></listitem>
+
+</itemizedlist>
+
+</para>
+
+</refsection>
+
+<refsection><title>Examples</title>
+
+<para>To prevent the currently installed Firefox from being upgraded:
+
+<screen>
+$ nix-env --set-flag keep true firefox</screen>
+
+After this, <command>nix-env -u</command> will ignore Firefox.</para>
+
+<para>To disable the currently installed Firefox, then install a new
+Firefox while the old remains part of the profile:
+
+<screen>
+$ nix-env -q \*
+firefox-2.0.0.9 <lineannotation>(the current one)</lineannotation>
+
+$ nix-env --preserve-installed -i firefox-2.0.0.11
+installing `firefox-2.0.0.11'
+building path(s) `/nix/store/myy0y59q3ig70dgq37jqwg1j0rsapzsl-user-environment'
+collision between `/nix/store/<replaceable>...</replaceable>-firefox-2.0.0.11/bin/firefox'
+  and `/nix/store/<replaceable>...</replaceable>-firefox-2.0.0.9/bin/firefox'.
+<lineannotation>(i.e., can’t have two active at the same time)</lineannotation>
+
+$ nix-env --set-flag active false firefox
+setting flag on `firefox-2.0.0.9'
+
+$ nix-env --preserve-installed -i firefox-2.0.0.11
+installing `firefox-2.0.0.11'
+
+$ nix-env -q \*
+firefox-2.0.0.11 <lineannotation>(the enabled one)</lineannotation>
+firefox-2.0.0.9 <lineannotation>(the disabled one)</lineannotation></screen>
+
+</para>
+
+<para>To make files from <literal>binutils</literal> take precedence
+over files from <literal>gcc</literal>:
+
+<screen>
+$ nix-env --set-flag priority 5 binutils
+$ nix-env --set-flag priority 10 gcc</screen>
+
+</para>
+
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--query</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-env</command>
+  <group choice='req'>
+    <arg choice='plain'><option>--query</option></arg>
+    <arg choice='plain'><option>-q</option></arg>
+  </group>
+  <group choice='opt'>
+    <arg choice='plain'><option>--installed</option></arg>
+    <arg choice='plain'><option>--available</option></arg>
+    <arg choice='plain'><option>-a</option></arg>
+  </group>
+
+  <sbr />
+
+  <arg>
+    <group choice='req'>
+      <arg choice='plain'><option>--status</option></arg>
+      <arg choice='plain'><option>-s</option></arg>
+    </group>
+  </arg>
+  <arg>
+    <group choice='req'>
+      <arg choice='plain'><option>--attr-path</option></arg>
+      <arg choice='plain'><option>-P</option></arg>
+    </group>
+  </arg>
+  <arg><option>--no-name</option></arg>
+  <arg>
+    <group choice='req'>
+      <arg choice='plain'><option>--compare-versions</option></arg>
+      <arg choice='plain'><option>-c</option></arg>
+    </group>
+  </arg>
+  <arg><option>--system</option></arg>
+  <arg><option>--drv-path</option></arg>
+  <arg><option>--out-path</option></arg>
+  <arg><option>--description</option></arg>
+  <arg><option>--meta</option></arg>
+
+  <sbr />
+
+  <arg><option>--xml</option></arg>
+  <arg><option>--json</option></arg>
+  <arg>
+    <group choice='req'>
+      <arg choice='plain'><option>--prebuilt-only</option></arg>
+      <arg choice='plain'><option>-b</option></arg>
+    </group>
+  </arg>
+
+  <arg>
+    <group choice='req'>
+      <arg choice='plain'><option>--attr</option></arg>
+      <arg choice='plain'><option>-A</option></arg>
+    </group>
+    <replaceable>attribute-path</replaceable>
+  </arg>
+
+  <sbr />
+
+  <arg choice='plain' rep='repeat'><replaceable>names</replaceable></arg>
+</cmdsynopsis>
+
+</refsection>
+
+
+<refsection><title>Description</title>
+
+<para>The query operation displays information about either the store
+paths that are installed in the current generation of the active
+profile (<option>--installed</option>), or the derivations that are
+available for installation in the active Nix expression
+(<option>--available</option>).  It only prints information about
+derivations whose symbolic name matches one of
+<replaceable>names</replaceable>.  The wildcard <literal>*</literal>
+shows all derivations.</para>
+
+<para>The derivations are sorted by their <literal>name</literal>
+attributes.</para>
+
+</refsection>
+
+
+<refsection><title>Source selection</title>
+
+<para>The following flags specify the set of things on which the query
+operates.</para>
+
+<variablelist>
+
+  <varlistentry><term><option>--installed</option></term>
+
+    <listitem><para>The query operates on the store paths that are
+    installed in the current generation of the active profile.  This
+    is the default.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--available</option></term>
+    <term><option>-a</option></term>
+
+    <listitem><para>The query operates on the derivations that are
+    available in the active Nix expression.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Queries</title>
+
+<para>The following flags specify what information to display about
+the selected derivations.  Multiple flags may be specified, in which
+case the information is shown in the order given here.  Note that the
+name of the derivation is shown unless <option>--no-name</option> is
+specified.</para>
+
+<!-- TODO: fix the terminology here; i.e., derivations, store paths,
+user environment elements, etc. -->
+
+<variablelist>
+
+  <varlistentry><term><option>--xml</option></term>
+
+    <listitem><para>Print the result in an XML representation suitable
+    for automatic processing by other tools.  The root element is
+    called <literal>items</literal>, which contains an
+    <literal>item</literal> element for each available or installed
+    derivation.  The fields discussed below are all stored in
+    attributes of the <literal>item</literal>
+    elements.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--json</option></term>
+
+    <listitem><para>Print the result in a JSON representation suitable
+    for automatic processing by other tools.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--prebuilt-only</option> / <option>-b</option></term>
+
+    <listitem><para>Show only derivations for which a substitute is
+    registered, i.e., there is a pre-built binary available that can
+    be downloaded in lieu of building the derivation.  Thus, this
+    shows all packages that probably can be installed
+    quickly.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--status</option></term>
+    <term><option>-s</option></term>
+
+    <listitem><para>Print the <emphasis>status</emphasis> of the
+    derivation.  The status consists of three characters.  The first
+    is <literal>I</literal> or <literal>-</literal>, indicating
+    whether the derivation is currently installed in the current
+    generation of the active profile.  This is by definition the case
+    for <option>--installed</option>, but not for
+    <option>--available</option>.  The second is <literal>P</literal>
+    or <literal>-</literal>, indicating whether the derivation is
+    present on the system.  This indicates whether installation of an
+    available derivation will require the derivation to be built.  The
+    third is <literal>S</literal> or <literal>-</literal>, indicating
+    whether a substitute is available for the
+    derivation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--attr-path</option></term>
+    <term><option>-P</option></term>
+
+    <listitem><para>Print the <emphasis>attribute path</emphasis> of
+    the derivation, which can be used to unambiguously select it using
+    the <link linkend="opt-attr"><option>--attr</option> option</link>
+    available in commands that install derivations like
+    <literal>nix-env --install</literal>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--no-name</option></term>
+
+    <listitem><para>Suppress printing of the <literal>name</literal>
+    attribute of each derivation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--compare-versions</option> /
+  <option>-c</option></term>
+
+    <listitem><para>Compare installed versions to available versions,
+    or vice versa (if <option>--available</option> is given).  This is
+    useful for quickly seeing whether upgrades for installed
+    packages are available in a Nix expression.  A column is added
+    with the following meaning:
+
+    <variablelist>
+
+      <varlistentry><term><literal>&lt;</literal> <replaceable>version</replaceable></term>
+
+        <listitem><para>A newer version of the package is available
+        or installed.</para></listitem>
+
+      </varlistentry>
+
+      <varlistentry><term><literal>=</literal> <replaceable>version</replaceable></term>
+
+        <listitem><para>At most the same version of the package is
+        available or installed.</para></listitem>
+
+      </varlistentry>
+
+      <varlistentry><term><literal>></literal> <replaceable>version</replaceable></term>
+
+        <listitem><para>Only older versions of the package are
+        available or installed.</para></listitem>
+
+      </varlistentry>
+
+      <varlistentry><term><literal>- ?</literal></term>
+
+        <listitem><para>No version of the package is available or
+        installed.</para></listitem>
+
+      </varlistentry>
+
+    </variablelist>
+
+    </para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--system</option></term>
+
+    <listitem><para>Print the <literal>system</literal> attribute of
+    the derivation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--drv-path</option></term>
+
+    <listitem><para>Print the path of the store
+    derivation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--out-path</option></term>
+
+    <listitem><para>Print the output path of the
+    derivation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--description</option></term>
+
+    <listitem><para>Print a short (one-line) description of the
+    derivation, if available.  The description is taken from the
+    <literal>meta.description</literal> attribute of the
+    derivation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--meta</option></term>
+
+    <listitem><para>Print all of the meta-attributes of the
+    derivation.  This option is only available with
+    <option>--xml</option>.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-env -q '*' <lineannotation>(show installed derivations)</lineannotation>
+bison-1.875c
+docbook-xml-4.2
+firefox-1.0.4
+MPlayer-1.0pre7
+ORBit2-2.8.3
+...
+
+$ nix-env -qa '*' <lineannotation>(show available derivations)</lineannotation>
+firefox-1.0.7
+GConf-2.4.0.1
+MPlayer-1.0pre7
+ORBit2-2.8.3
+...
+
+$ nix-env -qas '*' <lineannotation>(show status of available derivations)</lineannotation>
+-P- firefox-1.0.7   <lineannotation>(not installed but present)</lineannotation>
+--S GConf-2.4.0.1   <lineannotation>(not present, but there is a substitute for fast installation)</lineannotation>
+--S MPlayer-1.0pre7 <lineannotation>(i.e., this is not the installed MPlayer, even though the version is the same!)</lineannotation>
+IP- ORBit2-2.8.3    <lineannotation>(installed and by definition present)</lineannotation>
+...
+
+<lineannotation>(show available derivations in the Nix expression foo.nix)</lineannotation>
+$ nix-env -f ./foo.nix -qa '*'
+foo-1.2.3
+
+$ nix-env -qc '*' <lineannotation>(compare installed versions to what’s available)</lineannotation>
+<replaceable>...</replaceable>
+acrobat-reader-7.0 - ?      <lineannotation>(package is not available at all)</lineannotation>
+autoconf-2.59      = 2.59   <lineannotation>(same version)</lineannotation>
+firefox-1.0.4      &lt; 1.0.7  <lineannotation>(a more recent version is available)</lineannotation>
+<replaceable>...</replaceable>
+
+<lineannotation>(show info about a specific package, in XML)</lineannotation>
+$ nix-env -qa --xml --description firefox
+<![CDATA[<?xml version='1.0' encoding='utf-8'?>
+<items>
+  <item attrPath="0.0.firefoxWrapper"
+    description="Mozilla Firefox - the browser, reloaded (with various plugins)"
+    name="firefox-1.5.0.7" system="i686-linux" />
+</items>]]></screen>
+
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--switch-profile</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-env</command>
+  <group choice='req'>
+    <arg choice='plain'><option>--switch-profile</option></arg>
+    <arg choice='plain'><option>-S</option></arg>
+  </group>
+  <arg choice='req'><replaceable>path</replaceable></arg>
+</cmdsynopsis>
+
+</refsection>
+
+
+<refsection><title>Description</title>
+
+<para>This operation makes <replaceable>path</replaceable> the current
+profile for the user.  That is, the symlink
+<filename>~/.nix-profile</filename> is made to point to
+<replaceable>path</replaceable>.</para>
+
+</refsection>
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-env -S ~/my-profile</screen>
+
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--list-generations</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-env</command>
+  <arg choice='plain'><option>--list-generations</option></arg>
+</cmdsynopsis>
+
+</refsection>
+
+
+<refsection><title>Description</title>
+
+<para>This operation prints a list of all the currently existing
+generations for the active profile.  These may be switched to using
+the <option>--switch-generation</option> operation.  It also prints
+the creation date of the generation, and indicates the current
+generation.</para>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-env --list-generations
+  95   2004-02-06 11:48:24
+  96   2004-02-06 11:49:01
+  97   2004-02-06 16:22:45
+  98   2004-02-06 16:24:33   (current)</screen>
+
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--delete-generations</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-env</command>
+  <arg choice='plain'><option>--delete-generations</option></arg>
+  <arg choice='plain' rep='repeat'><replaceable>generations</replaceable></arg>
+</cmdsynopsis>
+
+</refsection>
+
+
+<refsection><title>Description</title>
+
+<para>This operation deletes the specified generations of the current
+profile.  The generations can be a list of generation numbers, the
+special value <literal>old</literal> to delete all non-current
+generations, or a value such as <literal>30d</literal> to delete all
+generations older than the specified number of days (except for the
+generation that was active at that point in time).
+Periodically deleting old generations is important to make garbage
+collection effective.</para>
+
+</refsection>
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-env --delete-generations 3 4 8
+
+$ nix-env --delete-generations 30d
+
+$ nix-env -p other_profile --delete-generations old</screen>
+
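+<para>Note that deleting old generations only removes the profile’s
+references to those user environments; the disk space is not actually
+freed until the garbage collector is run afterwards, for instance:
+
+<screen>
+$ nix-env --delete-generations old
+$ nix-collect-garbage</screen>
+
+</para>
+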
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--switch-generation</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-env</command>
+  <group choice='req'>
+    <arg choice='plain'><option>--switch-generation</option></arg>
+    <arg choice='plain'><option>-G</option></arg>
+  </group>
+  <arg choice='req'><replaceable>generation</replaceable></arg>
+</cmdsynopsis>
+
+</refsection>
+
+
+<refsection><title>Description</title>
+
+<para>This operation makes generation number
+<replaceable>generation</replaceable> the current generation of the
+active profile.  That is, if the
+<filename><replaceable>profile</replaceable></filename> is the path to
+the active profile, then the symlink
+<filename><replaceable>profile</replaceable></filename> is made to
+point to
+<filename><replaceable>profile</replaceable>-<replaceable>generation</replaceable>-link</filename>,
+which is in turn a symlink to the actual user environment in the Nix
+store.</para>
+
+<para>Switching will fail if the specified generation does not exist.</para>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-env -G 42
+switching from generation 50 to 42</screen>
+
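+<para>The resulting symlink chain can be inspected directly; for
+example (illustrative output, assuming the default profile location
+<filename>/nix/var/nix/profiles/default</filename>):
+
+<screen>
+$ readlink /nix/var/nix/profiles/default
+default-42-link
+
+$ readlink /nix/var/nix/profiles/default-42-link
+/nix/store/<replaceable>...</replaceable>-user-environment</screen>
+
+</para>
+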
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--rollback</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-env</command>
+  <arg choice='plain'><option>--rollback</option></arg>
+</cmdsynopsis>
+
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>This operation switches to the “previous” generation of the
+active profile, that is, the highest numbered generation lower than
+the current generation, if it exists.  It is just a convenience
+wrapper around <option>--list-generations</option> and
+<option>--switch-generation</option>.</para>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-env --rollback
+switching from generation 92 to 91
+
+$ nix-env --rollback
+error: no generation older than the current (91) exists</screen>
+
+</refsection>
+
+</refsection>
+
+
+<refsection condition="manpage"><title>Environment variables</title>
+
+<variablelist>
+  
+  <varlistentry><term><envar>NIX_PROFILE</envar></term>
+
+    <listitem><para>Location of the Nix profile.  Defaults to the
+    target of the symlink <filename>~/.nix-profile</filename>, if it
+    exists, or <filename>/nix/var/nix/profiles/default</filename>
+    otherwise.</para></listitem>
+
+  </varlistentry>
+
+  <xi:include href="env-common.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(//db:variablelist[@xml:id='env-common']/*)" />
+</variablelist>
+
+</refsection>
+  
+
+</refentry>
diff --git a/doc/manual/nix-hash.xml b/doc/manual/nix-hash.xml
new file mode 100644
index 000000000000..af4e361ff8d4
--- /dev/null
+++ b/doc/manual/nix-hash.xml
@@ -0,0 +1,164 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-hash">
+  
+<refmeta>
+  <refentrytitle>nix-hash</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-hash</refname>
+  <refpurpose>compute the cryptographic hash of a path</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-hash</command>
+    <arg><option>--flat</option></arg>
+    <arg><option>--base32</option></arg>
+    <arg><option>--truncate</option></arg>
+    <arg><option>--type</option> <replaceable>hashAlgo</replaceable></arg>
+    <arg choice='plain' rep='repeat'><replaceable>path</replaceable></arg>
+  </cmdsynopsis>
+  <cmdsynopsis>
+    <command>nix-hash</command>
+    <arg choice='plain'><option>--to-base16</option></arg>
+    <arg choice='plain' rep='repeat'><replaceable>hash</replaceable></arg>
+  </cmdsynopsis>
+  <cmdsynopsis>
+    <command>nix-hash</command>
+    <arg choice='plain'><option>--to-base32</option></arg>
+    <arg choice='plain' rep='repeat'><replaceable>hash</replaceable></arg>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+
+<refsection><title>Description</title>
+
+<para>The command <command>nix-hash</command> computes the
+cryptographic hash of the contents of each
+<replaceable>path</replaceable> and prints it on standard output.  By
+default, it computes an MD5 hash, but other hash algorithms are
+available as well.  The hash is printed in hexadecimal.</para>
+
+<para>The hash is computed over a <emphasis>serialisation</emphasis>
+of each path: a dump of the file system tree rooted at the path.  This
+allows directories and symlinks to be hashed as well as regular files.
+The dump is in the <emphasis>NAR format</emphasis> produced by <link
+linkend="refsec-nix-store-dump"><command>nix-store</command>
+<option>--dump</option></link>.  Thus, <literal>nix-hash
+<replaceable>path</replaceable></literal> yields the same
+cryptographic hash as <literal>nix-store --dump
+<replaceable>path</replaceable> | md5sum</literal>.</para>
+
+</refsection>
+
+
+<refsection><title>Options</title>
+
+<variablelist>
+  
+  <varlistentry><term><option>--flat</option></term>
+
+    <listitem><para>Print the cryptographic hash of the contents of
+    each regular file <replaceable>path</replaceable>.  That is, do
+    not compute the hash over the dump of
+    <replaceable>path</replaceable>.  The result is identical to that
+    produced by the GNU commands <command>md5sum</command> and
+    <command>sha1sum</command>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--base32</option></term>
+
+    <listitem><para>Print the hash in a base-32 representation rather
+    than hexadecimal.  This base-32 representation is more compact and
+    can be used in Nix expressions (such as in calls to
+    <function>fetchurl</function>).</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--truncate</option></term>
+
+    <listitem><para>Truncate hashes longer than 160 bits (such as
+    SHA-256) to 160 bits.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--type</option> <replaceable>hashAlgo</replaceable></term>
+
+    <listitem><para>Use the specified cryptographic hash algorithm,
+    which can be one of <literal>md5</literal>,
+    <literal>sha1</literal>, and
+    <literal>sha256</literal>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--to-base16</option></term>
+
+    <listitem><para>Don’t hash anything, but convert the base-32 hash
+    representation <replaceable>hash</replaceable> to
+    hexadecimal.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--to-base32</option></term>
+
+    <listitem><para>Don’t hash anything, but convert the hexadecimal
+    hash representation <replaceable>hash</replaceable> to
+    base-32.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<para>Computing hashes:
+
+<screen>
+$ mkdir test
+$ echo "hello" > test/world
+
+$ nix-hash test/ <lineannotation>(MD5 hash; default)</lineannotation>
+8179d3caeff1869b5ba1744e5a245c04
+
+$ nix-store --dump test/ | md5sum <lineannotation>(for comparison)</lineannotation>
+8179d3caeff1869b5ba1744e5a245c04  -
+
+$ nix-hash --type sha1 test/
+e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6
+
+$ nix-hash --type sha1 --base32 test/
+nvd61k9nalji1zl9rrdfmsmvyyjqpzg4
+
+$ nix-hash --type sha256 --flat test/
+error: reading file `test/': Is a directory
+
+$ nix-hash --type sha256 --flat test/world
+5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03</screen>
+
+</para>
+
+<para>Converting between hexadecimal and base-32:
+
+<screen>
+$ nix-hash --type sha1 --to-base32 e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6
+nvd61k9nalji1zl9rrdfmsmvyyjqpzg4
+
+$ nix-hash --type sha1 --to-base16 nvd61k9nalji1zl9rrdfmsmvyyjqpzg4
+e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6</screen>
+
+</para>
+
+</refsection>
+
+
+</refentry>
diff --git a/doc/manual/nix-install-package.xml b/doc/manual/nix-install-package.xml
new file mode 100644
index 000000000000..54a66348f013
--- /dev/null
+++ b/doc/manual/nix-install-package.xml
@@ -0,0 +1,198 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-install-package">
+  
+<refmeta>
+  <refentrytitle>nix-install-package</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-install-package</refname>
+  <refpurpose>install a Nix Package file</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-install-package</command>
+    <arg><option>--non-interactive</option></arg>
+    <arg>
+      <group choice='req'>
+        <arg choice='plain'><option>--profile</option></arg>
+        <arg choice='plain'><option>-p</option></arg>
+      </group>
+      <replaceable>path</replaceable>
+    </arg>
+    <sbr />
+    <group choice='req'>
+      <arg choice='req'>
+        <option>--url</option>
+        <arg choice='plain'><replaceable>url</replaceable></arg>
+      </arg>
+      <arg choice='req'>
+        <arg choice='plain'><replaceable>file</replaceable></arg>
+      </arg>
+    </group>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+
+<refsection><title>Description</title>
+
+<para>The command <command>nix-install-package</command> interactively
+installs a Nix Package file (<filename>*.nixpkg</filename>), which is
+a small file that contains a store path to be installed along with the
+URL of a <link linkend="sec-nix-push"><command>nix-push</command>
+manifest</link>.  The Nix Package file is either
+<replaceable>file</replaceable>, or automatically downloaded from
+<replaceable>url</replaceable> if the <option>--url</option> switch is
+used.</para>
+
+<para><command>nix-install-package</command> is used in <link
+linkend="sec-one-click">one-click installs</link> to download and
+install pre-built binary packages with all necessary dependencies.
+<command>nix-install-package</command> is intended to be associated
+with the MIME type <literal>application/nix-package</literal> in a web
+browser so that it is invoked automatically when you click on
+<filename>*.nixpkg</filename> files.  When invoked, it restarts itself
+in a terminal window (since otherwise it would be invisible when run
+from a browser), asks the user to confirm whether to install the
+package, and if so downloads and installs the package into the user’s
+current profile.</para>
+
+<para>To obtain a window, <command>nix-install-package</command> tries
+to restart itself with <command>xterm</command>,
+<command>konsole</command> and
+<command>gnome-terminal</command>.</para>
+
+</refsection>
+
+
+<refsection><title>Options</title>
+
+<variablelist>
+  
+  <varlistentry><term><option>--non-interactive</option></term>
+
+    <listitem><para>Do not open a new terminal window and do not ask
+    for confirmation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--profile</option></term>
+    <term><option>-p</option></term>
+
+    <listitem><para>Install the package into the specified profile
+    rather than the user’s current profile.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<para>To install <filename>subversion-1.4.0.nixpkg</filename> into the
+user’s current profile, without any prompting:
+
+<screen>
+$ nix-install-package --non-interactive subversion-1.4.0.nixpkg</screen>
+
+</para>
+
+<para>To install the same package from some URL into a different
+profile:
+
+<screen>
+$ nix-install-package --non-interactive -p /nix/var/nix/profiles/eelco \
+    --url http://nix.cs.uu.nl/dist/nix/nixpkgs-0.10pre6622/pkgs/subversion-1.4.0-i686-linux.nixpkg</screen>
+
+</para>
+
+</refsection>
+
+
+<refsection><title>Format of <literal>nixpkg</literal> files</title>
+
+<para>A Nix Package file consists of a single line with the following
+format:
+
+<screen>
+NIXPKG1 <replaceable>manifestURL</replaceable> <replaceable>name</replaceable> <replaceable>system</replaceable> <replaceable>drvPath</replaceable> <replaceable>outPath</replaceable></screen>
+
+The elements are as follows:
+
+<variablelist>
+
+  <varlistentry><term><literal>NIXPKG1</literal></term>
+  
+    <listitem><para>The version of the Nix Package
+    file.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><replaceable>manifestURL</replaceable></term>
+  
+    <listitem><para>The manifest to be pulled by
+    <command>nix-pull</command>.  The manifest must contain
+    <replaceable>outPath</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><replaceable>name</replaceable></term>
+  
+    <listitem><para>The symbolic name and version of the
+    package.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><replaceable>system</replaceable></term>
+  
+    <listitem><para>The platform identifier of the platform for which
+    this binary package is intended.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><replaceable>drvPath</replaceable></term>
+  
+    <listitem><para>The path in the Nix store of the derivation from
+    which <replaceable>outPath</replaceable> was built.  Not currently
+    used.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><replaceable>outPath</replaceable></term>
+  
+    <listitem><para>The path in the Nix store of the package.  After
+    <command>nix-install-package</command> has obtained the manifest
+    from <replaceable>manifestURL</replaceable>, it performs a
+    <literal>nix-env -i</literal> <replaceable>outPath</replaceable>
+    to install the binary package.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+  
+</para>
+
+<para>An example follows:
+
+<screen>
+NIXPKG1 http://.../nixpkgs-0.10pre6622/MANIFEST subversion-1.4.0 i686-darwin \
+  /nix/store/4kh60jkp...-subversion-1.4.0.drv \
+  /nix/store/nkw7wpgb...-subversion-1.4.0</screen>
+
+(The line breaks (<literal>\</literal>) are for presentation purposes
+and not part of the actual file.)
+
+</para>
+
+</refsection>
+
+
+</refentry>
diff --git a/doc/manual/nix-instantiate.xml b/doc/manual/nix-instantiate.xml
new file mode 100644
index 000000000000..936f154dde56
--- /dev/null
+++ b/doc/manual/nix-instantiate.xml
@@ -0,0 +1,262 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-instantiate">
+
+<refmeta>
+  <refentrytitle>nix-instantiate</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-instantiate</refname>
+  <refpurpose>instantiate store derivations from Nix expressions</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-instantiate</command>
+    <group>
+      <arg choice='plain'><option>--parse</option></arg>
+      <arg choice='plain'>
+        <option>--eval</option>
+        <arg><option>--strict</option></arg>
+        <arg><option>--xml</option></arg>
+      </arg>
+    </group>
+    <arg><option>--read-write-mode</option></arg>
+    <arg><option>--arg</option> <replaceable>name</replaceable> <replaceable>value</replaceable></arg>
+    <arg>
+      <group choice='req'>
+        <arg choice='plain'><option>--attr</option></arg>
+        <arg choice='plain'><option>-A</option></arg>
+      </group>
+      <replaceable>attrPath</replaceable>
+    </arg>
+    <arg><option>--add-root</option> <replaceable>path</replaceable></arg>
+    <arg><option>--indirect</option></arg>
+    <group choice='req'>
+      <arg choice='plain'><option>--expr</option></arg>
+      <arg choice='plain'><option>-E</option></arg>
+    </group>
+    <arg choice='plain' rep='repeat'><replaceable>files</replaceable></arg>
+    <sbr/>
+    <command>nix-instantiate</command>
+    <arg choice='plain'><option>--find-file</option></arg>
+    <arg choice='plain' rep='repeat'><replaceable>files</replaceable></arg>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+
+<refsection><title>Description</title>
+
+<para>The command <command>nix-instantiate</command> generates <link
+linkend="gloss-derivation">store derivations</link> from (high-level)
+Nix expressions.  It evaluates the Nix expressions in each of
+<replaceable>files</replaceable> (which defaults to
+<filename>./default.nix</filename>).  Each top-level expression
+should evaluate to a derivation, a list of derivations, or a set of
+derivations.  The paths of the resulting store derivations are printed
+on standard output.</para>
+
+<para>If <replaceable>files</replaceable> is the character
+<literal>-</literal>, then a Nix expression will be read from standard
+input.</para>
+
+<para condition="manual">See also <xref linkend="sec-common-options"
+/> for a list of common options.</para>
+
+</refsection>
+
+
+<refsection><title>Options</title>
+
+<variablelist>
+
+  <varlistentry>
+    <term><option>--add-root</option> <replaceable>path</replaceable></term>
+    <term><option>--indirect</option></term>
+
+    <listitem><para>See the <link linkend="opt-add-root">corresponding
+    options</link> in <command>nix-store</command>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--parse</option></term>
+
+    <listitem><para>Just parse the input files, and print their
+    abstract syntax trees on standard output in ATerm
+    format.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--eval</option></term>
+
+    <listitem><para>Just parse and evaluate the input files, and print
+    the resulting values on standard output.  No instantiation of
+    store derivations takes place.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--find-file</option></term>
+
+    <listitem><para>Look up the given files in Nix’s search path (as
+    specified by the <envar>NIX_PATH</envar> environment variable).
+    If found, print the corresponding absolute paths on standard
+    output.  For instance, if <envar>NIX_PATH</envar> is
+    <literal>nixpkgs=/home/alice/nixpkgs</literal>, then
+    <literal>nix-instantiate --find-file nixpkgs/default.nix</literal>
+    will print
+    <literal>/home/alice/nixpkgs/default.nix</literal>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--xml</option></term>
+
+    <listitem><para>When used with <option>--parse</option> and
+    <option>--eval</option>, print the resulting expression as an
+    XML representation of the abstract syntax tree rather than as an
+    ATerm.  The schema is the same as that used by the <link
+    linkend="builtin-toXML"><function>toXML</function>
+    built-in</link>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--json</option></term>
+
+    <listitem><para>When used with <option>--parse</option> and
+    <option>--eval</option>, print the resulting expression as a
+    JSON representation of the abstract syntax tree rather than as an
+    ATerm.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--strict</option></term>
+
+    <listitem><para>When used with <option>--eval</option>,
+    recursively evaluate list elements and attributes.  Normally, such
+    sub-expressions are left unevaluated (since the Nix expression
+    language is lazy).</para>
+
+    <warning><para>This option can cause non-termination, because lazy
+    data structures can be infinitely large.</para></warning>
+
+    </listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--read-write-mode</option></term>
+
+    <listitem><para>When used with <option>--eval</option>, perform
+    evaluation in read/write mode so Nix language features that
+    require it will still work (at the cost of needing to do
+    instantiation of every evaluated derivation).</para>
+
+    </listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+<variablelist condition="manpage">
+  <xi:include href="opt-common.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(//db:variablelist[@xml:id='opt-common']/*)" />
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<para>Instantiating store derivations from a Nix expression, and
+building them using <command>nix-store</command>:
+
+<screen>
+$ nix-instantiate test.nix <lineannotation>(instantiate)</lineannotation>
+/nix/store/cigxbmvy6dzix98dxxh9b6shg7ar5bvs-perl-BerkeleyDB-0.26.drv
+
+$ nix-store -r $(nix-instantiate test.nix) <lineannotation>(build)</lineannotation>
+<replaceable>...</replaceable>
+/nix/store/qhqk4n8ci095g3sdp93x7rgwyh9rdvgk-perl-BerkeleyDB-0.26 <lineannotation>(output path)</lineannotation>
+
+$ ls -l /nix/store/qhqk4n8ci095g3sdp93x7rgwyh9rdvgk-perl-BerkeleyDB-0.26
+dr-xr-xr-x    2 eelco    users        4096 1970-01-01 01:00 lib
+...</screen>
+
+</para>
+
+<para>You can also give a Nix expression on the command line:
+
+<screen>
+$ nix-instantiate -E 'with import &lt;nixpkgs> { }; hello'
+/nix/store/j8s4zyv75a724q38cb0r87rlczaiag4y-hello-2.8.drv
+</screen>
+
+This is equivalent to:
+
+<screen>
+$ nix-instantiate '&lt;nixpkgs>' -A hello
+</screen>
+
+</para>
+
+<para>Parsing and evaluating Nix expressions:
+
+<screen>
+$ nix-instantiate --parse -E '1 + 2'
+1 + 2
+
+$ nix-instantiate --eval -E '1 + 2'
+3
+
+$ nix-instantiate --eval --xml -E '1 + 2'
+<![CDATA[<?xml version='1.0' encoding='utf-8'?>
+<expr>
+  <int value="3" />
+</expr>]]></screen>
+
+</para>
+
+<para>The difference between non-strict and strict evaluation:
+
+<screen>
+$ nix-instantiate --eval --xml -E 'rec { x = "foo"; y = x; }'
+<replaceable>...</replaceable><![CDATA[
+  <attr name="x">
+    <string value="foo" />
+  </attr>
+  <attr name="y">
+    <unevaluated />
+  </attr>]]>
+<replaceable>...</replaceable></screen>
+
+Note that <varname>y</varname> is left unevaluated (the XML
+representation doesn’t attempt to show non-normal forms).
+
+<screen>
+$ nix-instantiate --eval --xml --strict -E 'rec { x = "foo"; y = x; }'
+<replaceable>...</replaceable><![CDATA[
+  <attr name="x">
+    <string value="foo" />
+  </attr>
+  <attr name="y">
+    <string value="foo" />
+  </attr>]]>
+<replaceable>...</replaceable></screen>
+
+</para>
+
+</refsection>
+
+
+<refsection condition="manpage"><title>Environment variables</title>
+
+<variablelist>
+  <xi:include href="env-common.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(//db:variablelist[@xml:id='env-common']/*)" />
+</variablelist>
+
+</refsection>
+
+
+</refentry>
diff --git a/doc/manual/nix-lang-ref.xml b/doc/manual/nix-lang-ref.xml
new file mode 100644
index 000000000000..86273ac3d016
--- /dev/null
+++ b/doc/manual/nix-lang-ref.xml
@@ -0,0 +1,182 @@
+<appendix>
+  <title>Nix Language Reference</title>
+
+  <sect1>
+    <title>Grammar</title>
+
+    <productionset>
+      <title>Expressions</title>
+      
+      <production id="nix.expr">
+        <lhs>Expr</lhs>
+        <rhs>
+          <nonterminal def="#nix.expr_function" />
+        </rhs>
+      </production>
+      
+      <production id="nix.expr_function">
+        <lhs>ExprFunction</lhs>
+        <rhs>
+          '{' <nonterminal def="#nix.formals" /> '}' ':' <nonterminal def="#nix.expr_function" />
+          <sbr />|
+          <nonterminal def="#nix.expr_assert" />
+        </rhs>
+      </production>
+      
+      <production id="nix.expr_assert">
+        <lhs>ExprAssert</lhs>
+        <rhs>
+          'assert' <nonterminal def="#nix.expr" /> ';' <nonterminal def="#nix.expr_assert" />
+          <sbr />|
+          <nonterminal def="#nix.expr_if" />
+        </rhs>
+      </production>
+      
+      <production id="nix.expr_if">
+        <lhs>ExprIf</lhs>
+        <rhs>
+          'if' <nonterminal def="#nix.expr" /> 'then' <nonterminal def="#nix.expr" />
+          'else' <nonterminal def="#nix.expr" />
+          <sbr />|
+          <nonterminal def="#nix.expr_op" />
+        </rhs>
+      </production>
+      
+      <production id="nix.expr_op">
+        <lhs>ExprOp</lhs>
+        <rhs>
+          '!' <nonterminal def="#nix.expr_op" />
+          <sbr />|
+          <nonterminal def="#nix.expr_op" /> '==' <nonterminal def="#nix.expr_op" />
+          <sbr />|
+          <nonterminal def="#nix.expr_op" /> '!=' <nonterminal def="#nix.expr_op" />
+          <sbr />|
+          <nonterminal def="#nix.expr_op" /> '&amp;&amp;' <nonterminal def="#nix.expr_op" />
+          <sbr />|
+          <nonterminal def="#nix.expr_op" /> '||' <nonterminal def="#nix.expr_op" />
+          <sbr />|
+          <nonterminal def="#nix.expr_op" /> '->' <nonterminal def="#nix.expr_op" />
+          <sbr />|
+          <nonterminal def="#nix.expr_op" /> '//' <nonterminal def="#nix.expr_op" />
+          <sbr />|
+          <nonterminal def="#nix.expr_op" /> '~' <nonterminal def="#nix.expr_op" />
+          <sbr />|
+          <nonterminal def="#nix.expr_op" /> '?' <nonterminal def="#nix.id" />
+          <sbr />|
+          <nonterminal def="#nix.expr_app" />
+        </rhs>
+      </production>
+      
+      <production id="nix.expr_app">
+        <lhs>ExprApp</lhs>
+        <rhs>
+          <nonterminal def="#nix.expr_app" /> '.' <nonterminal def="#nix.expr_select" />
+          <sbr />|
+          <nonterminal def="#nix.expr_select" />
+        </rhs>
+      </production>
+      
+      <production id="nix.expr_select">
+        <lhs>ExprSelect</lhs>
+        <rhs>
+          <nonterminal def="#nix.expr_select" /> <nonterminal def="#nix.id" />
+          <sbr />|
+          <nonterminal def="#nix.expr_simple" />
+        </rhs>
+      </production>
+      
+      <production id="nix.expr_simple">
+        <lhs>ExprSimple</lhs>
+        <rhs>
+          <nonterminal def="#nix.id" /> |
+          <nonterminal def="#nix.int" /> |
+          <nonterminal def="#nix.str" /> |
+          <nonterminal def="#nix.path" /> |
+          <nonterminal def="#nix.uri" />
+          <sbr />|
+          'true' | 'false' | 'null'
+          <sbr />|
+          '(' <nonterminal def="#nix.expr" /> ')'
+          <sbr />|
+          '{' <nonterminal def="#nix.bind" />* '}'
+          <sbr />|
+          'let' '{' <nonterminal def="#nix.bind" />* '}'
+          <sbr />|
+          'rec' '{' <nonterminal def="#nix.bind" />* '}'
+          <sbr />|
+          '[' <nonterminal def="#nix.expr_select" />* ']'
+        </rhs>
+      </production>
+
+      <production id="nix.bind">
+        <lhs>Bind</lhs>
+        <rhs>
+          <nonterminal def="#nix.id" /> '=' <nonterminal def="#nix.expr" /> ';'
+          <sbr />|
+          'inherit' ('(' <nonterminal def="#nix.expr" /> ')')? <nonterminal def="#nix.id" />* ';'
+        </rhs>
+      </production>
+
+      <production id="nix.formals">
+        <lhs>Formals</lhs>
+        <rhs>
+          <nonterminal def="#nix.formal" /> ',' <nonterminal def="#nix.formals" />
+          | <nonterminal def="#nix.formal" />
+        </rhs>
+      </production>
+          
+      <production id="nix.formal">
+        <lhs>Formal</lhs>
+        <rhs>
+          <nonterminal def="#nix.id" />
+          <sbr />|
+          <nonterminal def="#nix.id" /> '?' <nonterminal def="#nix.expr" />
+        </rhs>
+      </production>
+          
+    </productionset>
+
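+    <para>As a small, hypothetical illustration, the following
+    expression exercises several of the productions above (a function
+    with formals and default values, <literal>assert</literal>, an
+    <literal>if</literal> expression, operators, a recursive attribute
+    set, <literal>inherit</literal>, and a list):
+
+<programlisting>
+{system ? "i686-linux", enableFoo ? false}:
+
+assert system == "i686-linux" || system == "x86_64-linux";
+
+rec {
+  name = "example-0.1";
+  flags = if enableFoo then ["--enable-foo"] else [];
+  inherit system;
+}</programlisting>
+
+    </para>
+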
+    <productionset>
+      <title>Terminals</title>
+
+      <production id="nix.id">
+        <lhs>Id</lhs>
+        <rhs>[a-zA-Z\_][a-zA-Z0-9\_\']*</rhs>
+      </production>
+    
+      <production id="nix.int">
+        <lhs>Int</lhs>
+        <rhs>[0-9]+</rhs>
+      </production>
+    
+      <production id="nix.str">
+        <lhs>Str</lhs>
+        <rhs>\"[^\n\"]*\"</rhs>
+      </production>
+
+      <production id="nix.path">
+        <lhs>Path</lhs>
+        <rhs>[a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+</rhs>
+      </production>
+    
+      <production id="nix.uri">
+        <lhs>Uri</lhs>
+        <rhs>[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&amp;\=\+\$\,\-\_\.\!\~\*\']+</rhs>
+      </production>
+
+      <production id="nix.ws">
+        <lhs>Whitespace</lhs>
+        <rhs>
+          [ \t\n]+
+          <sbr />|
+          \#[^\n]*
+          <sbr />|
+          \/\*(.|\n)*\*\/
+        </rhs>
+      </production>
+
+    </productionset>
+    
+  </sect1>
+
+</appendix>
diff --git a/doc/manual/nix-prefetch-url.xml b/doc/manual/nix-prefetch-url.xml
new file mode 100644
index 000000000000..c416e675b05c
--- /dev/null
+++ b/doc/manual/nix-prefetch-url.xml
@@ -0,0 +1,93 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-prefetch-url">
+  
+<refmeta>
+  <refentrytitle>nix-prefetch-url</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-prefetch-url</refname>
+  <refpurpose>copy a file from a URL into the store and print its hash</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-prefetch-url</command>
+    <arg><option>--type</option> <replaceable>hashAlgo</replaceable></arg>
+    <arg choice='plain'><replaceable>url</replaceable></arg>
+    <arg><replaceable>hash</replaceable></arg>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+
+<refsection><title>Description</title>
+
+<para>The command <command>nix-prefetch-url</command> downloads the
+file referenced by the URL <replaceable>url</replaceable>, prints its
+cryptographic hash, and copies it into the Nix store.  The file name
+in the store is
+<filename><replaceable>hash</replaceable>-<replaceable>baseName</replaceable></filename>,
+where <replaceable>baseName</replaceable> is everything following the
+final slash in <replaceable>url</replaceable>.</para>
+
+<para>This command is just a convenience for Nix expression writers.
+Often a Nix expression fetches some source distribution from the
+network using the <literal>fetchurl</literal> expression contained in
+Nixpkgs.  However, <literal>fetchurl</literal> requires a
+cryptographic hash.  If you don't know the hash, you would have to
+download the file first, and then <literal>fetchurl</literal> would
+download it again when you build your Nix expression.  Since
+<literal>fetchurl</literal> uses the same name for the downloaded file
+as <command>nix-prefetch-url</command>, the redundant download can be
+avoided.</para>
+
+<para>If <replaceable>hash</replaceable> is specified, then a download
+is not performed if the Nix store already contains a file with the
+same hash and base name.  Otherwise, the file is downloaded, and an
+error is signaled if the actual hash of the file does not match the
+specified hash.</para>
+
+<para>This command prints the hash on standard output.  Additionally,
+if the environment variable <envar>PRINT_PATH</envar> is set, the path
+of the downloaded file in the Nix store is also printed.</para>
+
+</refsection>
+
+
+<refsection><title>Options</title>
+
+<variablelist>
+  
+  <varlistentry><term><option>--type</option> <replaceable>hashAlgo</replaceable></term>
+
+    <listitem><para>Use the specified cryptographic hash algorithm,
+    which can be one of <literal>md5</literal>,
+    <literal>sha1</literal>, and
+    <literal>sha256</literal>.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-prefetch-url ftp://ftp.nluug.nl/pub/gnu/make/make-3.80.tar.bz2
+0bbd1df101bc0294d440471e50feca71
+
+$ PRINT_PATH=1 nix-prefetch-url ftp://ftp.nluug.nl/pub/gnu/make/make-3.80.tar.bz2
+0bbd1df101bc0294d440471e50feca71
+/nix/store/wvyz8ifdn7wyz1p3pqyn0ra45ka2l492-make-3.80.tar.bz2</screen>
+
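+<para>The hash printed above can then be used in a Nix expression.  As
+a sketch (assuming the <function>fetchurl</function> function in your
+version of Nixpkgs accepts an <varname>md5</varname> attribute):
+
+<programlisting>
+fetchurl {
+  url = ftp://ftp.nluug.nl/pub/gnu/make/make-3.80.tar.bz2;
+  md5 = "0bbd1df101bc0294d440471e50feca71";
+}</programlisting>
+
+</para>
+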
+</refsection>
+
+    
+</refentry>
diff --git a/doc/manual/nix-pull.xml b/doc/manual/nix-pull.xml
new file mode 100644
index 000000000000..8e4a505e1d25
--- /dev/null
+++ b/doc/manual/nix-pull.xml
@@ -0,0 +1,50 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-pull">
+
+<refmeta>
+  <refentrytitle>nix-pull</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-pull</refname>
+  <refpurpose>pull substitutes from a network cache</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-pull</command>
+    <arg choice='plain'><replaceable>url</replaceable></arg>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+
+<refsection><title>Description</title>
+
+<para>The command <command>nix-pull</command> obtains a list of
+pre-built store paths from the URL <replaceable>url</replaceable>, and
+for each of these store paths, registers a substitute derivation that
+downloads and unpacks it into the Nix store.  This is used to speed up
+installations: if you attempt to install something that has already
+been built and stored into the network cache, Nix can transparently
+re-use the pre-built store paths.</para>
+
+<para>The file at <replaceable>url</replaceable> must be compatible
+with the files created by <command>nix-push</command>.</para>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<screen>
+$ nix-pull http://nix.cs.uu.nl/dist/nix/nixpkgs-0.5pre753/MANIFEST</screen>
+
+</refsection>
+
+
+</refentry>
diff --git a/doc/manual/nix-push.xml b/doc/manual/nix-push.xml
new file mode 100644
index 000000000000..e789bbf7d352
--- /dev/null
+++ b/doc/manual/nix-push.xml
@@ -0,0 +1,398 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-push">
+
+<refmeta>
+  <refentrytitle>nix-push</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-push</refname>
+  <refpurpose>generate a binary cache</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-push</command>
+    <arg choice='plain'><option>--dest</option> <replaceable>dest-dir</replaceable></arg>
+    <arg><option>--bzip2</option></arg>
+    <arg><option>--none</option></arg>
+    <arg><option>--force</option></arg>
+    <arg><option>--link</option></arg>
+    <arg><option>--manifest</option></arg>
+    <arg><option>--manifest-path</option> <replaceable>filename</replaceable></arg>
+    <arg><option>--url-prefix</option> <replaceable>url</replaceable></arg>
+    <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+
+<refsection><title>Description</title>
+
+<para>The command <command>nix-push</command> produces a
+<emphasis>binary cache</emphasis>, a directory containing compressed
+Nix archives (NARs) plus some metadata of the closure of the specified
+store paths.  This directory can then be made available through a web
+server to other Nix installations, allowing them to skip building from
+source and instead download binaries from the cache
+automatically.</para>
+
+<para><command>nix-push</command> performs the following actions.
+      
+<orderedlist>
+
+  <listitem><para>Each path in <replaceable>paths</replaceable> is
+  built (using <link
+  linkend='rsec-nix-store-realise'><command>nix-store
+  --realise</command></link>).</para></listitem>
+
+  <listitem><para>All paths in the closure of
+  <replaceable>paths</replaceable> are determined (using
+  <command>nix-store --query --requisites
+  --include-outputs</command>).  Note that since the
+  <option>--include-outputs</option> flag is used, if
+  <replaceable>paths</replaceable> includes a store derivation, you
+  get a combined source/binary distribution (e.g., source tarballs
+  will be included).</para></listitem>
+
+  <listitem><para>All store paths determined in the previous step are
+  packaged into a NAR (using <command>nix-store --dump</command>) and
+  compressed using <command>xz</command> or <command>bzip2</command>.
+  The resulting files have the extension <filename>.nar.xz</filename>
+  or <filename>.nar.bz2</filename>.  Also for each store path, Nix
+  generates a file with extension <filename>.narinfo</filename>
+  containing metadata such as the references, cryptographic hash and
+  size of each path.</para></listitem>
+
+  <listitem><para>Optionally, a single <emphasis>manifest</emphasis>
+  file is created that contains the same metadata as the
+  <filename>.narinfo</filename> files.  This is for compatibility with
+  Nix versions prior to 1.2 (see <command>nix-pull</command> for
+  details).</para></listitem>
+
+  <listitem><para>A file named <option>nix-cache-info</option> is
+  placed in the destination directory.  The existence of this file
+  marks the directory as a binary cache.</para></listitem>
+
+</orderedlist>
+
+</para>
+
+</refsection>
+
+
+<refsection><title>Options</title>
+
+<variablelist>
+
+  <varlistentry><term><option>--dest</option> <replaceable>dest-dir</replaceable></term>
+
+    <listitem><para>Set the destination directory to
+    <replaceable>dest-dir</replaceable>, which is created if it does not
+    exist.  This flag is required.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--bzip2</option></term>
+
+    <listitem><para>Compress NARs using <command>bzip2</command>
+    instead of <command>xz -9</command>.  The latter compresses about
+    30% better on typical archives, decompresses about twice as fast,
+    but compresses a lot slower and is not supported by Nix prior to
+    version 1.2.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--none</option></term>
+
+    <listitem><para>Do not compress NARs.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--force</option></term>
+
+    <listitem><para>Overwrite <filename>.narinfo</filename> files if
+    they already exist.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--link</option></term>
+
+    <listitem><para>By default, NARs are generated in the Nix store
+    and then copied to <replaceable>dest-dir</replaceable>.  If this
+    option is given, hard links are used instead.  This only works if
+    <replaceable>dest-dir</replaceable> is on the same filesystem as
+    the Nix store.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--manifest</option></term>
+
+    <listitem><para>Force the generation of a manifest suitable for
+    use by <command>nix-pull</command>.  The manifest is stored as
+    <filename><replaceable>dest-dir</replaceable>/MANIFEST</filename>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--manifest-path</option> <replaceable>filename</replaceable></term>
+
+    <listitem><para>Like <option>--manifest</option>, but store the
+    manifest in <replaceable>filename</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--url-prefix</option> <replaceable>url</replaceable></term>
+
+    <listitem><para>Manifests are expected to contain the absolute
+    URLs of NARs.  For generating these URLs, the prefix
+    <replaceable>url</replaceable> is used.  It defaults to
+    <uri>file://<replaceable>dest-dir</replaceable></uri>.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<para>To add the closure of Thunderbird to a binary cache:
+
+<screen>
+$ nix-push --dest /tmp/cache $(nix-build -A thunderbird)
+</screen>
+
+Assuming that <filename>/tmp/cache</filename> is exported by a web
+server as <uri>http://example.org/cache</uri>, you can then use this
+cache on another machine to speed up the installation of Thunderbird:
+
+<screen>
+$ nix-build -A thunderbird --option binary-caches http://example.org/cache
+</screen>
+
+Alternatively, you could add <literal>binary-caches =
+http://example.org/cache</literal> to
+<filename>nix.conf</filename>.</para>
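+
+<para>That is, the following line in <filename>nix.conf</filename>
+(using the same illustrative cache URL) enables the cache
+permanently:
+
+<screen>
+binary-caches = http://example.org/cache
+</screen>
+
+</para>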
+
+<para>To also include build-time dependencies (such as source
+tarballs):
+
+<screen>
+$ nix-push --dest /tmp/cache $(nix-instantiate -A thunderbird)
+</screen>
+
+</para>
+
+<para>To generate a manifest suitable for <command>nix-pull</command>:
+
+<screen>
+$ nix-push --dest /tmp/cache $(nix-build -A thunderbird) --manifest
+</screen>
+
+On another machine you can then do:
+
+<screen>
+$ nix-pull http://example.org/cache
+</screen>
+
+to cause the binaries to be used by subsequent Nix operations.</para>
+
+</refsection>
+
+
+<refsection><title>Binary cache format and operation</title>
+
+<para>A binary cache with URL <replaceable>url</replaceable> only
+denotes a valid binary cache if the file
+<uri><replaceable>url</replaceable>/nix-cache-info</uri> exists.  If
+this file does not exist (or cannot be downloaded), the cache is
+ignored.  If it does exist, it must be a text file containing cache
+properties.  Here’s an example:
+
+<screen>
+StoreDir: /nix/store
+WantMassQuery: 1
+Priority: 10
+</screen>
+
+The properties that are currently supported are:
+
+<variablelist>
+  
+  <varlistentry><term><literal>StoreDir</literal></term>
+
+    <listitem><para>The path of the Nix store to which this binary
+    cache applies.  Binaries are not relocatable — a binary built for
+    <filename>/nix/store</filename> won’t generally work in
+    <filename>/home/alice/store</filename> — so to prevent binaries
+    from being used in a wrong store, a binary cache is only used if
+    its <literal>StoreDir</literal> matches the local Nix
+    configuration.  The default is
+    <filename>/nix/store</filename>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>WantMassQuery</literal></term>
+
+    <listitem><para>Query operations such as <command>nix-env
+    -qas</command> can cause thousands of cache queries, and thus
+    thousands of HTTP requests, to determine which packages are
+    available in binary form.  While these requests are small, not
+    every server may appreciate a potential onslaught of queries.  If
+    <literal>WantMassQuery</literal> is set to <literal>0</literal>
+    (default), “mass queries” such as <command>nix-env -qas</command>
+    will skip this cache.  Thus a package may appear not to have a
+    binary substitute.  However, the binary will still be used when
+    you actually install the package.  If
+    <literal>WantMassQuery</literal> is set to <literal>1</literal>,
+    mass queries will use this cache.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>Priority</literal></term>
+
+    <listitem><para>Each binary cache has a priority (defaulting to
+    50).  Binary caches are checked for binaries in order of ascending
+    priority; thus a higher number denotes a lower priority.  The
+    binary cache <uri>http://cache.nixos.org</uri> has priority
+    40.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</para>
+
+<para>Every time Nix needs to build some store path
+<replaceable>p</replaceable>, it will check each configured binary
+cache to see if it has a NAR file for <replaceable>p</replaceable>,
+until it finds one.  If no cache has a NAR, Nix will fall back to
+building the path from source (if applicable).  To see if a cache with
+URL <replaceable>url</replaceable> has a binary for
+<replaceable>p</replaceable>, Nix fetches
+<replaceable>url/h</replaceable>, where <replaceable>h</replaceable>
+is the hash part of <replaceable>p</replaceable>.  Thus, if we have a
+cache <uri>http://cache.nixos.org</uri> and we want to obtain
+the store path
+<screen>
+/nix/store/a8922c0h87iilxzzvwn2hmv8x210aqb9-glibc-2.7
+</screen>
+then Nix will attempt to fetch
+<screen>
+http://cache.nixos.org/a8922c0h87iilxzzvwn2hmv8x210aqb9.narinfo
+</screen>
+(Commands such as <command>nix-env -qas</command> will issue an HTTP
+HEAD request, since they only need to know whether the
+<filename>.narinfo</filename> file exists.)  The
+<filename>.narinfo</filename> file is a simple text file that looks
+like this:
+
+<screen>
+StorePath: /nix/store/a8922c0h87iilxzzvwn2hmv8x210aqb9-glibc-2.7
+URL: nar/0zzjpdz46mdn74v09m053yczlz4am038g8r74iy8w43gx8801h70.nar.bz2
+Compression: bzip2
+FileHash: sha256:0zzjpdz46mdn74v09m053yczlz4am038g8r74iy8w43gx8801h70
+FileSize: 24473768
+NarHash: sha256:0s491y1h9hxj5ghiizlxk7ax6jwbha00zwn7lpyd5xg5bhf60vzg
+NarSize: 109521136
+References: 2ma2k0ys8knh4an48n28vigcmc2z8773-linux-headers-2.6.23.16 ...
+Deriver: 7akyyc87ka32xwmqza9dvyg5pwx3j212-glibc-2.7.drv
+</screen>
+
+The fields are as follows:
+
+<variablelist>
+  
+  <varlistentry><term><literal>StorePath</literal></term>
+
+    <listitem><para>The full store path, including the name part
+    (e.g., <literal>glibc-2.7</literal>).  It must match the
+    requested store path.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>URL</literal></term>
+
+    <listitem><para>The URL of the NAR, relative to the binary cache
+    URL.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>Compression</literal></term>
+
+    <listitem><para>The compression method; either
+    <literal>xz</literal> or
+    <literal>bzip2</literal>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>FileHash</literal></term>
+
+    <listitem><para>The SHA-256 hash of the compressed
+    NAR.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>FileSize</literal></term>
+
+    <listitem><para>The size of the compressed NAR.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>NarHash</literal></term>
+
+    <listitem><para>The SHA-256 hash of the uncompressed NAR.  This is
+    equal to the hash of the store path as returned by
+    <command>nix-store -q --hash
+    <replaceable>p</replaceable></command>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>NarSize</literal></term>
+
+    <listitem><para>The size of the uncompressed NAR.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>References</literal></term>
+
+    <listitem><para>The references of the store path, without the Nix
+    store prefix.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>Deriver</literal></term>
+
+    <listitem><para>The deriver of the store path, without the Nix
+    store prefix.  This field is optional.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><literal>System</literal></term>
+
+    <listitem><para>The Nix platform type of this binary, if known.
+    This field is optional.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</para>
+
+<para>Thus, in our example, after recursively ensuring that the
+references exist (e.g.,
+<filename>/nix/store/2ma2k0ys8knh4an48n28vigcmc2z8773-linux-headers-2.6.23.16</filename>),
+Nix will fetch <screen>
+http://cache.nixos.org/nar/0zzjpdz46mdn74v09m053yczlz4am038g8r74iy8w43gx8801h70.nar.bz2
+</screen> and decompress and unpack it to
+<filename>/nix/store/a8922c0h87iilxzzvwn2hmv8x210aqb9-glibc-2.7</filename>.</para>
+
+</refsection>
+
+
+</refentry>
diff --git a/doc/manual/nix-shell.xml b/doc/manual/nix-shell.xml
new file mode 100644
index 000000000000..d5f70a9e66a2
--- /dev/null
+++ b/doc/manual/nix-shell.xml
@@ -0,0 +1,200 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-shell">
+
+<refmeta>
+  <refentrytitle>nix-shell</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-shell</refname>
+  <refpurpose>start an interactive shell based on a Nix expression</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-shell</command>
+    <arg><option>--arg</option> <replaceable>name</replaceable> <replaceable>value</replaceable></arg>
+    <arg><option>--argstr</option> <replaceable>name</replaceable> <replaceable>value</replaceable></arg>
+    <arg>
+      <group choice='req'>
+        <arg choice='plain'><option>--attr</option></arg>
+        <arg choice='plain'><option>-A</option></arg>
+      </group>
+      <replaceable>attrPath</replaceable>
+    </arg>
+    <arg><option>--command</option> <replaceable>cmd</replaceable></arg>
+    <arg><option>--exclude</option> <replaceable>regexp</replaceable></arg>
+    <arg><option>--pure</option></arg>
+    <group choice='req'>
+      <group choice='plain'>
+        <group>
+          <arg choice='plain'><option>--packages</option></arg>
+          <arg choice='plain'><option>-p</option></arg>
+        </group>
+        <replaceable>packages</replaceable>
+      </group>
+      <arg><replaceable>path</replaceable></arg>
+    </group>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+<refsection><title>Description</title>
+
+<para>The command <command>nix-shell</command> will build the
+dependencies of the specified derivation, but not the derivation
+itself.  It will then start an interactive shell in which all
+environment variables defined by the derivation
+<replaceable>path</replaceable> have been set to their corresponding
+values, and the script <literal>$stdenv/setup</literal> has been
+sourced.  This is useful for reproducing the environment of a
+derivation for development.</para>
+
+<para>If <replaceable>path</replaceable> is not given,
+<command>nix-shell</command> defaults to
+<filename>shell.nix</filename> if it exists, and
+<filename>default.nix</filename> otherwise.</para>
+
+<para>If the derivation defines the variable
+<varname>shellHook</varname>, it will be evaluated after
+<literal>$stdenv/setup</literal> has been sourced.  Since this hook is
+not executed by regular Nix builds, it allows you to perform
+initialisation specific to <command>nix-shell</command>.  For example,
+the derivation attribute
+
+<programlisting>
+shellHook =
+  ''
+    echo "Hello shell"
+  '';
+</programlisting>
+
+will cause <command>nix-shell</command> to print <literal>Hello shell</literal>.</para>
+
+</refsection>
+
+
+<refsection><title>Options</title>
+
+<para>All options not listed here are passed to <command>nix-store
+--realise</command>, except for <option>--arg</option> and
+<option>--attr</option> / <option>-A</option> which are passed to
+<command>nix-instantiate</command>.  <phrase condition="manual">See
+also <xref linkend="sec-common-options" />.</phrase></para>
+
+<variablelist>
+
+  <varlistentry><term><option>--command</option> <replaceable>cmd</replaceable></term>
+
+    <listitem><para>In the environment of the derivation, run the
+    shell command <replaceable>cmd</replaceable> instead of starting
+    an interactive shell.  However, if you end the shell command with
+    <literal>return</literal>, you still get an interactive shell.
+    This can be useful for doing any additional
+    initialisation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--exclude</option> <replaceable>regexp</replaceable></term>
+
+    <listitem><para>Do not build any dependencies whose store path
+    matches the regular expression <replaceable>regexp</replaceable>.
+    This option may be specified multiple times.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--pure</option></term>
+
+    <listitem><para>If this flag is specified, the environment is
+    almost entirely cleared before the interactive shell is started,
+    so you get an environment that more closely corresponds to the
+    “real” Nix build.  A few variables, in particular
+    <envar>HOME</envar>, <envar>USER</envar> and
+    <envar>DISPLAY</envar>, are retained.  Note that
+    <filename>~/.bashrc</filename> and (depending on your Bash
+    installation) <filename>/etc/bashrc</filename> are still sourced,
+    so any variables set there will affect the interactive
+    shell.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--packages</option> / <option>-p</option></term>
+
+    <listitem><para>Set up an environment in which the specified
+    packages are present.  The command line arguments are interpreted
+    as attribute names inside the Nix Packages collection.  Thus,
+    <literal>nix-shell -p libjpeg openjdk</literal> will start a shell
+    in which the packages denoted by the attribute names
+    <varname>libjpeg</varname> and <varname>openjdk</varname> are
+    present.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+<para>The following common options are supported:</para>
+
+<variablelist condition="manpage">
+  <xi:include href="opt-common.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(//db:variablelist[@xml:id='opt-common']/*)" />
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<para>To build the dependencies of the package Pan, and start an
+interactive shell in which to build it:
+
+<screen>
+$ nix-shell '&lt;nixpkgs>' -A pan
+[nix-shell]$ unpackPhase
+[nix-shell]$ cd pan-*
+[nix-shell]$ configurePhase
+[nix-shell]$ buildPhase
+[nix-shell]$ ./pan/gui/pan
+</screen>
+
+To clear the environment first, and do some additional automatic
+initialisation of the interactive shell:
+
+<screen>
+$ nix-shell '&lt;nixpkgs>' -A pan --pure \
+    --command 'export NIX_DEBUG=1; export NIX_CORES=8; return'
+</screen>
+
+Nix expressions can also be given on the command line.  For instance,
+the following starts a shell containing the packages
+<literal>sqlite</literal> and <literal>libX11</literal>:
+
+<screen>
+$ nix-shell -E 'with import &lt;nixpkgs> { }; runCommand "dummy" { buildInputs = [ sqlite xorg.libX11 ]; } ""'
+</screen>
+
+A shorter way to do the same is:
+
+<screen>
+$ nix-shell -p sqlite xorg.libX11
+[nix-shell]$ echo $NIX_LDFLAGS
+… -L/nix/store/j1zg5v…-sqlite-3.8.0.2/lib -L/nix/store/0gmcz9…-libX11-1.6.1/lib …
+</screen>
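+
+The <option>--exclude</option> option can be used to skip building
+selected dependencies.  For instance, the following (with an
+illustrative regular expression) builds the dependencies of Pan except
+those whose store paths match <literal>fontconfig</literal>:
+
+<screen>
+$ nix-shell '&lt;nixpkgs>' -A pan --exclude 'fontconfig'
+</screen>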
+
+</para>
+
+</refsection>
+
+
+<refsection condition="manpage"><title>Environment variables</title>
+
+<variablelist>
+  <xi:include href="env-common.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(//db:variablelist[@xml:id='env-common']/*)" />
+</variablelist>
+
+</refsection>
+
+
+</refentry>
diff --git a/doc/manual/nix-store.xml b/doc/manual/nix-store.xml
new file mode 100644
index 000000000000..c9a912ff0ee4
--- /dev/null
+++ b/doc/manual/nix-store.xml
@@ -0,0 +1,1351 @@
+<refentry xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink"
+          xmlns:xi="http://www.w3.org/2001/XInclude"
+          xml:id="sec-nix-store">
+
+<refmeta>
+  <refentrytitle>nix-store</refentrytitle>
+  <manvolnum>1</manvolnum>
+  <refmiscinfo class="source">Nix</refmiscinfo>
+  <refmiscinfo class="version"><xi:include href="version.txt" parse="text"/></refmiscinfo>
+</refmeta>
+
+<refnamediv>
+  <refname>nix-store</refname>
+  <refpurpose>manipulate or query the Nix store</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="opt-common-syn.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(/db:nop/*)" />
+    <arg><option>--add-root</option> <replaceable>path</replaceable></arg>
+    <arg><option>--indirect</option></arg>
+    <arg choice='plain'><replaceable>operation</replaceable></arg>
+    <arg rep='repeat'><replaceable>options</replaceable></arg>
+    <arg rep='repeat'><replaceable>arguments</replaceable></arg>
+  </cmdsynopsis>
+</refsynopsisdiv>
+
+
+<refsection><title>Description</title>
+
+<para>The command <command>nix-store</command> performs primitive
+operations on the Nix store.  You generally do not need to run this
+command manually.</para>
+
+<para><command>nix-store</command> takes exactly one
+<emphasis>operation</emphasis> flag which indicates the subcommand to
+be performed.  These are documented below.</para>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Common options</title>
+
+<para>This section lists the options that are common to all
+operations.  These options are allowed for every subcommand, though
+they may not always have an effect.  <phrase condition="manual">See
+also <xref linkend="sec-common-options" /> for a list of common
+options.</phrase></para>
+
+<variablelist>
+
+  <varlistentry xml:id="opt-add-root"><term><option>--add-root</option> <replaceable>path</replaceable></term>
+
+    <listitem><para>Causes the result of a realisation
+    (<option>--realise</option> and <option>--force-realise</option>)
+    to be registered as a root of the garbage collector<phrase
+    condition="manual"> (see <xref linkend="ssec-gc-roots"
+    />)</phrase>.  The root is stored in
+    <replaceable>path</replaceable>, which must be inside a directory
+    that is scanned for roots by the garbage collector (i.e.,
+    typically in a subdirectory of
+    <filename>/nix/var/nix/gcroots/</filename>)
+    <emphasis>unless</emphasis> the <option>--indirect</option> flag
+    is used.</para>
+
+    <para>If there are multiple results, then multiple symlinks will
+    be created by sequentially numbering symlinks beyond the first one
+    (e.g., <filename>foo</filename>, <filename>foo-2</filename>,
+    <filename>foo-3</filename>, and so on).</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--indirect</option></term>
+
+    <listitem>
+
+    <para>In conjunction with <option>--add-root</option>, this option
+    allows roots to be stored <emphasis>outside</emphasis> of the GC
+    roots directory.  This is useful for commands such as
+    <command>nix-build</command> that place a symlink to the build
+    result in the current directory; such a build result should not be
+    garbage-collected unless the symlink is removed.</para>
+
+    <para>The <option>--indirect</option> flag causes a uniquely named
+    symlink to <replaceable>path</replaceable> to be stored in
+    <filename>/nix/var/nix/gcroots/auto/</filename>.  For instance,
+
+    <screen>
+$ nix-store --add-root /home/eelco/bla/result --indirect -r <replaceable>...</replaceable>
+
+$ ls -l /nix/var/nix/gcroots/auto
+lrwxrwxrwx    1 ... 2005-03-13 21:10 dn54lcypm8f8... -> /home/eelco/bla/result
+
+$ ls -l /home/eelco/bla/result
+lrwxrwxrwx    1 ... 2005-03-13 21:10 /home/eelco/bla/result -> /nix/store/1r11343n6qd4...-f-spot-0.0.10</screen>
+
+    Thus, when <filename>/home/eelco/bla/result</filename> is removed,
+    the GC root in the <filename>auto</filename> directory becomes a
+    dangling symlink and will be ignored by the collector.</para>
+
+    <warning><para>Note that it is not possible to move or rename
+    indirect GC roots, since the symlink in the
+    <filename>auto</filename> directory will still point to the old
+    location.</para></warning>
+
+    </listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+<variablelist condition="manpage">
+  <xi:include href="opt-common.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(//db:variablelist[@xml:id='opt-common']/*)" />
+</variablelist>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection xml:id='rsec-nix-store-realise'><title>Operation <option>--realise</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-store</command>
+  <group choice='req'>
+    <arg choice='plain'><option>--realise</option></arg>
+    <arg choice='plain'><option>-r</option></arg>
+  </group>
+  <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+  <arg><option>--dry-run</option></arg>
+</cmdsynopsis>
+
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--realise</option> essentially “builds”
+the specified store paths.  Realisation is a somewhat overloaded term:
+
+<itemizedlist>
+
+  <listitem><para>If the store path is a
+  <emphasis>derivation</emphasis>, realisation ensures that the output
+  paths of the derivation are <link
+  linkend="gloss-validity">valid</link> (i.e., the output path and its
+  closure exist in the file system).  This can be done in several
+  ways.  First, it is possible that the outputs are already valid, in
+  which case we are done immediately.  Otherwise, there may be <link
+  linkend="gloss-substitute">substitutes</link> that produce the
+  outputs (e.g., by downloading them).  Finally, the outputs can be
+  produced by performing the build action described by the
+  derivation.</para></listitem>
+
+  <listitem><para>If the store path is not a derivation, realisation
+  ensures that the specified path is valid (i.e., it and its closure
+  exist in the file system).  If the path is already valid, we are
+  done immediately.  Otherwise, the path and any missing paths in its
+  closure may be produced through substitutes.  If there are no
+  (successful) substitutes, realisation fails.</para></listitem>
+
+</itemizedlist>
+
+</para>
+
+<para>The output path of each derivation is printed on standard
+output.  (For non-derivation arguments, the argument itself is
+printed.)</para>
+
+<para>The following flags are available:</para>
+
+<variablelist>
+
+  <varlistentry><term><option>--dry-run</option></term>
+
+    <listitem><para>Print on standard error a description of what
+    packages would be built or downloaded, without actually performing
+    the operation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--ignore-unknown</option></term>
+
+    <listitem><para>If a non-derivation path does not have a
+    substitute, then silently ignore it.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<para>This operation is typically used to build store derivations
+produced by <link
+linkend="sec-nix-instantiate"><command>nix-instantiate</command></link>:
+
+<screen>
+$ nix-store -r $(nix-instantiate ./test.nix)
+/nix/store/31axcgrlbfsxzmfff1gyj1bf62hvkby2-aterm-2.3.1</screen>
+
+This is essentially what <link
+linkend="sec-nix-build"><command>nix-build</command></link> does.</para>
+
+</refsection>
+
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection xml:id='rsec-nix-store-gc'><title>Operation <option>--gc</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-store</command>
+  <arg choice='plain'><option>--gc</option></arg>
+  <group>
+    <arg choice='plain'><option>--print-roots</option></arg>
+    <arg choice='plain'><option>--print-live</option></arg>
+    <arg choice='plain'><option>--print-dead</option></arg>
+    <arg choice='plain'><option>--delete</option></arg>
+  </group>
+  <arg><option>--max-freed</option> <replaceable>bytes</replaceable></arg>
+</cmdsynopsis>
+
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>Without additional flags, the operation <option>--gc</option>
+performs a garbage collection on the Nix store.  That is, all paths in
+the Nix store that are not reachable via file system references from a
+set of “roots” are deleted.</para>
+
+<para>The following suboperations may be specified:</para>
+
+<variablelist>
+
+  <varlistentry><term><option>--print-roots</option></term>
+
+    <listitem><para>This operation prints on standard output the set
+    of roots used by the garbage collector.  What constitutes a root
+    is described in <xref linkend="ssec-gc-roots"
+    />.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--print-live</option></term>
+
+    <listitem><para>This operation prints on standard output the set
+    of “live” store paths, which are all the store paths reachable
+    from the roots.  Live paths should never be deleted, since that
+    would break consistency — it would become possible that
+    applications are installed that reference things that are no
+    longer present in the store.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--print-dead</option></term>
+
+    <listitem><para>This operation prints out on standard output the
+    set of “dead” store paths, which is just the opposite of the set
+    of live paths: any path in the store that is not live (with
+    respect to the roots) is dead.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--delete</option></term>
+
+    <listitem><para>This operation performs an actual garbage
+    collection.  All dead paths are removed from the
+    store.  This is the default.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+<para>By default, all unreachable paths are deleted.  The following
+options control what gets deleted and in what order:
+
+<variablelist>
+
+  <varlistentry><term><option>--max-freed</option> <replaceable>bytes</replaceable></term>
+
+    <listitem><para>Keep deleting paths until at least
+    <replaceable>bytes</replaceable> bytes have been deleted, then
+    stop.  The argument <replaceable>bytes</replaceable> can be
+    followed by the multiplicative suffix <literal>K</literal>,
+    <literal>M</literal>, <literal>G</literal> or
+    <literal>T</literal>, denoting KiB, MiB, GiB or TiB
+    units.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</para>
+
+<para>The behaviour of the collector is also influenced by the <link
+linkend="conf-gc-keep-outputs"><literal>gc-keep-outputs</literal></link>
+and <link
+linkend="conf-gc-keep-derivations"><literal>gc-keep-derivations</literal></link>
+variables in the Nix configuration file.</para>
+
+<para>With <option>--delete</option>, the collector prints the total
+number of freed bytes when it finishes (or when it is interrupted).
+With <option>--print-dead</option>, it prints the number of bytes that
+would be freed.</para>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<para>To delete all unreachable paths, just do:
+
+<screen>
+$ nix-store --gc
+deleting `/nix/store/kq82idx6g0nyzsp2s14gfsc38npai7lf-cairo-1.0.4.tar.gz.drv'
+<replaceable>...</replaceable>
+8825586 bytes freed (8.42 MiB)</screen>
+
+</para>
+
+<para>To delete at least 100 MiBs of unreachable paths:
+
+<screen>
+$ nix-store --gc --max-freed $((100 * 1024 * 1024))</screen>
+
+</para>
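+
+<para>To merely inspect what the collector would consider, use the
+suboperations described above; for instance, to print the current set
+of roots without deleting anything:
+
+<screen>
+$ nix-store --gc --print-roots</screen>
+
+</para>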
+
+</refsection>
+
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--delete</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-store</command>
+  <arg choice='plain'><option>--delete</option></arg>
+  <arg><option>--ignore-liveness</option></arg>
+  <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+</cmdsynopsis>
+
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--delete</option> deletes the store paths
+<replaceable>paths</replaceable> from the Nix store, but only if it is
+safe to do so; that is, when the paths are not reachable from a root of
+the garbage collector.  This means that you can only delete paths that
+would also be deleted by <literal>nix-store --gc</literal>.  Thus,
+<literal>--delete</literal> is a more targeted version of
+<literal>--gc</literal>.</para>
+
+<para>With the option <option>--ignore-liveness</option>, reachability
+from the roots is ignored.  However, the path still won’t be deleted
+if there are other paths in the store that refer to it (i.e., depend
+on it).</para>
+
+</refsection>
+
+<refsection><title>Example</title>
+
+<screen>
+$ nix-store --delete /nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4
+0 bytes freed (0.00 MiB)
+error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4' since it is still alive</screen>
+
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection xml:id='refsec-nix-store-query'><title>Operation <option>--query</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-store</command>
+  <group choice='req'>
+    <arg choice='plain'><option>--query</option></arg>
+    <arg choice='plain'><option>-q</option></arg>
+  </group>
+  <group choice='req'>
+    <arg choice='plain'><option>--outputs</option></arg>
+    <arg choice='plain'><option>--requisites</option></arg>
+    <arg choice='plain'><option>-R</option></arg>
+    <arg choice='plain'><option>--references</option></arg>
+    <arg choice='plain'><option>--referrers</option></arg>
+    <arg choice='plain'><option>--referrers-closure</option></arg>
+    <arg choice='plain'><option>--deriver</option></arg>
+    <arg choice='plain'><option>-d</option></arg>
+    <arg choice='plain'><option>--graph</option></arg>
+    <arg choice='plain'><option>--tree</option></arg>
+    <arg choice='plain'><option>--binding</option> <replaceable>name</replaceable></arg>
+    <arg choice='plain'><option>--hash</option></arg>
+    <arg choice='plain'><option>--size</option></arg>
+    <arg choice='plain'><option>--roots</option></arg>
+  </group>
+  <arg><option>--use-output</option></arg>
+  <arg><option>-u</option></arg>
+  <arg><option>--force-realise</option></arg>
+  <arg><option>-f</option></arg>
+  <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+</cmdsynopsis>
+
+</refsection>
+
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--query</option> displays various bits of
+information about the store paths.  The queries are described below.  At
+most one query can be specified.  The default query is
+<option>--outputs</option>.</para>
+
+<para>The paths <replaceable>paths</replaceable> may also be symlinks
+from outside of the Nix store, to the Nix store.  In that case, the
+query is applied to the target of the symlink.</para>
+
+
+</refsection>
+
+
+<refsection><title>Common query options</title>
+
+<variablelist>
+
+  <varlistentry><term><option>--use-output</option></term>
+    <term><option>-u</option></term>
+
+    <listitem><para>For each argument to the query that is a store
+    derivation, apply the query to the output path of the derivation
+    instead.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--force-realise</option></term>
+    <term><option>-f</option></term>
+
+    <listitem><para>Realise each argument to the query first (see
+    <link linkend="rsec-nix-store-realise"><command>nix-store
+    --realise</command></link>).</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection xml:id='nixref-queries'><title>Queries</title>
+
+<variablelist>
+
+  <varlistentry><term><option>--outputs</option></term>
+
+    <listitem><para>Prints out the <link
+    linkend="gloss-output-path">output paths</link> of the store
+    derivations <replaceable>paths</replaceable>.  These are the paths
+    that will be produced when the derivation is
+    built.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--requisites</option></term>
+    <term><option>-R</option></term>
+
+    <listitem><para>Prints out the <link
+    linkend="gloss-closure">closure</link> of the store path
+    <replaceable>paths</replaceable>.</para>
+
+    <para>This query has one option:</para>
+
+    <variablelist>
+
+      <varlistentry><term><option>--include-outputs</option></term>
+
+        <listitem><para>Also include the output path of store
+        derivations, and their closures.</para></listitem>
+
+      </varlistentry>
+
+    </variablelist>
+
+    <para>This query can be used to implement various kinds of
+    deployment.  A <emphasis>source deployment</emphasis> is obtained
+    by distributing the closure of a store derivation.  A
+    <emphasis>binary deployment</emphasis> is obtained by distributing
+    the closure of an output path.  A <emphasis>cache
+    deployment</emphasis> (combined source/binary deployment,
+    including binaries of build-time-only dependencies) is obtained by
+    distributing the closure of a store derivation and specifying the
+    option <option>--include-outputs</option>.</para>
+
+    </listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--references</option></term>
+
+    <listitem><para>Prints the set of <link
+    linkend="gloss-reference">references</link> of the store paths
+    <replaceable>paths</replaceable>, that is, their immediate
+    dependencies.  (For <emphasis>all</emphasis> dependencies, use
+    <option>--requisites</option>.)</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--referrers</option></term>
+
+    <listitem><para>Prints the set of <emphasis>referrers</emphasis> of
+    the store paths <replaceable>paths</replaceable>, that is, the
+    store paths currently existing in the Nix store that refer to one
+    of <replaceable>paths</replaceable>.  Note that contrary to the
+    references, the set of referrers is not constant; it can change as
+    store paths are added or removed.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--referrers-closure</option></term>
+
+    <listitem><para>Prints the closure of the set of store paths
+    <replaceable>paths</replaceable> under the referrers relation; that
+    is, all store paths that directly or indirectly refer to one of
+    <replaceable>paths</replaceable>.  These are all the paths currently
+    in the Nix store that are dependent on
+    <replaceable>paths</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--deriver</option></term>
+
+    <listitem><para>Prints the <link
+    linkend="gloss-deriver">deriver</link> of the store paths
+    <replaceable>paths</replaceable>.  If the path has no deriver
+    (e.g., if it is a source file), or if the deriver is not known
+    (e.g., in the case of a binary-only deployment), the string
+    <literal>unknown-deriver</literal> is printed.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--graph</option></term>
+
+    <listitem><para>Prints the references graph of the store paths
+    <replaceable>paths</replaceable> in the format of the
+    <command>dot</command> tool of AT&amp;T's <link
+    xlink:href="http://www.graphviz.org/">Graphviz package</link>.
+    This can be used to visualise dependency graphs.  To obtain a
+    build-time dependency graph, apply this to a store derivation.  To
+    obtain a runtime dependency graph, apply it to an output
+    path.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--tree</option></term>
+
+    <listitem><para>Prints the references graph of the store paths
+    <replaceable>paths</replaceable> as a nested ASCII tree.
+    References are ordered by descending closure size; this tends to
+    flatten the tree, making it more readable.  The query only
+    recurses into a store path when it is first encountered; this
+    prevents a blowup of the tree representation of the
+    graph.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--binding</option> <replaceable>name</replaceable></term>
+
+    <listitem><para>Prints the value of the attribute
+    <replaceable>name</replaceable> (i.e., environment variable) of
+    the store derivations <replaceable>paths</replaceable>.  It is an
+    error for a derivation to not have the specified
+    attribute.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--hash</option></term>
+
+    <listitem><para>Prints the SHA-256 hash of the contents of the
+    store paths <replaceable>paths</replaceable> (that is, the hash of
+    the output of <command>nix-store --dump</command> on the given
+    paths).  Since the hash is stored in the Nix database, this is a
+    fast operation.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--size</option></term>
+
+    <listitem><para>Prints the size in bytes of the contents of the
+    store paths <replaceable>paths</replaceable> — to be precise, the
+    size of the output of <command>nix-store --dump</command> on the
+    given paths.  Note that the actual disk space required by the
+    store paths may be higher, especially on filesystems with large
+    cluster sizes.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--roots</option></term>
+
+    <listitem><para>Prints the garbage collector roots that point,
+    directly or indirectly, at the store paths
+    <replaceable>paths</replaceable>.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</refsection>
+
+
+<refsection><title>Examples</title>
+
+<para>Print the closure (runtime dependencies) of the
+<command>svn</command> program in the current user environment:
+
+<screen>
+$ nix-store -qR $(which svn)
+/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4
+/nix/store/9lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4
+<replaceable>...</replaceable></screen>
+
+</para>
+
+<para>Print the build-time dependencies of <command>svn</command>:
+
+<screen>
+$ nix-store -qR $(nix-store -qd $(which svn))
+/nix/store/02iizgn86m42q905rddvg4ja975bk2i4-grep-2.5.1.tar.bz2.drv
+/nix/store/07a2bzxmzwz5hp58nf03pahrv2ygwgs3-gcc-wrapper.sh
+/nix/store/0ma7c9wsbaxahwwl04gbw3fcd806ski4-glibc-2.3.4.drv
+<replaceable>... lots of other paths ...</replaceable></screen>
+
+The difference with the previous example is that we ask the closure of
+the derivation (<option>-qd</option>), not the closure of the output
+path that contains <command>svn</command>.</para>
+
+<para>Show the build-time dependencies as a tree:
+
+<screen>
+$ nix-store -q --tree $(nix-store -qd $(which svn))
+/nix/store/7i5082kfb6yjbqdbiwdhhza0am2xvh6c-subversion-1.1.4.drv
++---/nix/store/d8afh10z72n8l1cr5w42366abiblgn54-builder.sh
++---/nix/store/fmzxmpjx2lh849ph0l36snfj9zdibw67-bash-3.0.drv
+|   +---/nix/store/570hmhmx3v57605cqg9yfvvyh0nnb8k8-bash
+|   +---/nix/store/p3srsbd8dx44v2pg6nbnszab5mcwx03v-builder.sh
+<replaceable>...</replaceable></screen>
+
+</para>
+
+<para>Show all paths that depend on the same OpenSSL library as
+<command>svn</command>:
+
+<screen>
+$ nix-store -q --referrers $(nix-store -q --binding openssl $(nix-store -qd $(which svn)))
+/nix/store/23ny9l9wixx21632y2wi4p585qhva1q8-sylpheed-1.0.0
+/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4
+/nix/store/dpmvp969yhdqs7lm2r1a3gng7pyq6vy4-subversion-1.1.3
+/nix/store/l51240xqsgg8a7yrbqdx1rfzyv6l26fx-lynx-2.8.5</screen>
+
+</para>
+
+<para>Show all paths that directly or indirectly depend on the Glibc
+(C library) used by <command>svn</command>:
+
+<screen>
+$ nix-store -q --referrers-closure $(ldd $(which svn) | grep /libc.so | awk '{print $3}')
+/nix/store/034a6h4vpz9kds5r6kzb9lhh81mscw43-libgnomeprintui-2.8.2
+/nix/store/15l3yi0d45prm7a82pcrknxdh6nzmxza-gawk-3.1.4
+<replaceable>...</replaceable></screen>
+
+Note that <command>ldd</command> is a command that prints out the
+dynamic libraries used by an ELF executable.</para>
+
+<para>Make a picture of the runtime dependency graph of the current
+user environment:
+
+<screen>
+$ nix-store -q --graph ~/.nix-profile | dot -Tps > graph.ps
+$ gv graph.ps</screen>
+
+</para>
+
+<para>Show every garbage collector root that points to a store path
+that depends on <command>svn</command>:
+
+<screen>
+$ nix-store -q --roots $(which svn)
+/nix/var/nix/profiles/default-81-link
+/nix/var/nix/profiles/default-82-link
+/nix/var/nix/profiles/per-user/eelco/profile-97-link
+</screen>
+
+</para>
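+
+<para>Show the size (in bytes) of the NAR serialisation of the
+<command>svn</command> package in the current user environment (output
+omitted):
+
+<screen>
+$ nix-store -q --size $(which svn)
+</screen>
+
+</para>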
+
+</refsection>
+
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<!--
+<refsection xml:id="rsec-nix-store-reg-val"><title>Operation <option>-XXX-register-validity</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-store</command>
+  <arg choice='plain'><option>-XXX-register-validity</option></arg>
+</cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>TODO</para>
+
+</refsection>
+
+</refsection>
+-->
+
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--add</option></title>
+
+<refsection><title>Synopsis</title>
+
+<cmdsynopsis>
+  <command>nix-store</command>
+  <arg choice='plain'><option>--add</option></arg>
+  <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+</cmdsynopsis>
+
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--add</option> adds the specified paths to
+the Nix store.  It prints the resulting paths in the Nix store on
+standard output.</para>
+
+</refsection>
+
+<refsection><title>Example</title>
+
+<screen>
+$ nix-store --add ./foo.c
+/nix/store/m7lrha58ph6rcnv109yzx1nk1cj7k7zf-foo.c</screen>
+
+</refsection>
+
+</refsection>
+
+
+
+<!--######################################################################-->
+
+<refsection xml:id='refsec-nix-store-verify'><title>Operation <option>--verify</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--verify</option></arg>
+    <arg><option>--check-contents</option></arg>
+    <arg><option>--repair</option></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--verify</option> verifies the internal
+consistency of the Nix database, and the consistency between the Nix
+database and the Nix store.  Any inconsistencies encountered are
+automatically repaired.  Inconsistencies are generally the result of
+the Nix store or database being modified by non-Nix tools, or of bugs
+in Nix itself.</para>
+
+<para>This operation has the following options:
+
+<variablelist>
+
+  <varlistentry><term><option>--check-contents</option></term>
+
+    <listitem><para>Checks that the contents of every valid store path
+    have not been altered by computing a SHA-256 hash of the contents
+    and comparing it with the hash stored in the Nix database at build
+    time.  Paths that have been modified are printed out.  For large
+    stores, <option>--check-contents</option> is obviously quite
+    slow.</para></listitem>
+
+  </varlistentry>
+
+  <varlistentry><term><option>--repair</option></term>
+
+    <listitem><para>If any valid path is missing from the store, or
+    (if <option>--check-contents</option> is given) the contents of a
+    valid path have been modified, then try to repair the path by
+    redownloading it.  See <command>nix-store --repair-path</command>
+    for details.</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
+
+</para>
+
+</refsection>
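+
+
+<refsection><title>Example</title>
+
+<para>To check the consistency of the Nix database and additionally
+verify the contents of every valid store path:
+
+<screen>
+$ nix-store --verify --check-contents
+</screen>
+
+</para>
+
+</refsection>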
+
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--verify-path</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--verify-path</option></arg>
+    <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--verify-path</option> compares the
+contents of the given store paths to their cryptographic hashes stored
+in Nix’s database.  For every changed path, it prints a warning
+message.  The exit status is 0 if no path has changed, and 1
+otherwise.</para>
+
+</refsection>
+
+<refsection><title>Example</title>
+
+<para>To verify the integrity of the <command>svn</command> command and all its dependencies:
+
+<screen>
+$ nix-store --verify-path $(nix-store -qR $(which svn))
+</screen>
+
+</para>
+
+</refsection>
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--repair-path</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--repair-path</option></arg>
+    <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--repair-path</option> attempts to
+“repair” the specified paths by redownloading them using the available
+substituters.  If no substitutes are available, then repair is not
+possible.</para>
+
+<warning><para>During repair, there is a very small time window during
+which the old path (if it exists) is moved out of the way and replaced
+with the new path.  If repair is interrupted in between, then the
+system may be left in a broken state (e.g., if the path contains a
+critical system component like the GNU C Library).</para></warning>
+
+</refsection>
+
+<refsection><title>Example</title>
+
+<screen>
+$ nix-store --verify-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13
+path `/nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13' was modified!
+  expected hash `2db57715ae90b7e31ff1f2ecb8c12ec1cc43da920efcbe3b22763f36a1861588',
+  got `481c5aa5483ebc97c20457bb8bca24deea56550d3985cda0027f67fe54b808e4'
+
+$ nix-store --repair-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13
+fetching path `/nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13'...
+…
+</screen>
+
+</refsection>
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection xml:id='refsec-nix-store-dump'><title>Operation <option>--dump</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--dump</option></arg>
+    <arg choice='plain'><replaceable>path</replaceable></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--dump</option> produces a NAR (Nix
+ARchive) file containing the contents of the file system tree rooted
+at <replaceable>path</replaceable>.  The archive is written to
+standard output.</para>
+
+<para>A NAR archive is like a TAR or Zip archive, but it contains only
+the information that Nix considers important.  For instance,
+timestamps are elided because all files in the Nix store have their
+timestamp set to 0 anyway.  Likewise, all permissions are left out
+except for the execute bit, because all files in the Nix store have
+644 or 755 permission.</para>
+
+<para>Also, a NAR archive is <emphasis>canonical</emphasis>, meaning
+that “equal” paths always produce the same NAR archive.  For instance,
+directory entries are always sorted so that the actual on-disk order
+doesn’t influence the result.  This means that the cryptographic hash
+of a NAR dump of a path is usable as a fingerprint of the contents of
+the path.  Indeed, the hashes of store paths stored in Nix’s database
+(see <link linkend="refsec-nix-store-query"><literal>nix-store -q
+--hash</literal></link>) are SHA-256 hashes of the NAR dump of each
+store path.</para>
+
+<para>NAR archives support filenames of unlimited length and 64-bit
+file sizes.  They can contain regular files, directories, and symbolic
+links, but not other types of files (such as device nodes).</para>
+
+<para>A Nix archive can be unpacked using <literal>nix-store
+--restore</literal>.</para>
+
+</refsection>
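+
+
+<refsection><title>Example</title>
+
+<para>For instance, to serialise a store path to a file (the path and
+file name shown are illustrative):
+
+<screen>
+$ nix-store --dump /nix/store/<replaceable>...</replaceable>-hello-2.9 > hello.nar
+</screen>
+
+</para>
+
+</refsection>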
+
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--restore</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--restore</option></arg>
+    <arg choice='plain'><replaceable>path</replaceable></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--restore</option> unpacks a NAR archive
+to <replaceable>path</replaceable>, which must not already exist.  The
+archive is read from standard input.</para>
+
+</refsection>
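+
+
+<refsection><title>Example</title>
+
+<para>For instance, to unpack an archive created with
+<option>--dump</option> into a new directory (file names are
+illustrative):
+
+<screen>
+$ nix-store --restore ./hello &lt; hello.nar
+</screen>
+
+</para>
+
+</refsection>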
+
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection xml:id='refsec-nix-store-export'><title>Operation <option>--export</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--export</option></arg>
+    <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--export</option> writes a serialisation
+of the specified store paths to standard output in a format that can
+be imported into another Nix store with <command
+linkend="refsec-nix-store-import">nix-store --import</command>.  This
+is like <command linkend="refsec-nix-store-dump">nix-store
+--dump</command>, except that the NAR archive produced by that command
+doesn’t contain the necessary meta-information to allow it to be
+imported into another Nix store (namely, the set of references of the
+path).</para>
+
+<para>This command does not produce a <emphasis>closure</emphasis> of
+the specified paths, so if a store path references other store paths
+that are missing in the target Nix store, the import will fail.  To
+copy a whole closure, do something like
+
+<screen>
+$ nix-store --export $(nix-store -qR <replaceable>paths</replaceable>) > out</screen>
+
+</para>
+
+<para>For an example of how <option>--export</option> and
+<option>--import</option> can be used, see the source of the <command
+linkend="sec-nix-copy-closure">nix-copy-closure</command>
+command.</para>
+
+</refsection>
+
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection xml:id='refsec-nix-store-import'><title>Operation <option>--import</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--import</option></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--import</option> reads a serialisation of
+a set of store paths produced by <command
+linkend="refsec-nix-store-export">nix-store --export</command> from
+standard input and adds those store paths to the Nix store.  Paths
+that already exist in the Nix store are ignored.  If a path refers to
+another path that doesn’t exist in the Nix store, the import
+fails.</para>
+
+</refsection>
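+
+<refsection><title>Example</title>
+
+<para>For instance, assuming <filename>out</filename> contains a
+serialisation written by <option>--export</option> as in the example
+above, the following adds the store paths it contains to the local Nix
+store:</para>
+
+<screen>
+$ nix-store --import &lt; out</screen>
+
+</refsection>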
+
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--optimise</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--optimise</option></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--optimise</option> reduces Nix store disk
+space usage by finding identical files in the store and hard-linking
+them to each other.  It typically reduces the size of the store by
+something like 25-35%.  Only regular files and symlinks are
+hard-linked in this manner.  Files are considered identical when they
+have the same NAR archive serialisation: that is, regular files must
+have the same contents and permission (executable or non-executable),
+and symlinks must have the same contents.</para>
+
+<para>After completion, or when the command is interrupted, a report
+on the achieved savings is printed on standard error.</para>
+
+<para>Use <option>-vv</option> or <option>-vvv</option> to get some
+progress indication.</para>
+
+</refsection>
+
+<refsection><title>Example</title>
+
+<screen>
+$ nix-store --optimise
+hashing files in `/nix/store/qhqx7l2f1kmwihc9bnxs7rc159hsxnf3-gcc-4.1.1'
+<replaceable>...</replaceable>
+541838819 bytes (516.74 MiB) freed by hard-linking 54143 files;
+there are 114486 files with equal contents out of 215894 files in total
+</screen>
+
+</refsection>
+
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--read-log</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <group choice='req'>
+      <arg choice='plain'><option>--read-log</option></arg>
+      <arg choice='plain'><option>-l</option></arg>
+    </group>
+    <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--read-log</option> prints the build log
+of the specified store paths on standard output.  The build log is
+whatever the builder of a derivation wrote to standard output and
+standard error.  If a store path is not a derivation, the deriver of
+the store path is used.</para>
+
+<para>Build logs are kept in
+<filename>/nix/var/log/nix/drvs</filename>.  However, there is no
+guarantee that a build log is available for any particular store path.
+For instance, if the path was downloaded as a pre-built binary through
+a substitute, then the log is unavailable. If the log is not available
+locally, then <command>nix-store</command> will try to download the
+log from the servers specified in the Nix option
+<option>log-servers</option>. For example, if it’s set to
+<literal>http://hydra.nixos.org/log</literal>, then Nix will check
+<literal>http://hydra.nixos.org/log/<replaceable>base-name</replaceable></literal>.</para>
+
+</refsection>
+
+<refsection><title>Example</title>
+
+<screen>
+$ nix-store -l $(which ktorrent)
+building /nix/store/dhc73pvzpnzxhdgpimsd9sw39di66ph1-ktorrent-2.2.1
+unpacking sources
+unpacking source archive /nix/store/p8n1jpqs27mgkjw07pb5269717nzf5f8-ktorrent-2.2.1.tar.gz
+ktorrent-2.2.1/
+ktorrent-2.2.1/NEWS
+<replaceable>...</replaceable>
+</screen>
+
+</refsection>
+
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--dump-db</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--dump-db</option></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--dump-db</option> writes a dump of the
+Nix database to standard output.  It can be loaded into an empty Nix
+store using <option>--load-db</option>.  This is useful for making
+backups and when migrating to different database schemas.</para>
+
+</refsection>
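+
+<refsection><title>Example</title>
+
+<para>For instance, to save a backup of the Nix database (the file
+name is only illustrative):</para>
+
+<screen>
+$ nix-store --dump-db > /tmp/nix-db-backup</screen>
+
+</refsection>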
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--load-db</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--load-db</option></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--load-db</option> reads a dump of the Nix
+database created by <option>--dump-db</option> from standard input and
+loads it into the Nix database.</para>
+
+</refsection>
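+
+<refsection><title>Example</title>
+
+<para>For instance, to load the backup made in the
+<option>--dump-db</option> example above into an (empty) Nix
+store:</para>
+
+<screen>
+$ nix-store --load-db &lt; /tmp/nix-db-backup</screen>
+
+</refsection>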
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--print-env</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--print-env</option></arg>
+    <arg choice='plain'><replaceable>drvpath</replaceable></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>The operation <option>--print-env</option> prints out the
+environment of a derivation in a format that can be evaluated by a
+shell.  The command line arguments of the builder are placed in the
+variable <envar>_args</envar>.</para>
+
+</refsection>
+
+<refsection><title>Example</title>
+
+<screen>
+$ nix-store --print-env $(nix-instantiate '&lt;nixpkgs>' -A firefox)
+<replaceable>…</replaceable>
+export src; src='/nix/store/plpj7qrwcz94z2psh6fchsi7s8yihc7k-firefox-12.0.source.tar.bz2'
+export stdenv; stdenv='/nix/store/7c8asx3yfrg5dg1gzhzyq2236zfgibnn-stdenv'
+export system; system='x86_64-linux'
+export _args; _args='-e /nix/store/9krlzvny65gdc8s7kpb6lkx8cd02c25c-default-builder.sh'
+</screen>
+
+</refsection>
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--query-failed-paths</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--query-failed-paths</option></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>If build failure caching is enabled through the
+<literal>build-cache-failures</literal> configuration option, the
+operation <option>--query-failed-paths</option> will print out all
+store paths that have failed to build.</para>
+
+</refsection>
+
+<refsection><title>Example</title>
+
+<screen>
+$ nix-store --query-failed-paths
+/nix/store/000zi5dcla86l92jn1g997jb06sidm7x-perl-PerlMagick-6.59
+/nix/store/0011iy7sfwbc1qj5a1f6ifjnbcdail8a-haskell-gitit-ghc7.0.4-0.8.1
+/nix/store/001c0yn1hkh86gprvrb46cxnz3pki7q3-gamin-0.1.10
+<replaceable>…</replaceable>
+</screen>
+
+</refsection>
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection><title>Operation <option>--clear-failed-paths</option></title>
+
+<refsection>
+  <title>Synopsis</title>
+  <cmdsynopsis>
+    <command>nix-store</command>
+    <arg choice='plain'><option>--clear-failed-paths</option></arg>
+    <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
+  </cmdsynopsis>
+</refsection>
+
+<refsection><title>Description</title>
+
+<para>If build failure caching is enabled through the
+<literal>build-cache-failures</literal> configuration option, the
+operation <option>--clear-failed-paths</option> clears the “failed”
+state of the given store paths, allowing them to be built again.  This
+is useful if the failure was actually transient (e.g. because the disk
+was full).</para>
+
+<para>If a path denotes a derivation, its output paths are cleared.
+You can provide the argument <literal>*</literal> to clear all store
+paths.</para>
+
+</refsection>
+
+<refsection><title>Example</title>
+
+<screen>
+$ nix-store --clear-failed-paths /nix/store/000zi5dcla86l92jn1g997jb06sidm7x-perl-PerlMagick-6.59
+$ nix-store --clear-failed-paths *
+</screen>
+
+</refsection>
+
+</refsection>
+
+
+<!--######################################################################-->
+
+<refsection condition="manpage"><title>Environment variables</title>
+
+<variablelist>
+  <xi:include href="env-common.xml#xmlns(db=http://docbook.org/ns/docbook)xpointer(//db:variablelist[@xml:id='env-common']/*)" />
+</variablelist>
+
+</refsection>
+
+
+</refentry>
diff --git a/doc/manual/opt-common-syn.xml b/doc/manual/opt-common-syn.xml
new file mode 100644
index 000000000000..d65f4009ee6e
--- /dev/null
+++ b/doc/manual/opt-common-syn.xml
@@ -0,0 +1,47 @@
+<nop xmlns="http://docbook.org/ns/docbook">
+  
+<arg><option>--help</option></arg>
+<arg><option>--version</option></arg>
+<arg rep='repeat'><option>--verbose</option></arg>
+<arg rep='repeat'><option>-v</option></arg>
+<arg><option>--no-build-output</option></arg>
+<arg><option>-Q</option></arg>
+<arg>
+  <group choice='req'>
+    <arg choice='plain'><option>--max-jobs</option></arg>
+    <arg choice='plain'><option>-j</option></arg>
+  </group>
+  <replaceable>number</replaceable>
+</arg>
+<arg>
+  <option>--cores</option>
+  <replaceable>number</replaceable>
+</arg>
+<arg>
+  <option>--max-silent-time</option>
+  <replaceable>number</replaceable>
+</arg>
+<arg>
+  <option>--timeout</option>
+  <replaceable>number</replaceable>
+</arg>
+<arg><option>--keep-going</option></arg>
+<arg><option>-k</option></arg>
+<arg><option>--keep-failed</option></arg>
+<arg><option>-K</option></arg>
+<arg><option>--fallback</option></arg>
+<arg><option>--readonly-mode</option></arg>
+<arg><option>--log-type</option> <replaceable>type</replaceable></arg>
+<arg><option>--show-trace</option></arg>
+<arg>
+  <option>-I</option>
+  <replaceable>path</replaceable>
+</arg>
+<arg>
+  <option>--option</option>
+  <replaceable>name</replaceable>
+  <replaceable>value</replaceable>
+</arg>
+<sbr />
+
+</nop>
diff --git a/doc/manual/opt-common.xml b/doc/manual/opt-common.xml
new file mode 100644
index 000000000000..f8584f4d62ed
--- /dev/null
+++ b/doc/manual/opt-common.xml
@@ -0,0 +1,389 @@
+<section xmlns="http://docbook.org/ns/docbook" xml:id="sec-common-options">
+
+<title>Common options</title>
+
+
+<para>Most Nix commands accept the following command-line options:</para>
+
+<variablelist xml:id="opt-common">
+
+<varlistentry><term><option>--help</option></term>
+
+  <listitem><para>Prints out a summary of the command syntax and
+  exits.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><option>--version</option></term>
+
+  <listitem><para>Prints out the Nix version number on standard output
+  and exits.</para></listitem>
+</varlistentry>
+
+
+<varlistentry><term><option>--verbose</option></term>
+  <term><option>-v</option></term>
+
+  <listitem>
+
+  <para>Increases the level of verbosity of diagnostic messages
+  printed on standard error.  For each Nix operation, the information
+  printed on standard output is well-defined; any diagnostic
+  information is printed on standard error, never on standard
+  output.</para>
+
+  <para>This option may be specified repeatedly.  Currently, the
+  following verbosity levels exist:</para>
+
+  <variablelist>
+
+    <varlistentry><term>0</term>
+    <listitem><para>“Errors only”: only print messages
+    explaining why the Nix invocation failed.</para></listitem>
+    </varlistentry>
+
+    <varlistentry><term>1</term>
+    <listitem><para>“Informational”: print
+    <emphasis>useful</emphasis> messages about what Nix is doing.
+    This is the default.</para></listitem>
+    </varlistentry>
+
+    <varlistentry><term>2</term>
+    <listitem><para>“Talkative”: print more informational
+    messages.</para></listitem>
+    </varlistentry>
+
+    <varlistentry><term>3</term>
+    <listitem><para>“Chatty”: print even more
+    informational messages.</para></listitem>
+    </varlistentry>
+
+    <varlistentry><term>4</term>
+    <listitem><para>“Debug”: print debug
+    information.</para></listitem>
+    </varlistentry>
+
+    <varlistentry><term>5</term>
+    <listitem><para>“Vomit”: print vast amounts of debug
+    information.</para></listitem>
+    </varlistentry>
+
+  </variablelist>
+
+  </listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><option>--no-build-output</option></term>
+  <term><option>-Q</option></term>
+
+  <listitem><para>By default, output written by builders to standard
+  output and standard error is echoed to the Nix command's standard
+  error.  This option suppresses this behaviour.  Note that the
+  builder's standard output and error are always written to a log file
+  in
+  <filename><replaceable>prefix</replaceable>/nix/var/log/nix</filename>.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry xml:id="opt-max-jobs"><term><option>--max-jobs</option></term>
+  <term><option>-j</option></term>
+
+  <listitem><para>Sets the maximum number of build jobs that Nix will
+  perform in parallel to the specified number.  The default is
+  specified by the <link
+  linkend='conf-build-max-jobs'><literal>build-max-jobs</literal></link>
+  configuration setting, which itself defaults to
+  <literal>1</literal>.  A higher value is useful on SMP systems or to
+  exploit I/O latency.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry xml:id="opt-cores"><term><option>--cores</option></term>
+
+  <listitem><para>Sets the value of the <envar>NIX_BUILD_CORES</envar>
+  environment variable in the invocation of builders.  Builders can
+  use this variable at their discretion to control the maximum amount
+  of parallelism.  For instance, in Nixpkgs, if the derivation
+  attribute <varname>enableParallelBuilding</varname> is set to
+  <literal>true</literal>, the builder passes the
+  <option>-j<replaceable>N</replaceable></option> flag to GNU Make.
+  It defaults to the value of the <link
+  linkend='conf-build-cores'><literal>build-cores</literal></link>
+  configuration setting, if set, or <literal>1</literal> otherwise.
+  The value <literal>0</literal> means that the builder should use all
+  available CPU cores in the system.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry xml:id="opt-max-silent-time"><term><option>--max-silent-time</option></term>
+
+  <listitem><para>Sets the maximum number of seconds that a builder
+  can go without producing any data on standard output or standard
+  error.  The default is specified by the <link
+  linkend='conf-build-max-silent-time'><literal>build-max-silent-time</literal></link>
+  configuration setting.  <literal>0</literal> means no
+  time-out.</para></listitem>
+
+</varlistentry>
+
+<varlistentry xml:id="opt-timeout"><term><option>--timeout</option></term>
+
+  <listitem><para>Sets the maximum number of seconds that a builder
+  can run.  The default is specified by the <link
+  linkend='conf-build-timeout'><literal>build-timeout</literal></link>
+  configuration setting.  <literal>0</literal> means no
+  timeout.</para></listitem>
+
+</varlistentry>
+
+<varlistentry><term><option>--keep-going</option></term>
+  <term><option>-k</option></term>
+
+  <listitem><para>Keep going in case of failed builds, to the
+  greatest extent possible.  That is, if building an input of some
+  derivation fails, Nix will still build the other inputs, but not the
+  derivation itself.  Without this option, Nix stops if any build
+  fails (except for builds of substitutes), possibly killing builds in
+  progress (in case of parallel or distributed builds).</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><option>--keep-failed</option></term>
+  <term><option>-K</option></term>
+
+  <listitem><para>Specifies that in case of a build failure, the
+  temporary directory (usually in <filename>/tmp</filename>) in which
+  the build takes place should not be deleted.  The path of the build
+  directory is printed as an informational message.
+    </para>
+  </listitem>
+</varlistentry>
+
+
+<varlistentry><term><option>--fallback</option></term>
+
+  <listitem>
+
+  <para>Whenever Nix attempts to build a derivation for which
+  substitutes are known for each output path, but realising the output
+  paths through the substitutes fails, fall back on building the
+  derivation.</para>
+
+  <para>The most common scenario in which this is useful is when we
+  have registered substitutes in order to perform binary distribution
+  from, say, a network repository.  If the repository is down, the
+  realisation of the derivation will fail.  When this option is
+  specified, Nix will build the derivation instead.  Thus,
+  installation from binaries falls back on installation from source.
+  This option is not the default since it is generally not desirable
+  for a transient failure in obtaining the substitutes to lead to a
+  full build from source (with the related consumption of
+  resources).</para>
+
+  </listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><option>--readonly-mode</option></term>
+
+  <listitem><para>When this option is used, no attempt is made to open
+  the Nix database.  Most Nix operations do need database access, so
+  those operations will fail.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry xml:id="opt-log-type"><term><option>--log-type</option>
+<replaceable>type</replaceable></term>
+
+  <listitem>
+
+  <para>This option determines how the output written to standard
+  error is formatted.  Nix’s diagnostic messages are typically
+  <emphasis>nested</emphasis>.  For instance, when tracing Nix
+  expression evaluation (<command>nix-env -vvvvv</command>, messages
+  from subexpressions are nested inside their parent expressions.  Nix
+  builder output is also often nested.  For instance, the Nix Packages
+  generic builder nests the various build tasks (unpack, configure,
+  compile, etc.), and the GNU Make in <literal>stdenv-linux</literal>
+  has been patched to provide nesting for recursive Make
+  invocations.</para>
+
+  <para><replaceable>type</replaceable> can be one of the
+  following:
+
+  <variablelist>
+
+    <varlistentry><term><literal>pretty</literal></term>
+
+      <listitem><para>Pretty-print the output, indicating different
+      nesting levels using spaces.  This is the
+      default.</para></listitem>
+
+    </varlistentry>
+
+    <varlistentry><term><literal>escapes</literal></term>
+
+      <listitem><para>Indicate nesting using escape codes that can be
+      interpreted by the <command>nix-log2xml</command> tool in the
+      Nix source distribution.  The resulting XML file can be fed into
+      the <command>log2html.xsl</command> stylesheet to create an HTML
+      file that can be browsed interactively, using JavaScript to
+      expand and collapse parts of the output.</para></listitem>
+
+    </varlistentry>
+
+    <varlistentry><term><literal>flat</literal></term>
+
+      <listitem><para>Remove all nesting.</para></listitem>
+
+    </varlistentry>
+
+  </variablelist>
+
+  </para>
+
+  </listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><option>--arg</option> <replaceable>name</replaceable> <replaceable>value</replaceable></term>
+
+  <listitem><para>This option is accepted by
+  <command>nix-env</command>, <command>nix-instantiate</command> and
+  <command>nix-build</command>.  When evaluating Nix expressions, the
+  expression evaluator will automatically try to call functions that
+  it encounters.  It can automatically call functions for which every
+  argument has a <link linkend='ss-functions'>default value</link>
+  (e.g., <literal>{ <replaceable>argName</replaceable> ?
+  <replaceable>defaultValue</replaceable> }:
+  <replaceable>...</replaceable></literal>).  With
+  <option>--arg</option>, you can also call functions that have
+  arguments without a default value (or override a default value).
+  That is, if the evaluator encounters a function with an argument
+  named <replaceable>name</replaceable>, it will call it with value
+  <replaceable>value</replaceable>.</para>
+
+  <para>For instance, the file
+  <literal>pkgs/top-level/all-packages.nix</literal> in Nixpkgs is
+  actually a function:
+
+<programlisting>
+{ # The system (e.g., `i686-linux') for which to build the packages.
+  system ? builtins.currentSystem
+  <replaceable>...</replaceable>
+}: <replaceable>...</replaceable></programlisting>
+
+  So if you call this Nix expression (e.g., when you do
+  <literal>nix-env -i <replaceable>pkgname</replaceable></literal>),
+  the function will be called automatically using the value <link
+  linkend='builtin-currentSystem'><literal>builtins.currentSystem</literal></link>
+  for the <literal>system</literal> argument.  You can override this
+  using <option>--arg</option>, e.g., <literal>nix-env -i
+  <replaceable>pkgname</replaceable> --arg system
+  \"i686-freebsd\"</literal>.  (Note that since the argument is a Nix
+  string literal, you have to escape the quotes.)</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><option>--argstr</option> <replaceable>name</replaceable> <replaceable>value</replaceable></term>
+
+  <listitem><para>This option is like <option>--arg</option>, only the
+  value is not a Nix expression but a string.  So instead of
+  <literal>--arg system \"i686-linux\"</literal> (the outer quotes are
+  to keep the shell happy) you can say <literal>--argstr system
+  i686-linux</literal>.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry xml:id="opt-attr"><term><option>--attr</option> / <option>-A</option>
+<replaceable>attrPath</replaceable></term>
+
+  <listitem><para>Select an attribute from the top-level Nix
+  expression being evaluated.  (<command>nix-env</command>,
+  <command>nix-instantiate</command>, <command>nix-build</command> and
+  <command>nix-shell</command> only.)  The <emphasis>attribute
+  path</emphasis> <replaceable>attrPath</replaceable> is a sequence of
+  attribute names separated by dots.  For instance, given a top-level
+  Nix expression <replaceable>e</replaceable>, the attribute path
+  <literal>xorg.xorgserver</literal> would cause the expression
+  <literal><replaceable>e</replaceable>.xorg.xorgserver</literal> to
+  be used.  See <link
+  linkend='refsec-nix-env-install-examples'><command>nix-env
+  --install</command></link> for some concrete examples.</para>
+
+  <para>In addition to attribute names, you can also specify list
+  indices.  For instance, the attribute path
+  <literal>foo.3.bar</literal> selects the <literal>bar</literal>
+  attribute of the fourth element of the list in the
+  <literal>foo</literal> attribute of the top-level
+  expression.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><option>--expr</option> / <option>-E</option></term>
+
+  <listitem><para>Interpret the command line arguments as a list of
+  Nix expressions to be parsed and evaluated, rather than as a list
+  of file names of Nix expressions.
+  (<command>nix-instantiate</command>, <command>nix-build</command>
+  and <command>nix-shell</command> only.)</para></listitem>
+
+</varlistentry>
+
+<varlistentry><term><option>--show-trace</option></term>
+
+  <listitem><para>Causes Nix to print out a stack trace in case of Nix
+  expression evaluation errors.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><option>-I</option> <replaceable>path</replaceable></term>
+
+  <listitem><para>Add a path to the Nix expression search path.  This
+  option may be given multiple times.  See the <envar>NIX_PATH</envar>
+  environment variable for information on the semantics of the Nix
+  search path.  Paths added through <option>-I</option> take
+  precedence over <envar>NIX_PATH</envar>.</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><option>--option</option> <replaceable>name</replaceable> <replaceable>value</replaceable></term>
+
+  <listitem><para>Set the Nix configuration option
+  <replaceable>name</replaceable> to <replaceable>value</replaceable>.
+  This overrides settings in the Nix configuration file (see
+  <citerefentry><refentrytitle>nix.conf</refentrytitle><manvolnum>5</manvolnum></citerefentry>).</para></listitem>
+
+</varlistentry>
+
+
+<varlistentry><term><option>--repair</option></term>
+
+  <listitem><para>Fix corrupted or missing store paths by
+  redownloading or rebuilding them.  Note that this is slow because it
+  requires computing a cryptographic hash of the contents of every
+  path in the closure of the build.  Also note the warning under
+  <command>nix-store --repair-path</command>.</para></listitem>
+
+</varlistentry>
+
+
+</variablelist>
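+
+<para>As an illustration, several of these options can be combined in
+a single invocation (the package name below is only an example):
+
+<screen>
+$ nix-build '&lt;nixpkgs>' -A hello -j 4 --cores 2 --keep-failed --show-trace</screen>
+
+</para>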
+
+
+</section>
diff --git a/doc/manual/opt-inst-syn.xml b/doc/manual/opt-inst-syn.xml
new file mode 100644
index 000000000000..e8c3f1ec6f04
--- /dev/null
+++ b/doc/manual/opt-inst-syn.xml
@@ -0,0 +1,22 @@
+<nop xmlns="http://docbook.org/ns/docbook">
+  
+  <arg>
+    <group choice='req'>
+      <arg choice='plain'><option>--prebuilt-only</option></arg>
+      <arg choice='plain'><option>-b</option></arg>
+    </group>
+  </arg>
+  
+  <arg>
+    <group choice='req'>
+      <arg choice='plain'><option>--attr</option></arg>
+      <arg choice='plain'><option>-A</option></arg>
+    </group>
+  </arg>
+
+  <arg><option>--from-expression</option></arg>
+  <arg><option>-E</option></arg>
+    
+  <arg><option>--from-profile</option> <replaceable>path</replaceable></arg>
+
+</nop>
diff --git a/doc/manual/package-management.xml b/doc/manual/package-management.xml
new file mode 100644
index 000000000000..e1d24b14772c
--- /dev/null
+++ b/doc/manual/package-management.xml
@@ -0,0 +1,591 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xml:id='chap-package-management'>
+
+<title>Package Management</title>
+
+
+<para>This chapter discusses how to do package management with Nix,
+i.e., how to obtain, install, upgrade, and erase packages.  This is
+the “user’s” perspective of the Nix system — people
+who want to <emphasis>create</emphasis> packages should consult
+<xref linkend='chap-writing-nix-expressions' />.</para>
+
+
+<section><title>Basic package management</title>
+
+<para>The main command for package management is <link
+linkend="sec-nix-env"><command>nix-env</command></link>.  You can use
+it to install, upgrade, and erase packages, and to query what
+packages are installed or are available for installation.</para>
+
+<para>In Nix, different users can have different “views”
+on the set of installed applications.  That is, there might be lots of
+applications present on the system (possibly in many different
+versions), but users can have a specific selection of those active —
+where “active” just means that it appears in a directory
+in the user’s <envar>PATH</envar>.  Such a view on the set of
+installed applications is called a <emphasis>user
+environment</emphasis>, which is just a directory tree consisting of
+symlinks to the files of the active applications.  </para>
+
+<para>Components are installed from a set of <emphasis>Nix
+expressions</emphasis> that tell Nix how to build those packages,
+including, if necessary, their dependencies.  There is a collection of
+Nix expressions called the Nix Package collection that contains
+packages ranging from basic development stuff such as GCC and Glibc,
+to end-user applications like Mozilla Firefox.  (Nix is however not
+tied to the Nix Package collection; you could write your own Nix
+expressions based on it, or completely new ones.)  You can download
+the latest version from <link
+xlink:href='http://nixos.org/nixpkgs/download.html' />.</para>
+
+<para>Assuming that you have downloaded and unpacked a release of Nix
+Packages, you can view the set of available packages in the release:
+
+<screen>
+$ nix-env -qaf nixpkgs-<replaceable>version</replaceable> '*'
+ant-blackdown-1.4.2
+aterm-2.2
+bash-3.0
+binutils-2.15
+bison-1.875d
+blackdown-1.4.2
+bzip2-1.0.2
+...</screen>
+
+where <literal>nixpkgs-<replaceable>version</replaceable></literal> is
+where you’ve unpacked the release.  The flag <option>-q</option>
+specifies a query operation; <option>-a</option> means that you want
+to show the “available” (i.e., installable) packages, as opposed to
+the installed packages; and <option>-f</option>
+<filename>nixpkgs-<replaceable>version</replaceable></filename>
+specifies the source of the packages.  The argument
+<literal>'*'</literal> shows all installable packages. (The quotes are
+necessary to prevent shell expansion.)  You can also select specific
+packages by name:
+
+<screen>
+$ nix-env -qaf nixpkgs-<replaceable>version</replaceable> gcc
+gcc-3.4.6
+gcc-4.0.3
+gcc-4.1.1</screen>
+
+</para>
+
+<para>It is also possible to see the <emphasis>status</emphasis> of
+available packages, i.e., whether they are installed into the user
+environment and/or present in the system:
+
+<screen>
+$ nix-env -qasf nixpkgs-<replaceable>version</replaceable> '*'
+...
+-PS bash-3.0
+--S binutils-2.15
+IPS bison-1.875d
+...</screen>
+
+The first character (<literal>I</literal>) indicates whether the
+package is installed in your current user environment.  The second
+(<literal>P</literal>) indicates whether it is present on your system
+(in which case installing it into your user environment would be a
+very quick operation).  The last one (<literal>S</literal>) indicates
+whether there is a so-called <emphasis>substitute</emphasis> for the
+package, which is Nix’s mechanism for doing binary deployment.  It
+just means that Nix knows that it can fetch a pre-built package from
+somewhere (typically a network server) instead of building it
+locally.</para>
+
+<para>So now that we have a set of Nix expressions we can build the
+packages contained in them.  This is done using <literal>nix-env
+-i</literal>.  For instance,
+
+<screen>
+$ nix-env -f nixpkgs-<replaceable>version</replaceable> -i subversion</screen>
+
+will install the package called <literal>subversion</literal> (which
+is, of course, the <link
+xlink:href='http://subversion.tigris.org/'>Subversion version
+management system</link>).</para>
+
+<para>When you do this for the first time, Nix will start building
+Subversion and all its dependencies.  This will take quite a while —
+typically an hour or two on modern machines.  Fortunately, there is a
+faster way (so do a Ctrl-C on that install operation!): you just need
+to tell Nix that pre-built binaries of all those packages are
+available somewhere.  This is done using the
+<command>nix-pull</command> command, which must be supplied with a URL
+containing a <emphasis>manifest</emphasis> describing what binaries
+are available.  This URL should correspond to the Nix Packages release
+that you’re using.  For instance, if you obtained a release from <link
+xlink:href='http://nixos.org/releases/nixpkgs/nixpkgs-0.12pre11712-4lrp7j8x'
+/>, then you should do:
+
+<screen>
+$ nix-pull http://nixos.org/releases/nixpkgs/nixpkgs-0.12pre11712-4lrp7j8x/MANIFEST</screen>
+
+If you then issue the installation command, it should start
+downloading binaries from <systemitem
+class='fqdomainname'>nixos.org</systemitem>, instead of building
+them from source.  This might still take a while since all
+dependencies must be downloaded, but on a reasonably fast connection
+such as a DSL line it’s on the order of a few minutes.</para>
+
+<para>Naturally, packages can also be uninstalled:
+
+<screen>
+$ nix-env -e subversion</screen>
+
+</para>
+
+<para>Upgrading to a new version is just as easy.  If you have a new
+release of Nix Packages, you can do:
+
+<screen>
+$ nix-env -f nixpkgs-<replaceable>version</replaceable> -u subversion</screen>
+
+This will <emphasis>only</emphasis> upgrade Subversion if there is a
+“newer” version in the new set of Nix expressions, as
+defined by some pretty arbitrary rules regarding ordering of version
+numbers (which generally do what you’d expect of them).  To just
+unconditionally replace Subversion with whatever version is in the Nix
+expressions, use <parameter>-i</parameter> instead of
+<parameter>-u</parameter>; <parameter>-i</parameter> will remove
+whatever version is already installed.</para>
+
+<para>You can also upgrade all packages for which there are newer
+versions:
+
+<screen>
+$ nix-env -f nixpkgs-<replaceable>version</replaceable> -u '*'</screen>
+
+</para>
+
+<para>Sometimes it’s useful to be able to ask what
+<command>nix-env</command> would do, without actually doing it.  For
+instance, to find out what packages would be upgraded by
+<literal>nix-env -u '*'</literal>, you can do
+
+<screen>
+$ nix-env ... -u '*' --dry-run
+(dry run; not doing anything)
+upgrading `libxslt-1.1.0' to `libxslt-1.1.10'
+upgrading `graphviz-1.10' to `graphviz-1.12'
+upgrading `coreutils-5.0' to `coreutils-5.2.1'</screen>
+
+</para>
+
+</section>
+
+
+<section xml:id="sec-profiles"><title>Profiles</title>
+
+<para>Profiles and user environments are Nix’s mechanism for
+implementing the ability to allow different users to have different
+configurations, and to do atomic upgrades and rollbacks.  To
+understand how they work, it’s useful to know a bit about how Nix
+works.  In Nix, packages are stored in unique locations in the
+<emphasis>Nix store</emphasis> (typically,
+<filename>/nix/store</filename>).  For instance, a particular version
+of the Subversion package might be stored in a directory
+<filename>/nix/store/dpmvp969yhdqs7lm2r1a3gng7pyq6vy4-subversion-1.1.3/</filename>,
+while another version might be stored in
+<filename>/nix/store/5mq2jcn36ldlmh93yj1n8s9c95pj7c5s-subversion-1.1.2</filename>.
+The long strings prefixed to the directory names are cryptographic
+hashes<footnote><para>160-bit truncations of SHA-256 hashes encoded in
+a base-32 notation, to be precise.</para></footnote> of
+<emphasis>all</emphasis> inputs involved in building the package —
+sources, dependencies, compiler flags, and so on.  So if two
+packages differ in any way, they end up in different locations in
+the file system, so they don’t interfere with each other.  <xref
+linkend='fig-user-environments' /> shows a part of a typical Nix
+store.</para>
+
+<figure xml:id='fig-user-environments'><title>User environments</title>
+  <mediaobject>
+    <imageobject>
+      <imagedata fileref='figures/user-environments.png' format='PNG' />
+    </imageobject>
+  </mediaobject>
+</figure>
+
+<para>Of course, you wouldn’t want to type
+
+<screen>
+$ /nix/store/dpmvp969yhdq...-subversion-1.1.3/bin/svn</screen>
+
+every time you want to run Subversion.  We could of course set up the
+<envar>PATH</envar> environment variable to include the
+<filename>bin</filename> directory of every package we want to use,
+but this is not very convenient since changing <envar>PATH</envar>
+doesn’t take effect for already existing processes.  The solution Nix
+uses is to create directory trees of symlinks to
+<emphasis>activated</emphasis> packages.  These are called
+<emphasis>user environments</emphasis> and they are packages
+themselves (though automatically generated by
+<command>nix-env</command>), so they too reside in the Nix store.  For
+instance, in <xref linkend='fig-user-environments' /> the user
+environment <filename>/nix/store/0c1p5z4kda11...-user-env</filename>
+contains a symlink to just Subversion 1.1.2 (arrows in the figure
+indicate symlinks).  This would be what we would obtain if we had done
+
+<screen>
+$ nix-env -i subversion</screen>
+
+on a set of Nix expressions that contained Subversion 1.1.2.</para>
+
+<para>This doesn’t in itself solve the problem, of course; you
+wouldn’t want to type
+<filename>/nix/store/0c1p5z4kda11...-user-env/bin/svn</filename>
+either.  That’s why there are symlinks outside of the store that point
+to the user environments in the store; for instance, the symlinks
+<filename>default-42-link</filename> and
+<filename>default-43-link</filename> in the example.  These are called
+<emphasis>generations</emphasis> since every time you perform a
+<command>nix-env</command> operation, a new user environment is
+generated based on the current one.  For instance, generation 43 was
+created from generation 42 when we did
+
+<screen>
+$ nix-env -i subversion mozilla</screen>
+
+on a set of Nix expressions that contained Mozilla and a new version
+of Subversion.</para>
+
+<para>Generations are grouped together into
+<emphasis>profiles</emphasis> so that different users don’t interfere
+with each other if they don’t want to.  For example:
+
+<screen>
+$ ls -l /nix/var/nix/profiles/
+...
+lrwxrwxrwx  1 eelco ... default-42-link -> /nix/store/0c1p5z4kda11...-user-env
+lrwxrwxrwx  1 eelco ... default-43-link -> /nix/store/3aw2pdyx2jfc...-user-env
+lrwxrwxrwx  1 eelco ... default -> default-43-link</screen>
+
+This shows a profile called <filename>default</filename>.  The file
+<filename>default</filename> itself is actually a symlink that points
+to the current generation.  When we do a <command>nix-env</command>
+operation, a new user environment and generation link are created
+based on the current one, and finally the <filename>default</filename>
+symlink is made to point at the new generation.  This last step is
+atomic on Unix, which explains how we can do atomic upgrades.  (Note
+that the building/installing of new packages doesn’t interfere in
+any way with old packages, since they are stored in different
+locations in the Nix store.)</para>
+
+<para>If you find that you want to undo a <command>nix-env</command>
+operation, you can just do
+
+<screen>
+$ nix-env --rollback</screen>
+
+which will just make the profile symlink point at the previous
+generation.  E.g., <filename>default</filename> would be made to point at
+<filename>default-42-link</filename>.  You can also switch to a
+specific generation:
+
+<screen>
+$ nix-env --switch-generation 43</screen>
+
+which in this example would roll forward to generation 43 again.  You
+can also see all available generations:
+
+<screen>
+$ nix-env --list-generations</screen></para>
+
+<para>Actually, there is another level of indirection not shown in the
+figure above.  You generally wouldn’t have
+<filename>/nix/var/nix/profiles/<replaceable>some-profile</replaceable>/bin</filename>
+in your <envar>PATH</envar>.  Rather, there is a symlink
+<filename>~/.nix-profile</filename> that points to your current
+profile.  This means that you should put
+<filename>~/.nix-profile/bin</filename> in your <envar>PATH</envar>
+(and indeed, that’s what the initialisation script
+<filename>/nix/etc/profile.d/nix.sh</filename> does).  This makes it
+easier to switch to a different profile.  You can do that using the
+command <command>nix-env --switch-profile</command>:
+
+<screen>
+$ nix-env --switch-profile /nix/var/nix/profiles/my-profile
+
+$ nix-env --switch-profile /nix/var/nix/profiles/default</screen>
+
+These commands switch to the <filename>my-profile</filename> and
+default profile, respectively.  If the profile doesn’t exist, it will
+be created automatically.  You should be careful about storing a
+profile in a location other than the <filename>profiles</filename>
+directory, since otherwise it might not be used as a root of the
+garbage collector (see <xref linkend='sec-garbage-collection'
+/>).</para>
+
+<para>All <command>nix-env</command> operations work on the profile
+pointed to by <filename>~/.nix-profile</filename>, but you can override
+this using the <option>--profile</option> option (abbreviation
+<option>-p</option>):
+
+<screen>
+$ nix-env -p /nix/var/nix/profiles/other-profile -i subversion</screen>
+
+This will <emphasis>not</emphasis> change the
+<filename>~/.nix-profile</filename> symlink.</para>
+
+</section>
+
+
+<section xml:id='sec-garbage-collection'><title>Garbage collection</title>
+
+<para><command>nix-env</command> operations such as upgrades
+(<option>-u</option>) and uninstall (<option>-e</option>) never
+actually delete packages from the system.  All they do (as shown
+above) is to create a new user environment that no longer contains
+symlinks to the “deleted” packages.</para>
+
+<para>Of course, since disk space is not infinite, unused packages
+should be removed at some point.  You can do this by running the Nix
+garbage collector.  It will remove from the Nix store any package
+not used (directly or indirectly) by any generation of any
+profile.</para>
+
+<para>Note however that as long as old generations reference a
+package, it will not be deleted.  After all, we wouldn’t be able to
+do a rollback otherwise.  So in order for garbage collection to be
+effective, you should also delete (some) old generations.  Of course,
+this should only be done if you are certain that you will not need to
+roll back.</para>
+
+<para>To delete all old (non-current) generations of your current
+profile:
+
+<screen>
+$ nix-env --delete-generations old</screen>
+
+Instead of <literal>old</literal> you can also specify a list of
+generations, e.g.,
+
+<screen>
+$ nix-env --delete-generations 10 11 14</screen>
+
+</para>
+
+<para>After removing appropriate old generations you can run the
+garbage collector as follows:
+
+<screen>
+$ nix-store --gc</screen>
+
+If you are feeling uncertain, you can also first view what files would
+be deleted:
+
+<screen>
+$ nix-store --gc --print-dead</screen>
+
+Likewise, the option <option>--print-live</option> will show the paths
+that <emphasis>won’t</emphasis> be deleted.</para>
+
+<para>There is also a convenient little utility
+<command>nix-collect-garbage</command>, which when invoked with the
+<option>-d</option> (<option>--delete-old</option>) switch deletes all
+old generations of all profiles in
+<filename>/nix/var/nix/profiles</filename>.  So
+
+<screen>
+$ nix-collect-garbage -d</screen>
+
+is a quick and easy way to clean up your system.</para>
+
+
+
+
+<section xml:id="ssec-gc-roots"><title>Garbage collector roots</title>
+
+<para>The roots of the garbage collector are all store paths to which
+there are symlinks in the directory
+<filename><replaceable>prefix</replaceable>/nix/var/nix/gcroots</filename>.
+For instance, the following command makes the path
+<filename>/nix/store/d718ef...-foo</filename> a root of the collector:
+
+<screen>
+$ ln -s /nix/store/d718ef...-foo /nix/var/nix/gcroots/bar</screen>
+	
+That is, after this command, the garbage collector will not remove
+<filename>/nix/store/d718ef...-foo</filename> or any of its
+dependencies.</para>
+
+<para>Subdirectories of
+<filename><replaceable>prefix</replaceable>/nix/var/nix/gcroots</filename>
+are also searched for symlinks.  Symlinks to non-store paths are
+followed and searched for roots, but symlinks to non-store paths
+<emphasis>inside</emphasis> the paths reached in that way are not
+followed to prevent infinite recursion.</para>
+
+</section>
+
+</section>
+
+
+<section xml:id="sec-channels"><title>Channels</title>
+
+<para>If you want to stay up to date with a set of packages, it’s not
+very convenient to manually download the latest set of Nix expressions
+for those packages, use <command>nix-pull</command> to register
+pre-built binaries (if available), and upgrade using
+<command>nix-env</command>.  Fortunately, there’s a better way:
+<emphasis>Nix channels</emphasis>.</para>
+
+<para>A Nix channel is just a URL that points to a place that contains
+a set of Nix expressions and a manifest.  Using the command <link
+linkend="sec-nix-channel"><command>nix-channel</command></link> you
+can automatically stay up to date with whatever is available at that
+URL.</para>
+
+<para>You can “subscribe” to a channel using
+<command>nix-channel --add</command>, e.g.,
+
+<screen>
+$ nix-channel --add http://nixos.org/channels/nixpkgs-unstable</screen>
+
+subscribes you to a channel that always contains the latest version
+of the Nix Packages collection.  (Instead of
+<literal>nixpkgs-unstable</literal> you could also subscribe to
+<literal>nixpkgs-stable</literal>, which should have a higher level of
+stability, but right now is just outdated.)  Subscribing really just
+means that the URL is added to the file
+<filename>~/.nix-channels</filename>.  Right now there is no command
+to “unsubscribe”; you should just edit that file manually
+and delete the offending URL.</para>
+
+<para>To obtain the latest Nix expressions available in a channel, do
+
+<screen>
+$ nix-channel --update</screen>
+
+This downloads the Nix expressions in every channel (downloaded from
+<literal><replaceable>url</replaceable>/nixexprs.tar.bz2</literal>)
+and registers any available pre-built binaries in every channel
+(by <command>nix-pull</command>ing
+<literal><replaceable>url</replaceable>/MANIFEST</literal>).  It also
+makes the union of each channel’s Nix expressions the default for
+<command>nix-env</command> operations.  Consequently, you can then say
+
+<screen>
+$ nix-env -u '*'</screen>
+
+to upgrade all packages in your profile to the latest versions
+available in the subscribed channels.</para>
+
+</section>
+
+
+<section xml:id="sec-one-click"><title>One-click installs</title>
+
+<para>Often, when you want to install a specific package (e.g., from
+the <link
+xlink:href="http://nixos.org/nixpkgs/">Nix
+Packages collection</link>), subscribing to a channel is a bit
+cumbersome.  And channels don’t help you at all if you want to install
+an older version of a package than the one provided by the current
+contents of the channel, or a package that has been removed from the
+channel.  That’s when <emphasis>one-click installs</emphasis> come in
+handy: you can just go to the web page that contains the package,
+click on it, and it will be installed with all the necessary
+dependencies.</para>
+
+<para>For instance, you can go to <link
+xlink:href="http://hydra.nixos.org/jobset/nixpkgs/trunk/channel/latest"
+/> and click on any link for the individual packages for your
+platform.  The first time you do this, your browser will ask what to
+do with <literal>application/nix-package</literal> files.  You should
+open them with <filename>/nix/bin/nix-install-package</filename>.
+This will open a window that asks you to confirm that you want to
+install the package.  When you answer <literal>Y</literal>, the
+package and all its dependencies will be installed.  This is a binary
+deployment mechanism — you get packages pre-compiled for the selected
+platform type.</para>
+
+<para>You can also install <literal>application/nix-package</literal>
+files from the command line directly.  See <xref
+linkend='sec-nix-install-package' /> for details.</para>
+
+</section>
+
+
+<section xml:id="sec-sharing-packages"><title>Sharing packages between machines</title>
+
+<para>Sometimes you want to copy a package from one machine to
+another.  Or, you want to install some packages and you know that
+another machine already has some or all of those packages or their
+dependencies.  In that case there are mechanisms to quickly copy
+packages between machines.</para>
+
+<para>The command <command
+linkend="sec-nix-copy-closure">nix-copy-closure</command> copies a Nix
+store path along with all its dependencies to or from another machine
+via the SSH protocol.  It doesn’t copy store paths that are already
+present on the target machine.  For example, the following command
+copies Firefox with all its dependencies:
+
+<screen>
+$ nix-copy-closure --to alice@itchy.example.org $(type -p firefox)</screen>
+
+See <xref linkend='sec-nix-copy-closure' /> for details.</para>
+
+<para>With <command linkend='refsec-nix-store-export'>nix-store
+--export</command> and <command
+linkend='refsec-nix-store-import'>nix-store --import</command> you can
+write the closure of a store path (that is, the path and all its
+dependencies) to a file, and then unpack that file into another Nix
+store.  For example,
+
+<screen>
+$ nix-store --export $(nix-store -qR $(type -p firefox)) > firefox.closure</screen>
+
+writes the closure of Firefox to a file.  You can then copy this file
+to another machine and install the closure:
+
+<screen>
+$ nix-store --import &lt; firefox.closure</screen>
+
+Any store paths in the closure that are already present in the target
+store are ignored.  It is also possible to pipe the export into
+another command, e.g. to copy and install a closure directly to/on
+another machine:
+
+<screen>
+$ nix-store --export $(nix-store -qR $(type -p firefox)) | bzip2 | \
+    ssh alice@itchy.example.org "bunzip2 | nix-store --import"</screen>
+
+But note that <command>nix-copy-closure</command> is generally more
+efficient in this example because it only copies paths that are not
+already present in the target Nix store.</para>
+
+<para>Finally, if you can mount the Nix store of a remote machine in
+your local filesystem, Nix can copy paths from the remote Nix store to
+the local Nix store <emphasis>on demand</emphasis>.  For instance,
+suppose that you mount a remote machine containing a Nix store via
+<command
+xlink:href="http://fuse.sourceforge.net/sshfs.html">sshfs</command>:
+
+<screen>
+$ sshfs alice@itchy.example.org:/ /mnt</screen>
+
+You should then set the <envar>NIX_OTHER_STORES</envar> environment
+variable to tell Nix about this remote Nix store:
+
+<screen>
+$ export NIX_OTHER_STORES=/mnt/nix</screen>
+
+Then if you do any Nix operation, e.g.
+
+<screen>
+$ nix-env -i firefox</screen>
+
+and Nix has to build a path that it sees is already present in
+<filename>/mnt/nix</filename>, then it will just copy from there
+instead of building it from source.</para>
+
+
+</section>
+
+
+</chapter>
diff --git a/doc/manual/quick-start.xml b/doc/manual/quick-start.xml
new file mode 100644
index 000000000000..17079906396c
--- /dev/null
+++ b/doc/manual/quick-start.xml
@@ -0,0 +1,110 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xml:id="chap-quick-start">
+
+<title>Quick Start</title>
+
+
+<para>This chapter is for impatient people who don't like reading
+documentation.  For more in-depth information you are kindly referred
+to the following chapters.</para>
+
+<orderedlist>
+
+<listitem><para>Install Nix by running the following:
+
+<screen>
+$ bash &lt;(curl https://nixos.org/nix/install)
+</screen>
+
+This will install Nix in <filename>/nix</filename>. The install script
+will create <filename>/nix</filename> using <command>sudo</command>,
+so make sure you have sufficient rights.  (For other installation
+methods, see <xref linkend="chap-installation"/>.)</para></listitem>
+
+<listitem><para>See what installable packages are currently available
+in the channel:
+
+<screen>
+$ nix-env -qa
+docbook-xml-4.2
+firefox-1.0pre-PR-0.10.1
+hello-2.1.1
+libxslt-1.1.0
+<replaceable>...</replaceable></screen>
+
+</para></listitem>
+
+<listitem><para>Install some packages from the channel:
+
+<screen>
+$ nix-env -i hello <replaceable>...</replaceable> </screen>
+
+This should download pre-built packages; it should not build them
+locally (if it does, something went wrong).</para></listitem>
+
+<listitem><para>Test that they work:
+
+<screen>
+$ which hello
+/home/eelco/.nix-profile/bin/hello
+$ hello
+Hello, world!
+</screen>
+
+</para></listitem>
+
+<listitem><para>Uninstall a package:
+
+<screen>
+$ nix-env -e hello</screen>
+
+</para></listitem>
+
+<listitem><para>To keep up to date with the channel, do:
+
+<screen>
+$ nix-channel --update nixpkgs
+$ nix-env -u '*'</screen>
+
+The latter command will upgrade each installed package for which there
+is a “newer” version (as determined by comparing the version
+numbers).</para></listitem>
+
+<listitem><para>You can also install specific packages directly from
+your web browser.  For instance, you can go to <link
+xlink:href="http://hydra.nixos.org/jobset/nixpkgs/trunk/channel/latest"
+/> and click on any link for the individual packages for your
+platform.  Associate <literal>application/nix-package</literal> with
+the program <command>nix-install-package</command>.  A window should
+appear asking you whether it’s okay to install the package.  Say
+<literal>Y</literal>.  The package and all its dependencies will be
+installed.</para></listitem>
+
+<listitem><para>If you're unhappy with the result of a
+<command>nix-env</command> action (e.g., an upgraded package turned
+out not to work properly), you can go back:
+
+<screen>
+$ nix-env --rollback</screen>
+
+</para></listitem>
+
+<listitem><para>You should periodically run the Nix garbage collector
+to get rid of unused packages, since uninstalls or upgrades don't
+actually delete them:
+
+<screen>
+$ nix-collect-garbage -d</screen>
+
+<!--
+The first command deletes old “generations” of your profile (making
+rollbacks impossible, but also making the packages in those old
+generations available for garbage collection), while the second
+command actually deletes them.-->
+
+</para></listitem>
+
+</orderedlist>
+
+</chapter>
diff --git a/doc/manual/quote-literals.xsl b/doc/manual/quote-literals.xsl
new file mode 100644
index 000000000000..5002643dbda4
--- /dev/null
+++ b/doc/manual/quote-literals.xsl
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+
+<xsl:stylesheet
+  version="1.0"
+  xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+  xmlns:str="http://exslt.org/strings"
+  extension-element-prefixes="str">
+
+  <xsl:output method="xml"/>
+
+  <xsl:template match="function|command|literal|varname|filename|option|quote">`<xsl:apply-templates/>'</xsl:template>
+
+  <xsl:template match="token"><xsl:text>    </xsl:text><xsl:apply-templates /><xsl:text>
+</xsl:text></xsl:template>
+
+  <xsl:template match="screen|programlisting">
+    <screen><xsl:apply-templates select="str:split(., '&#xA;')" /></screen>
+  </xsl:template>
+
+  <xsl:template match="section[following::section]">
+    <section>
+      <xsl:apply-templates />
+      <screen><xsl:text>
+      </xsl:text></screen>
+    </section>
+  </xsl:template>
+
+  <xsl:template match="*">
+    <xsl:element name="{name(.)}" namespace="{namespace-uri(.)}">
+      <xsl:copy-of select="namespace::*" />
+      <xsl:for-each select="@*">
+	<xsl:attribute name="{name(.)}" namespace="{namespace-uri(.)}">
+	  <xsl:value-of select="."/>
+	</xsl:attribute>
+      </xsl:for-each>
+      <xsl:apply-templates/>
+    </xsl:element>
+  </xsl:template>
+
+</xsl:stylesheet>
diff --git a/doc/manual/release-notes.xml b/doc/manual/release-notes.xml
new file mode 100644
index 000000000000..426078b829de
--- /dev/null
+++ b/doc/manual/release-notes.xml
@@ -0,0 +1,2521 @@
+<article xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xml:id="sec-relnotes">
+
+<title>Nix Release Notes</title>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.8"><title>Release 1.8 (TBA)</title>
+
+<para>TODO</para>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.7"><title>Release 1.7 (April 11, 2014)</title>
+
+<para>In addition to the usual bug fixes, this release has the
+following new features:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>Antiquotation is now allowed inside of quoted attribute
+    names (e.g. <literal>set."${foo}"</literal>). In the case where
+    the attribute name is just a single antiquotation, the quotes can
+    be dropped (e.g. the above example can be written
+    <literal>set.${foo}</literal>). If an attribute name inside of a
+    set declaration evaluates to <literal>null</literal> (e.g.
+    <literal>{ ${null} = false; }</literal>), then that attribute is
+    not added to the set.</para>
+  </listitem>
+
+  <listitem>
+    <para>Experimental support for cryptographically signed binary
+    caches.  See <link
+    xlink:href="https://github.com/NixOS/nix/commit/0fdf4da0e979f992db75cc17376e455ddc5a96d8">the
+    commit for details</link>.</para>
+  </listitem>
+
+  <listitem>
+    <para>An experimental new substituter,
+    <command>download-via-ssh</command>, that fetches binaries from
+    remote machines via SSH.  Specifying the flags <literal>--option
+    use-ssh-substituter true --option ssh-substituter-hosts
+    <replaceable>user@hostname</replaceable></literal> will cause Nix
+    to download binaries from the specified machine, if it has
+    them.</para>
+  </listitem>
+
+  <listitem>
+    <para><command>nix-store -r</command> and
+    <command>nix-build</command> have a new flag,
+    <option>--check</option>, that builds a previously built
+    derivation again, and prints an error message if the output is not
+    exactly the same. This helps to verify whether a derivation is
+    truly deterministic.  For example:
+
+<screen>
+$ nix-build '&lt;nixpkgs>' -A patchelf
+<replaceable>…</replaceable>
+$ nix-build '&lt;nixpkgs>' -A patchelf --check
+<replaceable>…</replaceable>
+error: derivation `/nix/store/1ipvxs…-patchelf-0.6' may not be deterministic:
+  hash mismatch in output `/nix/store/4pc1dm…-patchelf-0.6.drv'
+</screen>
+
+    </para>
+
+  </listitem>
+
+  <listitem>
+    <para>The <command>nix-instantiate</command> flags
+    <option>--eval-only</option> and <option>--parse-only</option>
+    have been renamed to <option>--eval</option> and
+    <option>--parse</option>, respectively.</para>
+  </listitem>
+
+  <listitem>
+    <para><command>nix-instantiate</command>,
+    <command>nix-build</command> and <command>nix-shell</command> now
+    have a flag <option>--expr</option> (or <option>-E</option>) that
+    allows you to specify the expression to be evaluated as a command
+    line argument.  For instance, <literal>nix-instantiate --eval -E
+    '1 + 2'</literal> will print <literal>3</literal>.</para>
+  </listitem>
+
+  <listitem>
+    <para><command>nix-shell</command> improvements:</para>
+
+    <itemizedlist>
+
+      <listitem>
+        <para>It has a new flag, <option>--packages</option> (or
+        <option>-p</option>), that sets up a build environment
+        containing the specified packages from Nixpkgs. For example,
+        the command
+
+<screen>
+$ nix-shell -p sqlite xorg.libX11 hello
+</screen>
+
+        will start a shell in which the given packages are
+        present.</para>
+      </listitem>
+
+      <listitem>
+        <para>It now uses <filename>shell.nix</filename> as the
+        default expression, falling back to
+        <filename>default.nix</filename> if the former doesn’t
+        exist.  This makes it convenient to have a
+        <filename>shell.nix</filename> in your project to set up a
+        nice development environment.</para>
+      </listitem>
+
+      <listitem>
+        <para>It evaluates the derivation attribute
+        <varname>shellHook</varname>, if set. Since
+        <literal>stdenv</literal> does not normally execute this hook,
+        it allows you to do <command>nix-shell</command>-specific
+        setup.</para>
+      </listitem>
+
+      <listitem>
+        <para>It preserves the user’s timezone setting.</para>
+      </listitem>
+
+    </itemizedlist>
+
+  </listitem>
+
+  <listitem>
+    <para>In chroots, Nix now sets up a <filename>/dev</filename>
+    containing only a minimal set of devices (such as
+    <filename>/dev/null</filename>). Note that it only does this if
+    you <emphasis>don’t</emphasis> have <filename>/dev</filename>
+    listed in your <option>build-chroot-dirs</option> setting;
+    otherwise, it will bind-mount the <literal>/dev</literal> from
+    outside the chroot.</para>
+
+    <para>Similarly, if you don’t have <filename>/dev/pts</filename> listed
+    in <option>build-chroot-dirs</option>, Nix will mount a private
+    <literal>devpts</literal> filesystem on the chroot’s
+    <filename>/dev/pts</filename>.</para>
+
+  </listitem>
+
+  <listitem>
+    <para>New built-in function: <function>builtins.toJSON</function>,
+    which returns a JSON representation of a value.</para>
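+    <para>A minimal illustration (the printed value is, roughly, the JSON
+    text as a Nix string):
+
+<screen>
+$ nix-instantiate --eval -E 'builtins.toJSON { x = 1; y = [ "a" ]; }'
+"{\"x\":1,\"y\":[\"a\"]}"
+</screen>
+
+    </para>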
+  </listitem>
+
+  <listitem>
+    <para><command>nix-env -q</command> has a new flag
+    <option>--json</option> to print a JSON representation of the
+    installed or available packages.</para>
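+    <para>For example,
+
+<screen>
+$ nix-env -qa --json
+</screen>
+
+    prints a JSON object describing each available package.</para>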
+  </listitem>
+
+  <listitem>
+    <para><command>nix-env</command> now supports meta attributes with
+    more complex values, such as attribute sets.</para>
+  </listitem>
+
+  <listitem>
+    <para>The <option>-A</option> flag now allows attribute names with
+    dots in them, e.g.
+
+<screen>
+$ nix-instantiate --eval '&lt;nixos>' -A 'config.systemd.units."nscd.service".text'
+</screen>
+
+    </para>
+  </listitem>
+
+  <listitem>
+    <para>The <option>--max-freed</option> option to
+    <command>nix-store --gc</command> now accepts a unit
+    specifier. For example, <literal>nix-store --gc --max-freed
+    1G</literal> will free up to 1 gigabyte of disk space.</para>
+  </listitem>
+
+  <listitem>
+    <para><command>nix-collect-garbage</command> has a new flag
+    <option>--delete-older-than</option>
+    <replaceable>N</replaceable><literal>d</literal>, which deletes
+    all user environment generations older than
+    <replaceable>N</replaceable> days.  Likewise, <command>nix-env
+    --delete-generations</command> accepts a
+    <replaceable>N</replaceable><literal>d</literal> age limit.</para>
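+    <para>For example, to delete generations older than 30 days:
+
+<screen>
+$ nix-collect-garbage --delete-older-than 30d
+$ nix-env --delete-generations 30d
+</screen>
+
+    </para>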
+  </listitem>
+
+  <listitem>
+    <para>Nix now heuristically detects whether a build failure was
+    due to a disk-full condition. In that case, the build is not
+    flagged as “permanently failed”. This is mostly useful for Hydra,
+    which needs to distinguish between permanent and transient build
+    failures.</para>
+  </listitem>
+
+  <listitem>
+    <para>There is a new symbol <literal>__curPos</literal> that
+    expands to an attribute set containing its file name and line and
+    column numbers, e.g. <literal>{ file = "foo.nix"; line = 10;
+    column = 5; }</literal>.  There also is a new builtin function,
+    <varname>unsafeGetAttrPos</varname>, that returns the position of
+    an attribute.  This is used by Nixpkgs to provide location
+    information in error messages, e.g.
+
+<screen>
+$ nix-build '&lt;nixpkgs>' -A libreoffice --argstr system x86_64-darwin
+error: the package ‘libreoffice-4.0.5.2’ in ‘.../applications/office/libreoffice/default.nix:263’
+  is not supported on ‘x86_64-darwin’
+</screen>
+
+    </para>
+  </listitem>
+
+  <listitem>
+    <para>The garbage collector is now more concurrent with other Nix
+    processes because it releases certain locks earlier.</para>
+  </listitem>
+
+  <listitem>
+    <para>The binary tarball installer has been improved.  You can now
+    install Nix by running:
+
+<screen>
+$ bash &lt;(curl https://nixos.org/nix/install)
+</screen>
+
+    </para>
+  </listitem>
+
+  <listitem>
+    <para>More evaluation errors include position information. For
+    instance, selecting a missing attribute will print something like
+
+<screen>
+error: attribute `nixUnstabl' missing, at /etc/nixos/configurations/misc/eelco/mandark.nix:216:15
+</screen>
+
+    </para>
+  </listitem>
+
+  <listitem>
+    <para>The command <command>nix-setuid-helper</command> is
+    gone.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix no longer uses Automake, but instead has a
+    non-recursive, GNU Make-based build system.</para>
+  </listitem>
+
+  <listitem>
+    <para>All installed libraries now have the prefix
+    <literal>libnix</literal>.  In particular, this gets rid of
+    <literal>libutil</literal>, which could clash with libraries with
+    the same name from other packages.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix now requires a compiler that supports C++11.</para>
+  </listitem>
+
+</itemizedlist>
+
+<para>This release has contributions from Danny Wilson, Domen Kožar,
+Eelco Dolstra, Ian-Woo Kim, Ludovic Courtès, Maxim Ivanov, Petr
+Rockai, Ricardo M. Correia and Shea Levy.</para>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.6.1"><title>Release 1.6.1 (October 28, 2013)</title>
+
+<para>This is primarily a bug fix release.  Changes of interest
+are:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>Nix 1.6 accidentally changed the semantics of antiquoted
+    paths in strings, such as <literal>"${/foo}/bar"</literal>.  This
+    release reverts to the Nix 1.5.3 behaviour.</para>
+  </listitem>
+
+  <listitem>
+    <para>Previously, Nix optimised expressions such as
+    <literal>"${<replaceable>expr</replaceable>}"</literal> to
+    <replaceable>expr</replaceable>.  Thus it neither checked whether
+    <replaceable>expr</replaceable> could be coerced to a string, nor
+    applied such coercions.  This meant that
+    <literal>"${123}"</literal> evaluatued to <literal>123</literal>,
+    and <literal>"${./foo}"</literal> evaluated to
+    <literal>./foo</literal> (even though
+    <literal>"${./foo} "</literal> evaluates to
+    <literal>"/nix/store/<replaceable>hash</replaceable>-foo "</literal>).
+    Nix now checks the type of antiquoted expressions and
+    applies coercions.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix now shows the exact position of undefined variables.  In
+    particular, undefined variable errors in a <literal>with</literal>
+    previously didn't show <emphasis>any</emphasis> position
+    information, so this makes it a lot easier to fix such
+    errors.</para>
+  </listitem>
+
+  <listitem>
+    <para>Undefined variables are now treated consistently.
+    Previously, the <function>tryEval</function> function would catch
+    undefined variables inside a <literal>with</literal> but not
+    outside.  Now <function>tryEval</function> never catches undefined
+    variables.</para>
+  </listitem>
+
+  <listitem>
+    <para>Bash completion in <command>nix-shell</command> now works
+    correctly.</para>
+  </listitem>
+
+  <listitem>
+    <para>Stack traces are less verbose: they no longer show calls to
+    builtin functions and only show a single line for each derivation
+    on the call stack.</para>
+  </listitem>
+
+  <listitem>
+    <para>New built-in function: <function>builtins.typeOf</function>,
+    which returns the type of its argument as a string.</para>
+  </listitem>
+
+</itemizedlist>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.6.0"><title>Release 1.6 (September 10, 2013)</title>
+
+<para>In addition to the usual bug fixes, this release has several new
+features:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>The command <command>nix-build --run-env</command> has been
+    renamed to <command>nix-shell</command>.</para>
+  </listitem>
+
+  <listitem>
+    <para><command>nix-shell</command> now sources
+    <filename>$stdenv/setup</filename> <emphasis>inside</emphasis> the
+    interactive shell, rather than in a parent shell.  This ensures
+    that shell functions defined by <literal>stdenv</literal> can be
+    used in the interactive shell.</para>
+  </listitem>
+
+  <listitem>
+    <para><command>nix-shell</command> has a new flag
+    <option>--pure</option> to clear the environment, so you get an
+    environment that more closely corresponds to the “real” Nix build.
+    </para>
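+    <para>For example (the attribute name is only illustrative):
+
+<screen>
+$ nix-shell --pure '&lt;nixpkgs>' -A hello
+</screen>
+
+    </para>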
+  </listitem>
+
+  <listitem>
+    <para><command>nix-shell</command> now sets the shell prompt
+    (<envar>PS1</envar>) to ensure that Nix shells are distinguishable
+    from your regular shells.</para>
+  </listitem>
+
+  <listitem>
+    <para><command>nix-env</command> no longer requires a
+    <literal>*</literal> argument to match all packages, so
+    <literal>nix-env -qa</literal> is equivalent to <literal>nix-env
+    -qa '*'</literal>.</para>
+  </listitem>
+
+  <listitem>
+    <para><command>nix-env -i</command> has a new flag
+    <option>--remove-all</option> (<option>-r</option>) to remove all
+    previous packages from the profile.  This makes it easier to do
+    declarative package management similar to NixOS’s
+    <option>environment.systemPackages</option>.  For instance, if you
+    have a specification <filename>my-packages.nix</filename> like this:
+
+<programlisting>
+with import &lt;nixpkgs> {};
+[ thunderbird
+  geeqie
+  ...
+]
+</programlisting>
+
+    then after any change to this file, you can run:
+
+<screen>
+$ nix-env -f my-packages.nix -ir
+</screen>
+
+    to update your profile to match the specification.</para>
+  </listitem>
+
+  <listitem>
+    <para>The ‘<literal>with</literal>’ language construct is now more
+    lazy.  It only evaluates its argument if a variable might actually
+    refer to an attribute in the argument.  For instance, this now
+    works:
+
+<programlisting>
+let
+  pkgs = with pkgs; { foo = "old"; bar = foo; } // overrides;
+  overrides = { foo = "new"; };
+in pkgs.bar
+</programlisting>
+
+    This evaluates to <literal>"new"</literal>, while previously it
+    gave an “infinite recursion” error.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix now has proper integer arithmetic operators. For
+    instance, you can write <literal>x + y</literal> instead of
+    <literal>builtins.add x y</literal>, or <literal>x &lt;
+    y</literal> instead of <literal>builtins.lessThan x y</literal>.
+    The comparison operators also work on strings.</para>
+  </listitem>
+
+  <listitem>
+    <para>On 64-bit systems, Nix integers are now 64 bits rather than
+    32 bits.</para>
+  </listitem>
+
+  <listitem>
+    <para>When using the Nix daemon, the <command>nix-daemon</command>
+    worker process now runs on the same CPU as the client, on systems
+    that support setting CPU affinity.  This gives a significant speedup
+    on some systems.</para>
+  </listitem>
+
+  <listitem>
+    <para>If a stack overflow occurs in the Nix evaluator, you now get
+    a proper error message (rather than “Segmentation fault”) on some
+    systems.</para>
+  </listitem>
+
+  <listitem>
+    <para>In addition to directories, you can now bind-mount regular
+    files in chroots through the (now misnamed) option
+    <option>build-chroot-dirs</option>.</para>
+  </listitem>
+
+</itemizedlist>
+
+<para>This release has contributions from Domen Kožar, Eelco Dolstra,
+Florian Friesdorf, Gergely Risko, Ivan Kozik, Ludovic Courtès and Shea
+Levy.</para>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.5.3"><title>Release 1.5.3 (June 17, 2013)</title>
+
+<para>This is primarily a bug fix release.  The following changes are
+noteworthy:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>Yet another security bug involving hard links to files
+    outside the store was fixed.  This bug only affected multi-user
+    installations that do not have hard link restrictions
+    enabled.  (NixOS is thus not vulnerable.)</para>
+  </listitem>
+
+  <listitem>
+    <para>The default binary cache URL has changed from
+    <uri>http://nixos.org/binary-cache</uri> to
+    <uri>http://cache.nixos.org</uri>.  The latter is hosted on Amazon
+    CloudFront (courtesy of <link
+    xlink:href="http://www.logicblox.com/">LogicBlox</link>) and
+    should provide better performance for users in both Europe and
+    North America.</para>
+  </listitem>
+
+  <listitem>
+    <para>The binary cache substituter now prints a warning message if
+    fetching information from the cache takes more than five seconds.
+    Thus network or server problems no longer make Nix appear to just
+    hang.</para>
+  </listitem>
+
+  <listitem>
+    <para>Stack traces now show function names, e.g.
+<screen>
+while evaluating `concatMapStrings' at `<replaceable>...</replaceable>/nixpkgs/pkgs/lib/strings.nix:18:25':
+</screen>
+      Also, if a function is called with an unexpected argument, Nix
+      now shows the name of the argument.
+    </para>
+  </listitem>
+
+</itemizedlist>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.5.2"><title>Release 1.5.2 (May 13, 2013)</title>
+
+<para>This is primarily a bug fix release.  It has contributions from
+Eelco Dolstra, Lluís Batlle i Rossell and Shea Levy.</para>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.5.1"><title>Release 1.5.1 (February 28, 2013)</title>
+
+<para>The bug fix to the bug fix had a bug itself, of course.  But
+this time it will work for sure!</para>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.5"><title>Release 1.5 (February 27, 2013)</title>
+
+<para>This is a brown paper bag release to fix a regression introduced
+by the hard link security fix in 1.4.</para>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.4"><title>Release 1.4 (February 26, 2013)</title>
+
+<para>This release fixes a security bug in multi-user operation.  It
+was possible for derivations to cause the mode of files outside of the
+Nix store to be changed to 444 (read-only but world-readable) by
+creating hard links to those files (<link
+xlink:href="https://github.com/NixOS/nix/commit/5526a282b5b44e9296e61e07d7d2626a79141ac4">details</link>).</para>
+
+<para>There are also the following improvements:</para>
+
+<itemizedlist>
+
+  <listitem><para>New built-in function:
+  <function>builtins.hashString</function>.</para></listitem>
+
+  <listitem><para>Build logs are now stored in
+  <filename>/nix/var/log/nix/drvs/<replaceable>XX</replaceable>/</filename>,
+  where <replaceable>XX</replaceable> is the first two characters of
+  the derivation.  This is useful on machines that keep a lot of build
+  logs (such as Hydra servers).</para></listitem>
+
+  <listitem><para>The function <function>corepkgs/fetchurl</function>
+  can now make the downloaded file executable.  This will allow
+  getting rid of all bootstrap binaries in the Nixpkgs source
+  tree.</para></listitem>
+
+  <listitem><para>Language change: The expression <literal>"${./path}
+  ..."</literal> now evaluates to a string instead of a
+  path.</para></listitem>
+
+</itemizedlist>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.3"><title>Release 1.3 (January 4, 2013)</title>
+
+<para>This is primarily a bug fix release.  When this version is first
+run on Linux, it removes any immutable bits from the Nix store and
+increases the schema version of the Nix store.  (The previous release
+removed support for setting the immutable bit; this release clears any
+remaining immutable bits to make certain operations more
+efficient.)</para>
+
+<para>This release has contributions from Eelco Dolstra and Stuart
+Pernsteiner.</para>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.2"><title>Release 1.2 (December 6, 2012)</title>
+
+<para>This release has the following improvements and changes:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>Nix has a new binary substituter mechanism: the
+    <emphasis>binary cache</emphasis>.  A binary cache contains
+    pre-built binaries of Nix packages.  Whenever Nix wants to build a
+    missing Nix store path, it will check a set of binary caches to
+    see if any of them has a pre-built binary of that path.  The
+    configuration setting <option>binary-caches</option> contains a
+    list of URLs of binary caches.  For instance, doing
+<screen>
+$ nix-env -i thunderbird --option binary-caches http://cache.nixos.org
+</screen>
+    will install Thunderbird and its dependencies, using the available
+    pre-built binaries in <uri>http://cache.nixos.org</uri>.
+    The main advantage over the old “manifest”-based method of getting
+    pre-built binaries is that you don’t have to worry about your
+    manifest being in sync with the Nix expressions you’re installing
+    from; i.e., you don’t need to run <command>nix-pull</command> to
+    update your manifest.  It’s also more scalable because you don’t
+    need to redownload a giant manifest file every time.
+    </para>
+
+    <para>A Nix channel can provide a binary cache URL that will be
+    used automatically if you subscribe to that channel.  If you use
+    the Nixpkgs or NixOS channels
+    (<uri>http://nixos.org/channels</uri>) you automatically get the
+    cache <uri>http://cache.nixos.org</uri>.</para>
+
+    <para>Binary caches are created using <command>nix-push</command>.
+    For details on the operation and format of binary caches, see the
+    <command>nix-push</command> manpage.  More details are provided in
+    <link xlink:href="http://lists.science.uu.nl/pipermail/nix-dev/2012-September/009826.html">this
+    nix-dev posting</link>.</para>
+  </listitem>
+
+  <listitem>
+    <para>Multiple output support should now be usable.  A derivation
+    can declare that it wants to produce multiple store paths by
+    saying something like
+<programlisting>
+outputs = [ "lib" "headers" "doc" ];
+</programlisting>
+    This will cause Nix to pass the intended store path of each output
+    to the builder through the environment variables
+    <literal>lib</literal>, <literal>headers</literal> and
+    <literal>doc</literal>.  Other packages can refer to a specific
+    output by referring to
+    <literal><replaceable>pkg</replaceable>.<replaceable>output</replaceable></literal>,
+    e.g.
+<programlisting>
+buildInputs = [ pkg.lib pkg.headers ];
+</programlisting>
+    If you install a package with multiple outputs using
+    <command>nix-env</command>, each output path will be symlinked
+    into the user environment.</para>
+  </listitem>
+
+  <listitem>
+    <para>Dashes are now valid as part of identifiers and attribute
+    names.</para>
+  </listitem>
+
+  <listitem>
+    <para>The new operation <command>nix-store --repair-path</command>
+    allows corrupted or missing store paths to be repaired by
+    redownloading them.  <command>nix-store --verify --check-contents
+    --repair</command> will scan and repair all paths in the Nix
+    store.  Similarly, <command>nix-env</command>,
+    <command>nix-build</command>, <command>nix-instantiate</command>
+    and <command>nix-store --realise</command> have a
+    <option>--repair</option> flag to detect and fix bad paths by
+    rebuilding or redownloading them.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix no longer sets the immutable bit on files in the Nix
+    store.  Instead, the recommended way to guard the Nix store
+    against accidental modification on Linux is to make it a read-only
+    bind mount, like this:
+
+<screen>
+$ mount --bind /nix/store /nix/store
+$ mount -o remount,ro,bind /nix/store
+</screen>
+
+    Nix will automatically make <filename>/nix/store</filename>
+    writable as needed (using a private mount namespace) to allow
+    modifications.</para>
+  </listitem>
+
+  <listitem>
+    <para>Store optimisation (replacing identical files in the store
+    with hard links) can now be done automatically every time a path
+    is added to the store.  This is enabled by setting the
+    configuration option <literal>auto-optimise-store</literal> to
+    <literal>true</literal> (disabled by default).</para>
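+    <para>A minimal <filename>nix.conf</filename> snippet to enable it:
+
+<programlisting>
+auto-optimise-store = true
+</programlisting>
+
+    </para>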
+  </listitem>
+
+  <listitem>
+    <para>Nix now supports <command>xz</command> compression for NARs
+    in addition to <command>bzip2</command>.  It compresses about 30%
+    better on typical archives and decompresses about twice as
+    fast.</para>
+  </listitem>
+
+  <listitem>
+    <para>Basic Nix expression evaluation profiling: setting the
+    environment variable <envar>NIX_COUNT_CALLS</envar> to
+    <literal>1</literal> will cause Nix to print how many times each
+    primop or function was executed.</para>
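+    <para>For example, with an arbitrary expression:
+
+<screen>
+$ NIX_COUNT_CALLS=1 nix-instantiate --eval -E '(x: x + 1) 41'
+</screen>
+
+    </para>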
+  </listitem>
+
+  <listitem>
+    <para>New primops: <varname>concatLists</varname>,
+    <varname>elem</varname>, <varname>elemAt</varname> and
+    <varname>filter</varname>.</para>
+  </listitem>
+
+  <listitem>
+    <para>The command <command>nix-copy-closure</command> has a new
+    flag <option>--use-substitutes</option> (<option>-s</option>) to
+    download missing paths on the target machine using the substitute
+    mechanism.</para>
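+    <para>A hypothetical invocation (the host name is illustrative):
+
+<screen>
+$ nix-copy-closure --to alice@avalon --use-substitutes \
+    $(nix-build '&lt;nixpkgs>' -A hello)
+</screen>
+
+    </para>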
+  </listitem>
+
+  <listitem>
+    <para>The command <command>nix-worker</command> has been renamed
+    to <command>nix-daemon</command>.  Support for running the Nix
+    worker in “slave” mode has been removed.</para>
+  </listitem>
+
+  <listitem>
+    <para>The <option>--help</option> flag of every Nix command now
+    invokes <command>man</command>.</para>
+  </listitem>
+
+  <listitem>
+    <para>Chroot builds are now supported on systemd machines.</para>
+  </listitem>
+
+</itemizedlist>
+
+<para>This release has contributions from Eelco Dolstra, Florian
+Friesdorf, Mats Erik Andersson and Shea Levy.</para>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.1"><title>Release 1.1 (July 18, 2012)</title>
+
+<para>This release has the following improvements:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>On Linux, when doing a chroot build, Nix now uses various
+    namespace features provided by the Linux kernel to improve
+    build isolation.  Namely:
+    <itemizedlist>
+      <listitem><para>The private network namespace ensures that
+      builders cannot talk to the outside world (or vice versa): each
+      build only sees a private loopback interface.  This also means
+      that two concurrent builds can listen on the same port (e.g. as
+      part of a test) without conflicting with each
+      other.</para></listitem>
+      <listitem><para>The PID namespace causes each build to start as
+      PID 1.  Processes outside of the chroot are not visible to those
+      on the inside.  On the other hand, processes inside the chroot
+      <emphasis>are</emphasis> visible from the outside (though with
+      different PIDs).</para></listitem>
+      <listitem><para>The IPC namespace prevents the builder from
+      communicating with outside processes using SysV IPC mechanisms
+      (shared memory, message queues, semaphores).  It also ensures
+      that all IPC objects are destroyed when the builder
+      exits.</para></listitem>
+      <listitem><para>The UTS namespace ensures that builders see a
+      hostname of <literal>localhost</literal> rather than the actual
+      hostname.</para></listitem>
+      <listitem><para>The private mount namespace was already used by
+      Nix to ensure that the bind-mounts used to set up the chroot are
+      cleaned up automatically.</para></listitem>
+    </itemizedlist>
+    </para>
+  </listitem>
+
+  <listitem>
+    <para>Build logs are now compressed using
+    <command>bzip2</command>.  The command <command>nix-store
+    -l</command> decompresses them on the fly.  This can be disabled
+    by setting the option <literal>build-compress-log</literal> to
+    <literal>false</literal>.</para>
+  </listitem>
+
+  <listitem>
+    <para>The creation of build logs in
+    <filename>/nix/var/log/nix/drvs</filename> can be disabled by
+    setting the new option <literal>build-keep-log</literal> to
+    <literal>false</literal>.  This is useful, for instance, for Hydra
+    build machines.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix now reserves some space in
+    <filename>/nix/var/nix/db/reserved</filename> to ensure that the
+    garbage collector can run successfully if the disk is full.  This
+    is necessary because SQLite transactions fail if the disk is
+    full.</para>
+  </listitem>
+
+  <listitem>
+    <para>Added a basic <function>fetchurl</function> function.  This
+    is not intended to replace the <function>fetchurl</function> in
+    Nixpkgs, but is useful for bootstrapping; e.g., it will allow us
+    to get rid of the bootstrap binaries in the Nixpkgs source tree
+    and download them instead.  You can use it by doing
+    <literal>import &lt;nix/fetchurl.nix> { url =
+    <replaceable>url</replaceable>; sha256 =
+    "<replaceable>hash</replaceable>"; }</literal>. (Shea Levy)</para>
+  </listitem>
+
+  <listitem>
+    <para>Improved RPM spec file. (Michel Alexandre Salim)</para>
+  </listitem>
+
+  <listitem>
+    <para>Support for on-demand socket-based activation in the Nix
+    daemon with <command>systemd</command>.</para>
+  </listitem>
+
+  <listitem>
+    <para>Added a manpage for
+    <citerefentry><refentrytitle>nix.conf</refentrytitle><manvolnum>5</manvolnum></citerefentry>.</para>
+  </listitem>
+
+  <listitem>
+    <para>When using the Nix daemon, the <option>-s</option> flag in
+    <command>nix-env -qa</command> is now much faster.</para>
+  </listitem>
+
+</itemizedlist>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-1.0"><title>Release 1.0 (May 11, 2012)</title>
+
+<para>There have been numerous improvements and bug fixes since the
+previous release.  Here are the most significant:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>Nix can now optionally use the Boehm garbage collector.
+    This significantly reduces the Nix evaluator’s memory footprint,
+    especially when evaluating large NixOS system configurations.  It
+    can be enabled using the <option>--enable-gc</option> configure
+    option.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix now uses SQLite for its database.  This is faster and
+    more flexible than the old <emphasis>ad hoc</emphasis> format.
+    SQLite is also used to cache the manifests in
+    <filename>/nix/var/nix/manifests</filename>, resulting in a
+    significant speedup.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix now has a search path for expressions.  The search path
+    is set using the environment variable <envar>NIX_PATH</envar> and
+    the <option>-I</option> command line option.  In Nix expressions,
+    paths between angle brackets are used to specify files that must
+    be looked up in the search path.  For instance, the expression
+    <literal>&lt;nixpkgs/default.nix></literal> looks for a file
+    <filename>nixpkgs/default.nix</filename> relative to every element
+    in the search path.</para>
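+    <para>For example, if <filename>/some/dir</filename> contains a
+    <filename>nixpkgs</filename> checkout (the paths are illustrative):
+
+<screen>
+$ export NIX_PATH=/some/dir
+$ nix-instantiate '&lt;nixpkgs>' -A hello
+</screen>
+
+    </para>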
+  </listitem>
+
+  <listitem>
+    <para>The new command <command>nix-build --run-env</command>
+    builds all dependencies of a derivation, then starts a shell in an
+    environment containing all variables from the derivation.  This is
+    useful for reproducing the environment of a derivation for
+    development.</para>
+  </listitem>
+
+  <listitem>
+    <para>The new command <command>nix-store --verify-path</command>
+    verifies that the contents of a store path have not
+    changed.</para>
+  </listitem>
+
+  <listitem>
+    <para>The new command <command>nix-store --print-env</command>
+    prints out the environment of a derivation in a format that can be
+    evaluated by a shell.</para>
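+    <para>A sketch of typical usage (assuming a Nixpkgs checkout on the
+    search path; output omitted):
+
+<screen>
+$ nix-store --print-env $(nix-instantiate '&lt;nixpkgs>' -A hello)
+</screen>
+
+    </para>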
+  </listitem>
+
+  <listitem>
+    <para>Attribute names can now be arbitrary strings.  For instance,
+    you can write <literal>{ "foo-1.2" = …; "bla bla" = …; }."bla
+    bla"</literal>.</para>
+  </listitem>
+
+  <listitem>
+    <para>Attribute selection can now provide a default value using
+    the <literal>or</literal> operator.  For instance, the expression
+    <literal>x.y.z or e</literal> evaluates to the attribute
+    <literal>x.y.z</literal> if it exists, and <literal>e</literal>
+    otherwise.</para>
+  </listitem>
+
+  <listitem>
+    <para>The right-hand side of the <literal>?</literal> operator can
+    now be an attribute path, e.g., <literal>attrs ?
+    a.b.c</literal>.</para>
+  </listitem>
+
+  <listitem>
+    <para>On Linux, Nix will now make files in the Nix store immutable
+    on filesystems that support it.  This prevents accidental
+    modification of files in the store by the root user.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix has preliminary support for derivations with multiple
+    outputs.  This is useful because it allows parts of a package to
+    be deployed and garbage-collected separately.  For instance,
+    development parts of a package such as header files or static
+    libraries would typically not be part of the closure of an
+    application, resulting in reduced disk usage and installation
+    time.</para>
+  </listitem>
+
+  <listitem>
+    <para>The Nix store garbage collector is faster and holds the
+    global lock for a shorter amount of time.</para>
+  </listitem>
+
+  <listitem>
+    <para>The option <option>--timeout</option> (corresponding to the
+    configuration setting <literal>build-timeout</literal>) allows you
+    to set an absolute timeout on builds — if a build runs for more than
+    the given number of seconds, it is terminated.  This is useful for
+    recovering automatically from builds that are stuck in an infinite
+    loop but keep producing output, and for which
+    <literal>--max-silent-time</literal> is ineffective.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix development has moved to GitHub (<link
+    xlink:href="https://github.com/NixOS/nix" />).</para>
+  </listitem>
+
+</itemizedlist>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-0.16"><title>Release 0.16 (August 17, 2010)</title>
+
+<para>This release has the following improvements:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>The Nix expression evaluator is now much faster in most
+    cases: typically, <link
+    xlink:href="http://www.mail-archive.com/nix-dev@cs.uu.nl/msg04113.html">3
+    to 8 times faster than the old implementation</link>.  It also
+    uses less memory.  It no longer depends on the ATerm
+    library.</para>
+  </listitem>
+
+  <listitem>
+    <para>
+      Support for configurable parallelism inside builders.  Build
+      scripts have always had the ability to perform multiple build
+      actions in parallel (for instance, by running <command>make -j
+      2</command>), but this was not desirable because the number of
+      actions to be performed in parallel was not configurable.  Nix
+      now has an option <option>--cores
+      <replaceable>N</replaceable></option> as well as a configuration
+      setting <varname>build-cores =
+      <replaceable>N</replaceable></varname> that causes the
+      environment variable <envar>NIX_BUILD_CORES</envar> to be set to
+      <replaceable>N</replaceable> when the builder is invoked.  The
+      builder can use this at its discretion to perform a parallel
+      build, e.g., by calling <command>make -j
+      <replaceable>N</replaceable></command>.  In Nixpkgs, this can be
+      enabled on a per-package basis by setting the derivation
+      attribute <varname>enableParallelBuilding</varname> to
+      <literal>true</literal>.
+    </para>
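+    <para>A minimal sketch: the setting in <filename>nix.conf</filename>
+    could look like
+
+<programlisting>
+build-cores = 2
+</programlisting>
+
+    and a builder could then run, for instance,
+
+<programlisting>
+make -j $NIX_BUILD_CORES
+</programlisting>
+
+    </para>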
+  </listitem>
+
+  <listitem>
+    <para><command>nix-store -q</command> now supports XML output
+    through the <option>--xml</option> flag.</para>
+  </listitem>
+
+  <listitem>
+    <para>Several bug fixes.</para>
+  </listitem>
+
+</itemizedlist>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-0.15"><title>Release 0.15 (March 17, 2010)</title>
+
+<para>This is a bug-fix release.  Among other things, it fixes
+building on Mac OS X (Snow Leopard), and improves the contents of
+<filename>/etc/passwd</filename> and <filename>/etc/group</filename>
+in <literal>chroot</literal> builds.</para>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-0.14"><title>Release 0.14 (February 4, 2010)</title>
+
+<para>This release has the following improvements:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>The garbage collector now starts deleting garbage much
+    faster than before.  It no longer determines liveness of all paths
+    in the store, but does so on demand.</para>
+  </listitem>
+
+  <listitem>
+    <para>Added a new operation, <command>nix-store --query
+    --roots</command>, that shows the garbage collector roots that
+    directly or indirectly point to the given store paths.</para>
+  </listitem>
+
+  <listitem>
+    <para>Removed support for converting Berkeley DB-based Nix
+    databases to the new schema.</para>
+  </listitem>
+
+  <listitem>
+    <para>Removed the <option>--use-atime</option> and
+    <option>--max-atime</option> garbage collector options.  They were
+    not very useful in practice.</para>
+  </listitem>
+
+  <listitem>
+    <para>On Windows, Nix now requires Cygwin 1.7.x.</para>
+  </listitem>
+
+  <listitem>
+    <para>A few bug fixes.</para>
+  </listitem>
+
+</itemizedlist>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-0.13"><title>Release 0.13 (November 5,
+2009)</title>
+
+<para>This is primarily a bug fix release.  It has some new
+features:</para>
+
+<itemizedlist>
+
+  <listitem>
+    <para>Syntactic sugar for writing nested attribute sets.  Instead of
+
+<programlisting>
+{
+  foo = {
+    bar = 123;
+    xyzzy = true;
+  };
+  a = { b = { c = "d"; }; };
+}
+</programlisting>
+
+    you can write
+
+<programlisting>
+{
+  foo.bar = 123;
+  foo.xyzzy = true;
+  a.b.c = "d";
+}
+</programlisting>
+
+    This is useful, for instance, in NixOS configuration files.</para>
+
+  </listitem>
+
+  <listitem>
+    <para>Support for Nix channels generated by Hydra, the Nix-based
+    continuous build system.  (Hydra generates NAR archives on the
+    fly, so the size and hash of these archives isn’t known in
+    advance.)</para>
+  </listitem>
+
+  <listitem>
+    <para>Support <literal>i686-linux</literal> builds directly on
+    <literal>x86_64-linux</literal> Nix installations.  This is
+    implemented using the <function>personality()</function> syscall,
+    which causes <command>uname</command> to return
+    <literal>i686</literal> in child processes.</para>
+  </listitem>
+
+  <listitem>
+    <para>Various improvements to the <literal>chroot</literal>
+    support.  Building in a <literal>chroot</literal> works quite well
+    now.</para>
+  </listitem>
+
+  <listitem>
+    <para>Nix no longer blocks if it tries to build a path and another
+    process is already building the same path.  Instead it tries to
+    build another buildable path first.  This improves
+    parallelism.</para>
+  </listitem>
+
+  <listitem>
+    <para>Support for large (> 4 GiB) files in NAR archives.</para>
+  </listitem>
+
+  <listitem>
+    <para>Various (performance) improvements to the remote build
+    mechanism.</para>
+  </listitem>
+
+  <listitem>
+    <para>New primops: <varname>builtins.addErrorContext</varname> (to
+    add a string to stack traces — useful for debugging),
+    <varname>builtins.isBool</varname>,
+    <varname>builtins.isString</varname>,
+    <varname>builtins.isInt</varname>,
+    <varname>builtins.intersectAttrs</varname>.</para>
+  </listitem>
+
+  <listitem>
+    <para>OpenSolaris support (Sander van der Burg).</para>
+  </listitem>
+
+  <listitem>
+    <para>Stack traces are no longer displayed unless the
+    <option>--show-trace</option> option is used.</para>
+  </listitem>
+
+  <listitem>
+    <para>The scoping rules for <literal>inherit
+    (<replaceable>e</replaceable>) ...</literal> in recursive
+    attribute sets have changed.  The expression
+    <replaceable>e</replaceable> can now refer to the attributes
+    defined in the containing set.</para>
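+    <para>A minimal example of the new behaviour (previously the
+    <literal>x</literal> in <literal>inherit (x)</literal> could not refer
+    to an attribute of the same set):
+
+<programlisting>
+rec {
+  x = { y = 123; };
+  inherit (x) y;
+}
+</programlisting>
+
+    </para>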
+  </listitem>
+
+</itemizedlist>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-0.12"><title>Release 0.12 (November 20,
+2008)</title>
+
+<itemizedlist>
+
+  <listitem>
+    <para>Nix no longer uses Berkeley DB to store Nix store metadata.
+    The principal advantages of the new storage scheme are: it works
+    properly over decent implementations of NFS (allowing Nix stores
+    to be shared between multiple machines); no recovery is needed
+    when a Nix process crashes; no write access is needed for
+    read-only operations; no more running out of Berkeley DB locks on
+    certain operations.</para>
+
+    <para>You still need to compile Nix with Berkeley DB support if
+    you want Nix to automatically convert your old Nix store to the
+    new schema.  If you don’t need this, you can build Nix with the
+    <filename>configure</filename> option
+    <option>--disable-old-db-compat</option>.</para>
+
+    <para>After the automatic conversion to the new schema, you can
+    delete the old Berkeley DB files:
+
+    <screen>
+$ cd /nix/var/nix/db
+$ rm __db* log.* derivers references referrers reserved validpaths DB_CONFIG</screen>
+
+    The new metadata is stored in the directories
+    <filename>/nix/var/nix/db/info</filename> and
+    <filename>/nix/var/nix/db/referrer</filename>.  Though the
+    metadata is stored in human-readable plain-text files, they are
+    not intended to be human-editable, as Nix is rather strict about
+    the format.</para>
+
+    <para>The new storage schema may or may not require less disk
+    space than the Berkeley DB environment, mostly depending on the
+    cluster size of your file system.  With 1 KiB clusters (which
+    seems to be the <literal>ext3</literal> default nowadays) it
+    usually takes up much less space.</para>
+  </listitem>
+
+  <listitem><para>There is a new substituter that copies paths
+  directly from other (remote) Nix stores mounted somewhere in the
+  filesystem.  For instance, you can speed up an installation by
+  mounting some remote Nix store that already has the packages in
+  question via NFS or <literal>sshfs</literal>.  The environment
+  variable <envar>NIX_OTHER_STORES</envar> specifies the locations of
+  the remote Nix directories,
+  e.g. <literal>/mnt/remote-fs/nix</literal>.</para></listitem>
+
+  <listitem><para>New <command>nix-store</command> operations
+  <option>--dump-db</option> and <option>--load-db</option> to dump
+  and reload the Nix database.</para></listitem>
+
+  <listitem><para>The garbage collector has a number of new options to
+  allow only some of the garbage to be deleted.  The option
+  <option>--max-freed <replaceable>N</replaceable></option> tells the
+  collector to stop after at least <replaceable>N</replaceable> bytes
+  have been deleted.  The option <option>--max-links
+  <replaceable>N</replaceable></option> tells it to stop after the
+  link count on <filename>/nix/store</filename> has dropped below
+  <replaceable>N</replaceable>.  This is useful for very large Nix
+  stores on filesystems with a limit of 32000 subdirectories (like
+  <literal>ext3</literal>).  The option <option>--use-atime</option>
+  causes store paths to be deleted in order of ascending last access
+  time.  This allows non-recently used stuff to be deleted.  The
+  option <option>--max-atime <replaceable>time</replaceable></option>
+  specifies an upper limit to the last accessed time of paths that may
+  be deleted.  For instance,
+
+    <screen>
+    $ nix-store --gc -v --max-atime $(date +%s -d "2 months ago")</screen>
+
+  deletes everything that hasn’t been accessed in two months.</para></listitem>
+
+  <listitem><para><command>nix-env</command> now uses optimistic
+  profile locking when performing an operation like installing or
+  upgrading, instead of setting an exclusive lock on the profile.
+  This allows multiple <command>nix-env -i / -u / -e</command>
+  operations on the same profile in parallel.  If a
+  <command>nix-env</command> operation sees at the end that the profile
+  was changed in the meantime by another process, it will just
+  restart.  This is generally cheap because the build results are
+  still in the Nix store.</para></listitem>
+
+  <listitem><para>The option <option>--dry-run</option> is now
+  supported by <command>nix-store -r</command> and
+  <command>nix-build</command>.</para></listitem>
+
+  <listitem><para>The information previously shown by
+  <option>--dry-run</option> (i.e., which derivations will be built
+  and which paths will be substituted) is now always shown by
+  <command>nix-env</command>, <command>nix-store -r</command> and
+  <command>nix-build</command>.  The total download size of
+  substitutable paths is now also shown.  For instance, a build will
+  show something like
+
+    <screen>
+the following derivations will be built:
+  /nix/store/129sbxnk5n466zg6r1qmq1xjv9zymyy7-activate-configuration.sh.drv
+  /nix/store/7mzy971rdm8l566ch8hgxaf89x7lr7ik-upstart-jobs.drv
+  ...
+the following paths will be downloaded/copied (30.02 MiB):
+  /nix/store/4m8pvgy2dcjgppf5b4cj5l6wyshjhalj-samba-3.2.4
+  /nix/store/7h1kwcj29ip8vk26rhmx6bfjraxp0g4l-libunwind-0.98.6
+  ...</screen>
+
+  </para></listitem>
+
+  <listitem><para>Language features:
+
+    <itemizedlist>
+
+      <listitem><para>@-patterns as in Haskell.  For instance, in a
+      function definition
+
+      <programlisting>f = args @ {x, y, z}: <replaceable>...</replaceable>;</programlisting>
+
+      <varname>args</varname> refers to the argument as a whole, which
+      is further pattern-matched against the attribute set pattern
+      <literal>{x, y, z}</literal>.</para></listitem>
+
+      <listitem><para>“<literal>...</literal>” (ellipsis) patterns.
+      An attribute set pattern can now say <literal>...</literal>  at
+      the end of the attribute name list to specify that the function
+      takes <emphasis>at least</emphasis> the listed attributes, while
+      ignoring additional attributes.  For instance,
+
+      <programlisting>{stdenv, fetchurl, fuse, ...}: <replaceable>...</replaceable></programlisting>
+
+      defines a function that accepts any attribute set that includes
+      at least the three listed attributes.</para></listitem>
+
+      <listitem><para>New primops:
+      <varname>builtins.parseDrvName</varname> (split a package name
+      string like <literal>"nix-0.12pre12876"</literal> into its name
+      and version components, e.g. <literal>"nix"</literal> and
+      <literal>"0.12pre12876"</literal>),
+      <varname>builtins.compareVersions</varname> (compare two version
+      strings using the same algorithm that <command>nix-env</command>
+      uses), <varname>builtins.length</varname> (efficiently compute
+      the length of a list), <varname>builtins.mul</varname> (integer
+      multiplication), <varname>builtins.div</varname> (integer
+      division).
+      <!-- <varname>builtins.genericClosure</varname> -->
+      </para></listitem>
+
+    </itemizedlist>
+
+  </para></listitem>
+
+  <listitem><para><command>nix-prefetch-url</command> now supports
+  <literal>mirror://</literal> URLs, provided that the environment
+  variable <envar>NIXPKGS_ALL</envar> points at a Nixpkgs
+  tree.</para></listitem>
+
+  <listitem><para>Removed the commands
+  <command>nix-pack-closure</command> and
+  <command>nix-unpack-closure</command>.   You can do almost the same
+  thing but much more efficiently by doing <literal>nix-store --export
+  $(nix-store -qR <replaceable>paths</replaceable>) > closure</literal> and
+  <literal>nix-store --import &lt;
+  closure</literal>.</para></listitem>
+
+  <listitem><para>Lots of bug fixes, including a big performance bug in
+  the handling of <literal>with</literal>-expressions.</para></listitem>
+
+</itemizedlist>
+
+</section>
+
+
+<!--==================================================================-->
+
+<section xml:id="ssec-relnotes-0.11"><title>Release 0.11 (December 31,
+2007)</title>
+
+<para>Nix 0.11 has many improvements over the previous stable release.
+The most important improvement is secure multi-user support.  It also
+features many usability enhancements and language extensions, many of
+them prompted by NixOS, the purely functional Linux distribution based
+on Nix.  Here is an (incomplete) list:</para>
+
+
+<itemizedlist>
+
+
+  <listitem><para>Secure multi-user support.  A single Nix store can
+  now be shared between multiple (possibly untrusted) users.  This is
+  an important feature for NixOS, where it allows non-root users to
+  install software.  The old setuid method for sharing a store between
+  multiple users has been removed.  Details for setting up a
+  multi-user store can be found in the manual.</para></listitem>
+
+
+  <listitem><para>The new command <command>nix-copy-closure</command>
+  gives you an easy and efficient way to exchange software between
+  machines.  It copies the missing parts of the closure of a set of
+  store paths to or from a remote machine via
+  <command>ssh</command>.</para></listitem>
+
+
+  <listitem><para>A new kind of string literal: strings between double
+  single-quotes (<literal>''</literal>) have indentation
+  “intelligently” removed.  This allows large strings (such as shell
+  scripts or configuration file fragments in NixOS) to cleanly follow
+  the indentation of the surrounding expression.  It also requires
+  much less escaping, since <literal>''</literal> is less common in
+  most languages than <literal>"</literal>.</para></listitem>
+
+
+  <listitem><para><command>nix-env</command> <option>--set</option>
+  modifies the current generation of a profile so that it contains
+  exactly the specified derivation, and nothing else.  For example,
+  <literal>nix-env -p /nix/var/nix/profiles/browser --set
+  firefox</literal> lets the profile named
+  <filename>browser</filename> contain just Firefox.</para></listitem>
+
+
+  <listitem><para><command>nix-env</command> now maintains
+  meta-information about installed packages in profiles.  The
+  meta-information is the contents of the <varname>meta</varname>
+  attribute of derivations, such as <varname>description</varname> or
+  <varname>homepage</varname>.  The command <literal>nix-env -q --xml
+  --meta</literal> shows all meta-information.</para></listitem>
+
+
+  <listitem><para><command>nix-env</command> now uses the
+  <varname>meta.priority</varname> attribute of derivations to resolve
+  filename collisions between packages.  Lower priority values denote
+  a higher priority.  For instance, the GCC wrapper package and the
+  Binutils package in Nixpkgs both have a file
+  <filename>bin/ld</filename>, so previously if you tried to install
+  both you would get a collision.  Now, on the other hand, the GCC
+  wrapper declares a higher priority than Binutils, so the former’s
+  <filename>bin/ld</filename> is symlinked in the user
+  environment.</para></listitem>
+
+
+  <listitem><para><command>nix-env -i / -u</command>: instead of
+  breaking package ties by version, break them by priority and version
+  number.  That is, if there are multiple packages with the same name,
+  then pick the package with the highest priority, and only use the
+  version if there are multiple packages with the same
+  priority.</para>
+
+  <para>This makes it possible to mark specific versions/variants in
+  Nixpkgs more or less desirable than others.  A typical example would
+  be a beta version of some package (e.g.,
+  <literal>gcc-4.2.0rc1</literal>) which should not be installed even
+  though it is the highest version, except when it is explicitly
+  selected (e.g., <literal>nix-env -i
+  gcc-4.2.0rc1</literal>).</para></listitem>
+
+
+  <listitem><para><command>nix-env --set-flag</command> allows meta
+  attributes of installed packages to be modified.  There are several
+  attributes that can be usefully modified, because they affect the
+  behaviour of <command>nix-env</command> or the user environment
+  build script:
+
+    <itemizedlist>
+
+      <listitem><para><varname>meta.priority</varname> can be changed
+      to resolve filename clashes (see above).</para></listitem>
+
+      <listitem><para><varname>meta.keep</varname> can be set to
+      <literal>true</literal> to prevent the package from being
+      upgraded or replaced.  Useful if you want to hang on to an older
+      version of a package.</para></listitem>
+
+      <listitem><para><varname>meta.active</varname> can be set to
+      <literal>false</literal> to “disable” the package.  That is, no
+      symlinks will be generated to the files of the package, but it
+      remains part of the profile (so it won’t be garbage-collected).
+      Set it back to <literal>true</literal> to re-enable the
+      package.</para></listitem>
+
+    </itemizedlist>
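+
+  For instance (the package names are only illustrative):
+
+<screen>
+$ nix-env --set-flag priority 10 gcc
+$ nix-env --set-flag keep true nix
+</screen>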
+
+  </para></listitem>
+
+
+  <listitem><para><command>nix-env -q</command> now has a flag
+  <option>--prebuilt-only</option> (<option>-b</option>) that causes
+  <command>nix-env</command> to show only those derivations whose
+  output is already in the Nix store or that can be substituted (i.e.,
+  downloaded from somewhere).  In other words, it shows the packages
+  that can be installed “quickly”, i.e., don’t need to be built from
+  source.  The <option>-b</option> flag is also available in
+  <command>nix-env -i</command> and <command>nix-env -u</command> to
+  filter out derivations for which no pre-built binary is
+  available.</para></listitem>
+
+
+  <listitem><para>The new option <option>--argstr</option> (in
+  <command>nix-env</command>, <command>nix-instantiate</command> and
+  <command>nix-build</command>) is like <option>--arg</option>, except
+  that the value is a string.  For example, <literal>--argstr system
+  i686-linux</literal> is equivalent to <literal>--arg system
+  \"i686-linux\"</literal> (note that <option>--argstr</option>
+  prevents annoying quoting around shell arguments).</para></listitem>
+
+
+  <listitem><para><command>nix-store</command> has a new operation
+  <option>--read-log</option> (<option>-l</option>)
+  <parameter>paths</parameter> that shows the build log of the given
+  paths.</para></listitem>
+
+
+  <!--
+  <listitem><para>TODO: semantic cleanups of string concatenation
+  etc. (mostly in r6740).</para></listitem>
+  -->
+
+
+  <listitem><para>Nix now uses Berkeley DB 4.5.  The database is
+  upgraded automatically, but you should be careful not to use old
+  versions of Nix that still use Berkeley DB 4.4.</para></listitem>
+
+
+  <!-- foo
+  <listitem><para>TODO: option <option>- -reregister</option> in
+  <command>nix-store - -register-validity</command>.</para></listitem>
+  -->
+
+
+  <listitem><para>The option <option>--max-silent-time</option>
+  (corresponding to the configuration setting
+  <literal>build-max-silent-time</literal>) allows you to set a
+  timeout on builds — if a build produces no output on
+  <literal>stdout</literal> or <literal>stderr</literal> for the given
+  number of seconds, it is terminated.  This is useful for recovering
+  automatically from builds that are stuck in an infinite
+  loop.</para></listitem>
+
+
+  <listitem><para><command>nix-channel</command>: each subscribed
+  channel is its own attribute in the top-level expression generated
+  for the channel.  This allows disambiguation (e.g. <literal>nix-env
+  -i -A nixpkgs_unstable.firefox</literal>).</para></listitem>
+
+
+  <listitem><para>The substitutes table has been removed from the
+  database.  This makes operations such as <command>nix-pull</command>
+  and <command>nix-channel --update</command> much, much
+  faster.</para></listitem>
+
+
+  <listitem><para><command>nix-pull</command> now supports
+  bzip2-compressed manifests.  This speeds up
+  channels.</para></listitem>
+
+
+  <listitem><para><command>nix-prefetch-url</command> now has a
+  limited form of caching.  This is used by
+  <command>nix-channel</command> to prevent unnecessary downloads when
+  the channel hasn’t changed.</para></listitem>
+
+
+  <listitem><para><command>nix-prefetch-url</command> now by default
+  computes the SHA-256 hash of the file instead of the MD5 hash.  In
+  calls to <function>fetchurl</function> you should pass the
+  <literal>sha256</literal> attribute instead of
+  <literal>md5</literal>.  You can pass either a hexadecimal or a
+  base-32 encoding of the hash.</para></listitem>
+
+
+  <listitem><para>Nix can now perform builds in an automatically
+  generated “chroot”.  This prevents a builder from accessing stuff
+  outside of the Nix store, and thus helps ensure purity.  This is an
+  experimental feature.</para></listitem>
+
+
+  <listitem><para>The new command <command>nix-store
+  --optimise</command> reduces Nix store disk space usage by finding
+  identical files in the store and hard-linking them to each other.
+  It typically reduces the size of the store by something like
+  25-35%.</para></listitem>
+
+
+  <listitem><para><filename>~/.nix-defexpr</filename> can now be a
+  directory, in which case the Nix expressions in that directory are
+  combined into an attribute set, with the file names used as the
+  names of the attributes.  The command <command>nix-env
+  --import</command> (which used to set the
+  <filename>~/.nix-defexpr</filename> symlink) is
+  removed.</para></listitem>
+
+
+  <listitem><para>Derivations can specify the new special attribute
+  <varname>allowedReferences</varname> to enforce that the references
+  in the output of a derivation are a subset of a declared set of
+  paths.  For example, if <varname>allowedReferences</varname> is an
+  empty list, then the output must not have any references.  This is
+  used in NixOS to check that generated files such as initial ramdisks
+  for booting Linux don’t have any dependencies.</para></listitem>
+
+
+  <listitem><para>The new attribute
+  <varname>exportReferencesGraph</varname> allows builders access to
+  the references graph of their inputs.  This is used in NixOS for
+  tasks such as generating ISO-9660 images that contain a Nix store
+  populated with the closure of certain paths.</para></listitem>
+
+
+  <listitem><para>Fixed-output derivations (like
+  <function>fetchurl</function>) can define the attribute
+  <varname>impureEnvVars</varname> to allow external environment
+  variables to be passed to builders.  This is used in Nixpkgs to
+  support proxy configuration, among other things.</para></listitem>
+
+
+  <listitem><para>Several new built-in functions:
+  <function>builtins.attrNames</function>,
+  <function>builtins.filterSource</function>,
+  <function>builtins.isAttrs</function>,
+  <function>builtins.isFunction</function>,
+  <function>builtins.listToAttrs</function>,
+  <function>builtins.stringLength</function>,
+  <function>builtins.sub</function>,
+  <function>builtins.substring</function>,
+  <function>throw</function>,
+  <function>builtins.trace</function>,
+  <function>builtins.readFile</function>.</para></listitem>
+
+
+</itemizedlist>
+
+</section>
+
+
+
+<!--==================================================================-->
+
+<section><title>Release 0.10.1 (October 11, 2006)</title>
+
+<para>This release fixes two somewhat obscure bugs that occur when
+evaluating Nix expressions that are stored inside the Nix store
+(<literal>NIX-67</literal>).  These do not affect most users.</para>
+
+</section>
+
+
+
+<!--==================================================================-->
+
+<section><title>Release 0.10 (October 6, 2006)</title>
+
+<note><para>This version of Nix uses Berkeley DB 4.4 instead of 4.3.
+The database is upgraded automatically, but you should be careful not
+to use old versions of Nix that still use Berkeley DB 4.3.  In
+particular, if you use a Nix installed through Nix, you should run
+
+<screen>
+$ nix-store --clear-substitutes</screen>
+
+first.</para></note>
+
+<warning><para>Also, the database schema has changed slightly to fix a
+performance issue (see below).  When you run any Nix 0.10 command for
+the first time, the database will be upgraded automatically.  This is
+irreversible.</para></warning>
+
+<itemizedlist>
+
+
+  <!-- Usability / features -->
+
+
+  <listitem><para><command>nix-env</command> usability improvements:
+
+    <itemizedlist>
+
+      <listitem><para>An option <option>--compare-versions</option>
+      (or <option>-c</option>) has been added to <command>nix-env
+      --query</command> to allow you to compare installed versions of
+      packages to available versions, or vice versa.  An easy way to
+      see if you are up to date with what’s in your subscribed
+      channels is <literal>nix-env -qc \*</literal>.</para></listitem>
+
+      <listitem><para><literal>nix-env --query</literal> now takes as
+      arguments a list of package names about which to show
+      information, just like <option>--install</option>, etc.: for
+      example, <literal>nix-env -q gcc</literal>.  Note that to show
+      all derivations, you need to specify
+      <literal>\*</literal>.</para></listitem>
+
+      <listitem><para><literal>nix-env -i
+      <replaceable>pkgname</replaceable></literal> will now install
+      the highest available version of
+      <replaceable>pkgname</replaceable>, rather than installing all
+      available versions (which would probably give collisions)
+      (<literal>NIX-31</literal>).</para></listitem>
+
+      <listitem><para><literal>nix-env (-i|-u) --dry-run</literal> now
+      shows exactly which missing paths will be built or
+      substituted.</para></listitem>
+
+      <listitem><para><literal>nix-env -qa --description</literal>
+      shows human-readable descriptions of packages, provided that
+      they have a <literal>meta.description</literal> attribute (which
+      most packages in Nixpkgs don’t have yet).</para></listitem>
+
+    </itemizedlist>
+
+  </para></listitem>
+
+
+  <listitem><para>New language features:
+
+    <itemizedlist>
+
+      <listitem><para>Reference scanning (which happens after each
+      build) is much faster and takes a constant amount of
+      memory.</para></listitem>
+
+      <listitem><para>String interpolation.  Expressions like
+
+<programlisting>
+"--with-freetype2-library=" + freetype + "/lib"</programlisting>
+
+      can now be written as
+
+<programlisting>
+"--with-freetype2-library=${freetype}/lib"</programlisting>
+
+      You can write arbitrary expressions within
+      <literal>${<replaceable>...</replaceable>}</literal>, not just
+      identifiers.</para></listitem>
+
+      <listitem><para>Multi-line string literals.</para></listitem>
+
+      <listitem><para>String concatenations can now involve
+      derivations, as in the example <code>"--with-freetype2-library="
+      + freetype + "/lib"</code>.  This was not previously possible
+      because we need to register that a derivation that uses such a
+      string is dependent on <literal>freetype</literal>.  The
+      evaluator now properly propagates this information.
+      Consequently, the subpath operator (<literal>~</literal>) has
+      been deprecated.</para></listitem>
+
+      <listitem><para>Default values of function arguments can now
+      refer to other function arguments; that is, all arguments are in
+      scope in the default values
+      (<literal>NIX-45</literal>).</para></listitem>
+
+      <!--
+      <listitem><para>TODO: domain checks (r5895).</para></listitem>
+      -->
+
+      <listitem><para>Lots of new built-in primitives, such as
+      functions for list manipulation and integer arithmetic.  See the
+      manual for a complete list.  All primops are now available in
+      the set <varname>builtins</varname>, allowing one to test for
+      the availability of a primop in a backwards-compatible
+      way.</para></listitem>
+
+      <listitem><para>Real let-expressions: <literal>let x = ...;
+      ... z = ...; in ...</literal>.</para></listitem>
+
+    </itemizedlist>
+
+  </para></listitem>
+
+
+  <listitem><para>New commands <command>nix-pack-closure</command> and
+  <command>nix-unpack-closure</command> that can be used to easily
+  transfer a store path with all its dependencies to another machine.
+  Very convenient whenever you have some package on your machine and
+  you want to copy it somewhere else.</para></listitem>
+
+
+  <listitem><para>XML support:
+
+    <itemizedlist>
+
+      <listitem><para><literal>nix-env -q --xml</literal> prints the
+      installed or available packages in an XML representation for
+      easy processing by other tools.</para></listitem>
+
+      <listitem><para><literal>nix-instantiate --eval-only
+      --xml</literal> prints an XML representation of the resulting
+      term.  (The new flag <option>--strict</option> forces ‘deep’
+      evaluation of the result, i.e., list elements and attributes are
+      evaluated recursively.)</para></listitem>
+
+      <listitem><para>In Nix expressions, the primop
+      <function>builtins.toXML</function> converts a term to an XML
+      representation.  This is primarily useful for passing structured
+      information to builders.</para></listitem>
+
+    </itemizedlist>
+
+  </para></listitem>
+
+
+  <listitem><para>You can now unambiguously specify which derivation to
+  build or install in <command>nix-env</command>,
+  <command>nix-instantiate</command> and <command>nix-build</command>
+  using the <option>--attr</option> / <option>-A</option> flags, which
+  take an attribute name as argument.  (Unlike symbolic package names
+  such as <literal>subversion-1.4.0</literal>, attribute names in an
+  attribute set are unique.)  For instance, a quick way to perform a
+  test build of a package in Nixpkgs is <literal>nix-build
+  pkgs/top-level/all-packages.nix -A
+  <replaceable>foo</replaceable></literal>.  <literal>nix-env -q
+  --attr</literal> shows the attribute names corresponding to each
+  derivation.</para></listitem>
+
+
+  <listitem><para>If the top-level Nix expression used by
+  <command>nix-env</command>, <command>nix-instantiate</command> or
+  <command>nix-build</command> evaluates to a function whose arguments
+  all have default values, the function will be called automatically.
+  Also, the new command-line switch <option>--arg
+  <replaceable>name</replaceable>
+  <replaceable>value</replaceable></option> can be used to specify
+  function arguments on the command line.</para></listitem>
+
+
+  <listitem><para><literal>nix-install-package --url
+  <replaceable>URL</replaceable></literal> allows a package to be
+  installed directly from the given URL.</para></listitem>
+
+
+  <listitem><para>Nix now works behind an HTTP proxy server; just set
+  the standard environment variables <envar>http_proxy</envar>,
+  <envar>https_proxy</envar>, <envar>ftp_proxy</envar> or
+  <envar>all_proxy</envar> appropriately.  Functions such as
+  <function>fetchurl</function> in Nixpkgs also respect these
+  variables.</para></listitem>
+
+
+  <listitem><para><literal>nix-build -o
+  <replaceable>symlink</replaceable></literal> allows the symlink to
+  the build result to be named something other than
+  <literal>result</literal>.</para></listitem>
+
+
+  <!-- Stability / performance / etc. -->
+
+
+  <listitem><para>Platform support:
+
+    <itemizedlist>
+
+      <listitem><para>Support for 64-bit platforms, provided a <link
+      xlink:href="http://bugzilla.sen.cwi.nl:8080/show_bug.cgi?id=606">suitably
+      patched ATerm library</link> is used.  Also, files larger than 2
+      GiB are now supported.</para></listitem>
+
+      <listitem><para>Added support for Cygwin (Windows,
+      <literal>i686-cygwin</literal>), Mac OS X on Intel
+      (<literal>i686-darwin</literal>) and Linux on PowerPC
+      (<literal>powerpc-linux</literal>).</para></listitem>
+
+      <listitem><para>Users of SMP and multicore machines will
+      appreciate that the number of builds to be performed in parallel
+      can now be specified in the configuration file in the
+      <literal>build-max-jobs</literal> setting.</para></listitem>
+
+    </itemizedlist>
+
+  </para></listitem>
+
+
+  <listitem><para>Garbage collector improvements:
+
+    <itemizedlist>
+
+      <listitem><para>Open files (such as running programs) are now
+      used as roots of the garbage collector.  This prevents programs
+      that have been uninstalled from being garbage collected while
+      they are still running.  The script that detects these
+      additional runtime roots
+      (<filename>find-runtime-roots.pl</filename>) is inherently
+      system-specific, but it should work on Linux and on all
+      platforms that have the <command>lsof</command>
+      utility.</para></listitem>
+
+      <listitem><para><literal>nix-store --gc</literal>
+      (a.k.a. <command>nix-collect-garbage</command>) prints out the
+      number of bytes freed on standard output.  <literal>nix-store
+      --gc --print-dead</literal> shows how many bytes would be freed
+      by an actual garbage collection.</para></listitem>
+
+      <listitem><para><literal>nix-collect-garbage -d</literal>
+      removes all old generations of <emphasis>all</emphasis> profiles
+      before calling the actual garbage collector (<literal>nix-store
+      --gc</literal>).  This is an easy way to get rid of all old
+      packages in the Nix store.</para></listitem>
+
+      <listitem><para><command>nix-store</command> now has an
+      operation <option>--delete</option> to delete specific paths
+      from the Nix store.  It won’t delete reachable (non-garbage)
+      paths unless <option>--ignore-liveness</option> is
+      specified.</para></listitem>
+
+    </itemizedlist>
+
+  </para></listitem>
+
+
+  <listitem><para>Berkeley DB 4.4’s process registry feature is used
+  to recover from crashed Nix processes.</para></listitem>
+
+  <!--  <listitem><para>TODO: shared stores.</para></listitem> -->
+
+  <listitem><para>A performance issue has been fixed with the
+  <literal>referer</literal> table, which stores the inverse of the
+  <literal>references</literal> table (i.e., it tells you what store
+  paths refer to a given path).  Maintaining this table could take a
+  quadratic amount of time, as well as a quadratic amount of Berkeley
+  DB log file space (in particular when running the garbage collector)
+  (<literal>NIX-23</literal>).</para></listitem>
+
+  <listitem><para>Nix now catches the <literal>TERM</literal> and
+  <literal>HUP</literal> signals in addition to the
+  <literal>INT</literal> signal.  So you can now do a <literal>killall
+  nix-store</literal> without triggering a database
+  recovery.</para></listitem>
+
+  <listitem><para><command>bsdiff</command> updated to version
+  4.3.</para></listitem>
+
+  <listitem><para>Substantial performance improvements in expression
+  evaluation and <literal>nix-env -qa</literal>, all thanks to <link
+  xlink:href="http://valgrind.org/">Valgrind</link>.  Memory use has
+  been reduced by a factor of 8 or so.  Big speedup by memoisation of
+  path hashing.</para></listitem>
+
+  <listitem><para>Lots of bug fixes, notably:
+
+    <itemizedlist>
+
+      <listitem><para>Make sure that the garbage collector can run
+      successfully when the disk is full
+      (<literal>NIX-18</literal>).</para></listitem>
+
+      <listitem><para><command>nix-env</command> now locks the profile
+      to prevent races between concurrent <command>nix-env</command>
+      operations on the same profile
+      (<literal>NIX-7</literal>).</para></listitem>
+
+      <listitem><para>Removed misleading messages from
+      <literal>nix-env -i</literal> (e.g., <literal>installing
+      `foo'</literal> followed by <literal>uninstalling
+      `foo'</literal>) (<literal>NIX-17</literal>).</para></listitem>
+
+    </itemizedlist>
+
+  </para></listitem>
+
+  <listitem><para>Nix source distributions are a lot smaller now since
+  we no longer include a full copy of the Berkeley DB source
+  distribution (but only the bits we need).</para></listitem>
+
+  <listitem><para>Header files are now installed so that external
+  programs can use the Nix libraries.</para></listitem>
+
+</itemizedlist>
+
+</section>
+
+
+
+<!--==================================================================-->
+
+<section><title>Release 0.9.2 (September 21, 2005)</title>
+
+<para>This bug fix release fixes two problems on Mac OS X:
+
+<itemizedlist>
+
+  <listitem><para>If Nix was linked against statically linked versions
+  of the ATerm or Berkeley DB library, there would be dynamic link
+  errors at runtime.</para></listitem>
+
+  <listitem><para><command>nix-pull</command> and
+  <command>nix-push</command> intermittently failed due to race
+  conditions involving pipes and child processes with error messages
+  such as <literal>open2: open(GLOB(0x180b2e4), >&amp;=9) failed: Bad
+  file descriptor at /nix/bin/nix-pull line 77</literal> (issue
+  <literal>NIX-14</literal>).</para></listitem>
+
+</itemizedlist>
+
+</para>
+
+</section>
+
+
+
+<!--==================================================================-->
+
+<section><title>Release 0.9.1 (September 20, 2005)</title>
+
+<para>This bug fix release addresses a problem with the ATerm library
+when the <option>--with-aterm</option> flag in
+<command>configure</command> was <emphasis>not</emphasis> used.</para>
+
+</section>
+
+
+
+<!--==================================================================-->
+
+<section><title>Release 0.9 (September 16, 2005)</title>
+
+<para>NOTE: this version of Nix uses Berkeley DB 4.3 instead of 4.2.
+The database is upgraded automatically, but you should be careful not
+to use old versions of Nix that still use Berkeley DB 4.2.  In
+particular, if you use a Nix installed through Nix, you should run
+
+<screen>
+$ nix-store --clear-substitutes</screen>
+
+first.</para>
+
+
+<itemizedlist>
+
+  <listitem><para>Unpacking of patch sequences is much faster now
+  since we no longer do redundant unpacking and repacking of
+  intermediate paths.</para></listitem>
+
+  <listitem><para>Nix now uses Berkeley DB 4.3.</para></listitem>
+
+  <listitem><para>The <function>derivation</function> primitive is
+  lazier.  Attributes of dependent derivations can mutually refer to
+  each other (as long as there are no data dependencies on the
+  <varname>outPath</varname> and <varname>drvPath</varname> attributes
+  computed by <function>derivation</function>).</para>
+
+  <para>For example, the expression <literal>derivation
+  attrs</literal> now evaluates to (essentially)
+
+  <programlisting>
+attrs // {
+  type = "derivation";
+  outPath = derivation! attrs;
+  drvPath = derivation! attrs;
+}</programlisting>
+
+  where <function>derivation!</function> is a primop that does the
+  actual derivation instantiation (i.e., it does what
+  <function>derivation</function> used to do).  The advantage is that
+  it allows commands such as <command>nix-env -qa</command> and
+  <command>nix-env -i</command> to be much faster since they no longer
+  need to instantiate all derivations, just the
+  <varname>name</varname> attribute.</para>
+
+  <para>Also, it allows derivations to cyclically reference each
+  other, for example,
+
+  <programlisting>
+webServer = derivation {
+  ...
+  hostName = "svn.cs.uu.nl";
+  services = [svnService];
+};
+&#x20;
+svnService = derivation {
+  ...
+  hostName = webServer.hostName;
+};</programlisting>
+
+  Previously, this would yield a black hole (infinite recursion).</para>
+
+  </listitem>
+
+  <listitem><para><command>nix-build</command> now defaults to using
+  <filename>./default.nix</filename> if no Nix expression is
+  specified.</para></listitem>
+
+  <listitem><para><command>nix-instantiate</command>, when applied to
+  a Nix expression that evaluates to a function, will call the
+  function automatically if all its arguments have
+  defaults.</para></listitem>
+
+  <listitem><para>Nix now uses libtool to build dynamic libraries.
+  This reduces the size of executables.</para></listitem>
+
+  <listitem><para>A new list concatenation operator
+  <literal>++</literal>.  For example, <literal>[1 2 3] ++ [4 5
+  6]</literal> evaluates to <literal>[1 2 3 4 5
+  6]</literal>.</para></listitem>
+
+  <listitem><para>Some currently undocumented primops to support
+  low-level build management using Nix (i.e., using Nix as a Make
+  replacement).  See the commit messages for <literal>r3578</literal>
+  and <literal>r3580</literal>.</para></listitem>
+
+  <listitem><para>Various bug fixes and performance
+  improvements.</para></listitem>
+
+</itemizedlist>
+
+</section>
+
+
+
+<!--==================================================================-->
+
+<section><title>Release 0.8.1 (April 13, 2005)</title>
+
+<para>This is a bug fix release.</para>
+
+<itemizedlist>
+
+  <listitem><para>Patch downloading was broken.</para></listitem>
+
+  <listitem><para>The garbage collector would not delete paths that
+  had references from invalid (but substitutable)
+  paths.</para></listitem>
+
+</itemizedlist>
+
+</section>
+
+
+
+<!--==================================================================-->
+
+<section><title>Release 0.8 (April 11, 2005)</title>
+
+<para>NOTE: the hashing scheme in Nix 0.8 changed (as detailed below).
+As a result, <command>nix-pull</command> manifests and channels built
+for Nix 0.7 and below will no longer work.  However, the Nix
+expression language has not changed, so you can still build from
+source.  Also, existing user environments continue to work.  Nix 0.8
+will automatically upgrade the database schema of previous
+installations when it is first run.</para>
+
+<para>If you get the error message
+
+<screen>
+you have an old-style manifest `/nix/var/nix/manifests/[...]'; please
+delete it</screen>
+
+you should delete previously downloaded manifests:
+
+<screen>
+$ rm /nix/var/nix/manifests/*</screen>
+
+If <command>nix-channel</command> gives the error message
+
+<screen>
+manifest `http://catamaran.labs.cs.uu.nl/dist/nix/channels/[channel]/MANIFEST'
+is too old (i.e., for Nix &lt;= 0.7)</screen>
+
+then you should unsubscribe from the offending channel
+(<command>nix-channel --remove
+<replaceable>URL</replaceable></command>; leave out
+<literal>/MANIFEST</literal>), and subscribe to the same URL, with
+<literal>channels</literal> replaced by <literal>channels-v3</literal>
+(e.g., <link
+xlink:href='http://catamaran.labs.cs.uu.nl/dist/nix/channels-v3/nixpkgs-unstable'
+/>).</para>
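+
+<para>For example, for the unstable Nixpkgs channel that would amount
+to something like:
+
+<screen>
+$ nix-channel --remove http://catamaran.labs.cs.uu.nl/dist/nix/channels/nixpkgs-unstable
+$ nix-channel --add http://catamaran.labs.cs.uu.nl/dist/nix/channels-v3/nixpkgs-unstable</screen>
+
+</para>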
+
+<para>Nix 0.8 has the following improvements:
+
+<itemizedlist>
+
+  <listitem><para>The cryptographic hashes used in store paths are now
+  160 bits long, but encoded in base-32 so that they are still only 32
+  characters long (e.g.,
+  <filename>/nix/store/csw87wag8bqlqk7ipllbwypb14xainap-atk-1.9.0</filename>).
+  (This is actually a 160 bit truncation of a SHA-256
+  hash.)</para></listitem>
+
+  <listitem><para>Big cleanups and simplifications of the basic store
+  semantics.  The notion of “closure store expressions” is gone (and
+  so is the notion of “successors”); the file system references of a
+  store path are now just stored in the database.</para>
+
+  <para>For instance, given any store path, you can query its closure:
+
+  <screen>
+$ nix-store -qR $(which firefox)
+... lots of paths ...</screen>
+
+  Also, Nix now remembers for each store path the derivation that
+  built it (the “deriver”):
+
+  <screen>
+$ nix-store -qd $(which firefox)
+/nix/store/4b0jx7vq80l9aqcnkszxhymsf1ffa5jd-firefox-1.0.1.drv</screen>
+
+  So to see the build-time dependencies, you can do
+
+  <screen>
+$ nix-store -qR $(nix-store -qd $(which firefox))</screen>
+
+  or, in a nicer format:
+
+  <screen>
+$ nix-store -q --tree $(nix-store -qd $(which firefox))</screen>
+
+  </para>
+
+  <para>File system references are also stored in reverse.  For
+  instance, you can query all paths that directly or indirectly use a
+  certain Glibc:
+
+  <screen>
+$ nix-store -q --referrers-closure \
+    /nix/store/8lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4</screen>
+
+  </para>
+
+  </listitem>
+
+  <listitem><para>The concept of fixed-output derivations has been
+  formalised.  Previously, functions such as
+  <function>fetchurl</function> in Nixpkgs used a hack (namely,
+  explicitly specifying a store path hash) to prevent changes to, say,
+  the URL of the file from propagating upwards through the dependency
+  graph, causing rebuilds of everything.  This can now be done cleanly
+  by specifying the <varname>outputHash</varname> and
+  <varname>outputHashAlgo</varname> attributes.  Nix itself checks
+  that the content of the output has the specified hash.  (This is
+  important for maintaining certain invariants necessary for future
+  work on secure shared stores.)</para></listitem>
+
+  <listitem><para>One-click installation :-) It is now possible to
+  install any top-level component in Nixpkgs directly, through the web
+  — see, e.g., <link
+  xlink:href='http://catamaran.labs.cs.uu.nl/dist/nixpkgs-0.8/' />.
+  All you have to do is associate
+  <filename>/nix/bin/nix-install-package</filename> with the MIME type
+  <literal>application/nix-package</literal> (or the extension
+  <filename>.nixpkg</filename>), and clicking on a package link will
+  cause it to be installed, with all appropriate dependencies.  If you
+  just want to install some specific application, this is easier than
+  subscribing to a channel.</para></listitem>
+
+  <listitem><para><command>nix-store -r
+  <replaceable>PATHS</replaceable></command> now builds all the
+  derivations PATHS in parallel.  Previously it did them sequentially
+  (though exploiting possible parallelism between subderivations).
+  This is nice for build farms.</para></listitem>
+
+  <listitem><para><command>nix-channel</command> has new operations
+  <option>--list</option> and
+  <option>--remove</option>.</para></listitem>
+
+  <listitem><para>New ways of installing components into user
+  environments:
+
+  <itemizedlist>
+
+    <listitem><para>Copy from another user environment:
+
+    <screen>
+$ nix-env -i --from-profile .../other-profile firefox</screen>
+
+    </para></listitem>
+
+    <listitem><para>Install a store derivation directly (bypassing the
+    Nix expression language entirely):
+
+    <screen>
+$ nix-env -i /nix/store/z58v41v21xd3...-aterm-2.3.1.drv</screen>
+
+    (This is used to implement <command>nix-install-package</command>,
+    which is therefore immune to evolution in the Nix expression
+    language.)</para></listitem>
+
+    <listitem><para>Install an already built store path directly:
+
+    <screen>
+$ nix-env -i /nix/store/hsyj5pbn0d9i...-aterm-2.3.1</screen>
+
+    </para></listitem>
+
+    <listitem><para>Install the result of a Nix expression specified
+    as a command-line argument:
+
+    <screen>
+$ nix-env -f .../i686-linux.nix -i -E 'x: x.firefoxWrapper'</screen>
+
+    The difference with the normal installation mode is that
+    <option>-E</option> does not use the <varname>name</varname>
+    attributes of derivations.  Therefore, this can be used to
+    disambiguate multiple derivations with the same
+    name.</para></listitem>
+
+  </itemizedlist></para></listitem>
+
+  <listitem><para>A hash of the contents of a store path is now stored
+  in the database after a successful build.  This allows you to check
+  whether store paths have been tampered with: <command>nix-store
+  --verify --check-contents</command>.</para></listitem>
+
+  <listitem>
+
+    <para>Implemented a concurrent garbage collector.  It is now
+    always safe to run the garbage collector, even if other Nix
+    operations are happening simultaneously.</para>
+
+    <para>However, there can still be GC races if you use
+    <command>nix-instantiate</command> and <command>nix-store
+    --realise</command> directly to build things.  To prevent races,
+    use the <option>--add-root</option> flag of those commands.</para>
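+
+    <para>A sketch of what that might look like (the file names here
+    are purely illustrative; see the description of
+    <option>--add-root</option> in the <command>nix-store</command>
+    manual page for the precise semantics):
+
+<screen>
+$ nix-instantiate --add-root ./foo.drv --indirect foo.nix
+$ nix-store --add-root ./foo-result --indirect -r $(readlink ./foo.drv)</screen>
+
+    </para>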
+
+  </listitem>
+
+  <listitem><para>The garbage collector now finally deletes paths in
+  the right order (i.e., topologically sorted under the “references”
+  relation), thus making it safe to interrupt the collector without
+  risking a store that violates the closure
+  invariant.</para></listitem>
+
+  <listitem><para>Likewise, the substitute mechanism now downloads
+  files in the right order, thus preserving the closure invariant at
+  all times.</para></listitem>
+
+  <listitem><para>The result of <command>nix-build</command> is now
+  registered as a root of the garbage collector.  If the
+  <filename>./result</filename> link is deleted, the GC root
+  disappears automatically.</para></listitem>
+
+  <listitem>
+
+    <para>The behaviour of the garbage collector can be changed
+    globally by setting options in
+    <filename>/nix/etc/nix/nix.conf</filename>.
+
+    <itemizedlist>
+
+      <listitem><para><literal>gc-keep-derivations</literal> specifies
+      whether deriver links should be followed when searching for live
+      paths.</para></listitem>
+
+      <listitem><para><literal>gc-keep-outputs</literal> specifies
+      whether outputs of derivations should be followed when searching
+      for live paths.</para></listitem>
+
+      <listitem><para><literal>env-keep-derivations</literal>
+      specifies whether user environments should store the paths of
+      derivations when they are added (thus keeping the derivations
+      alive).</para></listitem>
+
+    </itemizedlist>
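+
+    For example, an illustrative <filename>nix.conf</filename> that
+    sets all three options explicitly might contain:
+
+<screen>
+gc-keep-derivations = true
+gc-keep-outputs = false
+env-keep-derivations = false</screen>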
+
+  </para></listitem>
+
+  <listitem><para>New <command>nix-env</command> query flags
+  <option>--drv-path</option> and
+  <option>--out-path</option>.</para></listitem>
+
+  <listitem><para><command>fetchurl</command> allows SHA-1 and SHA-256
+  in addition to MD5.  Just specify the attribute
+  <varname>sha1</varname> or <varname>sha256</varname> instead of
+  <varname>md5</varname>.</para></listitem>
+
+  <listitem><para>Manual updates.</para></listitem>
+
+</itemizedlist>
+
+</para>
+
+</section>
+
+
+
+<!--==================================================================-->
+
+<section><title>Release 0.7 (January 12, 2005)</title>
+
+<itemizedlist>
+
+  <listitem><para>Binary patching.  When upgrading components using
+  pre-built binaries (through nix-pull / nix-channel), Nix can
+  automatically download and apply binary patches to already installed
+  components instead of full downloads.  Patching is “smart”: if there
+  is a <emphasis>sequence</emphasis> of patches to an installed
+  component, Nix will use it.  Patches are currently generated
+  automatically between Nixpkgs (pre-)releases.</para></listitem>
+
+  <listitem><para>Simplifications to the substitute
+  mechanism.</para></listitem>
+
+  <listitem><para>Nix-pull now stores downloaded manifests in
+  <filename>/nix/var/nix/manifests</filename>.</para></listitem>
+
+  <listitem><para>Metadata on files in the Nix store is canonicalised
+  after builds: the last-modified timestamp is set to 0 (00:00:00
+  1/1/1970), the mode is set to 0444 or 0555 (readable and possibly
+  executable by all; setuid/setgid bits are dropped), and the group is
+  set to the default.  This ensures that the result of a build and an
+  installation through a substitute is the same; and that timestamp
+  dependencies are revealed.</para></listitem>
+
+</itemizedlist>
+
+</section>
+
+
+
+<!--==================================================================-->
+
+<section><title>Release 0.6 (November 14, 2004)</title>
+
+<itemizedlist>
+
+  <listitem>
+    <para>Rewrite of the normalisation engine.
+
+    <itemizedlist>
+
+      <listitem><para>Multiple builds can now be performed in parallel
+      (option <option>-j</option>).</para></listitem>
+
+      <listitem><para>Distributed builds.  Nix can now call a shell
+      script to forward builds to Nix installations on remote
+      machines, which may or may not be of the same platform
+      type.</para></listitem>
+
+      <listitem><para>Option <option>--fallback</option> allows
+      recovery from broken substitutes.</para></listitem>
+
+      <listitem><para>Option <option>--keep-going</option> causes
+      building of other (unaffected) derivations to continue if one
+      failed.</para></listitem>
+
+    </itemizedlist>
+
+    </para>
+
+  </listitem>
+
+  <listitem><para>Improvements to the garbage collector (i.e., it
+  should actually work now).</para></listitem>
+
+  <listitem><para>Setuid Nix installations allow a Nix store to be
+  shared among multiple users.</para></listitem>
+
+  <listitem><para>Substitute registration is much faster
+  now.</para></listitem>
+
+  <listitem><para>A utility <command>nix-build</command> to build a
+  Nix expression and create a symlink to the result in the current
+  directory; useful for testing Nix derivations.</para></listitem>
+
+  <listitem><para>Manual updates.</para></listitem>
+
+  <listitem>
+
+    <para><command>nix-env</command> changes:
+
+    <itemizedlist>
+
+      <listitem><para>Derivations for other platforms are filtered out
+      (which can be overridden using
+      <option>--system-filter</option>).</para></listitem>
+
+      <listitem><para><option>--install</option> by default now
+      uninstalls previous derivations with the same
+      name.</para></listitem>
+
+      <listitem><para><option>--upgrade</option> allows upgrading to a
+      specific version.</para></listitem>
+
+      <listitem><para>New operation
+      <option>--delete-generations</option> to remove profile
+      generations (necessary for effective garbage
+      collection).</para></listitem>
+
+      <listitem><para>Nicer output (sorted,
+      columnised).</para></listitem>
+
+    </itemizedlist>
+
+    </para>
+
+  </listitem>
+
+  <listitem><para>More sensible verbosity levels all around (builder
+  output is now always shown, unless <option>-Q</option> is
+  given).</para></listitem>
+
+  <listitem>
+
+    <para>Nix expression language changes:
+
+    <itemizedlist>
+
+      <listitem><para>New language construct: <literal>with
+      <replaceable>E1</replaceable>;
+      <replaceable>E2</replaceable></literal> brings all attributes
+      defined in the attribute set <replaceable>E1</replaceable> in
+      scope in <replaceable>E2</replaceable>.</para></listitem>
+
+      <listitem><para>Added a <function>map</function>
+      function.</para></listitem>
+
+      <listitem><para>Various new operators (e.g., string
+      concatenation).</para></listitem>
+
+    </itemizedlist>
+
+    </para>
+
+  </listitem>
+
+  <listitem><para>Expression evaluation is much
+  faster.</para></listitem>
+
+  <listitem><para>An Emacs mode for editing Nix expressions (with
+  syntax highlighting and indentation) has been
+  added.</para></listitem>
+
+  <listitem><para>Many bug fixes.</para></listitem>
+
+</itemizedlist>
+
+</section>
+
+
+
+<!--==================================================================-->
+
+<section><title>Release 0.5 and earlier</title>
+
+<para>Please refer to the Subversion commit log messages.</para>
+
+</section>
+
+
+
+</article>
diff --git a/doc/manual/schemas.xml b/doc/manual/schemas.xml
new file mode 100644
index 000000000000..691a517b9c26
--- /dev/null
+++ b/doc/manual/schemas.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0"?>
+<locatingRules xmlns="http://thaiopensource.com/ns/locating-rules/1.0">
+  <uri pattern="*.xml" typeId="DocBook"/>
+</locatingRules>
diff --git a/doc/manual/style.css b/doc/manual/style.css
new file mode 100644
index 000000000000..ac76a64bbb21
--- /dev/null
+++ b/doc/manual/style.css
@@ -0,0 +1,255 @@
+/* Copied from http://bakefile.sourceforge.net/, which appears to be
+   licensed under the GNU GPL. */
+
+
+/***************************************************************************
+                             Basic headers and text:
+ ***************************************************************************/
+
+body
+{
+    font-family: "Nimbus Sans L", sans-serif;
+    background: white;
+    margin: 2em 1em 2em 1em;
+}
+
+h1, h2, h3, h4
+{
+    color: #005aa0;
+}
+
+h1 /* title */
+{
+    font-size: 200%;
+}
+
+h2 /* chapters, appendices, subtitle */
+{
+    font-size: 180%;
+}
+
+/* Extra space between chapters, appendices. */
+div.chapter > div.titlepage h2, div.appendix > div.titlepage h2 
+{ 
+    margin-top: 1.5em;
+}
+
+div.section > div.titlepage h2 /* sections */
+{
+    font-size: 150%;
+    margin-top: 1.5em;
+}
+
+h3 /* subsections */
+{
+    font-size: 125%;
+}
+
+div.simplesect h2
+{
+    font-size: 110%;
+}
+
+div.appendix h3
+{
+    font-size: 150%;
+    margin-top: 1.5em;
+}
+
+div.refnamediv h2, div.refsynopsisdiv h2, div.refsection h2 /* refentry parts */
+{
+    margin-top: 1.4em;
+    font-size: 125%;
+}
+
+div.refsection h3
+{
+    font-size: 110%;
+}
+
+
+/***************************************************************************
+                               Examples:
+ ***************************************************************************/
+
+div.example
+{
+    border: 1px solid #b0b0b0;
+    padding: 6px 6px;
+    margin-left: 1.5em;
+    margin-right: 1.5em;
+    background: #f4f4f8;
+    border-radius: 0.4em;
+    box-shadow: 0.4em 0.4em 0.5em #e0e0e0;
+}
+
+div.example p.title
+{
+    margin-top: 0em;
+}
+
+div.example pre
+{
+    box-shadow: none;
+}
+
+
+/***************************************************************************
+                            Screen dumps:
+ ***************************************************************************/
+
+pre.screen, pre.programlisting
+{
+    border: 1px solid #b0b0b0;
+    padding: 3px 3px;
+    margin-left: 1.5em;
+    margin-right: 1.5em;
+    color: #600000;
+    background: #f4f4f8;
+    font-family: monospace;
+    border-radius: 0.4em;
+    box-shadow: 0.4em 0.4em 0.5em #e0e0e0;
+}
+
+div.example pre.programlisting
+{
+    border: 0px;
+    padding: 0 0;
+    margin: 0 0 0 0;
+}
+
+
+/***************************************************************************
+                               Notes, warnings etc:
+ ***************************************************************************/
+
+.note, .warning
+{
+    border: 1px solid #b0b0b0;
+    padding: 3px 3px;
+    margin-left: 1.5em;
+    margin-right: 1.5em;
+    margin-bottom: 1em;
+    padding: 0.3em 0.3em 0.3em 0.3em;
+    background: #fffff5;
+    border-radius: 0.4em;
+    box-shadow: 0.4em 0.4em 0.5em #e0e0e0;
+}
+
+div.note, div.warning
+{
+    font-style: italic;
+}
+
+div.note h3, div.warning h3
+{
+    color: red;
+    font-size: 100%;
+    padding-right: 0.5em;
+    display: inline;
+}
+
+div.note p, div.warning p
+{
+    margin-bottom: 0em;
+}
+
+div.note h3 + p, div.warning h3 + p
+{
+    display: inline;
+}
+
+div.note h3
+{
+    color: blue;
+    font-size: 100%;
+}
+
+div.navfooter *
+{
+    font-size: 90%;
+}
+
+
+/***************************************************************************
+                        Links colors and highlighting: 
+ ***************************************************************************/
+
+a { text-decoration: none; }
+a:hover { text-decoration: underline; }
+a:link { color: #0048b3; }
+a:visited { color: #002a6a; }
+
+
+/***************************************************************************
+                              Table of contents:
+ ***************************************************************************/
+
+div.toc
+{
+    font-size: 90%;
+}
+
+div.toc dl
+{
+    margin-top: 0em;
+    margin-bottom: 0em;
+}
+
+
+/***************************************************************************
+                               Special elements:
+ ***************************************************************************/
+
+tt, code
+{
+    color: #400000;
+}
+
+.term
+{
+    font-weight: bold;
+    
+}
+
+div.variablelist dd p, div.glosslist dd p
+{
+    margin-top: 0em;
+}
+
+div.variablelist dd, div.glosslist dd
+{
+    margin-left: 1.5em;
+}
+
+div.glosslist dt
+{
+    font-style: italic;
+}
+
+.varname
+{
+    color: #400000;
+}
+
+span.command strong
+{
+    font-weight: normal;
+    color: #400000;
+}
+
+div.calloutlist table
+{
+    box-shadow: none;
+}
+
+table
+{
+    border-collapse: collapse;
+    box-shadow: 0.4em 0.4em 0.5em #e0e0e0;
+}
+
+div.affiliation
+{
+    font-style: italic;
+}
\ No newline at end of file
diff --git a/doc/manual/troubleshooting.xml b/doc/manual/troubleshooting.xml
new file mode 100644
index 000000000000..ec8c4c924fc7
--- /dev/null
+++ b/doc/manual/troubleshooting.xml
@@ -0,0 +1,92 @@
+<appendix xmlns="http://docbook.org/ns/docbook"
+          xmlns:xlink="http://www.w3.org/1999/xlink">
+
+<title>Troubleshooting</title>
+
+
+<para>This section provides solutions for some common problems.  See
+the <link xlink:href="http://bugs.strategoxt.org/browse/NIX">Nix
+bug tracker</link> for a list of currently known issues.</para>
+
+
+<section><title>Collisions in <command>nix-env</command></title>
+
+<para>Symptom: when installing or upgrading, you get an error message such as
+
+<screen>
+$ nix-env -i docbook-xml
+...
+adding /nix/store/s5hyxgm62gk2...-docbook-xml-4.2
+collision between `/nix/store/s5hyxgm62gk2...-docbook-xml-4.2/xml/dtd/docbook/calstblx.dtd'
+  and `/nix/store/06h377hr4b33...-docbook-xml-4.3/xml/dtd/docbook/calstblx.dtd'
+  at /nix/store/...-builder.pl line 62.</screen>
+
+</para>
+
+<para>The cause is that two installed packages in the user environment
+have overlapping filenames (e.g.,
+<filename>xml/dtd/docbook/calstblx.dtd</filename>).  This usually
+happens when you accidentally try to install two versions of the same
+package.  For instance, in the example above, the Nix Packages
+collection contains two versions of <literal>docbook-xml</literal>, so
+<command>nix-env -i</command> will try to install both.  The default
+user environment builder has no way to resolve such conflicts,
+so it just gives up.</para>
+
+<para>Solution: remove one of the offending packages from the user
+environment (if already installed) using <command>nix-env
+-e</command>, or specify exactly which version should be installed
+(e.g., <literal>nix-env -i docbook-xml-4.2</literal>).</para>
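+
+<para>For the example above, one possible sequence (assuming the 4.3
+version is the one you want to get rid of) is:
+
+<screen>
+$ nix-env -e docbook-xml-4.3
+$ nix-env -i docbook-xml-4.2</screen>
+
+</para>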
+
+<para>Alternatively, you can modify the user environment builder
+script (in
+<filename><replaceable>prefix</replaceable>/share/nix/corepkgs/buildenv/builder.pl</filename>)
+to implement some conflict resolution policy.  E.g., the script could
+be modified to rename conflicting file names, or to pick one over the
+other.</para>
+
+</section>
+
+
+<section><title><quote>Too many links</quote> error in the Nix
+store</title>
+
+
+<para>Symptom: when building something, you get an error message such as
+
+<screen>
+...
+<literal>mkdir: cannot create directory `/nix/store/<replaceable>name</replaceable>': Too many links</literal></screen>
+
+</para>
+
+<para>This is usually because you have more than 32,000 subdirectories
+in <filename>/nix/store</filename>, as can be seen using <command>ls
+-l</command>:
+
+<screen>
+$ ls -l /nix/store
+drwxrwxrwt 32000 nix nix 4620288 Sep 8 15:08 store</screen>
+
+The <literal>ext2</literal> file system is limited to an inode link
+count of 32,000 (each subdirectory increasing the count by one).
+Furthermore, the <literal>st_nlink</literal> field of the
+<function>stat</function> system call is a 16-bit value.</para>
+
+<para>This only happens on very large Nix installations (such as build
+machines).</para>
+
+<para>Quick solution: run the garbage collector.  You may want to use
+the <option>--max-links</option> option.</para>
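+
+<para>A sketch of what that might look like (the threshold of 30,000 is
+arbitrary, as long as it stays comfortably below the 32,000 limit):
+
+<screen>
+$ nix-store --gc --max-links 30000</screen>
+
+</para>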
+
+<para>Real solution: put the Nix store on a file system that supports
+more than 32,000 subdirectories per directory, such as ReiserFS.
+(This doesn’t solve the <literal>st_nlink</literal> limit, but
+ReiserFS lies to the kernel by reporting a link count of 1 if it
+exceeds the limit.)</para>
+
+</section>
+  
+
+
+</appendix>
diff --git a/doc/manual/writing-nix-expressions.xml b/doc/manual/writing-nix-expressions.xml
new file mode 100644
index 000000000000..6db2adcfa0c3
--- /dev/null
+++ b/doc/manual/writing-nix-expressions.xml
@@ -0,0 +1,1900 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xml:id='chap-writing-nix-expressions'
+         xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<title>Writing Nix Expressions</title>
+
+
+<para>This chapter shows you how to write Nix expressions, which are
+the things that tell Nix how to build packages.  It starts with a
+simple example (a Nix expression for GNU Hello), and then moves
+on to a more in-depth look at the Nix expression language.</para>
+
+<note><para>This chapter is mostly about the Nix expression language.
+For more extensive information on adding packages to the Nix Packages
+collection (such as functions in the standard environment and coding
+conventions), please consult <link
+xlink:href="http://nixos.org/nixpkgs/manual/">its
+manual</link>.</para></note>
+
+
+<section><title>A simple Nix expression</title>
+
+<para>This section shows how to add and test the <link
+xlink:href='http://www.gnu.org/software/hello/hello.html'>GNU Hello
+package</link> to the Nix Packages collection.  Hello is a program
+that prints out the text <quote>Hello, world!</quote>.</para>
+
+<para>To add a package to the Nix Packages collection, you generally
+need to do three things:
+
+<orderedlist>
+
+  <listitem><para>Write a Nix expression for the package.  This is a
+  file that describes all the inputs involved in building the package,
+  such as dependencies, sources, and so on.</para></listitem>
+
+  <listitem><para>Write a <emphasis>builder</emphasis>.  This is a
+  shell script<footnote><para>In fact, it can be written in any
+  language, but typically it's a <command>bash</command> shell
+  script.</para></footnote> that actually builds the package from
+  the inputs.</para></listitem>
+
+  <listitem><para>Add the package to the file
+  <filename>pkgs/top-level/all-packages.nix</filename>.  The Nix
+  expression written in the first step is a
+  <emphasis>function</emphasis>; it requires other packages in order
+  to build it.  In this step you put it all together, i.e., you call
+  the function with the right arguments to build the actual
+  package.</para></listitem>
+
+</orderedlist>
+
+</para>
+
+
+<section><title>The Nix expression</title>
+
+<example xml:id='ex-hello-nix'><title>Nix expression for GNU Hello
+(<filename>default.nix</filename>)</title>
+<programlisting>
+{ stdenv, fetchurl, perl }: <co xml:id='ex-hello-nix-co-1' />
+
+stdenv.mkDerivation { <co xml:id='ex-hello-nix-co-2' />
+  name = "hello-2.1.1"; <co xml:id='ex-hello-nix-co-3' />
+  builder = ./builder.sh; <co xml:id='ex-hello-nix-co-4' />
+  src = fetchurl { <co xml:id='ex-hello-nix-co-5' />
+    url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz;
+    md5 = "70c9ccf9fac07f762c24f2df2290784d";
+  };
+  inherit perl; <co xml:id='ex-hello-nix-co-6' />
+}</programlisting>
+</example>
+
+<para><xref linkend='ex-hello-nix' /> shows a Nix expression for GNU
+Hello.  It's actually already in the Nix Packages collection in
+<filename>pkgs/applications/misc/hello/ex-1/default.nix</filename>.
+It is customary to place each package in a separate directory and call
+the single Nix expression in that directory
+<filename>default.nix</filename>.  The file has the following elements
+(referenced from the figure by number):
+
+<calloutlist>
+
+  <callout arearefs='ex-hello-nix-co-1'>
+
+    <para>This states that the expression is a
+    <emphasis>function</emphasis> that expects to be called with three
+    arguments: <varname>stdenv</varname>, <varname>fetchurl</varname>,
+    and <varname>perl</varname>.  They are needed to build Hello, but
+    we don't know how to build them here; that's why they are function
+    arguments.  <varname>stdenv</varname> is a package that is used
+    by almost all Nix Packages packages; it provides a
+    <quote>standard</quote> environment consisting of the things you
+    would expect in a basic Unix environment: a C/C++ compiler (GCC,
+    to be precise), the Bash shell, fundamental Unix tools such as
+    <command>cp</command>, <command>grep</command>,
+    <command>tar</command>, etc.  <varname>fetchurl</varname> is a
+    function that downloads files.  <varname>perl</varname> is the
+    Perl interpreter.</para>
+
+    <para>Nix functions generally have the form <literal>{ x, y, ...,
+    z }: e</literal> where <varname>x</varname>, <varname>y</varname>,
+    etc. are the names of the expected arguments, and where
+    <replaceable>e</replaceable> is the body of the function.  So
+    here, the entire remainder of the file is the body of the
+    function; when given the required arguments, the body should
+    describe how to build an instance of the Hello package.</para>
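+
+    <para>As a small illustration of the syntax (unrelated to Hello), a
+    function of two arguments that combines them with the
+    <literal>+</literal> operator would be written
+
+    <programlisting>
+{ x, y }: x + y</programlisting>
+
+    </para>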
+
+  </callout>
+
+  <callout arearefs='ex-hello-nix-co-2'>
+
+    <para>So we have to build a package.  Building something from
+    other stuff is called a <emphasis>derivation</emphasis> in Nix (as
+    opposed to sources, which are built by humans instead of
+    computers).  We perform a derivation by calling
+    <varname>stdenv.mkDerivation</varname>.
+    <varname>mkDerivation</varname> is a function provided by
+    <varname>stdenv</varname> that builds a package from a set of
+    <emphasis>attributes</emphasis>.  A set is just a list of
+    key/value pairs where each key is a string and each value is an
+    arbitrary Nix expression.  They take the general form <literal>{
+    <replaceable>name1</replaceable> =
+    <replaceable>expr1</replaceable>; <replaceable>...</replaceable>
+    <replaceable>nameN</replaceable> =
+    <replaceable>exprN</replaceable>; }</literal>.</para>
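+
+    <para>Written on a single line, the set passed to
+    <varname>mkDerivation</varname> in the Hello expression thus has
+    the shape
+
+    <programlisting>
+{ name = "hello-2.1.1"; builder = ./builder.sh; src = <replaceable>...</replaceable>; inherit perl; }</programlisting>
+
+    where <replaceable>...</replaceable> stands for the
+    <function>fetchurl</function> call shown above.</para>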
+
+  </callout>
+
+  <callout arearefs='ex-hello-nix-co-3'>
+
+    <para>The attribute <varname>name</varname> specifies the symbolic
+    name and version of the package.  Nix doesn't really care about
+    these things, but they are used by, for instance, <command>nix-env
+    -q</command> to show a <quote>human-readable</quote> name for
+    packages.  This attribute is required by
+    <varname>mkDerivation</varname>.</para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-nix-co-4'>
+
+    <para>The attribute <varname>builder</varname> specifies the
+    builder.  This attribute can sometimes be omitted, in which case
+    <varname>mkDerivation</varname> will fill in a default builder
+    (which does a <literal>configure; make; make install</literal>, in
+    essence).  Hello is sufficiently simple that the default builder
+    would suffice, but in this case, we will show an actual builder
+    for educational purposes.  The value
+    <command>./builder.sh</command> refers to the shell script shown
+    in <xref linkend='ex-hello-builder' />, discussed below.</para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-nix-co-5'>
+
+    <para>The builder has to know what the sources of the package
+    are.  Here, the attribute <varname>src</varname> is bound to the
+    result of a call to the <command>fetchurl</command> function.
+    Given a URL and an MD5 hash of the expected contents of the file
+    at that URL, this function builds a derivation that downloads the
+    file and checks its hash.  So the sources are a dependency that
+    like all other dependencies is built before Hello itself is
+    built.</para>
+
+    <para>Instead of <varname>src</varname> any other name could have
+    been used, and in fact there can be any number of sources (bound
+    to different attributes).  However, <varname>src</varname> is
+    customary, and it's also expected by the default builder (which we
+    don't use in this example).</para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-nix-co-6'>
+
+    <para>Since the derivation requires Perl, we have to pass the
+    value of the <varname>perl</varname> function argument to the
+    builder.  All attributes in the set are actually passed as
+    environment variables to the builder, so declaring an attribute
+
+    <programlisting>
+perl = perl;</programlisting>
+
+    will do the trick: it binds an attribute <varname>perl</varname>
+    to the function argument which also happens to be called
+    <varname>perl</varname>.  However, it looks a bit silly, so there
+    is a shorter syntax.  The <literal>inherit</literal> keyword
+    causes the specified attributes to be bound to whatever variables
+    with the same name happen to be in scope.</para>
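+
+    <para>Several attributes can be inherited in one go.  For instance,
+    a (hypothetical) package needing both Perl and Python could declare
+
+    <programlisting>
+inherit perl python;</programlisting>
+
+    assuming variables with those names are in scope.</para>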
+
+  </callout>
+
+</calloutlist>
+
+</para>
+
+</section>
+
+
+<section><title>The builder</title>
+
+<example xml:id='ex-hello-builder'><title>Build script for GNU Hello
+(<filename>builder.sh</filename>)</title>
+<programlisting>
+source $stdenv/setup <co xml:id='ex-hello-builder-co-1' />
+
+PATH=$perl/bin:$PATH <co xml:id='ex-hello-builder-co-2' />
+
+tar xvfz $src <co xml:id='ex-hello-builder-co-3' />
+cd hello-*
+./configure --prefix=$out <co xml:id='ex-hello-builder-co-4' />
+make <co xml:id='ex-hello-builder-co-5' />
+make install</programlisting>
+</example>
+
+<para><xref linkend='ex-hello-builder' /> shows the builder referenced
+from Hello's Nix expression (stored in
+<filename>pkgs/applications/misc/hello/ex-1/builder.sh</filename>).
+The builder can actually be made a lot shorter by using the
+<emphasis>generic builder</emphasis> functions provided by
+<varname>stdenv</varname>, but here we write out the build steps to
+elucidate what a builder does.  It performs the following
+steps:</para>
+
+<calloutlist>
+
+  <callout arearefs='ex-hello-builder-co-1'>
+
+    <para>When Nix runs a builder, it initially completely clears the
+    environment (except for the attributes declared in the
+    derivation).  For instance, the <envar>PATH</envar> variable is
+    empty<footnote><para>Actually, it's initialised to
+    <filename>/path-not-set</filename> to prevent Bash from setting it
+    to a default value.</para></footnote>.  This is done to prevent
+    undeclared inputs from being used in the build process.  If for
+    example the <envar>PATH</envar> contained
+    <filename>/usr/bin</filename>, then you might accidentally use
+    <filename>/usr/bin/gcc</filename>.</para>
+
+    <para>So the first step is to set up the environment.  This is
+    done by calling the <filename>setup</filename> script of the
+    standard environment.  The environment variable
+    <envar>stdenv</envar> points to the location of the standard
+    environment being used.  (It wasn't specified explicitly as an
+    attribute in <xref linkend='ex-hello-nix' />, but
+    <varname>mkDerivation</varname> adds it automatically.)</para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-builder-co-2'>
+
+    <para>Since Hello needs Perl, we have to make sure that Perl is in
+    the <envar>PATH</envar>.  The <envar>perl</envar> environment
+    variable points to the location of the Perl package (since it
+    was passed in as an attribute to the derivation), so
+    <filename><replaceable>$perl</replaceable>/bin</filename> is the
+    directory containing the Perl interpreter.</para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-builder-co-3'>
+
+    <para>Now we have to unpack the sources.  The
+    <varname>src</varname> attribute was bound to the result of
+    fetching the Hello source tarball from the network, so the
+    <envar>src</envar> environment variable points to the location in
+    the Nix store to which the tarball was downloaded.  After
+    unpacking, we <command>cd</command> to the resulting source
+    directory.</para>
+
+    <para>The whole build is performed in a temporary directory
+    created in <filename>/tmp</filename>, by the way.  This directory is
+    removed after the builder finishes, so there is no need to clean
+    up the sources afterwards.  Also, the temporary directory is
+    always newly created, so you don't have to worry about files from
+    previous builds interfering with the current build.</para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-builder-co-4'>
+
+    <para>GNU Hello is a typical Autoconf-based package, so we first
+    have to run its <filename>configure</filename> script.  In Nix
+    every package is stored in a separate location in the Nix store,
+    for instance
+    <filename>/nix/store/9a54ba97fb71b65fda531012d0443ce2-hello-2.1.1</filename>.
+    Nix computes this path by cryptographically hashing all attributes
+    of the derivation.  The path is passed to the builder through the
+    <envar>out</envar> environment variable.  So here we give
+    <filename>configure</filename> the parameter
+    <literal>--prefix=$out</literal> to cause Hello to be installed in
+    the expected location.</para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-builder-co-5'>
+
+    <para>Finally we build Hello (<literal>make</literal>) and install
+    it into the location specified by <envar>out</envar>
+    (<literal>make install</literal>).</para>
+
+  </callout>
+
+</calloutlist>
+
+<para>If you are wondering about the absence of error checking on the
+result of various commands called in the builder: this is because the
+shell script is evaluated with Bash's <option>-e</option> option,
+which causes the script to abort as soon as any command
+fails.</para>
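+
+<para>Without <option>-e</option>, each command would need an explicit
+check; for instance, the configure step would have to be written along
+the lines of (shown for illustration only, the builder above does not
+need this):
+
+<programlisting>
+./configure --prefix=$out || exit 1</programlisting>
+
+</para>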
+
+</section>
+
+
+<section><title>Composition</title>
+
+<example xml:id='ex-hello-composition'><title>Composing GNU Hello
+(<filename>all-packages.nix</filename>)</title>
+<programlisting>
+...
+
+rec { <co xml:id='ex-hello-composition-co-1' />
+
+  hello = import ../applications/misc/hello/ex-1 <co xml:id='ex-hello-composition-co-2' /> { <co xml:id='ex-hello-composition-co-3' />
+    inherit fetchurl stdenv perl;
+  };
+
+  perl = import ../development/interpreters/perl { <co xml:id='ex-hello-composition-co-4' />
+    inherit fetchurl stdenv;
+  };
+
+  fetchurl = import ../build-support/fetchurl {
+    inherit stdenv; ...
+  };
+
+  stdenv = ...;
+
+}
+</programlisting>
+</example>
+
+<para>The Nix expression in <xref linkend='ex-hello-nix' /> is a
+function; it is missing some arguments that have to be filled in
+somewhere.  In the Nix Packages collection this is done in the file
+<filename>pkgs/top-level/all-packages.nix</filename>, where all
+Nix expressions for packages are imported and called with the
+appropriate arguments.  <xref linkend='ex-hello-composition' /> shows
+some fragments of
+<filename>all-packages.nix</filename>.</para>
+
+<calloutlist>
+
+  <callout arearefs='ex-hello-composition-co-1'>
+
+    <para>This file defines a set of attributes, all of which are
+    concrete derivations (i.e., not functions).  In fact, we define a
+    <emphasis>mutually recursive</emphasis> set of attributes.  That
+    is, the attributes can refer to each other.  This is precisely
+    what we want since we want to <quote>plug</quote> the
+    various packages into each other.</para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-composition-co-2'>
+
+    <para>Here we <emphasis>import</emphasis> the Nix expression for
+    GNU Hello.  The import operation just loads and returns the
+    specified Nix expression. In fact, we could just have put the
+    contents of <xref linkend='ex-hello-nix' /> in
+    <filename>all-packages.nix</filename> at this point.  That
+    would be completely equivalent, but it would make the file rather
+    bulky.</para>
+
+    <para>Note that we refer to
+    <filename>../applications/misc/hello/ex-1</filename>, not
+    <filename>../applications/misc/hello/ex-1/default.nix</filename>.
+    When you try to import a directory, Nix automatically appends
+    <filename>/default.nix</filename> to the file name.</para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-composition-co-3'>
+
+    <para>This is where the actual composition takes place.  Here we
+    <emphasis>call</emphasis> the function imported from
+    <filename>../applications/misc/hello/ex-1</filename> with a set
+    containing the things that the function expects, namely
+    <varname>fetchurl</varname>, <varname>stdenv</varname>, and
+    <varname>perl</varname>.  We use inherit again to use the
+    attributes defined in the surrounding scope (we could also have
+    written <literal>fetchurl = fetchurl;</literal>, etc.).</para>
+
+    <para>The result of this function call is an actual derivation
+    that can be built by Nix (since when we fill in the arguments of
+    the function, what we get is its body, which is the call to
+    <varname>stdenv.mkDerivation</varname> in <xref
+    linkend='ex-hello-nix' />).</para>
+
+    <note><para>Nixpkgs has a convenience function
+    <function>callPackage</function> that imports and calls a
+    function, filling in any missing arguments by passing the
+    corresponding attribute from the Nixpkgs set, like this:
+
+<programlisting>
+hello = callPackage ../applications/misc/hello/ex-1 { };
+</programlisting>
+
+    If necessary, you can set or override arguments:
+
+<programlisting>
+hello = callPackage ../applications/misc/hello/ex-1 { stdenv = myStdenv; };
+</programlisting>
+
+    </para></note>
+
+  </callout>
+
+  <callout arearefs='ex-hello-composition-co-4'>
+
+    <para>Likewise, we have to instantiate Perl,
+    <varname>fetchurl</varname>, and the standard environment.</para>
+
+  </callout>
+
+</calloutlist>
+
+</section>
+
+
+<section><title>Testing</title>
+
+<para>You can now try to build Hello.  Of course, you could do
+<literal>nix-env -f pkgs/top-level/all-packages.nix -i hello</literal>,
+but you may not want to install a possibly broken package just yet.
+The best way to test the package is by using the command <command
+linkend="sec-nix-build">nix-build</command>, which builds a Nix
+expression and creates a symlink named <filename>result</filename> in
+the current directory:
+
+<screen>
+$ nix-build pkgs/top-level/all-packages.nix -A hello
+building path `/nix/store/632d2b22514d...-hello-2.1.1'
+hello-2.1.1/
+hello-2.1.1/intl/
+hello-2.1.1/intl/ChangeLog
+<replaceable>...</replaceable>
+
+$ ls -l result
+lrwxrwxrwx ... 2006-09-29 10:43 result -> /nix/store/632d2b22514d...-hello-2.1.1
+
+$ ./result/bin/hello
+Hello, world!</screen>
+
+The <link linkend='opt-attr'><option>-A</option></link> option selects
+the <literal>hello</literal> attribute from
+<filename>all-packages.nix</filename>.  This is faster than using the
+symbolic package name specified by the <literal>name</literal>
+attribute (which also happens to be <literal>hello</literal>) and is
+unambiguous (there can be multiple packages with the symbolic name
+<literal>hello</literal>, but there can be only one attribute in a set
+named <literal>hello</literal>).</para>
+
+<para><command>nix-build</command> registers the
+<filename>./result</filename> symlink as a garbage collection root, so
+unless and until you delete the <filename>./result</filename> symlink,
+the output of the build will be safely kept on your system.  You can
+use <command>nix-build</command>’s <option
+linkend='opt-out-link'>-o</option> switch to give the symlink another
+name.</para>
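+<para>For instance (an illustrative invocation, reusing the
+<literal>hello</literal> attribute from above), you could name the
+symlink <filename>hello</filename> instead of
+<filename>result</filename>:
+
+<screen>
+$ nix-build pkgs/top-level/all-packages.nix -A hello -o hello
+
+$ ./hello/bin/hello
+Hello, world!</screen>
+
+</para>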
+
+<para>Nix has a transactional semantics.  Once a build finishes
+successfully, Nix makes a note of this in its database: it registers
+that the path denoted by <envar>out</envar> is now
+<quote>valid</quote>.  If you try to build the derivation again, Nix
+will see that the path is already valid and finish immediately.  If a
+build fails, either because it returns a non-zero exit code, because
+Nix or the builder are killed, or because the machine crashes, then
+the output paths will not be registered as valid.  If you try to build
+the derivation again, Nix will remove the output paths if they exist
+(e.g., because the builder died half-way through <literal>make
+install</literal>) and try again.  Note that there is no
+<quote>negative caching</quote>: Nix doesn't remember that a build
+failed, and so a failed build can always be repeated.  This is because
+Nix cannot distinguish between permanent failures (e.g., a compiler
+error due to a syntax error in the source) and transient failures
+(e.g., a disk full condition).</para>
+
+<para>Nix also performs locking.  If you run multiple Nix builds
+simultaneously, and they try to build the same derivation, the first
+Nix instance that gets there will perform the build, while the others
+block (or perform other derivations if available) until the build
+finishes:
+
+<screen>
+$ nix-build pkgs/top-level/all-packages.nix -A hello
+waiting for lock on `/nix/store/0h5b7hp8d4hqfrw8igvx97x1xawrjnac-hello-2.1.1x'</screen>
+
+So it is always safe to run multiple instances of Nix in parallel
+(which isn’t the case with, say, <command>make</command>).</para>
+
+<para>If you have a system with multiple CPUs, you may want to have
+Nix build different derivations in parallel (insofar as possible).
+Just pass the option <link linkend='opt-max-jobs'><option>-j
+<replaceable>N</replaceable></option></link>, where
+<replaceable>N</replaceable> is the maximum number of jobs to be run
+in parallel.  Typically this should be the number of
+CPUs.</para>
+
+</section>
+
+
+<section><title>The generic builder</title>
+
+<para>Recall from <xref linkend='ex-hello-builder' /> that the builder
+looked something like this:
+
+<programlisting>
+PATH=$perl/bin:$PATH
+tar xvfz $src
+cd hello-*
+./configure --prefix=$out
+make
+make install</programlisting>
+
+The builders for almost all Unix packages look like this — set up some
+environment variables, unpack the sources, configure, build, and
+install.  For this reason the standard environment provides some Bash
+functions that automate the build process.  A builder using the
+generic build facilities is shown in <xref linkend='ex-hello-builder2'
+/>.</para>
+
+<example xml:id='ex-hello-builder2'><title>Build script using the generic
+build functions</title>
+<programlisting>
+buildInputs="$perl" <co xml:id='ex-hello-builder2-co-1' />
+
+source $stdenv/setup <co xml:id='ex-hello-builder2-co-2' />
+
+genericBuild <co xml:id='ex-hello-builder2-co-3' /></programlisting>
+</example>
+
+<calloutlist>
+
+  <callout arearefs='ex-hello-builder2-co-1'>
+
+    <para>The <envar>buildInputs</envar> variable tells
+    <filename>setup</filename> to use the indicated packages as
+    <quote>inputs</quote>.  This means that if a package provides a
+    <filename>bin</filename> subdirectory, it's added to
+    <envar>PATH</envar>; if it has an <filename>include</filename>
+    subdirectory, it's added to GCC's header search path; and so
+    on.<footnote><para>How does it work? <filename>setup</filename>
+    tries to source the file
+    <filename><replaceable>pkg</replaceable>/nix-support/setup-hook</filename>
+    of all dependencies.  These “setup hooks” can then set up whatever
+    environment variables they want; for instance, the setup hook for
+    Perl sets the <envar>PERL5LIB</envar> environment variable to
+    contain the <filename>lib/site_perl</filename> directories of all
+    inputs.</para></footnote>
+    </para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-builder2-co-2'>
+
+    <para>The function <function>genericBuild</function> is defined in
+    the file <literal>$stdenv/setup</literal>.</para>
+
+  </callout>
+
+  <callout arearefs='ex-hello-builder2-co-3'>
+
+    <para>The final step calls the shell function
+    <function>genericBuild</function>, which performs the steps that
+    were done explicitly in <xref linkend='ex-hello-builder' />.  The
+    generic builder is smart enough to figure out whether to unpack
+    the sources using <command>gzip</command>,
+    <command>bzip2</command>, etc.  It can be customised in many ways;
+    see <xref linkend='sec-standard-environment' />.</para>
+
+  </callout>
+
+</calloutlist>
+
+<para>Discerning readers will note that the
+<envar>buildInputs</envar> could just as well have been set in the Nix
+expression, like this:
+
+<programlisting>
+  buildInputs = [ perl ];</programlisting>
+
+The <varname>perl</varname> attribute can then be removed, and the
+builder becomes even shorter:
+
+<programlisting>
+source $stdenv/setup
+genericBuild</programlisting>
+
+In fact, <varname>mkDerivation</varname> provides a default builder
+that looks exactly like that, so it is actually possible to omit the
+builder for Hello entirely.</para>
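+<para>As a sketch of what that would look like (reusing the source
+URL and hash that appear elsewhere in this chapter), the Hello
+expression of <xref linkend='ex-hello-nix' /> could then be reduced
+to something like:
+
+<programlisting>
+{ stdenv, fetchurl, perl }:
+
+stdenv.mkDerivation {
+  name = "hello-2.1.1";
+  src = fetchurl {
+    url = http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz;
+    md5 = "70c9ccf9fac07f762c24f2df2290784d";
+  };
+  buildInputs = [ perl ];
+}</programlisting>
+
+</para>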
+
+</section>
+
+
+</section>
+
+
+
+<section><title>The Nix expression language</title>
+
+<para>The Nix expression language is a pure, lazy, functional
+language.  Purity means that operations in the language don't have
+side-effects (for instance, there is no variable assignment).
+Laziness means that arguments to functions are evaluated only when
+they are needed.  Functional means that functions are
+<quote>normal</quote> values that can be passed around and manipulated
+in interesting ways.  The language is not a full-featured, general
+purpose language.  Its main job is to describe packages,
+compositions of packages, and the variability within
+packages.</para>
+
+<para>This section presents the various features of the
+language.</para>
+
+
+<section xml:id='ssec-values'><title>Values</title>
+
+
+<simplesect><title>Simple values</title>
+
+<para>Nix has the following basic data types:
+
+<itemizedlist>
+
+  <listitem>
+
+    <para><emphasis>Strings</emphasis> can be written in three
+    ways.</para>
+
+    <para>The most common way is to enclose the string between double
+    quotes, e.g., <literal>"foo bar"</literal>.  Strings can span
+    multiple lines.  The special characters <literal>"</literal> and
+    <literal>\</literal> and the character sequence
+    <literal>${</literal> must be escaped by prefixing them with a
+    backslash (<literal>\</literal>).  Newlines, carriage returns and
+    tabs can be written as <literal>\n</literal>,
+    <literal>\r</literal> and <literal>\t</literal>,
+    respectively.</para>
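+    <para>For instance (a small illustration), the following string
+    contains a literal double quote, a literal <literal>${</literal>
+    and ends with a newline:
+
+<programlisting>
+"a \"quoted\" word, a literal \${ sequence, and a newline\n"</programlisting>
+
+    </para>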
+
+    <para>You can include the result of an expression into a string by
+    enclosing it in
+    <literal>${<replaceable>...</replaceable>}</literal>, a feature
+    known as <emphasis>antiquotation</emphasis>.  The enclosed
+    expression must evaluate to something that can be coerced into a
+    string (meaning that it must be a string, a path, or a
+    derivation).  For instance, rather than writing
+
+<programlisting>
+"--with-freetype2-library=" + freetype + "/lib"</programlisting>
+
+    (where <varname>freetype</varname> is a derivation), you can
+    instead write the more natural
+
+<programlisting>
+"--with-freetype2-library=${freetype}/lib"</programlisting>
+
+    The latter is automatically translated to the former.  A more
+    complicated example (from the Nix expression for <link
+    xlink:href='http://www.trolltech.com/products/qt'>Qt</link>):
+
+<programlisting>
+configureFlags = "
+  -system-zlib -system-libpng -system-libjpeg
+  ${if openglSupport then "-dlopen-opengl
+    -L${mesa}/lib -I${mesa}/include
+    -L${libXmu}/lib -I${libXmu}/include" else ""}
+  ${if threadSupport then "-thread" else "-no-thread"}
+";</programlisting>
+
+    Note that Nix expressions and strings can be arbitrarily nested;
+    in this case the outer string contains various antiquotations that
+    themselves contain strings (e.g., <literal>"-thread"</literal>),
+    some of which in turn contain expressions (e.g.,
+    <literal>${mesa}</literal>).</para>
+
+    <para>The second way to write string literals is as an
+    <emphasis>indented string</emphasis>, which is enclosed between
+    pairs of <emphasis>double single-quotes</emphasis>, like so:
+
+<programlisting>
+''
+  This is the first line.
+  This is the second line.
+    This is the third line.
+''</programlisting>
+
+    This kind of string literal intelligently strips indentation from
+    the start of each line.  To be precise, it strips from each line a
+    number of spaces equal to the minimal indentation of the string as
+    a whole (disregarding the indentation of empty lines).  For
+    instance, the first and second lines are indented two spaces, while
+    the third line is indented four spaces.  Thus, two spaces are
+    stripped from each line, so the resulting string is
+
+<programlisting>
+"This is the first line.\nThis is the second line.\n  This is the third line.\n"</programlisting>
+
+    </para>
+
+    <para>Note that the whitespace and newline following the opening
+    <literal>''</literal> is ignored if there is no non-whitespace
+    text on the initial line.</para>
+
+    <para>Antiquotation
+    (<literal>${<replaceable>expr</replaceable>}</literal>) is
+    supported in indented strings.</para>
+
+    <para>Since <literal>${</literal> and <literal>''</literal> have
+    special meaning in indented strings, you need a way to quote them.
+    <literal>${</literal> can be escaped by prefixing it with
+    <literal>''</literal> (that is, two single quotes), i.e.,
+    <literal>''${</literal>.  <literal>''</literal> can be escaped by
+    prefixing it with <literal>'</literal>, i.e.,
+    <literal>'''</literal>.  Finally, linefeed, carriage-return and
+    tab characters can be written as <literal>''\n</literal>,
+    <literal>''\r</literal>, <literal>''\t</literal>.</para>
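+    <para>For example (a small illustration), the indented string
+
+<programlisting>
+''
+  escaped dollar-curly: ''${
+  escaped double single-quote: '''
+''</programlisting>
+
+    contains a literal <literal>${</literal> on the first line and a
+    literal <literal>''</literal> on the second.</para>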
+
+    <para>Indented strings are primarily useful in that they allow
+    multi-line string literals to follow the indentation of the
+    enclosing Nix expression, and that less escaping is typically
+    necessary for strings representing languages such as shell scripts
+    and configuration files because <literal>''</literal> is much less
+    common than <literal>"</literal>.  Example:
+
+<programlisting>
+stdenv.mkDerivation {
+  <replaceable>...</replaceable>
+  postInstall =
+    ''
+      mkdir $out/bin $out/etc
+      cp foo $out/bin
+      echo "Hello World" > $out/etc/foo.conf
+      ${if enableBar then "cp bar $out/bin" else ""}
+    '';
+  <replaceable>...</replaceable>
+}
+</programlisting>
+
+    </para>
+
+    <para>Finally, as a convenience, <emphasis>URIs</emphasis> as
+    defined in appendix B of <link
+    xlink:href='http://www.ietf.org/rfc/rfc2396.txt'>RFC 2396</link>
+    can be written <emphasis>as is</emphasis>, without quotes.  For
+    instance, the string
+    <literal>"http://example.org/foo.tar.bz2"</literal>
+    can also be written as
+    <literal>http://example.org/foo.tar.bz2</literal>.</para>
+
+  </listitem>
+
+  <listitem><para><emphasis>Integers</emphasis>, e.g.,
+  <literal>123</literal>.</para></listitem>
+
+  <listitem><para><emphasis>Paths</emphasis>, e.g.,
+  <filename>/bin/sh</filename> or <filename>./builder.sh</filename>.
+  A path must contain at least one slash to be recognised as such; for
+  instance, <filename>builder.sh</filename> is not a
+  path<footnote><para>It's parsed as an expression that selects the
+  attribute <varname>sh</varname> from the variable
+  <varname>builder</varname>.</para></footnote> (see the brief example after this list).  If the file name is
+  relative, i.e., if it does not begin with a slash, it is made
+  absolute at parse time relative to the directory of the Nix
+  expression that contained it.  For instance, if a Nix expression in
+  <filename>/foo/bar/bla.nix</filename> refers to
+  <filename>../xyzzy/fnord.nix</filename>, the absolute path is
+  <filename>/foo/xyzzy/fnord.nix</filename>.</para></listitem>
+
+  <listitem><para><emphasis>Booleans</emphasis> with values
+  <literal>true</literal> and
+  <literal>false</literal>.</para></listitem>
+
+  <listitem><para>The null value, denoted as
+  <literal>null</literal>.</para></listitem>
+
+</itemizedlist>
+
+</para>
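+<para>As a brief illustration of the slash rule for paths given
+above:
+
+<programlisting>
+./builder.sh   # a path, because it contains a slash
+builder.sh     # not a path: selects attribute <varname>sh</varname> from <varname>builder</varname></programlisting>
+
+</para>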
+
+</simplesect>
+
+
+<simplesect><title>Lists</title>
+
+<para>Lists are formed by enclosing a whitespace-separated list of
+values between square brackets.  For example,
+
+<programlisting>
+[ 123 ./foo.nix "abc" (f { x = y; }) ]</programlisting>
+
+defines a list of four elements, the last being the result of a call
+to the function <varname>f</varname>.  Note that function calls have
+to be enclosed in parentheses.  If they had been omitted, e.g.,
+
+<programlisting>
+[ 123 ./foo.nix "abc" f { x = y; } ]</programlisting>
+
+the result would be a list of five elements, the fourth one being a
+function and the fifth being a set.</para>
+
+</simplesect>
+
+
+<simplesect><title>Sets</title>
+
+<para>Sets are really the core of the language, since ultimately the
+Nix language is all about creating derivations, which are really just
+sets of attributes to be passed to build scripts.</para>
+
+<para>Sets are just a list of name/value pairs (called
+<emphasis>attributes</emphasis>) enclosed in curly brackets, where
+each value is an arbitrary expression terminated by a semicolon.  For
+example:
+
+<programlisting>
+{ x = 123;
+  text = "Hello";
+  y = f { bla = 456; };
+}</programlisting>
+
+This defines a set with attributes named <varname>x</varname>,
+<varname>text</varname>, and <varname>y</varname>.  The order of the
+attributes is irrelevant.  An attribute name may only occur
+once.</para>
+
+<para>Attributes can be selected from a set using the
+<literal>.</literal> operator.  For instance,
+
+<programlisting>
+{ a = "Foo"; b = "Bar"; }.a</programlisting>
+
+evaluates to <literal>"Foo"</literal>.  It is possible to provide a
+default value in an attribute selection using the
+<literal>or</literal> keyword.  For example,
+
+<programlisting>
+{ a = "Foo"; b = "Bar"; }.c or "Xyzzy"</programlisting>
+
+will evaluate to <literal>"Xyzzy"</literal> because there is no
+<varname>c</varname> attribute in the set.</para>
+
+<para>You can use arbitrary double-quoted strings as attribute
+names:
+
+<programlisting>
+{ "foo ${bar}" = 123; "nix-1.0" = 456; }."foo ${bar}"
+</programlisting>
+
+This will evaluate to <literal>123</literal> (assuming
+<literal>bar</literal> is antiquotable). In the case where an
+attribute name is just a single antiquotation, the quotes can be
+dropped:
+
+<programlisting>
+{ foo = 123; }.${bar} or 456 </programlisting>
+
+This will evaluate to <literal>123</literal> if
+<literal>bar</literal> evaluates to <literal>"foo"</literal> when
+coerced to a string and <literal>456</literal> otherwise (again
+assuming <literal>bar</literal> is antiquotable).</para>
+
+<para>In the special case where an attribute name inside a set declaration
+evaluates to <literal>null</literal> (which is normally an error, as
+<literal>null</literal> is not antiquotable), that attribute is simply not
+added to the set:
+
+<programlisting>
+{ ${if foo then "bar" else null} = true; }</programlisting>
+
+This will evaluate to <literal>{}</literal> if <literal>foo</literal>
+evaluates to <literal>false</literal>.</para>
+
+
+</simplesect>
+
+
+</section>
+
+
+<section><title>Language constructs</title>
+
+
+<simplesect><title>Recursive sets</title>
+
+<para>Recursive sets are just normal sets, but the attributes can
+refer to each other.  For example,
+
+<programlisting>
+rec {
+  x = y;
+  y = 123;
+}.x
+</programlisting>
+
+evaluates to <literal>123</literal>.  Note that without
+<literal>rec</literal> the binding <literal>x = y;</literal> would
+refer to the variable <varname>y</varname> in the surrounding scope,
+if one exists, and would be invalid if no such variable exists.  That
+is, in a normal (non-recursive) set, attributes are not added to the
+lexical scope; in a recursive set, they are.</para>
+
+<para>Recursive sets of course introduce the danger of infinite
+recursion.  For example,
+
+<programlisting>
+rec {
+  x = y;
+  y = x;
+}.x</programlisting>
+
+does not terminate<footnote><para>Actually, Nix detects infinite
+recursion in this case and aborts (<quote>infinite recursion
+encountered</quote>).</para></footnote>.</para>
+
+</simplesect>
+
+
+<simplesect><title>Let-expressions</title>
+
+<para>A let-expression allows you to define local variables for an
+expression.  For instance,
+
+<programlisting>
+let
+  x = "foo";
+  y = "bar";
+in x + y</programlisting>
+
+evaluates to <literal>"foobar"</literal>.
+
+</para>
+
+</simplesect>
+
+
+<simplesect><title>Inheriting attributes</title>
+
+<para>When defining a set it is often convenient to copy variables
+from the surrounding lexical scope (e.g., when you want to propagate
+attributes).  This can be shortened using the
+<literal>inherit</literal> keyword.  For instance,
+
+<programlisting>
+let x = 123; in
+{ inherit x;
+  y = 456;
+}</programlisting>
+
+evaluates to <literal>{ x = 123; y = 456; }</literal>.  (Note that
+this works because <varname>x</varname> is added to the lexical scope
+by the <literal>let</literal> construct.)  It is also possible to
+inherit attributes from another set.  For instance, in this fragment
+from <filename>all-packages.nix</filename>,
+
+<programlisting>
+  graphviz = (import ../tools/graphics/graphviz) {
+    inherit fetchurl stdenv libpng libjpeg expat x11 yacc;
+    inherit (xlibs) libXaw;
+  };
+
+  xlibs = {
+    libX11 = ...;
+    libXaw = ...;
+    ...
+  }
+
+  libpng = ...;
+  libjpeg = ...;
+  ...</programlisting>
+
+the set passed to the function defined in
+<filename>../tools/graphics/graphviz</filename> inherits a number of
+variables from the surrounding scope (<varname>fetchurl</varname>
+... <varname>yacc</varname>), but also inherits
+<varname>libXaw</varname> (the X Athena Widgets) from the
+<varname>xlibs</varname> (X11 client-side libraries) set.</para>
+
+</simplesect>
+
+
+<simplesect xml:id="ss-functions"><title>Functions</title>
+
+<para>Functions have the following form:
+
+<programlisting>
+<replaceable>pattern</replaceable>: <replaceable>body</replaceable></programlisting>
+
+The pattern specifies what the argument of the function must look
+like, and binds variables in the body to (parts of) the
+argument.  There are three kinds of patterns:</para>
+
+<itemizedlist>
+
+
+  <listitem><para>If a pattern is a single identifier, then the
+  function matches any argument.  Example:
+
+  <programlisting>
+let negate = x: !x;
+    concat = x: y: x + y;
+in if negate true then concat "foo" "bar" else ""</programlisting>
+
+  Note that <function>concat</function> is a function that takes one
+  argument and returns a function that takes another argument.  This
+  allows partial parameterisation (i.e., only filling some of the
+  arguments of a function); e.g.,
+
+  <programlisting>
+map (concat "foo") [ "bar" "bla" "abc" ]</programlisting>
+
+  evaluates to <literal>[ "foobar" "foobla"
+  "fooabc" ]</literal>.</para></listitem>
+
+
+  <listitem><para>A <emphasis>set pattern</emphasis> of the form
+  <literal>{ name1, name2, …, nameN }</literal> matches a set
+  containing the listed attributes, and binds the values of those
+  attributes to variables in the function body.  For example, the
+  function
+
+<programlisting>
+{ x, y, z }: z + y + x</programlisting>
+
+  can only be called with a set containing exactly the attributes
+  <varname>x</varname>, <varname>y</varname> and
+  <varname>z</varname>.  No other attributes are allowed.  If you want
+  to allow additional arguments, you can use an ellipsis
+  (<literal>...</literal>):
+
+<programlisting>
+{ x, y, z, ... }: z + y + x</programlisting>
+
+  This works on any set that contains at least the three named
+  attributes.</para>
+
+  <para>It is possible to provide <emphasis>default values</emphasis>
+  for attributes, in which case they are allowed to be missing.  A
+  default value is specified by writing
+  <literal><replaceable>name</replaceable> ?
+  <replaceable>e</replaceable></literal>, where
+  <replaceable>e</replaceable> is an arbitrary expression.  For example,
+
+<programlisting>
+{ x, y ? "foo", z ? "bar" }: z + y + x</programlisting>
+
+  specifies a function that only requires an attribute named
+  <varname>x</varname>, but optionally accepts <varname>y</varname>
+  and <varname>z</varname>.</para></listitem>
+
+
+  <listitem><para>An <literal>@</literal>-pattern provides a means of referring
+  to the whole value being matched:
+
+<programlisting>
+args@{ x, y, z, ... }: z + y + x + args.a</programlisting>
+
+  Here <varname>args</varname> is bound to the entire argument, which
+  is further matched against the pattern <literal>{ x, y, z,
+  ... }</literal>.</para></listitem>
+
+
+</itemizedlist>
+
+<para>Note that functions do not have names.  If you want to give them
+a name, you can bind them to an attribute, e.g.,
+
+<programlisting>
+let concat = { x, y }: x + y;
+in concat { x = "foo"; y = "bar"; }</programlisting>
+
+</para>
+
+</simplesect>
+
+
+<simplesect><title>Conditionals</title>
+
+<para>Conditionals look like this:
+
+<programlisting>
+if <replaceable>e1</replaceable> then <replaceable>e2</replaceable> else <replaceable>e3</replaceable></programlisting>
+
+where <replaceable>e1</replaceable> is an expression that should
+evaluate to a Boolean value (<literal>true</literal> or
+<literal>false</literal>).</para>
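+<para>For example (a trivial illustration),
+
+<programlisting>
+if "foo" == "foo" then "yes" else "no"</programlisting>
+
+evaluates to <literal>"yes"</literal>.</para>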
+
+</simplesect>
+
+
+<simplesect><title>Assertions</title>
+
+<para>Assertions are generally used to check that certain requirements
+on or between features and dependencies hold.  They look like this:
+
+<programlisting>
+assert <replaceable>e1</replaceable>; <replaceable>e2</replaceable></programlisting>
+
+where <replaceable>e1</replaceable> is an expression that should
+evaluate to a Boolean value.  If it evaluates to
+<literal>true</literal>, <replaceable>e2</replaceable> is returned;
+otherwise expression evaluation is aborted and a backtrace is printed.</para>
+
+<example xml:id='ex-subversion-nix'><title>Nix expression for Subversion</title>
+<programlisting>
+{ localServer ? false
+, httpServer ? false
+, sslSupport ? false
+, pythonBindings ? false
+, javaSwigBindings ? false
+, javahlBindings ? false
+, stdenv, fetchurl
+, openssl ? null, httpd ? null, db4 ? null, expat, swig ? null, j2sdk ? null
+}:
+
+assert localServer -> db4 != null; <co xml:id='ex-subversion-nix-co-1' />
+assert httpServer -> httpd != null &amp;&amp; httpd.expat == expat; <co xml:id='ex-subversion-nix-co-2' />
+assert sslSupport -> openssl != null &amp;&amp; (httpServer -> httpd.openssl == openssl); <co xml:id='ex-subversion-nix-co-3' />
+assert pythonBindings -> swig != null &amp;&amp; swig.pythonSupport;
+assert javaSwigBindings -> swig != null &amp;&amp; swig.javaSupport;
+assert javahlBindings -> j2sdk != null;
+
+stdenv.mkDerivation {
+  name = "subversion-1.1.1";
+  ...
+  openssl = if sslSupport then openssl else null; <co xml:id='ex-subversion-nix-co-4' />
+  ...
+}</programlisting>
+</example>
+
+<para><xref linkend='ex-subversion-nix' /> shows how assertions are
+used in the Nix expression for Subversion.</para>
+
+<calloutlist>
+
+  <callout arearefs='ex-subversion-nix-co-1'>
+    <para>This assertion states that if Subversion is to have support
+    for local repositories, then Berkeley DB is needed.  So if the
+    Subversion function is called with the
+    <varname>localServer</varname> argument set to
+    <literal>true</literal> but the <varname>db4</varname> argument
+    set to <literal>null</literal>, then the evaluation fails.</para>
+  </callout>
+
+  <callout arearefs='ex-subversion-nix-co-2'>
+    <para>This is a more subtle condition: if Subversion is built with
+    Apache (<literal>httpServer</literal>) support, then the Expat
+    library (an XML library) used by Subversion should be the same as the
+    one used by Apache.  This is because in this configuration
+    Subversion code ends up being linked with Apache code, and if the
+    Expat libraries do not match, a build- or runtime link error or
+    incompatibility might occur.</para>
+  </callout>
+
+  <callout arearefs='ex-subversion-nix-co-3'>
+    <para>This assertion says that in order for Subversion to have SSL
+    support (so that it can access <literal>https</literal> URLs), an
+    OpenSSL library must be passed.  Additionally, it says that
+    <emphasis>if</emphasis> Apache support is enabled, then Apache's
+    OpenSSL should match Subversion's.  (Note that if Apache support
+    is not enabled, we don't care about Apache's OpenSSL.)</para>
+  </callout>
+
+  <callout arearefs='ex-subversion-nix-co-4'>
+    <para>The conditional here is not really related to assertions,
+    but is worth pointing out: it ensures that if SSL support is
+    disabled, then the Subversion derivation is not dependent on
+    OpenSSL, even if a non-<literal>null</literal> value was passed.
+    This prevents an unnecessary rebuild of Subversion if OpenSSL
+    changes.</para>
+  </callout>
+
+</calloutlist>
+
+</simplesect>
+
+
+
+<simplesect><title>With-expressions</title>
+
+<para>A <emphasis>with-expression</emphasis>,
+
+<programlisting>
+with <replaceable>e1</replaceable>; <replaceable>e2</replaceable></programlisting>
+
+introduces the set <replaceable>e1</replaceable> into the lexical
+scope of the expression <replaceable>e2</replaceable>.  For instance,
+
+<programlisting>
+let as = { x = "foo"; y = "bar"; };
+in with as; x + y</programlisting>
+
+evaluates to <literal>"foobar"</literal> since the
+<literal>with</literal> adds the <varname>x</varname> and
+<varname>y</varname> attributes of <varname>as</varname> to the
+lexical scope in the expression <literal>x + y</literal>.  The most
+common use of <literal>with</literal> is in conjunction with the
+<function>import</function> function.  E.g.,
+
+<programlisting>
+with (import ./definitions.nix); ...</programlisting>
+
+makes all attributes defined in the file
+<filename>definitions.nix</filename> available as if they were defined
+locally in a <literal>rec</literal>-expression.</para>
+
+</simplesect>
+
+
+<simplesect><title>Comments</title>
+
+<para>Comments can be single-line, started with a <literal>#</literal>
+character, or inline/multi-line, enclosed within <literal>/*
+... */</literal>.</para>
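+<para>For instance (a small illustration):
+
+<programlisting>
+# A single-line comment.
+{ x = 1; /* an inline comment */ y = 2; }</programlisting>
+
+</para>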
+
+</simplesect>
+
+
+</section>
+
+
+<section><title>Operators</title>
+
+<para><xref linkend='table-operators' /> lists the operators in the
+Nix expression language, in order of precedence (from strongest to
+weakest binding).</para>
+
+<table xml:id='table-operators'>
+  <title>Operators</title>
+  <tgroup cols='3'>
+    <thead>
+      <row>
+        <entry>Syntax</entry>
+        <entry>Associativity</entry>
+        <entry>Description</entry>
+      </row>
+    </thead>
+    <tbody>
+      <row>
+        <entry><replaceable>e</replaceable> <literal>.</literal>
+        <replaceable>attrpath</replaceable>
+        [ <literal>or</literal> <replaceable>def</replaceable> ]
+        </entry>
+        <entry>none</entry>
+        <entry>Select attribute denoted by the attribute path
+        <replaceable>attrpath</replaceable> from set
+        <replaceable>e</replaceable>.  (An attribute path is a
+        dot-separated list of attribute names.)  If the attribute
+        doesn’t exist, return <replaceable>def</replaceable> if
+        provided, otherwise abort evaluation.</entry>
+      </row>
+      <row>
+        <entry><replaceable>e1</replaceable> <replaceable>e2</replaceable></entry>
+        <entry>left</entry>
+        <entry>Call function <replaceable>e1</replaceable> with
+        argument <replaceable>e2</replaceable>.</entry>
+      </row>
+      <row>
+        <entry><replaceable>e</replaceable> <literal>?</literal>
+        <replaceable>attrpath</replaceable></entry>
+        <entry>none</entry>
+        <entry>Test whether set <replaceable>e</replaceable> contains
+        the attribute denoted by <replaceable>attrpath</replaceable>;
+        return <literal>true</literal> or
+        <literal>false</literal>.</entry>
+      </row>
+      <row>
+        <entry><replaceable>e1</replaceable> <literal>++</literal> <replaceable>e2</replaceable></entry>
+        <entry>right</entry>
+        <entry>List concatenation.</entry>
+      </row>
+      <row>
+        <entry><replaceable>e1</replaceable> <literal>+</literal> <replaceable>e2</replaceable></entry>
+        <entry>left</entry>
+        <entry>String or path concatenation.</entry>
+      </row>
+      <row>
+        <entry><literal>!</literal> <replaceable>e</replaceable></entry>
+        <entry>left</entry>
+        <entry>Boolean negation.</entry>
+      </row>
+      <row>
+        <entry><replaceable>e1</replaceable> <literal>//</literal>
+        <replaceable>e2</replaceable></entry>
+        <entry>right</entry>
+        <entry>Return a set consisting of the attributes in
+        <replaceable>e1</replaceable> and
+        <replaceable>e2</replaceable> (with the latter taking
+        precedence over the former in case of equally named
+        attributes).</entry>
+      </row>
+      <row>
+        <entry><replaceable>e1</replaceable> <literal>==</literal>
+        <replaceable>e2</replaceable></entry>
+        <entry>none</entry>
+        <entry>Equality.</entry>
+      </row>
+      <row>
+        <entry><replaceable>e1</replaceable> <literal>!=</literal>
+        <replaceable>e2</replaceable></entry>
+        <entry>none</entry>
+        <entry>Inequality.</entry>
+      </row>
+      <row>
+        <entry><replaceable>e1</replaceable> <literal>&amp;&amp;</literal>
+        <replaceable>e2</replaceable></entry>
+        <entry>left</entry>
+        <entry>Logical AND.</entry>
+      </row>
+      <row>
+        <entry><replaceable>e1</replaceable> <literal>||</literal>
+        <replaceable>e2</replaceable></entry>
+        <entry>left</entry>
+        <entry>Logical OR.</entry>
+      </row>
+      <row>
+        <entry><replaceable>e1</replaceable> <literal>-></literal>
+        <replaceable>e2</replaceable></entry>
+        <entry>none</entry>
+        <entry>Logical implication (equivalent to
+        <literal>!<replaceable>e1</replaceable> ||
+        <replaceable>e2</replaceable></literal>).</entry>
+      </row>
+    </tbody>
+  </tgroup>
+</table>
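+<para>A few illustrative evaluations (the comments show what each
+expression reduces to):
+
+<programlisting>
+{ x = 1; y = 2; } // { y = 3; z = 4; }   # { x = 1; y = 3; z = 4; }
+{ x = 1; } ? x                           # true
+[ 1 2 ] ++ [ 3 ]                         # [ 1 2 3 ]
+"foo" + "bar"                            # "foobar"</programlisting>
+
+</para>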
+
+</section>
+
+
+<section xml:id="ssec-derivation"><title>Derivations</title>
+
+<para>The most important built-in function is
+<function>derivation</function>, which is used to describe a single
+derivation (a build action).  It takes as input a set, the attributes
+of which specify the inputs of the build.</para>
+
+<itemizedlist>
+
+  <listitem xml:id="attr-system"><para>There must be an attribute named
+  <varname>system</varname> whose value must be a string specifying a
+  Nix platform identifier, such as <literal>"i686-linux"</literal> or
+  <literal>"powerpc-darwin"</literal><footnote><para>To figure out
+  your platform identifier, look at the line <quote>Checking for the
+  canonical Nix system name</quote> in the output of Nix's
+  <filename>configure</filename> script.</para></footnote>.  The build
+  can only be performed on a machine and operating system matching the
+  platform identifier.  (Nix can automatically forward builds for
+  other platforms by forwarding them to other machines; see <xref
+  linkend='chap-distributed-builds' />.)</para></listitem>
+
+  <listitem><para>There must be an attribute named
+  <varname>name</varname> whose value must be a string.  This is used
+  as a symbolic name for the package by <command>nix-env</command>,
+  and it is appended to the output paths of the
+  derivation.</para></listitem>
+
+  <listitem><para>There must be an attribute named
+  <varname>builder</varname> that identifies the program that is
+  executed to perform the build.  It can be either a derivation or a
+  source (a local file reference, e.g.,
+  <filename>./builder.sh</filename>).</para></listitem>
+
+  <listitem><para>Every attribute is passed as an environment variable
+  to the builder.  Attribute values are translated to environment
+  variables as follows:
+
+    <itemizedlist>
+
+      <listitem><para>Strings and integers are just passed
+      verbatim.</para></listitem>
+
+      <listitem><para>A <emphasis>path</emphasis> (e.g.,
+      <filename>../foo/sources.tar</filename>) causes the referenced
+      file to be copied to the store; its location in the store is put
+      in the environment variable.  The idea is that all sources
+      should reside in the Nix store, since all inputs to a derivation
+      should reside in the Nix store.</para></listitem>
+
+      <listitem><para>A <emphasis>derivation</emphasis> causes that
+      derivation to be built prior to the present derivation; its
+      default output path is put in the environment
+      variable.</para></listitem>
+
+      <listitem><para>Lists of the previous types are also allowed.
+      They are simply concatenated, separated by
+      spaces.</para></listitem>
+
+      <listitem><para><literal>true</literal> is passed as the string
+      <literal>1</literal>, <literal>false</literal> and
+      <literal>null</literal> are passed as an empty string.
+      </para></listitem>
+    </itemizedlist>
+
+  </para></listitem>
+
+  <listitem><para>The optional attribute <varname>args</varname>
+  specifies command-line arguments to be passed to the builder.  It
+  should be a list.</para></listitem>
+
+  <listitem><para>The optional attribute <varname>outputs</varname>
+  specifies a list of symbolic outputs of the derivation.  By default,
+  a derivation produces a single output path, denoted as
+  <literal>out</literal>.  However, derivations can produce multiple
+  output paths.  This is useful because it allows outputs to be
+  downloaded or garbage-collected separately.  For instance, imagine a
+  library package that provides a dynamic library, header files, and
+  documentation.  A program that links against the library doesn’t
+  need the header files and documentation at runtime, and it doesn’t
+  need the documentation at build time.  Thus, the library package
+  could specify:
+<programlisting>
+outputs = [ "lib" "headers" "doc" ];
+</programlisting>
+  This will cause Nix to pass environment variables
+  <literal>lib</literal>, <literal>headers</literal> and
+  <literal>doc</literal> to the builder containing the intended store
+  paths of each output.  The builder would typically do something like
+<programlisting>
+./configure --libdir=$lib/lib --includedir=$headers/include --docdir=$doc/share/doc
+</programlisting>
+  for an Autoconf-style package.  You can refer to each output of a
+  derivation by selecting it as an attribute, e.g.
+<programlisting>
+buildInputs = [ pkg.lib pkg.headers ];
+</programlisting>
+  The first element of <varname>outputs</varname> determines the
+  <emphasis>default output</emphasis>.  Thus, you could also write
+<programlisting>
+buildInputs = [ pkg pkg.headers ];
+</programlisting>
+  since <literal>pkg</literal> is equivalent to
+  <literal>pkg.lib</literal>.</para></listitem>
+
+</itemizedlist>
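+<para>Putting the required attributes together, a minimal call to
+<function>derivation</function> might look like this (a sketch; the
+platform, builder script and source file are illustrative):
+
+<programlisting>
+derivation {
+  name = "hello-2.1.1";
+  system = "i686-linux";
+  builder = ./builder.sh;
+  src = ./hello-2.1.1.tar.gz;
+}</programlisting>
+
+</para>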
+
+<para>The function <function>mkDerivation</function> in the standard
+environment is a wrapper around <function>derivation</function> that
+adds a default value for <varname>system</varname> and always uses
+Bash as the builder, to which the supplied builder is passed as a
+command-line argument.  See <xref linkend='sec-standard-environment'
+/>.</para>
+
+<para>The builder is executed as follows:
+
+<itemizedlist>
+
+  <listitem><para>A temporary directory is created under the directory
+  specified by <envar>TMPDIR</envar> (default
+  <filename>/tmp</filename>) where the build will take place.  The
+  current directory is changed to this directory.</para></listitem>
+
+  <listitem><para>The environment is cleared and set to the derivation
+  attributes, as specified above.</para></listitem>
+
+  <listitem><para>In addition, the following variables are set:
+
+  <itemizedlist>
+
+    <listitem><para><envar>NIX_BUILD_TOP</envar> contains the path of
+    the temporary directory for this build.</para></listitem>
+
+    <listitem><para>Also, <envar>TMPDIR</envar>,
+    <envar>TEMPDIR</envar>, <envar>TMP</envar>, <envar>TEMP</envar>
+    are set to point to the temporary directory.  This is to prevent
+    the builder from accidentally writing temporary files anywhere
+    else.  Doing so might cause interference by other
+    processes.</para></listitem>
+
+    <listitem><para><envar>PATH</envar> is set to
+    <filename>/path-not-set</filename> to prevent shells from
+    initialising it to their built-in default value.</para></listitem>
+
+    <listitem><para><envar>HOME</envar> is set to
+    <filename>/homeless-shelter</filename> to prevent programs from
+    using <filename>/etc/passwd</filename> or the like to find the
+    user's home directory, which could cause impurity.  Usually, when
+    <envar>HOME</envar> is set, it is used as the location of the home
+    directory, even if it points to a non-existent
+    path.</para></listitem>
+
+    <listitem><para><envar>NIX_STORE</envar> is set to the path of the
+    top-level Nix store directory (typically,
+    <filename>/nix/store</filename>).</para></listitem>
+
+    <listitem><para>For each output declared in
+    <varname>outputs</varname>, the corresponding environment variable
+    is set to point to the intended path in the Nix store for that
+    output.  Each output path is a concatenation of the cryptographic
+    hash of all build inputs, the <varname>name</varname> attribute
+    and the output name.  (The output name is omitted if it’s
+    <literal>out</literal>.)</para></listitem>
+
+  </itemizedlist>
+
+  </para></listitem>
+
+  <listitem><para>If an output path already exists, it is removed.
+  Also, locks are acquired to prevent multiple Nix instances from
+  performing the same build at the same time.</para></listitem>
+
+  <listitem><para>A log of the combined standard output and error is
+  written to <filename>/nix/var/log/nix</filename>.</para></listitem>
+
+  <listitem><para>The builder is executed with the arguments specified
+  by the attribute <varname>args</varname>.  If it exits with exit
+  code 0, it is considered to have succeeded.</para></listitem>
+
+  <listitem><para>The temporary directory is removed (unless the
+  <option>-K</option> option was specified).</para></listitem>
+
+  <listitem><para>If the build was successful, Nix scans each output
+  path for references to input paths by looking for the hash parts of
+  the input paths.  Since these are potential runtime dependencies,
+  Nix registers them as dependencies of the output
+  paths.</para></listitem>
+
+  <listitem><para>After the build, Nix sets the last-modified
+  timestamp on all files in the build result to 1 (00:00:01 1/1/1970
+  UTC), sets the group to the default group, and sets the mode of the
+  file to 0444 or 0555 (i.e., read-only, with execute permission
+  enabled if the file was originally executable).  Note that possible
+  <literal>setuid</literal> and <literal>setgid</literal> bits are
+  cleared.  Setuid and setgid programs are not currently supported by
+  Nix.  This is because the Nix archives used in deployment have no
+  concept of ownership information, and because it makes the build
+  result dependent on the user performing the build.</para></listitem>
+
+</itemizedlist>
+
+</para>
+
+
+<section><title>Advanced attributes</title>
+
+<para>Derivations can declare some infrequently used optional
+attributes.</para>
+
+<variablelist>
+
+  <varlistentry><term><varname>allowedReferences</varname></term>
+
+    <listitem><para>The optional attribute
+    <varname>allowedReferences</varname> specifies a list of legal
+    references (dependencies) of the output of the builder.  For
+    example,
+
+<programlisting>
+allowedReferences = [];
+</programlisting>
+
+    enforces that the output of a derivation cannot have any runtime
+    dependencies on its inputs.  This is used in NixOS to check that
+    generated files such as initial ramdisks for booting Linux don’t
+    have accidental dependencies on other paths in the Nix
+    store.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><varname>exportReferencesGraph</varname></term>
+
+    <listitem><para>This attribute allows builders access to the
+    references graph of their inputs.  The attribute is a list of
+    inputs in the Nix store whose references graph the builder needs
+    to know.  The value of this attribute should be a list of pairs
+    <literal>[ <replaceable>name1</replaceable>
+    <replaceable>path1</replaceable> <replaceable>name2</replaceable>
+    <replaceable>path2</replaceable> <replaceable>...</replaceable>
+    ]</literal>.  The references graph of each
+    <replaceable>pathN</replaceable> will be stored in a text file
+    <replaceable>nameN</replaceable> in the temporary build directory.
+    The text files have the format used by <command>nix-store
+    --register-validity</command> (with the deriver fields left
+    empty).  For example, when the following derivation is built:
+
+<programlisting>
+derivation {
+  ...
+  exportReferencesGraph = [ "libfoo-graph" libfoo ];
+};
+</programlisting>
+
+    the references graph of <literal>libfoo</literal> is placed in the
+    file <filename>libfoo-graph</filename> in the temporary build
+    directory.</para>
+
+    <para><varname>exportReferencesGraph</varname> is useful for
+    builders that want to do something with the closure of a store
+    path.  Examples include the builders in NixOS that generate the
+    initial ramdisk for booting Linux (a <command>cpio</command>
+    archive containing the closure of the boot script) and the
+    ISO-9660 image for the installation CD (which is populated with a
+    Nix store containing the closure of a bootable NixOS
+    configuration).</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry xml:id="fixed-output-drvs">
+    <term><varname>outputHash</varname></term>
+    <term><varname>outputHashAlgo</varname></term>
+    <term><varname>outputHashMode</varname></term>
+
+    <listitem><para>These attributes declare that the derivation is a
+    so-called <emphasis>fixed-output derivation</emphasis>, which
+    means that a cryptographic hash of the output is already known in
+    advance.  When the build of a fixed-output derivation finishes,
+    Nix computes the cryptographic hash of the output and compares it
+    to the hash declared with these attributes.  If there is a
+    mismatch, the build fails.</para>
+
+    <para>The rationale for fixed-output derivations is derivations
+    such as those produced by the <function>fetchurl</function>
+    function.  This function downloads a file from a given URL.  To
+    ensure that the downloaded file has not been modified, the caller
+    must also specify a cryptographic hash of the file.  For example,
+
+<programlisting>
+fetchurl {
+  url = http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz;
+  md5 = "70c9ccf9fac07f762c24f2df2290784d";
+}
+</programlisting>
+
+    It sometimes happens that the URL of the file changes, e.g.,
+    because servers are reorganised or no longer available.  We then
+    must update the call to <function>fetchurl</function>, e.g.,
+
+<programlisting>
+fetchurl {
+  url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz;
+  md5 = "70c9ccf9fac07f762c24f2df2290784d";
+}
+</programlisting>
+
+    If a <function>fetchurl</function> derivation was treated like a
+    normal derivation, the output paths of the derivation and
+    <emphasis>all derivations depending on it</emphasis> would change.
+    For instance, if we were to change the URL of the Glibc source
+    distribution in Nixpkgs (a package on which almost all other
+    packages depend) massive rebuilds would be needed.  This is
+    unfortunate for a change which we know cannot have a real effect
+    as it propagates upwards through the dependency graph.</para>
+
+    <para>For fixed-output derivations, on the other hand, the name of
+    the output path only depends on the <varname>outputHash*</varname>
+    and <varname>name</varname> attributes, while all other attributes
+    are ignored for the purpose of computing the output path.  (The
+    <varname>name</varname> attribute is included because it is part
+    of the path.)</para>
+
+    <para>As an example, here is the (simplified) Nix expression for
+    <varname>fetchurl</varname>:
+
+<programlisting>
+{ stdenv, curl }: # The <command>curl</command> program is used for downloading.
+
+{ url, md5 }:
+
+stdenv.mkDerivation {
+  name = baseNameOf (toString url);
+  builder = ./builder.sh;
+  buildInputs = [ curl ];
+
+  # This is a fixed-output derivation; the output must be a regular
+  # file with MD5 hash <varname>md5</varname>.
+  outputHashMode = "flat";
+  outputHashAlgo = "md5";
+  outputHash = md5;
+
+  inherit url;
+}
+</programlisting>
+
+    </para>
+
+    <para>The <varname>outputHashAlgo</varname> attribute specifies
+    the hash algorithm used to compute the hash.  It can currently be
+    <literal>"md5"</literal>, <literal>"sha1"</literal> or
+    <literal>"sha256"</literal>.</para>
+
+    <para>The <varname>outputHashMode</varname> attribute determines
+    how the hash is computed.  It must be one of the following two
+    values:
+
+    <variablelist>
+
+      <varlistentry><term><literal>"flat"</literal></term>
+
+        <listitem><para>The output must be a non-executable regular
+        file.  If it isn’t, the build fails.  The hash is simply
+        computed over the contents of that file (so it’s equal to what
+        Unix commands like <command>md5sum</command> or
+        <command>sha1sum</command> produce).</para>
+
+        <para>This is the default.</para></listitem>
+
+      </varlistentry>
+
+      <varlistentry><term><literal>"recursive"</literal></term>
+
+        <listitem><para>The hash is computed over the NAR archive dump
+        of the output (i.e., the result of <link
+        linkend="refsec-nix-store-dump"><command>nix-store
+        --dump</command></link>).  In this case, the output can be
+        anything, including a directory tree.</para></listitem>
+
+      </varlistentry>
+
+    </variablelist>
+
+    </para>
+
+    <para>The <varname>outputHash</varname> attribute, finally, must
+    be a string containing the hash in either hexadecimal or base-32
+    notation.  (See the <link
+    linkend="sec-nix-hash"><command>nix-hash</command> command</link>
+    for information about converting to and from base-32
+    notation.)</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><varname>impureEnvVars</varname></term>
+
+    <listitem><para>This attribute allows you to specify a list of
+    environment variables that should be passed from the environment
+    of the calling user to the builder.  Usually, the environment is
+    cleared completely when the builder is executed, but with this
+    attribute you can allow specific environment variables to be
+    passed unmodified.  For example, <function>fetchurl</function> in
+    Nixpkgs has the line
+
+<programlisting>
+impureEnvVars = [ "http_proxy" "https_proxy" <replaceable>...</replaceable> ];
+</programlisting>
+
+    to make it use the proxy server configuration specified by the
+    user in the environment variables <envar>http_proxy</envar> and
+    friends.</para>
+
+    <para>This attribute is only allowed in <link
+    linkend="fixed-output-drvs">fixed-output derivations</link>, where
+    impurities such as these are okay since (the hash of) the output
+    is known in advance.  It is ignored for all other
+    derivations.</para></listitem>
+
+  </varlistentry>
+
+
+  <varlistentry><term><varname>preferLocalBuild</varname></term>
+
+    <listitem><para>If this attribute is set to
+    <literal>true</literal>, it has two effects.  First, the
+    derivation will always be built, not substituted, even if a
+    substitute is available.  Second, if <link
+    linkend="chap-distributed-builds">distributed building is
+    enabled</link>, then, if possible, the derivation will be built
+    locally instead of forwarded to a remote machine.  This is
+    appropriate for trivial builders where the cost of doing a
+    download or remote build would exceed the cost of building
+    locally.  (A small sketch follows this list.)</para></listitem>
+
+  </varlistentry>
+
+</variablelist>
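+<para>As a small sketch (the package name and builder are
+hypothetical), a trivial derivation might declare
+<varname>preferLocalBuild</varname> like this:
+
+<programlisting>
+stdenv.mkDerivation {
+  name = "my-etc-files";      # hypothetical
+  builder = ./builder.sh;     # hypothetical trivial builder
+  preferLocalBuild = true;
+}</programlisting>
+
+</para>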
+
+</section>
+
+
+</section>
+
+
+
+<xi:include href="builtins.xml" />
+
+
+</section>
+
+
+
+<section xml:id='sec-standard-environment'><title>The standard environment</title>
+
+
+<para>The standard environment is used by passing it as an input
+called <envar>stdenv</envar> to the derivation, and then doing
+
+<programlisting>
+source $stdenv/setup</programlisting>
+
+at the top of the builder.</para>
+
+<para>Apart from adding the aforementioned commands to the
+<envar>PATH</envar>, <filename>setup</filename> also does the
+following:
+
+<itemizedlist>
+
+  <listitem><para>All input packages specified in the
+  <envar>buildInputs</envar> environment variable have their
+  <filename>/bin</filename> subdirectory added to <envar>PATH</envar>,
+  their <filename>/include</filename> subdirectory added to the C/C++
+  header file search path, and their <filename>/lib</filename>
+  subdirectory added to the linker search path.  This can be extended.
+  For instance, when the <command>pkgconfig</command> package is
+  used, the subdirectory <filename>/lib/pkgconfig</filename> of each
+  input is added to the <envar>PKG_CONFIG_PATH</envar> environment
+  variable.</para></listitem>
+
+  <listitem><para>The environment variable
+  <envar>NIX_CFLAGS_STRIP</envar> is set so that the compiler strips
+  debug information from object files.  This can be disabled by
+  setting <envar>NIX_STRIP_DEBUG</envar> to
+  <literal>0</literal>.</para></listitem>
+
+</itemizedlist>
+
+</para>
+
+<para>The <filename>setup</filename> script also exports a function
+called <function>genericBuild</function> that knows how to build
+typical Autoconf-style packages.  It can be customised to perform
+builds for any type of package.  It is advisable to use
+<function>genericBuild</function> since it provides facilities that
+are almost always useful such as unpacking of sources, patching of
+sources, nested logging, etc.</para>
+
+<para>The definitive, up-to-date documentation of the generic builder
+is the source itself, which resides in
+<filename>pkgs/stdenv/generic/setup.sh</filename>.</para>
+
+
+<section><title>Customising the generic builder</title>
+
+<para>The operation of the generic builder can be modified in many
+places by setting certain variables.  These <emphasis>hook
+variables</emphasis> are typically set to the name of some shell
+function defined by you.  For instance, to perform some additional
+steps after <command>make install</command> you would set the
+<varname>postInstall</varname> variable:
+
+<programlisting>
+postInstall=myPostInstall
+
+myPostInstall() {
+    mkdir $out/share/extra
+    cp extrafiles/* $out/share/extra
+}</programlisting>
+
+</para>
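+<para>Equivalently (a sketch that mirrors the
+<varname>postInstall</varname> example shown earlier in this
+chapter), such a hook can also be specified as an attribute in the
+Nix expression itself, using an indented string:
+
+<programlisting>
+stdenv.mkDerivation {
+  <replaceable>...</replaceable>
+  postInstall = ''
+    mkdir $out/share/extra
+    cp extrafiles/* $out/share/extra
+  '';
+}</programlisting>
+
+</para>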
+
+
+</section>
+
+
+<section><title>Debugging failed builds</title>
+
+<para>At the beginning of each phase, the set of all shell variables
+is written to the file <filename>env-vars</filename> at the top-level
+build directory.  This is useful for debugging: it allows you to
+recreate the environment in which a build was performed.  For
+instance, if a build fails, then assuming you used the
+<option>-K</option> flag, you can go to the output directory and
+<quote>switch</quote> to the environment of the builder:
+
+<screen>
+$ nix-build -K ./foo.nix
+... fails, keeping build directory `/tmp/nix-1234-0'
+
+$ cd /tmp/nix-1234-0
+
+$ source env-vars
+
+<lineannotation>(edit some files...)</lineannotation>
+
+$ make
+
+<lineannotation>(execution continues with the same GCC, make, etc.)</lineannotation></screen>
+
+</para>
+
+</section>
+
+
+</section>
+
+
+</chapter>
diff --git a/doc/signing.txt b/doc/signing.txt
new file mode 100644
index 000000000000..1d042e95e220
--- /dev/null
+++ b/doc/signing.txt
@@ -0,0 +1,24 @@
+Generate a private key:
+
+$ (umask 277 && openssl genrsa -out /nix/etc/nix/signing-key.sec 2048)
+
+The private key should be kept secret (only readable to the Nix daemon
+user).
+
+
+Generate the corresponding public key:
+
+$ openssl rsa -in /nix/etc/nix/signing-key.sec -pubout > /nix/etc/nix/signing-key.pub
+
+The public key should be copied to all machines to which you want to
+export store paths.
+
+
+Signing:
+
+$ nix-hash --type sha256 --flat svn.nar | openssl rsautl -sign -inkey mykey.sec > svn.nar.sign
+
+
+Verifying a signature:
+
+$ test "$(nix-hash --type sha256 --flat svn.nar)" = "$(openssl rsautl -verify -inkey mykey.pub -pubin -in svn.nar.sign)"
diff --git a/local.mk b/local.mk
new file mode 100644
index 000000000000..d9e13a922c8b
--- /dev/null
+++ b/local.mk
@@ -0,0 +1,13 @@
+ifeq ($(MAKECMDGOALS), dist)
+  dist-files += $(shell git ls-files)
+endif
+
+dist-files += configure config.h.in nix.spec
+
+clean-files += Makefile.config
+
+GLOBAL_CXXFLAGS += -I . -I src -I src/libutil -I src/libstore -I src/libmain -I src/libexpr
+
+$(foreach i, config.h $(call rwildcard, src/lib*, *.hh), $(eval $(call install-file-in, $(i), $(includedir)/nix, 0644)))
+
+$(foreach i, $(call rwildcard, src/boost, *.hpp), $(eval $(call install-file-in, $(i), $(includedir)/nix/$(patsubst src/%/,%,$(dir $(i))), 0644)))
diff --git a/misc/emacs/README b/misc/emacs/README
new file mode 100644
index 000000000000..8c87f67d5718
--- /dev/null
+++ b/misc/emacs/README
@@ -0,0 +1,10 @@
+The Nix Emacs mode supports syntax highlighting, somewhat sensible
+indenting, and refilling of comments.
+
+To enable Nix mode in Emacs, add something like this to your ~/.emacs
+file:
+
+  (load "/nix/share/emacs/site-lisp/nix-mode.el")
+
+This automatically causes Nix mode to be activated for all files with
+extension `.nix'.
diff --git a/misc/emacs/local.mk b/misc/emacs/local.mk
new file mode 100644
index 000000000000..8e06b881bcdf
--- /dev/null
+++ b/misc/emacs/local.mk
@@ -0,0 +1 @@
+$(eval $(call install-data-in,$(d)/nix-mode.el,$(datadir)/emacs/site-lisp))
diff --git a/misc/emacs/nix-mode.el b/misc/emacs/nix-mode.el
new file mode 100644
index 000000000000..fc64523f1a28
--- /dev/null
+++ b/misc/emacs/nix-mode.el
@@ -0,0 +1,95 @@
+;;; nix-mode.el --- Major mode for editing Nix expressions
+
+;; Author: Eelco Dolstra
+;; URL: https://github.com/NixOS/nix/tree/master/misc/emacs
+;; Version: 1.0
+
+;;; Commentary:
+
+;;; Code:
+
+(defconst nix-font-lock-keywords
+  '("\\<if\\>" "\\<then\\>" "\\<else\\>" "\\<assert\\>" "\\<with\\>"
+    "\\<let\\>" "\\<in\\>" "\\<rec\\>" "\\<inherit\\>" "\\<or\\>"
+    ("\\<true\\>" . font-lock-builtin-face)
+    ("\\<false\\>" . font-lock-builtin-face)
+    ("\\<null\\>" . font-lock-builtin-face)
+    ("\\<import\\>" . font-lock-builtin-face)
+    ("\\<derivation\\>" . font-lock-builtin-face)
+    ("\\<baseNameOf\\>" . font-lock-builtin-face)
+    ("\\<toString\\>" . font-lock-builtin-face)
+    ("\\<isNull\\>" . font-lock-builtin-face)
+    ("[a-zA-Z][a-zA-Z0-9\\+-\\.]*:[a-zA-Z0-9%/\\?:@&=\\+\\$,_\\.!~\\*'-]+"
+     . font-lock-constant-face)
+    ("\\<\\([a-zA-Z_][a-zA-Z0-9_'\-\.]*\\)[ \t]*="
+     (1 font-lock-variable-name-face nil nil))
+    ("<[a-zA-Z0-9._\\+-]+\\(/[a-zA-Z0-9._\\+-]+\\)*>"
+     . font-lock-constant-face)
+    ("[a-zA-Z0-9._\\+-]*\\(/[a-zA-Z0-9._\\+-]+\\)+"
+     . font-lock-constant-face))
+  "Font lock keywords for nix.")
+
+(defvar nix-mode-syntax-table
+  (let ((table (make-syntax-table)))
+    (modify-syntax-entry ?/ ". 14" table)
+    (modify-syntax-entry ?* ". 23" table)
+    (modify-syntax-entry ?# "< b" table)
+    (modify-syntax-entry ?\n "> b" table)
+    table)
+  "Syntax table for Nix mode.")
+
+(defun nix-indent-line ()
+  "Indent current line in a Nix expression."
+  (interactive)
+  (indent-relative-maybe))
+
+
+;;;###autoload
+(define-derived-mode nix-mode fundamental-mode "Nix"
+  "Major mode for editing Nix expressions.
+
+The following commands may be useful:
+
+  '\\[newline-and-indent]'
+    Insert a newline and move the cursor to align with the previous
+    non-empty line.
+
+  '\\[fill-paragraph]'
+    Refill a paragraph so that all lines are at most `fill-column'
+    columns long.  This should do the right thing for comments beginning
+    with `#'.  However, this command doesn't work properly yet if the
+    comment is adjacent to code (i.e., no intervening empty lines).
+    In that case, select the text to be refilled and use
+    `\\[fill-region]' instead.
+
+The hook `nix-mode-hook' is run when Nix mode is started.
+
+\\{nix-mode-map}
+"
+  (set-syntax-table nix-mode-syntax-table)
+
+  ;; Font lock support.
+  (setq font-lock-defaults '(nix-font-lock-keywords nil nil nil nil))
+
+  ;; Automatic indentation [C-j].
+  (set (make-local-variable 'indent-line-function) 'nix-indent-line)
+
+  ;; Indenting of comments.
+  (set (make-local-variable 'comment-start) "# ")
+  (set (make-local-variable 'comment-end) "")
+  (set (make-local-variable 'comment-start-skip) "\\(^\\|\\s-\\);?#+ *")
+
+  ;; Filling of comments.
+  (set (make-local-variable 'adaptive-fill-mode) t)
+  (set (make-local-variable 'paragraph-start) "[ \t]*\\(#+[ \t]*\\)?$")
+  (set (make-local-variable 'paragraph-separate) paragraph-start))
+
+
+;;;###autoload
+(progn
+  (add-to-list 'auto-mode-alist '("\\.nix\\'" . nix-mode))
+  (add-to-list 'auto-mode-alist '("\\.nix.in\\'" . nix-mode)))
+
+(provide 'nix-mode)
+
+;;; nix-mode.el ends here
diff --git a/misc/systemd/local.mk b/misc/systemd/local.mk
new file mode 100644
index 000000000000..004549fd2776
--- /dev/null
+++ b/misc/systemd/local.mk
@@ -0,0 +1,5 @@
+ifeq ($(OS), Linux)
+
+  $(foreach n, nix-daemon.socket nix-daemon.service, $(eval $(call install-file-in, $(d)/$(n), $(prefix)/lib/systemd/system, 0644)))
+
+endif
diff --git a/misc/systemd/nix-daemon.service.in b/misc/systemd/nix-daemon.service.in
new file mode 100644
index 000000000000..5fc04a3f5713
--- /dev/null
+++ b/misc/systemd/nix-daemon.service.in
@@ -0,0 +1,9 @@
+[Unit]
+Description=Nix Daemon
+RequiresMountsFor=@storedir@
+RequiresMountsFor=@localstatedir@
+ConditionPathIsReadWrite=@localstatedir@/nix/daemon-socket
+
+[Service]
+ExecStart=@@bindir@/nix-daemon nix-daemon --daemon
+KillMode=process
diff --git a/misc/systemd/nix-daemon.socket.in b/misc/systemd/nix-daemon.socket.in
new file mode 100644
index 000000000000..9ed39ffe6eb2
--- /dev/null
+++ b/misc/systemd/nix-daemon.socket.in
@@ -0,0 +1,11 @@
+[Unit]
+Description=Nix Daemon Socket
+Before=multi-user.target
+RequiresMountsFor=@storedir@
+ConditionPathIsReadWrite=@localstatedir@/nix/daemon-socket
+
+[Socket]
+ListenStream=@localstatedir@/nix/daemon-socket/socket
+
+[Install]
+WantedBy=sockets.target
diff --git a/misc/upstart/local.mk b/misc/upstart/local.mk
new file mode 100644
index 000000000000..a73dc061e4fc
--- /dev/null
+++ b/misc/upstart/local.mk
@@ -0,0 +1,5 @@
+ifeq ($(OS), Linux)
+
+  $(foreach n, nix-daemon.conf, $(eval $(call install-file-in, $(d)/$(n), $(sysconfdir)/init, 0644)))
+
+endif
diff --git a/misc/upstart/nix-daemon.conf.in b/misc/upstart/nix-daemon.conf.in
new file mode 100644
index 000000000000..0e806edbd770
--- /dev/null
+++ b/misc/upstart/nix-daemon.conf.in
@@ -0,0 +1,5 @@
+description "Nix Daemon"
+start on filesystem
+stop on shutdown
+respawn
+exec @bindir@/nix-daemon --daemon
diff --git a/misc/vim/syntax/nix.vim b/misc/vim/syntax/nix.vim
new file mode 100644
index 000000000000..ddddea5f0596
--- /dev/null
+++ b/misc/vim/syntax/nix.vim
@@ -0,0 +1,37 @@
+" Vim syntax file
+" Language:	nix
+" Maintainer:	Marc Weber <marco-oweber@gmx.de>
+"               Modify and commit if you feel that way
+" Last Change:	2007 Dec
+
+" Quit when a (custom) syntax file was already loaded
+if exists("b:current_syntax")
+  finish
+endif
+
+syn keyword	nixKeyword	let throw inherit import true false null with
+syn keyword	nixConditional	if else then
+syn keyword     nixBrace        ( ) { } =
+syn keyword     nixBuiltin         __currentSystem __currentTime __isFunction __getEnv __trace __toPath __pathExists 
+  \ __readFile __toXML __toFile __filterSource __attrNames __getAttr __hasAttr __isAttrs __listToAttrs __isList 
+  \ __head __tail __add __sub __lessThan __substring __stringLength
+
+syn match nixAttr "\w\+\ze\s*="
+syn match nixFuncArg "\zs\w\+\ze\s*:"
+syn region nixStringParam start=+\${+ end=+}+
+syn region nixMultiLineComment start=+/\*+ skip=+\\"+ end=+\*/+
+syn match  nixEndOfLineComment "#.*$"
+syn region nixStringIndented start=+''+ skip=+'''\|''${\|"+ end=+''+ contains=nixStringParam
+syn region nixString         start=+"+ skip=+\\"+ end=+"+ contains=nixStringParam
+
+hi def link nixKeyword       Keyword
+hi def link nixConditional   Conditional
+hi def link nixBrace         Special
+hi def link nixString        String
+hi def link nixStringIndented String
+hi def link nixBuiltin       Special
+hi def link nixStringParam   Macro
+hi def link nixMultiLineComment Comment
+hi def link nixEndOfLineComment Comment
+hi def link nixAttr        Identifier
+hi def link nixFuncArg     Identifier
diff --git a/README.md b/mk/README.md
index e4cd742b4c7f..e4cd742b4c7f 100644
--- a/README.md
+++ b/mk/README.md
diff --git a/clean.mk b/mk/clean.mk
index ce9afb3b0db7..ce9afb3b0db7 100644
--- a/clean.mk
+++ b/mk/clean.mk
diff --git a/dist.mk b/mk/dist.mk
index 794b277713d4..794b277713d4 100644
--- a/dist.mk
+++ b/mk/dist.mk
diff --git a/functions.mk b/mk/functions.mk
index 45d917399391..45d917399391 100644
--- a/functions.mk
+++ b/mk/functions.mk
diff --git a/install.mk b/mk/install.mk
index dad0fd8533ab..dad0fd8533ab 100644
--- a/install.mk
+++ b/mk/install.mk
diff --git a/jars.mk b/mk/jars.mk
index 99470f37435b..99470f37435b 100644
--- a/jars.mk
+++ b/mk/jars.mk
diff --git a/lib.mk b/mk/lib.mk
index 56e162d5007c..56e162d5007c 100644
--- a/lib.mk
+++ b/mk/lib.mk
diff --git a/libraries.mk b/mk/libraries.mk
index 3b91c699e65a..3b91c699e65a 100644
--- a/libraries.mk
+++ b/mk/libraries.mk
diff --git a/patterns.mk b/mk/patterns.mk
index 6b2cfd017050..6b2cfd017050 100644
--- a/patterns.mk
+++ b/mk/patterns.mk
diff --git a/programs.mk b/mk/programs.mk
index 72afdf95251b..72afdf95251b 100644
--- a/programs.mk
+++ b/mk/programs.mk
diff --git a/templates.mk b/mk/templates.mk
index ab99168bb7a5..ab99168bb7a5 100644
--- a/templates.mk
+++ b/mk/templates.mk
diff --git a/tests.mk b/mk/tests.mk
index 004a48028616..004a48028616 100644
--- a/tests.mk
+++ b/mk/tests.mk
diff --git a/tracing.mk b/mk/tracing.mk
index 13912d3c7821..13912d3c7821 100644
--- a/tracing.mk
+++ b/mk/tracing.mk
diff --git a/nix.spec.in b/nix.spec.in
new file mode 100644
index 000000000000..7d7775b87c56
--- /dev/null
+++ b/nix.spec.in
@@ -0,0 +1,194 @@
+%global nixbld_user "nix-builder-"
+%global nixbld_group "nix-builders"
+
+Summary: The Nix software deployment system
+Name: nix
+Version: @PACKAGE_VERSION@
+Release: 2%{?dist}
+License: LGPLv2+
+%if 0%{?rhel}
+Group: Applications/System
+%endif
+URL: http://nixos.org/
+Source0: %{name}-%{version}.tar.bz2
+%if 0%{?el5}
+BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
+%endif
+BuildRequires: perl(DBD::SQLite)
+BuildRequires: perl(DBI)
+BuildRequires: perl(WWW::Curl)
+BuildRequires: perl(ExtUtils::ParseXS)
+Requires: /usr/bin/perl
+Requires: curl
+Requires: perl-DBD-SQLite
+Requires: bzip2
+Requires: gzip
+Requires: xz
+BuildRequires: bzip2-devel
+BuildRequires: sqlite-devel
+
+# Hack to make that shitty RPM scanning hack shut up.
+Provides: perl(Nix::SSH)
+
+%description
+Nix is a purely functional package manager. It allows multiple
+versions of a package to be installed side-by-side, ensures that
+dependency specifications are complete, supports atomic upgrades and
+rollbacks, allows non-root users to install software, and has many
+other features. It is the basis of the NixOS Linux distribution, but
+it can be used equally well under other Unix systems.
+
+%package        devel
+Summary:        Development files for %{name}
+%if 0%{?rhel}
+Group:          Development/Libraries
+%endif
+Requires:       %{name}%{?_isa} = %{version}-%{release}
+
+%description   devel
+The %{name}-devel package contains libraries and header files for
+developing applications that use %{name}.
+
+
+%package doc
+Summary:        Documentation files for %{name}
+%if 0%{?rhel}
+Group:          Documentation
+%endif
+BuildArch:      noarch
+Requires:       %{name} = %{version}-%{release}
+
+%description   doc
+The %{name}-doc package contains documentation files for %{name}.
+
+
+%package -n emacs-%{name}
+Summary:        Nix mode for Emacs
+%if 0%{?rhel}
+Group:          Applications/Editors
+%endif
+BuildArch:      noarch
+BuildRequires:  emacs
+Requires:       emacs(bin) >= %{_emacs_version}
+
+%description -n emacs-%{name}
+This package provides a major mode for editing Nix expressions.
+
+%package -n emacs-%{name}-el
+Summary:        Elisp source files for emacs-%{name}
+%if 0%{?rhel}
+Group:          Applications/Editors
+%endif
+BuildArch:      noarch
+Requires:       emacs-%{name} = %{version}-%{release}
+
+%description -n emacs-%{name}-el
+This package contains the elisp source file for the Nix major mode for
+GNU Emacs. You do not need to install this package to run Nix. Install
+the emacs-%{name} package to edit Nix expressions with GNU Emacs.
+
+
+%prep
+%setup -q
+# Install Perl modules to vendor_perl
+# configure.ac needs to be changed to make this global; however, this will
+# also affect NixOS. Use discretion.
+%{__sed} -i 's|perl5/site_perl/$perlversion/$perlarchname|perl5/vendor_perl|' \
+  configure
+
+
+%build
+extraFlags=
+# - override docdir so large documentation files are owned by the
+#   -doc subpackage
+# - set localstatedir by hand to the preferred nix value
+%configure --localstatedir=/nix/var \
+           --docdir=%{_defaultdocdir}/%{name}-doc-%{version} \
+           $extraFlags
+make %{?_smp_flags}
+%{_emacs_bytecompile} misc/emacs/nix-mode.el
+
+
+%install
+%if 0%{?el5}
+rm -rf $RPM_BUILD_ROOT
+%endif
+make DESTDIR=$RPM_BUILD_ROOT install
+
+find $RPM_BUILD_ROOT -name '*.la' -exec rm -f {} ';'
+
+# make per-user directories
+for d in profiles gcroots;
+do
+  mkdir -p $RPM_BUILD_ROOT/nix/var/nix/$d/per-user
+  chmod 1777 $RPM_BUILD_ROOT/nix/var/nix/$d/per-user
+done
+
+# fix permission of nix profile
+# (until this is fixed in the relevant Makefile)
+chmod -x $RPM_BUILD_ROOT%{_sysconfdir}/profile.d/nix.sh
+
+# Copy the byte-compiled mode file by hand
+cp -p misc/emacs/nix-mode.elc $RPM_BUILD_ROOT%{_emacs_sitelispdir}/
+
+# we ship this file in the base package
+rm -f $RPM_BUILD_ROOT%{_defaultdocdir}/%{name}-doc-%{version}/README
+
+# Get rid of Upstart job.
+rm -rf $RPM_BUILD_ROOT%{_sysconfdir}/init
+
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+
+%pre
+getent group %{nixbld_group} >/dev/null || groupadd -r %{nixbld_group}
+for i in $(seq 10);
+do
+  getent passwd %{nixbld_user}$i >/dev/null || \
+    useradd -r -g %{nixbld_group} -G %{nixbld_group} -d /var/empty \
+      -s %{_sbindir}/nologin \
+      -c "Nix build user $i" %{nixbld_user}$i
+done
+
+%post
+chgrp %{nixbld_group} /nix/store
+chmod 1775 /nix/store
+%if ! 0%{?rhel}
+# Enable and start Nix worker
+systemctl enable nix-daemon.socket nix-daemon.service
+systemctl start  nix-daemon.socket
+%endif
+
+%files
+%{_bindir}/nix-*
+%{_libdir}/*.so
+%{perl_vendorarch}/*
+%exclude %dir %{perl_vendorarch}/auto/
+%{_prefix}/libexec/*
+%if ! 0%{?rhel}
+%{_prefix}/lib/systemd/system/nix-daemon.socket
+%{_prefix}/lib/systemd/system/nix-daemon.service
+%endif
+%{_datadir}/emacs/site-lisp/nix-mode.el
+%{_datadir}/nix
+%{_mandir}/man1/*.1*
+%{_mandir}/man5/*.5*
+%{_mandir}/man8/*.8*
+%config(noreplace) %{_sysconfdir}/profile.d/nix.sh
+/nix
+
+%files devel
+%{_includedir}/nix
+
+%files doc
+%docdir %{_defaultdocdir}/%{name}-doc-%{version}
+%{_defaultdocdir}/%{name}-doc-%{version}
+
+%files -n emacs-%{name}
+%{_emacs_sitelispdir}/*.elc
+#{_emacs_sitestartdir}/*.el
+
+%files -n emacs-%{name}-el
+%{_emacs_sitelispdir}/*.el
diff --git a/perl/MANIFEST b/perl/MANIFEST
new file mode 100644
index 000000000000..08897647c978
--- /dev/null
+++ b/perl/MANIFEST
@@ -0,0 +1,7 @@
+Changes
+Makefile.PL
+MANIFEST
+Nix.xs
+README
+t/Nix.t
+lib/Nix.pm
diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in
new file mode 100644
index 000000000000..bc51310e5aff
--- /dev/null
+++ b/perl/lib/Nix/Config.pm.in
@@ -0,0 +1,42 @@
+package Nix::Config;
+
+$version = "@PACKAGE_VERSION@";
+
+$binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@";
+$libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@libexecdir@";
+$stateDir = $ENV{"NIX_STATE_DIR"} || "@localstatedir@/nix";
+$manifestDir = $ENV{"NIX_MANIFESTS_DIR"} || "@localstatedir@/nix/manifests";
+$logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix";
+$confDir = $ENV{"NIX_CONF_DIR"} || "@sysconfdir@/nix";
+$storeDir = $ENV{"NIX_STORE_DIR"} || "@storedir@";
+
+$bzip2 = "@bzip2@";
+$xz = "@xz@";
+$curl = "@curl@";
+$openssl = "@openssl@";
+
+$useBindings = "@perlbindings@" eq "yes";
+
+%config = ();
+
+sub readConfig {
+    if (defined $ENV{'_NIX_OPTIONS'}) {
+        foreach my $s (split '\n', $ENV{'_NIX_OPTIONS'}) {
+            my ($n, $v) = split '=', $s, 2;
+            $config{$n} = $v;
+        }
+        return;
+    }
+
+    my $config = "$confDir/nix.conf";
+    return unless -f $config;
+
+    open CONFIG, "<$config" or die "cannot open ‘$config’";
+    while (<CONFIG>) {
+        /^\s*([\w\-\.]+)\s*=\s*(.*)$/ or next;
+        $config{$1} = $2;
+    }
+    close CONFIG;
+}
+
+return 1;
diff --git a/perl/lib/Nix/CopyClosure.pm b/perl/lib/Nix/CopyClosure.pm
new file mode 100644
index 000000000000..10d26c3a71f0
--- /dev/null
+++ b/perl/lib/Nix/CopyClosure.pm
@@ -0,0 +1,115 @@
+package Nix::CopyClosure;
+
+use utf8;
+use strict;
+use Nix::Config;
+use Nix::Store;
+use Nix::SSH;
+use List::Util qw(sum);
+use IPC::Open2;
+
+
+sub copyToOpen {
+    my ($from, $to, $sshHost, $storePaths, $includeOutputs, $dryRun, $sign, $useSubstitutes) = @_;
+
+    $useSubstitutes = 0 if $dryRun || !defined $useSubstitutes;
+
+    # Get the closure of this path.
+    my @closure = reverse(topoSortPaths(computeFSClosure(0, $includeOutputs,
+        map { followLinksToStorePath $_ } @{$storePaths})));
+
+    # Send the "query valid paths" command with the "lock" option
+    # enabled. This prevents a race where the remote host
+    # garbage-collect paths that are already there. Optionally, ask
+    # the remote host to substitute missing paths.
+    syswrite($to, pack("L<x4L<x4L<x4", 1, 1, $useSubstitutes)) or die;
+    writeStrings(\@closure, $to);
+
+    # Get back the set of paths that are already valid on the remote host.
+    my %present;
+    $present{$_} = 1 foreach readStrings($from);
+
+    my @missing = grep { !$present{$_} } @closure;
+    return if !@missing;
+
+    my $missingSize = 0;
+    $missingSize += (queryPathInfo($_, 1))[3] foreach @missing;
+
+    printf STDERR "copying %d missing paths (%.2f MiB) to ‘$sshHost’...\n",
+        scalar(@missing), $missingSize / (1024**2);
+    return if $dryRun;
+
+    # Send the "import paths" command.
+    syswrite($to, pack("L<x4", 4)) or die;
+    exportPaths(fileno($to), $sign, @missing);
+    readInt($from) == 1 or die "remote machine ‘$sshHost’ failed to import closure\n";
+}
+
+
+sub copyTo {
+    my ($sshHost, $sshOpts, $storePaths, $includeOutputs, $dryRun, $sign, $useSubstitutes) = @_;
+
+    # Connect to the remote host.
+    my ($from, $to);
+    eval {
+        ($from, $to) = connectToRemoteNix($sshHost, $sshOpts);
+    };
+    if ($@) {
+        chomp $@;
+        warn "$@; falling back to old closure copying method\n";
+        return oldCopyTo(@_);
+    }
+
+    copyToOpen($from, $to, $sshHost, $storePaths, $includeOutputs, $dryRun, $sign, $useSubstitutes);
+
+    close $to;
+}
+
+
+# For backwards compatibility with Nix <= 1.7. Will be removed
+# eventually.
+sub oldCopyTo {
+    my ($sshHost, $sshOpts, $storePaths, $includeOutputs, $dryRun, $sign, $useSubstitutes) = @_;
+
+    # Get the closure of this path.
+    my @closure = reverse(topoSortPaths(computeFSClosure(0, $includeOutputs,
+        map { followLinksToStorePath $_ } @{$storePaths})));
+
+    # Optionally use substitutes on the remote host.
+    if (!$dryRun && $useSubstitutes) {
+        system "ssh $sshHost @{$sshOpts} @globalSshOpts nix-store -r --ignore-unknown @closure";
+        # Ignore exit status because this is just an optimisation.
+    }
+
+    # Ask the remote host which paths are invalid.  Because of limits
+    # to the command line length, do this in chunks.  Eventually,
+    # we'll want to use ‘--from-stdin’, but we can't rely on the
+    # target having this option yet.
+    my @missing;
+    my $missingSize = 0;
+    while (scalar(@closure) > 0) {
+        my @ps = splice(@closure, 0, 1500);
+        open(READ, "set -f; ssh $sshHost @{$sshOpts} @globalSshOpts nix-store --check-validity --print-invalid @ps|");
+        while (<READ>) {
+            chomp;
+            push @missing, $_;
+            my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($_, 1);
+            $missingSize += $narSize;
+        }
+        close READ or die;
+    }
+
+    # Export the store paths and import them on the remote machine.
+    if (scalar @missing > 0) {
+        print STDERR "copying ", scalar @missing, " missing paths to ‘$sshHost’...\n";
+        print STDERR "@missing\n";
+        unless ($dryRun) {
+            open SSH, "| ssh $sshHost @{$sshOpts} @globalSshOpts 'nix-store --import' > /dev/null" or die;
+            exportPaths(fileno(SSH), $sign, @missing);
+            close SSH or die "copying store paths to remote machine ‘$sshHost’ failed: $?";
+        }
+    }
+}
+
+
+1;
diff --git a/perl/lib/Nix/Crypto.pm b/perl/lib/Nix/Crypto.pm
new file mode 100644
index 000000000000..0286e88d3d28
--- /dev/null
+++ b/perl/lib/Nix/Crypto.pm
@@ -0,0 +1,42 @@
+package Nix::Crypto;
+
+use strict;
+use MIME::Base64;
+use Nix::Store;
+use Nix::Config;
+use IPC::Open2;
+
+our @ISA = qw(Exporter);
+our @EXPORT = qw(signString isValidSignature);
+
+sub signString {
+    my ($privateKeyFile, $s) = @_;
+    my $hash = hashString("sha256", 0, $s);
+    my ($from, $to);
+    my $pid = open2($from, $to, $Nix::Config::openssl, "rsautl", "-sign", "-inkey", $privateKeyFile);
+    print $to $hash;
+    close $to;
+    local $/ = undef;
+    my $sig = <$from>;
+    close $from;
+    waitpid($pid, 0);
+    die "$0: OpenSSL returned exit code $? while signing hash\n" if $? != 0;
+    my $sig64 = encode_base64($sig, "");
+    return $sig64;
+}
+
+sub isValidSignature {
+    my ($publicKeyFile, $sig64, $s) = @_;
+    my ($from, $to);
+    my $pid = open2($from, $to, $Nix::Config::openssl, "rsautl", "-verify", "-inkey", $publicKeyFile, "-pubin");
+    print $to decode_base64($sig64);
+    close $to;
+    my $decoded = <$from>;
+    close $from;
+    waitpid($pid, 0);
+    return 0 if $? != 0;
+    my $hash = hashString("sha256", 0, $s);
+    return $decoded eq $hash;
+}
+
+1;
diff --git a/perl/lib/Nix/GeneratePatches.pm b/perl/lib/Nix/GeneratePatches.pm
new file mode 100644
index 000000000000..612c8a3a15ba
--- /dev/null
+++ b/perl/lib/Nix/GeneratePatches.pm
@@ -0,0 +1,340 @@
+package Nix::GeneratePatches;
+
+use strict;
+use File::Temp qw(tempdir);
+use File::stat;
+use Nix::Config;
+use Nix::Manifest;
+
+our @ISA = qw(Exporter);
+our @EXPORT = qw(generatePatches propagatePatches copyPatches);
+
+
+# Some patch generation options.
+
+# Max size of NAR archives to generate patches for.
+my $maxNarSize = $ENV{"NIX_MAX_NAR_SIZE"};
+$maxNarSize = 160 * 1024 * 1024 if !defined $maxNarSize;
+
+# If patch is bigger than this fraction of full archive, reject.
+my $maxPatchFraction = $ENV{"NIX_PATCH_FRACTION"};
+$maxPatchFraction = 0.60 if !defined $maxPatchFraction;
+
+my $timeLimit = $ENV{"NIX_BSDIFF_TIME_LIMIT"};
+$timeLimit = 180 if !defined $timeLimit;
+
+my $hashAlgo = "sha256";
+
+
+sub findOutputPaths {
+    my $narFiles = shift;
+
+    my %outPaths;
+    
+    foreach my $p (keys %{$narFiles}) {
+
+        # Ignore derivations.
+        next if ($p =~ /\.drv$/);
+        
+        # Ignore builders (too much ambiguity -- they're all called
+        # `builder.sh').
+        next if ($p =~ /\.sh$/);
+        next if ($p =~ /\.patch$/);
+        
+        # Don't bother including tar files etc.
+        next if ($p =~ /\.tar$/ || $p =~ /\.tar\.(gz|bz2|Z|lzma|xz)$/ || $p =~ /\.zip$/ || $p =~ /\.bin$/ || $p =~ /\.tgz$/ || $p =~ /\.rpm$/ || $p =~ /cvs-export$/ || $p =~ /fetchhg$/);
+
+        $outPaths{$p} = 1;
+    }
+
+    return %outPaths;
+}
+
+
+sub getNameVersion {
+    my $p = shift;
+    $p =~ /\/[0-9a-z]+((?:-[a-zA-Z][^\/-]*)+)([^\/]*)$/;
+    my $name = $1;
+    my $version = $2;
+    return undef unless defined $name && defined $version;
+    $name =~ s/^-//;
+    $version =~ s/^-//;
+    return ($name, $version);
+}
+
+
+# A quick hack to get a measure of the `distance' between two
+# versions: it's just the position of the first character that differs
+# (or 999 if they are the same).
+sub versionDiff {
+    my $s = shift;
+    my $t = shift;
+    my $i;
+    return 999 if $s eq $t;
+    for ($i = 0; $i < length $s; $i++) {
+        return $i if $i >= length $t or
+            substr($s, $i, 1) ne substr($t, $i, 1);
+    }
+    return $i;
+}
+
+
+sub getNarBz2 {
+    my $narPath = shift;
+    my $narFiles = shift;
+    my $storePath = shift;
+    
+    my $narFileList = $$narFiles{$storePath};
+    die "missing path $storePath" unless defined $narFileList;
+
+    my $narFile = @{$narFileList}[0];
+    die unless defined $narFile;
+
+    $narFile->{url} =~ /\/([^\/]+)$/;
+    die unless defined $1;
+    return "$narPath/$1";
+}
+
+
+sub containsPatch {
+    my $patches = shift;
+    my $storePath = shift;
+    my $basePath = shift;
+    my $patchList = $$patches{$storePath};
+    return 0 if !defined $patchList;
+    my $found = 0;
+    foreach my $patch (@{$patchList}) {
+        # !!! baseHash might differ
+        return 1 if $patch->{basePath} eq $basePath;
+    }
+    return 0;
+}
+
+
+sub generatePatches {
+    my ($srcNarFiles, $dstNarFiles, $srcPatches, $dstPatches, $narPath, $patchesPath, $patchesURL, $tmpDir) = @_;
+
+    my %srcOutPaths = findOutputPaths $srcNarFiles;
+    my %dstOutPaths = findOutputPaths $dstNarFiles;
+
+    # For each output path in the destination, see if we need to / can
+    # create a patch.
+
+    print STDERR "creating patches...\n";
+
+    foreach my $p (keys %dstOutPaths) {
+
+        # If exactly the same path already exists in the source, skip it.
+        next if defined $srcOutPaths{$p};
+    
+        print "  $p\n";
+
+        # If not, then we should find the paths in the source that are
+        # `most' likely to be present on a system that wants to
+        # install this path.
+
+        (my $name, my $version) = getNameVersion $p;
+        next unless defined $name && defined $version;
+
+        my @closest = ();
+        my $closestVersion;
+        my $minDist = -1; # actually, larger means closer
+
+        # Find all source paths with the same name.
+
+        foreach my $q (keys %srcOutPaths) {
+            (my $name2, my $version2) = getNameVersion $q;
+            next unless defined $name2 && defined $version2;
+
+            if ($name eq $name2) {
+
+                my $srcSystem = @{$$dstNarFiles{$p}}[0]->{system};
+                my $dstSystem = @{$$srcNarFiles{$q}}[0]->{system};
+                if (defined $srcSystem && defined $dstSystem && $srcSystem ne $dstSystem) {
+                    print "    SKIPPING $q due to different systems ($srcSystem vs. $dstSystem)\n";
+                    next;
+                }
+
+                # If the sizes differ too much, then skip.  This
+                # disambiguates between, e.g., a real component and a
+                # wrapper component (cf. Firefox in Nixpkgs).
+                my $srcSize = @{$$srcNarFiles{$q}}[0]->{size};
+                my $dstSize = @{$$dstNarFiles{$p}}[0]->{size};
+                my $ratio = $srcSize / $dstSize;
+                $ratio = 1 / $ratio if $ratio < 1;
+                # print "  SIZE $srcSize $dstSize $ratio $q\n";
+
+                if ($ratio >= 3) {
+                    print "    SKIPPING $q due to size ratio $ratio ($srcSize vs. $dstSize)\n";
+                    next;
+                }
+
+                # If there are multiple matching names, include the
+                # ones with the closest version numbers.
+                my $dist = versionDiff $version, $version2;
+                if ($dist > $minDist) {
+                    $minDist = $dist;
+                    @closest = ($q);
+                    $closestVersion = $version2;
+                } elsif ($dist == $minDist) {
+                    push @closest, $q;
+                }
+            }
+        }
+
+        if (scalar(@closest) == 0) {
+            print "    NO BASE: $p\n";
+            next;
+        }
+
+        foreach my $closest (@closest) {
+
+            # Generate a patch between $closest and $p.
+            print STDERR "  $p <- $closest\n";
+
+            # If the patch already exists, skip it.
+            if (containsPatch($srcPatches, $p, $closest) ||
+                containsPatch($dstPatches, $p, $closest))
+            {
+                print "    skipping, already exists\n";
+                next;
+            }
+
+            my $srcNarBz2 = getNarBz2 $narPath, $srcNarFiles, $closest;
+            my $dstNarBz2 = getNarBz2 $narPath, $dstNarFiles, $p;
+
+            if (! -f $srcNarBz2) {
+                warn "patch source archive $srcNarBz2 is missing\n";
+                next;
+            }
+
+            system("$Nix::Config::bzip2 -d < $srcNarBz2 > $tmpDir/A") == 0
+                or die "cannot unpack $srcNarBz2";
+
+            if (stat("$tmpDir/A")->size >= $maxNarSize) {
+                print "    skipping, source is too large\n";
+                next;
+            }
+        
+            system("$Nix::Config::bzip2 -d < $dstNarBz2 > $tmpDir/B") == 0
+                or die "cannot unpack $dstNarBz2";
+
+            if (stat("$tmpDir/B")->size >= $maxNarSize) {
+                print "    skipping, destination is too large\n";
+                next;
+            }
+        
+            my $time1 = time();
+            my $res = system("ulimit -t $timeLimit; $Nix::Config::libexecDir/nix/bsdiff $tmpDir/A $tmpDir/B $tmpDir/DIFF");
+            my $time2 = time();
+            if ($res) {
+                warn "binary diff computation aborted after ", $time2 - $time1, " seconds\n";
+                next;
+            }
+
+            my $baseHash = `$Nix::Config::binDir/nix-hash --flat --type $hashAlgo --base32 $tmpDir/A` or die;
+            chomp $baseHash;
+
+            my $narHash = `$Nix::Config::binDir/nix-hash --flat --type $hashAlgo --base32 $tmpDir/B` or die;
+            chomp $narHash;
+
+            my $narDiffHash = `$Nix::Config::binDir/nix-hash --flat --type $hashAlgo --base32 $tmpDir/DIFF` or die;
+            chomp $narDiffHash;
+
+            my $narDiffSize = stat("$tmpDir/DIFF")->size;
+            my $dstNarBz2Size = stat($dstNarBz2)->size;
+
+            print "    size $narDiffSize; full size $dstNarBz2Size; ", $time2 - $time1, " seconds\n";
+        
+            if ($narDiffSize >= $dstNarBz2Size) {
+                print "    rejecting; patch bigger than full archive\n";
+                next;
+            }
+    
+            if ($narDiffSize / $dstNarBz2Size >= $maxPatchFraction) {
+                print "    rejecting; patch too large relative to full archive\n";
+                next;
+            }
+    
+            my $finalName = "$narDiffHash.nar-bsdiff";
+
+            if (-e "$patchesPath/$finalName") {
+                print "    not copying, already exists\n";
+            }
+
+            else {
+                system("cp '$tmpDir/DIFF' '$patchesPath/$finalName.tmp'") == 0
+                    or die "cannot copy diff";
+                rename("$patchesPath/$finalName.tmp", "$patchesPath/$finalName")
+                    or die "cannot rename $patchesPath/$finalName.tmp";
+            }
+        
+            # Add the patch to the manifest.
+            addPatch $dstPatches, $p,
+                { url => "$patchesURL/$finalName", hash => "$hashAlgo:$narDiffHash"
+                , size => $narDiffSize, basePath => $closest, baseHash => "$hashAlgo:$baseHash"
+                , narHash => "$hashAlgo:$narHash", patchType => "nar-bsdiff"
+                };
+        }
+    }
+}
+
+
+# Propagate useful patches from $srcPatches to $dstPatches.  A patch
+# is useful if it produces either paths in the $dstNarFiles or paths
+# that can be used as the base for other useful patches.
+sub propagatePatches {
+    my ($srcPatches, $dstNarFiles, $dstPatches) = @_;
+
+    print STDERR "propagating patches...\n";
+
+    my $changed;
+    do {
+        # !!! we repeat this to reach the transitive closure; inefficient
+        $changed = 0;
+
+        print STDERR "loop\n";
+
+        my %dstBasePaths;
+        foreach my $q (keys %{$dstPatches}) {
+            foreach my $patch (@{$$dstPatches{$q}}) {
+                $dstBasePaths{$patch->{basePath}} = 1;
+            }
+        }
+
+        foreach my $p (keys %{$srcPatches}) {
+            my $patchList = $$srcPatches{$p};
+
+            my $include = 0;
+
+            # Is path $p included in the destination?  If so, include
+            # patches that produce it.
+            $include = 1 if defined $$dstNarFiles{$p};
+
+            # Is path $p a path that serves as a base for paths in the
+            # destination?  If so, include patches that produce it.
+            # !!! check baseHash
+            $include = 1 if defined $dstBasePaths{$p};
+
+            if ($include) {
+                foreach my $patch (@{$patchList}) {
+                    $changed = 1 if addPatch $dstPatches, $p, $patch;
+                }
+            }
+        
+        }
+    
+    } while $changed;
+}
+
+
+# Add all new patches in $srcPatches to $dstPatches.
+sub copyPatches {
+    my ($srcPatches, $dstPatches) = @_;
+    foreach my $p (keys %{$srcPatches}) {
+        addPatch $dstPatches, $p, $_ foreach @{$$srcPatches{$p}};
+    }
+}
+
+
+return 1;
diff --git a/perl/lib/Nix/Manifest.pm b/perl/lib/Nix/Manifest.pm
new file mode 100644
index 000000000000..9b7e89fa42fb
--- /dev/null
+++ b/perl/lib/Nix/Manifest.pm
@@ -0,0 +1,468 @@
+package Nix::Manifest;
+
+use utf8;
+use strict;
+use DBI;
+use DBD::SQLite;
+use Cwd;
+use File::stat;
+use File::Path;
+use Fcntl ':flock';
+use Nix::Config;
+use Nix::Crypto;
+
+our @ISA = qw(Exporter);
+our @EXPORT = qw(readManifest writeManifest updateManifestDB addPatch deleteOldManifests parseNARInfo);
+
+
+sub addNAR {
+    my ($narFiles, $storePath, $info) = @_;
+
+    $$narFiles{$storePath} = []
+        unless defined $$narFiles{$storePath};
+
+    my $narFileList = $$narFiles{$storePath};
+
+    my $found = 0;
+    foreach my $narFile (@{$narFileList}) {
+        $found = 1 if $narFile->{url} eq $info->{url};
+    }
+
+    push @{$narFileList}, $info if !$found;
+}
+
+
+sub addPatch {
+    my ($patches, $storePath, $patch) = @_;
+
+    $$patches{$storePath} = []
+        unless defined $$patches{$storePath};
+
+    my $patchList = $$patches{$storePath};
+
+    my $found = 0;
+    foreach my $patch2 (@{$patchList}) {
+        $found = 1 if
+            $patch2->{url} eq $patch->{url} &&
+            $patch2->{basePath} eq $patch->{basePath};
+    }
+
+    push @{$patchList}, $patch if !$found;
+
+    return !$found;
+}
+
+
+sub readManifest_ {
+    my ($manifest, $addNAR, $addPatch) = @_;
+
+    # Decompress the manifest if necessary.
+    if ($manifest =~ /\.bz2$/) {
+        open MANIFEST, "$Nix::Config::bzip2 -d < $manifest |"
+            or die "cannot decompress ‘$manifest’: $!";
+    } else {
+        open MANIFEST, "<$manifest"
+            or die "cannot open ‘$manifest’: $!";
+    }
+
+    my $inside = 0;
+    my $type;
+
+    my $manifestVersion = 2;
+
+    my ($storePath, $url, $hash, $size, $basePath, $baseHash, $patchType);
+    my ($narHash, $narSize, $references, $deriver, $copyFrom, $system, $compressionType);
+
+    while (<MANIFEST>) {
+        chomp;
+        s/\#.*$//g;
+        next if (/^$/);
+
+        if (!$inside) {
+
+            if (/^\s*(\w*)\s*\{$/) {
+                $type = $1;
+                $type = "narfile" if $type eq "";
+                $inside = 1;
+                undef $storePath;
+                undef $url;
+                undef $hash;
+                undef $size;
+                undef $narHash;
+                undef $narSize;
+                undef $basePath;
+                undef $baseHash;
+                undef $patchType;
+                undef $system;
+                $references = "";
+                $deriver = "";
+                $compressionType = "bzip2";
+            }
+
+        } else {
+
+            if (/^\}$/) {
+                $inside = 0;
+
+                if ($type eq "narfile") {
+                    &$addNAR($storePath,
+                        { url => $url, hash => $hash, size => $size
+                        , narHash => $narHash, narSize => $narSize
+                        , references => $references
+                        , deriver => $deriver
+                        , system => $system
+                        , compressionType => $compressionType
+                        });
+                }
+
+                elsif ($type eq "patch") {
+                    &$addPatch($storePath,
+                        { url => $url, hash => $hash, size => $size
+                        , basePath => $basePath, baseHash => $baseHash
+                        , narHash => $narHash, narSize => $narSize
+                        , patchType => $patchType
+                        });
+                }
+
+            }
+
+            elsif (/^\s*StorePath:\s*(\/\S+)\s*$/) { $storePath = $1; }
+            elsif (/^\s*CopyFrom:\s*(\/\S+)\s*$/) { $copyFrom = $1; }
+            elsif (/^\s*Hash:\s*(\S+)\s*$/) { $hash = $1; }
+            elsif (/^\s*URL:\s*(\S+)\s*$/) { $url = $1; }
+            elsif (/^\s*Compression:\s*(\S+)\s*$/) { $compressionType = $1; }
+            elsif (/^\s*Size:\s*(\d+)\s*$/) { $size = $1; }
+            elsif (/^\s*BasePath:\s*(\/\S+)\s*$/) { $basePath = $1; }
+            elsif (/^\s*BaseHash:\s*(\S+)\s*$/) { $baseHash = $1; }
+            elsif (/^\s*Type:\s*(\S+)\s*$/) { $patchType = $1; }
+            elsif (/^\s*NarHash:\s*(\S+)\s*$/) { $narHash = $1; }
+            elsif (/^\s*NarSize:\s*(\d+)\s*$/) { $narSize = $1; }
+            elsif (/^\s*References:\s*(.*)\s*$/) { $references = $1; }
+            elsif (/^\s*Deriver:\s*(\S+)\s*$/) { $deriver = $1; }
+            elsif (/^\s*ManifestVersion:\s*(\d+)\s*$/) { $manifestVersion = $1; }
+            elsif (/^\s*System:\s*(\S+)\s*$/) { $system = $1; }
+
+            # Compatibility:
+            elsif (/^\s*NarURL:\s*(\S+)\s*$/) { $url = $1; }
+            elsif (/^\s*MD5:\s*(\S+)\s*$/) { $hash = "md5:$1"; }
+
+        }
+    }
+
+    close MANIFEST;
+
+    return $manifestVersion;
+}
+
+
+sub readManifest {
+    my ($manifest, $narFiles, $patches) = @_;
+    readManifest_($manifest,
+        sub { addNAR($narFiles, @_); },
+        sub { addPatch($patches, @_); } );
+}
+
+
+sub writeManifest {
+    my ($manifest, $narFiles, $patches, $noCompress) = @_;
+
+    open MANIFEST, ">$manifest.tmp"; # !!! check exclusive
+
+    print MANIFEST "version {\n";
+    print MANIFEST "  ManifestVersion: 3\n";
+    print MANIFEST "}\n";
+
+    foreach my $storePath (sort (keys %{$narFiles})) {
+        my $narFileList = $$narFiles{$storePath};
+        foreach my $narFile (@{$narFileList}) {
+            print MANIFEST "{\n";
+            print MANIFEST "  StorePath: $storePath\n";
+            print MANIFEST "  NarURL: $narFile->{url}\n";
+            print MANIFEST "  Compression: $narFile->{compressionType}\n";
+            print MANIFEST "  Hash: $narFile->{hash}\n" if defined $narFile->{hash};
+            print MANIFEST "  Size: $narFile->{size}\n" if defined $narFile->{size};
+            print MANIFEST "  NarHash: $narFile->{narHash}\n";
+            print MANIFEST "  NarSize: $narFile->{narSize}\n" if $narFile->{narSize};
+            print MANIFEST "  References: $narFile->{references}\n"
+                if defined $narFile->{references} && $narFile->{references} ne "";
+            print MANIFEST "  Deriver: $narFile->{deriver}\n"
+                if defined $narFile->{deriver} && $narFile->{deriver} ne "";
+            print MANIFEST "  System: $narFile->{system}\n" if defined $narFile->{system};
+            print MANIFEST "}\n";
+        }
+    }
+
+    foreach my $storePath (sort (keys %{$patches})) {
+        my $patchList = $$patches{$storePath};
+        foreach my $patch (@{$patchList}) {
+            print MANIFEST "patch {\n";
+            print MANIFEST "  StorePath: $storePath\n";
+            print MANIFEST "  NarURL: $patch->{url}\n";
+            print MANIFEST "  Hash: $patch->{hash}\n";
+            print MANIFEST "  Size: $patch->{size}\n";
+            print MANIFEST "  NarHash: $patch->{narHash}\n";
+            print MANIFEST "  NarSize: $patch->{narSize}\n" if $patch->{narSize};
+            print MANIFEST "  BasePath: $patch->{basePath}\n";
+            print MANIFEST "  BaseHash: $patch->{baseHash}\n";
+            print MANIFEST "  Type: $patch->{patchType}\n";
+            print MANIFEST "}\n";
+        }
+    }
+
+
+    close MANIFEST;
+
+    rename("$manifest.tmp", $manifest)
+        or die "cannot rename $manifest.tmp: $!";
+
+
+    # Create a bzipped manifest.
+    unless (defined $noCompress) {
+        system("$Nix::Config::bzip2 < $manifest > $manifest.bz2.tmp") == 0
+            or die "cannot compress manifest";
+
+        rename("$manifest.bz2.tmp", "$manifest.bz2")
+            or die "cannot rename $manifest.bz2.tmp: $!";
+    }
+}
+
+
+sub updateManifestDB {
+    my $manifestDir = $Nix::Config::manifestDir;
+
+    my @manifests = glob "$manifestDir/*.nixmanifest";
+    return undef if scalar @manifests == 0;
+
+    mkpath($manifestDir);
+
+    unlink "$manifestDir/cache.sqlite"; # remove obsolete cache
+    my $dbPath = "$manifestDir/cache-v2.sqlite";
+
+    # Open/create the database.
+    our $dbh = DBI->connect("dbi:SQLite:dbname=$dbPath", "", "")
+        or die "cannot open database ‘$dbPath’";
+    $dbh->{RaiseError} = 1;
+    $dbh->{PrintError} = 0;
+
+    $dbh->do("pragma foreign_keys = on");
+    $dbh->do("pragma synchronous = off"); # we can always reproduce the cache
+    $dbh->do("pragma journal_mode = truncate");
+
+    # Initialise the database schema, if necessary.
+    $dbh->do(<<EOF);
+        create table if not exists Manifests (
+            id        integer primary key autoincrement not null,
+            path      text unique not null,
+            timestamp integer not null
+        );
+EOF
+
+    $dbh->do(<<EOF);
+        create table if not exists NARs (
+            id               integer primary key autoincrement not null,
+            manifest         integer not null,
+            storePath        text not null,
+            url              text not null,
+            compressionType  text not null,
+            hash             text,
+            size             integer,
+            narHash          text,
+            narSize          integer,
+            refs             text,
+            deriver          text,
+            system           text,
+            foreign key (manifest) references Manifests(id) on delete cascade
+        );
+EOF
+
+    $dbh->do("create index if not exists NARs_storePath on NARs(storePath)");
+
+    $dbh->do(<<EOF);
+        create table if not exists Patches (
+            id               integer primary key autoincrement not null,
+            manifest         integer not null,
+            storePath        text not null,
+            basePath         text not null,
+            baseHash         text not null,
+            url              text not null,
+            hash             text,
+            size             integer,
+            narHash          text,
+            narSize          integer,
+            patchType        text not null,
+            foreign key (manifest) references Manifests(id) on delete cascade
+        );
+EOF
+
+    $dbh->do("create index if not exists Patches_storePath on Patches(storePath)");
+
+    # Acquire an exclusive lock to ensure that only one process
+    # updates the DB at the same time.  This isn't really necessary,
+    # but it prevents work duplication and lock contention in SQLite.
+    my $lockFile = "$manifestDir/cache.lock";
+    open MAINLOCK, ">>$lockFile" or die "unable to acquire lock ‘$lockFile’: $!\n";
+    flock(MAINLOCK, LOCK_EX) or die;
+
+    our $insertNAR = $dbh->prepare(
+        "insert into NARs(manifest, storePath, url, compressionType, hash, size, narHash, " .
+        "narSize, refs, deriver, system) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die;
+
+    our $insertPatch = $dbh->prepare(
+        "insert into Patches(manifest, storePath, basePath, baseHash, url, hash, " .
+        "size, narHash, narSize, patchType) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
+
+    $dbh->begin_work;
+
+    # Read each manifest in $manifestDir and add it to the database,
+    # unless we've already done so on a previous run.
+    my %seen;
+
+    for my $manifestLink (@manifests) {
+        my $manifest = Cwd::abs_path($manifestLink);
+        next unless -f $manifest;
+        my $timestamp = lstat($manifest)->mtime;
+        $seen{$manifest} = 1;
+
+        next if scalar @{$dbh->selectcol_arrayref(
+            "select 1 from Manifests where path = ? and timestamp = ?",
+            {}, $manifest, $timestamp)} == 1;
+
+        print STDERR "caching $manifest...\n";
+
+        $dbh->do("delete from Manifests where path = ?", {}, $manifest);
+
+        $dbh->do("insert into Manifests(path, timestamp) values (?, ?)",
+                 {}, $manifest, $timestamp);
+
+        our $id = $dbh->last_insert_id("", "", "", "");
+
+        sub addNARToDB {
+            my ($storePath, $narFile) = @_;
+            $insertNAR->execute(
+                $id, $storePath, $narFile->{url}, $narFile->{compressionType}, $narFile->{hash},
+                $narFile->{size}, $narFile->{narHash}, $narFile->{narSize}, $narFile->{references},
+                $narFile->{deriver}, $narFile->{system});
+        };
+
+        sub addPatchToDB {
+            my ($storePath, $patch) = @_;
+            $insertPatch->execute(
+                $id, $storePath, $patch->{basePath}, $patch->{baseHash}, $patch->{url},
+                $patch->{hash}, $patch->{size}, $patch->{narHash}, $patch->{narSize},
+                $patch->{patchType});
+        };
+
+        my $version = readManifest_($manifest, \&addNARToDB, \&addPatchToDB);
+
+        if ($version < 3) {
+            die "you have an old-style or corrupt manifest ‘$manifestLink’; please delete it\n";
+        }
+        if ($version >= 10) {
+            die "manifest ‘$manifestLink’ is too new; please delete it or upgrade Nix\n";
+        }
+    }
+
+    # Remove cached information for removed manifests from the DB.
+    foreach my $manifest (@{$dbh->selectcol_arrayref("select path from Manifests")}) {
+        next if defined $seen{$manifest};
+        $dbh->do("delete from Manifests where path = ?", {}, $manifest);
+    }
+
+    $dbh->commit;
+
+    close MAINLOCK;
+
+    return $dbh;
+}
+
+
+
+# Delete all old manifests downloaded from a given URL.
+sub deleteOldManifests {
+    my ($url, $curUrlFile) = @_;
+    for my $urlFile (glob "$Nix::Config::manifestDir/*.url") {
+        next if defined $curUrlFile && $urlFile eq $curUrlFile;
+        open URL, "<$urlFile" or die;
+        my $url2 = <URL>;
+        chomp $url2;
+        close URL;
+        next unless $url eq $url2;
+        my $base = $urlFile; $base =~ s/.url$//;
+        unlink "${base}.url";
+        unlink "${base}.nixmanifest";
+    }
+}
+
+
+# Parse a NAR info file.
+sub parseNARInfo {
+    my ($storePath, $content, $requireValidSig, $location) = @_;
+
+    my ($storePath2, $url, $fileHash, $fileSize, $narHash, $narSize, $deriver, $system, $sig);
+    my $signedData = "";
+    my $compression = "bzip2";
+    my @refs;
+
+    foreach my $line (split "\n", $content) {
+        return undef unless $line =~ /^(.*): (.*)$/;
+        if ($1 eq "StorePath") { $storePath2 = $2; }
+        elsif ($1 eq "URL") { $url = $2; }
+        elsif ($1 eq "Compression") { $compression = $2; }
+        elsif ($1 eq "FileHash") { $fileHash = $2; }
+        elsif ($1 eq "FileSize") { $fileSize = int($2); }
+        elsif ($1 eq "NarHash") { $narHash = $2; }
+        elsif ($1 eq "NarSize") { $narSize = int($2); }
+        elsif ($1 eq "References") { @refs = split / /, $2; }
+        elsif ($1 eq "Deriver") { $deriver = $2; }
+        elsif ($1 eq "System") { $system = $2; }
+        elsif ($1 eq "Signature") { $sig = $2; last; }
+        $signedData .= "$line\n";
+    }
+
+    return undef if $storePath ne $storePath2 || !defined $url || !defined $narHash;
+
+    my $res =
+        { url => $url
+        , compression => $compression
+        , fileHash => $fileHash
+        , fileSize => $fileSize
+        , narHash => $narHash
+        , narSize => $narSize
+        , refs => [ @refs ]
+        , deriver => $deriver
+        , system => $system
+        };
+
+    if ($requireValidSig) {
+        if (!defined $sig) {
+            warn "NAR info file ‘$location’ lacks a signature; ignoring\n";
+            return undef;
+        }
+        my ($sigVersion, $keyName, $sig64) = split ";", $sig;
+        $sigVersion //= 0;
+        if ($sigVersion != 1) {
+            warn "NAR info file ‘$location’ has unsupported version $sigVersion; ignoring\n";
+            return undef;
+        }
+        return undef unless defined $keyName && defined $sig64;
+        my $publicKeyFile = $Nix::Config::config{"binary-cache-public-key-$keyName"};
+        if (!defined $publicKeyFile) {
+            warn "NAR info file ‘$location’ is signed by unknown key ‘$keyName’; ignoring\n";
+            return undef;
+        }
+        if (! -f $publicKeyFile) {
+            die "binary cache public key file ‘$publicKeyFile’ does not exist\n";
+            return undef;
+        }
+        if (!isValidSignature($publicKeyFile, $sig64, $signedData)) {
+            warn "NAR info file ‘$location’ has an invalid signature; ignoring\n";
+            return undef;
+        }
+        $res->{signedBy} = $keyName;
+    }
+
+    return $res;
+}
+
+
+return 1;
diff --git a/perl/lib/Nix/SSH.pm b/perl/lib/Nix/SSH.pm
new file mode 100644
index 000000000000..9e0c10beb0e0
--- /dev/null
+++ b/perl/lib/Nix/SSH.pm
@@ -0,0 +1,103 @@
+package Nix::SSH;
+
+use strict;
+use File::Temp qw(tempdir);
+use IPC::Open2;
+
+our @ISA = qw(Exporter);
+our @EXPORT = qw(
+  @globalSshOpts
+  readN readInt readString readStrings
+  writeInt writeString writeStrings
+  connectToRemoteNix
+);
+
+
+our @globalSshOpts = split ' ', ($ENV{"NIX_SSHOPTS"} or "");
+
+
+sub readN {
+    my ($bytes, $from) = @_;
+    my $res = "";
+    while ($bytes > 0) {
+        my $s;
+        my $n = sysread($from, $s, $bytes);
+        die "I/O error reading from remote side\n" if !defined $n;
+        die "got EOF while expecting $bytes bytes from remote side\n" if !$n;
+        $bytes -= $n;
+        $res .= $s;
+    }
+    return $res;
+}
+
+
+sub readInt {
+    my ($from) = @_;
+    return unpack("L<x4", readN(8, $from));
+}
+
+
+sub readString {
+    my ($from) = @_;
+    my $len = readInt($from);
+    my $s = readN($len, $from);
+    readN(8 - $len % 8, $from) if $len % 8; # skip padding
+    return $s;
+}
+
+
+sub readStrings {
+    my ($from) = @_;
+    my $n = readInt($from);
+    my @res;
+    push @res, readString($from) while $n--;
+    return @res;
+}
+
+
+sub writeInt {
+    my ($n, $to) = @_;
+    syswrite($to, pack("L<x4", $n)) or die;
+}
+
+
+sub writeString {
+    my ($s, $to) = @_;
+    my $len = length $s;
+    my $req .= pack("L<x4", $len);
+    $req .= $s;
+    $req .= "\000" x (8 - $len % 8) if $len % 8;
+    syswrite($to, $req) or die;
+}
+
+
+sub writeStrings {
+    my ($ss, $to) = @_;
+    writeInt(scalar(@{$ss}), $to);
+    writeString($_, $to) foreach @{$ss};
+}
+
+
+sub connectToRemoteNix {
+    my ($sshHost, $sshOpts, $extraFlags) = @_;
+
+    $extraFlags ||= "";
+
+    # Start ‘nix-store --serve’ on the remote host.
+    my ($from, $to);
+    # FIXME: don't start a shell, start ssh directly.
+    my $pid = open2($from, $to, "exec ssh -x -a $sshHost @globalSshOpts @{$sshOpts} nix-store --serve --write $extraFlags");
+
+    # Do the handshake.
+    my $SERVE_MAGIC_1 = 0x390c9deb; # FIXME
+    my $clientVersion = 0x200;
+    syswrite($to, pack("L<x4L<x4", $SERVE_MAGIC_1, $clientVersion)) or die;
+    die "did not get valid handshake from remote host\n" if readInt($from) != 0x5452eecb;
+    my $serverVersion = readInt($from);
+    die "unsupported server version\n" if $serverVersion < 0x200 || $serverVersion >= 0x300;
+
+    return ($from, $to, $pid);
+}
+
+
+1;
diff --git a/perl/lib/Nix/Store.pm b/perl/lib/Nix/Store.pm
new file mode 100644
index 000000000000..89cfaefa5fd4
--- /dev/null
+++ b/perl/lib/Nix/Store.pm
@@ -0,0 +1,92 @@
+package Nix::Store;
+
+use strict;
+use warnings;
+use Nix::Config;
+
+require Exporter;
+
+our @ISA = qw(Exporter);
+
+our %EXPORT_TAGS = ( 'all' => [ qw( ) ] );
+
+our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
+
+our @EXPORT = qw(
+    isValidPath queryReferences queryPathInfo queryDeriver queryPathHash
+    queryPathFromHashPart
+    topoSortPaths computeFSClosure followLinksToStorePath exportPaths importPaths
+    hashPath hashFile hashString
+    addToStore makeFixedOutputPath
+    derivationFromPath
+);
+
+our $VERSION = '0.15';
+
+sub backtick {
+    open(RES, "-|", @_) or die;
+    local $/;
+    my $res = <RES> || "";
+    close RES or die;
+    return $res;
+}
+
+if ($Nix::Config::useBindings) {
+    require XSLoader;
+    XSLoader::load('Nix::Store', $VERSION);
+} else {
+
+    # Provide slow fallbacks of some functions on platforms that don't
+    # support the Perl bindings.
+
+    use File::Temp;
+    use Fcntl qw/F_SETFD/;
+
+    *hashFile = sub {
+        my ($algo, $base32, $path) = @_;
+        my $res = backtick("$Nix::Config::binDir/nix-hash", "--flat", $path, "--type", $algo, $base32 ? "--base32" : ());
+        chomp $res;
+        return $res;
+    };
+
+    *hashPath = sub {
+        my ($algo, $base32, $path) = @_;
+        my $res = backtick("$Nix::Config::binDir/nix-hash", $path, "--type", $algo, $base32 ? "--base32" : ());
+        chomp $res;
+        return $res;
+    };
+
+    *hashString = sub {
+        my ($algo, $base32, $s) = @_;
+        my $fh = File::Temp->new();
+        print $fh $s;
+        my $res = backtick("$Nix::Config::binDir/nix-hash", $fh->filename, "--type", $algo, $base32 ? "--base32" : ());
+        chomp $res;
+        return $res;
+    };
+
+    *addToStore = sub {
+        my ($srcPath, $recursive, $algo) = @_;
+        die "not implemented" if $recursive || $algo ne "sha256";
+        my $res = backtick("$Nix::Config::binDir/nix-store", "--add", $srcPath);
+        chomp $res;
+        return $res;
+    };
+
+    *isValidPath = sub {
+        my ($path) = @_;
+        my $res = backtick("$Nix::Config::binDir/nix-store", "--check-validity", "--print-invalid", $path);
+        chomp $res;
+        return $res ne $path;
+    };
+
+    *queryPathHash = sub {
+        my ($path) = @_;
+        my $res = backtick("$Nix::Config::binDir/nix-store", "--query", "--hash", $path);
+        chomp $res;
+        return $res;
+    };
+}
+
+1;
+__END__
diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs
new file mode 100644
index 000000000000..ff90616d3766
--- /dev/null
+++ b/perl/lib/Nix/Store.xs
@@ -0,0 +1,292 @@
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+/* Prevent a clash between some Perl and libstdc++ macros. */
+#undef do_open
+#undef do_close
+
+#include <store-api.hh>
+#include <globals.hh>
+#include <misc.hh>
+#include <util.hh>
+
+
+using namespace nix;
+
+
+void doInit()
+{
+    if (!store) {
+        try {
+            settings.processEnvironment();
+            settings.loadConfFile();
+            settings.update();
+            settings.lockCPU = false;
+            store = openStore();
+        } catch (Error & e) {
+            croak(e.what());
+        }
+    }
+}
+
+
+MODULE = Nix::Store PACKAGE = Nix::Store
+PROTOTYPES: ENABLE
+
+
+#undef dNOOP // Hack to work around "error: declaration of 'Perl___notused' has a different language linkage" error message on clang.
+#define dNOOP
+
+
+void init()
+    CODE:
+        doInit();
+
+
+int isValidPath(char * path)
+    CODE:
+        try {
+            doInit();
+            RETVAL = store->isValidPath(path);
+        } catch (Error & e) {
+            croak(e.what());
+        }
+    OUTPUT:
+        RETVAL
+
+
+SV * queryReferences(char * path)
+    PPCODE:
+        try {
+            doInit();
+            PathSet paths;
+            store->queryReferences(path, paths);
+            for (PathSet::iterator i = paths.begin(); i != paths.end(); ++i)
+                XPUSHs(sv_2mortal(newSVpv(i->c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * queryPathHash(char * path)
+    PPCODE:
+        try {
+            doInit();
+            Hash hash = store->queryPathHash(path);
+            string s = "sha256:" + printHash32(hash);
+            XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * queryDeriver(char * path)
+    PPCODE:
+        try {
+            doInit();
+            Path deriver = store->queryDeriver(path);
+            if (deriver == "") XSRETURN_UNDEF;
+            XPUSHs(sv_2mortal(newSVpv(deriver.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
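+/* Return the deriver, NAR hash, registration time, NAR size and
+   references of a valid store path.  The hash is printed in base-32 or
+   base-16 depending on the ‘base32’ flag. */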
+SV * queryPathInfo(char * path, int base32)
+    PPCODE:
+        try {
+            doInit();
+            ValidPathInfo info = store->queryPathInfo(path);
+            if (info.deriver == "")
+                XPUSHs(&PL_sv_undef);
+            else
+                XPUSHs(sv_2mortal(newSVpv(info.deriver.c_str(), 0)));
+            string s = "sha256:" + (base32 ? printHash32(info.hash) : printHash(info.hash));
+            XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+            mXPUSHi(info.registrationTime);
+            mXPUSHi(info.narSize);
+            AV * arr = newAV();
+            for (PathSet::iterator i = info.references.begin(); i != info.references.end(); ++i)
+                av_push(arr, newSVpv(i->c_str(), 0));
+            XPUSHs(sv_2mortal(newRV((SV *) arr)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * queryPathFromHashPart(char * hashPart)
+    PPCODE:
+        try {
+            doInit();
+            Path path = store->queryPathFromHashPart(hashPart);
+            XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * computeFSClosure(int flipDirection, int includeOutputs, ...)
+    PPCODE:
+        try {
+            doInit();
+            PathSet paths;
+            for (int n = 2; n < items; ++n)
+                computeFSClosure(*store, SvPV_nolen(ST(n)), paths, flipDirection, includeOutputs);
+            for (PathSet::iterator i = paths.begin(); i != paths.end(); ++i)
+                XPUSHs(sv_2mortal(newSVpv(i->c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * topoSortPaths(...)
+    PPCODE:
+        try {
+            doInit();
+            PathSet paths;
+            for (int n = 0; n < items; ++n) paths.insert(SvPV_nolen(ST(n)));
+            Paths sorted = topoSortPaths(*store, paths);
+            for (Paths::iterator i = sorted.begin(); i != sorted.end(); ++i)
+                XPUSHs(sv_2mortal(newSVpv(i->c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * followLinksToStorePath(char * path)
+    CODE:
+        try {
+            doInit();
+            RETVAL = newSVpv(followLinksToStorePath(path).c_str(), 0);
+        } catch (Error & e) {
+            croak(e.what());
+        }
+    OUTPUT:
+        RETVAL
+
+
+void exportPaths(int fd, int sign, ...)
+    PPCODE:
+        try {
+            doInit();
+            Paths paths;
+            for (int n = 2; n < items; ++n) paths.push_back(SvPV_nolen(ST(n)));
+            FdSink sink(fd);
+            exportPaths(*store, paths, sign, sink);
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+void importPaths(int fd)
+    PPCODE:
+        try {
+            doInit();
+            FdSource source(fd);
+            store->importPaths(false, source);
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * hashPath(char * algo, int base32, char * path)
+    PPCODE:
+        try {
+            Hash h = hashPath(parseHashType(algo), path).first;
+            string s = base32 ? printHash32(h) : printHash(h);
+            XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * hashFile(char * algo, int base32, char * path)
+    PPCODE:
+        try {
+            Hash h = hashFile(parseHashType(algo), path);
+            string s = base32 ? printHash32(h) : printHash(h);
+            XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * hashString(char * algo, int base32, char * s)
+    PPCODE:
+        try {
+            Hash h = hashString(parseHashType(algo), s);
+            string s = base32 ? printHash32(h) : printHash(h);
+            XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * addToStore(char * srcPath, int recursive, char * algo)
+    PPCODE:
+        try {
+            doInit();
+            Path path = store->addToStore(srcPath, recursive, parseHashType(algo));
+            XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * makeFixedOutputPath(int recursive, char * algo, char * hash, char * name)
+    PPCODE:
+        try {
+            doInit();
+            HashType ht = parseHashType(algo);
+            Path path = makeFixedOutputPath(recursive, ht,
+                parseHash16or32(ht, hash), name);
+            XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
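+/* Return the contents of a derivation as a hash reference with the keys
+   ‘outputs’, ‘inputDrvs’, ‘inputSrcs’, ‘platform’, ‘builder’, ‘args’ and
+   ‘env’. */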
+SV * derivationFromPath(char * drvPath)
+    PREINIT:
+        HV *hash;
+    CODE:
+        try {
+            doInit();
+            Derivation drv = derivationFromPath(*store, drvPath);
+            hash = newHV();
+
+            HV * outputs = newHV();
+            for (DerivationOutputs::iterator i = drv.outputs.begin(); i != drv.outputs.end(); ++i)
+                hv_store(outputs, i->first.c_str(), i->first.size(), newSVpv(i->second.path.c_str(), 0), 0);
+            hv_stores(hash, "outputs", newRV((SV *) outputs));
+
+            AV * inputDrvs = newAV();
+            for (DerivationInputs::iterator i = drv.inputDrvs.begin(); i != drv.inputDrvs.end(); ++i)
+                av_push(inputDrvs, newSVpv(i->first.c_str(), 0)); // !!! ignores i->second
+            hv_stores(hash, "inputDrvs", newRV((SV *) inputDrvs));
+
+            AV * inputSrcs = newAV();
+            for (PathSet::iterator i = drv.inputSrcs.begin(); i != drv.inputSrcs.end(); ++i)
+                av_push(inputSrcs, newSVpv(i->c_str(), 0));
+            hv_stores(hash, "inputSrcs", newRV((SV *) inputSrcs));
+
+            hv_stores(hash, "platform", newSVpv(drv.platform.c_str(), 0));
+            hv_stores(hash, "builder", newSVpv(drv.builder.c_str(), 0));
+
+            AV * args = newAV();
+            for (Strings::iterator i = drv.args.begin(); i != drv.args.end(); ++i)
+                av_push(args, newSVpv(i->c_str(), 0));
+            hv_stores(hash, "args", newRV((SV *) args));
+
+            HV * env = newHV();
+            for (StringPairs::iterator i = drv.env.begin(); i != drv.env.end(); ++i)
+                hv_store(env, i->first.c_str(), i->first.size(), newSVpv(i->second.c_str(), 0), 0);
+            hv_stores(hash, "env", newRV((SV *) env));
+
+            RETVAL = newRV_noinc((SV *)hash);
+        } catch (Error & e) {
+            croak(e.what());
+        }
+    OUTPUT:
+        RETVAL
diff --git a/perl/lib/Nix/Utils.pm b/perl/lib/Nix/Utils.pm
new file mode 100644
index 000000000000..32fb0aafb808
--- /dev/null
+++ b/perl/lib/Nix/Utils.pm
@@ -0,0 +1,47 @@
+package Nix::Utils;
+
+use utf8;
+use File::Temp qw(tempdir);
+
+our @ISA = qw(Exporter);
+our @EXPORT = qw(checkURL uniq writeFile readFile mkTempDir);
+
+$urlRE = "(?: [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*]+ )";
+
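+# Die unless the given string looks like an absolute ‘scheme:...’ URL.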
+sub checkURL {
+    my ($url) = @_;
+    die "invalid URL ‘$url’\n" unless $url =~ /^ $urlRE $ /x;
+}
+
+sub uniq {
+    my %seen;
+    my @res;
+    foreach my $name (@_) {
+        next if $seen{$name};
+        $seen{$name} = 1;
+        push @res, $name;
+    }
+    return @res;
+}
+
+sub writeFile {
+    my ($fn, $s) = @_;
+    open TMP, ">$fn" or die "cannot create file ‘$fn’: $!";
+    print TMP "$s" or die;
+    close TMP or die;
+}
+
+sub readFile {
+    local $/ = undef;
+    my ($fn) = @_;
+    open TMP, "<$fn" or die "cannot open file ‘$fn’: $!";
+    my $s = <TMP>;
+    close TMP or die;
+    return $s;
+}
+
+sub mkTempDir {
+    my ($name) = @_;
+    return tempdir("$name.XXXXXX", CLEANUP => 1, DIR => $ENV{"TMPDIR"} // $ENV{"XDG_RUNTIME_DIR"} // "/tmp")
+        or die "cannot create a temporary directory";
+}
diff --git a/perl/local.mk b/perl/local.mk
new file mode 100644
index 000000000000..564683dffee2
--- /dev/null
+++ b/perl/local.mk
@@ -0,0 +1,40 @@
+nix_perl_sources := \
+  $(d)/lib/Nix/Store.pm \
+  $(d)/lib/Nix/Manifest.pm \
+  $(d)/lib/Nix/GeneratePatches.pm \
+  $(d)/lib/Nix/SSH.pm \
+  $(d)/lib/Nix/CopyClosure.pm \
+  $(d)/lib/Nix/Config.pm.in \
+  $(d)/lib/Nix/Utils.pm \
+  $(d)/lib/Nix/Crypto.pm
+
+nix_perl_modules := $(nix_perl_sources:.in=)
+
+$(foreach x, $(nix_perl_modules), $(eval $(call install-data-in, $(x), $(perllibdir)/Nix)))
+
+ifeq ($(perlbindings), yes)
+
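+  # Generate the C++ glue code for the Perl bindings from the XS file.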
+  $(d)/lib/Nix/Store.cc: $(d)/lib/Nix/Store.xs
+	$(trace-gen) xsubpp $^ -output $@
+
+  libraries += Store
+
+  Store_DIR := $(d)/lib/Nix
+
+  Store_SOURCES := $(Store_DIR)/Store.cc
+
+  Store_LIBS = libstore
+
+  Store_CXXFLAGS = \
+    -I$(shell $(perl) -e 'use Config; print $$Config{archlibexp};')/CORE \
+    -D_FILE_OFFSET_BITS=64 -Wno-unused-variable -Wno-literal-suffix
+
+  Store_ALLOW_UNDEFINED = 1
+
+  Store_FORCE_INSTALL = 1
+
+  Store_INSTALL_DIR = $(perllibdir)/auto/Nix/Store
+
+endif
+
+clean-files += $(d)/lib/Nix/Config.pm $(d)/lib/Nix/Store.cc
diff --git a/release.nix b/release.nix
new file mode 100644
index 000000000000..bc4c06a0b792
--- /dev/null
+++ b/release.nix
@@ -0,0 +1,301 @@
+{ nix ? { outPath = ./.; revCount = 1234; shortRev = "abcdef"; }
+, officialRelease ? false
+}:
+
+let
+
+  pkgs = import <nixpkgs> {};
+
+  systems = [ "x86_64-linux" "i686-linux" "x86_64-darwin" /* "x86_64-freebsd" "i686-freebsd" */ ];
+
+
+  jobs = rec {
+
+
+    tarball =
+      with pkgs;
+
+      releaseTools.sourceTarball {
+        name = "nix-tarball";
+        version = builtins.readFile ./version;
+        versionSuffix = if officialRelease then "" else "pre${toString nix.revCount}_${nix.shortRev}";
+        src = if lib.inNixShell then null else nix;
+        inherit officialRelease;
+
+        buildInputs =
+          [ curl bison flex perl libxml2 libxslt w3m bzip2
+            tetex dblatex nukeReferences pkgconfig sqlite git
+          ];
+
+        configureFlags = ''
+          --with-docbook-rng=${docbook5}/xml/rng/docbook
+          --with-docbook-xsl=${docbook5_xsl}/xml/xsl/docbook
+          --with-dbi=${perlPackages.DBI}/${perl.libPrefix}
+          --with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
+          --with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix}
+        '';
+
+        postUnpack = ''
+          # Clean up when building from a working tree.
+          (cd $sourceRoot && (git ls-files -o | xargs -r rm -v))
+        '';
+
+        preConfigure = ''
+          # TeX needs a writable font cache.
+          export VARTEXFONTS=$TMPDIR/texfonts
+        '';
+
+        distPhase =
+          ''
+            runHook preDist
+            make dist
+            mkdir -p $out/tarballs
+            cp *.tar.* $out/tarballs
+          '';
+
+        preDist = ''
+          make install docdir=$out/share/doc/nix makefiles=doc/manual/local.mk
+
+          make doc/manual/manual.pdf
+          cp doc/manual/manual.pdf $out/manual.pdf
+
+          # The PDF contains filenames of included graphics (see
+          # http://www.tug.org/pipermail/pdftex/2007-August/007290.html).
+          # This causes a retained dependency on dblatex, which Hydra
+          # doesn't like (the output of the tarball job is distributed
+          # to Windows and Macs, so there should be no Linux binaries
+          # in the closure).
+          nuke-refs $out/manual.pdf
+
+          echo "doc manual $out/share/doc/nix/manual" >> $out/nix-support/hydra-build-products
+          echo "doc-pdf manual $out/manual.pdf" >> $out/nix-support/hydra-build-products
+          echo "doc release-notes $out/share/doc/nix/manual release-notes.html" >> $out/nix-support/hydra-build-products
+        '';
+      };
+
+
+    build = pkgs.lib.genAttrs systems (system:
+
+      with import <nixpkgs> { inherit system; };
+
+      releaseTools.nixBuild {
+        name = "nix";
+        src = tarball;
+
+        buildInputs = [ curl perl bzip2 openssl pkgconfig sqlite boehmgc ];
+
+        configureFlags = ''
+          --disable-init-state
+          --with-dbi=${perlPackages.DBI}/${perl.libPrefix}
+          --with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
+          --with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix}
+          --enable-gc
+          --sysconfdir=/etc
+        '';
+
+        # Provide a default value for the ‘build-chroot-dirs’ setting
+        # that includes /bin/sh pointing to bash.
+        preHook = lib.optionalString stdenv.isLinux (
+          let sh = stdenv.shell; in
+          ''
+            export DEFAULT_CHROOT_DIRS="/bin/sh=${sh} $(tr '\n' ' ' < ${writeReferencesToFile sh})"
+          '');
+
+        enableParallelBuilding = true;
+
+        makeFlags = "profiledir=$(out)/etc/profile.d";
+
+        preBuild = "unset NIX_INDENT_MAKE";
+
+        installFlags = "sysconfdir=$(out)/etc";
+
+        doInstallCheck = true;
+        installCheckFlags = "sysconfdir=$(out)/etc";
+      });
+
+
+    binaryTarball = pkgs.lib.genAttrs systems (system:
+
+      with import <nixpkgs> { inherit system; };
+
+      let
+        toplevel = builtins.getAttr system jobs.build;
+        version = toplevel.src.version;
+      in
+
+      runCommand "nix-binary-tarball-${version}"
+        { exportReferencesGraph = [ "closure" toplevel ];
+          buildInputs = [ perl ];
+          meta.description = "Distribution-independent Nix bootstrap binaries for ${system}";
+        }
+        ''
+          storePaths=$(perl ${pathsFromGraph} ./closure)
+          printRegistration=1 perl ${pathsFromGraph} ./closure > $TMPDIR/reginfo
+          substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
+            --subst-var-by nix ${toplevel}
+          chmod +x $TMPDIR/install
+          dir=nix-${version}-${system}
+          fn=$out/$dir.tar.bz2
+          mkdir -p $out/nix-support
+          echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
+          tar cvfj $fn \
+            --owner=0 --group=0 --mode=u+rw,uga+r \
+            --absolute-names \
+            --hard-dereference \
+            --transform "s,$TMPDIR/install,$dir/install," \
+            --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
+            --transform "s,$NIX_STORE,$dir/store,S" \
+            $TMPDIR/install $TMPDIR/reginfo $storePaths
+        '');
+
+
+    coverage =
+      with import <nixpkgs> { system = "x86_64-linux"; };
+
+      releaseTools.coverageAnalysis {
+        name = "nix-build";
+        src = tarball;
+
+        buildInputs =
+          [ curl perl bzip2 openssl pkgconfig sqlite
+            # These are for "make check" only:
+            graphviz libxml2 libxslt
+          ];
+
+        configureFlags = ''
+          --disable-init-state
+          --with-dbi=${perlPackages.DBI}/${perl.libPrefix}
+          --with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
+          --with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix}
+        '';
+
+        dontInstall = false;
+
+        doInstallCheck = true;
+
+        lcovFilter = [ "*/boost/*" "*-tab.*" ];
+
+        # We call `dot', and even though we just use it to
+        # syntax-check generated dot files, it still requires some
+        # fonts.  So provide those.
+        FONTCONFIG_FILE = texFunctions.fontsConf;
+      };
+
+
+    rpm_fedora16i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora16i386) 50;
+    rpm_fedora16x86_64 = makeRPM_x86_64 (diskImageFuns: diskImageFuns.fedora16x86_64) 50;
+    rpm_fedora18i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora18i386) 60;
+    rpm_fedora18x86_64 = makeRPM_x86_64 (diskImageFuns: diskImageFuns.fedora18x86_64) 60;
+    rpm_fedora19i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora19i386) 70;
+    rpm_fedora19x86_64 = makeRPM_x86_64 (diskImageFuns: diskImageFuns.fedora19x86_64) 70;
+    rpm_fedora20i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora20i386) 70;
+    rpm_fedora20x86_64 = makeRPM_x86_64 (diskImageFuns: diskImageFuns.fedora20x86_64) 70;
+
+
+    #deb_debian60i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian60i386) 50;
+    #deb_debian60x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.debian60x86_64) 50;
+    deb_debian7i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian7i386) 60;
+    deb_debian7x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.debian7x86_64) 60;
+
+    deb_ubuntu1110i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1110i386) 60;
+    deb_ubuntu1110x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1110x86_64) 60;
+    deb_ubuntu1204i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1204i386) 60;
+    deb_ubuntu1204x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1204x86_64) 60;
+    deb_ubuntu1210i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1210i386) 70;
+    deb_ubuntu1210x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1210x86_64) 70;
+    deb_ubuntu1304i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1304i386) 80;
+    deb_ubuntu1304x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1304x86_64) 80;
+    deb_ubuntu1310i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1310i386) 90;
+    deb_ubuntu1310x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1310x86_64) 90;
+    deb_ubuntu1404i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1404i386) 90;
+    deb_ubuntu1404x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1404x86_64) 90;
+
+
+    # System tests.
+    tests.remote_builds = (import ./tests/remote-builds.nix rec {
+      nix = build.x86_64-linux; system = "x86_64-linux";
+    }).test;
+
+    tests.nix_copy_closure = (import ./tests/nix-copy-closure.nix rec {
+      nix = build.x86_64-linux; system = "x86_64-linux";
+    }).test;
+
+
+    # Aggregate job containing the release-critical jobs.
+    release = pkgs.releaseTools.aggregate {
+      name = "nix-${tarball.version}";
+      meta.description = "Release-critical builds";
+      constituents =
+        [ tarball
+          #build.i686-freebsd
+          build.i686-linux
+          build.x86_64-darwin
+          #build.x86_64-freebsd
+          build.x86_64-linux
+          #binaryTarball.i686-freebsd
+          binaryTarball.i686-linux
+          binaryTarball.x86_64-darwin
+          #binaryTarball.x86_64-freebsd
+          binaryTarball.x86_64-linux
+          deb_debian7i386
+          deb_debian7x86_64
+          deb_ubuntu1304i386
+          deb_ubuntu1304x86_64
+          deb_ubuntu1310i386
+          deb_ubuntu1310x86_64
+          deb_ubuntu1404i386
+          deb_ubuntu1404x86_64
+          rpm_fedora19i386
+          rpm_fedora19x86_64
+          rpm_fedora20i386
+          rpm_fedora20x86_64
+          tests.remote_builds
+          tests.nix_copy_closure
+        ];
+    };
+
+  };
+
+
+  makeRPM_i686 = makeRPM "i686-linux";
+  makeRPM_x86_64 = makeRPM "x86_64-linux";
+
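+  # Build an RPM for the given system in a VM image selected from
+  # vmTools.diskImageFuns; ‘prio’ sets the Hydra scheduling priority.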
+  makeRPM =
+    system: diskImageFun: prio:
+
+    with import <nixpkgs> { inherit system; };
+
+    releaseTools.rpmBuild rec {
+      name = "nix-rpm";
+      src = jobs.tarball;
+      diskImage = (diskImageFun vmTools.diskImageFuns)
+        { extraPackages = [ "perl-DBD-SQLite" "perl-devel" "sqlite" "sqlite-devel" "bzip2-devel" "emacs" "perl-WWW-Curl" ]; };
+      memSize = 1024;
+      meta.schedulingPriority = prio;
+      postRPMInstall = "cd /tmp/rpmout/BUILD/nix-* && make installcheck";
+    };
+
+
+  makeDeb_i686 = makeDeb "i686-linux";
+  makeDeb_x86_64 = makeDeb "x86_64-linux";
+
+  makeDeb =
+    system: diskImageFun: prio:
+
+    with import <nixpkgs> { inherit system; };
+
+    releaseTools.debBuild {
+      name = "nix-deb";
+      src = jobs.tarball;
+      diskImage = (diskImageFun vmTools.diskImageFuns)
+        { extraPackages = [ "libdbd-sqlite3-perl" "libsqlite3-dev" "libbz2-dev" "libwww-curl-perl" ]; };
+      memSize = 1024;
+      meta.schedulingPriority = prio;
+      configureFlags = "--sysconfdir=/etc";
+      debRequires = [ "curl" "libdbd-sqlite3-perl" "libsqlite3-0" "libbz2-1.0" "bzip2" "xz-utils" "libwww-curl-perl" ];
+      debMaintainer = "Eelco Dolstra <eelco.dolstra@logicblox.com>";
+      doInstallCheck = true;
+    };
+
+
+in jobs
diff --git a/scripts/build-remote.pl.in b/scripts/build-remote.pl.in
new file mode 100755
index 000000000000..2eb339675904
--- /dev/null
+++ b/scripts/build-remote.pl.in
@@ -0,0 +1,275 @@
+#! @perl@ -w @perlFlags@
+
+use Fcntl qw(:DEFAULT :flock);
+use English '-no_match_vars';
+use IO::Handle;
+use Nix::Config;
+use Nix::SSH;
+use Nix::CopyClosure;
+use Nix::Store;
+no warnings('once');
+
+
+# General operation:
+#
+# Try to find a free machine of type $neededSystem.  We do this as
+# follows:
+# - We acquire an exclusive lock on $currentLoad/main-lock.
+# - For each machine $machine of type $neededSystem and for each $slot
+#   less than the maximum load for that machine, we try to get an
+#   exclusive lock on $currentLoad/$machine-$slot (without blocking).
+#   If we get such a lock, we send "accept" to the caller.  Otherwise,
+#   we send "postpone" and exit.
+# - We release the exclusive lock on $currentLoad/main-lock.
+# - We perform the build on $neededSystem.
+# - We release the exclusive lock on $currentLoad/$machine-$slot.
+#
+# The nice thing about this scheme is that if we die prematurely, the
+# locks are released automatically.
+
+
+# Make sure that we don't get any SSH passphrase or host key popups -
+# if there is any problem it should fail, not do something
+# interactive.
+$ENV{"DISPLAY"} = "";
+$ENV{"SSH_ASKPASS"} = "";
+
+
+sub sendReply {
+    my $reply = shift;
+    print STDERR "# $reply\n";
+}
+
+sub all { $_ || return 0 for @_; 1 }
+
+
+# Initialisation.
+my $loadIncreased = 0;
+
+my ($localSystem, $maxSilentTime, $printBuildTrace, $buildTimeout) = @ARGV;
+
+my $currentLoad = $ENV{"NIX_CURRENT_LOAD"} // "/run/nix/current-load";
+my $conf = $ENV{"NIX_REMOTE_SYSTEMS"} // "@sysconfdir@/nix/machines";
+
+
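+# Open (creating it if necessary) the lock file representing one build
+# slot on the given machine.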
+sub openSlotLock {
+    my ($machine, $slot) = @_;
+    my $slotLockFn = "$currentLoad/" . (join '+', @{$machine->{systemTypes}}) . "-" . $machine->{hostName} . "-$slot";
+    my $slotLock = new IO::Handle;
+    sysopen $slotLock, "$slotLockFn", O_RDWR|O_CREAT, 0600 or die;
+    return $slotLock;
+}
+
+
+# Read the list of machines.
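+# Each non-empty, non-comment line has whitespace-separated fields: host
+# name, comma-separated system types, SSH key file, maximum number of
+# jobs, speed factor, and optional comma-separated lists of supported and
+# mandatory features.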
+my @machines;
+if (defined $conf && -e $conf) {
+    open CONF, "<$conf" or die;
+    while (<CONF>) {
+        chomp;
+        s/\#.*$//g;
+        next if /^\s*$/;
+        my @tokens = split /\s/, $_;
+        my @supportedFeatures = split(/,/, $tokens[5] || "");
+        my @mandatoryFeatures = split(/,/, $tokens[6] || "");
+        push @machines,
+            { hostName => $tokens[0]
+            , systemTypes => [ split(/,/, $tokens[1]) ]
+            , sshKeys => $tokens[2]
+            , maxJobs => int($tokens[3])
+            , speedFactor => 1.0 * (defined $tokens[4] ? int($tokens[4]) : 1)
+            , supportedFeatures => [ @supportedFeatures, @mandatoryFeatures ]
+            , mandatoryFeatures => [ @mandatoryFeatures ]
+            , enabled => 1
+            };
+    }
+    close CONF;
+}
+
+
+
+# Wait for the calling process to ask us whether we can build some derivation.
+my ($drvPath, $hostName, $slotLock);
+my ($from, $to);
+
+REQ: while (1) {
+    $_ = <STDIN> || exit 0;
+    (my $amWilling, my $neededSystem, $drvPath, my $requiredFeatures) = split;
+    my @requiredFeatures = split /,/, $requiredFeatures;
+
+    my $canBuildLocally = $amWilling && ($localSystem eq $neededSystem);
+
+    if (!defined $currentLoad) {
+        sendReply "decline";
+        next;
+    }
+
+    # Acquire the exclusive lock on $currentLoad/main-lock.
+    mkdir $currentLoad, 0777 or die unless -d $currentLoad;
+    my $mainLock = "$currentLoad/main-lock";
+    sysopen MAINLOCK, "$mainLock", O_RDWR|O_CREAT, 0600 or die;
+    flock(MAINLOCK, LOCK_EX) or die;
+
+
+    while (1) {
+        # Find all machines that can execute this build, i.e., that
+        # support builds for the given platform and features, and are
+        # not at their job limit.
+        my $rightType = 0;
+        my @available = ();
+        LOOP: foreach my $cur (@machines) {
+            if ($cur->{enabled}
+                && (grep { $neededSystem eq $_ } @{$cur->{systemTypes}})
+                && all(map { my $f = $_; 0 != grep { $f eq $_ } @{$cur->{supportedFeatures}} } (@requiredFeatures, @mandatoryFeatures))
+                && all(map { my $f = $_; 0 != grep { $f eq $_ } @requiredFeatures } @{$cur->{mandatoryFeatures}})
+                )
+            {
+                $rightType = 1;
+
+                # We have a machine of the right type.  Determine the load on
+                # the machine.
+                my $slot = 0;
+                my $load = 0;
+                my $free;
+                while ($slot < $cur->{maxJobs}) {
+                    my $slotLock = openSlotLock($cur, $slot);
+                    if (flock($slotLock, LOCK_EX | LOCK_NB)) {
+                        $free = $slot unless defined $free;
+                        flock($slotLock, LOCK_UN) or die;
+                    } else {
+                        $load++;
+                    }
+                    close $slotLock;
+                    $slot++;
+                }
+
+                push @available, { machine => $cur, load => $load, free => $free }
+                if $load < $cur->{maxJobs};
+            }
+        }
+
+        if (defined $ENV{NIX_DEBUG_HOOK}) {
+            print STDERR "load on " . $_->{machine}->{hostName} . " = " . $_->{load} . "\n"
+                foreach @available;
+        }
+
+
+        # Didn't find any available machine?  Then decline or postpone.
+        if (scalar @available == 0) {
+            # Postpone if we have a machine of the right type, except
+            # if the local system can and wants to do the build.
+            if ($rightType && !$canBuildLocally) {
+                sendReply "postpone";
+            } else {
+                sendReply "decline";
+            }
+            close MAINLOCK;
+            next REQ;
+        }
+
+
+        # Prioritise the available machines as follows:
+        # - First by load divided by speed factor, rounded to the nearest
+        #   integer.  This causes fast machines to be preferred over slow
+        #   machines with similar loads.
+        # - Then by speed factor.
+        # - Finally by load.
+        sub lf { my $x = shift; return int($x->{load} / $x->{machine}->{speedFactor} + 0.4999); }
+        @available = sort
+            { lf($a) <=> lf($b)
+                  || $b->{machine}->{speedFactor} <=> $a->{machine}->{speedFactor}
+                  || $a->{load} <=> $b->{load}
+            } @available;
+
+
+        # Select the best available machine and lock a free slot.
+        my $selected = $available[0];
+        my $machine = $selected->{machine};
+
+        $slotLock = openSlotLock($machine, $selected->{free});
+        flock($slotLock, LOCK_EX | LOCK_NB) or die;
+        utime undef, undef, $slotLock;
+
+        close MAINLOCK;
+
+
+        # Connect to the selected machine.
+        my @sshOpts = ("-i", $machine->{sshKeys});
+        $hostName = $machine->{hostName};
+        eval {
+            ($from, $to) = connectToRemoteNix($hostName, \@sshOpts, "2>&4");
+            # FIXME: check if builds are inhibited.
+        };
+        last REQ unless $@;
+        print STDERR "$@";
+        warn "unable to open SSH connection to ‘$hostName’, trying other available machines...\n";
+        $from = undef;
+        $to = undef;
+        $machine->{enabled} = 0;
+    }
+}
+
+
+# Tell Nix we've accepted the build.
+sendReply "accept";
+my @inputs = split /\s/, readline(STDIN);
+my @outputs = split /\s/, readline(STDIN);
+
+
+print STDERR "@ build-remote $drvPath $hostName\n" if $printBuildTrace;
+
+
+my $maybeSign = "";
+$maybeSign = "--sign" if -e "$Nix::Config::confDir/signing-key.sec";
+
+
+# Copy the derivation and its dependencies to the build machine.  This
+# is guarded by an exclusive lock per machine to prevent multiple
+# build-remote instances from copying to a machine simultaneously.
+# That's undesirable because we may end up with N instances uploading
+# the same missing path simultaneously, causing the effective network
+# bandwidth and target disk speed to be divided by N.
+my $uploadLock = "$currentLoad/$hostName.upload-lock";
+sysopen UPLOADLOCK, "$uploadLock", O_RDWR|O_CREAT, 0600 or die;
+eval {
+    local $SIG{ALRM} = sub { die "alarm\n" };
+    # Don't wait forever, so that a process that gets stuck while
+    # holding the lock doesn't block everybody else indefinitely.
+    # It's safe to continue after a timeout, just (potentially)
+    # inefficient.
+    alarm 15 * 60;
+    flock(UPLOADLOCK, LOCK_EX);
+    alarm 0;
+};
+if ($@) {
+    die unless $@ eq "alarm\n";
+    print STDERR "somebody is hogging $uploadLock, continuing...\n";
+    unlink $uploadLock;
+}
+Nix::CopyClosure::copyToOpen($from, $to, $hostName, [ $drvPath, @inputs ], 0, 0, $maybeSign ne "");
+close UPLOADLOCK;
+
+
+# Perform the build.
+print STDERR "building ‘$drvPath’ on ‘$hostName’\n";
+writeInt(6, $to) or die; # == cmdBuildPaths
+writeStrings([$drvPath], $to);
+writeInt($maxSilentTime, $to);
+writeInt($buildTimeout, $to);
+my $res = readInt($from);
+if ($res != 0) {
+    my $msg = readString($from);
+    print STDERR "error: $msg on ‘$hostName’\n";
+    exit $res;
+}
+
+
+# Copy the output from the build machine.
+my @outputs2 = grep { !isValidPath($_) } @outputs;
+if (scalar @outputs2 > 0) {
+    writeInt(5, $to); # == cmdExportPaths
+    writeInt(0, $to); # don't sign
+    writeStrings(\@outputs2, $to);
+    $ENV{'NIX_HELD_LOCKS'} = "@outputs2"; # FIXME: ugly
+    importPaths(fileno($from));
+}
diff --git a/scripts/copy-from-other-stores.pl.in b/scripts/copy-from-other-stores.pl.in
new file mode 100755
index 000000000000..cf36bae9e803
--- /dev/null
+++ b/scripts/copy-from-other-stores.pl.in
@@ -0,0 +1,102 @@
+#! @perl@ -w @perlFlags@
+
+use utf8;
+use strict;
+use File::Basename;
+use IO::Handle;
+
+my $binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@";
+
+
+STDOUT->autoflush(1);
+
+my @remoteStoresAll = split ':', ($ENV{"NIX_OTHER_STORES"} or "");
+
+my @remoteStores;
+foreach my $dir (@remoteStoresAll) {
+    push @remoteStores, glob($dir);
+}
+
+exit if scalar @remoteStores == 0;
+print "\n";
+
+
+$ENV{"NIX_REMOTE"} = "";
+
+
+sub findStorePath {
+    my $storePath = shift;
+    foreach my $store (@remoteStores) {
+        my $sourcePath = "$store/store/" . basename $storePath;
+        next unless -e $sourcePath || -l $sourcePath;
+        $ENV{"NIX_DB_DIR"} = "$store/var/nix/db";
+        return ($store, $sourcePath) if
+            system("$binDir/nix-store --check-validity $storePath") == 0;
+    }
+    return undef;
+}
+
+
+if ($ARGV[0] eq "--query") {
+
+    while (<STDIN>) {
+        chomp;
+        my ($cmd, @args) = split " ", $_;
+
+        if ($cmd eq "have") {
+            foreach my $storePath (@args) {
+                print "$storePath\n" if defined findStorePath($storePath);
+            }
+            print "\n";
+        }
+
+        elsif ($cmd eq "info") {
+            foreach my $storePath (@args) {
+                my ($store, $sourcePath) = findStorePath($storePath);
+                next unless defined $store;
+
+                $ENV{"NIX_DB_DIR"} = "$store/var/nix/db";
+
+                my $deriver = `$binDir/nix-store --query --deriver $storePath`;
+                die "cannot query deriver of ‘$storePath’" if $? != 0;
+                chomp $deriver;
+                $deriver = "" if $deriver eq "unknown-deriver";
+
+                my @references = split "\n",
+                    `$binDir/nix-store --query --references $storePath`;
+                die "cannot query references of ‘$storePath’" if $? != 0;
+
+                my $narSize = `$binDir/nix-store --query --size $storePath`;
+                die "cannot query size of ‘$storePath’" if $? != 0;
+                chomp $narSize;
+
+                print "$storePath\n";
+                print "$deriver\n";
+                print scalar @references, "\n";
+                print "$_\n" foreach @references;
+                print "0\n";
+                print "$narSize\n";
+            }
+
+            print "\n";
+        }
+
+        else { die "unknown command ‘$cmd’"; }
+    }
+}
+
+
+elsif ($ARGV[0] eq "--substitute") {
+    die unless scalar @ARGV == 3;
+    my $storePath = $ARGV[1];
+    my $destPath = $ARGV[2];
+    my ($store, $sourcePath) = findStorePath $storePath;
+    die unless $store;
+    print STDERR "\n*** Copying ‘$storePath’ from ‘$sourcePath’\n\n";
+    system("$binDir/nix-store --dump $sourcePath | $binDir/nix-store --restore $destPath") == 0
+        or die "cannot copy ‘$sourcePath’ to ‘$storePath’";
+    print "\n"; # no hash to verify
+}
+
+
+else { die; }
diff --git a/scripts/download-from-binary-cache.pl.in b/scripts/download-from-binary-cache.pl.in
new file mode 100644
index 000000000000..b7eb72a30187
--- /dev/null
+++ b/scripts/download-from-binary-cache.pl.in
@@ -0,0 +1,609 @@
+#! @perl@ -w @perlFlags@
+
+use DBI;
+use DBD::SQLite;
+use File::Basename;
+use IO::Select;
+use Nix::Config;
+use Nix::Store;
+use Nix::Utils;
+use Nix::Manifest;
+use WWW::Curl::Easy;
+use WWW::Curl::Multi;
+use strict;
+
+
+Nix::Config::readConfig;
+
+my @caches;
+my $gotCaches = 0;
+
+my $maxParallelRequests = int($Nix::Config::config{"binary-caches-parallel-connections"} // 150);
+$maxParallelRequests = 1 if $maxParallelRequests < 1;
+
+my $ttlNegative = 24 * 3600; # when to purge negative lookups from the database
+my $ttlNegativeUse = 3600; # how long negative lookups are valid for non-"have" lookups
+my $didExpiration = 0;
+
+my $showAfter = 5; # show that we're waiting for a request after this many seconds
+
+my $debug = ($Nix::Config::config{"debug-subst"} // "") eq 1 || ($Nix::Config::config{"untrusted-debug-subst"} // "") eq 1;
+
+my $cacheFileURLs = ($ENV{"_NIX_CACHE_FILE_URLS"} // "") eq 1; # for testing
+
+my ($dbh, $queryCache, $insertNAR, $queryNAR, $insertNARExistence, $queryNARExistence, $expireNARExistence);
+
+my $curlm = WWW::Curl::Multi->new;
+my $activeRequests = 0;
+my $curlIdCount = 1;
+my %requests;
+my %scheduled;
+my $caBundle = $ENV{"SSL_CERT_FILE"} // $ENV{"CURL_CA_BUNDLE"} // $ENV{"OPENSSL_X509_CERT_FILE"};
+
+my $userName = getpwuid($<) || $ENV{"USER"} or die "cannot figure out user name";
+
+my $requireSignedBinaryCaches = ($Nix::Config::config{"signed-binary-caches"} // "0") ne "0";
+
+my $curlConnectTimeout = int(
+    $Nix::Config::config{"untrusted-connect-timeout"} //
+    $Nix::Config::config{"connect-timeout"} //
+    $ENV{"NIX_CONNECT_TIMEOUT"} // 0);
+
+
+sub addRequest {
+    my ($storePath, $url, $head) = @_;
+
+    my $curl = WWW::Curl::Easy->new;
+    my $curlId = $curlIdCount++;
+    $requests{$curlId} = { storePath => $storePath, url => $url, handle => $curl, content => "", type => $head ? "HEAD" : "GET"
+                         , shown => 0, started => time() };
+
+    $curl->setopt(CURLOPT_PRIVATE, $curlId);
+    $curl->setopt(CURLOPT_URL, $url);
+    open (my $fh, ">", \$requests{$curlId}->{content});
+    $curl->setopt(CURLOPT_WRITEDATA, $fh);
+    $curl->setopt(CURLOPT_FOLLOWLOCATION, 1);
+    $curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle;
+    $curl->setopt(CURLOPT_USERAGENT, "Nix/$Nix::Config::version");
+    $curl->setopt(CURLOPT_NOBODY, 1) if $head;
+    $curl->setopt(CURLOPT_FAILONERROR, 1);
+    $curl->setopt(CURLOPT_CONNECTTIMEOUT, $curlConnectTimeout);
+
+    if ($activeRequests >= $maxParallelRequests) {
+        $scheduled{$curlId} = 1;
+    } else {
+        $curlm->add_handle($curl);
+        $activeRequests++;
+    }
+
+    return $requests{$curlId};
+}
+
+
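+# Drive the curl multi handle until all outstanding requests have
+# finished, starting queued requests as active ones complete and warning
+# about requests that take longer than $showAfter seconds.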
+sub processRequests {
+    while ($activeRequests) {
+        my ($rfds, $wfds, $efds) = $curlm->fdset();
+        #print STDERR "R = @{$rfds}, W = @{$wfds}, E = @{$efds}\n";
+
+        # Sleep until we can read or write some data.
+        if (scalar @{$rfds} + scalar @{$wfds} + scalar @{$efds} > 0) {
+            IO::Select->select(IO::Select->new(@{$rfds}), IO::Select->new(@{$wfds}), IO::Select->new(@{$efds}), 1.0);
+        }
+
+        if ($curlm->perform() != $activeRequests) {
+            while (my ($id, $result) = $curlm->info_read) {
+                if ($id) {
+                    my $request = $requests{$id} or die;
+                    my $handle = $request->{handle};
+                    $request->{result} = $result;
+                    $request->{httpStatus} = $handle->getinfo(CURLINFO_RESPONSE_CODE);
+
+                    print STDERR "$request->{type} on $request->{url} [$request->{result}, $request->{httpStatus}]\n" if $debug;
+
+                    $activeRequests--;
+                    delete $request->{handle};
+
+                    if (scalar(keys %scheduled) > 0) {
+                        my $id2 = (keys %scheduled)[0];
+                        $curlm->add_handle($requests{$id2}->{handle});
+                        $activeRequests++;
+                        delete $scheduled{$id2};
+                    }
+                }
+            }
+        }
+
+        my $time = time();
+        while (my ($key, $request) = each %requests) {
+            next unless defined $request->{handle};
+            next if $request->{shown};
+            if ($time > $request->{started} + $showAfter) {
+                print STDERR "still waiting for ‘$request->{url}’ after $showAfter seconds...\n";
+                $request->{shown} = 1;
+            }
+        }
+    }
+}
+
+
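+# Open (and if necessary create) the SQLite database that caches
+# .narinfo contents and (non-)existence lookups per binary cache.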
+sub initCache {
+    my $dbPath = "$Nix::Config::stateDir/binary-cache-v3.sqlite";
+
+    unlink "$Nix::Config::stateDir/binary-cache-v1.sqlite";
+    unlink "$Nix::Config::stateDir/binary-cache-v2.sqlite";
+
+    # Open/create the database.
+    $dbh = DBI->connect("dbi:SQLite:dbname=$dbPath", "", "")
+        or die "cannot open database ‘$dbPath’";
+    $dbh->{RaiseError} = 1;
+    $dbh->{PrintError} = 0;
+
+    $dbh->sqlite_busy_timeout(60 * 60 * 1000);
+
+    $dbh->do("pragma synchronous = off"); # we can always reproduce the cache
+    $dbh->do("pragma journal_mode = truncate");
+
+    # Initialise the database schema, if necessary.
+    $dbh->do(<<EOF);
+        create table if not exists BinaryCaches (
+            id        integer primary key autoincrement not null,
+            url       text unique not null,
+            timestamp integer not null,
+            storeDir  text not null,
+            wantMassQuery integer not null,
+            priority  integer not null
+        );
+EOF
+
+    $dbh->do(<<EOF);
+        create table if not exists NARs (
+            cache            integer not null,
+            storePath        text not null,
+            url              text not null,
+            compression      text not null,
+            fileHash         text,
+            fileSize         integer,
+            narHash          text,
+            narSize          integer,
+            refs             text,
+            deriver          text,
+            signedBy         text,
+            timestamp        integer not null,
+            primary key (cache, storePath),
+            foreign key (cache) references BinaryCaches(id) on delete cascade
+        );
+EOF
+
+    $dbh->do(<<EOF);
+        create table if not exists NARExistence (
+            cache            integer not null,
+            storePath        text not null,
+            exist            integer not null,
+            timestamp        integer not null,
+            primary key (cache, storePath),
+            foreign key (cache) references BinaryCaches(id) on delete cascade
+        );
+EOF
+
+    $dbh->do("create index if not exists NARExistenceByExistTimestamp on NARExistence (exist, timestamp)");
+
+    $queryCache = $dbh->prepare("select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?") or die;
+
+    $insertNAR = $dbh->prepare(
+        "insert or replace into NARs(cache, storePath, url, compression, fileHash, fileSize, narHash, " .
+        "narSize, refs, deriver, signedBy, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die;
+
+    $queryNAR = $dbh->prepare("select * from NARs where cache = ? and storePath = ?") or die;
+
+    $insertNARExistence = $dbh->prepare(
+        "insert or replace into NARExistence(cache, storePath, exist, timestamp) values (?, ?, ?, ?)") or die;
+
+    $queryNARExistence = $dbh->prepare("select exist, timestamp from NARExistence where cache = ? and storePath = ?") or die;
+
+    $expireNARExistence = $dbh->prepare("delete from NARExistence where exist = ? and timestamp < ?") or die;
+}
+
+
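+# Determine the binary caches to use from the configuration file, the
+# per-user channel files and the ‘untrusted-*’ options passed by clients,
+# and record each cache's store directory and priority in the database.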
+sub getAvailableCaches {
+    return if $gotCaches;
+    $gotCaches = 1;
+
+    sub strToList {
+        my ($s) = @_;
+        return map { s/\/+$//; $_ } split(/ /, $s);
+    }
+
+    my @urls = strToList($Nix::Config::config{"binary-caches"} //
+        ($Nix::Config::storeDir eq "/nix/store" ? "http://cache.nixos.org" : ""));
+
+    my $urlsFiles = $Nix::Config::config{"binary-cache-files"}
+        // "$Nix::Config::stateDir/profiles/per-user/$userName/channels/binary-caches/*";
+    foreach my $urlFile (glob $urlsFiles) {
+        next unless -f $urlFile;
+        open FILE, "<$urlFile" or die "cannot open ‘$urlFile’\n";
+        my $url = <FILE>; chomp $url;
+        close FILE;
+        push @urls, strToList($url);
+    }
+
+    push @urls, strToList($Nix::Config::config{"extra-binary-caches"} // "");
+
+    # Allow Nix daemon users to override the binary caches to a subset
+    # of those listed in the config file.  Note that ‘untrusted-*’
+    # denotes options passed by the client.
+    my @trustedUrls = uniq(@urls, strToList($Nix::Config::config{"trusted-binary-caches"} // ""));
+
+    if (defined $Nix::Config::config{"untrusted-binary-caches"}) {
+        my @untrustedUrls = strToList $Nix::Config::config{"untrusted-binary-caches"};
+        @urls = ();
+        foreach my $url (@untrustedUrls) {
+            die "binary cache ‘$url’ is not trusted (please add it to ‘trusted-binary-caches’ in $Nix::Config::confDir/nix.conf)\n"
+                unless scalar(grep { $url eq $_ } @trustedUrls) > 0;
+            push @urls, $url;
+        }
+    }
+
+    my @untrustedUrls = strToList $Nix::Config::config{"untrusted-extra-binary-caches"} // "";
+    foreach my $url (@untrustedUrls) {
+        unless (scalar(grep { $url eq $_ } @trustedUrls) > 0) {
+            warn "binary cache ‘$url’ is not trusted (please add it to ‘trusted-binary-caches’ in $Nix::Config::confDir/nix.conf)\n";
+            next;
+        }
+        push @urls, $url;
+    }
+
+    foreach my $url (uniq @urls) {
+
+        # FIXME: not atomic.
+        $queryCache->execute($url);
+        my $res = $queryCache->fetchrow_hashref();
+        if (defined $res) {
+            next if $res->{storeDir} ne $Nix::Config::storeDir;
+            push @caches, { id => $res->{id}, url => $url, wantMassQuery => $res->{wantMassQuery}, priority => $res->{priority} };
+            next;
+        }
+
+        # Get the cache info file.
+        my $request = addRequest(undef, $url . "/nix-cache-info");
+        processRequests;
+
+        if ($request->{result} != 0) {
+            print STDERR "could not download ‘$request->{url}’ (" .
+                ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n";
+            next;
+        }
+
+        my $storeDir = "/nix/store";
+        my $wantMassQuery = 0;
+        my $priority = 50;
+        foreach my $line (split "\n", $request->{content}) {
+            unless ($line =~ /^(.*): (.*)$/) {
+                print STDERR "bad cache info file ‘$request->{url}’\n";
+                return undef;
+            }
+            if ($1 eq "StoreDir") { $storeDir = $2; }
+            elsif ($1 eq "WantMassQuery") { $wantMassQuery = int($2); }
+            elsif ($1 eq "Priority") { $priority = int($2); }
+        }
+
+        $dbh->do("insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)",
+                 {}, $url, time(), $storeDir, $wantMassQuery, $priority);
+        $queryCache->execute($url);
+        $res = $queryCache->fetchrow_hashref() or die;
+        next if $storeDir ne $Nix::Config::storeDir;
+        push @caches, { id => $res->{id}, url => $url, wantMassQuery => $wantMassQuery, priority => $priority };
+    }
+
+    @caches = sort { $a->{priority} <=> $b->{priority} } @caches;
+
+    expireNegative();
+}
+
+
+sub shouldCache {
+    my ($url) = @_;
+    return $cacheFileURLs || $url !~ /^file:/;
+}
+
+
+sub processNARInfo {
+    my ($storePath, $cache, $request) = @_;
+
+    if ($request->{result} != 0) {
+        if ($request->{result} != 37 && $request->{httpStatus} != 404 && $request->{httpStatus} != 403) {
+            print STDERR "could not download ‘$request->{url}’ (" .
+                ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n";
+        } else {
+            $insertNARExistence->execute($cache->{id}, basename($storePath), 0, time())
+                if shouldCache $request->{url};
+        }
+        return undef;
+    }
+
+    my $narInfo = parseNARInfo($storePath, $request->{content}, $requireSignedBinaryCaches, $request->{url});
+    return undef unless defined $narInfo;
+
+    die if $requireSignedBinaryCaches && !defined $narInfo->{signedBy};
+
+    # Cache the result.
+    $insertNAR->execute(
+        $cache->{id}, basename($storePath), $narInfo->{url}, $narInfo->{compression},
+        $narInfo->{fileHash}, $narInfo->{fileSize}, $narInfo->{narHash}, $narInfo->{narSize},
+        join(" ", @{$narInfo->{refs}}), $narInfo->{deriver}, $narInfo->{signedBy}, time())
+        if shouldCache $request->{url};
+
+    return $narInfo;
+}
+
+
+sub getCachedInfoFrom {
+    my ($storePath, $cache) = @_;
+
+    $queryNAR->execute($cache->{id}, basename($storePath));
+    my $res = $queryNAR->fetchrow_hashref();
+    return undef unless defined $res;
+
+    # We may previously have cached this info when signature checking
+    # was disabled.  In that case, ignore the cached info.
+    return undef if $requireSignedBinaryCaches && !defined $res->{signedBy};
+
+    return
+        { url => $res->{url}
+        , compression => $res->{compression}
+        , fileHash => $res->{fileHash}
+        , fileSize => $res->{fileSize}
+        , narHash => $res->{narHash}
+        , narSize => $res->{narSize}
+        , refs => [ split " ", $res->{refs} ]
+        , deriver => $res->{deriver}
+        , signedBy => $res->{signedBy}
+        } if defined $res;
+}
+
+
+sub negativeHit {
+    my ($storePath, $cache) = @_;
+    $queryNARExistence->execute($cache->{id}, basename($storePath));
+    my $res = $queryNARExistence->fetchrow_hashref();
+    return defined $res && $res->{exist} == 0 && time() - $res->{timestamp} < $ttlNegativeUse;
+}
+
+
+sub positiveHit {
+    my ($storePath, $cache) = @_;
+    return 1 if defined getCachedInfoFrom($storePath, $cache);
+    $queryNARExistence->execute($cache->{id}, basename($storePath));
+    my $res = $queryNARExistence->fetchrow_hashref();
+    return defined $res && $res->{exist} == 1;
+}
+
+
+sub expireNegative {
+    return if $didExpiration;
+    $didExpiration = 1;
+    my $time = time();
+    # Round up to the next multiple of the TTL to ensure that we do
+    # expiration only once per time interval.  E.g. if $ttlNegative ==
+    # 3600, we expire entries at most once per hour.  This is
+    # presumably faster than expiring a few entries per request (and
+    # thus doing a transaction).
+    my $limit = (int($time / $ttlNegative) - 1) * $ttlNegative;
+    $expireNARExistence->execute($limit, 0);
+    print STDERR "expired ", $expireNARExistence->rows, " negative entries\n" if $debug;
+}
+
+
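+# Print the reply for an ‘info’ query on a path: its deriver, its
+# references, and the download and NAR sizes.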
+sub printInfo {
+    my ($storePath, $info) = @_;
+    print "$storePath\n";
+    print $info->{deriver} ? "$Nix::Config::storeDir/$info->{deriver}" : "", "\n";
+    print scalar @{$info->{refs}}, "\n";
+    print "$Nix::Config::storeDir/$_\n" foreach @{$info->{refs}};
+    print $info->{fileSize} || 0, "\n";
+    print $info->{narSize} || 0, "\n";
+}
+
+
+sub infoUrl {
+    my ($binaryCacheUrl, $storePath) = @_;
+    my $pathHash = substr(basename($storePath), 0, 32);
+    my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo";
+}
+
+
+sub printInfoParallel {
+    my @paths = @_;
+
+    # First print all paths for which we have cached info.
+    my @left;
+    foreach my $storePath (@paths) {
+        my $found = 0;
+        foreach my $cache (@caches) {
+            my $info = getCachedInfoFrom($storePath, $cache);
+            if (defined $info) {
+                printInfo($storePath, $info);
+                $found = 1;
+                last;
+            }
+        }
+        push @left, $storePath if !$found;
+    }
+
+    return if scalar @left == 0;
+
+    foreach my $cache (@caches) {
+
+        my @left2;
+        %requests = ();
+        foreach my $storePath (@left) {
+            if (negativeHit($storePath, $cache)) {
+                push @left2, $storePath;
+                next;
+            }
+            addRequest($storePath, infoUrl($cache->{url}, $storePath));
+        }
+
+        processRequests;
+
+        foreach my $request (values %requests) {
+            my $info = processNARInfo($request->{storePath}, $cache, $request);
+            if (defined $info) {
+                printInfo($request->{storePath}, $info);
+            } else {
+                push @left2, $request->{storePath};
+            }
+        }
+
+        @left = @left2;
+    }
+}
+
+
+sub printSubstitutablePaths {
+    my @paths = @_;
+
+    # First look for paths that have cached info.
+    my @left;
+    foreach my $storePath (@paths) {
+        my $found = 0;
+        foreach my $cache (@caches) {
+            next unless $cache->{wantMassQuery};
+            if (positiveHit($storePath, $cache)) {
+                print "$storePath\n";
+                $found = 1;
+                last;
+            }
+        }
+        push @left, $storePath if !$found;
+    }
+
+    return if scalar @left == 0;
+
+    # For remaining paths, do HEAD requests.
+    foreach my $cache (@caches) {
+        next unless $cache->{wantMassQuery};
+        my @left2;
+        %requests = ();
+        foreach my $storePath (@left) {
+            if (negativeHit($storePath, $cache)) {
+                push @left2, $storePath;
+                next;
+            }
+            addRequest($storePath, infoUrl($cache->{url}, $storePath), 1);
+        }
+
+        processRequests;
+
+        foreach my $request (values %requests) {
+            if ($request->{result} != 0) {
+                if ($request->{result} != 37 && $request->{httpStatus} != 404 && $request->{httpStatus} != 403) {
+                    print STDERR "could not check ‘$request->{url}’ (" .
+                        ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n";
+                } else {
+                    $insertNARExistence->execute($cache->{id}, basename($request->{storePath}), 0, time())
+                        if shouldCache $request->{url};
+                }
+                push @left2, $request->{storePath};
+            } else {
+                $insertNARExistence->execute($cache->{id}, basename($request->{storePath}), 1, time())
+                    if shouldCache $request->{url};
+                print "$request->{storePath}\n";
+            }
+        }
+
+        @left = @left2;
+    }
+}
+
+
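+# Try each binary cache in priority order: fetch the .narinfo for the
+# path, download and decompress the NAR into $destPath, and print the
+# expected NAR hash so that the result can be verified.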
+sub downloadBinary {
+    my ($storePath, $destPath) = @_;
+
+    foreach my $cache (@caches) {
+        my $info = getCachedInfoFrom($storePath, $cache);
+
+        unless (defined $info) {
+            next if negativeHit($storePath, $cache);
+            my $request = addRequest($storePath, infoUrl($cache->{url}, $storePath));
+            processRequests;
+            $info = processNARInfo($storePath, $cache, $request);
+        }
+
+        next unless defined $info;
+
+        my $decompressor;
+        if ($info->{compression} eq "bzip2") { $decompressor = "| $Nix::Config::bzip2 -d"; }
+        elsif ($info->{compression} eq "xz") { $decompressor = "| $Nix::Config::xz -d"; }
+        elsif ($info->{compression} eq "none") { $decompressor = ""; }
+        else {
+            print STDERR "unknown compression method ‘$info->{compression}’\n";
+            next;
+        }
+        my $url = "$cache->{url}/$info->{url}"; # FIXME: handle non-relative URLs
+        die if $requireSignedBinaryCaches && !defined $info->{signedBy};
+        print STDERR "\n*** Downloading ‘$url’ ", ($requireSignedBinaryCaches ? "(signed by ‘$info->{signedBy}’) " : ""), "to ‘$storePath’...\n";
+        checkURL $url;
+        if (system("$Nix::Config::curl --fail --location --insecure --connect-timeout $curlConnectTimeout '$url' $decompressor | $Nix::Config::binDir/nix-store --restore $destPath") != 0) {
+            warn "download of ‘$url’ failed" . ($! ? ": $!" : "") . "\n";
+            next;
+        }
+
+        # Tell Nix about the expected hash so it can verify it.
+        die unless defined $info->{narHash} && $info->{narHash} ne "";
+        print "$info->{narHash}\n";
+
+        print STDERR "\n";
+        return;
+    }
+
+    print STDERR "could not download ‘$storePath’ from any binary cache\n";
+    exit 1;
+}
+
+
+# Bail out right away if binary caches are disabled.
+exit 0 if
+    ($Nix::Config::config{"use-binary-caches"} // "true") eq "false" ||
+    ($Nix::Config::config{"untrusted-use-binary-caches"} // "true") eq "false";
+print "\n";
+flush STDOUT;
+
+initCache();
+
+
+if ($ARGV[0] eq "--query") {
+
+    while (<STDIN>) {
+        getAvailableCaches;
+        chomp;
+        my ($cmd, @args) = split " ", $_;
+
+        if ($cmd eq "have") {
+            print STDERR "checking binary caches for existence of @args\n" if $debug;
+            printSubstitutablePaths(@args);
+            print "\n";
+        }
+
+        elsif ($cmd eq "info") {
+            print STDERR "checking binary caches for info on @args\n" if $debug;
+            printInfoParallel(@args);
+            print "\n";
+        }
+
+        else { die "unknown command ‘$cmd’"; }
+
+        flush STDOUT;
+    }
+
+}
+
+elsif ($ARGV[0] eq "--substitute") {
+    my $storePath = $ARGV[1] or die;
+    my $destPath = $ARGV[2] or die;
+    getAvailableCaches;
+    downloadBinary($storePath, $destPath);
+}
+
+else {
+    die;
+}
diff --git a/scripts/download-using-manifests.pl.in b/scripts/download-using-manifests.pl.in
new file mode 100755
index 000000000000..e849a930e71e
--- /dev/null
+++ b/scripts/download-using-manifests.pl.in
@@ -0,0 +1,376 @@
+#! @perl@ -w @perlFlags@
+
+use utf8;
+use strict;
+use Nix::Config;
+use Nix::Manifest;
+use Nix::Store;
+use Nix::Utils;
+use POSIX qw(strftime);
+
+STDOUT->autoflush(1);
+
+my $logFile = "$Nix::Config::logDir/downloads";
+
+# For queries, skip expensive calls to nix-hash etc.  We're just
+# estimating the expected download size.
+my $fast = 1;
+
+# ‘--insecure’ is fine because Nix verifies the hash of the result.
+my $curl = "$Nix::Config::curl --fail --location --insecure";
+
+
+# Open the manifest cache and update it if necessary.
+my $dbh = updateManifestDB();
+exit 0 unless defined $dbh; # exit if there are no manifests
+print "\n";
+
+
+# $hashCache->{$algo}->{$path} yields the $algo-hash of $path.
+my $hashCache;
+
+
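+# Hashes in the manifest are either of the form "<algo>:<hash>" or a bare
+# hash without an algorithm prefix, which is treated as MD5.  For example,
+# parseHash("sha256:1abc...") returns ("sha256", "1abc...").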
+sub parseHash {
+    my $hash = shift;
+    if ($hash =~ /^(.+):(.+)$/) {
+        return ($1, $2);
+    } else {
+        return ("md5", $hash);
+    }
+}
+
+
+# Compute the most efficient sequence of downloads to produce the
+# given path.
+sub computeSmallestDownload {
+    my $targetPath = shift;
+
+    # Build a graph of all store paths that might contribute to the
+    # construction of $targetPath, and the special node "start".  The
+    # edges are either patch operations, or downloads of full NAR
+    # files.  The latter edges only occur between "start" and a store
+    # path.
+    my %graph;
+
+    $graph{"start"} = {d => 0, pred => undef, edges => []};
+
+    my @queue = ();
+    my $queueFront = 0;
+    my %done;
+
+    sub addNode {
+        my $graph = shift;
+        my $u = shift;
+        $$graph{$u} = {d => 999999999999, pred => undef, edges => []}
+            unless defined $$graph{$u};
+    }
+
+    sub addEdge {
+        my $graph = shift;
+        my $u = shift;
+        my $v = shift;
+        my $w = shift;
+        my $type = shift;
+        my $info = shift;
+        addNode $graph, $u;
+        push @{$$graph{$u}->{edges}},
+            {weight => $w, start => $u, end => $v, type => $type, info => $info};
+        my $n = scalar @{$$graph{$u}->{edges}};
+    }
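+    # Illustrative sketch of the structure built by addNode/addEdge (store
+    # paths abbreviated):
+    #   $graph{$path} = {
+    #       d     => <shortest known distance from "start">,
+    #       pred  => <edge by which that distance was reached, or undef>,
+    #       edges => [ { weight => ..., start => $path, end => ...,
+    #                    type => "present" | "patch" | "narfile", info => ... }, ... ],
+    #   };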
+
+    push @queue, $targetPath;
+
+    while ($queueFront < scalar @queue) {
+        my $u = $queue[$queueFront++];
+        next if defined $done{$u};
+        $done{$u} = 1;
+
+        addNode \%graph, $u;
+
+        # If the path already exists, it has distance 0 from the
+        # "start" node.
+        if (isValidPath($u)) {
+            addEdge \%graph, "start", $u, 0, "present", undef;
+        }
+
+        else {
+
+            # Add patch edges.
+            my $patchList = $dbh->selectall_arrayref(
+                "select * from Patches where storePath = ?",
+                { Slice => {} }, $u);
+
+            foreach my $patch (@{$patchList}) {
+                if (isValidPath($patch->{basePath})) {
+                    my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash};
+
+                    my $hash = $hashCache->{$baseHashAlgo}->{$patch->{basePath}};
+                    if (!defined $hash) {
+                        $hash = $fast && $baseHashAlgo eq "sha256"
+                            ? queryPathHash($patch->{basePath})
+                            : hashPath($baseHashAlgo, $baseHashAlgo ne "md5", $patch->{basePath});
+                        $hash =~ s/.*://;
+                        $hashCache->{$baseHashAlgo}->{$patch->{basePath}} = $hash;
+                    }
+
+                    next if $hash ne $baseHash;
+                }
+                push @queue, $patch->{basePath};
+                addEdge \%graph, $patch->{basePath}, $u, $patch->{size}, "patch", $patch;
+            }
+
+            # Add NAR file edges to the start node.
+            my $narFileList = $dbh->selectall_arrayref(
+                "select * from NARs where storePath = ?",
+                { Slice => {} }, $u);
+
+            foreach my $narFile (@{$narFileList}) {
+                # !!! how to handle files whose size is not known in advance?
+                # For now, assume some arbitrary size (1 GB).
+                # This has the side-effect of preferring non-Hydra downloads.
+                addEdge \%graph, "start", $u, ($narFile->{size} || 1000000000), "narfile", $narFile;
+            }
+        }
+    }
+
+
+    # Run Dijkstra's shortest path algorithm to determine the shortest
+    # sequence of download and/or patch actions that will produce
+    # $targetPath.
+
+    my @todo = keys %graph;
+
+    while (scalar @todo > 0) {
+
+        # Remove the closest element from the todo list.
+        # !!! inefficient, use a priority queue
+        @todo = sort { -($graph{$a}->{d} <=> $graph{$b}->{d}) } @todo;
+        my $u = pop @todo;
+
+        my $u_ = $graph{$u};
+
+        foreach my $edge (@{$u_->{edges}}) {
+            my $v_ = $graph{$edge->{end}};
+            if ($v_->{d} > $u_->{d} + $edge->{weight}) {
+                $v_->{d} = $u_->{d} + $edge->{weight};
+                # Store the edge; the edge's start node is the
+                # predecessor on the shortest path.
+                $v_->{pred} = $edge;
+            }
+        }
+    }
+
+
+    # Retrieve the shortest path from "start" to $targetPath.
+    my @path = ();
+    my $cur = $targetPath;
+    return () unless defined $graph{$targetPath}->{pred};
+    while ($cur ne "start") {
+        push @path, $graph{$cur}->{pred};
+        $cur = $graph{$cur}->{pred}->{start};
+    }
+
+    return @path;
+}
+
+
+# Parse the arguments.
+
+if ($ARGV[0] eq "--query") {
+
+    while (<STDIN>) {
+        chomp;
+        my ($cmd, @args) = split " ", $_;
+
+        if ($cmd eq "have") {
+            foreach my $storePath (@args) {
+                print "$storePath\n" if scalar @{$dbh->selectcol_arrayref("select 1 from NARs where storePath = ?", {}, $storePath)} > 0;
+            }
+            print "\n";
+        }
+
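+        # Reply format for each path (a sketch of the substituter protocol as
+        # used by nix-store): the store path, its deriver, the number of
+        # references followed by the references themselves, the estimated
+        # download size, and the NAR size; a final empty line ends the reply.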
+        elsif ($cmd eq "info") {
+            foreach my $storePath (@args) {
+
+                my $infos = $dbh->selectall_arrayref(
+                    "select * from NARs where storePath = ?",
+                    { Slice => {} }, $storePath);
+
+                next unless scalar @{$infos} > 0;
+                my $info = @{$infos}[0];
+
+                print "$storePath\n";
+                print "$info->{deriver}\n";
+                my @references = split " ", $info->{refs};
+                print scalar @references, "\n";
+                print "$_\n" foreach @references;
+
+                my @path = computeSmallestDownload $storePath;
+
+                my $downloadSize = 0;
+                while (scalar @path > 0) {
+                    my $edge = pop @path;
+                    my $u = $edge->{start};
+                    my $v = $edge->{end};
+                    if ($edge->{type} eq "patch") {
+                        $downloadSize += $edge->{info}->{size} || 0;
+                    }
+                    elsif ($edge->{type} eq "narfile") {
+                        $downloadSize += $edge->{info}->{size} || 0;
+                    }
+                }
+
+                print "$downloadSize\n";
+
+                my $narSize = $info->{narSize} || 0;
+                print "$narSize\n";
+            }
+
+            print "\n";
+        }
+
+        else { die "unknown command ‘$cmd’"; }
+    }
+
+    exit 0;
+}
+
+elsif ($ARGV[0] ne "--substitute") {
+    die;
+}
+
+
+die unless scalar @ARGV == 3;
+my $targetPath = $ARGV[1];
+my $destPath = $ARGV[2];
+$fast = 0;
+
+
+# Create a temporary directory.
+my $tmpDir = mkTempDir("nix-download");
+
+my $tmpNar = "$tmpDir/nar";
+my $tmpNar2 = "$tmpDir/nar2";
+
+
+open LOGFILE, ">>$logFile" or die "cannot open log file $logFile";
+
+my $date = strftime ("%F %H:%M:%S UTC", gmtime (time));
+print LOGFILE "$$ get $targetPath $date\n";
+
+print STDERR "\n*** Trying to download/patch ‘$targetPath’\n";
+
+
+# Compute the shortest path.
+my @path = computeSmallestDownload $targetPath;
+die "don't know how to produce $targetPath\n" if scalar @path == 0;
+
+
+# We don't need the manifest anymore, so close it as an optimisation:
+# if we still have SQLite locks blocking other processes (we
+# shouldn't), this gets rid of them.
+$dbh->disconnect;
+
+
+# Traverse the shortest path, perform the actions described by the
+# edges.
+my $curStep = 1;
+my $maxStep = scalar @path;
+
+my $finalNarHash;
+
+while (scalar @path > 0) {
+    my $edge = pop @path;
+    my $u = $edge->{start};
+    my $v = $edge->{end};
+
+    print STDERR "\n*** Step $curStep/$maxStep: ";
+
+    if ($edge->{type} eq "present") {
+        print STDERR "using already present path ‘$v’\n";
+        print LOGFILE "$$ present $v\n";
+
+        if ($curStep < $maxStep) {
+            # Since this is not the last step, the path will be used
+            # as a base for one or more patches.  So turn the base path
+            # into a NAR archive, to which we can apply the patch.
+            print STDERR "  packing base path...\n";
+            system("$Nix::Config::binDir/nix-store --dump $v > $tmpNar") == 0
+                or die "cannot dump ‘$v’";
+        }
+    }
+
+    elsif ($edge->{type} eq "patch") {
+        my $patch = $edge->{info};
+        print STDERR "applying patch ‘$patch->{url}’ to ‘$u’ to create ‘$v’\n";
+
+        print LOGFILE "$$ patch $patch->{url} $patch->{size} $patch->{baseHash} $u $v\n";
+
+        # Download the patch.
+        print STDERR "  downloading patch...\n";
+        my $patchPath = "$tmpDir/patch";
+        checkURL $patch->{url};
+        system("$curl '$patch->{url}' -o $patchPath") == 0
+            or die "cannot download patch ‘$patch->{url}’\n";
+
+        # Apply the patch to the NAR archive produced in step 1 (for
+        # the already present path) or a later step (for patch sequences).
+        print STDERR "  applying patch...\n";
+        system("$Nix::Config::libexecDir/nix/bspatch $tmpNar $tmpNar2 $patchPath") == 0
+            or die "cannot apply patch ‘$patchPath’ to $tmpNar\n";
+
+        if ($curStep < $maxStep) {
+            # The archive will be used as the base of the next patch.
+            rename "$tmpNar2", "$tmpNar" or die "cannot rename NAR archive: $!";
+        } else {
+            # This was the last patch.  Unpack the final NAR archive
+            # into the target path.
+            print STDERR "  unpacking patched archive...\n";
+            system("$Nix::Config::binDir/nix-store --restore $destPath < $tmpNar2") == 0
+                or die "cannot unpack $tmpNar2 to ‘$v’\n";
+        }
+
+        $finalNarHash = $patch->{narHash};
+    }
+
+    elsif ($edge->{type} eq "narfile") {
+        my $narFile = $edge->{info};
+        print STDERR "downloading ‘$narFile->{url}’ to ‘$v’\n";
+
+        my $size = $narFile->{size} || -1;
+        print LOGFILE "$$ narfile $narFile->{url} $size $v\n";
+
+        checkURL $narFile->{url};
+
+        my $decompressor =
+            $narFile->{compressionType} eq "bzip2" ? "| $Nix::Config::bzip2 -d" :
+            $narFile->{compressionType} eq "xz" ? "| $Nix::Config::xz -d" :
+            $narFile->{compressionType} eq "none" ? "" :
+            die "unknown compression type ‘$narFile->{compressionType}’";
+
+        if ($curStep < $maxStep) {
+            # The archive will be used as a base for a patch.
+            system("$curl '$narFile->{url}' $decompressor > $tmpNar") == 0
+                or die "cannot download and unpack ‘$narFile->{url}’ to ‘$v’\n";
+        } else {
+            # Unpack the archive to the target path.
+            system("$curl '$narFile->{url}' $decompressor | $Nix::Config::binDir/nix-store --restore '$destPath'") == 0
+                or die "cannot download and unpack ‘$narFile->{url}’ to ‘$v’\n";
+        }
+
+        $finalNarHash = $narFile->{narHash};
+    }
+
+    $curStep++;
+}
+
+
+# Tell Nix about the expected hash so it can verify it.
+die "cannot check integrity of the downloaded path since its hash is not known\n"
+    unless defined $finalNarHash;
+print "$finalNarHash\n";
+
+
+print STDERR "\n";
+print LOGFILE "$$ success\n";
+close LOGFILE;
diff --git a/scripts/find-runtime-roots.pl.in b/scripts/find-runtime-roots.pl.in
new file mode 100755
index 000000000000..e1a2dde556b6
--- /dev/null
+++ b/scripts/find-runtime-roots.pl.in
@@ -0,0 +1,79 @@
+#! @perl@ -w @perlFlags@
+
+use strict;
+use Nix::Utils;
+use Nix::Config;
+
+
+sub readProc {
+    return unless -d "/proc";
+
+    opendir DIR, "/proc" or return;
+
+    foreach my $name (readdir DIR) {
+        next unless $name =~ /^\d+$/;
+
+        my $process = "/proc/$name";
+
+        #print STDERR "=== $process\n";
+
+        my $target;
+        print "$target\n" if $target = readlink "$process/exe";
+        print "$target\n" if $target = readlink "$process/cwd";
+
+        if (opendir FDS, "$process/fd") {
+            foreach my $name (readdir FDS) {
+                $target = readlink "$process/fd/$name";
+                print "$target\n" if $target && substr($target, 0, 1) eq "/";
+            }
+            closedir FDS;
+        }
+
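+        # Each mapping line in /proc/<pid>/maps has six whitespace-separated
+        # fields (address range, permissions, offset, device, inode, pathname);
+        # the regex below keeps only lines whose pathname is absolute, e.g. a
+        # mapped library under $Nix::Config::storeDir.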
+        if (open MAP, "<$process/maps") {
+            while (<MAP>) {
+                next unless /^ \s* \S+ \s+ \S+ \s+ \S+ \s+ \S+ \s+ \S+ \s+ (\/\S+) \s* $/x;
+                print "$1\n";
+            }
+            close MAP;
+        }
+
+        # Get all store paths that appear in the environment of this process.
+        eval {
+            my $env = Nix::Utils::readFile "$process/environ";
+            my @matches = $env =~ /\Q$Nix::Config::storeDir\E\/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*/g;
+            print "$_\n" foreach @matches;
+        }
+    }
+
+    closedir DIR;
+}
+
+
+sub lsof {
+    return unless open LSOF, "lsof -n -w -F n 2> /dev/null |";
+
+    while (<LSOF>) {
+        next unless /^n (\/ .*)$/x;
+        print $1, "\n";
+    }
+
+    close LSOF;
+}
+
+
+readProc;
+lsof;
+
+
+sub printFile {
+    my ($fn) = @_;
+    if (-e $fn) {
+        print Nix::Utils::readFile($fn), "\n";
+    }
+}
+
+
+# This is rather NixOS-specific, so it probably shouldn't be here.
+printFile "/proc/sys/kernel/modprobe";
+printFile "/proc/sys/kernel/fbsplash";
+printFile "/proc/sys/kernel/poweroff_cmd";
diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh
new file mode 100644
index 000000000000..ef56149915b1
--- /dev/null
+++ b/scripts/install-nix-from-closure.sh
@@ -0,0 +1,123 @@
+#! /usr/bin/env bash
+
+set -e
+
+dest="/nix"
+self="$(dirname "$0")"
+nix="@nix@"
+
+if ! [ -e $self/.reginfo ]; then
+    echo "$0: incomplete installer (.reginfo is missing)" >&2
+    exit 1
+fi
+
+if [ -z "$USER" ]; then
+    echo "$0: \$USER is not set" >&2
+    exit 1
+fi
+
+if [ "$(id -u)" -eq 0 ]; then
+    echo "warning: installing Nix as root is not recommended" >&2
+fi
+
+echo "performing a single-user installation of Nix..." >&2
+
+if ! [ -e $dest ]; then
+    cmd="mkdir -m 0755 $dest && chown $USER $dest"
+    echo "directory $dest does not exist; creating it by running ‘$cmd’ using sudo" >&2
+    if ! sudo sh -c "$cmd"; then
+        echo "$0: please manually run ‘$cmd’ as root to create $dest" >&2
+        exit 1
+    fi
+fi
+
+if ! [ -w $dest ]; then
+    echo "$0: directory $dest exists, but is not writable by you; please run ‘chown -R $USER $dest’ as root" >&2
+    exit 1
+fi
+
+mkdir -p $dest/store
+
+echo -n "copying Nix to $dest/store..." >&2
+
+for i in $(cd $self/store && echo *); do
+    echo -n "." >&2
+    i_tmp="$dest/store/$i.$$"
+    if [ -e "$i_tmp" ]; then
+        rm -rf "$i_tmp"
+    fi
+    if ! [ -e "$dest/store/$i" ]; then
+        cp -Rp "$self/store/$i" "$i_tmp"
+        mv "$i_tmp" "$dest/store/$i"
+    fi
+done
+echo "" >&2
+
+echo "initialising Nix database..." >&2
+if ! $nix/bin/nix-store --init; then
+    echo "$0: failed to initialize the Nix database" >&2
+    exit 1
+fi
+
+if ! $nix/bin/nix-store --load-db < $self/.reginfo; then
+    echo "$0: unable to register valid paths" >&2
+    exit 1
+fi
+
+. $nix/etc/profile.d/nix.sh
+
+if ! $nix/bin/nix-env -i $nix; then
+    echo "$0: unable to install Nix into your default profile" >&2
+    exit 1
+fi
+
+# Subscribe the user to the Nixpkgs channel and fetch it.
+if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then
+    if [ -n "$SSL_CERT_FILE" ]; then
+        $nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable
+    else
+        $nix/bin/nix-channel --add http://nixos.org/channels/nixpkgs-unstable
+    fi
+fi
+$nix/bin/nix-channel --update nixpkgs
+
+# Install an SSL certificate bundle.
+$nix/bin/nix-env -iA nixpkgs.cacert || true
+
+# Make the shell source nix.sh during login.
+p=$NIX_LINK/etc/profile.d/nix.sh
+
+added=
+for i in .bash_profile .bash_login .profile; do
+    fn="$HOME/$i"
+    if [ -e "$fn" ]; then
+        if ! grep -q "$p" "$fn"; then
+            echo "modifying $fn..." >&2
+            echo "if [ -e $p ]; then . $p; fi # added by Nix installer" >> $fn
+        fi
+        added=1
+        break
+    fi
+done
+
+if [ -z "$added" ]; then
+    cat >&2 <<EOF
+
+Installation finished!  To ensure that the necessary environment
+variables are set, please add the line
+
+  . $p
+
+to your shell profile (e.g. ~/.profile).
+EOF
+else
+    cat >&2 <<EOF
+
+Installation finished!  To ensure that the necessary environment
+variables are set, either log in again, or type
+
+  . $p
+
+in your shell.
+EOF
+fi
diff --git a/scripts/local.mk b/scripts/local.mk
new file mode 100644
index 000000000000..f4c5e8097de4
--- /dev/null
+++ b/scripts/local.mk
@@ -0,0 +1,37 @@
+nix_bin_scripts := \
+  $(d)/nix-build \
+  $(d)/nix-channel \
+  $(d)/nix-collect-garbage \
+  $(d)/nix-copy-closure \
+  $(d)/nix-generate-patches \
+  $(d)/nix-install-package \
+  $(d)/nix-prefetch-url \
+  $(d)/nix-pull \
+  $(d)/nix-push
+
+bin-scripts += $(nix_bin_scripts)
+
+nix_substituters := \
+  $(d)/copy-from-other-stores.pl \
+  $(d)/download-from-binary-cache.pl \
+  $(d)/download-using-manifests.pl
+
+nix_noinst_scripts := \
+  $(d)/build-remote.pl \
+  $(d)/find-runtime-roots.pl \
+  $(d)/nix-http-export.cgi \
+  $(d)/nix-profile.sh \
+  $(d)/nix-reduce-build \
+  $(nix_substituters)
+
+noinst-scripts += $(nix_noinst_scripts)
+
+profiledir = $(sysconfdir)/profile.d
+
+$(eval $(call install-file-as, $(d)/nix-profile.sh, $(profiledir)/nix.sh, 0644))
+$(eval $(call install-program-in, $(d)/find-runtime-roots.pl, $(libexecdir)/nix))
+$(eval $(call install-program-in, $(d)/build-remote.pl, $(libexecdir)/nix))
+$(foreach prog, $(nix_substituters), $(eval $(call install-program-in, $(prog), $(libexecdir)/nix/substituters)))
+$(eval $(call install-symlink, nix-build, $(bindir)/nix-shell))
+
+clean-files += $(nix_bin_scripts) $(nix_noinst_scripts)
diff --git a/scripts/nix-build.in b/scripts/nix-build.in
new file mode 100755
index 000000000000..fb92a4909d4c
--- /dev/null
+++ b/scripts/nix-build.in
@@ -0,0 +1,286 @@
+#! @perl@ -w @perlFlags@
+
+use strict;
+use Nix::Config;
+use Nix::Store;
+use Nix::Utils;
+
+
+my $dryRun = 0;
+my $verbose = 0;
+my $runEnv = $0 =~ /nix-shell$/;
+my $pure = 0;
+my $fromArgs = 0;
+my $packages = 0;
+
+my @instArgs = ();
+my @buildArgs = ();
+my @exprs = ();
+
+my $shell = $ENV{SHELL} || "/bin/sh";
+my $envCommand = ""; # interactive shell
+my @envExclude = ();
+
+my $myName = $runEnv ? "nix-shell" : "nix-build";
+
+
+my $tmpDir = mkTempDir($myName);
+
+my $outLink = "./result";
+my $drvLink = "$tmpDir/derivation";
+
+# Ensure that the $tmpDir is deleted.
+$SIG{'INT'} = sub { exit 1 };
+
+
+for (my $n = 0; $n < scalar @ARGV; $n++) {
+    my $arg = $ARGV[$n];
+
+    if ($arg eq "--help") {
+        exec "man $myName" or die;
+    }
+
+    elsif ($arg eq "--version") {
+        print "$myName (Nix) $Nix::Config::version\n";
+        exit 0;
+    }
+
+    elsif ($arg eq "--add-drv-link") {
+        $drvLink = "./derivation";
+    }
+
+    elsif ($arg eq "--no-out-link" || $arg eq "--no-link") {
+        $outLink = "$tmpDir/result";
+    }
+
+    elsif ($arg eq "--drv-link") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        $drvLink = $ARGV[$n];
+    }
+
+    elsif ($arg eq "--out-link" || $arg eq "-o") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        $outLink = $ARGV[$n];
+    }
+
+    elsif ($arg eq "--attr" || $arg eq "-A" || $arg eq "-I") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        push @instArgs, ($arg, $ARGV[$n]);
+    }
+
+    elsif ($arg eq "--arg" || $arg eq "--argstr") {
+        die "$0: ‘$arg’ requires two arguments\n" unless $n + 2 < scalar @ARGV;
+        push @instArgs, ($arg, $ARGV[$n + 1], $ARGV[$n + 2]);
+        $n += 2;
+    }
+
+    elsif ($arg eq "--log-type") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        push @instArgs, ($arg, $ARGV[$n]);
+        push @buildArgs, ($arg, $ARGV[$n]);
+    }
+
+    elsif ($arg eq "--option") {
+        die "$0: ‘$arg’ requires two arguments\n" unless $n + 2 < scalar @ARGV;
+        push @instArgs, ($arg, $ARGV[$n + 1], $ARGV[$n + 2]);
+        push @buildArgs, ($arg, $ARGV[$n + 1], $ARGV[$n + 2]);
+        $n += 2;
+    }
+
+    elsif ($arg eq "--max-jobs" || $arg eq "-j" || $arg eq "--max-silent-time" || $arg eq "--log-type" || $arg eq "--cores" || $arg eq "--timeout" || $arg eq '--add-root') {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        push @buildArgs, ($arg, $ARGV[$n]);
+    }
+
+    elsif ($arg eq "--dry-run") {
+        push @buildArgs, "--dry-run";
+        $dryRun = 1;
+    }
+
+    elsif ($arg eq "--show-trace") {
+        push @instArgs, $arg;
+    }
+
+    elsif ($arg eq "-") {
+        @exprs = ("-");
+    }
+
+    elsif ($arg eq "--verbose" || substr($arg, 0, 2) eq "-v") {
+        push @buildArgs, $arg;
+        push @instArgs, $arg;
+        $verbose = 1;
+    }
+
+    elsif ($arg eq "--quiet" || $arg eq "--repair") {
+        push @buildArgs, $arg;
+        push @instArgs, $arg;
+    }
+
+    elsif ($arg eq "--check") {
+        push @buildArgs, $arg;
+    }
+
+    elsif ($arg eq "--run-env") { # obsolete
+        $runEnv = 1;
+    }
+
+    elsif ($arg eq "--command") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        $envCommand = "$ARGV[$n]\nexit $!";
+    }
+
+    elsif ($arg eq "--exclude") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        push @envExclude, $ARGV[$n];
+    }
+
+    elsif ($arg eq "--pure") { $pure = 1; }
+    elsif ($arg eq "--impure") { $pure = 0; }
+
+    elsif ($arg eq "--expr" || $arg eq "-E") {
+        $fromArgs = 1;
+        push @instArgs, "--expr";
+    }
+
+    elsif ($arg eq "--packages" || $arg eq "-p") {
+        $packages = 1;
+    }
+
+    elsif (substr($arg, 0, 1) eq "-") {
+        push @buildArgs, $arg;
+    }
+
+    else {
+        push @exprs, $arg;
+    }
+}
+
+if ($packages) {
+    push @instArgs, "--expr";
+    @exprs = (
+        'with import <nixpkgs> { }; runCommand "shell" { buildInputs = [ '
+        . (join " ", map { "($_)" } @exprs) . ']; } ""');
+} elsif (!$fromArgs) {
+    @exprs = ("shell.nix") if scalar @exprs == 0 && $runEnv && -e "shell.nix";
+    @exprs = ("default.nix") if scalar @exprs == 0;
+}
+
+$ENV{'IN_NIX_SHELL'} = 1 if $runEnv;
+
+
+foreach my $expr (@exprs) {
+
+    # Instantiate.
+    my @drvPaths;
+    if ($expr !~ /^\/.*\.drv$/) {
+        # !!! would prefer the perl 5.8.0 pipe open feature here.
+        my $pid = open(DRVPATHS, "-|") || exec "$Nix::Config::binDir/nix-instantiate", "--add-root", $drvLink, "--indirect", @instArgs, $expr;
+        while (<DRVPATHS>) {chomp; push @drvPaths, $_;}
+        if (!close DRVPATHS) {
+            die "nix-instantiate killed by signal " . ($? & 127) . "\n" if ($? & 127);
+            exit 1;
+        }
+    } else {
+        push @drvPaths, $expr;
+    }
+
+    if ($runEnv) {
+        die "$0: a single derivation is required\n" if scalar @drvPaths != 1;
+        my $drvPath = $drvPaths[0];
+        $drvPath = (split '!',$drvPath)[0];
+        $drvPath = readlink $drvPath or die "cannot read symlink ‘$drvPath’" if -l $drvPath;
+        my $drv = derivationFromPath($drvPath);
+
+        # Build or fetch all dependencies of the derivation.
+        my @inputDrvs = grep { my $x = $_; (grep { $x =~ $_ } @envExclude) == 0 } @{$drv->{inputDrvs}};
+        system("$Nix::Config::binDir/nix-store", "-r", "--no-output", "--no-gc-warning", @buildArgs, @inputDrvs, @{$drv->{inputSrcs}}) == 0
+            or die "$0: failed to build all dependencies\n";
+
+        # Set the environment.
+        my $tmp = $ENV{"TMPDIR"} // $ENV{"XDG_RUNTIME_DIR"} // "/tmp";
+        if ($pure) {
+            foreach my $name (keys %ENV) {
+                next if grep { $_ eq $name } ("HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER");
+                delete $ENV{$name};
+            }
+            # NixOS hack: prevent /etc/bashrc from sourcing /etc/profile.
+            $ENV{'__ETC_PROFILE_SOURCED'} = 1;
+        }
+        $ENV{'NIX_BUILD_TOP'} = $ENV{'TMPDIR'} = $ENV{'TEMPDIR'} = $ENV{'TMP'} = $ENV{'TEMP'} = $tmp;
+        $ENV{'NIX_STORE'} = $Nix::Config::storeDir;
+        $ENV{$_} = $drv->{env}->{$_} foreach keys %{$drv->{env}};
+
+        # Run a shell using the derivation's environment.  For
+        # convenience, source $stdenv/setup to setup additional
+        # environment variables and shell functions.  Also don't lose
+        # the current $PATH directories.
+        my $rcfile = "$tmpDir/rc";
+        writeFile(
+            $rcfile,
+            "rm -rf '$tmpDir'; " .
+            'unset BASH_ENV; ' .
+            '[ -n "$PS1" ] && [ -e ~/.bashrc ] && source ~/.bashrc; ' .
+            ($pure ? '' : 'p=$PATH; ' ) .
+            'dontAddDisableDepTrack=1; ' .
+            '[ -e $stdenv/setup ] && source $stdenv/setup; ' .
+            'if [ "$(type -t runHook)" = function ]; then runHook shellHook; fi; ' .
+            ($pure ? '' : 'PATH=$PATH:$p; unset p; ') .
+            'set +e; ' .
+            '[ -n "$PS1" ] && PS1="\n\[\033[1;32m\][nix-shell:\w]$\[\033[0m\] "; ' .
+            'unset NIX_ENFORCE_PURITY; ' .
+            'unset NIX_INDENT_MAKE; ' .
+            'shopt -u nullglob; ' .
+            'unset TZ; ' . (defined $ENV{'TZ'} ? "export TZ='${ENV{'TZ'}}'; " : '') .
+            $envCommand);
+        $ENV{BASH_ENV} = $rcfile;
+        exec($ENV{NIX_BUILD_SHELL} // "bash", "--rcfile", $rcfile);
+        die;
+    }
+
+    # Ugly hackery to make "nix-build -A foo.all" produce symlinks
+    # ./result, ./result-dev, and so on, rather than ./result,
+    # ./result-2-dev, and so on.  This combines multiple derivation
+    # paths into one "/nix/store/drv-path!out1,out2,..." argument.
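+    # For example, instantiation results ‘/nix/store/<hash>-foo.drv!out’ and
+    # ‘/nix/store/<hash>-foo.drv!dev’ (a hypothetical two-output derivation)
+    # collapse into the single argument ‘/nix/store/<hash>-foo.drv!out,dev’.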
+    my $prevDrvPath = "";
+    my @drvPaths2;
+    foreach my $drvPath (@drvPaths) {
+        my $p = $drvPath; my $output = "out";
+        if ($drvPath =~ /(.*)!(.*)/) {
+            $p = $1; $output = $2;
+        } else {
+            $p = $drvPath;
+        }
+        my $target = readlink $p or die "cannot read symlink ‘$p’";
+        print STDERR "derivation is $target\n" if $verbose;
+        if ($target eq $prevDrvPath) {
+            push @drvPaths2, (pop @drvPaths2) . "," . $output;
+        } else {
+            push @drvPaths2, $target . "!" . $output;
+            $prevDrvPath = $target;
+        }
+    }
+
+    # Build.
+    my @outPaths;
+    my $pid = open(OUTPATHS, "-|") || exec "$Nix::Config::binDir/nix-store", "--add-root", $outLink, "--indirect", "-r",
+        @buildArgs, @drvPaths2;
+    while (<OUTPATHS>) {chomp; push @outPaths, $_;}
+    if (!close OUTPATHS) {
+        die "nix-store killed by signal " . ($? & 127) . "\n" if ($? & 127);
+        exit $? >> 8 || 1;
+    }
+
+    next if $dryRun;
+
+    foreach my $outPath (@outPaths) {
+        my $target = readlink $outPath or die "cannot read symlink ‘$outPath’";
+        print "$target\n";
+    }
+}
diff --git a/scripts/nix-channel.in b/scripts/nix-channel.in
new file mode 100755
index 000000000000..407f27490410
--- /dev/null
+++ b/scripts/nix-channel.in
@@ -0,0 +1,209 @@
+#! @perl@ -w @perlFlags@
+
+use strict;
+use File::Basename;
+use File::Path qw(mkpath);
+use Nix::Config;
+use Nix::Manifest;
+
+Nix::Config::readConfig;
+
+my $manifestDir = $Nix::Config::manifestDir;
+
+
+# Turn on caching in nix-prefetch-url.
+my $channelCache = "$Nix::Config::stateDir/channel-cache";
+mkdir $channelCache, 0755 unless -e $channelCache;
+$ENV{'NIX_DOWNLOAD_CACHE'} = $channelCache if -W $channelCache;
+
+# Figure out the name of the `.nix-channels' file to use.
+my $home = $ENV{"HOME"} or die "\$HOME not set\n";
+my $channelsList = "$home/.nix-channels";
+my $nixDefExpr = "$home/.nix-defexpr";
+
+# Figure out the name of the channels profile.
+my $userName = getpwuid($<) || $ENV{"USER"} or die "cannot figure out user name";
+my $profile = "$Nix::Config::stateDir/profiles/per-user/$userName/channels";
+mkpath(dirname $profile, 0, 0755);
+
+my %channels;
+
+
+# Reads the list of channels.
+sub readChannels {
+    return if (!-f $channelsList);
+    open CHANNELS, "<$channelsList" or die "cannot open ‘$channelsList’: $!";
+    while (<CHANNELS>) {
+        chomp;
+        next if /^\s*\#/;
+        my ($url, $name) = split ' ', $_;
+        $url =~ s/\/*$//; # remove trailing slashes
+        $name = basename $url unless defined $name;
+        $channels{$name} = $url;
+    }
+    close CHANNELS;
+}
+
+
+# Writes the list of channels.
+sub writeChannels {
+    open CHANNELS, ">$channelsList" or die "cannot open ‘$channelsList’: $!";
+    foreach my $name (keys %channels) {
+        print CHANNELS "$channels{$name} $name\n";
+    }
+    close CHANNELS;
+}
+
+
+# Adds a channel.
+sub addChannel {
+    my ($url, $name) = @_;
+    readChannels;
+    $channels{$name} = $url;
+    writeChannels;
+}
+
+
+# Remove a channel.
+sub removeChannel {
+    my ($name) = @_;
+    readChannels;
+    my $url = $channels{$name};
+    deleteOldManifests($url . "/MANIFEST", undef) if defined $url;
+    delete $channels{$name};
+    writeChannels;
+
+    system("$Nix::Config::binDir/nix-env --profile '$profile' -e '$name'") == 0
+        or die "cannot remove channel ‘$name’\n";
+}
+
+
+# Fetch Nix expressions and pull manifests from the subscribed
+# channels.
+sub update {
+    my @channelNames = @_;
+
+    readChannels;
+
+    # Download each channel.
+    my $exprs = "";
+    foreach my $name (keys %channels) {
+        next if scalar @channelNames > 0 && ! grep { $_ eq $name } @channelNames;
+
+        my $url = $channels{$name};
+        my $origUrl = "$url/MANIFEST";
+
+        # Check if $url is a redirect.  If so, follow it now to ensure
+        # consistency if the redirection is changed between
+        # downloading the manifest and the tarball.
+        my $headers = `$Nix::Config::curl --silent --head '$url'`;
+        die "$0: unable to check ‘$url’\n" if $? != 0;
+        $headers =~ s/\r//g;
+        $url = $1 if $headers =~ /^Location:\s*(.*)\s*$/m;
+
+        # Check if the channel advertises a binary cache.
+        my $binaryCacheURL = `$Nix::Config::curl --silent '$url'/binary-cache-url`;
+        my $extraAttrs = "";
+        my $getManifest = ($Nix::Config::config{"force-manifest"} // "false") eq "true";
+        if ($? == 0 && $binaryCacheURL ne "") {
+            $extraAttrs .= "binaryCacheURL = \"$binaryCacheURL\"; ";
+            deleteOldManifests($origUrl, undef);
+        } else {
+            $getManifest = 1;
+        }
+
+        if ($getManifest) {
+            # No binary cache, so pull the channel manifest.
+            mkdir $manifestDir, 0755 unless -e $manifestDir;
+            die "$0: you do not have write permission to ‘$manifestDir’!\n" unless -W $manifestDir;
+            $ENV{'NIX_ORIG_URL'} = $origUrl;
+            system("$Nix::Config::binDir/nix-pull", "--skip-wrong-store", "$url/MANIFEST") == 0
+                or die "cannot pull manifest from ‘$url’\n";
+        }
+
+        # Download the channel tarball.
+        my $fullURL = "$url/nixexprs.tar.xz";
+        system("$Nix::Config::curl --fail --silent --head '$fullURL' > /dev/null") == 0 or
+            $fullURL = "$url/nixexprs.tar.bz2";
+        print STDERR "downloading Nix expressions from ‘$fullURL’...\n";
+        my ($hash, $path) = `PRINT_PATH=1 QUIET=1 $Nix::Config::binDir/nix-prefetch-url '$fullURL'`;
+        die "cannot fetch ‘$fullURL’\n" if $? != 0;
+        chomp $path;
+
+        # If the URL contains a version number, append it to the name
+        # attribute (so that "nix-env -q" on the channels profile
+        # shows something useful).
+        my $cname = $name;
+        $cname .= $1 if basename($url) =~ /(-\d.*)$/;
+
+        $exprs .= "'f: f { name = \"$cname\"; channelName = \"$name\"; src = builtins.storePath \"$path\"; $extraAttrs }' ";
+    }
+
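+    # Each entry in $exprs has the shape ‘f: f { ... }’; nix-env applies it to
+    # the expression in <nix/unpack-channel.nix> (bound to ‘f’), which is
+    # expected to produce a derivation that unpacks the channel tarball.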
+    # Unpack the channel tarballs into the Nix store and install them
+    # into the channels profile.
+    print STDERR "unpacking channels...\n";
+    system("$Nix::Config::binDir/nix-env --profile '$profile' " .
+           "-f '<nix/unpack-channel.nix>' -i -E $exprs --quiet") == 0
+           or die "cannot unpack the channels";
+
+    # Make the channels appear in nix-env.
+    unlink $nixDefExpr if -l $nixDefExpr; # old-skool ~/.nix-defexpr
+    mkdir $nixDefExpr or die "cannot create directory ‘$nixDefExpr’" if !-e $nixDefExpr;
+    my $channelLink = "$nixDefExpr/channels";
+    unlink $channelLink; # !!! not atomic
+    symlink($profile, $channelLink) or die "cannot symlink ‘$channelLink’ to ‘$profile’";
+}
+
+
+die "$0: argument expected\n" if scalar @ARGV == 0;
+
+
+while (scalar @ARGV) {
+    my $arg = shift @ARGV;
+
+    if ($arg eq "--add") {
+        die "$0: ‘--add’ requires one or two arguments\n" if scalar @ARGV < 1 || scalar @ARGV > 2;
+        my $url = shift @ARGV;
+        my $name = shift @ARGV;
+        unless (defined $name) {
+            $name = basename $url;
+            $name =~ s/-unstable//;
+            $name =~ s/-stable//;
+        }
+        addChannel($url, $name);
+        last;
+    }
+
+    if ($arg eq "--remove") {
+        die "$0: ‘--remove’ requires one argument\n" if scalar @ARGV != 1;
+        removeChannel(shift @ARGV);
+        last;
+    }
+
+    if ($arg eq "--list") {
+        die "$0: ‘--list’ takes no arguments\n" if scalar @ARGV != 0;
+        readChannels;
+        foreach my $name (keys %channels) {
+            print "$name $channels{$name}\n";
+        }
+        last;
+    }
+
+    elsif ($arg eq "--update") {
+        update(@ARGV);
+        last;
+    }
+
+    elsif ($arg eq "--help") {
+        exec "man nix-channel" or die;
+    }
+
+    elsif ($arg eq "--version") {
+        print "nix-channel (Nix) $Nix::Config::version\n";
+        exit 0;
+    }
+
+    else {
+        die "unknown argument ‘$arg’; try ‘--help’\n";
+    }
+}
diff --git a/scripts/nix-collect-garbage.in b/scripts/nix-collect-garbage.in
new file mode 100755
index 000000000000..55e0ba7a6fab
--- /dev/null
+++ b/scripts/nix-collect-garbage.in
@@ -0,0 +1,65 @@
+#! @perl@ -w @perlFlags@
+
+use strict;
+use Nix::Config;
+
+my $profilesDir = "@localstatedir@/nix/profiles";
+
+
+# Process the command line arguments.
+my @args = ();
+my $arg;
+
+my $removeOld = 0;
+my $gen;
+my $dryRun = 0;
+
+while ($arg = shift) {
+    if ($arg eq "--delete-old" || $arg eq "-d") {
+        $removeOld = 1;
+        $gen = "old";
+    } elsif ($arg eq "--delete-older-than") {
+        $removeOld = 1;
+        $gen = shift;
+    } elsif ($arg eq "--dry-run") {
+        $dryRun = 1;
+    } elsif ($arg eq "--help") {
+        exec "man nix-collect-garbage" or die;
+    } else {
+        push @args, $arg;
+    }
+}
+
+
+# If `-d' was specified, remove all old generations of all profiles.
+# Of course, this makes rollbacks to before this point in time
+# impossible.
+
+sub removeOldGenerations;
+sub removeOldGenerations {
+    my $dir = shift;
+
+    my $dh;
+    opendir $dh, $dir or die;
+
+    foreach my $name (sort (readdir $dh)) {
+        next if $name eq "." || $name eq "..";
+        $name = $dir . "/" . $name;
+        if (-l $name && (readlink($name) =~ /link/)) {
+            print STDERR "removing old generations of profile $name\n";
+
+            system("$Nix::Config::binDir/nix-env", "-p", $name, "--delete-generations", $gen, $dryRun ? "--dry-run" : ());
+        }
+        elsif (! -l $name && -d $name) {
+            removeOldGenerations $name;
+        }
+    }
+
+    closedir $dh or die;
+}
+
+removeOldGenerations $profilesDir if $removeOld;
+
+
+# Run the actual garbage collector.
+exec "$Nix::Config::binDir/nix-store", "--gc", @args unless $dryRun;
diff --git a/scripts/nix-copy-closure.in b/scripts/nix-copy-closure.in
new file mode 100755
index 000000000000..10c2a9171d67
--- /dev/null
+++ b/scripts/nix-copy-closure.in
@@ -0,0 +1,99 @@
+#! @perl@ -w @perlFlags@
+
+use Nix::SSH;
+use Nix::Config;
+use Nix::Store;
+use Nix::CopyClosure;
+use List::Util qw(sum);
+
+
+if (scalar @ARGV < 1) {
+    print STDERR <<EOF
+Usage: nix-copy-closure [--from | --to] HOSTNAME [--sign] [--gzip] [--bzip2] [--xz] PATHS...
+EOF
+    ;
+    exit 1;
+}
+
+
+# Get the target host.
+my $sshHost;
+my $sign = 0;
+my $toMode = 1;
+my $includeOutputs = 0;
+my $dryRun = 0;
+my $useSubstitutes = 0;
+
+
+# !!! Copied from nix-pack-closure, should put this in a module.
+my @storePaths = ();
+
+while (@ARGV) {
+    my $arg = shift @ARGV;
+
+    if ($arg eq "--help") {
+        exec "man nix-copy-closure" or die;
+    }
+    elsif ($arg eq "--sign") {
+        $sign = 1;
+    }
+    elsif ($arg eq "--gzip" || $arg eq "--bzip2" || $arg eq "--xz") {
+        warn "$0: ‘$arg’ is not implemented\n" if $arg ne "--gzip";
+        push @globalSshOpts, "-C";
+    }
+    elsif ($arg eq "--from") {
+        $toMode = 0;
+    }
+    elsif ($arg eq "--to") {
+        $toMode = 1;
+    }
+    elsif ($arg eq "--include-outputs") {
+        $includeOutputs = 1;
+    }
+    elsif ($arg eq "--show-progress") {
+        warn "$0: ‘$arg’ is not implemented\n";
+    }
+    elsif ($arg eq "--dry-run") {
+        $dryRun = 1;
+    }
+    elsif ($arg eq "--use-substitutes" || $arg eq "-s") {
+        $useSubstitutes = 1;
+    }
+    elsif (!defined $sshHost) {
+        $sshHost = $arg;
+    }
+    else {
+        push @storePaths, $arg;
+    }
+}
+
+die "$0: you did not specify a host name\n" unless defined $sshHost;
+
+
+if ($toMode) { # Copy TO the remote machine.
+    Nix::CopyClosure::copyTo(
+        $sshHost, [ @sshOpts ], [ @storePaths ],
+        $includeOutputs, $dryRun, $sign, $useSubstitutes);
+}
+
+else { # Copy FROM the remote machine.
+
+    my ($from, $to) = connectToRemoteNix($sshHost, [ @sshOpts ]);
+
+    # Query the closure of the given store paths on the remote
+    # machine.  Paths are assumed to be store paths; there is no
+    # resolution (following of symlinks).
+    syswrite($to, pack("L<x4L<x4", 7, $includeOutputs ? 1 : 0)) or die;
+    writeStrings(\@storePaths, $to);
+    my @missing = grep { !isValidPath($_) } readStrings($from);
+
+    # Export the store paths on the remote machine and import them locally.
+    if (scalar @missing > 0) {
+        print STDERR "copying ", scalar @missing, " missing paths from ‘$sshHost’...\n";
+        writeInt(5, $to); # == cmdExportPaths
+        writeInt($sign ? 1 : 0, $to);
+        writeStrings(\@missing, $to);
+        importPaths(fileno($from));
+    }
+
+}
diff --git a/scripts/nix-generate-patches.in b/scripts/nix-generate-patches.in
new file mode 100755
index 000000000000..0a29c0548c1f
--- /dev/null
+++ b/scripts/nix-generate-patches.in
@@ -0,0 +1,51 @@
+#! @perl@ -w @perlFlags@
+
+use strict;
+use Nix::Manifest;
+use Nix::GeneratePatches;
+use Nix::Utils;
+
+if (scalar @ARGV != 5) {
+    print STDERR <<EOF;
+Usage: nix-generate-patches NAR-DIR PATCH-DIR PATCH-URI OLD-MANIFEST NEW-MANIFEST
+
+This command generates binary patches between NAR files listed in
+OLD-MANIFEST and NEW-MANIFEST.  The patches are written to the
+directory PATCH-DIR, and the prefix PATCH-URI is used to generate URIs
+for the patches.  The patches are added to NEW-MANIFEST.  All NARs are
+required to exist in NAR-DIR.  Patches are generated between
+succeeding versions of packages with the same name.
+EOF
+    exit 1;
+}
+
+my $narPath = $ARGV[0];
+my $patchesPath = $ARGV[1];
+my $patchesURL = $ARGV[2];
+my $srcManifest = $ARGV[3];
+my $dstManifest = $ARGV[4];
+
+my (%srcNarFiles, %srcLocalPaths, %srcPatches);
+readManifest $srcManifest, \%srcNarFiles, \%srcPatches;
+
+my (%dstNarFiles, %dstLocalPaths, %dstPatches);
+readManifest $dstManifest, \%dstNarFiles, \%dstPatches;
+
+my $tmpDir = mkTempDir("nix-generate-patches");
+
+generatePatches \%srcNarFiles, \%dstNarFiles, \%srcPatches, \%dstPatches,
+    $narPath, $patchesPath, $patchesURL, $tmpDir;
+
+propagatePatches \%srcPatches, \%dstNarFiles, \%dstPatches;
+
+# Optionally add all new patches to the manifest in $NIX_ALL_PATCHES.
+my $allPatchesFile = $ENV{"NIX_ALL_PATCHES"};
+if (defined $allPatchesFile) {
+    my (%dummy, %allPatches);
+    readManifest("$patchesPath/all-patches", \%dummy, \%allPatches)
+        if -f $allPatchesFile;
+    copyPatches \%dstPatches, \%allPatches;
+    writeManifest($allPatchesFile, {}, \%allPatches, 0);
+}
+
+writeManifest $dstManifest, \%dstNarFiles, \%dstPatches;
diff --git a/scripts/nix-http-export.cgi.in b/scripts/nix-http-export.cgi.in
new file mode 100755
index 000000000000..19a505af1c50
--- /dev/null
+++ b/scripts/nix-http-export.cgi.in
@@ -0,0 +1,51 @@
+#! /bin/sh
+
+export HOME=/tmp
+export NIX_REMOTE=daemon
+
+TMP_DIR="${TMP_DIR:-/tmp/nix-export}"
+
+@coreutils@/mkdir -p "$TMP_DIR" || true
+@coreutils@/chmod a+r "$TMP_DIR"
+
+needed_path="?$QUERY_STRING"
+needed_path="${needed_path#*[?&]needed_path=}"
+needed_path="${needed_path%%&*}"
+#needed_path="$(echo $needed_path  | ./unhttp)"
+needed_path="${needed_path//%2B/+}"
+needed_path="${needed_path//%3D/=}"
+
+echo needed_path: "$needed_path" >&2
+
+NIX_STORE="${NIX_STORE_DIR:-/nix/store}"
+
+echo NIX_STORE: "${NIX_STORE}" >&2
+
+full_path="${NIX_STORE}"/"$needed_path"
+
+if [ "$needed_path" != "${needed_path%.drv}" ]; then
+	echo "Status: 403 You should create the derivation file yourself"
+	echo "Content-Type: text/plain"
+	echo
+	echo "Refusing to disclose derivation contents"
+	exit
+fi
+
+if @bindir@/nix-store --check-validity "$full_path"; then
+	if ! [ -e nix-export/"$needed_path".nar.gz ]; then
+		@bindir@/nix-store --export "$full_path" | @gzip@ > "$TMP_DIR"/"$needed_path".nar.gz
+		@coreutils@/ln -fs  "$TMP_DIR"/"$needed_path".nar.gz nix-export/"$needed_path".nar.gz 
+	fi;
+	echo "Status: 301 Moved"
+	echo "Location: nix-export/"$needed_path".nar.gz"
+	echo
+else 
+	echo "Status: 404 No such path found"
+	echo "Content-Type: text/plain"
+	echo
+	echo "Path not found:"
+	echo "$needed_path"
+	echo "checked:"
+	echo "$full_path"
+fi
+
diff --git a/scripts/nix-install-package.in b/scripts/nix-install-package.in
new file mode 100755
index 000000000000..9340f1b729e6
--- /dev/null
+++ b/scripts/nix-install-package.in
@@ -0,0 +1,136 @@
+#! @perl@ -w @perlFlags@
+
+use strict;
+use Nix::Config;
+use Nix::Utils;
+
+
+# Parse the command line arguments.
+my @args = @ARGV;
+
+my $source;
+my $fromURL = 0;
+my @extraNixEnvArgs = ();
+my $interactive = 1;
+
+while (scalar @args) {
+    my $arg = shift @args;
+    if ($arg eq "--help") {
+        exec "man nix-install-package" or die;
+    }
+    elsif ($arg eq "--url") {
+        $fromURL = 1;
+    }
+    elsif ($arg eq "--profile" || $arg eq "-p") {
+        my $profile = shift @args;
+        die "$0: ‘--profile’ requires an argument\n" if !defined $profile;
+        push @extraNixEnvArgs, "-p", $profile;
+    }
+    elsif ($arg eq "--non-interactive") {
+        $interactive = 0;
+    }
+    else {
+        $source = $arg;
+    }
+}
+
+die "$0: please specify a .nixpkg file or URL\n" unless defined $source;
+
+
+# Re-execute in a terminal, if necessary, so that if we're executed
+# from a web browser, the user gets to see us.
+if ($interactive && !defined $ENV{"NIX_HAVE_TERMINAL"}) {
+    $ENV{"NIX_HAVE_TERMINAL"} = "1";
+    $ENV{"LD_LIBRARY_PATH"} = "";
+    foreach my $term ("xterm", "konsole", "gnome-terminal", "xterm") {
+        exec($term, "-e", "$Nix::Config::binDir/nix-install-package", @ARGV);
+    }
+    die "cannot execute ‘xterm’";
+}
+
+
+my $tmpDir = mkTempDir("nix-install-package");
+
+
+sub barf {
+    my $msg = shift;
+    print "\nInstallation failed: $msg\n";
+    <STDIN> if $interactive;
+    exit 1;
+}
+
+
+# Download the package description, if necessary.
+my $pkgFile = $source;
+if ($fromURL) {
+    $pkgFile = "$tmpDir/tmp.nixpkg";
+    system("@curl@", "--silent", $source, "-o", $pkgFile) == 0
+        or barf "curl failed: $?";
+}
+
+
+# Read and parse the package file.
+open PKGFILE, "<$pkgFile" or barf "cannot open ‘$pkgFile’: $!";
+my $contents = <PKGFILE>;
+close PKGFILE;
+
+my $nameRE = "(?: [A-Za-z0-9\+\-\.\_\?\=]+ )"; # see checkStoreName()
+my $systemRE = "(?: [A-Za-z0-9\+\-\_]+ )";
+my $pathRE = "(?: \/ [\/A-Za-z0-9\+\-\.\_\?\=]* )";
+
+# Note: $pathRE doesn't check whether we're looking at a valid
+# store path.  We'll let nix-env do that.
+
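+# Sketch of the expected line (whitespace-separated fields):
+#   NIXPKG1 <manifest-url> <drv-name> <system> <drv-path> <out-path> [<binary-cache-url>]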
+$contents =~
+    / ^ \s* (\S+) \s+ ($Nix::Utils::urlRE) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) ( \s+ ($Nix::Utils::urlRE) )?  /x
+    or barf "invalid package contents";
+my $version = $1;
+my $manifestURL = $2;
+my $drvName = $3;
+my $system = $4;
+my $drvPath = $5;
+my $outPath = $6;
+my $binaryCacheURL = $8;
+
+barf "invalid package version ‘$version’" unless $version eq "NIXPKG1";
+
+
+if ($interactive) {
+    # Ask confirmation.
+    print "Do you want to install ‘$drvName’ (Y/N)? ";
+    my $reply = <STDIN>;
+    chomp $reply;
+    exit if $reply ne "y" && $reply ne "Y";
+}
+
+
+if (defined $binaryCacheURL) {
+
+    push @extraNixEnvArgs, "--option", "extra-binary-caches", $binaryCacheURL;
+
+} else {
+
+    # Store the manifest in the temporary directory so that we don't
+    # pollute /nix/var/nix/manifests.  This also requires that we
+    # don't use the Nix daemon (because otherwise
+    # download-using-manifests won't see our NIX_MANIFESTS_DIR
+    # environment variable).
+    $ENV{NIX_MANIFESTS_DIR} = $tmpDir;
+    $ENV{NIX_REMOTE} = "";
+
+    print "\nPulling manifests...\n";
+    system("$Nix::Config::binDir/nix-pull", $manifestURL) == 0
+        or barf "nix-pull failed: $?";
+
+}
+
+
+print "\nInstalling package...\n";
+system("$Nix::Config::binDir/nix-env", "--install", $outPath, "--force-name", $drvName, @extraNixEnvArgs) == 0
+    or barf "nix-env failed: $?";
+
+
+if ($interactive) {
+    print "\nInstallation succeeded! Press Enter to continue.\n";
+    <STDIN>;
+}
diff --git a/scripts/nix-prefetch-url.in b/scripts/nix-prefetch-url.in
new file mode 100755
index 000000000000..869d29c39909
--- /dev/null
+++ b/scripts/nix-prefetch-url.in
@@ -0,0 +1,128 @@
+#! @perl@ -w @perlFlags@
+
+use strict;
+use File::Basename;
+use File::stat;
+use Nix::Store;
+use Nix::Config;
+use Nix::Utils;
+
+my $hashType = $ENV{'NIX_HASH_ALGO'} || "sha256"; # obsolete
+my $cacheDir = $ENV{'NIX_DOWNLOAD_CACHE'};
+
+my @args;
+my $arg;
+while ($arg = shift) {
+    if ($arg eq "--help") {
+        exec "man nix-prefetch-url" or die;
+    } elsif ($arg eq "--type") {
+        $hashType = shift;
+        die "$0: ‘$arg’ requires an argument\n" unless defined $hashType;
+    } elsif (substr($arg, 0, 1) eq "-") {
+        die "$0: unknown flag ‘$arg’\n";
+    } else {
+        push @args, $arg;
+    }
+}
+
+my $url = $args[0];
+my $expHash = $args[1];
+
+
+if (!defined $url || $url eq "") {
+    print STDERR <<EOF
+Usage: nix-prefetch-url URL [EXPECTED-HASH]
+EOF
+    ;
+    exit 1;
+}
+
+my $tmpDir = mkTempDir("nix-prefetch-url");
+
+# Hack to support the mirror:// scheme from Nixpkgs.
+if ($url =~ /^mirror:\/\//) {
+    system("$Nix::Config::binDir/nix-build '<nixpkgs>' -A resolveMirrorURLs --argstr url '$url' -o $tmpDir/urls > /dev/null") == 0
+        or die "$0: nix-build failed; maybe \$NIX_PATH is not set properly\n";
+    my @expanded = split ' ', readFile("$tmpDir/urls");
+    die "$0: cannot resolve ‘$url’" unless scalar @expanded > 0;
+    print STDERR "$url expands to $expanded[0]\n";
+    $url = $expanded[0];
+}
+
+# Handle escaped characters in the URI.  `+', `=' and `?' are the only
+# characters that are valid in Nix store path names but have a special
+# meaning in URIs.
+my $name = basename $url;
+die "cannot figure out file name for ‘$url’\n" if $name eq ""; 
+$name =~ s/%2b/+/g;
+$name =~ s/%3d/=/g;
+$name =~ s/%3f/?/g;
+
+my $finalPath;
+my $hash;
+
+# If the hash was given, a file with that hash may already be in the
+# store.
+if (defined $expHash) {
+    $finalPath = makeFixedOutputPath(0, $hashType, $expHash, $name);
+    if (isValidPath($finalPath)) { $hash = $expHash; } else { $finalPath = undef; }
+}
+
+# If we don't know the hash or a file with that hash doesn't exist,
+# download the file and add it to the store.
+if (!defined $finalPath) {
+
+    my $tmpFile = "$tmpDir/$name";
+    
+    # Optionally do timestamp-based caching of the download.
+    # Actually, the only thing that we cache in $NIX_DOWNLOAD_CACHE is
+    # the hash and the timestamp of the file at $url.  The caching of
+    # the file *contents* is done in the Nix store, where it can be
+    # garbage-collected independently.
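+    # A cache entry for a URL consists of three files keyed by the SHA-256 of
+    # the URL: ‘<urlHash>.url’ (the URL itself), ‘<urlHash>.<hashType>’ (the
+    # file's hash) and ‘<urlHash>.stamp’ (an empty file carrying the remote
+    # file's timestamp, used with curl's --time-cond).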
+    my ($cachedTimestampFN, $cachedHashFN, @cacheFlags);
+    if (defined $cacheDir) {
+        my $urlHash = hashString("sha256", 1, $url);
+        writeFile "$cacheDir/$urlHash.url", $url;
+        $cachedHashFN = "$cacheDir/$urlHash.$hashType";
+        $cachedTimestampFN = "$cacheDir/$urlHash.stamp";
+        @cacheFlags = ("--time-cond", $cachedTimestampFN) if -f $cachedHashFN && -f $cachedTimestampFN;
+    }
+    
+    # Perform the download.
+    my @curlFlags = ("curl", $url, "-o", $tmpFile, "--fail", "--location", "--max-redirs", "20", "--disable-epsv", "--cookie-jar", "$tmpDir/cookies", "--remote-time", (split " ", ($ENV{NIX_CURL_FLAGS} || "")));
+    (system $Nix::Config::curl @curlFlags, @cacheFlags) == 0 or die "$0: download of ‘$url’ failed\n";
+
+    if (defined $cacheDir && ! -e $tmpFile) {
+        # Curl didn't create $tmpFile, so apparently there's no newer
+        # file on the server.
+        $hash = readFile $cachedHashFN or die;
+        $finalPath = makeFixedOutputPath(0, $hashType, $hash, $name);
+        unless (isValidPath $finalPath) {
+            print STDERR "cached contents of ‘$url’ disappeared, redownloading...\n";
+            $finalPath = undef;
+            (system $Nix::Config::curl @curlFlags) == 0 or die "$0: download of ‘$url’ failed\n";
+        }
+    }
+
+    if (!defined $finalPath) {
+        
+        # Compute the hash.
+        $hash = hashFile($hashType, $hashType ne "md5", $tmpFile);
+
+        if (defined $cacheDir) {
+            writeFile $cachedHashFN, $hash;
+            my $st = stat($tmpFile) or die;
+            open STAMP, ">$cachedTimestampFN" or die; close STAMP;
+            utime($st->atime, $st->mtime, $cachedTimestampFN) or die;
+        }
+    
+        # Add the downloaded file to the Nix store.
+        $finalPath = addToStore($tmpFile, 0, $hashType);
+    }
+
+    die "$0: hash mismatch for ‘$url’\n" if defined $expHash && $expHash ne $hash;
+}
+
+print STDERR "path is ‘$finalPath’\n" unless $ENV{'QUIET'};
+print "$hash\n";
+print "$finalPath\n" if $ENV{'PRINT_PATH'};
diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in
new file mode 100644
index 000000000000..7dd7968c314a
--- /dev/null
+++ b/scripts/nix-profile.sh.in
@@ -0,0 +1,31 @@
+if [ -n "$HOME" ]; then
+    NIX_LINK="$HOME/.nix-profile"
+
+    # Set the default profile.
+    if ! [ -L "$NIX_LINK" ]; then
+        echo "creating $NIX_LINK" >&2
+        _NIX_DEF_LINK=@localstatedir@/nix/profiles/default
+        @coreutils@/ln -s "$_NIX_DEF_LINK" "$NIX_LINK"
+    fi
+
+    export PATH=$NIX_LINK/bin:$NIX_LINK/sbin:$PATH
+
+    # Subscribe the user to the Nixpkgs channel by default.
+    if [ ! -e $HOME/.nix-channels ]; then
+        echo "http://nixos.org/channels/nixpkgs-unstable nixpkgs" > $HOME/.nix-channels
+    fi
+
+    # Append ~/.nix-defexpr/channels/nixpkgs to $NIX_PATH so that
+    # <nixpkgs> paths work when the user has fetched the Nixpkgs
+    # channel.
+    export NIX_PATH=${NIX_PATH:+$NIX_PATH:}nixpkgs=$HOME/.nix-defexpr/channels/nixpkgs
+
+    # Set $SSL_CERT_FILE so that Nixpkgs applications like curl work.
+    if [ -e /etc/ssl/certs/ca-bundle.crt ]; then # Fedora, NixOS
+        export SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt
+    elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # Ubuntu, Debian
+        export SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
+    elif [ -e "$NIX_LINK/etc/ca-bundle.crt" ]; then # fall back to Nix profile
+        export SSL_CERT_FILE="$NIX_LINK/etc/ca-bundle.crt"
+    fi
+fi
diff --git a/scripts/nix-pull.in b/scripts/nix-pull.in
new file mode 100755
index 000000000000..f9785d8e5d19
--- /dev/null
+++ b/scripts/nix-pull.in
@@ -0,0 +1,99 @@
+#! @perl@ -w @perlFlags@
+
+use strict;
+use Nix::Config;
+use Nix::Manifest;
+
+my $manifestDir = $Nix::Config::manifestDir;
+
+
+# Prevent access problems in shared-store installations.
+umask 0022;
+
+
+# Create the manifests directory if it doesn't exist.
+if (! -e $manifestDir) {
+    mkdir $manifestDir, 0755 or die "cannot create directory ‘$manifestDir’";
+}
+
+
+# Make sure that the manifests directory is scanned for GC roots.
+my $gcRootsDir = "$Nix::Config::stateDir/gcroots";
+my $manifestDirLink = "$gcRootsDir/manifests";
+if (! -l $manifestDirLink) {
+    symlink($manifestDir, $manifestDirLink) or die "cannot create symlink ‘$manifestDirLink’";
+}
+
+
+# Process the URLs specified on the command line.
+
+sub downloadFile {
+    my $url = shift;
+    $ENV{"PRINT_PATH"} = 1;
+    $ENV{"QUIET"} = 1;
+    my ($dummy, $path) = `$Nix::Config::binDir/nix-prefetch-url '$url'`;
+    die "cannot fetch ‘$url’" if $? != 0;
+    die "nix-prefetch-url did not return a path" unless defined $path;
+    chomp $path;
+    return $path;
+}
+
+sub processURL {
+    my $url = shift;
+
+    $url =~ s/\/$//;
+
+    my $manifest;
+
+    my $origUrl = $ENV{'NIX_ORIG_URL'} || $url;
+
+    # First see if a bzipped manifest is available.
+    if (system("$Nix::Config::curl --fail --silent --location --head '$url'.bz2 > /dev/null") == 0) {
+        print "fetching list of Nix archives at ‘$url.bz2’...\n";
+        $manifest = downloadFile "$url.bz2";
+    }
+
+    # Otherwise, just get the uncompressed manifest.
+    else {
+        print "fetching list of Nix archives at ‘$url’...\n";
+        $manifest = downloadFile $url;
+    }
+
+    my $baseName = "unnamed";
+    if ($url =~ /\/([^\/]+)\/[^\/]+$/) { # get the second-to-last component
+        $baseName = $1;
+    }
+
+    my $hash = `$Nix::Config::binDir/nix-hash --flat '$manifest'`
+        or die "cannot hash ‘$manifest’";
+    chomp $hash;
+
+    my $urlFile = "$manifestDir/$baseName-$hash.url";
+    open URL, ">$urlFile" or die "cannot create ‘$urlFile’";
+    print URL $origUrl;
+    close URL;
+
+    my $finalPath = "$manifestDir/$baseName-$hash.nixmanifest";
+
+    unlink $finalPath if -e $finalPath;
+
+    symlink("$manifest", "$finalPath")
+        or die "cannot link ‘$finalPath’ to ‘$manifest’";
+
+    deleteOldManifests($origUrl, $urlFile);
+}
+
+while (@ARGV) {
+    my $url = shift @ARGV;
+    if ($url eq "--help") {
+        exec "man nix-pull" or die;
+    } elsif ($url eq "--skip-wrong-store") {
+        # No-op, no longer supported.
+    } else {
+        processURL $url;
+    }
+}
+
+
+# Update the cache.
+updateManifestDB();
diff --git a/scripts/nix-push.in b/scripts/nix-push.in
new file mode 100755
index 000000000000..b0cb6d0da782
--- /dev/null
+++ b/scripts/nix-push.in
@@ -0,0 +1,291 @@
+#! @perl@ -w @perlFlags@
+
+use strict;
+use File::Basename;
+use File::Path qw(mkpath);
+use File::stat;
+use File::Copy;
+use Nix::Config;
+use Nix::Store;
+use Nix::Manifest;
+use Nix::Utils;
+use Nix::Crypto;
+
+my $tmpDir = mkTempDir("nix-push");
+
+my $nixExpr = "$tmpDir/create-nars.nix";
+
+
+# Parse the command line.
+my $compressionType = "xz";
+my $force = 0;
+my $destDir;
+my $writeManifest = 0;
+my $manifestPath;
+my $archivesURL;
+my $link = 0;
+my $privateKeyFile;
+my $keyName;
+my @roots;
+
+for (my $n = 0; $n < scalar @ARGV; $n++) {
+    my $arg = $ARGV[$n];
+
+    if ($arg eq "--help") {
+        exec "man nix-push" or die;
+    } elsif ($arg eq "--bzip2") {
+        $compressionType = "bzip2";
+    } elsif ($arg eq "--none") {
+        $compressionType = "none";
+    } elsif ($arg eq "--force") {
+        $force = 1;
+    } elsif ($arg eq "--dest") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        $destDir = $ARGV[$n];
+        mkpath($destDir, 0, 0755);
+    } elsif ($arg eq "--manifest") {
+        $writeManifest = 1;
+    } elsif ($arg eq "--manifest-path") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        $manifestPath = $ARGV[$n];
+        $writeManifest = 1;
+        mkpath(dirname($manifestPath), 0, 0755);
+    } elsif ($arg eq "--url-prefix") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        $archivesURL = $ARGV[$n];
+    } elsif ($arg eq "--link") {
+        $link = 1;
+    } elsif ($arg eq "--key") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        $privateKeyFile = $ARGV[$n];
+    } elsif ($arg eq "--key-name") {
+        $n++;
+        die "$0: ‘$arg’ requires an argument\n" unless $n < scalar @ARGV;
+        $keyName = $ARGV[$n];
+    } elsif (substr($arg, 0, 1) eq "-") {
+        die "$0: unknown flag ‘$arg’\n";
+    } else {
+        push @roots, $arg;
+    }
+}
+
+die "$0: please specify a destination directory\n" if !defined $destDir;
+
+$archivesURL = "file://$destDir" unless defined $archivesURL;
+
+
+# From the given store paths, determine the set of requisite store
+# paths, i.e., the paths required to realise them.
+my %storePaths;
+
+foreach my $path (@roots) {
+    # Get all paths referenced by the normalisation of the given
+    # Nix expression.
+    my $pid = open(READ,
+        "$Nix::Config::binDir/nix-store --query --requisites --force-realise " .
+        "--include-outputs '$path'|") or die;
+
+    while (<READ>) {
+        chomp;
+        die "bad: $_" unless /^\//;
+        $storePaths{$_} = "";
+    }
+
+    close READ or die "nix-store failed: $?";
+}
+
+my @storePaths = keys %storePaths;
+
+
+# Don't create archives for files that are already in the binary cache.
+my @storePaths2;
+my %narFiles;
+foreach my $storePath (@storePaths) {
+    my $pathHash = substr(basename($storePath), 0, 32);
+    my $narInfoFile = "$destDir/$pathHash.narinfo";
+    if (-e $narInfoFile) {
+        my $narInfo = parseNARInfo($storePath, readFile($narInfoFile), 0, $narInfoFile) or die "cannot read ‘$narInfoFile’\n";
+        my $narFile = "$destDir/$narInfo->{url}";
+        if (-e $narFile) {
+            print STDERR "skipping existing $storePath\n";
+            # Add the NAR info to $narFiles if we're writing a
+            # manifest.
+            $narFiles{$storePath} = [
+                { url => ("$archivesURL/" . basename $narInfo->{url})
+                  , hash => $narInfo->{fileHash}
+                  , size => $narInfo->{fileSize}
+                  , compressionType => $narInfo->{compression}
+                  , narHash => $narInfo->{narHash}
+                  , narSize => $narInfo->{narSize}
+                  , references => join(" ", map { "$Nix::Config::storeDir/$_" } @{$narInfo->{refs}})
+                  , deriver => $narInfo->{deriver} ? "$Nix::Config::storeDir/$narInfo->{deriver}" : undef
+                  }
+            ] if $writeManifest;
+            next;
+        }
+    }
+    push @storePaths2, $storePath;
+}
+
+
+# Create a list of Nix derivations that turn each path into a Nix
+# archive.
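+# (<nix/nar.nix> resolves to the corepkgs/nar.nix expression shipped with Nix.)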
+open NIX, ">$nixExpr";
+print NIX "[";
+
+foreach my $storePath (@storePaths2) {
+    die unless ($storePath =~ /\/[0-9a-z]{32}[^\"\\\$]*$/);
+
+    # Construct a Nix expression that creates a Nix archive.
+    my $nixexpr =
+        "(import <nix/nar.nix> " .
+        "{ storePath = builtins.storePath \"$storePath\"; hashAlgo = \"sha256\"; compressionType = \"$compressionType\"; }) ";
+
+    print NIX $nixexpr;
+}
+
+print NIX "]";
+close NIX;
+
+
+# Build the Nix expression.
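+# nix-build prints one output path per line; each output is a directory
+# containing the compressed NAR and a ‘nar-compressed-hash’ file.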
+print STDERR "building compressed archives...\n";
+my @narPaths;
+my $pid = open(READ, "$Nix::Config::binDir/nix-build $nixExpr -o $tmpDir/result |")
+    or die "cannot run nix-build";
+while (<READ>) {
+    chomp;
+    die unless /^\//;
+    push @narPaths, $_;
+}
+close READ or die "nix-build failed: $?";
+
+
+# Write the cache info file.
+my $cacheInfoFile = "$destDir/nix-cache-info";
+if (! -e $cacheInfoFile) {
+    open FILE, ">$cacheInfoFile" or die "cannot create $cacheInfoFile: $!";
+    print FILE "StoreDir: $Nix::Config::storeDir\n";
+    print FILE "WantMassQuery: 0\n"; # by default, don't hit this cache for "nix-env -qas"
+    close FILE;
+}
+
+
+# Copy the archives and the corresponding NAR info files.
+print STDERR "copying archives...\n";
+
+my $totalNarSize = 0;
+my $totalCompressedSize = 0;
+
+for (my $n = 0; $n < scalar @storePaths2; $n++) {
+    my $storePath = $storePaths2[$n];
+    my $narDir = $narPaths[$n];
+    my $baseName = basename $storePath;
+
+    # Get info about the store path.
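+    # queryPathInfo returns the deriver, NAR hash, registration time, NAR size
+    # and references; the second argument requests base-32 hashes.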
+    my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1);
+
+    # In some exceptional cases (such as VM tests that use the Nix
+    # store of the host), the database doesn't contain the hash.  So
+    # compute it.
+    if ($narHash =~ /^sha256:0*$/) {
+        my $nar = "$tmpDir/nar";
+        system("$Nix::Config::binDir/nix-store --dump $storePath > $nar") == 0
+            or die "cannot dump $storePath\n";
+        $narHash = `$Nix::Config::binDir/nix-hash --type sha256 --base32 --flat $nar`;
+        die "cannot hash ‘$nar’" if $? != 0;
+        chomp $narHash;
+        $narHash = "sha256:$narHash";
+        $narSize = stat("$nar")->size;
+        unlink $nar or die;
+    }
+
+    $totalNarSize += $narSize;
+
+    # Get info about the compressed NAR.
+    open HASH, "$narDir/nar-compressed-hash" or die "cannot open nar-compressed-hash";
+    my $compressedHash = <HASH>;
+    chomp $compressedHash;
+    $compressedHash =~ /^[0-9a-z]+$/ or die "invalid hash";
+    close HASH;
+
+    my $narName = "$compressedHash.nar" . ($compressionType eq "xz" ? ".xz" : $compressionType eq "bzip2" ? ".bz2" : "");
+
+    my $narFile = "$narDir/$narName";
+    (-f $narFile) or die "NAR file for $storePath not found";
+
+    my $compressedSize = stat($narFile)->size;
+    $totalCompressedSize += $compressedSize;
+
+    printf STDERR "%s [%.2f MiB, %.1f%%]\n", $storePath,
+        $compressedSize / (1024 * 1024), $compressedSize / $narSize * 100;
+
+    # Copy the compressed NAR.
+    my $dst = "$destDir/$narName";
+    if (! -f $dst) {
+        my $tmp = "$destDir/.tmp.$$.$narName";
+        if ($link) {
+            link($narFile, $tmp) or die "cannot link $tmp to $narFile: $!\n";
+        } else {
+            copy($narFile, $tmp) or die "cannot copy $narFile to $tmp: $!\n";
+        }
+        rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n";
+    }
+
+    # Write the info file.
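+    # The <hash>.narinfo file describes the compressed NAR (URL, hashes, sizes,
+    # references, deriver) so that substituters can locate and verify it.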
+    my $info;
+    $info .= "StorePath: $storePath\n";
+    $info .= "URL: $narName\n";
+    $info .= "Compression: $compressionType\n";
+    $info .= "FileHash: sha256:$compressedHash\n";
+    $info .= "FileSize: $compressedSize\n";
+    $info .= "NarHash: $narHash\n";
+    $info .= "NarSize: $narSize\n";
+    $info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n";
+    if (defined $deriver) {
+        $info .= "Deriver: " . basename $deriver . "\n";
+        if (isValidPath($deriver)) {
+            my $drv = derivationFromPath($deriver);
+            $info .= "System: $drv->{platform}\n";
+        }
+    }
+
+    if (defined $privateKeyFile && defined $keyName) {
+        my $sig = signString($privateKeyFile, $info);
+        $info .= "Signature: 1;$keyName;$sig\n";
+    }
+
+    my $pathHash = substr(basename($storePath), 0, 32);
+
+    $dst = "$destDir/$pathHash.narinfo";
+    if ($force || ! -f $dst) {
+        my $tmp = "$destDir/.tmp.$$.$pathHash.narinfo";
+        open INFO, ">$tmp" or die;
+        print INFO "$info" or die;
+        close INFO or die;
+        rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n";
+    }
+
+    $narFiles{$storePath} = [
+        { url => "$archivesURL/$narName"
+        , hash => "sha256:$compressedHash"
+        , size => $compressedSize
+        , compressionType => $compressionType
+        , narHash => "$narHash"
+        , narSize => $narSize
+        , references => join(" ", @{$refs})
+        , deriver => $deriver
+        }
+    ] if $writeManifest;
+}
+
+printf STDERR "total compressed size %.2f MiB, %.1f%%\n",
+    $totalCompressedSize / (1024 * 1024), $totalCompressedSize / ($totalNarSize || 1) * 100;
+
+
+# Optionally write a manifest.
+writeManifest($manifestPath // "$destDir/MANIFEST", \%narFiles, \()) if $writeManifest;
diff --git a/scripts/nix-reduce-build.in b/scripts/nix-reduce-build.in
new file mode 100755
index 000000000000..50beb9d10b16
--- /dev/null
+++ b/scripts/nix-reduce-build.in
@@ -0,0 +1,171 @@
+#! @bash@
+
+WORKING_DIRECTORY=$(mktemp -d "${TMPDIR:-/tmp}"/nix-reduce-build-XXXXXX);
+cd "$WORKING_DIRECTORY";
+
+if test -z "$1" || test "a--help" = "a$1" ; then
+	echo 'nix-reduce-build (paths or Nix expressions) -- (package sources)' >&2
+	echo As in: >&2
+	echo nix-reduce-build /etc/nixos/nixos -- ssh://user@somewhere.nowhere.example.org >&2
+	echo nix-reduce-build /etc/nixos/nixos -- \\
+	echo "   " \''http://somewhere.nowhere.example.org/nix/nix-http-export.cgi?needed_path='\' >&2
+	echo "  store path name will be added into the end of the URL" >&2
+	echo nix-reduce-build /etc/nixos/nixos -- file://home/user/nar/ >&2
+	echo "  that should be a directory where gzipped 'nix-store --export' ">&2
+	echo "  files are located (they should have .nar.gz extension)"  >&2
+	echo "        Or all together: " >&2
+	echo -e nix-reduce-build /expr.nix /e2.nix -- \\\\\\\n\
+	"    ssh://a@b.example.com http://n.example.com/get-nar?q= file://nar/" >&2
+	echo "        Also supports best-effort local builds of failing expression set:" >&2
+	echo "nix-reduce-build /e.nix -- nix-daemon:// nix-self://" >&2
+	echo "  nix-daemon:// builds using daemon" >&2
+	echo "  nix-self:// builds directly using nix-store from current installation" >&2
+	echo "  nix-daemon-fixed:// and nix-self-fixed:// do the same, but only for" >&2;
+	echo "derivations with specified output hash (sha256, sha1 or md5)." >&2
+	echo "  nix-daemon-substitute:// and nix-self-substitute:// try to substitute" >&2;
+	echo "as many paths as possible" >&2;
+	echo "  nix-daemon-build:// and nix-self-build:// try to build (not substitute)" >&2;
+	echo "as many paths as possible" >&2;
+	echo "        If no package sources are specified, required paths are listed." >&2;
+	exit;
+fi;
+
+while ! test "$1" = "--" && ! test "$1" = "" ; do
+	echo "$1" >> initial;
+	shift;
+done
+shift;
+echo Will work on $(cat initial | wc -l) targets. >&2
+
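+# Classify each target: existing store paths are used as-is; anything else is
+# treated as a Nix expression and instantiated to obtain its store paths.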
+while read ; do
+	case "$REPLY" in 
+		${NIX_STORE_DIR:-/nix/store}/*)
+			echo "$REPLY" >> paths;
+			;;
+		*)
+			(
+				IFS=: ;
+				nix-instantiate $REPLY >> paths;
+			);
+			;;
+	esac;
+done < initial;
+echo Processing $(cat paths | wc -l) paths. >&2
+
+while read; do
+	case "$REPLY" in
+		*.drv)
+			echo "$REPLY" >> derivers;
+			;;
+		*)
+			nix-store --query --deriver "$REPLY" >>derivers;
+			;;
+	esac;
+done < paths;
+echo Found $(cat derivers | wc -l) derivers. >&2
+
+cat derivers | xargs nix-store --query -R > derivers-closure;
+echo Processing at most $(cat derivers-closure | wc -l) derivers. >&2
+
+cat derivers-closure | egrep '[.]drv$' | xargs nix-store --query --outputs > wanted-paths;
+cat derivers-closure | egrep -v '[.]drv$' >> wanted-paths;
+echo Prepared $(cat wanted-paths | wc -l) paths to get. >&2
+
+cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
+echo We need $(cat needed-paths | wc -l) paths. >&2
+
+egrep '[.]drv$' derivers-closure > critical-derivers;
+
+if test -z "$1" ; then
+	cat needed-paths;	
+fi;
+
+refresh_critical_derivers() {
+    echo "Finding needed derivers..." >&2;
+    cat critical-derivers | while read; do
+        if ! (nix-store --query --outputs "$REPLY" | xargs nix-store --check-validity &> /dev/null;); then
+            echo "$REPLY";
+        fi;
+    done > new-critical-derivers;
+    mv new-critical-derivers critical-derivers;
+    echo The needed paths are realized by $(cat critical-derivers | wc -l) derivers. >&2
+}
+
+build_here() {
+    cat critical-derivers | while read; do 
+        echo "Realising $REPLY using nix-daemon" >&2
+        @bindir@/nix-store -r "${REPLY}"
+    done;
+}
+
+try_to_substitute(){
+    cat needed-paths | while read ; do 
+        echo "Building $REPLY using nix-daemon" >&2
+        @bindir@/nix-store -r "${NIX_STORE_DIR:-/nix/store}/${REPLY##*/}"
+    done;
+}
+
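+# Dispatch on each package source: ssh://, http(s):// and file:/ sources fetch
+# the needed paths, while the nix-daemon://... and nix-self://... pseudo-URLs
+# substitute or build them locally.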
+for i in "$@"; do 
+	sshHost="${i#ssh://}";
+	httpHost="${i#http://}";
+	httpsHost="${i#https://}";
+	filePath="${i#file:/}";
+	if [ "$i" != "$sshHost" ]; then
+		cat needed-paths | while read; do 
+			echo "Getting $REPLY and its closure over ssh" >&2
+			nix-copy-closure --from "$sshHost" --gzip "$REPLY" </dev/null || true; 
+		done;
+	elif [ "$i" != "$httpHost" ] || [ "$i" != "$httpsHost" ]; then
+		cat needed-paths | while read; do
+			echo "Getting $REPLY over http/https" >&2
+			curl ${BAD_CERTIFICATE:+-k} -L "$i${REPLY##*/}" | gunzip | nix-store --import;
+		done;
+	elif [ "$i" != "$filePath" ] ; then
+		cat needed-paths | while read; do 
+			echo "Installing $REPLY from file" >&2
+			gunzip < "$filePath/${REPLY##*/}".nar.gz | nix-store --import;
+		done;
+	elif [ "$i" = "nix-daemon://" ] ; then
+		NIX_REMOTE=daemon try_to_substitute;
+		refresh_critical_derivers;
+		NIX_REMOTE=daemon build_here;
+	elif [ "$i" = "nix-self://" ] ; then
+		NIX_REMOTE= try_to_substitute;
+		refresh_critical_derivers;
+		NIX_REMOTE= build_here;
+	elif [ "$i" = "nix-daemon-fixed://" ] ; then
+		refresh_critical_derivers;
+
+		cat critical-derivers | while read; do 
+			if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
+				echo "Realising $REPLY using nix-daemon" >&2
+				NIX_REMOTE=daemon @bindir@/nix-store -r "${REPLY}"
+			fi;
+		done;
+	elif [ "$i" = "nix-self-fixed://" ] ; then
+		refresh_critical_derivers;
+
+		cat critical-derivers | while read; do 
+			if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
+				echo "Realising $REPLY using direct Nix build" >&2
+				NIX_REMOTE= @bindir@/nix-store -r "${REPLY}"
+			fi;
+		done;
+	elif [ "$i" = "nix-daemon-substitute://" ] ; then
+		NIX_REMOTE=daemon try_to_substitute;
+	elif [ "$i" = "nix-self-substitute://" ] ; then
+		NIX_REMOTE= try_to_substitute;
+	elif [ "$i" = "nix-daemon-build://" ] ; then
+		refresh_critical_derivers;
+		NIX_REMOTE=daemon build_here;
+	elif [ "$i" = "nix-self-build://" ] ; then
+		refresh_critical_derivers;
+		NIX_REMOTE= build_here;
+	fi;
+	mv needed-paths wanted-paths;
+	cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
+	echo We still need $(cat needed-paths | wc -l) paths. >&2
+done;
+
+cd /
+rm -r "$WORKING_DIRECTORY"
diff --git a/scripts/show-duplication.pl b/scripts/show-duplication.pl
new file mode 100755
index 000000000000..0604c6696c7a
--- /dev/null
+++ b/scripts/show-duplication.pl
@@ -0,0 +1,73 @@
+#! /usr/bin/perl -w
+
+if (scalar @ARGV != 1) {
+    print "syntax: show-duplication.pl PATH\n";
+    exit 1;
+}
+
+my $root = $ARGV[0];
+
+
+my $nameRE = "(?:(?:[A-Za-z0-9\+\_]|(?:-[^0-9]))+)";
+my $versionRE = "(?:[A-Za-z0-9\.\-]+)";
+
+
+my %pkgInstances;
+
+
+my $pid = open(PATHS, "-|") || exec "nix-store", "-qR", $root;
+while (<PATHS>) {
+    chomp;
+    /^.*\/[0-9a-z]*-(.*)$/;
+    my $nameVersion = $1;
+    $nameVersion =~ /^($nameRE)(-($versionRE))?$/;
+    my $name = $1;
+    my $version = $3;
+    $version = "(unnumbered)" unless defined $version;
+#    print "$nameVersion $name $version\n";
+    push @{$pkgInstances{$name}}, {version => $version, path => $_};
+}
+close PATHS or exit 1;
+
+
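+# Recursively compute the total size of a path: the lstat size of the path
+# itself plus, for directories, the sizes of all entries beneath it.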
+sub pathSize {
+    my $path = shift;
+    my @st = lstat $path or die;
+
+    my $size = $st[7];
+
+    if (-d $path) {
+        opendir DIR, $path or die;
+        foreach my $name (readdir DIR) {
+            next if $name eq "." || $name eq "..";
+            $size += pathSize("$path/$name");
+        }
+    }
+    
+    return $size;
+}
+
+
+my $totalPaths = 0;
+my ($totalSize, $totalWaste) = (0, 0);
+
+foreach my $name (sort {scalar @{$pkgInstances{$b}} <=> scalar @{$pkgInstances{$a}}} (keys %pkgInstances)) {
+    print "$name ", scalar @{$pkgInstances{$name}}, "\n";
+    my $allSize = 0;
+    foreach my $x (sort {$a->{version} cmp $b->{version}} @{$pkgInstances{$name}}) {
+        $totalPaths++;
+        my $size = pathSize $x->{path};
+        $allSize += $size;
+        print "    $x->{version} $size\n";
+    }
+    my $avgSize = int($allSize / scalar @{$pkgInstances{$name}});
+    my $waste = $allSize - $avgSize;
+    $totalSize += $allSize;
+    $totalWaste += $waste;
+    print "    average $avgSize, waste $waste\n";
+}
+
+
+my $avgDupl = $totalPaths / scalar (keys %pkgInstances);
+my $wasteFactor = ($totalWaste / $totalSize) * 100;
+print "average package duplication $avgDupl, total size $totalSize, total waste $totalWaste, $wasteFactor% wasted\n";
diff --git a/src/boost/assert.hpp b/src/boost/assert.hpp
new file mode 100644
index 000000000000..754ebb954bce
--- /dev/null
+++ b/src/boost/assert.hpp
@@ -0,0 +1,38 @@
+//
+//  boost/assert.hpp - BOOST_ASSERT(expr)
+//
+//  Copyright (c) 2001, 2002 Peter Dimov and Multi Media Ltd.
+//
+//  Permission to copy, use, modify, sell and distribute this software
+//  is granted provided this copyright notice appears in all copies.
+//  This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+//
+//  Note: There are no include guards. This is intentional.
+//
+//  See http://www.boost.org/libs/utility/assert.html for documentation.
+//
+
+#undef BOOST_ASSERT
+
+#if defined(BOOST_DISABLE_ASSERTS)
+
+# define BOOST_ASSERT(expr) ((void)0)
+
+#elif defined(BOOST_ENABLE_ASSERT_HANDLER)
+
+#include <boost/current_function.hpp>
+
+namespace boost
+{
+
+void assertion_failed(char const * expr, char const * function, char const * file, long line); // user defined
+
+} // namespace boost
+
+#define BOOST_ASSERT(expr) ((expr)? ((void)0): ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))
+
+#else
+# include <assert.h>
+# define BOOST_ASSERT(expr) assert(expr)
+#endif
diff --git a/src/boost/format.hpp b/src/boost/format.hpp
new file mode 100644
index 000000000000..f965f0f33e9a
--- /dev/null
+++ b/src/boost/format.hpp
@@ -0,0 +1,64 @@
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream
+
+// ----------------------------------------------------------------------------
+// format.hpp :  primary header
+// ----------------------------------------------------------------------------
+
+#ifndef BOOST_FORMAT_HPP
+#define BOOST_FORMAT_HPP
+
+#include <vector>
+#include <string>
+#include <sstream>
+#include <cassert>
+
+#if HAVE_LOCALE
+#include <locale>
+#else
+#define BOOST_NO_STD_LOCALE
+#define BOOST_NO_LOCALE_ISIDIGIT
+#include <cctype>
+#endif
+
+#include <boost/format/macros_default.hpp>
+
+
+// ****  Forward declarations ----------------------------------
+#include <boost/format/format_fwd.hpp>           // basic_format<Ch,Tr>, and other frontends
+#include <boost/format/internals_fwd.hpp>        // misc forward declarations for internal use
+
+
+// ****  Auxiliary structs (stream_format_state<Ch,Tr> , and format_item<Ch,Tr> )
+#include <boost/format/internals.hpp>    
+
+// ****  Format  class  interface --------------------------------
+#include <boost/format/format_class.hpp>
+
+// **** Exceptions -----------------------------------------------
+#include <boost/format/exceptions.hpp>
+
+// **** Implementation -------------------------------------------
+//#include <boost/format/format_implementation.hpp>   // member functions
+
+#include <boost/format/group.hpp>                   // class for grouping arguments
+
+#include <boost/format/feed_args.hpp>               // argument-feeding functions
+//#include <boost/format/parsing.hpp>                 // format-string parsing (member-)functions
+
+// **** Implementation of the free functions ----------------------
+//#include <boost/format/free_funcs.hpp>
+
+
+#endif // BOOST_FORMAT_HPP
diff --git a/src/boost/format/exceptions.hpp b/src/boost/format/exceptions.hpp
new file mode 100644
index 000000000000..79e452449ef8
--- /dev/null
+++ b/src/boost/format/exceptions.hpp
@@ -0,0 +1,96 @@
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// exceptions.hpp 
+// ------------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_EXCEPTIONS_HPP
+#define BOOST_FORMAT_EXCEPTIONS_HPP
+
+
+#include <stdexcept>
+
+
+namespace boost {
+
+namespace io {
+
+// **** exceptions -----------------------------------------------
+
+class format_error : public std::exception
+{
+public:
+  format_error() {}
+  virtual const char *what() const throw()
+  {
+    return "boost::format_error: "
+      "format generic failure";
+  }
+};
+
+class bad_format_string : public format_error
+{
+public:
+  bad_format_string() {}
+  virtual const char *what() const throw()
+  {
+    return "boost::bad_format_string: "
+      "format-string is ill-formed";
+  }
+};
+
+class too_few_args : public format_error
+{
+public:
+  too_few_args() {}
+  virtual const char *what() const throw()
+  {
+    return "boost::too_few_args: "
+      "format-string refered to more arguments than were passed";
+  }
+};
+
+class too_many_args : public format_error
+{
+public:
+  too_many_args() {}
+  virtual const char *what() const throw()
+  {
+    return "boost::too_many_args: "
+      "format-string refered to less arguments than were passed";
+  }
+};
+
+
+class  out_of_range : public format_error
+{
+public:
+  out_of_range() {}
+  virtual const char *what() const throw()
+  {
+    return "boost::out_of_range: "
+      "tried to refer to an argument (or item) number which is out of range, "
+      "according to the format string.";
+  }
+};
+
+
+} // namespace io
+
+} // namespace boost
+
+
+#endif // BOOST_FORMAT_EXCEPTIONS_HPP
diff --git a/src/boost/format/feed_args.hpp b/src/boost/format/feed_args.hpp
new file mode 100644
index 000000000000..3d0b47b4a12e
--- /dev/null
+++ b/src/boost/format/feed_args.hpp
@@ -0,0 +1,247 @@
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream
+
+// ----------------------------------------------------------------------------
+// feed_args.hpp :  functions for processing each argument 
+//                      (feed, feed_manip, and distribute)
+// ----------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_FEED_ARGS_HPP
+#define BOOST_FORMAT_FEED_ARGS_HPP
+
+#include "boost/format/format_class.hpp"
+#include "boost/format/group.hpp"
+
+#include "boost/throw_exception.hpp"
+
+namespace boost {
+namespace io {
+namespace detail {
+namespace  { 
+
+  inline
+  void empty_buf(BOOST_IO_STD ostringstream & os) { 
+    static const std::string emptyStr;
+    os.str(emptyStr); 
+  }
+
+  void do_pad( std::string & s, 
+                std::streamsize w, 
+                const char c, 
+                std::ios::fmtflags f, 
+                bool center) 
+    // applies centered / left / right  padding  to the string s.
+    // Effects : string s is padded.
+  {
+    std::streamsize n=w-s.size();
+    if(n<=0) {
+      return;
+    }
+    if(center) 
+      {
+        s.reserve(w); // allocate once for the 2 inserts
+        const std::streamsize n1 = n /2, n0 = n - n1; 
+        s.insert(s.begin(), n0, c);
+        s.append(n1, c);
+      } 
+    else 
+      {
+        if(f & std::ios::left) {
+          s.append(n, c);
+        }
+        else {
+          s.insert(s.begin(), n, c);
+        }
+      }
+  } // -do_pad(..) 
+
+
+  template<class T> inline
+  void put_head(BOOST_IO_STD ostream& , const T& ) {
+  }
+
+  template<class T> inline
+  void put_head( BOOST_IO_STD ostream& os, const group1<T>& x ) {
+    os << group_head(x.a1_); // send the first N-1 items, not the last
+  }
+
+  template<class T> inline
+  void put_last( BOOST_IO_STD ostream& os, const T& x ) {
+    os << x ;
+  }
+
+  template<class T> inline
+  void put_last( BOOST_IO_STD ostream& os, const group1<T>& x ) {
+    os << group_last(x.a1_); // this selects the last element
+  }
+
+#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST 
+  template<class T> inline
+  void put_head( BOOST_IO_STD ostream& , T& ) {
+  }
+
+  template<class T> inline
+  void put_last( BOOST_IO_STD ostream& os, T& x ) {
+    os << x ;
+  }
+#endif
+
+
+
+  
+template<class T> 
+void put( T x, 
+          const format_item& specs, 
+          std::string & res, 
+          BOOST_IO_STD ostringstream& oss_ )
+{
+  // does the actual conversion of x, with given params, into a string
+  // using the *supplied* stringstream. (the stream state is important)
+
+  typedef std::string string_t;
+  typedef format_item  format_item_t;
+
+  stream_format_state   prev_state(oss_);
+    
+  specs.state_.apply_on(oss_);
+
+  // in case x is a group, apply the manip part of it, 
+  // in order to find width
+  put_head( oss_, x );
+  empty_buf( oss_);
+
+  const std::streamsize w=oss_.width();
+  const std::ios::fmtflags fl=oss_.flags();
+  const bool internal = (fl & std::ios::internal) != 0;
+  const bool two_stepped_padding = internal
+    &&  ! ( specs.pad_scheme_ & format_item_t::spacepad ) 
+    && specs.truncate_ < 0 ;
+      
+
+  if(! two_stepped_padding) 
+    {
+      if(w>0) // handle simple padding via do_pad, not natively in stream 
+        oss_.width(0);
+      put_last( oss_, x);
+      res = oss_.str();
+
+      if (specs.truncate_ >= 0)
+        res.erase(specs.truncate_);
+
+      // complex pads :
+      if(specs.pad_scheme_ & format_item_t::spacepad)
+        {
+          if( res.size()==0 ||   ( res[0]!='+' && res[0]!='-'  ))
+            {
+              res.insert(res.begin(), 1, ' '); // insert 1 space at  pos 0
+            }
+        }
+      if(w > 0) // need do_pad
+        {
+          do_pad(res,w,oss_.fill(), fl, (specs.pad_scheme_ & format_item_t::centered) !=0 );
+        }
+    } 
+  else  // 2-stepped padding
+    {
+      put_last( oss_, x); // oss_.width() may result in padding.
+      res = oss_.str();
+      
+      if (specs.truncate_ >= 0)
+        res.erase(specs.truncate_);
+
+      if( res.size() - w > 0)
+        { //   length w exceeded
+          // either it was multi-output with the first output padding up the full width,
+          // or it was one big arg and we are fine.
+          empty_buf( oss_);
+          oss_.width(0);
+          put_last(oss_, x );
+          string_t tmp = oss_.str();  // minimal-length output
+          std::streamsize d;
+          if( (d=w - tmp.size()) <=0 ) 
+            {
+              // minimal length is already >= w, so no padding  (cool!)
+              res.swap(tmp);
+            }
+          else
+            { // hum..  we need to pad (it was necessarily multi-output)
+              typedef typename string_t::size_type size_type;
+              size_type i = 0;
+              while( i<tmp.size() && tmp[i] == res[i] ) // find where we should pad.
+                ++i;
+              tmp.insert(i, static_cast<size_type>( d ), oss_.fill());
+              res.swap( tmp );
+            }
+        }
+      else 
+        { // okay, only one thing was printed and padded, so res is fine.
+        }
+    }
+
+  prev_state.apply_on(oss_);
+  empty_buf( oss_);
+  oss_.clear();
+} // end- put(..)
+
+
+}  // local namespace
+
+
+
+
+
+template<class T> 
+void distribute(basic_format& self, T x) 
+  // call put(x, ..) on every occurrence of the current argument :
+{
+  if(self.cur_arg_ >= self.num_args_)
+    {
+      if( self.exceptions() & too_many_args_bit )
+        boost::throw_exception(too_many_args()); // too many variables have been supplied !
+      else return;
+    }
+  for(unsigned long i=0; i < self.items_.size(); ++i)
+    {
+      if(self.items_[i].argN_ == self.cur_arg_)
+        {
+          put<T> (x, self.items_[i], self.items_[i].res_, self.oss_ );
+        }
+    }
+}
+
+template<class T> 
+basic_format&  feed(basic_format& self, T x) 
+{
+  if(self.dumped_) self.clear();
+  distribute<T> (self, x);
+  ++self.cur_arg_;
+  if(self.bound_.size() != 0)
+    {
+      while( self.cur_arg_ < self.num_args_ && self.bound_[self.cur_arg_] )
+        ++self.cur_arg_;
+    }
+
+  // this arg is finished, reset the stream's format state
+  self.state0_.apply_on(self.oss_);
+  return self;
+}
+    
+
+} // namespace detail
+} // namespace io
+} // namespace boost
+
+
+#endif //  BOOST_FORMAT_FEED_ARGS_HPP
diff --git a/src/boost/format/format_class.hpp b/src/boost/format/format_class.hpp
new file mode 100644
index 000000000000..6875623acb47
--- /dev/null
+++ b/src/boost/format/format_class.hpp
@@ -0,0 +1,135 @@
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// format_class.hpp :  class interface
+// ------------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_CLASS_HPP
+#define BOOST_FORMAT_CLASS_HPP
+
+#include <vector>
+#include <string>
+
+#include <boost/format/format_fwd.hpp>
+#include <boost/format/internals_fwd.hpp>
+
+#include <boost/format/internals.hpp>
+
+namespace boost {
+
+class basic_format 
+{
+public:
+  typedef std::string                string_t;
+  typedef BOOST_IO_STD ostringstream internal_stream_t;
+private:
+  typedef BOOST_IO_STD ostream       stream_t;
+  typedef io::detail::stream_format_state  stream_format_state;
+  typedef io::detail::format_item          format_item_t;
+
+public:
+  basic_format(const char* str);
+  basic_format(const string_t& s);
+#ifndef BOOST_NO_STD_LOCALE
+  basic_format(const char* str, const std::locale & loc);
+  basic_format(const string_t& s, const std::locale & loc);
+#endif // no locale
+  basic_format(const basic_format& x);
+  basic_format& operator= (const basic_format& x);
+
+  basic_format& clear(); // empty the string buffers (except bound arguments, see clear_binds() )
+
+  // pass arguments through those operators :
+  template<class T>  basic_format&   operator%(const T& x) 
+  { 
+    return io::detail::feed<const T&>(*this,x);
+  }
+
+#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST
+  template<class T>  basic_format&   operator%(T& x) 
+  {
+    return io::detail::feed<T&>(*this,x);
+  }
+#endif
+
+
+  // system for binding arguments :
+  template<class T>  
+  basic_format&         bind_arg(int argN, const T& val) 
+  {
+    return io::detail::bind_arg_body(*this, argN, val); 
+  }
+  basic_format&         clear_bind(int argN);
+  basic_format&         clear_binds();
+
+  // modify the params of a directive, by applying a manipulator :
+  template<class T> 
+  basic_format&  modify_item(int itemN, const T& manipulator) 
+  {
+    return io::detail::modify_item_body(*this, itemN, manipulator) ;
+  }
+
+  // Choosing which errors will throw exceptions :
+  unsigned char exceptions() const;
+  unsigned char exceptions(unsigned char newexcept);
+
+  // final output
+  string_t str() const;
+  friend BOOST_IO_STD ostream& 
+  operator<< ( BOOST_IO_STD ostream& , const basic_format& ); 
+                      
+
+  template<class T>  friend basic_format&  
+  io::detail::feed(basic_format&, T);
+    
+  template<class T>  friend   
+  void io::detail::distribute(basic_format&, T);
+  
+  template<class T>  friend
+  basic_format&  io::detail::modify_item_body(basic_format&, int, const T&);
+
+  template<class T> friend
+  basic_format&  io::detail::bind_arg_body(basic_format&, int, const T&);
+
+// make the members private only if the friend templates are supported
+private:
+
+  // flag bits, used for style_
+  enum style_values  { ordered = 1,        // set only if all directives are  positional directives
+                       special_needs = 4 };     
+
+  // parse the format string :
+  void parse(const string_t&);
+
+  int                           style_;         // style of format-string :  positional or not, etc
+  int                           cur_arg_;       // keep track of which argument comes next
+  int                           num_args_;      // number of expected arguments
+  mutable bool                  dumped_;        // true only after call to str() or <<
+  std::vector<format_item_t>    items_;         // vector of directives (aka items)
+  string_t                      prefix_;        // piece of string to insert before first item
+
+  std::vector<bool>             bound_;         // stores which arguments were bound
+                                                //   size = num_args OR zero
+  internal_stream_t             oss_;           // the internal stream.
+  stream_format_state           state0_;        // reference state for oss_
+  unsigned char                 exceptions_;
+}; // class basic_format
+
+
+} // namespace boost
+
+
+#endif // BOOST_FORMAT_CLASS_HPP
diff --git a/src/boost/format/format_fwd.hpp b/src/boost/format/format_fwd.hpp
new file mode 100644
index 000000000000..97c55f6684c3
--- /dev/null
+++ b/src/boost/format/format_fwd.hpp
@@ -0,0 +1,49 @@
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// format_fwd.hpp :  forward declarations, for primary header format.hpp
+// ------------------------------------------------------------------------------
+
+#ifndef BOOST_FORMAT_FWD_HPP
+#define BOOST_FORMAT_FWD_HPP
+
+#include <string>
+#include <iosfwd>
+
+namespace boost {
+
+class basic_format;
+
+typedef basic_format    format;
+
+namespace io {
+enum format_error_bits { bad_format_string_bit = 1, 
+                         too_few_args_bit = 2, too_many_args_bit = 4,
+                         out_of_range_bit = 8,
+                         all_error_bits = 255, no_error_bits=0 };
+                  
+// Conversion:  format   to   string
+std::string     str(const basic_format& ) ;
+
+} // namespace io
+
+
+BOOST_IO_STD ostream& 
+operator<<( BOOST_IO_STD ostream&, const basic_format&);
+
+
+} // namespace boost
+
+#endif // BOOST_FORMAT_FWD_HPP
diff --git a/src/boost/format/format_implementation.cc b/src/boost/format/format_implementation.cc
new file mode 100644
index 000000000000..aa191afe1132
--- /dev/null
+++ b/src/boost/format/format_implementation.cc
@@ -0,0 +1,256 @@
+// -*- C++ -*-
+//  Boost general library format ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream
+
+// ----------------------------------------------------------------------------
+// format_implementation.hpp  Implementation of the basic_format class
+// ----------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_IMPLEMENTATION_HPP
+#define BOOST_FORMAT_IMPLEMENTATION_HPP
+
+#include <boost/throw_exception.hpp>
+#include <boost/assert.hpp>
+#include <boost/format.hpp>
+
+namespace boost {
+
+// --------  format:: -------------------------------------------
+basic_format::basic_format(const char* str)
+    : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
+      items_(), oss_(), exceptions_(io::all_error_bits)
+{
+    state0_.set_by_stream(oss_);
+    string_t emptyStr;
+    if( !str) str = emptyStr.c_str();
+    parse( str );
+}
+
+#ifndef BOOST_NO_STD_LOCALE
+basic_format::basic_format(const char* str, const std::locale & loc)
+    : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
+      items_(), oss_(), exceptions_(io::all_error_bits)
+{
+    oss_.imbue( loc );
+    state0_.set_by_stream(oss_);
+    string_t emptyStr;
+    if( !str) str = emptyStr.c_str();
+    parse( str );
+}
+
+basic_format::basic_format(const string_t& s, const std::locale & loc)
+    : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
+      items_(),  oss_(), exceptions_(io::all_error_bits)
+{
+    oss_.imbue( loc );
+    state0_.set_by_stream(oss_);
+    parse(s);  
+}
+#endif //BOOST_NO_STD_LOCALE
+
+basic_format::basic_format(const string_t& s)
+    : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
+      items_(),  oss_(), exceptions_(io::all_error_bits)
+{
+    state0_.set_by_stream(oss_);
+    parse(s);  
+}
+
+basic_format:: basic_format(const basic_format& x)
+    : style_(x.style_), cur_arg_(x.cur_arg_), num_args_(x.num_args_), dumped_(false), 
+      items_(x.items_), prefix_(x.prefix_), bound_(x.bound_), 
+      oss_(),   // <- we obviously can't copy x.oss_
+      state0_(x.state0_), exceptions_(x.exceptions_)
+{ 
+    state0_.apply_on(oss_);
+} 
+
+basic_format& basic_format::operator= (const basic_format& x)
+{
+    if(this == &x)
+      return *this;
+    state0_ = x.state0_;
+    state0_.apply_on(oss_);
+
+    // plus all the other (trivial) assignments :
+    exceptions_ = x.exceptions_;
+    items_ = x.items_;
+    prefix_ = x.prefix_;
+    bound_=x.bound_;
+    style_=x.style_; 
+    cur_arg_=x.cur_arg_; 
+    num_args_=x.num_args_;
+    dumped_=x.dumped_;
+    return *this;
+}
+
+
+unsigned char basic_format::exceptions() const 
+{
+  return exceptions_; 
+}
+
+unsigned char basic_format::exceptions(unsigned char newexcept) 
+{ 
+  unsigned char swp = exceptions_; 
+  exceptions_ = newexcept; 
+  return swp; 
+}
+
+
+basic_format& basic_format ::clear()
+  // empty the string buffers (except bound arguments, see clear_binds() )
+  // and make the format object ready for formatting a new set of arguments
+{
+    BOOST_ASSERT( bound_.size()==0 || num_args_ == static_cast<int>(bound_.size()) );
+
+    for(unsigned long i=0; i<items_.size(); ++i){
+      items_[i].state_ = items_[i].ref_state_;
+      // clear converted strings only if the corresponding argument is not  bound :
+      if( bound_.size()==0 || !bound_[ items_[i].argN_ ] )  items_[i].res_.resize(0);
+    }
+    cur_arg_=0; dumped_=false;
+    // maybe first arg is bound:
+    if(bound_.size() != 0)
+      {
+        while(cur_arg_ < num_args_ && bound_[cur_arg_] )      ++cur_arg_;
+      }
+    return *this;
+}
+
+basic_format& basic_format ::clear_binds() 
+  // cancel all bindings, and clear()
+{
+    bound_.resize(0);
+    clear();
+    return *this;
+}
+
+basic_format& basic_format::clear_bind(int argN) 
+  // cancel the binding of ONE argument, and clear()
+{
+    if(argN<1 || argN > num_args_ || bound_.size()==0 || !bound_[argN-1] ) 
+      {
+        if( exceptions() & io::out_of_range_bit )
+          boost::throw_exception(io::out_of_range()); // arg not in range.
+        else return *this;
+      }
+    bound_[argN-1]=false;
+    clear();
+    return *this;
+}
+
+
+
+std::string basic_format::str() const
+{
+  dumped_=true;
+  if(items_.size()==0)
+    return prefix_;
+  if( cur_arg_ < num_args_)
+      if( exceptions() & io::too_few_args_bit )
+        boost::throw_exception(io::too_few_args()); // not enough variables have been supplied !
+
+  unsigned long sz = prefix_.size();
+  unsigned long i;
+  for(i=0; i < items_.size(); ++i) 
+    sz += items_[i].res_.size() + items_[i].appendix_.size();
+  string_t res;
+  res.reserve(sz);
+
+  res += prefix_;
+  for(i=0; i < items_.size(); ++i) 
+  {
+    const format_item_t& item = items_[i];
+    res += item.res_;
+    if( item.argN_ == format_item_t::argN_tabulation) 
+    { 
+      BOOST_ASSERT( item.pad_scheme_ & format_item_t::tabulation);
+      std::streamsize  n = item.state_.width_ - res.size();
+      if( n > 0 )
+        res.append( n, item.state_.fill_ );
+    }
+    res += item.appendix_;
+  }
+  return res;
+}
+
+namespace io {
+namespace detail {
+
+template<class T>
+basic_format&  bind_arg_body( basic_format& self, 
+                                      int argN, 
+                                      const T& val)
+  // bind one argument to a fixed value
+  // this is persistent over clear() calls, thus also over str() and <<
+{
+    if(self.dumped_) self.clear(); // needed, because we will modify cur_arg_..
+    if(argN<1 || argN > self.num_args_) 
+      {
+        if( self.exceptions() & io::out_of_range_bit )
+          boost::throw_exception(io::out_of_range()); // arg not in range.
+        else return self;
+      }
+    if(self.bound_.size()==0) 
+      self.bound_.assign(self.num_args_,false);
+    else 
+      BOOST_ASSERT( self.num_args_ == static_cast<signed int>(self.bound_.size()) );
+    int o_cur_arg = self.cur_arg_;
+    self.cur_arg_ = argN-1; // arrays begin at 0
+
+    self.bound_[self.cur_arg_]=false; // if already set, we unset and re-set it
+    self.operator%(val); // put val at the right place, because cur_arg is set
+    
+
+    // Now re-position cur_arg before leaving :
+    self.cur_arg_ = o_cur_arg; 
+    self.bound_[argN-1]=true;
+    if(self.cur_arg_ == argN-1 )
+      // hum, now this arg is bound, so move to next free arg
+      {
+        while(self.cur_arg_ < self.num_args_ && self.bound_[self.cur_arg_])   ++self.cur_arg_;
+      }
+    // In any case, we either have all args, or are on a non-bound arg :
+    BOOST_ASSERT( self.cur_arg_ >= self.num_args_ || ! self.bound_[self.cur_arg_]);
+    return self;
+}
+
+template<class T>
+basic_format&  modify_item_body( basic_format& self,
+                                      int itemN, 
+                                      const T& manipulator)
+  // applies a manipulator to the format_item describing a given directive.
+  // this is a permanent change, clear or clear_binds won't cancel that.
+{
+  if(itemN<1 || itemN >= static_cast<signed int>(self.items_.size() )) 
+    {
+      if( self.exceptions() & io::out_of_range_bit ) 
+        boost::throw_exception(io::out_of_range()); // item not in range.
+      else return self;
+    }
+  self.items_[itemN-1].ref_state_.apply_manip( manipulator );
+  self.items_[itemN-1].state_ = self.items_[itemN-1].ref_state_;
+  return self;
+}
+
+} // namespace detail
+
+} // namespace io
+
+} // namespace boost
+
+
+
+#endif  // BOOST_FORMAT_IMPLEMENTATION_HPP
diff --git a/src/boost/format/free_funcs.cc b/src/boost/format/free_funcs.cc
new file mode 100644
index 000000000000..151db37a0ac9
--- /dev/null
+++ b/src/boost/format/free_funcs.cc
@@ -0,0 +1,71 @@
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// free_funcs.hpp :  implementation of the free functions declared in namespace format
+// ------------------------------------------------------------------------------
+
+#ifndef BOOST_FORMAT_FUNCS_HPP
+#define BOOST_FORMAT_FUNCS_HPP
+
+#include "boost/format.hpp"
+#include "boost/throw_exception.hpp"
+
+namespace boost {
+
+namespace io {
+  inline 
+  std::string str(const basic_format& f) 
+    // adds up all pieces of strings and converted items, and return the formatted string
+  {
+    return f.str();
+  }
+}   // - namespace io
+
+BOOST_IO_STD ostream& 
+operator<<( BOOST_IO_STD ostream& os, 
+            const boost::basic_format& f) 
+  // effect: "return os << str(f);" but we can try to do it faster
+{
+  typedef boost::basic_format   format_t;
+  if(f.items_.size()==0) 
+    os << f.prefix_;
+  else {
+    if(f.cur_arg_ < f.num_args_)
+      if( f.exceptions() & io::too_few_args_bit )
+        boost::throw_exception(io::too_few_args()); // not enough variables have been supplied !
+    if(f.style_ & format_t::special_needs) 
+        os << f.str();
+    else {
+    // else we don't have to count chars output, so we dump directly to os :
+      os << f.prefix_;
+      for(unsigned long i=0; i<f.items_.size(); ++i) 
+        {
+          const format_t::format_item_t& item = f.items_[i];
+          os << item.res_;
+          os << item.appendix_;
+
+        }
+    }
+  }
+  f.dumped_=true;
+  return os;
+}
+
+
+
+} // namespace boost
+
+
+#endif // BOOST_FORMAT_FUNCS_HPP
diff --git a/src/boost/format/group.hpp b/src/boost/format/group.hpp
new file mode 100644
index 000000000000..ac63f3f0bab0
--- /dev/null
+++ b/src/boost/format/group.hpp
@@ -0,0 +1,680 @@
+
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream
+
+// ----------------------------------------------------------------------------
+
+// group.hpp :  encapsulates a group of manipulators along with an argument
+//                      
+// group_head : cut the last element of a group out.
+// (is overloaded below on each type of group)
+
+// group_last : returns the last element of a group
+// (is overloaded below on each type of group)
+
+// ----------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_GROUP_HPP
+#define BOOST_FORMAT_GROUP_HPP
+
+
+namespace boost {
+namespace io {
+
+
+namespace detail {
+
+
+// empty group, but useful even though.
+struct group0 
+{
+    group0()      {}
+};
+
+template <class Ch, class Tr>
+inline
+BOOST_IO_STD ostream&
+operator << ( BOOST_IO_STD ostream& os,
+             const group0& )
+{ 
+   return os; 
+}
+
+template <class T1>
+struct group1
+{
+    T1 a1_;
+    group1(T1 a1)
+      : a1_(a1)
+      {}
+};
+
+template <class Ch, class Tr, class T1>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+             const group1<T1>& x)
+{ 
+   os << x.a1_;  
+   return os; 
+}
+
+
+
+
+template <class T1,class T2>
+struct group2
+{
+    T1 a1_;
+    T2 a2_;
+    group2(T1 a1,T2 a2)
+      : a1_(a1),a2_(a2)
+      {}
+};
+
+template <class Ch, class Tr, class T1,class T2>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+             const group2<T1,T2>& x)
+{ 
+   os << x.a1_<< x.a2_;  
+   return os; 
+}
+
+template <class T1,class T2,class T3>
+struct group3
+{
+    T1 a1_;
+    T2 a2_;
+    T3 a3_;
+    group3(T1 a1,T2 a2,T3 a3)
+      : a1_(a1),a2_(a2),a3_(a3)
+      {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+             const group3<T1,T2,T3>& x)
+{ 
+   os << x.a1_<< x.a2_<< x.a3_;  
+   return os; 
+}
+
+template <class T1,class T2,class T3,class T4>
+struct group4
+{
+    T1 a1_;
+    T2 a2_;
+    T3 a3_;
+    T4 a4_;
+    group4(T1 a1,T2 a2,T3 a3,T4 a4)
+      : a1_(a1),a2_(a2),a3_(a3),a4_(a4)
+      {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+             const group4<T1,T2,T3,T4>& x)
+{ 
+   os << x.a1_<< x.a2_<< x.a3_<< x.a4_;  
+   return os; 
+}
+
+template <class T1,class T2,class T3,class T4,class T5>
+struct group5
+{
+    T1 a1_;
+    T2 a2_;
+    T3 a3_;
+    T4 a4_;
+    T5 a5_;
+    group5(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5)
+      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5)
+      {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+             const group5<T1,T2,T3,T4,T5>& x)
+{ 
+   os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_;  
+   return os; 
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6>
+struct group6
+{
+    T1 a1_;
+    T2 a2_;
+    T3 a3_;
+    T4 a4_;
+    T5 a5_;
+    T6 a6_;
+    group6(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6)
+      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6)
+      {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+             const group6<T1,T2,T3,T4,T5,T6>& x)
+{ 
+   os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_;  
+   return os; 
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7>
+struct group7
+{
+    T1 a1_;
+    T2 a2_;
+    T3 a3_;
+    T4 a4_;
+    T5 a5_;
+    T6 a6_;
+    T7 a7_;
+    group7(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7)
+      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7)
+      {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+             const group7<T1,T2,T3,T4,T5,T6,T7>& x)
+{ 
+   os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_;  
+   return os; 
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
+struct group8
+{
+    T1 a1_;
+    T2 a2_;
+    T3 a3_;
+    T4 a4_;
+    T5 a5_;
+    T6 a6_;
+    T7 a7_;
+    T8 a8_;
+    group8(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8)
+      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8)
+      {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+             const group8<T1,T2,T3,T4,T5,T6,T7,T8>& x)
+{ 
+   os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_;  
+   return os; 
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
+struct group9
+{
+    T1 a1_;
+    T2 a2_;
+    T3 a3_;
+    T4 a4_;
+    T5 a5_;
+    T6 a6_;
+    T7 a7_;
+    T8 a8_;
+    T9 a9_;
+    group9(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9)
+      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8),a9_(a9)
+      {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+             const group9<T1,T2,T3,T4,T5,T6,T7,T8,T9>& x)
+{ 
+   os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_<< x.a9_;  
+   return os; 
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
+struct group10
+{
+    T1 a1_;
+    T2 a2_;
+    T3 a3_;
+    T4 a4_;
+    T5 a5_;
+    T6 a6_;
+    T7 a7_;
+    T8 a8_;
+    T9 a9_;
+    T10 a10_;
+    group10(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9,T10 a10)
+      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8),a9_(a9),a10_(a10)
+      {}
+};
+
+template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
+inline
+BOOST_IO_STD ostream&
+operator << (BOOST_IO_STD ostream& os,
+             const group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10>& x)
+{ 
+   os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_<< x.a9_<< x.a10_;  
+   return os; 
+}
+
+
+
+
+template <class T1,class T2>
+inline
+group1<T1> 
+group_head( group2<T1,T2> const& x)
+{
+   return group1<T1> (x.a1_); 
+}
+
+template <class T1,class T2>
+inline
+group1<T2> 
+group_last( group2<T1,T2> const& x)
+{
+   return group1<T2> (x.a2_); 
+}
+
+
+
+template <class T1,class T2,class T3>
+inline
+group2<T1,T2> 
+group_head( group3<T1,T2,T3> const& x)
+{
+   return group2<T1,T2> (x.a1_,x.a2_); 
+}
+
+template <class T1,class T2,class T3>
+inline
+group1<T3> 
+group_last( group3<T1,T2,T3> const& x)
+{
+   return group1<T3> (x.a3_); 
+}
+
+
+
+template <class T1,class T2,class T3,class T4>
+inline
+group3<T1,T2,T3> 
+group_head( group4<T1,T2,T3,T4> const& x)
+{
+   return group3<T1,T2,T3> (x.a1_,x.a2_,x.a3_); 
+}
+
+template <class T1,class T2,class T3,class T4>
+inline
+group1<T4> 
+group_last( group4<T1,T2,T3,T4> const& x)
+{
+   return group1<T4> (x.a4_); 
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5>
+inline
+group4<T1,T2,T3,T4> 
+group_head( group5<T1,T2,T3,T4,T5> const& x)
+{
+   return group4<T1,T2,T3,T4> (x.a1_,x.a2_,x.a3_,x.a4_); 
+}
+
+template <class T1,class T2,class T3,class T4,class T5>
+inline
+group1<T5> 
+group_last( group5<T1,T2,T3,T4,T5> const& x)
+{
+   return group1<T5> (x.a5_); 
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5,class T6>
+inline
+group5<T1,T2,T3,T4,T5> 
+group_head( group6<T1,T2,T3,T4,T5,T6> const& x)
+{
+   return group5<T1,T2,T3,T4,T5> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_); 
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6>
+inline
+group1<T6> 
+group_last( group6<T1,T2,T3,T4,T5,T6> const& x)
+{
+   return group1<T6> (x.a6_); 
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7>
+inline
+group6<T1,T2,T3,T4,T5,T6> 
+group_head( group7<T1,T2,T3,T4,T5,T6,T7> const& x)
+{
+   return group6<T1,T2,T3,T4,T5,T6> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_); 
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7>
+inline
+group1<T7> 
+group_last( group7<T1,T2,T3,T4,T5,T6,T7> const& x)
+{
+   return group1<T7> (x.a7_); 
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
+inline
+group7<T1,T2,T3,T4,T5,T6,T7> 
+group_head( group8<T1,T2,T3,T4,T5,T6,T7,T8> const& x)
+{
+   return group7<T1,T2,T3,T4,T5,T6,T7> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_); 
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
+inline
+group1<T8> 
+group_last( group8<T1,T2,T3,T4,T5,T6,T7,T8> const& x)
+{
+   return group1<T8> (x.a8_); 
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
+inline
+group8<T1,T2,T3,T4,T5,T6,T7,T8> 
+group_head( group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> const& x)
+{
+   return group8<T1,T2,T3,T4,T5,T6,T7,T8> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_,x.a8_); 
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
+inline
+group1<T9> 
+group_last( group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> const& x)
+{
+   return group1<T9> (x.a9_); 
+}
+
+
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
+inline
+group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> 
+group_head( group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10> const& x)
+{
+   return group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_,x.a8_,x.a9_); 
+}
+
+template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
+inline
+group1<T10> 
+group_last( group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10> const& x)
+{
+   return group1<T10> (x.a10_); 
+}
+
+
+
+
+
+} // namespace detail
+
+
+
+// helper functions
+
+
+inline detail::group1< detail::group0 >  
+group() { return detail::group1< detail::group0 > ( detail::group0() ); }
+
+template  <class T1, class Var> 
+inline
+detail::group1< detail::group2<T1, Var const&> >
+  group(T1 a1, Var const& var)
+{ 
+   return detail::group1< detail::group2<T1, Var const&> >
+                   ( detail::group2<T1, Var const&> 
+                        (a1, var) 
+                  );
+}
+
+template  <class T1,class T2, class Var> 
+inline
+detail::group1< detail::group3<T1,T2, Var const&> >
+  group(T1 a1,T2 a2, Var const& var)
+{ 
+   return detail::group1< detail::group3<T1,T2, Var const&> >
+                   ( detail::group3<T1,T2, Var const&> 
+                        (a1,a2, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3, class Var> 
+inline
+detail::group1< detail::group4<T1,T2,T3, Var const&> >
+  group(T1 a1,T2 a2,T3 a3, Var const& var)
+{ 
+   return detail::group1< detail::group4<T1,T2,T3, Var const&> >
+                   ( detail::group4<T1,T2,T3, Var const&> 
+                        (a1,a2,a3, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4, class Var> 
+inline
+detail::group1< detail::group5<T1,T2,T3,T4, Var const&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4, Var const& var)
+{ 
+   return detail::group1< detail::group5<T1,T2,T3,T4, Var const&> >
+                   ( detail::group5<T1,T2,T3,T4, Var const&> 
+                        (a1,a2,a3,a4, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4,class T5, class Var> 
+inline
+detail::group1< detail::group6<T1,T2,T3,T4,T5, Var const&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5, Var const& var)
+{ 
+   return detail::group1< detail::group6<T1,T2,T3,T4,T5, Var const&> >
+                   ( detail::group6<T1,T2,T3,T4,T5, Var const&> 
+                        (a1,a2,a3,a4,a5, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4,class T5,class T6, class Var> 
+inline
+detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var const&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6, Var const& var)
+{ 
+   return detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var const&> >
+                   ( detail::group7<T1,T2,T3,T4,T5,T6, Var const&> 
+                        (a1,a2,a3,a4,a5,a6, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4,class T5,class T6,class T7, class Var> 
+inline
+detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7, Var const& var)
+{ 
+   return detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&> >
+                   ( detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&> 
+                        (a1,a2,a3,a4,a5,a6,a7, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8, class Var> 
+inline
+detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8, Var const& var)
+{ 
+   return detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&> >
+                   ( detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&> 
+                        (a1,a2,a3,a4,a5,a6,a7,a8, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9, class Var> 
+inline
+detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9, Var const& var)
+{ 
+   return detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&> >
+                   ( detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&> 
+                        (a1,a2,a3,a4,a5,a6,a7,a8,a9, var) 
+                  );
+}
+
+
+#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST
+
+template  <class T1, class Var> 
+inline
+detail::group1< detail::group2<T1, Var&> >
+  group(T1 a1, Var& var)
+{ 
+   return detail::group1< detail::group2<T1, Var&> >
+                   ( detail::group2<T1, Var&> 
+                        (a1, var) 
+                  );
+}
+
+template  <class T1,class T2, class Var> 
+inline
+detail::group1< detail::group3<T1,T2, Var&> >
+  group(T1 a1,T2 a2, Var& var)
+{ 
+   return detail::group1< detail::group3<T1,T2, Var&> >
+                   ( detail::group3<T1,T2, Var&> 
+                        (a1,a2, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3, class Var> 
+inline
+detail::group1< detail::group4<T1,T2,T3, Var&> >
+  group(T1 a1,T2 a2,T3 a3, Var& var)
+{ 
+   return detail::group1< detail::group4<T1,T2,T3, Var&> >
+                   ( detail::group4<T1,T2,T3, Var&> 
+                        (a1,a2,a3, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4, class Var> 
+inline
+detail::group1< detail::group5<T1,T2,T3,T4, Var&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4, Var& var)
+{ 
+   return detail::group1< detail::group5<T1,T2,T3,T4, Var&> >
+                   ( detail::group5<T1,T2,T3,T4, Var&> 
+                        (a1,a2,a3,a4, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4,class T5, class Var> 
+inline
+detail::group1< detail::group6<T1,T2,T3,T4,T5, Var&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5, Var& var)
+{ 
+   return detail::group1< detail::group6<T1,T2,T3,T4,T5, Var&> >
+                   ( detail::group6<T1,T2,T3,T4,T5, Var&> 
+                        (a1,a2,a3,a4,a5, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4,class T5,class T6, class Var> 
+inline
+detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6, Var& var)
+{ 
+   return detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var&> >
+                   ( detail::group7<T1,T2,T3,T4,T5,T6, Var&> 
+                        (a1,a2,a3,a4,a5,a6, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4,class T5,class T6,class T7, class Var> 
+inline
+detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7, Var& var)
+{ 
+   return detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&> >
+                   ( detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&> 
+                        (a1,a2,a3,a4,a5,a6,a7, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8, class Var> 
+inline
+detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8, Var& var)
+{ 
+   return detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&> >
+                   ( detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&> 
+                        (a1,a2,a3,a4,a5,a6,a7,a8, var) 
+                  );
+}
+
+template  <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9, class Var> 
+inline
+detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&> >
+  group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9, Var& var)
+{ 
+   return detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&> >
+                   ( detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&> 
+                        (a1,a2,a3,a4,a5,a6,a7,a8,a9, var) 
+                  );
+}
+
+
+#endif  //end- #ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST
+
+
+} // namespace io
+
+} // namespace boost
+
+
+#endif   // BOOST_FORMAT_GROUP_HPP
diff --git a/src/boost/format/internals.hpp b/src/boost/format/internals.hpp
new file mode 100644
index 000000000000..d25eb4c864c4
--- /dev/null
+++ b/src/boost/format/internals.hpp
@@ -0,0 +1,167 @@
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream
+
+// ----------------------------------------------------------------------------
+// internals.hpp :  internal structs. included by format.hpp
+//                              stream_format_state, and format_item
+// ----------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_INTERNALS_HPP
+#define BOOST_FORMAT_INTERNALS_HPP
+
+
+#include <string>
+#include <sstream>
+
+namespace boost {
+namespace io {
+namespace detail {
+
+
+// --------------
+// set of params that define the format state of a stream
+
+struct stream_format_state 
+{
+  typedef std::ios   basic_ios;
+
+  std::streamsize width_;
+  std::streamsize precision_;
+  char fill_; 
+  std::ios::fmtflags flags_;
+
+  stream_format_state()       : width_(-1), precision_(-1), fill_(0), flags_(std::ios::dec)  {}
+  stream_format_state(basic_ios& os)                  {set_by_stream(os); }
+
+  void apply_on(basic_ios & os) const;                //- applies format_state to the stream
+  template<class T> void apply_manip(T manipulator)   //- modifies state by applying manipulator.
+       { apply_manip_body<T>( *this, manipulator) ; }
+  void reset();                                       //- sets to default state.
+  void set_by_stream(const basic_ios& os);            //- sets to os's state.
+};  
+
+
+
+// --------------
+// format_item : stores all parameters that can be defined by directives in the format-string
+
+struct format_item 
+{     
+  enum pad_values { zeropad = 1, spacepad =2, centered=4, tabulation = 8 };
+
+  enum arg_values { argN_no_posit   = -1, // non-positional directive. argN will be set later.
+                    argN_tabulation = -2, // tabulation directive. (no argument read) 
+                    argN_ignored    = -3  // ignored directive. (no argument read)
+  };
+  typedef BOOST_IO_STD ios              basic_ios;
+  typedef detail::stream_format_state         stream_format_state;
+  typedef std::string           string_t;
+  typedef BOOST_IO_STD ostringstream    internal_stream_t;
+
+
+  int         argN_;           //- argument number (starts at 0,  eg : %1 => argN=0)
+                               //  negative values are used for items that don't process
+                               //  an argument
+  string_t    res_;            //- result of the formatting of this item
+  string_t    appendix_;       //- piece of string between this item and the next
+
+  stream_format_state ref_state_;// set by parsing the format_string, is only affected by modify_item
+  stream_format_state state_;  // always same as ref_state, _unless_ modified by manipulators 'group(..)'
+
+  // non-stream format-state parameters
+  signed int truncate_;        //- is >=0 for directives like %.5s (take 5 chars from the string)
+  unsigned int pad_scheme_;    //- several possible padding schemes can mix. see pad_values
+
+  format_item() : argN_(argN_no_posit), truncate_(-1), pad_scheme_(0)  {}
+
+  void compute_states();      // sets states  according to truncate and pad_scheme.
+}; 
+
+
+
+// -----------------------------------------------------------
+// Definitions
+// -----------------------------------------------------------
+
+// --- stream_format_state:: -------------------------------------------
+inline
+void stream_format_state::apply_on(basic_ios & os) const
+  // set the state of this stream according to our params
+{
+      if(width_ != -1)
+        os.width(width_);
+      if(precision_ != -1)
+        os.precision(precision_);
+      if(fill_ != 0)
+        os.fill(fill_);
+      os.flags(flags_);
+}
+
+inline
+void stream_format_state::set_by_stream(const basic_ios& os) 
+  // set our params according to the state of this stream
+{
+      flags_ = os.flags();
+      width_ = os.width();
+      precision_ = os.precision();
+      fill_ = os.fill();
+}
+
+template<class T>  inline
+void apply_manip_body( stream_format_state& self,
+                       T manipulator) 
+  // modify our params according to the manipulator
+{
+      BOOST_IO_STD stringstream  ss;
+      self.apply_on( ss );
+      ss << manipulator;
+      self.set_by_stream( ss );
+}
+
+inline
+void stream_format_state::reset() 
+  // set our params to standard's default state
+{
+      width_=-1; precision_=-1; fill_=0; 
+      flags_ = std::ios::dec; 
+}
+
+
+// --- format_items:: -------------------------------------------
+inline
+void format_item::compute_states() 
+  // reflect pad_scheme_   on  state_ and ref_state_ 
+  //   because some pad_schemes have complex consequences on several state params.
+{
+  if(pad_scheme_ & zeropad) 
+  {
+    if(ref_state_.flags_ & std::ios::left) 
+    {
+      pad_scheme_ = pad_scheme_ & (~zeropad); // ignore zeropad in left alignment
+    }
+    else 
+    { 
+      ref_state_.fill_='0'; 
+      ref_state_.flags_ |= std::ios::internal;
+    }
+  }
+  state_ = ref_state_;
+}
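+
+// Illustrative note (not in the original source): for a directive like "%-08d"
+// the '0' flag requests zeropad but '-' sets std::ios::left, so compute_states()
+// drops zeropad; for plain "%08d" it instead switches the fill char to '0' and
+// the alignment to std::ios::internal, which is how printf-style zero padding
+// maps onto iostream state.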
+
+
+} } } // namespaces boost :: io :: detail
+
+
+#endif // BOOST_FORMAT_INTERNALS_HPP
diff --git a/src/boost/format/internals_fwd.hpp b/src/boost/format/internals_fwd.hpp
new file mode 100644
index 000000000000..a8ebf7c3abc1
--- /dev/null
+++ b/src/boost/format/internals_fwd.hpp
@@ -0,0 +1,65 @@
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// internals_fwd.hpp :  forward declarations, for internal headers
+// ------------------------------------------------------------------------------
+
+#ifndef BOOST_FORMAT_INTERNAL_FWD_HPP
+#define BOOST_FORMAT_INTERNAL_FWD_HPP
+
+#include "boost/format/format_fwd.hpp"
+
+
+namespace boost {
+namespace io {
+
+namespace detail {
+  struct stream_format_state;
+  struct format_item;
+}
+
+
+namespace detail {
+
+  // these functions were intended as methods,
+  // but MSVC has problems with template member functions:
+
+  // defined in format_implementation.hpp :
+     template<class T> 
+     basic_format&  modify_item_body( basic_format& self, 
+                                          int itemN, const T& manipulator);
+
+     template<class T> 
+     basic_format&  bind_arg_body( basic_format& self,
+                                           int argN, const T& val);
+
+    template<class T> 
+    void apply_manip_body( stream_format_state& self,
+                           T manipulator);
+
+  // argument feeding (defined in feed_args.hpp ) :
+     template<class T> 
+     void distribute(basic_format& self, T x);
+
+     template<class T> 
+     basic_format& feed(basic_format& self, T x);
+ 
+} // namespace detail
+
+} // namespace io
+} // namespace boost
+
+
+#endif //  BOOST_FORMAT_INTERNAL_FWD_HPP
diff --git a/src/boost/format/local.mk b/src/boost/format/local.mk
new file mode 100644
index 000000000000..3776eff382fe
--- /dev/null
+++ b/src/boost/format/local.mk
@@ -0,0 +1,7 @@
+libraries += libformat
+
+libformat_NAME = libnixformat
+
+libformat_DIR := $(d)
+
+libformat_SOURCES := $(wildcard $(d)/*.cc)
diff --git a/src/boost/format/macros_default.hpp b/src/boost/format/macros_default.hpp
new file mode 100644
index 000000000000..4fd84a163fb3
--- /dev/null
+++ b/src/boost/format/macros_default.hpp
@@ -0,0 +1,48 @@
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rüdiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// macros_default.hpp : configuration for the format library
+//                       provides default values for the stl workaround macros
+// ------------------------------------------------------------------------------
+
+#ifndef BOOST_FORMAT_MACROS_DEFAULT_HPP
+#define BOOST_FORMAT_MACROS_DEFAULT_HPP
+
+// *** This should go to "boost/config/suffix.hpp".
+
+#ifndef BOOST_IO_STD
+#  define BOOST_IO_STD std::
+#endif
+
+// **** Workaround for io streams, stlport and msvc.
+#ifdef BOOST_IO_NEEDS_USING_DECLARATION
+namespace boost {
+  using std::char_traits;
+  using std::basic_ostream;
+  using std::basic_ostringstream;
+  namespace io {
+    using std::basic_ostream;
+    namespace detail {
+      using std::basic_ios;
+      using std::basic_ostream;
+      using std::basic_ostringstream;
+    }
+  }
+}
+#endif
+
+// ------------------------------------------------------------------------------
+
+#endif // BOOST_FORMAT_MACROS_DEFAULT_HPP
diff --git a/src/boost/format/parsing.cc b/src/boost/format/parsing.cc
new file mode 100644
index 000000000000..34c36adeb734
--- /dev/null
+++ b/src/boost/format/parsing.cc
@@ -0,0 +1,454 @@
+// -*- C++ -*-
+//  Boost general library 'format'   ---------------------------
+//  See http://www.boost.org for updates, documentation, and revision history.
+
+//  (C) Samuel Krempp 2001
+//                  krempp@crans.ens-cachan.fr
+//  Permission to copy, use, modify, sell and
+//  distribute this software is granted provided this copyright notice appears
+//  in all copies. This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+
+// ideas taken from Rudiger Loos's format class
+// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
+
+// ------------------------------------------------------------------------------
+// parsing.hpp :  implementation of the parsing member functions
+//                      ( parse, parse_printf_directive)
+// ------------------------------------------------------------------------------
+
+
+#ifndef BOOST_FORMAT_PARSING_HPP
+#define BOOST_FORMAT_PARSING_HPP
+
+
+#include <boost/format.hpp>
+#include <boost/throw_exception.hpp>
+#include <boost/assert.hpp>
+
+
+namespace boost {
+namespace io {
+namespace detail {
+
+  template<class Stream> inline
+  bool wrap_isdigit(char c, Stream &os) 
+  {
+#ifndef BOOST_NO_LOCALE_ISIDIGIT
+    return std::isdigit(c, os.rdbuf()->getloc() );
+# else
+    using namespace std;
+    return isdigit(c); 
+#endif 
+  } //end- wrap_isdigit(..)
+
+  template<class Res> inline
+  Res str2int(const std::string& s, 
+              std::string::size_type start, 
+              BOOST_IO_STD ios &os,
+              const Res = Res(0)  ) 
+    // Input : char string, with starting index
+    //         a basic_ios& merely to call its widen/narrow member function in the desired locale.
+    // Effects : reads s[start:] and converts digits into an integral n, of type Res
+    // Returns : n
+  {
+    Res n = 0;
+    while(start<s.size() && wrap_isdigit(s[start], os) ) {
+      char cur_ch = s[start];
+      BOOST_ASSERT(cur_ch != 0 ); // since we called isdigit, this should not happen.
+      n *= 10;
+      n += cur_ch - '0'; // 22.2.1.1.2 of the C++ standard
+      ++start;
+    }
+    return n;
+  }
+
+  void skip_asterisk(const std::string & buf, 
+                     std::string::size_type * pos_p,
+                     BOOST_IO_STD ios &os)
+    // skip printf's "asterisk-fields" directives in the format-string buf
+    // Input : char string, with starting index *pos_p
+    //         a basic_ios& merely to call its widen/narrow member function in the desired locale.
+    // Effects : advance *pos_p by skipping printf's asterisk fields.
+    // Returns : nothing
+  {
+    using namespace std;
+    BOOST_ASSERT( pos_p != 0);
+    if(*pos_p >= buf.size() ) return;
+    if(buf[ *pos_p]=='*') {
+      ++ (*pos_p);
+      while (*pos_p < buf.size() && wrap_isdigit(buf[*pos_p],os)) ++(*pos_p);
+      if(buf[*pos_p]=='$') ++(*pos_p);
+    }
+  }
+
+
+  inline void maybe_throw_exception( unsigned char exceptions)
+    // auxiliary func called by parse_printf_directive
+    // for centralising error handling
+    // it either throws if user sets the corresponding flag, or does nothing.
+  {
+    if(exceptions & io::bad_format_string_bit)
+          boost::throw_exception(io::bad_format_string());
+  }
+    
+
+
+  bool parse_printf_directive(const std::string & buf,
+                              std::string::size_type * pos_p,
+                              detail::format_item * fpar,
+                              BOOST_IO_STD ios &os,
+                              unsigned char exceptions)
+    // Input   : a 'printf-directive' in the format-string, starting at buf[ *pos_p ]
+    //           a basic_ios& merely to call its widen/narrow member function in the desired locale.
+    //           a bitset 'exceptions' telling whether to throw exceptions on errors.
+    // Returns : true if parse somehow succeeded (possibly ignoring errors if exceptions disabled) 
+    //           false if it failed so badly that the directive should be printed verbatim
+    // Effects : - *pos_p is incremented so that buf[*pos_p] is the first char after the directive
+    //           - *fpar is set with the parameters read in the directive
+  {
+    typedef format_item  format_item_t;
+    BOOST_ASSERT( pos_p != 0);
+    std::string::size_type &i1 = *pos_p, i0;
+    fpar->argN_ = format_item_t::argN_no_posit;  // if no positional-directive
+
+    bool in_brackets=false;
+    if(buf[i1]=='|')
+      {
+        in_brackets=true;
+        if( ++i1 >= buf.size() ) {
+          maybe_throw_exception(exceptions);
+          return false;
+        }
+      }
+
+    // the flag '0' would be picked as a digit for argument order, but here it's a flag :
+    if(buf[i1]=='0') 
+      goto parse_flags;
+
+    // handle argument order (%2$d)  or possibly width specification: %2d
+    i0 = i1;  // save position before digits
+    while (i1 < buf.size() && wrap_isdigit(buf[i1], os))
+      ++i1;
+    if (i1!=i0) 
+      {
+        if( i1 >= buf.size() ) {
+          maybe_throw_exception(exceptions);
+          return false;
+        }
+        int n=str2int(buf,i0, os, int(0) );
+        
+        // %N% case : this is already the end of the directive
+        if( buf[i1] == '%' ) 
+          {
+            fpar->argN_ = n-1;
+            ++i1;
+            if( in_brackets) 
+              maybe_throw_exception(exceptions); 
+              // but don't return.  maybe "%" was used in lieu of '$', so we go on.
+            else return true;
+          }
+
+        if ( buf[i1]=='$' ) 
+          {
+            fpar->argN_ = n-1;
+            ++i1;
+          } 
+        else  
+          {
+            // non-positional directive
+            fpar->ref_state_.width_ = n;
+            fpar->argN_  = format_item_t::argN_no_posit;
+            goto parse_precision;
+          }
+      }
+    
+  parse_flags: 
+    // handle flags
+    while ( i1 <buf.size()) // as long as char is one of + - = # 0 l h   or ' '
+      {  
+        // misc switches
+        switch (buf[i1]) 
+          {
+          case '\'' : break; // no effect yet. (painful to implement)
+          case 'l':
+          case 'h':  // short/long modifier : for printf-compatibility (no action needed)
+             break;
+          case '-':
+            fpar->ref_state_.flags_ |= std::ios::left;
+            break;
+          case '=':
+            fpar->pad_scheme_ |= format_item_t::centered;
+            break;
+          case ' ':
+            fpar->pad_scheme_ |= format_item_t::spacepad;
+            break;
+          case '+':
+            fpar->ref_state_.flags_ |= std::ios::showpos;
+            break;
+          case '0':
+            fpar->pad_scheme_ |= format_item_t::zeropad; 
+            // need to know alignment before really setting flags,
+            // so just add 'zeropad' flag for now, it will be processed later.
+            break;
+          case '#':
+            fpar->ref_state_.flags_ |= std::ios::showpoint | std::ios::showbase;
+            break;
+          default:
+            goto parse_width;
+          }
+        ++i1;
+      } // loop on flag.
+    if( i1>=buf.size()) {
+      maybe_throw_exception(exceptions);
+      return true; 
+    }
+
+  parse_width:
+    // handle width spec
+    skip_asterisk(buf, &i1, os); // skips 'asterisk fields' :  *, or *N$
+    i0 = i1;  // save position before digits
+    while (i1<buf.size() && wrap_isdigit(buf[i1], os))
+      i1++;
+    
+    if (i1!=i0) 
+      { fpar->ref_state_.width_ = str2int( buf,i0, os, std::streamsize(0) ); }
+
+  parse_precision:
+    if( i1>=buf.size()) { 
+      maybe_throw_exception(exceptions);
+      return true;
+    }
+    // handle precision spec
+    if (buf[i1]=='.')  
+      {
+        ++i1;
+        skip_asterisk(buf, &i1, os);
+        i0 = i1;  // save position before digits
+        while (i1<buf.size() && wrap_isdigit(buf[i1], os))
+          ++i1;
+
+        if(i1==i0)
+          fpar->ref_state_.precision_ = 0;
+        else 
+          fpar->ref_state_.precision_ = str2int(buf,i0, os, std::streamsize(0) );
+      }
+    
+    // handle  formatting-type flags :
+    while( i1<buf.size() && 
+           ( buf[i1]=='l' || buf[i1]=='L' || buf[i1]=='h') )
+      ++i1;
+    if( i1>=buf.size()) {
+      maybe_throw_exception(exceptions);
+      return true;
+    }
+    
+    if( in_brackets && buf[i1]=='|' ) 
+      {
+        ++i1;
+        return true;
+      }
+    switch (buf[i1])  
+      {
+      case 'X':
+        fpar->ref_state_.flags_ |= std::ios::uppercase;
+      case 'p': // pointer => set hex.
+      case 'x':
+        fpar->ref_state_.flags_ &= ~std::ios::basefield;
+        fpar->ref_state_.flags_ |= std::ios::hex;
+        break;
+      
+      case 'o':
+        fpar->ref_state_.flags_ &= ~std::ios::basefield;
+        fpar->ref_state_.flags_ |=  std::ios::oct;
+        break;
+
+      case 'E':
+        fpar->ref_state_.flags_ |=  std::ios::uppercase;
+      case 'e':
+        fpar->ref_state_.flags_ &= ~std::ios::floatfield;
+        fpar->ref_state_.flags_ |=  std::ios::scientific;
+
+        fpar->ref_state_.flags_ &= ~std::ios::basefield;
+        fpar->ref_state_.flags_ |=  std::ios::dec;
+        break;
+      
+      case 'f':
+        fpar->ref_state_.flags_ &= ~std::ios::floatfield;
+        fpar->ref_state_.flags_ |=  std::ios::fixed;
+      case 'u':
+      case 'd':
+      case 'i':
+        fpar->ref_state_.flags_ &= ~std::ios::basefield;
+        fpar->ref_state_.flags_ |=  std::ios::dec;
+        break;
+
+      case 'T':
+        ++i1;
+        if( i1 >= buf.size())
+          maybe_throw_exception(exceptions);
+        else
+          fpar->ref_state_.fill_ = buf[i1];
+        fpar->pad_scheme_ |= format_item_t::tabulation;
+        fpar->argN_ = format_item_t::argN_tabulation; 
+        break;
+      case 't': 
+        fpar->ref_state_.fill_ = ' ';
+        fpar->pad_scheme_ |= format_item_t::tabulation;
+        fpar->argN_ = format_item_t::argN_tabulation; 
+        break;
+
+      case 'G':
+        fpar->ref_state_.flags_ |= std::ios::uppercase;
+        break;
+      case 'g': // 'g' conversion is default for floats.
+        fpar->ref_state_.flags_ &= ~std::ios::basefield;
+        fpar->ref_state_.flags_ |=  std::ios::dec;
+
+        // CLEAR all floatfield flags, so the stream will CHOOSE
+        fpar->ref_state_.flags_ &= ~std::ios::floatfield; 
+        break;
+
+      case 'C':
+      case 'c': 
+        fpar->truncate_ = 1;
+        break;
+      case 'S':
+      case 's': 
+        fpar->truncate_ = fpar->ref_state_.precision_;
+        fpar->ref_state_.precision_ = -1;
+        break;
+      case 'n' :  
+        fpar->argN_ = format_item_t::argN_ignored;
+        break;
+      default: 
+        maybe_throw_exception(exceptions);
+      }
+    ++i1;
+
+    if( in_brackets )
+      {
+        if( i1<buf.size() && buf[i1]=='|' ) 
+          {
+            ++i1;
+            return true;
+          }
+        else  maybe_throw_exception(exceptions);
+      }
+    return true;
+  }
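+
+  // Illustrative note (not in the original source): this parser accepts both
+  // printf-like directives ("%2$-5.2f", "%08X") and the bracketed Boost form
+  // "%|spec|", translating flags, width and precision into the
+  // stream_format_state stored in fpar->ref_state_.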
+
+} // detail namespace
+} // io namespace
+
+
+// -----------------------------------------------
+//  format :: parse(..)
+
+void basic_format::parse(const string_t & buf) 
+  // parse the format-string
+{
+    using namespace std;
+    const char arg_mark = '%';
+    bool ordered_args=true; 
+    int max_argN=-1;
+    string_t::size_type i1=0;
+    int num_items=0;
+    
+    // A: find upper_bound on num_items and allocates arrays
+    i1=0; 
+    while( (i1=buf.find(arg_mark,i1)) != string::npos ) 
+    {
+      if( i1+1 >= buf.size() ) {
+        if(exceptions() & io::bad_format_string_bit)
+          boost::throw_exception(io::bad_format_string()); // must not end in "bla bla %"
+        else break; // stop there, ignore last '%'
+      }
+      if(buf[i1+1] == buf[i1] ) { i1+=2; continue; } // escaped "%%" / "##"
+      ++i1;
+      
+      // in case of %N% directives, don't count it twice (that would waste allocations) :
+      while(i1 < buf.size() && io::detail::wrap_isdigit(buf[i1],oss_)) ++i1;
+      if( i1 < buf.size() && buf[i1] == arg_mark ) ++ i1;
+
+      ++num_items;
+    }
+    items_.assign( num_items, format_item_t() );
+    
+    // B: Now the real parsing of the format string :
+    num_items=0;
+    i1 = 0;
+    string_t::size_type i0 = i1;
+    bool special_things=false;
+    int cur_it=0;
+    while( (i1=buf.find(arg_mark,i1)) != string::npos ) 
+    {
+      string_t & piece = (cur_it==0) ? prefix_ : items_[cur_it-1].appendix_;
+
+      if( buf[i1+1] == buf[i1] ) // escaped mark, '%%'
+      {
+        piece += buf.substr(i0, i1-i0) + buf[i1]; 
+        i1+=2; i0=i1;
+        continue; 
+      }
+      BOOST_ASSERT(  static_cast<unsigned int>(cur_it) < items_.size() || cur_it==0);
+
+      if(i1!=i0) piece += buf.substr(i0, i1-i0);
+      ++i1;
+      
+      bool parse_ok;
+      parse_ok = io::detail::parse_printf_directive(buf, &i1, &items_[cur_it], oss_, exceptions());
+      if( ! parse_ok ) continue; // the directive will be printed verbatim
+
+      i0=i1;
+      items_[cur_it].compute_states(); // process complex options, like zeropad, into stream params.
+
+      int argN=items_[cur_it].argN_;
+      if(argN == format_item_t::argN_ignored)
+        continue;
+      if(argN ==format_item_t::argN_no_posit)
+        ordered_args=false;
+      else if(argN == format_item_t::argN_tabulation) special_things=true;
+      else if(argN > max_argN) max_argN = argN;
+      ++num_items;
+      ++cur_it;
+    } // loop on %'s
+    BOOST_ASSERT(cur_it == num_items);
+    
+    // store the final piece of string
+    string_t & piece = (cur_it==0) ? prefix_ : items_[cur_it-1].appendix_;
+    piece += buf.substr(i0);
+    
+    if( !ordered_args) 
+    {
+      if(max_argN >= 0 )  // don't mix positional with non-positional directives
+        {
+          if(exceptions() & io::bad_format_string_bit)
+            boost::throw_exception(io::bad_format_string());
+          // else do nothing. => positional arguments are processed as non-positional
+        }
+      // set things like it would have been with positional directives :
+      int non_ordered_items = 0;
+      for(int i=0; i< num_items; ++i)
+        if(items_[i].argN_ == format_item_t::argN_no_posit) 
+          {
+            items_[i].argN_ = non_ordered_items;
+            ++non_ordered_items;
+          }
+      max_argN = non_ordered_items-1;
+    }
+    
+    // C: set some member data :
+    items_.resize(num_items);
+
+    if(special_things) style_ |= special_needs;
+    num_args_ = max_argN + 1;
+    if(ordered_args) style_ |=  ordered;
+    else style_ &= ~ordered;
+}
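+
+// Illustrative note (not in the original source): after parse() returns,
+// prefix_ and each items_[i].appendix_ hold the literal pieces of the format
+// string, items_[i] describes directive i, and num_args_ is the number of
+// arguments the format object expects to be fed via operator%.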
+
+} // namespace boost
+
+
+#endif //  BOOST_FORMAT_PARSING_HPP
diff --git a/src/boost/throw_exception.hpp b/src/boost/throw_exception.hpp
new file mode 100644
index 000000000000..07b4ae5ceae7
--- /dev/null
+++ b/src/boost/throw_exception.hpp
@@ -0,0 +1,47 @@
+#ifndef BOOST_THROW_EXCEPTION_HPP_INCLUDED
+#define BOOST_THROW_EXCEPTION_HPP_INCLUDED
+
+// MS compatible compilers support #pragma once
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1020)
+# pragma once
+#endif
+
+//
+//  boost/throw_exception.hpp
+//
+//  Copyright (c) 2002 Peter Dimov and Multi Media Ltd.
+//
+//  Permission to copy, use, modify, sell and distribute this software
+//  is granted provided this copyright notice appears in all copies.
+//  This software is provided "as is" without express or implied
+//  warranty, and with no claim as to its suitability for any purpose.
+//
+//  http://www.boost.org/libs/utility/throw_exception.html
+//
+
+//#include <boost/config.hpp>
+
+#ifdef BOOST_NO_EXCEPTIONS
+# include <exception>
+#endif
+
+namespace boost
+{
+
+#ifdef BOOST_NO_EXCEPTIONS
+
+void throw_exception(std::exception const & e); // user defined
+
+#else
+
+template<class E> void throw_exception(E const & e)
+{
+    throw e;
+}
+
+#endif
+
+} // namespace boost
+
+#endif // #ifndef BOOST_THROW_EXCEPTION_HPP_INCLUDED
diff --git a/src/bsdiff-4.3/bsdiff.1 b/src/bsdiff-4.3/bsdiff.1
new file mode 100644
index 000000000000..ead6c4deb57f
--- /dev/null
+++ b/src/bsdiff-4.3/bsdiff.1
@@ -0,0 +1,63 @@
+.\"-
+.\" Copyright 2003-2005 Colin Percival
+.\" All rights reserved
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted providing that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+.\" WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+.\" DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+.\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+.\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD: src/usr.bin/bsdiff/bsdiff/bsdiff.1,v 1.1 2005/08/06 01:59:05 cperciva Exp $
+.\"
+.Dd May 18, 2003
+.Dt BSDIFF 1
+.Os FreeBSD
+.Sh NAME
+.Nm bsdiff
+.Nd generate a patch between two binary files
+.Sh SYNOPSIS
+.Nm
+.Ao Ar oldfile Ac Ao Ar newfile Ac Ao Ar patchfile Ac
+.Sh DESCRIPTION
+.Nm
+compares
+.Ao Ar oldfile Ac
+to
+.Ao Ar newfile Ac
+and writes to
+.Ao Ar patchfile Ac
+a binary patch suitable for use by bspatch(1).
+When
+.Ao Ar oldfile Ac
+and
+.Ao Ar newfile Ac
+are two versions of an executable program, the
+patches produced are on average a factor of five smaller
+than those produced by any other binary patch tool known
+to the author.
+.Pp
+.Nm
+uses memory equal to 17 times the size of 
+.Ao Ar oldfile Ac ,
+and requires
+an absolute minimum working set size of 8 times the size of oldfile.
+.Sh SEE ALSO
+.Xr bspatch 1
+.Sh AUTHORS
+.An Colin Percival Aq cperciva@freebsd.org
diff --git a/src/bsdiff-4.3/bsdiff.c b/src/bsdiff-4.3/bsdiff.c
new file mode 100644
index 000000000000..374ed038fa1f
--- /dev/null
+++ b/src/bsdiff-4.3/bsdiff.c
@@ -0,0 +1,405 @@
+/*-
+ * Copyright 2003-2005 Colin Percival
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted providing that the following conditions 
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if 0
+__FBSDID("$FreeBSD: src/usr.bin/bsdiff/bsdiff/bsdiff.c,v 1.1 2005/08/06 01:59:05 cperciva Exp $");
+#endif
+
+#include <sys/types.h>
+
+#include <bzlib.h>
+#include <err.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#define MIN(x,y) (((x)<(y)) ? (x) : (y))
+
+static void split(off_t *I,off_t *V,off_t start,off_t len,off_t h)
+{
+	off_t i,j,k,x,tmp,jj,kk;
+
+	if(len<16) {
+		for(k=start;k<start+len;k+=j) {
+			j=1;x=V[I[k]+h];
+			for(i=1;k+i<start+len;i++) {
+				if(V[I[k+i]+h]<x) {
+					x=V[I[k+i]+h];
+					j=0;
+				};
+				if(V[I[k+i]+h]==x) {
+					tmp=I[k+j];I[k+j]=I[k+i];I[k+i]=tmp;
+					j++;
+				};
+			};
+			for(i=0;i<j;i++) V[I[k+i]]=k+j-1;
+			if(j==1) I[k]=-1;
+		};
+		return;
+	};
+
+	x=V[I[start+len/2]+h];
+	jj=0;kk=0;
+	for(i=start;i<start+len;i++) {
+		if(V[I[i]+h]<x) jj++;
+		if(V[I[i]+h]==x) kk++;
+	};
+	jj+=start;kk+=jj;
+
+	i=start;j=0;k=0;
+	while(i<jj) {
+		if(V[I[i]+h]<x) {
+			i++;
+		} else if(V[I[i]+h]==x) {
+			tmp=I[i];I[i]=I[jj+j];I[jj+j]=tmp;
+			j++;
+		} else {
+			tmp=I[i];I[i]=I[kk+k];I[kk+k]=tmp;
+			k++;
+		};
+	};
+
+	while(jj+j<kk) {
+		if(V[I[jj+j]+h]==x) {
+			j++;
+		} else {
+			tmp=I[jj+j];I[jj+j]=I[kk+k];I[kk+k]=tmp;
+			k++;
+		};
+	};
+
+	if(jj>start) split(I,V,start,jj-start,h);
+
+	for(i=0;i<kk-jj;i++) V[I[jj+i]]=kk-1;
+	if(jj==kk-1) I[jj]=-1;
+
+	if(start+len>kk) split(I,V,kk,start+len-kk,h);
+}
+
+static void qsufsort(off_t *I,off_t *V,u_char *old,off_t oldsize)
+{
+	off_t buckets[256];
+	off_t i,h,len;
+
+	for(i=0;i<256;i++) buckets[i]=0;
+	for(i=0;i<oldsize;i++) buckets[old[i]]++;
+	for(i=1;i<256;i++) buckets[i]+=buckets[i-1];
+	for(i=255;i>0;i--) buckets[i]=buckets[i-1];
+	buckets[0]=0;
+
+	for(i=0;i<oldsize;i++) I[++buckets[old[i]]]=i;
+	I[0]=oldsize;
+	for(i=0;i<oldsize;i++) V[i]=buckets[old[i]];
+	V[oldsize]=0;
+	for(i=1;i<256;i++) if(buckets[i]==buckets[i-1]+1) I[buckets[i]]=-1;
+	I[0]=-1;
+
+	for(h=1;I[0]!=-(oldsize+1);h+=h) {
+		len=0;
+		for(i=0;i<oldsize+1;) {
+			if(I[i]<0) {
+				len-=I[i];
+				i-=I[i];
+			} else {
+				if(len) I[i-len]=-len;
+				len=V[I[i]]+1-i;
+				split(I,V,i,len,h);
+				i+=len;
+				len=0;
+			};
+		};
+		if(len) I[i-len]=-len;
+	};
+
+	for(i=0;i<oldsize+1;i++) I[V[i]]=i;
+}
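+
+/* Illustrative note (not in the original source): split() and qsufsort()
+   implement Larsson-Sadakane suffix sorting.  After qsufsort() returns, I[]
+   holds the suffix array of old[0..oldsize], which search() below uses to
+   binary-search for the longest match of a region of new[] inside old[]. */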
+
+static off_t matchlen(u_char *old,off_t oldsize,u_char *new,off_t newsize)
+{
+	off_t i;
+
+	for(i=0;(i<oldsize)&&(i<newsize);i++)
+		if(old[i]!=new[i]) break;
+
+	return i;
+}
+
+static off_t search(off_t *I,u_char *old,off_t oldsize,
+		u_char *new,off_t newsize,off_t st,off_t en,off_t *pos)
+{
+	off_t x,y;
+
+	if(en-st<2) {
+		x=matchlen(old+I[st],oldsize-I[st],new,newsize);
+		y=matchlen(old+I[en],oldsize-I[en],new,newsize);
+
+		if(x>y) {
+			*pos=I[st];
+			return x;
+		} else {
+			*pos=I[en];
+			return y;
+		}
+	};
+
+	x=st+(en-st)/2;
+	if(memcmp(old+I[x],new,MIN(oldsize-I[x],newsize))<0) {
+		return search(I,old,oldsize,new,newsize,x,en,pos);
+	} else {
+		return search(I,old,oldsize,new,newsize,st,x,pos);
+	};
+}
+
+static void offtout(off_t x,u_char *buf)
+{
+	off_t y;
+
+	if(x<0) y=-x; else y=x;
+
+		buf[0]=y%256;y-=buf[0];
+	y=y/256;buf[1]=y%256;y-=buf[1];
+	y=y/256;buf[2]=y%256;y-=buf[2];
+	y=y/256;buf[3]=y%256;y-=buf[3];
+	y=y/256;buf[4]=y%256;y-=buf[4];
+	y=y/256;buf[5]=y%256;y-=buf[5];
+	y=y/256;buf[6]=y%256;y-=buf[6];
+	y=y/256;buf[7]=y%256;
+
+	if(x<0) buf[7]|=0x80;
+}
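+
+/* Illustrative note (not in the original source): offtout() writes x as an
+   8-byte little-endian magnitude with the sign kept in the top bit of the
+   last byte; e.g. x = -300 becomes 0x2C 0x01 0x00 ... 0x00 with buf[7] |= 0x80.
+   The matching offtin() in bspatch.c rebuilds the magnitude and negates it
+   when that bit is set. */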
+
+int main(int argc,char *argv[])
+{
+	int fd;
+	u_char *old,*new;
+	off_t oldsize,newsize;
+	off_t *I,*V;
+	off_t scan,pos,len;
+	off_t lastscan,lastpos,lastoffset;
+	off_t oldscore,scsc;
+	off_t s,Sf,lenf,Sb,lenb;
+	off_t overlap,Ss,lens;
+	off_t i;
+	off_t dblen,eblen;
+	u_char *db,*eb;
+	u_char buf[8];
+	u_char header[32];
+	FILE * pf;
+	BZFILE * pfbz2;
+	int bz2err;
+
+	if(argc!=4) errx(1,"usage: %s oldfile newfile patchfile\n",argv[0]);
+
+	/* Allocate oldsize+1 bytes instead of oldsize bytes to ensure
+		that we never try to malloc(0) and get a NULL pointer */
+	if(((fd=open(argv[1],O_RDONLY,0))<0) ||
+		((oldsize=lseek(fd,0,SEEK_END))==-1) ||
+		((old=malloc(oldsize+1))==NULL) ||
+		(lseek(fd,0,SEEK_SET)!=0) ||
+		(read(fd,old,oldsize)!=oldsize) ||
+		(close(fd)==-1)) err(1,"%s",argv[1]);
+
+	if(((I=malloc((oldsize+1)*sizeof(off_t)))==NULL) ||
+		((V=malloc((oldsize+1)*sizeof(off_t)))==NULL)) err(1,NULL);
+
+	qsufsort(I,V,old,oldsize);
+
+	free(V);
+
+	/* Allocate newsize+1 bytes instead of newsize bytes to ensure
+		that we never try to malloc(0) and get a NULL pointer */
+	if(((fd=open(argv[2],O_RDONLY,0))<0) ||
+		((newsize=lseek(fd,0,SEEK_END))==-1) ||
+		((new=malloc(newsize+1))==NULL) ||
+		(lseek(fd,0,SEEK_SET)!=0) ||
+		(read(fd,new,newsize)!=newsize) ||
+		(close(fd)==-1)) err(1,"%s",argv[2]);
+
+	if(((db=malloc(newsize+1))==NULL) ||
+		((eb=malloc(newsize+1))==NULL)) err(1,NULL);
+	dblen=0;
+	eblen=0;
+
+	/* Create the patch file */
+	if ((pf = fopen(argv[3], "w")) == NULL)
+		err(1, "%s", argv[3]);
+
+	/* Header is
+		0	8	 "BSDIFF40"
+		8	8	length of bzip2ed ctrl block
+		16	8	length of bzip2ed diff block
+		24	8	length of new file */
+	/* File is
+		0	32	Header
+		32	??	Bzip2ed ctrl block
+		??	??	Bzip2ed diff block
+		??	??	Bzip2ed extra block */
+	memcpy(header,"BSDIFF40",8);
+	offtout(0, header + 8);
+	offtout(0, header + 16);
+	offtout(newsize, header + 24);
+	if (fwrite(header, 32, 1, pf) != 1)
+		err(1, "fwrite(%s)", argv[3]);
+
+	/* Compute the differences, writing ctrl as we go */
+	if ((pfbz2 = BZ2_bzWriteOpen(&bz2err, pf, 9, 0, 0)) == NULL)
+		errx(1, "BZ2_bzWriteOpen, bz2err = %d", bz2err);
+	scan=0;len=0;
+	lastscan=0;lastpos=0;lastoffset=0;
+	while(scan<newsize) {
+		oldscore=0;
+
+		for(scsc=scan+=len;scan<newsize;scan++) {
+			len=search(I,old,oldsize,new+scan,newsize-scan,
+					0,oldsize,&pos);
+			if (len > 64 * 1024) break;
+
+			for(;scsc<scan+len;scsc++)
+			if((scsc+lastoffset<oldsize) &&
+				(old[scsc+lastoffset] == new[scsc]))
+				oldscore++;
+
+			if(((len==oldscore) && (len!=0)) || 
+				(len>oldscore+8)) break;
+
+			if((scan+lastoffset<oldsize) &&
+				(old[scan+lastoffset] == new[scan]))
+				oldscore--;
+		};
+
+		if((len!=oldscore) || (scan==newsize)) {
+			s=0;Sf=0;lenf=0;
+			for(i=0;(lastscan+i<scan)&&(lastpos+i<oldsize);) {
+				if(old[lastpos+i]==new[lastscan+i]) s++;
+				i++;
+				if(s*2-i>Sf*2-lenf) { Sf=s; lenf=i; };
+			};
+
+			lenb=0;
+			if(scan<newsize) {
+				s=0;Sb=0;
+				for(i=1;(scan>=lastscan+i)&&(pos>=i);i++) {
+					if(old[pos-i]==new[scan-i]) s++;
+					if(s*2-i>Sb*2-lenb) { Sb=s; lenb=i; };
+				};
+			};
+
+			if(lastscan+lenf>scan-lenb) {
+				overlap=(lastscan+lenf)-(scan-lenb);
+				s=0;Ss=0;lens=0;
+				for(i=0;i<overlap;i++) {
+					if(new[lastscan+lenf-overlap+i]==
+					   old[lastpos+lenf-overlap+i]) s++;
+					if(new[scan-lenb+i]==
+					   old[pos-lenb+i]) s--;
+					if(s>Ss) { Ss=s; lens=i+1; };
+				};
+
+				lenf+=lens-overlap;
+				lenb-=lens;
+			};
+
+			for(i=0;i<lenf;i++)
+				db[dblen+i]=new[lastscan+i]-old[lastpos+i];
+			for(i=0;i<(scan-lenb)-(lastscan+lenf);i++)
+				eb[eblen+i]=new[lastscan+lenf+i];
+
+			dblen+=lenf;
+			eblen+=(scan-lenb)-(lastscan+lenf);
+
+			offtout(lenf,buf);
+			BZ2_bzWrite(&bz2err, pfbz2, buf, 8);
+			if (bz2err != BZ_OK)
+				errx(1, "BZ2_bzWrite, bz2err = %d", bz2err);
+
+			offtout((scan-lenb)-(lastscan+lenf),buf);
+			BZ2_bzWrite(&bz2err, pfbz2, buf, 8);
+			if (bz2err != BZ_OK)
+				errx(1, "BZ2_bzWrite, bz2err = %d", bz2err);
+
+			offtout((pos-lenb)-(lastpos+lenf),buf);
+			BZ2_bzWrite(&bz2err, pfbz2, buf, 8);
+			if (bz2err != BZ_OK)
+				errx(1, "BZ2_bzWrite, bz2err = %d", bz2err);
+
+			lastscan=scan-lenb;
+			lastpos=pos-lenb;
+			lastoffset=pos-scan;
+		};
+	};
+	BZ2_bzWriteClose(&bz2err, pfbz2, 0, NULL, NULL);
+	if (bz2err != BZ_OK)
+		errx(1, "BZ2_bzWriteClose, bz2err = %d", bz2err);
+
+	/* Compute size of compressed ctrl data */
+	if ((len = ftello(pf)) == -1)
+		err(1, "ftello");
+	offtout(len-32, header + 8);
+
+	/* Write compressed diff data */
+	if ((pfbz2 = BZ2_bzWriteOpen(&bz2err, pf, 9, 0, 0)) == NULL)
+		errx(1, "BZ2_bzWriteOpen, bz2err = %d", bz2err);
+	BZ2_bzWrite(&bz2err, pfbz2, db, dblen);
+	if (bz2err != BZ_OK)
+		errx(1, "BZ2_bzWrite, bz2err = %d", bz2err);
+	BZ2_bzWriteClose(&bz2err, pfbz2, 0, NULL, NULL);
+	if (bz2err != BZ_OK)
+		errx(1, "BZ2_bzWriteClose, bz2err = %d", bz2err);
+
+	/* Compute size of compressed diff data */
+	if ((newsize = ftello(pf)) == -1)
+		err(1, "ftello");
+	offtout(newsize - len, header + 16);
+
+	/* Write compressed extra data */
+	if ((pfbz2 = BZ2_bzWriteOpen(&bz2err, pf, 9, 0, 0)) == NULL)
+		errx(1, "BZ2_bzWriteOpen, bz2err = %d", bz2err);
+	BZ2_bzWrite(&bz2err, pfbz2, eb, eblen);
+	if (bz2err != BZ_OK)
+		errx(1, "BZ2_bzWrite, bz2err = %d", bz2err);
+	BZ2_bzWriteClose(&bz2err, pfbz2, 0, NULL, NULL);
+	if (bz2err != BZ_OK)
+		errx(1, "BZ2_bzWriteClose, bz2err = %d", bz2err);
+
+	/* Seek to the beginning, write the header, and close the file */
+	if (fseeko(pf, 0, SEEK_SET))
+		err(1, "fseeko");
+	if (fwrite(header, 32, 1, pf) != 1)
+		err(1, "fwrite(%s)", argv[3]);
+	if (fclose(pf))
+		err(1, "fclose");
+
+	/* Free the memory we used */
+	free(db);
+	free(eb);
+	free(I);
+	free(old);
+	free(new);
+
+	return 0;
+}
diff --git a/src/bsdiff-4.3/bspatch.1 b/src/bsdiff-4.3/bspatch.1
new file mode 100644
index 000000000000..82a2781aa7dc
--- /dev/null
+++ b/src/bsdiff-4.3/bspatch.1
@@ -0,0 +1,59 @@
+.\"-
+.\" Copyright 2003-2005 Colin Percival
+.\" All rights reserved
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted providing that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+.\" WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+.\" DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+.\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+.\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD: src/usr.bin/bsdiff/bspatch/bspatch.1,v 1.1 2005/08/06 01:59:06 cperciva Exp $
+.\"
+.Dd May 18, 2003
+.Dt BSPATCH 1
+.Os FreeBSD
+.Sh NAME
+.Nm bspatch
+.Nd apply a patch built with bsdiff(1)
+.Sh SYNOPSIS
+.Nm
+.Ao Ar oldfile Ac Ao Ar newfile Ac Ao Ar patchfile Ac
+.Sh DESCRIPTION
+.Nm
+generates
+.Ao Ar newfile Ac
+from
+.Ao Ar oldfile Ac
+and
+.Ao Ar patchfile Ac
+where
+.Ao Ar patchfile Ac
+is a binary patch built by bsdiff(1).
+.Pp
+.Nm
+uses memory equal to the size of 
+.Ao Ar oldfile Ac
+plus the size of 
+.Ao Ar newfile Ac ,
+but can tolerate a very small working set without a dramatic loss
+of performance.
+.Sh SEE ALSO
+.Xr bsdiff 1
+.Sh AUTHORS
+.An Colin Percival Aq cperciva@freebsd.org
diff --git a/src/bsdiff-4.3/bspatch.c b/src/bsdiff-4.3/bspatch.c
new file mode 100644
index 000000000000..f9d33ddd64a2
--- /dev/null
+++ b/src/bsdiff-4.3/bspatch.c
@@ -0,0 +1,224 @@
+/*-
+ * Copyright 2003-2005 Colin Percival
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted providing that the following conditions 
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if 0
+__FBSDID("$FreeBSD: src/usr.bin/bsdiff/bspatch/bspatch.c,v 1.1 2005/08/06 01:59:06 cperciva Exp $");
+#endif
+
+#include <bzlib.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <err.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+
+static off_t offtin(u_char *buf)
+{
+	off_t y;
+
+	y=buf[7]&0x7F;
+	y=y*256;y+=buf[6];
+	y=y*256;y+=buf[5];
+	y=y*256;y+=buf[4];
+	y=y*256;y+=buf[3];
+	y=y*256;y+=buf[2];
+	y=y*256;y+=buf[1];
+	y=y*256;y+=buf[0];
+
+	if(buf[7]&0x80) y=-y;
+
+	return y;
+}
+
+
+void writeFull(const char * name, int fd,
+    const unsigned char * buf, size_t count)
+{
+    while (count) {
+        ssize_t res = write(fd, (char *) buf, count);
+        if (res == -1) {
+            if (errno == EINTR) continue;
+            err(1,"writing to %s",name);
+        }
+        count -= res;
+        buf += res;
+    }
+}
+
+
+int main(int argc,char * argv[])
+{
+	FILE * f, * cpf, * dpf, * epf;
+	BZFILE * cpfbz2, * dpfbz2, * epfbz2;
+	int cbz2err, dbz2err, ebz2err;
+	int fd;
+	ssize_t oldsize,newsize;
+	ssize_t bzctrllen,bzdatalen;
+	u_char header[32],buf[8];
+	u_char *old, *new;
+	off_t oldpos,newpos;
+	off_t ctrl[3];
+	off_t lenread;
+	off_t i;
+
+	if(argc!=4) errx(1,"usage: %s oldfile newfile patchfile\n",argv[0]);
+
+	/* Open patch file */
+	if ((f = fopen(argv[3], "r")) == NULL)
+		err(1, "fopen(%s)", argv[3]);
+
+	/*
+	File format:
+		0	8	"BSDIFF40"
+		8	8	X
+		16	8	Y
+		24	8	sizeof(newfile)
+		32	X	bzip2(control block)
+		32+X	Y	bzip2(diff block)
+		32+X+Y	???	bzip2(extra block)
+	with control block a set of triples (x,y,z) meaning "add x bytes
+	from oldfile to x bytes from the diff block; copy y bytes from the
+	extra block; seek forwards in oldfile by z bytes".
+	*/
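+
+	/*
+	Illustrative note (not in the original source): with a control triple
+	(x=4, y=2, z=-1) the loop below copies 4 diff bytes into newfile, adds
+	them byte-wise to old[oldpos..oldpos+3], appends 2 literal bytes from
+	the extra block, and then moves oldpos back by 1 before the next triple.
+	*/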
+
+	/* Read header */
+	if (fread(header, 1, 32, f) < 32) {
+		if (feof(f))
+			errx(1, "Corrupt patch\n");
+		err(1, "fread(%s)", argv[3]);
+	}
+
+	/* Check for appropriate magic */
+	if (memcmp(header, "BSDIFF40", 8) != 0)
+		errx(1, "Corrupt patch\n");
+
+	/* Read lengths from header */
+	bzctrllen=offtin(header+8);
+	bzdatalen=offtin(header+16);
+	newsize=offtin(header+24);
+	if((bzctrllen<0) || (bzdatalen<0) || (newsize<0))
+		errx(1,"Corrupt patch\n");
+
+	/* Close patch file and re-open it via libbzip2 at the right places */
+	if (fclose(f))
+		err(1, "fclose(%s)", argv[3]);
+	if ((cpf = fopen(argv[3], "r")) == NULL)
+		err(1, "fopen(%s)", argv[3]);
+	if (fseeko(cpf, 32, SEEK_SET))
+		err(1, "fseeko(%s, %lld)", argv[3],
+		    (long long)32);
+	if ((cpfbz2 = BZ2_bzReadOpen(&cbz2err, cpf, 0, 0, NULL, 0)) == NULL)
+		errx(1, "BZ2_bzReadOpen, bz2err = %d", cbz2err);
+	if ((dpf = fopen(argv[3], "r")) == NULL)
+		err(1, "fopen(%s)", argv[3]);
+	if (fseeko(dpf, 32 + bzctrllen, SEEK_SET))
+		err(1, "fseeko(%s, %lld)", argv[3],
+		    (long long)(32 + bzctrllen));
+	if ((dpfbz2 = BZ2_bzReadOpen(&dbz2err, dpf, 0, 0, NULL, 0)) == NULL)
+		errx(1, "BZ2_bzReadOpen, bz2err = %d", dbz2err);
+	if ((epf = fopen(argv[3], "r")) == NULL)
+		err(1, "fopen(%s)", argv[3]);
+	if (fseeko(epf, 32 + bzctrllen + bzdatalen, SEEK_SET))
+		err(1, "fseeko(%s, %lld)", argv[3],
+		    (long long)(32 + bzctrllen + bzdatalen));
+	if ((epfbz2 = BZ2_bzReadOpen(&ebz2err, epf, 0, 0, NULL, 0)) == NULL)
+		errx(1, "BZ2_bzReadOpen, bz2err = %d", ebz2err);
+
+	if(((fd=open(argv[1],O_RDONLY,0))<0) ||
+		((oldsize=lseek(fd,0,SEEK_END))==-1) ||
+		((old=malloc(oldsize+1))==NULL) ||
+		(lseek(fd,0,SEEK_SET)!=0) ||
+		(read(fd,old,oldsize)!=oldsize) ||
+		(close(fd)==-1)) err(1,"%s",argv[1]);
+	if((new=malloc(newsize+1))==NULL) err(1,NULL);
+
+	oldpos=0;newpos=0;
+	while(newpos<newsize) {
+		/* Read control data */
+		for(i=0;i<=2;i++) {
+			lenread = BZ2_bzRead(&cbz2err, cpfbz2, buf, 8);
+			if ((lenread < 8) || ((cbz2err != BZ_OK) &&
+			    (cbz2err != BZ_STREAM_END)))
+				errx(1, "Corrupt patch\n");
+			ctrl[i]=offtin(buf);
+		};
+
+		/* Sanity-check */
+		if(newpos+ctrl[0]>newsize)
+			errx(1,"Corrupt patch\n");
+
+		/* Read diff string */
+		lenread = BZ2_bzRead(&dbz2err, dpfbz2, new + newpos, ctrl[0]);
+		if ((lenread < ctrl[0]) ||
+		    ((dbz2err != BZ_OK) && (dbz2err != BZ_STREAM_END)))
+			errx(1, "Corrupt patch\n");
+
+		/* Add old data to diff string */
+		for(i=0;i<ctrl[0];i++)
+			if((oldpos+i>=0) && (oldpos+i<oldsize))
+				new[newpos+i]+=old[oldpos+i];
+
+		/* Adjust pointers */
+		newpos+=ctrl[0];
+		oldpos+=ctrl[0];
+
+		/* Sanity-check */
+		if(newpos+ctrl[1]>newsize)
+			errx(1,"Corrupt patch\n");
+
+		/* Read extra string */
+		lenread = BZ2_bzRead(&ebz2err, epfbz2, new + newpos, ctrl[1]);
+		if ((lenread < ctrl[1]) ||
+		    ((ebz2err != BZ_OK) && (ebz2err != BZ_STREAM_END)))
+			errx(1, "Corrupt patch\n");
+
+		/* Adjust pointers */
+		newpos+=ctrl[1];
+		oldpos+=ctrl[2];
+	};
+
+	/* Clean up the bzip2 reads */
+	BZ2_bzReadClose(&cbz2err, cpfbz2);
+	BZ2_bzReadClose(&dbz2err, dpfbz2);
+	BZ2_bzReadClose(&ebz2err, epfbz2);
+	if (fclose(cpf) || fclose(dpf) || fclose(epf))
+		err(1, "fclose(%s)", argv[3]);
+
+	/* Write the new file */
+	if((fd=open(argv[2],O_CREAT|O_TRUNC|O_WRONLY,0666))<0)
+		err(1,"%s",argv[2]);
+	writeFull(argv[2], fd, new, newsize);
+	if(close(fd)==-1)
+		err(1,"%s",argv[2]);
+
+	free(new);
+	free(old);
+
+	return 0;
+}
diff --git a/src/bsdiff-4.3/compat-include/err.h b/src/bsdiff-4.3/compat-include/err.h
new file mode 100644
index 000000000000..a851ded6f907
--- /dev/null
+++ b/src/bsdiff-4.3/compat-include/err.h
@@ -0,0 +1,12 @@
+/* Simulate BSD's <err.h> functionality. */
+
+#ifndef COMPAT_ERR_H_INCLUDED
+#define COMPAT_ERR_H_INCLUDED 1
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define err(rc,...)  do { fprintf(stderr,__VA_ARGS__); exit(rc); } while(0)
+#define errx(rc,...) do { fprintf(stderr,__VA_ARGS__); exit(rc); } while(0)
+
+#endif
diff --git a/src/bsdiff-4.3/local.mk b/src/bsdiff-4.3/local.mk
new file mode 100644
index 000000000000..c957ceab0c0f
--- /dev/null
+++ b/src/bsdiff-4.3/local.mk
@@ -0,0 +1,11 @@
+programs += bsdiff bspatch
+
+bsdiff_DIR := $(d)
+bsdiff_SOURCES := $(d)/bsdiff.c
+bsdiff_LDFLAGS = -lbz2 $(bsddiff_compat_include)
+bsdiff_INSTALL_DIR = $(libexecdir)/nix
+
+bspatch_DIR := $(d)
+bspatch_SOURCES := $(d)/bspatch.c
+bspatch_LDFLAGS = -lbz2 $(bsddiff_compat_include)
+bspatch_INSTALL_DIR = $(libexecdir)/nix
diff --git a/src/download-via-ssh/download-via-ssh.cc b/src/download-via-ssh/download-via-ssh.cc
new file mode 100644
index 000000000000..d221fa8b51fb
--- /dev/null
+++ b/src/download-via-ssh/download-via-ssh.cc
@@ -0,0 +1,141 @@
+#include "shared.hh"
+#include "util.hh"
+#include "serialise.hh"
+#include "archive.hh"
+#include "affinity.hh"
+#include "globals.hh"
+#include "serve-protocol.hh"
+#include "worker-protocol.hh"
+#include "store-api.hh"
+
+#include <iostream>
+#include <unistd.h>
+
+using namespace nix;
+
+// !!! TODO:
+// * Respect more than the first host
+// * Use a database
+// * Show progress
+
+
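+/* Fork "ssh -x -T <host> 'nix-store --serve'" and return a sink/source pair
+   connected to the child's stdin and stdout. */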
+static std::pair<FdSink, FdSource> connect(const string & conn)
+{
+    Pipe to, from;
+    to.create();
+    from.create();
+    startProcess([&]() {
+        if (dup2(to.readSide, STDIN_FILENO) == -1)
+            throw SysError("dupping stdin");
+        if (dup2(from.writeSide, STDOUT_FILENO) == -1)
+            throw SysError("dupping stdout");
+        execlp("ssh", "ssh", "-x", "-T", conn.c_str(), "nix-store --serve", NULL);
+        throw SysError("executing ssh");
+    });
+    // If the child exits unexpectedly, we'll hit EPIPE or EOF early.
+    // If we exit unexpectedly, the child will.  So there is no need to
+    // keep track of it.
+
+    return std::pair<FdSink, FdSource>(to.writeSide.borrow(), from.readSide.borrow());
+}
+
+
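+/* Ask the remote side to dump the given store path as a NAR and unpack it
+   into destPath, then print an empty line on stdout. */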
+static void substitute(std::pair<FdSink, FdSource> & pipes, Path storePath, Path destPath)
+{
+    writeInt(cmdDumpStorePath, pipes.first);
+    writeString(storePath, pipes.first);
+    pipes.first.flush();
+    restorePath(destPath, pipes.second);
+    std::cout << std::endl;
+}
+
+
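+/* Answer substituter queries read line by line from stdin.  Each line is a
+   command followed by store paths:
+     "have <paths...>": print the subset of the paths that the remote host
+                        has;
+     "info <paths...>": for each path, print the path, its deriver, the
+                        number of references followed by the references
+                        themselves, and two further numeric fields.
+   Each reply is terminated by an empty line. */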
+static void query(std::pair<FdSink, FdSource> & pipes)
+{
+    for (string line; getline(std::cin, line);) {
+        Strings tokenized = tokenizeString<Strings>(line);
+        string cmd = tokenized.front();
+        tokenized.pop_front();
+        if (cmd == "have") {
+            writeInt(cmdQueryValidPaths, pipes.first);
+            writeInt(0, pipes.first); // don't lock
+            writeInt(0, pipes.first); // don't substitute
+            writeStrings(tokenized, pipes.first);
+            pipes.first.flush();
+            PathSet paths = readStrings<PathSet>(pipes.second);
+            foreach (PathSet::iterator, i, paths)
+                std::cout << *i << std::endl;
+        } else if (cmd == "info") {
+            writeInt(cmdQueryPathInfos, pipes.first);
+            writeStrings(tokenized, pipes.first);
+            pipes.first.flush();
+            while (1) {
+                Path path = readString(pipes.second);
+                if (path.empty()) break;
+                assertStorePath(path);
+                std::cout << path << std::endl;
+                string deriver = readString(pipes.second);
+                if (!deriver.empty()) assertStorePath(deriver);
+                std::cout << deriver << std::endl;
+                PathSet references = readStorePaths<PathSet>(pipes.second);
+                std::cout << references.size() << std::endl;
+                foreach (PathSet::iterator, i, references)
+                    std::cout << *i << std::endl;
+                std::cout << readLongLong(pipes.second) << std::endl;
+                std::cout << readLongLong(pipes.second) << std::endl;
+            }
+        } else
+            throw Error(format("unknown substituter query ‘%1%’") % cmd);
+        std::cout << std::endl;
+    }
+}
+
+
+int main(int argc, char * * argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        if (argc < 2)
+            throw UsageError("download-via-ssh requires an argument");
+
+        initNix();
+
+        settings.update();
+
+        if (settings.sshSubstituterHosts.empty())
+            return;
+
+        std::cout << std::endl;
+
+        /* Pass on the location of the daemon client's SSH
+           authentication socket. */
+        string sshAuthSock = settings.get("ssh-auth-sock", "");
+        if (sshAuthSock != "") setenv("SSH_AUTH_SOCK", sshAuthSock.c_str(), 1);
+
+        string host = settings.sshSubstituterHosts.front();
+        std::pair<FdSink, FdSource> pipes = connect(host);
+
+        /* Exchange the greeting */
+        writeInt(SERVE_MAGIC_1, pipes.first);
+        pipes.first.flush();
+        unsigned int magic = readInt(pipes.second);
+        if (magic != SERVE_MAGIC_2)
+            throw Error("protocol mismatch");
+        readInt(pipes.second); // Server version, unused for now
+        writeInt(SERVE_PROTOCOL_VERSION, pipes.first);
+        pipes.first.flush();
+
+        string arg = argv[1];
+        if (arg == "--query")
+            query(pipes);
+        else if (arg == "--substitute") {
+            if (argc != 4)
+                throw UsageError("download-via-ssh: --substitute takes exactly two arguments");
+            Path storePath = argv[2];
+            Path destPath = argv[3];
+            printMsg(lvlError, format("downloading ‘%1%’ via SSH from ‘%2%’...") % storePath % host);
+            substitute(pipes, storePath, destPath);
+        }
+        else
+            throw UsageError(format("download-via-ssh: unknown command ‘%1%’") % arg);
+    });
+}
diff --git a/src/download-via-ssh/local.mk b/src/download-via-ssh/local.mk
new file mode 100644
index 000000000000..80f4c385acb3
--- /dev/null
+++ b/src/download-via-ssh/local.mk
@@ -0,0 +1,11 @@
+programs += download-via-ssh
+
+download-via-ssh_DIR := $(d)
+
+download-via-ssh_SOURCES := $(d)/download-via-ssh.cc
+
+download-via-ssh_INSTALL_DIR := $(libexecdir)/nix/substituters
+
+download-via-ssh_CXXFLAGS = -Isrc/nix-store
+
+download-via-ssh_LIBS = libmain libstore libutil libformat
diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc
new file mode 100644
index 000000000000..fdd61a5fd375
--- /dev/null
+++ b/src/libexpr/attr-path.cc
@@ -0,0 +1,97 @@
+#include "attr-path.hh"
+#include "eval-inline.hh"
+#include "util.hh"
+
+
+namespace nix {
+
+
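+/* Split an attribute path such as foo."bar.baz".quux into its components,
+   treating double-quoted parts as single components. */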
+static Strings parseAttrPath(const string & s)
+{
+    Strings res;
+    string cur;
+    string::const_iterator i = s.begin();
+    while (i != s.end()) {
+        if (*i == '.') {
+            res.push_back(cur);
+            cur.clear();
+        } else if (*i == '"') {
+            ++i;
+            while (1) {
+                if (i == s.end())
+                    throw Error(format("missing closing quote in selection path ‘%1%’") % s);
+                if (*i == '"') break;
+                cur.push_back(*i++);
+            }
+        } else
+            cur.push_back(*i);
+        ++i;
+    }
+    if (!cur.empty()) res.push_back(cur);
+    return res;
+}
+
+
+Value * findAlongAttrPath(EvalState & state, const string & attrPath,
+    Bindings & autoArgs, Value & vIn)
+{
+    Strings tokens = parseAttrPath(attrPath);
+
+    Error attrError =
+        Error(format("attribute selection path ‘%1%’ does not match expression") % attrPath);
+
+    Value * v = &vIn;
+
+    foreach (Strings::iterator, i, tokens) {
+
+        /* Is *i an index (integer) or a normal attribute name? */
+        enum { apAttr, apIndex } apType = apAttr;
+        string attr = *i;
+        unsigned int attrIndex;
+        if (string2Int(attr, attrIndex)) apType = apIndex;
+
+        /* Evaluate the expression. */
+        Value * vNew = state.allocValue();
+        state.autoCallFunction(autoArgs, *v, *vNew);
+        v = vNew;
+        state.forceValue(*v);
+
+        /* It should evaluate to either a set (when the current attrPath
+           component is an attribute name) or a list (when it is an
+           index). */
+
+        if (apType == apAttr) {
+
+            if (v->type != tAttrs)
+                throw TypeError(
+                    format("the expression selected by the selection path ‘%1%’ should be a set but is %2%")
+                    % attrPath % showType(*v));
+
+            if (attr.empty())
+                throw Error(format("empty attribute name in selection path ‘%1%’") % attrPath);
+
+            Bindings::iterator a = v->attrs->find(state.symbols.create(attr));
+            if (a == v->attrs->end())
+                throw Error(format("attribute ‘%1%’ in selection path ‘%2%’ not found") % attr % attrPath);
+            v = &*a->value;
+        }
+
+        else if (apType == apIndex) {
+
+            if (v->type != tList)
+                throw TypeError(
+                    format("the expression selected by the selection path ‘%1%’ should be a list but is %2%")
+                    % attrPath % showType(*v));
+
+            if (attrIndex >= v->list.length)
+                throw Error(format("list index %1% in selection path ‘%2%’ is out of range") % attrIndex % attrPath);
+
+            v = v->list.elems[attrIndex];
+        }
+
+    }
+
+    return v;
+}
+
+
+}
diff --git a/src/libexpr/attr-path.hh b/src/libexpr/attr-path.hh
new file mode 100644
index 000000000000..46a341950939
--- /dev/null
+++ b/src/libexpr/attr-path.hh
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "eval.hh"
+
+#include <string>
+#include <map>
+
+namespace nix {
+
+Value * findAlongAttrPath(EvalState & state, const string & attrPath,
+    Bindings & autoArgs, Value & vIn);
+
+}
diff --git a/src/libexpr/common-opts.cc b/src/libexpr/common-opts.cc
new file mode 100644
index 000000000000..25f1e7117b76
--- /dev/null
+++ b/src/libexpr/common-opts.cc
@@ -0,0 +1,62 @@
+#include "common-opts.hh"
+#include "../libmain/shared.hh"
+#include "util.hh"
+
+
+namespace nix {
+
+
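+/* Parse a single --arg/--argstr option.  The value is stored with a
+   one-character prefix: 'E' for an expression (--arg), 'S' for a plain
+   string (--argstr); evalAutoArgs() interprets that prefix. */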
+bool parseAutoArgs(Strings::iterator & i,
+    const Strings::iterator & argsEnd, std::map<string, string> & res)
+{
+    string arg = *i;
+    if (arg != "--arg" && arg != "--argstr") return false;
+
+    UsageError error(format("‘%1%’ requires two arguments") % arg);
+
+    if (++i == argsEnd) throw error;
+    string name = *i;
+    if (++i == argsEnd) throw error;
+    string value = *i;
+
+    res[name] = (arg == "--arg" ? 'E' : 'S') + value;
+
+    return true;
+}
+
+
+void evalAutoArgs(EvalState & state, std::map<string, string> & in, Bindings & out)
+{
+    for (auto & i: in) {
+        Value * v = state.allocValue();
+        if (i.second[0] == 'E')
+            state.mkThunk_(*v, state.parseExprFromString(string(i.second, 1), absPath(".")));
+        else
+            mkString(*v, string(i.second, 1));
+        out.push_back(Attr(state.symbols.create(i.first), v));
+    }
+    out.sort();
+}
+
+
+bool parseSearchPathArg(Strings::iterator & i,
+    const Strings::iterator & argsEnd, Strings & searchPath)
+{
+    if (*i != "-I") return false;
+    if (++i == argsEnd) throw UsageError("‘-I’ requires an argument");
+    searchPath.push_back(*i);
+    return true;
+}
+
+
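+/* Interpret a file argument: a string of the form <path> is resolved
+   against the Nix search path; anything else is made into an absolute
+   path. */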
+Path lookupFileArg(EvalState & state, string s)
+{
+    if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
+        Path p = s.substr(1, s.size() - 2);
+        return state.findFile(p);
+    } else
+        return absPath(s);
+}
+
+
+}
diff --git a/src/libexpr/common-opts.hh b/src/libexpr/common-opts.hh
new file mode 100644
index 000000000000..bb6d399a8a61
--- /dev/null
+++ b/src/libexpr/common-opts.hh
@@ -0,0 +1,18 @@
+#pragma once
+
+#include "eval.hh"
+
+namespace nix {
+
+/* Some common option parsing between nix-env and nix-instantiate. */
+bool parseAutoArgs(Strings::iterator & i,
+    const Strings::iterator & argsEnd, std::map<string, string> & res);
+
+void evalAutoArgs(EvalState & state, std::map<string, string> & in, Bindings & out);
+
+bool parseSearchPathArg(Strings::iterator & i,
+    const Strings::iterator & argsEnd, Strings & searchPath);
+
+Path lookupFileArg(EvalState & state, string s);
+
+}
diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh
new file mode 100644
index 000000000000..c275f7ba83e8
--- /dev/null
+++ b/src/libexpr/eval-inline.hh
@@ -0,0 +1,82 @@
+#pragma once
+
+#include "eval.hh"
+
+#define LocalNoInline(f) static f __attribute__((noinline)); f
+#define LocalNoInlineNoReturn(f) static f __attribute__((noinline, noreturn)); f
+
+namespace nix {
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s))
+{
+    throw EvalError(s);
+}
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const Value & v))
+{
+    throw TypeError(format(s) % showType(v));
+}
+
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const Value & v, const Pos & pos))
+{
+    throw TypeError(format(s) % showType(v) % pos);
+}
+
+
+void EvalState::forceValue(Value & v)
+{
+    if (v.type == tThunk) {
+        Env * env = v.thunk.env;
+        Expr * expr = v.thunk.expr;
+        try {
+            v.type = tBlackhole;
+            //checkInterrupt();
+            expr->eval(*this, *env, v);
+        } catch (Error & e) {
+            v.type = tThunk;
+            v.thunk.env = env;
+            v.thunk.expr = expr;
+            throw;
+        }
+    }
+    else if (v.type == tApp)
+        callFunction(*v.app.left, *v.app.right, v, noPos);
+    else if (v.type == tBlackhole)
+        throwEvalError("infinite recursion encountered");
+}
+
+
+inline void EvalState::forceAttrs(Value & v)
+{
+    forceValue(v);
+    if (v.type != tAttrs)
+        throwTypeError("value is %1% while a set was expected", v);
+}
+
+
+inline void EvalState::forceAttrs(Value & v, const Pos & pos)
+{
+    forceValue(v);
+    if (v.type != tAttrs)
+        throwTypeError("value is %1% while a set was expected, at %2%", v, pos);
+}
+
+
+inline void EvalState::forceList(Value & v)
+{
+    forceValue(v);
+    if (v.type != tList)
+        throwTypeError("value is %1% while a list was expected", v);
+}
+
+
+inline void EvalState::forceList(Value & v, const Pos & pos)
+{
+    forceValue(v);
+    if (v.type != tList)
+        throwTypeError("value is %1% while a list was expected, at %2%", v, pos);
+}
+
+
+}
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
new file mode 100644
index 000000000000..5cb6fc3cfe7b
--- /dev/null
+++ b/src/libexpr/eval.cc
@@ -0,0 +1,1467 @@
+#include "eval.hh"
+#include "hash.hh"
+#include "util.hh"
+#include "store-api.hh"
+#include "derivations.hh"
+#include "globals.hh"
+#include "eval-inline.hh"
+
+#include <algorithm>
+#include <cstring>
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#if HAVE_BOEHMGC
+
+#include <gc/gc.h>
+#include <gc/gc_cpp.h>
+
+#define NEW new (UseGC)
+
+#else
+
+#define GC_STRDUP strdup
+#define GC_MALLOC malloc
+
+#define NEW new
+
+#endif
+
+
+namespace nix {
+
+
+Bindings::iterator Bindings::find(const Symbol & name)
+{
+    Attr key(name, 0);
+    iterator i = lower_bound(begin(), end(), key);
+    if (i != end() && i->name == name) return i;
+    return end();
+}
+
+
+void Bindings::sort()
+{
+    std::sort(begin(), end());
+}
+
+
+std::ostream & operator << (std::ostream & str, const Value & v)
+{
+    switch (v.type) {
+    case tInt:
+        str << v.integer;
+        break;
+    case tBool:
+        str << (v.boolean ? "true" : "false");
+        break;
+    case tString:
+        str << "\"";
+        for (const char * i = v.string.s; *i; i++)
+            if (*i == '\"' || *i == '\\') str << "\\" << *i;
+            else if (*i == '\n') str << "\\n";
+            else if (*i == '\r') str << "\\r";
+            else if (*i == '\t') str << "\\t";
+            else str << *i;
+        str << "\"";
+        break;
+    case tPath:
+        str << v.path; // !!! escaping?
+        break;
+    case tNull:
+        str << "null";
+        break;
+    case tAttrs: {
+        str << "{ ";
+        typedef std::map<string, Value *> Sorted;
+        Sorted sorted;
+        foreach (Bindings::iterator, i, *v.attrs)
+            sorted[i->name] = i->value;
+        foreach (Sorted::iterator, i, sorted)
+            str << i->first << " = " << *i->second << "; ";
+        str << "}";
+        break;
+    }
+    case tList:
+        str << "[ ";
+        for (unsigned int n = 0; n < v.list.length; ++n)
+            str << *v.list.elems[n] << " ";
+        str << "]";
+        break;
+    case tThunk:
+    case tApp:
+        str << "<CODE>";
+        break;
+    case tLambda:
+        str << "<LAMBDA>";
+        break;
+    case tPrimOp:
+        str << "<PRIMOP>";
+        break;
+    case tPrimOpApp:
+        str << "<PRIMOP-APP>";
+        break;
+    default:
+        throw Error("invalid value");
+    }
+    return str;
+}
+
+
+string showType(const Value & v)
+{
+    switch (v.type) {
+        case tInt: return "an integer";
+        case tBool: return "a boolean";
+        case tString: return "a string";
+        case tPath: return "a path";
+        case tNull: return "null";
+        case tAttrs: return "a set";
+        case tList: return "a list";
+        case tThunk: return "a thunk";
+        case tApp: return "a function application";
+        case tLambda: return "a function";
+        case tBlackhole: return "a black hole";
+        case tPrimOp: return "a built-in function";
+        case tPrimOpApp: return "a partially applied built-in function";
+    }
+    abort();
+}
+
+
+#if HAVE_BOEHMGC
+/* Called when the Boehm GC runs out of memory. */
+static void * oomHandler(size_t requested)
+{
+    /* Convert this to a proper C++ exception. */
+    throw std::bad_alloc();
+}
+#endif
+
+
+static Symbol getName(const AttrName & name, EvalState & state, Env & env)
+{
+    if (name.symbol.set()) {
+        return name.symbol;
+    } else {
+        Value nameValue;
+        name.expr->eval(state, env, nameValue);
+        state.forceStringNoCtx(nameValue);
+        return state.symbols.create(nameValue.string.s);
+    }
+}
+
+
+EvalState::EvalState(const Strings & _searchPath)
+    : sWith(symbols.create("<with>"))
+    , sOutPath(symbols.create("outPath"))
+    , sDrvPath(symbols.create("drvPath"))
+    , sType(symbols.create("type"))
+    , sMeta(symbols.create("meta"))
+    , sName(symbols.create("name"))
+    , sValue(symbols.create("value"))
+    , sSystem(symbols.create("system"))
+    , sOverrides(symbols.create("__overrides"))
+    , sOutputs(symbols.create("outputs"))
+    , sOutputName(symbols.create("outputName"))
+    , sIgnoreNulls(symbols.create("__ignoreNulls"))
+    , sFile(symbols.create("file"))
+    , sLine(symbols.create("line"))
+    , sColumn(symbols.create("column"))
+    , repair(false)
+    , baseEnv(allocEnv(128))
+    , staticBaseEnv(false, 0)
+    , baseEnvDispl(0)
+{
+    nrEnvs = nrValuesInEnvs = nrValues = nrListElems = 0;
+    nrAttrsets = nrOpUpdates = nrOpUpdateValuesCopied = 0;
+    nrListConcats = nrPrimOpCalls = nrFunctionCalls = 0;
+    countCalls = getEnv("NIX_COUNT_CALLS", "0") != "0";
+
+#if HAVE_BOEHMGC
+    static bool gcInitialised = false;
+    if (!gcInitialised) {
+
+        /* Initialise the Boehm garbage collector.  This isn't
+           necessary on most platforms, but for portability we do it
+           anyway. */
+        GC_INIT();
+
+        GC_oom_fn = oomHandler;
+
+        /* Set the initial heap size to something fairly big (25% of
+           physical RAM, up to a maximum of 384 MiB) so that in most
+           cases we don't need to garbage collect at all.  (Collection
+           has a fairly significant overhead.)  The heap size can be
+           overridden through libgc's GC_INITIAL_HEAP_SIZE environment
+           variable.  We should probably also provide a nix.conf
+           setting for this.  Note that GC_expand_hp() causes a lot of
+           virtual, but not physical (resident) memory to be
+           allocated.  This might be a problem on systems that don't
+           overcommit. */
+        if (!getenv("GC_INITIAL_HEAP_SIZE")) {
+            size_t maxSize = 384 * 1024 * 1024;
+            size_t size = 32 * 1024 * 1024;
+#if HAVE_SYSCONF && defined(_SC_PAGESIZE) && defined(_SC_PHYS_PAGES)
+            long pageSize = sysconf(_SC_PAGESIZE);
+            long pages = sysconf(_SC_PHYS_PAGES);
+            if (pageSize != -1)
+                size = (pageSize * pages) / 4; // 25% of RAM
+            if (size > maxSize) size = maxSize;
+#endif
+            debug(format("setting initial heap size to %1% bytes") % size);
+            GC_expand_hp(size);
+        }
+
+        gcInitialised = true;
+    }
+#endif
+
+    /* Initialise the Nix expression search path. */
+    Strings paths = tokenizeString<Strings>(getEnv("NIX_PATH", ""), ":");
+    for (auto & i : _searchPath) addToSearchPath(i, true);
+    for (auto & i : paths) addToSearchPath(i);
+    addToSearchPath("nix=" + settings.nixDataDir + "/nix/corepkgs");
+
+    createBaseEnv();
+}
+
+
+EvalState::~EvalState()
+{
+}
+
+
+void EvalState::addConstant(const string & name, Value & v)
+{
+    Value * v2 = allocValue();
+    *v2 = v;
+    staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl;
+    baseEnv.values[baseEnvDispl++] = v2;
+    string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
+    baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v2));
+}
+
+
+void EvalState::addPrimOp(const string & name,
+    unsigned int arity, PrimOpFun primOp)
+{
+    Value * v = allocValue();
+    string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
+    Symbol sym = symbols.create(name2);
+    v->type = tPrimOp;
+    v->primOp = NEW PrimOp(primOp, arity, sym);
+    staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl;
+    baseEnv.values[baseEnvDispl++] = v;
+    baseEnv.values[0]->attrs->push_back(Attr(sym, v));
+}
+
+
+void EvalState::getBuiltin(const string & name, Value & v)
+{
+    v = *baseEnv.values[0]->attrs->find(symbols.create(name))->value;
+}
+
+
+/* Every "format" object (even temporary) takes up a few hundred bytes
+   of stack space, which is a real killer in the recursive
+   evaluator.  So here are some helper functions for throwing
+   exceptions. */
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const string & s2))
+{
+    throw EvalError(format(s) % s2);
+}
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const Pos & pos))
+{
+    throw EvalError(format(s) % pos);
+}
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const string & s2, const Pos & pos))
+{
+    throw EvalError(format(s) % s2 % pos);
+}
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const string & s2, const string & s3))
+{
+    throw EvalError(format(s) % s2 % s3);
+}
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const string & s2, const string & s3, const Pos & pos))
+{
+    throw EvalError(format(s) % s2 % s3 % pos);
+}
+
+LocalNoInlineNoReturn(void throwEvalError(const char * s, const Symbol & sym, const Pos & p1, const Pos & p2))
+{
+    throw EvalError(format(s) % sym % p1 % p2);
+}
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s))
+{
+    throw TypeError(s);
+}
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const Pos & pos))
+{
+    throw TypeError(format(s) % pos);
+}
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const string & s1))
+{
+    throw TypeError(format(s) % s1);
+}
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const string & s1, const string & s2))
+{
+    throw TypeError(format(s) % s1 % s2);
+}
+
+LocalNoInlineNoReturn(void throwTypeError(const char * s, const ExprLambda & fun, const Symbol & s2, const Pos & pos))
+{
+    throw TypeError(format(s) % fun.showNamePos() % s2 % pos);
+}
+
+LocalNoInlineNoReturn(void throwAssertionError(const char * s, const Pos & pos))
+{
+    throw AssertionError(format(s) % pos);
+}
+
+LocalNoInlineNoReturn(void throwUndefinedVarError(const char * s, const string & s1, const Pos & pos))
+{
+    throw UndefinedVarError(format(s) % s1 % pos);
+}
+
+LocalNoInline(void addErrorPrefix(Error & e, const char * s, const string & s2))
+{
+    e.addPrefix(format(s) % s2);
+}
+
+LocalNoInline(void addErrorPrefix(Error & e, const char * s, const ExprLambda & fun, const Pos & pos))
+{
+    e.addPrefix(format(s) % fun.showNamePos() % pos);
+}
+
+LocalNoInline(void addErrorPrefix(Error & e, const char * s, const string & s2, const Pos & pos))
+{
+    e.addPrefix(format(s) % s2 % pos);
+}
+
+
+void mkString(Value & v, const char * s)
+{
+    mkStringNoCopy(v, GC_STRDUP(s));
+}
+
+
+void mkString(Value & v, const string & s, const PathSet & context)
+{
+    mkString(v, s.c_str());
+    if (!context.empty()) {
+        unsigned int n = 0;
+        v.string.context = (const char * *)
+            GC_MALLOC((context.size() + 1) * sizeof(char *));
+        foreach (PathSet::const_iterator, i, context)
+            v.string.context[n++] = GC_STRDUP(i->c_str());
+        v.string.context[n] = 0;
+    }
+}
+
+
+void mkPath(Value & v, const char * s)
+{
+    mkPathNoCopy(v, GC_STRDUP(s));
+}
+
+
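+/* Resolve a variable reference: skip var.level enclosing environments, then
+   either return the statically bound value at var.displ, or, for a variable
+   bound by a `with', look it up in the (lazily forced) with-attributes,
+   falling back to outer `with' scopes.  If noEval is set, return null
+   rather than forcing unevaluated with-attributes. */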
+inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
+{
+    for (unsigned int l = var.level; l; --l, env = env->up) ;
+
+    if (!var.fromWith) return env->values[var.displ];
+
+    while (1) {
+        if (!env->haveWithAttrs) {
+            if (noEval) return 0;
+            Value * v = allocValue();
+            evalAttrs(*env->up, (Expr *) env->values[0], *v);
+            env->values[0] = v;
+            env->haveWithAttrs = true;
+        }
+        Bindings::iterator j = env->values[0]->attrs->find(var.name);
+        if (j != env->values[0]->attrs->end()) {
+            if (countCalls && j->pos) attrSelects[*j->pos]++;
+            return j->value;
+        }
+        if (!env->prevWith)
+            throwUndefinedVarError("undefined variable ‘%1%’ at %2%", var.name, var.pos);
+        for (unsigned int l = env->prevWith; l; --l, env = env->up) ;
+    }
+}
+
+
+Value * EvalState::allocValue()
+{
+    nrValues++;
+    return (Value *) GC_MALLOC(sizeof(Value));
+}
+
+
+Env & EvalState::allocEnv(unsigned int size)
+{
+    nrEnvs++;
+    nrValuesInEnvs += size;
+    Env * env = (Env *) GC_MALLOC(sizeof(Env) + size * sizeof(Value *));
+
+    /* Clear the values because maybeThunk() and the fromWith case of
+       lookupVar() expect this. */
+    for (unsigned i = 0; i < size; ++i)
+        env->values[i] = 0;
+
+    return *env;
+}
+
+
+Value * EvalState::allocAttr(Value & vAttrs, const Symbol & name)
+{
+    Value * v = allocValue();
+    vAttrs.attrs->push_back(Attr(name, v));
+    return v;
+}
+
+
+void EvalState::mkList(Value & v, unsigned int length)
+{
+    v.type = tList;
+    v.list.length = length;
+    v.list.elems = length ? (Value * *) GC_MALLOC(length * sizeof(Value *)) : 0;
+    nrListElems += length;
+}
+
+
+void EvalState::mkAttrs(Value & v, unsigned int expected)
+{
+    clearValue(v);
+    v.type = tAttrs;
+    v.attrs = NEW Bindings;
+    v.attrs->reserve(expected);
+    nrAttrsets++;
+}
+
+
+unsigned long nrThunks = 0;
+
+static inline void mkThunk(Value & v, Env & env, Expr * expr)
+{
+    v.type = tThunk;
+    v.thunk.env = &env;
+    v.thunk.expr = expr;
+    nrThunks++;
+}
+
+
+void EvalState::mkThunk_(Value & v, Expr * expr)
+{
+    mkThunk(v, baseEnv, expr);
+}
+
+
+void EvalState::mkPos(Value & v, Pos * pos)
+{
+    if (pos) {
+        mkAttrs(v, 3);
+        mkString(*allocAttr(v, sFile), pos->file);
+        mkInt(*allocAttr(v, sLine), pos->line);
+        mkInt(*allocAttr(v, sColumn), pos->column);
+        v.attrs->sort();
+    } else
+        mkNull(v);
+}
+
+
+/* Create a thunk for the delayed computation of the given expression
+   in the given environment.  But if the expression is a variable,
+   then look it up right away.  This significantly reduces the number
+   of thunks allocated. */
+Value * Expr::maybeThunk(EvalState & state, Env & env)
+{
+    Value * v = state.allocValue();
+    mkThunk(*v, env, this);
+    return v;
+}
+
+
+unsigned long nrAvoided = 0;
+
+Value * ExprVar::maybeThunk(EvalState & state, Env & env)
+{
+    Value * v = state.lookupVar(&env, *this, true);
+    /* The value might not be initialised in the environment yet.
+       In that case, ignore it. */
+    if (v) { nrAvoided++; return v; }
+    return Expr::maybeThunk(state, env);
+}
+
+
+Value * ExprString::maybeThunk(EvalState & state, Env & env)
+{
+    nrAvoided++;
+    return &v;
+}
+
+Value * ExprInt::maybeThunk(EvalState & state, Env & env)
+{
+    nrAvoided++;
+    return &v;
+}
+
+Value * ExprPath::maybeThunk(EvalState & state, Env & env)
+{
+    nrAvoided++;
+    return &v;
+}
+
+
+void EvalState::evalFile(const Path & path, Value & v)
+{
+    FileEvalCache::iterator i;
+    if ((i = fileEvalCache.find(path)) != fileEvalCache.end()) {
+        v = i->second;
+        return;
+    }
+
+    Path path2 = resolveExprPath(path);
+    if ((i = fileEvalCache.find(path2)) != fileEvalCache.end()) {
+        v = i->second;
+        return;
+    }
+
+    startNest(nest, lvlTalkative, format("evaluating file ‘%1%’") % path2);
+    Expr * e = parseExprFromFile(path2);
+    try {
+        eval(e, v);
+    } catch (Error & e) {
+        addErrorPrefix(e, "while evaluating the file ‘%1%’:\n", path2);
+        throw;
+    }
+
+    fileEvalCache[path2] = v;
+    if (path != path2) fileEvalCache[path] = v;
+}
+
+
+void EvalState::resetFileCache()
+{
+    fileEvalCache.clear();
+}
+
+
+void EvalState::eval(Expr * e, Value & v)
+{
+    e->eval(*this, baseEnv, v);
+}
+
+
+inline bool EvalState::evalBool(Env & env, Expr * e)
+{
+    Value v;
+    e->eval(*this, env, v);
+    if (v.type != tBool)
+        throwTypeError("value is %1% while a Boolean was expected", v);
+    return v.boolean;
+}
+
+
+inline bool EvalState::evalBool(Env & env, Expr * e, const Pos & pos)
+{
+    Value v;
+    e->eval(*this, env, v);
+    if (v.type != tBool)
+        throwTypeError("value is %1% while a Boolean was expected, at %2%", v, pos);
+    return v.boolean;
+}
+
+
+inline void EvalState::evalAttrs(Env & env, Expr * e, Value & v)
+{
+    e->eval(*this, env, v);
+    if (v.type != tAttrs)
+        throwTypeError("value is %1% while a set was expected", v);
+}
+
+
+void Expr::eval(EvalState & state, Env & env, Value & v)
+{
+    abort();
+}
+
+
+void ExprInt::eval(EvalState & state, Env & env, Value & v)
+{
+    v = this->v;
+}
+
+
+void ExprString::eval(EvalState & state, Env & env, Value & v)
+{
+    v = this->v;
+}
+
+
+void ExprPath::eval(EvalState & state, Env & env, Value & v)
+{
+    v = this->v;
+}
+
+
+void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
+{
+    state.mkAttrs(v, attrs.size());
+    Env *dynamicEnv = &env;
+
+    if (recursive) {
+        /* Create a new environment that contains the attributes in
+           this `rec'. */
+        Env & env2(state.allocEnv(attrs.size()));
+        env2.up = &env;
+        dynamicEnv = &env2;
+
+        AttrDefs::iterator overrides = attrs.find(state.sOverrides);
+        bool hasOverrides = overrides != attrs.end();
+
+        /* The recursive attributes are evaluated in the new
+           environment, while the inherited attributes are evaluated
+           in the original environment. */
+        unsigned int displ = 0;
+        foreach (AttrDefs::iterator, i, attrs) {
+            Value * vAttr;
+            if (hasOverrides && !i->second.inherited) {
+                vAttr = state.allocValue();
+                mkThunk(*vAttr, env2, i->second.e);
+            } else
+                vAttr = i->second.e->maybeThunk(state, i->second.inherited ? env : env2);
+            env2.values[displ++] = vAttr;
+            v.attrs->push_back(Attr(i->first, vAttr, &i->second.pos));
+        }
+
+        /* If the rec contains an attribute called `__overrides', then
+           evaluate it, and add the attributes in that set to the rec.
+           This allows overriding of recursive attributes, which is
+           otherwise not possible.  (You can use the // operator to
+           replace an attribute, but other attributes in the rec will
+           still reference the original value, because that value has
+           been substituted into the bodies of the other attributes.
+           Hence we need __overrides.) */
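+        /* For example,
+               rec { x = 1; y = x; __overrides = { x = 2; }; }
+           yields y == 2, whereas
+               rec { x = 1; y = x; } // { x = 2; }
+           still yields y == 1. */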
+        if (hasOverrides) {
+            Value * vOverrides = (*v.attrs)[overrides->second.displ].value;
+            state.forceAttrs(*vOverrides);
+            foreach (Bindings::iterator, i, *vOverrides->attrs) {
+                AttrDefs::iterator j = attrs.find(i->name);
+                if (j != attrs.end()) {
+                    (*v.attrs)[j->second.displ] = *i;
+                    env2.values[j->second.displ] = i->value;
+                } else
+                    v.attrs->push_back(*i);
+            }
+            v.attrs->sort();
+        }
+    }
+
+    else
+        foreach (AttrDefs::iterator, i, attrs)
+            v.attrs->push_back(Attr(i->first, i->second.e->maybeThunk(state, env), &i->second.pos));
+
+    /* Dynamic attrs apply *after* rec and __overrides. */
+    foreach (DynamicAttrDefs::iterator, i, dynamicAttrs) {
+        Value nameVal;
+        if (i->nameExpr->es->size() == 1) {
+            i->nameExpr->es->front()->eval(state, *dynamicEnv, nameVal);
+            state.forceValue(nameVal);
+            if (nameVal.type == tNull)
+                continue;
+        }
+        i->nameExpr->eval(state, *dynamicEnv, nameVal);
+        state.forceStringNoCtx(nameVal);
+        Symbol nameSym = state.symbols.create(nameVal.string.s);
+        Bindings::iterator j = v.attrs->find(nameSym);
+        if (j != v.attrs->end())
+            throwEvalError("dynamic attribute ‘%1%’ at %2% already defined at %3%", nameSym, i->pos, *j->pos);
+
+        i->valueExpr->setName(nameSym);
+        /* Keep sorted order so find can catch duplicates */
+        v.attrs->insert(lower_bound(v.attrs->begin(), v.attrs->end(), Attr(nameSym, 0)),
+                Attr(nameSym, i->valueExpr->maybeThunk(state, *dynamicEnv), &i->pos));
+    }
+}
+
+
+void ExprLet::eval(EvalState & state, Env & env, Value & v)
+{
+    /* Create a new environment that contains the attributes in this
+       `let'. */
+    Env & env2(state.allocEnv(attrs->attrs.size()));
+    env2.up = &env;
+
+    /* The recursive attributes are evaluated in the new environment,
+       while the inherited attributes are evaluated in the original
+       environment. */
+    unsigned int displ = 0;
+    foreach (ExprAttrs::AttrDefs::iterator, i, attrs->attrs)
+        env2.values[displ++] = i->second.e->maybeThunk(state, i->second.inherited ? env : env2);
+
+    body->eval(state, env2, v);
+}
+
+
+void ExprList::eval(EvalState & state, Env & env, Value & v)
+{
+    state.mkList(v, elems.size());
+    for (unsigned int n = 0; n < v.list.length; ++n)
+        v.list.elems[n] = elems[n]->maybeThunk(state, env);
+}
+
+
+void ExprVar::eval(EvalState & state, Env & env, Value & v)
+{
+    Value * v2 = state.lookupVar(&env, *this, false);
+    state.forceValue(*v2);
+    v = *v2;
+}
+
+
+unsigned long nrLookups = 0;
+
+void ExprSelect::eval(EvalState & state, Env & env, Value & v)
+{
+    Value vTmp;
+    Pos * pos2 = 0;
+    Value * vAttrs = &vTmp;
+
+    e->eval(state, env, vTmp);
+
+    try {
+
+        foreach (AttrPath::const_iterator, i, attrPath) {
+            nrLookups++;
+            Bindings::iterator j;
+            Symbol name = getName(*i, state, env);
+            if (def) {
+                state.forceValue(*vAttrs);
+                if (vAttrs->type != tAttrs ||
+                    (j = vAttrs->attrs->find(name)) == vAttrs->attrs->end())
+                {
+                    def->eval(state, env, v);
+                    return;
+                }
+            } else {
+                state.forceAttrs(*vAttrs, pos);
+                if ((j = vAttrs->attrs->find(name)) == vAttrs->attrs->end()) {
+                    AttrPath staticPath;
+                    AttrPath::const_iterator j;
+                    for (j = attrPath.begin(); j != i; ++j)
+                        staticPath.push_back(AttrName(getName(*j, state, env)));
+                    staticPath.push_back(AttrName(getName(*j, state, env)));
+                    for (j = j + 1; j != attrPath.end(); ++j)
+                        staticPath.push_back(*j);
+                    throwEvalError("attribute ‘%1%’ missing, at %2%", showAttrPath(staticPath), pos);
+                }
+            }
+            vAttrs = j->value;
+            pos2 = j->pos;
+            if (state.countCalls && pos2) state.attrSelects[*pos2]++;
+        }
+
+        state.forceValue(*vAttrs);
+
+    } catch (Error & e) {
+        if (pos2 && pos2->file != state.sDerivationNix)
+            addErrorPrefix(e, "while evaluating the attribute ‘%1%’ at %2%:\n",
+                showAttrPath(attrPath), *pos2);
+        throw;
+    }
+
+    v = *vAttrs;
+}
+
+
+void ExprOpHasAttr::eval(EvalState & state, Env & env, Value & v)
+{
+    Value vTmp;
+    Value * vAttrs = &vTmp;
+
+    e->eval(state, env, vTmp);
+
+    foreach (AttrPath::const_iterator, i, attrPath) {
+        state.forceValue(*vAttrs);
+        Bindings::iterator j;
+        Symbol name = getName(*i, state, env);
+        if (vAttrs->type != tAttrs ||
+            (j = vAttrs->attrs->find(name)) == vAttrs->attrs->end())
+        {
+            mkBool(v, false);
+            return;
+        } else {
+            vAttrs = j->value;
+        }
+    }
+
+    mkBool(v, true);
+}
+
+
+void ExprLambda::eval(EvalState & state, Env & env, Value & v)
+{
+    v.type = tLambda;
+    v.lambda.env = &env;
+    v.lambda.fun = this;
+}
+
+
+void ExprApp::eval(EvalState & state, Env & env, Value & v)
+{
+    /* FIXME: vFun prevents GCC from doing tail call optimisation. */
+    Value vFun;
+    e1->eval(state, env, vFun);
+    state.callFunction(vFun, *(e2->maybeThunk(state, env)), v, pos);
+}
+
+
+void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos)
+{
+    /* Figure out the number of arguments still needed. */
+    unsigned int argsDone = 0;
+    Value * primOp = &fun;
+    while (primOp->type == tPrimOpApp) {
+        argsDone++;
+        primOp = primOp->primOpApp.left;
+    }
+    assert(primOp->type == tPrimOp);
+    unsigned int arity = primOp->primOp->arity;
+    unsigned int argsLeft = arity - argsDone;
+
+    if (argsLeft == 1) {
+        /* We have all the arguments, so call the primop. */
+
+        /* Put all the arguments in an array. */
+        Value * vArgs[arity];
+        unsigned int n = arity - 1;
+        vArgs[n--] = &arg;
+        for (Value * arg = &fun; arg->type == tPrimOpApp; arg = arg->primOpApp.left)
+            vArgs[n--] = arg->primOpApp.right;
+
+        /* And call the primop. */
+        nrPrimOpCalls++;
+        if (countCalls) primOpCalls[primOp->primOp->name]++;
+        primOp->primOp->fun(*this, pos, vArgs, v);
+    } else {
+        Value * fun2 = allocValue();
+        *fun2 = fun;
+        v.type = tPrimOpApp;
+        v.primOpApp.left = fun2;
+        v.primOpApp.right = &arg;
+    }
+}
+
+
+void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & pos)
+{
+    if (fun.type == tPrimOp || fun.type == tPrimOpApp) {
+        callPrimOp(fun, arg, v, pos);
+        return;
+    }
+
+    if (fun.type != tLambda)
+        throwTypeError("attempt to call something which is not a function but %1%, at %2%", fun, pos);
+
+    ExprLambda & lambda(*fun.lambda.fun);
+
+    unsigned int size =
+        (lambda.arg.empty() ? 0 : 1) +
+        (lambda.matchAttrs ? lambda.formals->formals.size() : 0);
+    Env & env2(allocEnv(size));
+    env2.up = fun.lambda.env;
+
+    unsigned int displ = 0;
+
+    if (!lambda.matchAttrs)
+        env2.values[displ++] = &arg;
+
+    else {
+        forceAttrs(arg, pos);
+
+        if (!lambda.arg.empty())
+            env2.values[displ++] = &arg;
+
+        /* For each formal argument, get the actual argument.  If
+           there is no matching actual argument but the formal
+           argument has a default, use the default. */
+        unsigned int attrsUsed = 0;
+        foreach (Formals::Formals_::iterator, i, lambda.formals->formals) {
+            Bindings::iterator j = arg.attrs->find(i->name);
+            if (j == arg.attrs->end()) {
+                if (!i->def) throwTypeError("%1% called without required argument ‘%2%’, at %3%",
+                    lambda, i->name, pos);
+                env2.values[displ++] = i->def->maybeThunk(*this, env2);
+            } else {
+                attrsUsed++;
+                env2.values[displ++] = j->value;
+            }
+        }
+
+        /* Check that each actual argument is listed as a formal
+           argument (unless the attribute match specifies a `...'). */
+        if (!lambda.formals->ellipsis && attrsUsed != arg.attrs->size()) {
+            /* Nope, so show the first unexpected argument to the
+               user. */
+            foreach (Bindings::iterator, i, *arg.attrs)
+                if (lambda.formals->argNames.find(i->name) == lambda.formals->argNames.end())
+                    throwTypeError("%1% called with unexpected argument ‘%2%’, at %3%", lambda, i->name, pos);
+            abort(); // can't happen
+        }
+    }
+
+    nrFunctionCalls++;
+    if (countCalls) incrFunctionCall(&lambda);
+
+    /* Evaluate the body.  This is conditional on showTrace, because
+       catching exceptions makes this function not tail-recursive. */
+    if (settings.showTrace)
+        try {
+            lambda.body->eval(*this, env2, v);
+        } catch (Error & e) {
+            addErrorPrefix(e, "while evaluating %1%, called from %2%:\n", lambda, pos);
+            throw;
+        }
+    else
+        fun.lambda.fun->body->eval(*this, env2, v);
+}
+
+
+// Lifted out of callFunction() because it creates a temporary that
+// prevents tail-call optimisation.
+void EvalState::incrFunctionCall(ExprLambda * fun)
+{
+    functionCalls[fun]++;
+}
+
+
+void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
+{
+    forceValue(fun);
+
+    if (fun.type != tLambda || !fun.lambda.fun->matchAttrs) {
+        res = fun;
+        return;
+    }
+
+    Value * actualArgs = allocValue();
+    mkAttrs(*actualArgs, fun.lambda.fun->formals->formals.size());
+
+    foreach (Formals::Formals_::iterator, i, fun.lambda.fun->formals->formals) {
+        Bindings::iterator j = args.find(i->name);
+        if (j != args.end())
+            actualArgs->attrs->push_back(*j);
+        else if (!i->def)
+            throwTypeError("cannot auto-call a function that has an argument without a default value (‘%1%’)", i->name);
+    }
+
+    actualArgs->attrs->sort();
+
+    callFunction(fun, *actualArgs, res, noPos);
+}
+
+
+void ExprWith::eval(EvalState & state, Env & env, Value & v)
+{
+    Env & env2(state.allocEnv(1));
+    env2.up = &env;
+    env2.prevWith = prevWith;
+    env2.haveWithAttrs = false;
+    env2.values[0] = (Value *) attrs;
+
+    body->eval(state, env2, v);
+}
+
+
+void ExprIf::eval(EvalState & state, Env & env, Value & v)
+{
+    (state.evalBool(env, cond) ? then : else_)->eval(state, env, v);
+}
+
+
+void ExprAssert::eval(EvalState & state, Env & env, Value & v)
+{
+    if (!state.evalBool(env, cond, pos))
+        throwAssertionError("assertion failed at %1%", pos);
+    body->eval(state, env, v);
+}
+
+
+void ExprOpNot::eval(EvalState & state, Env & env, Value & v)
+{
+    mkBool(v, !state.evalBool(env, e));
+}
+
+
+void ExprOpEq::eval(EvalState & state, Env & env, Value & v)
+{
+    Value v1; e1->eval(state, env, v1);
+    Value v2; e2->eval(state, env, v2);
+    mkBool(v, state.eqValues(v1, v2));
+}
+
+
+void ExprOpNEq::eval(EvalState & state, Env & env, Value & v)
+{
+    Value v1; e1->eval(state, env, v1);
+    Value v2; e2->eval(state, env, v2);
+    mkBool(v, !state.eqValues(v1, v2));
+}
+
+
+void ExprOpAnd::eval(EvalState & state, Env & env, Value & v)
+{
+    mkBool(v, state.evalBool(env, e1, pos) && state.evalBool(env, e2, pos));
+}
+
+
+void ExprOpOr::eval(EvalState & state, Env & env, Value & v)
+{
+    mkBool(v, state.evalBool(env, e1, pos) || state.evalBool(env, e2, pos));
+}
+
+
+void ExprOpImpl::eval(EvalState & state, Env & env, Value & v)
+{
+    mkBool(v, !state.evalBool(env, e1, pos) || state.evalBool(env, e2, pos));
+}
+
+
+void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v)
+{
+    Value v1, v2;
+    state.evalAttrs(env, e1, v1);
+    state.evalAttrs(env, e2, v2);
+
+    state.nrOpUpdates++;
+
+    if (v1.attrs->size() == 0) { v = v2; return; }
+    if (v2.attrs->size() == 0) { v = v1; return; }
+
+    state.mkAttrs(v, v1.attrs->size() + v2.attrs->size());
+
+    /* Merge the sets, preferring values from the second set.  Make
+       sure to keep the resulting vector in sorted order. */
+    Bindings::iterator i = v1.attrs->begin();
+    Bindings::iterator j = v2.attrs->begin();
+
+    while (i != v1.attrs->end() && j != v2.attrs->end()) {
+        if (i->name == j->name) {
+            v.attrs->push_back(*j);
+            ++i; ++j;
+        }
+        else if (i->name < j->name)
+            v.attrs->push_back(*i++);
+        else
+            v.attrs->push_back(*j++);
+    }
+
+    while (i != v1.attrs->end()) v.attrs->push_back(*i++);
+    while (j != v2.attrs->end()) v.attrs->push_back(*j++);
+
+    state.nrOpUpdateValuesCopied += v.attrs->size();
+}
+
+
+void ExprOpConcatLists::eval(EvalState & state, Env & env, Value & v)
+{
+    Value v1; e1->eval(state, env, v1);
+    Value v2; e2->eval(state, env, v2);
+    Value * lists[2] = { &v1, &v2 };
+    state.concatLists(v, 2, lists, pos);
+}
+
+
+void EvalState::concatLists(Value & v, unsigned int nrLists, Value * * lists, const Pos & pos)
+{
+    nrListConcats++;
+
+    Value * nonEmpty = 0;
+    unsigned int len = 0;
+    for (unsigned int n = 0; n < nrLists; ++n) {
+        forceList(*lists[n], pos);
+        unsigned int l = lists[n]->list.length;
+        len += l;
+        if (l) nonEmpty = lists[n];
+    }
+
+    if (nonEmpty && len == nonEmpty->list.length) {
+        v = *nonEmpty;
+        return;
+    }
+
+    mkList(v, len);
+    for (unsigned int n = 0, pos = 0; n < nrLists; ++n) {
+        unsigned int l = lists[n]->list.length;
+        memcpy(v.list.elems + pos, lists[n]->list.elems, l * sizeof(Value *));
+        pos += l;
+    }
+}
+
+
+void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
+{
+    PathSet context;
+    std::ostringstream s;
+    NixInt n = 0;
+
+    bool first = !forceString;
+    ValueType firstType = tString;
+
+    foreach (vector<Expr *>::iterator, i, *es) {
+        Value vTmp;
+        (*i)->eval(state, env, vTmp);
+
+        /* If the first element is a path, then the result will also
+           be a path, we don't copy anything (yet - that's done later,
+           since paths are copied when they are used in a derivation),
+           and none of the strings are allowed to have contexts. */
+        if (first) {
+            firstType = vTmp.type;
+            first = false;
+        }
+
+        if (firstType == tInt) {
+            if (vTmp.type != tInt)
+                throwEvalError("cannot add %1% to an integer, at %2%", showType(vTmp), pos);
+            n += vTmp.integer;
+        } else
+            s << state.coerceToString(pos, vTmp, context, false, firstType == tString);
+    }
+
+    if (firstType == tInt)
+        mkInt(v, n);
+    else if (firstType == tPath) {
+        if (!context.empty())
+            throwEvalError("a string that refers to a store path cannot be appended to a path, at %1%", pos);
+        mkPath(v, s.str().c_str());
+    } else
+        mkString(v, s.str(), context);
+}
+
+
+void ExprPos::eval(EvalState & state, Env & env, Value & v)
+{
+    state.mkPos(v, &pos);
+}
+
+
+void EvalState::strictForceValue(Value & v)
+{
+    forceValue(v);
+
+    if (v.type == tAttrs) {
+        foreach (Bindings::iterator, i, *v.attrs)
+            strictForceValue(*i->value);
+    }
+
+    else if (v.type == tList) {
+        for (unsigned int n = 0; n < v.list.length; ++n)
+            strictForceValue(*v.list.elems[n]);
+    }
+}
+
+
+NixInt EvalState::forceInt(Value & v, const Pos & pos)
+{
+    forceValue(v);
+    if (v.type != tInt)
+        throwTypeError("value is %1% while an integer was expected, at %2%", v, pos);
+    return v.integer;
+}
+
+
+bool EvalState::forceBool(Value & v)
+{
+    forceValue(v);
+    if (v.type != tBool)
+        throwTypeError("value is %1% while a Boolean was expected", v);
+    return v.boolean;
+}
+
+
+void EvalState::forceFunction(Value & v, const Pos & pos)
+{
+    forceValue(v);
+    if (v.type != tLambda && v.type != tPrimOp && v.type != tPrimOpApp)
+        throwTypeError("value is %1% while a function was expected, at %2%", v, pos);
+}
+
+
+string EvalState::forceString(Value & v, const Pos & pos)
+{
+    forceValue(v);
+    if (v.type != tString) {
+        if (pos)
+            throwTypeError("value is %1% while a string was expected, at %2%", v, pos);
+        else
+            throwTypeError("value is %1% while a string was expected", v);
+    }
+    return string(v.string.s);
+}
+
+
+void copyContext(const Value & v, PathSet & context)
+{
+    if (v.string.context)
+        for (const char * * p = v.string.context; *p; ++p)
+            context.insert(*p);
+}
+
+
+string EvalState::forceString(Value & v, PathSet & context)
+{
+    string s = forceString(v);
+    copyContext(v, context);
+    return s;
+}
+
+
+string EvalState::forceStringNoCtx(Value & v, const Pos & pos)
+{
+    string s = forceString(v, pos);
+    if (v.string.context) {
+        if (pos)
+            throwEvalError("the string ‘%1%’ is not allowed to refer to a store path (such as ‘%2%’), at %3%",
+                v.string.s, v.string.context[0], pos);
+        else
+            throwEvalError("the string ‘%1%’ is not allowed to refer to a store path (such as ‘%2%’)",
+                v.string.s, v.string.context[0]);
+    }
+    return s;
+}
+
+
+bool EvalState::isDerivation(Value & v)
+{
+    if (v.type != tAttrs) return false;
+    Bindings::iterator i = v.attrs->find(sType);
+    if (i == v.attrs->end()) return false;
+    forceValue(*i->value);
+    if (i->value->type != tString) return false;
+    return strcmp(i->value->string.s, "derivation") == 0;
+}
+
+
+string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
+    bool coerceMore, bool copyToStore)
+{
+    forceValue(v);
+
+    string s;
+
+    if (v.type == tString) {
+        copyContext(v, context);
+        return v.string.s;
+    }
+
+    if (v.type == tPath) {
+        Path path(canonPath(v.path));
+        return copyToStore ? copyPathToStore(context, path) : path;
+    }
+
+    if (v.type == tAttrs) {
+        Bindings::iterator i = v.attrs->find(sOutPath);
+        if (i == v.attrs->end()) throwTypeError("cannot coerce a set to a string, at %1%", pos);
+        return coerceToString(pos, *i->value, context, coerceMore, copyToStore);
+    }
+
+    if (coerceMore) {
+
+        /* Note that `false' is represented as an empty string for
+           shell scripting convenience, just like `null'. */
+        if (v.type == tBool && v.boolean) return "1";
+        if (v.type == tBool && !v.boolean) return "";
+        if (v.type == tInt) return int2String(v.integer);
+        if (v.type == tNull) return "";
+
+        if (v.type == tList) {
+            string result;
+            for (unsigned int n = 0; n < v.list.length; ++n) {
+                result += coerceToString(pos, *v.list.elems[n],
+                    context, coerceMore, copyToStore);
+                if (n < v.list.length - 1
+                    /* !!! not quite correct */
+                    && (v.list.elems[n]->type != tList || v.list.elems[n]->list.length != 0))
+                    result += " ";
+            }
+            return result;
+        }
+    }
+
+    throwTypeError("cannot coerce %1% to a string, at %2%", v, pos);
+}
+
+
+string EvalState::copyPathToStore(PathSet & context, const Path & path)
+{
+    if (nix::isDerivation(path))
+        throwEvalError("file names are not allowed to end in ‘%1%’", drvExtension);
+
+    Path dstPath;
+    if (srcToStore[path] != "")
+        dstPath = srcToStore[path];
+    else {
+        dstPath = settings.readOnlyMode
+            ? computeStorePathForPath(path).first
+            : store->addToStore(path, true, htSHA256, defaultPathFilter, repair);
+        srcToStore[path] = dstPath;
+        printMsg(lvlChatty, format("copied source ‘%1%’ -> ‘%2%’")
+            % path % dstPath);
+    }
+
+    context.insert(dstPath);
+    return dstPath;
+}
+
+
+Path EvalState::coerceToPath(const Pos & pos, Value & v, PathSet & context)
+{
+    string path = coerceToString(pos, v, context, false, false);
+    if (path == "" || path[0] != '/')
+        throwEvalError("string ‘%1%’ doesn't represent an absolute path, at %2%", path, pos);
+    return path;
+}
+
+
+bool EvalState::eqValues(Value & v1, Value & v2)
+{
+    forceValue(v1);
+    forceValue(v2);
+
+    /* !!! Hack to support some old broken code that relies on pointer
+       equality tests between sets.  (Specifically, builderDefs calls
+       uniqList on a list of sets.)  Will remove this eventually. */
+    if (&v1 == &v2) return true;
+
+    if (v1.type != v2.type) return false;
+
+    switch (v1.type) {
+
+        case tInt:
+            return v1.integer == v2.integer;
+
+        case tBool:
+            return v1.boolean == v2.boolean;
+
+        case tString:
+            return strcmp(v1.string.s, v2.string.s) == 0;
+
+        case tPath:
+            return strcmp(v1.path, v2.path) == 0;
+
+        case tNull:
+            return true;
+
+        case tList:
+            if (v1.list.length != v2.list.length) return false;
+            for (unsigned int n = 0; n < v1.list.length; ++n)
+                if (!eqValues(*v1.list.elems[n], *v2.list.elems[n])) return false;
+            return true;
+
+        case tAttrs: {
+            /* If both sets denote a derivation (type = "derivation"),
+               then compare their outPaths. */
+            if (isDerivation(v1) && isDerivation(v2)) {
+                Bindings::iterator i = v1.attrs->find(sOutPath);
+                Bindings::iterator j = v2.attrs->find(sOutPath);
+                if (i != v1.attrs->end() && j != v2.attrs->end())
+                    return eqValues(*i->value, *j->value);
+            }
+
+            if (v1.attrs->size() != v2.attrs->size()) return false;
+
+            /* Otherwise, compare the attributes one by one. */
+            Bindings::iterator i, j;
+            for (i = v1.attrs->begin(), j = v2.attrs->begin(); i != v1.attrs->end(); ++i, ++j)
+                if (i->name != j->name || !eqValues(*i->value, *j->value))
+                    return false;
+
+            return true;
+        }
+
+        /* Functions are incomparable. */
+        case tLambda:
+        case tPrimOp:
+        case tPrimOpApp:
+            return false;
+
+        default:
+            throwEvalError("cannot compare %1% with %2%", showType(v1), showType(v2));
+    }
+}
+
+
+void EvalState::printStats()
+{
+    bool showStats = getEnv("NIX_SHOW_STATS", "0") != "0";
+    Verbosity v = showStats ? lvlInfo : lvlDebug;
+    printMsg(v, "evaluation statistics:");
+
+    struct rusage buf;
+    getrusage(RUSAGE_SELF, &buf);
+    float cpuTime = buf.ru_utime.tv_sec + ((float) buf.ru_utime.tv_usec / 1000000);
+
+    printMsg(v, format("  time elapsed: %1%") % cpuTime);
+    printMsg(v, format("  size of a value: %1%") % sizeof(Value));
+    printMsg(v, format("  environments allocated: %1% (%2% bytes)")
+        % nrEnvs % (nrEnvs * sizeof(Env) + nrValuesInEnvs * sizeof(Value *)));
+    printMsg(v, format("  list elements: %1% (%2% bytes)")
+        % nrListElems % (nrListElems * sizeof(Value *)));
+    printMsg(v, format("  list concatenations: %1%") % nrListConcats);
+    printMsg(v, format("  values allocated: %1% (%2% bytes)")
+        % nrValues % (nrValues * sizeof(Value)));
+    printMsg(v, format("  sets allocated: %1%") % nrAttrsets);
+    printMsg(v, format("  right-biased unions: %1%") % nrOpUpdates);
+    printMsg(v, format("  values copied in right-biased unions: %1%") % nrOpUpdateValuesCopied);
+    printMsg(v, format("  symbols in symbol table: %1%") % symbols.size());
+    printMsg(v, format("  size of symbol table: %1%") % symbols.totalSize());
+    printMsg(v, format("  number of thunks: %1%") % nrThunks);
+    printMsg(v, format("  number of thunks avoided: %1%") % nrAvoided);
+    printMsg(v, format("  number of attr lookups: %1%") % nrLookups);
+    printMsg(v, format("  number of primop calls: %1%") % nrPrimOpCalls);
+    printMsg(v, format("  number of function calls: %1%") % nrFunctionCalls);
+
+    if (countCalls) {
+        v = lvlInfo;
+
+        printMsg(v, format("calls to %1% primops:") % primOpCalls.size());
+        typedef std::multimap<unsigned int, Symbol> PrimOpCalls_;
+        PrimOpCalls_ primOpCalls_;
+        foreach (PrimOpCalls::iterator, i, primOpCalls)
+            primOpCalls_.insert(std::pair<unsigned int, Symbol>(i->second, i->first));
+        foreach_reverse (PrimOpCalls_::reverse_iterator, i, primOpCalls_)
+            printMsg(v, format("%1$10d %2%") % i->first % i->second);
+
+        printMsg(v, format("calls to %1% functions:") % functionCalls.size());
+        typedef std::multimap<unsigned int, ExprLambda *> FunctionCalls_;
+        FunctionCalls_ functionCalls_;
+        foreach (FunctionCalls::iterator, i, functionCalls)
+            functionCalls_.insert(std::pair<unsigned int, ExprLambda *>(i->second, i->first));
+        foreach_reverse (FunctionCalls_::reverse_iterator, i, functionCalls_)
+            printMsg(v, format("%1$10d %2%") % i->first % i->second->showNamePos());
+
+        printMsg(v, format("evaluations of %1% attributes:") % attrSelects.size());
+        typedef std::multimap<unsigned int, Pos> AttrSelects_;
+        AttrSelects_ attrSelects_;
+        foreach (AttrSelects::iterator, i, attrSelects)
+            attrSelects_.insert(std::pair<unsigned int, Pos>(i->second, i->first));
+        foreach_reverse (AttrSelects_::reverse_iterator, i, attrSelects_)
+            printMsg(v, format("%1$10d %2%") % i->first % i->second);
+
+    }
+}
+
+
+}
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
new file mode 100644
index 000000000000..aa706cf98359
--- /dev/null
+++ b/src/libexpr/eval.hh
@@ -0,0 +1,299 @@
+#pragma once
+
+#include "value.hh"
+#include "nixexpr.hh"
+#include "symbol-table.hh"
+#include "hash.hh"
+
+#include <map>
+
+#if HAVE_BOEHMGC
+#include <gc/gc_allocator.h>
+#endif
+
+
+namespace nix {
+
+
+class EvalState;
+struct Attr;
+
+
+/* Sets are represented as a vector of attributes, sorted by symbol
+   (i.e. pointer to the attribute name in the symbol table). */
+#if HAVE_BOEHMGC
+typedef std::vector<Attr, gc_allocator<Attr> > BindingsBase;
+#else
+typedef std::vector<Attr> BindingsBase;
+#endif
+
+
+class Bindings : public BindingsBase
+{
+public:
+    iterator find(const Symbol & name);
+    void sort();
+};
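+
+/* Since the vector is kept sorted by Symbol, find() can be implemented with
+   a binary search over the Attr names.  A minimal sketch (the real
+   definition lives elsewhere in libexpr and may differ):
+
+     Bindings::iterator Bindings::find(const Symbol & name)
+     {
+         Attr key(name, 0);
+         iterator i = std::lower_bound(begin(), end(), key);
+         return i != end() && i->name == name ? i : end();
+     }
+*/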
+
+
+typedef void (* PrimOpFun) (EvalState & state, const Pos & pos, Value * * args, Value & v);
+
+
+struct PrimOp
+{
+    PrimOpFun fun;
+    unsigned int arity;
+    Symbol name;
+    PrimOp(PrimOpFun fun, unsigned int arity, Symbol name)
+        : fun(fun), arity(arity), name(name) { }
+};
+
+
+struct Env
+{
+    Env * up;
+    unsigned short prevWith; // nr of levels up to next `with' environment
+    bool haveWithAttrs;
+    Value * values[0];
+};
+
+
+struct Attr
+{
+    Symbol name;
+    Value * value;
+    Pos * pos;
+    Attr(Symbol name, Value * value, Pos * pos = &noPos)
+        : name(name), value(value), pos(pos) { };
+    Attr() : pos(&noPos) { };
+    bool operator < (const Attr & a) const
+    {
+        return name < a.name;
+    }
+};
+
+
+void mkString(Value & v, const string & s, const PathSet & context = PathSet());
+
+void copyContext(const Value & v, PathSet & context);
+
+
+/* Cache for calls to addToStore(); maps source paths to the store
+   paths. */
+typedef std::map<Path, Path> SrcToStore;
+
+
+std::ostream & operator << (std::ostream & str, const Value & v);
+
+
+typedef list<std::pair<string, Path> > SearchPath;
+
+
+class EvalState
+{
+public:
+    SymbolTable symbols;
+
+    const Symbol sWith, sOutPath, sDrvPath, sType, sMeta, sName, sValue,
+        sSystem, sOverrides, sOutputs, sOutputName, sIgnoreNulls,
+        sFile, sLine, sColumn;
+    Symbol sDerivationNix;
+
+    /* If set, force copying files to the Nix store even if they
+       already exist there. */
+    bool repair;
+
+private:
+    SrcToStore srcToStore;
+
+    /* A cache from path names to values. */
+#if HAVE_BOEHMGC
+    typedef std::map<Path, Value, std::less<Path>, gc_allocator<std::pair<const Path, Value> > > FileEvalCache;
+#else
+    typedef std::map<Path, Value> FileEvalCache;
+#endif
+    FileEvalCache fileEvalCache;
+
+    SearchPath searchPath;
+
+public:
+
+    EvalState(const Strings & _searchPath);
+    ~EvalState();
+
+    void addToSearchPath(const string & s, bool warn = false);
+
+    /* Parse a Nix expression from the specified file. */
+    Expr * parseExprFromFile(const Path & path);
+    Expr * parseExprFromFile(const Path & path, StaticEnv & staticEnv);
+
+    /* Parse a Nix expression from the specified string. */
+    Expr * parseExprFromString(const string & s, const Path & basePath, StaticEnv & staticEnv);
+    Expr * parseExprFromString(const string & s, const Path & basePath);
+
+    /* Evaluate an expression read from the given file to normal
+       form. */
+    void evalFile(const Path & path, Value & v);
+
+    void resetFileCache();
+
+    /* Look up a file in the search path. */
+    Path findFile(const string & path);
+    Path findFile(SearchPath & searchPath, const string & path);
+
+    /* Evaluate an expression to normal form, storing the result in
+       value `v'. */
+    void eval(Expr * e, Value & v);
+
+    /* Evaluate the expression, then verify that it has the expected
+       type. */
+    inline bool evalBool(Env & env, Expr * e);
+    inline bool evalBool(Env & env, Expr * e, const Pos & pos);
+    inline void evalAttrs(Env & env, Expr * e, Value & v);
+
+    /* If `v' is a thunk, enter it and overwrite `v' with the result
+       of the evaluation of the thunk.  If `v' is a delayed function
+       application, call the function and overwrite `v' with the
+       result.  Otherwise, this is a no-op. */
+    inline void forceValue(Value & v);
+
+    /* Force a value, then recursively force list elements and
+       attributes. */
+    void strictForceValue(Value & v);
+
+    /* Force `v', and then verify that it has the expected type. */
+    NixInt forceInt(Value & v, const Pos & pos);
+    bool forceBool(Value & v);
+    inline void forceAttrs(Value & v);
+    inline void forceAttrs(Value & v, const Pos & pos);
+    inline void forceList(Value & v);
+    inline void forceList(Value & v, const Pos & pos);
+    void forceFunction(Value & v, const Pos & pos); // either lambda or primop
+    string forceString(Value & v, const Pos & pos = noPos);
+    string forceString(Value & v, PathSet & context);
+    string forceStringNoCtx(Value & v, const Pos & pos = noPos);
+
+    /* Return true iff the value `v' denotes a derivation (i.e. a
+       set with attribute `type = "derivation"'). */
+    bool isDerivation(Value & v);
+
+    /* String coercion.  Converts strings, paths and derivations to a
+       string.  If `coerceMore' is set, also converts nulls, integers,
+       booleans and lists to a string.  If `copyToStore' is set,
+       referenced paths are copied to the Nix store as a side effect. */
+    string coerceToString(const Pos & pos, Value & v, PathSet & context,
+        bool coerceMore = false, bool copyToStore = true);
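+    /* For instance, a string coerces to itself, a path to its location
+       (copied to the store if ‘copyToStore’ is set), and a derivation to
+       its ‘outPath’; with ‘coerceMore’, null and false become "", true
+       becomes "1", an integer becomes its decimal representation, and a
+       list becomes the space-separated coercion of its elements. */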
+
+    string copyPathToStore(PathSet & context, const Path & path);
+
+    /* Path coercion.  Converts strings, paths and derivations to a
+       path.  The result is guaranteed to be a canonicalised, absolute
+       path.  Nothing is copied to the store. */
+    Path coerceToPath(const Pos & pos, Value & v, PathSet & context);
+
+public:
+
+    /* The base environment, containing the builtin functions and
+       values. */
+    Env & baseEnv;
+
+    /* The same, but used during parsing to resolve variables. */
+    StaticEnv staticBaseEnv; // !!! should be private
+
+private:
+
+    unsigned int baseEnvDispl;
+
+    void createBaseEnv();
+
+    void addConstant(const string & name, Value & v);
+
+    void addPrimOp(const string & name,
+        unsigned int arity, PrimOpFun primOp);
+
+public:
+
+    void getBuiltin(const string & name, Value & v);
+
+private:
+
+    inline Value * lookupVar(Env * env, const ExprVar & var, bool noEval);
+
+    friend struct ExprVar;
+    friend struct ExprAttrs;
+    friend struct ExprLet;
+
+    Expr * parse(const char * text, const Path & path,
+        const Path & basePath, StaticEnv & staticEnv);
+
+public:
+
+    /* Do a deep equality test between two values.  That is, list
+       elements and attributes are compared recursively. */
+    bool eqValues(Value & v1, Value & v2);
+
+    void callFunction(Value & fun, Value & arg, Value & v, const Pos & pos);
+    void callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos);
+
+    /* Automatically call a function for which each argument has a
+       default value or has a binding in the `args' map. */
+    void autoCallFunction(Bindings & args, Value & fun, Value & res);
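+    /* E.g. for a function ‘{ system ? builtins.currentSystem, config }: ...’
+       and an ‘args’ binding for ‘config’, the call uses ‘config’ from
+       ‘args’ and the default for ‘system’; a formal with neither a default
+       nor a binding in ‘args’ is an error. */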
+
+    /* Allocation primitives. */
+    Value * allocValue();
+    Env & allocEnv(unsigned int size);
+
+    Value * allocAttr(Value & vAttrs, const Symbol & name);
+
+    void mkList(Value & v, unsigned int length);
+    void mkAttrs(Value & v, unsigned int expected);
+    void mkThunk_(Value & v, Expr * expr);
+    void mkPos(Value & v, Pos * pos);
+
+    void concatLists(Value & v, unsigned int nrLists, Value * * lists, const Pos & pos);
+
+    /* Print statistics. */
+    void printStats();
+
+private:
+
+    unsigned long nrEnvs;
+    unsigned long nrValuesInEnvs;
+    unsigned long nrValues;
+    unsigned long nrListElems;
+    unsigned long nrAttrsets;
+    unsigned long nrOpUpdates;
+    unsigned long nrOpUpdateValuesCopied;
+    unsigned long nrListConcats;
+    unsigned long nrPrimOpCalls;
+    unsigned long nrFunctionCalls;
+
+    bool countCalls;
+
+    typedef std::map<Symbol, unsigned int> PrimOpCalls;
+    PrimOpCalls primOpCalls;
+
+    typedef std::map<ExprLambda *, unsigned int> FunctionCalls;
+    FunctionCalls functionCalls;
+
+    void incrFunctionCall(ExprLambda * fun);
+
+    typedef std::map<Pos, unsigned int> AttrSelects;
+    AttrSelects attrSelects;
+
+    friend struct ExprOpUpdate;
+    friend struct ExprOpConcatLists;
+    friend struct ExprSelect;
+    friend void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v);
+};
+
+
+/* Return a string representing the type of the value `v'. */
+string showType(const Value & v);
+
+
+/* If `path' refers to a directory, then append "/default.nix". */
+Path resolveExprPath(Path path);
+
+
+}
diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc
new file mode 100644
index 000000000000..db91eab37156
--- /dev/null
+++ b/src/libexpr/get-drvs.cc
@@ -0,0 +1,302 @@
+#include "get-drvs.hh"
+#include "util.hh"
+#include "eval-inline.hh"
+
+#include <cstring>
+
+
+namespace nix {
+
+
+string DrvInfo::queryDrvPath()
+{
+    if (drvPath == "" && attrs) {
+        Bindings::iterator i = attrs->find(state->sDrvPath);
+        PathSet context;
+        drvPath = i != attrs->end() ? state->coerceToPath(*i->pos, *i->value, context) : "";
+    }
+    return drvPath;
+}
+
+
+string DrvInfo::queryOutPath()
+{
+    if (outPath == "" && attrs) {
+        Bindings::iterator i = attrs->find(state->sOutPath);
+        PathSet context;
+        outPath = i != attrs->end() ? state->coerceToPath(*i->pos, *i->value, context) : "";
+    }
+    return outPath;
+}
+
+
+DrvInfo::Outputs DrvInfo::queryOutputs()
+{
+    if (outputs.empty()) {
+        /* Get the ‘outputs’ list. */
+        Bindings::iterator i;
+        if (attrs && (i = attrs->find(state->sOutputs)) != attrs->end()) {
+            state->forceList(*i->value, *i->pos);
+
+            /* For each output... */
+            for (unsigned int j = 0; j < i->value->list.length; ++j) {
+                /* Evaluate the corresponding set. */
+                string name = state->forceStringNoCtx(*i->value->list.elems[j], *i->pos);
+                Bindings::iterator out = attrs->find(state->symbols.create(name));
+                if (out == attrs->end()) continue; // FIXME: throw error?
+                state->forceAttrs(*out->value);
+
+                /* And evaluate its ‘outPath’ attribute. */
+                Bindings::iterator outPath = out->value->attrs->find(state->sOutPath);
+                if (outPath == out->value->attrs->end()) continue; // FIXME: throw error?
+                PathSet context;
+                outputs[name] = state->coerceToPath(*outPath->pos, *outPath->value, context);
+            }
+        } else
+            outputs["out"] = queryOutPath();
+    }
+    return outputs;
+}
+
+
+string DrvInfo::queryOutputName()
+{
+    if (outputName == "" && attrs) {
+        Bindings::iterator i = attrs->find(state->sOutputName);
+        outputName = i != attrs->end() ? state->forceStringNoCtx(*i->value) : "";
+    }
+    return outputName;
+}
+
+
+Bindings * DrvInfo::getMeta()
+{
+    if (meta) return meta;
+    if (!attrs) return 0;
+    Bindings::iterator a = attrs->find(state->sMeta);
+    if (a == attrs->end()) return 0;
+    state->forceAttrs(*a->value, *a->pos);
+    meta = a->value->attrs;
+    return meta;
+}
+
+
+StringSet DrvInfo::queryMetaNames()
+{
+    StringSet res;
+    if (!getMeta()) return res;
+    foreach (Bindings::iterator, i, *meta)
+        res.insert(i->name);
+    return res;
+}
+
+
+bool DrvInfo::checkMeta(Value & v)
+{
+    state->forceValue(v);
+    if (v.type == tList) {
+        for (unsigned int n = 0; n < v.list.length; ++n)
+            if (!checkMeta(*v.list.elems[n])) return false;
+        return true;
+    }
+    else if (v.type == tAttrs) {
+        Bindings::iterator i = v.attrs->find(state->sOutPath);
+        if (i != v.attrs->end()) return false;
+        foreach (Bindings::iterator, i, *v.attrs)
+            if (!checkMeta(*i->value)) return false;
+        return true;
+    }
+    else return v.type == tInt || v.type == tBool || v.type == tString;
+}
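+
+/* In other words: a meta value may be a string, integer or Boolean, or a
+   (nested) list or set of such values; any set that has an ‘outPath’
+   attribute (i.e. looks like a derivation) is rejected. */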
+
+
+Value * DrvInfo::queryMeta(const string & name)
+{
+    if (!getMeta()) return 0;
+    Bindings::iterator a = meta->find(state->symbols.create(name));
+    if (a == meta->end() || !checkMeta(*a->value)) return 0;
+    return a->value;
+}
+
+
+string DrvInfo::queryMetaString(const string & name)
+{
+    Value * v = queryMeta(name);
+    if (!v || v->type != tString) return "";
+    return v->string.s;
+}
+
+
+int DrvInfo::queryMetaInt(const string & name, int def)
+{
+    Value * v = queryMeta(name);
+    if (!v) return def;
+    if (v->type == tInt) return v->integer;
+    if (v->type == tString) {
+        /* Backwards compatibility for the time before integer meta
+           fields were supported. */
+        int n;
+        if (string2Int(v->string.s, n)) return n;
+    }
+    return def;
+}
+
+
+bool DrvInfo::queryMetaBool(const string & name, bool def)
+{
+    Value * v = queryMeta(name);
+    if (!v) return def;
+    if (v->type == tBool) return v->boolean;
+    if (v->type == tString) {
+        /* Backwards compatibility for the time before Boolean meta
+           fields were supported. */
+        if (strcmp(v->string.s, "true") == 0) return true;
+        if (strcmp(v->string.s, "false") == 0) return false;
+    }
+    return def;
+}
+
+
+void DrvInfo::setMeta(const string & name, Value * v)
+{
+    getMeta();
+    Bindings * old = meta;
+    meta = new Bindings();
+    Symbol sym = state->symbols.create(name);
+    if (old)
+        foreach (Bindings::iterator, i, *old)
+            if (i->name != sym)
+                meta->push_back(*i);
+    if (v) meta->push_back(Attr(sym, v));
+    meta->sort();
+}
+
+
+/* Cache for already considered attrsets. */
+typedef set<Bindings *> Done;
+
+
+/* Evaluate value `v'.  If it evaluates to a set of type `derivation',
+   then put information about it in `drvs' (unless its attribute set is
+   already in `done').  The result boolean indicates whether it makes sense
+   for the caller to recursively search for derivations in `v'. */
+static bool getDerivation(EvalState & state, Value & v,
+    const string & attrPath, DrvInfos & drvs, Done & done,
+    bool ignoreAssertionFailures)
+{
+    try {
+        state.forceValue(v);
+        if (!state.isDerivation(v)) return true;
+
+        /* Remove spurious duplicates (e.g., a set like `rec { x =
+           derivation {...}; y = x; }'). */
+        if (done.find(v.attrs) != done.end()) return false;
+        done.insert(v.attrs);
+
+        Bindings::iterator i = v.attrs->find(state.sName);
+        /* !!! We really would like to have a decent back trace here. */
+        if (i == v.attrs->end()) throw TypeError("derivation name missing");
+
+        Bindings::iterator i2 = v.attrs->find(state.sSystem);
+
+        DrvInfo drv(state, state.forceStringNoCtx(*i->value), attrPath,
+            i2 == v.attrs->end() ? "unknown" : state.forceStringNoCtx(*i2->value, *i2->pos),
+            v.attrs);
+
+        drvs.push_back(drv);
+        return false;
+
+    } catch (AssertionError & e) {
+        if (ignoreAssertionFailures) return false;
+        throw;
+    }
+}
+
+
+bool getDerivation(EvalState & state, Value & v, DrvInfo & drv,
+    bool ignoreAssertionFailures)
+{
+    Done done;
+    DrvInfos drvs;
+    getDerivation(state, v, "", drvs, done, ignoreAssertionFailures);
+    if (drvs.size() != 1) return false;
+    drv = drvs.front();
+    return true;
+}
+
+
+static string addToPath(const string & s1, const string & s2)
+{
+    return s1.empty() ? s2 : s1 + "." + s2;
+}
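+
+/* E.g. addToPath("foo", "bar") yields "foo.bar" and addToPath("", "bar")
+   yields "bar", so attribute paths like "foo.bar.baz" are built up one
+   component at a time. */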
+
+
+static void getDerivations(EvalState & state, Value & vIn,
+    const string & pathPrefix, Bindings & autoArgs,
+    DrvInfos & drvs, Done & done,
+    bool ignoreAssertionFailures)
+{
+    Value v;
+    state.autoCallFunction(autoArgs, vIn, v);
+
+    /* Process the expression. */
+    if (!getDerivation(state, v, pathPrefix, drvs, done, ignoreAssertionFailures)) ;
+
+    else if (v.type == tAttrs) {
+
+        /* !!! undocumented hackery to support combining channels in
+           nix-env.cc. */
+        bool combineChannels = v.attrs->find(state.symbols.create("_combineChannels")) != v.attrs->end();
+
+        /* Consider the attributes in sorted order to get more
+           deterministic behaviour in nix-env operations (e.g. when
+           there are name clashes between derivations, the derivation
+           bound to the attribute with the "lower" name should take
+           precedence). */
+        typedef std::map<string, Symbol> SortedSymbols;
+        SortedSymbols attrs;
+        foreach (Bindings::iterator, i, *v.attrs)
+            attrs.insert(std::pair<string, Symbol>(i->name, i->name));
+
+        foreach (SortedSymbols::iterator, i, attrs) {
+            startNest(nest, lvlDebug, format("evaluating attribute ‘%1%’") % i->first);
+            string pathPrefix2 = addToPath(pathPrefix, i->first);
+            Value & v2(*v.attrs->find(i->second)->value);
+            if (combineChannels)
+                getDerivations(state, v2, pathPrefix2, autoArgs, drvs, done, ignoreAssertionFailures);
+            else if (getDerivation(state, v2, pathPrefix2, drvs, done, ignoreAssertionFailures)) {
+                /* If the value of this attribute is itself a set,
+                   should we recurse into it?  => Only if it has a
+                   `recurseForDerivations = true' attribute. */
+                if (v2.type == tAttrs) {
+                    Bindings::iterator j = v2.attrs->find(state.symbols.create("recurseForDerivations"));
+                    if (j != v2.attrs->end() && state.forceBool(*j->value))
+                        getDerivations(state, v2, pathPrefix2, autoArgs, drvs, done, ignoreAssertionFailures);
+                }
+            }
+        }
+    }
+
+    else if (v.type == tList) {
+        for (unsigned int n = 0; n < v.list.length; ++n) {
+            startNest(nest, lvlDebug,
+                format("evaluating list element"));
+            string pathPrefix2 = addToPath(pathPrefix, (format("%1%") % n).str());
+            if (getDerivation(state, *v.list.elems[n], pathPrefix2, drvs, done, ignoreAssertionFailures))
+                getDerivations(state, *v.list.elems[n], pathPrefix2, autoArgs, drvs, done, ignoreAssertionFailures);
+        }
+    }
+
+    else throw TypeError("expression does not evaluate to a derivation (or a set or list of those)");
+}
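+
+/* Concretely: the attributes of the top-level set are always searched, but a
+   nested set such as ‘foo = { bar = derivation { ... }; }’ is only descended
+   into if it also contains ‘recurseForDerivations = true’, whereas list
+   elements are always searched recursively. */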
+
+
+void getDerivations(EvalState & state, Value & v, const string & pathPrefix,
+    Bindings & autoArgs, DrvInfos & drvs, bool ignoreAssertionFailures)
+{
+    Done done;
+    getDerivations(state, v, pathPrefix, autoArgs, drvs, done, ignoreAssertionFailures);
+}
+
+
+}
diff --git a/src/libexpr/get-drvs.hh b/src/libexpr/get-drvs.hh
new file mode 100644
index 000000000000..98f762494aa5
--- /dev/null
+++ b/src/libexpr/get-drvs.hh
@@ -0,0 +1,91 @@
+#pragma once
+
+#include "eval.hh"
+
+#include <string>
+#include <map>
+
+
+namespace nix {
+
+
+struct DrvInfo
+{
+public:
+    typedef std::map<string, Path> Outputs;
+
+private:
+    EvalState * state;
+
+    string drvPath;
+    string outPath;
+    string outputName;
+    Outputs outputs;
+
+    bool failed; // set if we get an AssertionError
+
+    Bindings * attrs, * meta;
+
+    Bindings * getMeta();
+
+    bool checkMeta(Value & v);
+
+public:
+    string name;
+    string attrPath; /* path towards the derivation */
+    string system;
+
+    DrvInfo(EvalState & state) : state(&state), failed(false), attrs(0), meta(0) { };
+    DrvInfo(EvalState & state, const string & name, const string & attrPath, const string & system, Bindings * attrs)
+        : state(&state), failed(false), attrs(attrs), meta(0), name(name), attrPath(attrPath), system(system) { };
+
+    string queryDrvPath();
+    string queryOutPath();
+    string queryOutputName();
+    Outputs queryOutputs();
+
+    StringSet queryMetaNames();
+    Value * queryMeta(const string & name);
+    string queryMetaString(const string & name);
+    int queryMetaInt(const string & name, int def);
+    bool queryMetaBool(const string & name, bool def);
+    void setMeta(const string & name, Value * v);
+
+    /*
+    MetaInfo queryMetaInfo(EvalState & state) const;
+    MetaValue queryMetaInfo(EvalState & state, const string & name) const;
+    */
+
+    void setDrvPath(const string & s)
+    {
+        drvPath = s;
+    }
+
+    void setOutPath(const string & s)
+    {
+        outPath = s;
+    }
+
+    void setFailed() { failed = true; };
+    bool hasFailed() { return failed; };
+};
+
+
+#if HAVE_BOEHMGC
+typedef list<DrvInfo, traceable_allocator<DrvInfo> > DrvInfos;
+#else
+typedef list<DrvInfo> DrvInfos;
+#endif
+
+
+/* If value `v' denotes a derivation, store information about the
+   derivation in `drv' and return true.  Otherwise, return false. */
+bool getDerivation(EvalState & state, Value & v, DrvInfo & drv,
+    bool ignoreAssertionFailures);
+
+void getDerivations(EvalState & state, Value & v, const string & pathPrefix,
+    Bindings & autoArgs, DrvInfos & drvs,
+    bool ignoreAssertionFailures);
+
+
+}
diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc
new file mode 100644
index 000000000000..af4394b0bbba
--- /dev/null
+++ b/src/libexpr/json-to-value.cc
@@ -0,0 +1,144 @@
+#include "config.h"
+#include "json-to-value.hh"
+
+#include <cstring>
+
+namespace nix {
+
+
+static void skipWhitespace(const char * & s)
+{
+    while (*s == ' ' || *s == '\t' || *s == '\n' || *s == '\r') s++;
+}
+
+
+#if HAVE_BOEHMGC
+typedef std::vector<Value *, gc_allocator<Value *> > ValueVector;
+#else
+typedef std::vector<Value *> ValueVector;
+#endif
+
+
+static string parseJSONString(const char * & s)
+{
+    string res;
+    if (*s++ != '"') throw JSONParseError("expected JSON string");
+    while (*s != '"') {
+        if (!*s) throw JSONParseError("got end-of-string in JSON string");
+        if (*s == '\\') {
+            s++;
+            if (*s == '"') res += '"';
+            else if (*s == '\\') res += '\\';
+            else if (*s == '/') res += '/';
+            else if (*s == 'b') res += '\b';
+            else if (*s == 'f') res += '\f';
+            else if (*s == 'n') res += '\n';
+            else if (*s == 'r') res += '\r';
+            else if (*s == 't') res += '\t';
+            else if (*s == 'u') throw JSONParseError("\\u characters in JSON strings are currently not supported");
+            else throw JSONParseError("invalid escaped character in JSON string");
+            s++;
+        } else
+            res += *s++;
+    }
+    s++;
+    return res;
+}
+
+
+static void parseJSON(EvalState & state, const char * & s, Value & v)
+{
+    skipWhitespace(s);
+
+    if (!*s) throw JSONParseError("expected JSON value");
+
+    if (*s == '[') {
+        s++;
+        ValueVector values;
+        values.reserve(128);
+        skipWhitespace(s);
+        while (1) {
+            if (values.empty() && *s == ']') break;
+            Value * v2 = state.allocValue();
+            parseJSON(state, s, *v2);
+            values.push_back(v2);
+            skipWhitespace(s);
+            if (*s == ']') break;
+            if (*s != ',') throw JSONParseError("expected ‘,’ or ‘]’ after JSON array element");
+            s++;
+        }
+        s++;
+        state.mkList(v, values.size());
+        for (size_t n = 0; n < values.size(); ++n)
+            v.list.elems[n] = values[n];
+    }
+
+    else if (*s == '{') {
+        s++;
+        state.mkAttrs(v, 1);
+        while (1) {
+            skipWhitespace(s);
+            if (v.attrs->empty() && *s == '}') break;
+            string name = parseJSONString(s);
+            skipWhitespace(s);
+            if (*s != ':') throw JSONParseError("expected ‘:’ in JSON object");
+            s++;
+            Value * v2 = state.allocValue();
+            parseJSON(state, s, *v2);
+            v.attrs->push_back(Attr(state.symbols.create(name), v2));
+            skipWhitespace(s);
+            if (*s == '}') break;
+            if (*s != ',') throw JSONParseError("expected ‘,’ or ‘}’ after JSON member");
+            s++;
+        }
+        v.attrs->sort();
+        s++;
+    }
+
+    else if (*s == '"') {
+        mkString(v, parseJSONString(s));
+    }
+
+    else if (isdigit(*s) || *s == '-') {
+        bool neg = false;
+        if (*s == '-') {
+            neg = true;
+            if (!*++s) throw JSONParseError("unexpected end of JSON number");
+        }
+        NixInt n = 0;
+        // FIXME: detect overflow
+        while (isdigit(*s)) n = n * 10 + (*s++ - '0');
+        if (*s == '.' || *s == 'e') throw JSONParseError("floating point JSON numbers are not supported");
+        mkInt(v, neg ? -n : n);
+    }
+
+    else if (strncmp(s, "true", 4) == 0) {
+        s += 4;
+        mkBool(v, true);
+    }
+
+    else if (strncmp(s, "false", 5) == 0) {
+        s += 5;
+        mkBool(v, false);
+    }
+
+    else if (strncmp(s, "null", 4) == 0) {
+        s += 4;
+        mkNull(v);
+    }
+
+    else throw JSONParseError("unrecognised JSON value");
+}
+
+
+void parseJSON(EvalState & state, const string & s_, Value & v)
+{
+    const char * s = s_.c_str();
+    parseJSON(state, s, v);
+    skipWhitespace(s);
+    if (*s) throw JSONParseError(format("expected end-of-string while parsing JSON value: %1%") % s);
+}
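+
+/* For example, parsing
+
+     {"name": "hello", "versions": [1, 2], "cross": false}
+
+   yields a value equivalent to the Nix expression
+
+     { name = "hello"; versions = [ 1 2 ]; cross = false; }
+
+   Floating point numbers and \u escapes are rejected by the parser above. */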
+
+
+}
diff --git a/src/libexpr/json-to-value.hh b/src/libexpr/json-to-value.hh
new file mode 100644
index 000000000000..33f35b16ce89
--- /dev/null
+++ b/src/libexpr/json-to-value.hh
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "eval.hh"
+
+#include <string>
+
+namespace nix {
+
+MakeError(JSONParseError, EvalError)
+
+void parseJSON(EvalState & state, const string & s, Value & v);
+
+}
diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l
new file mode 100644
index 000000000000..82520ee7a59a
--- /dev/null
+++ b/src/libexpr/lexer.l
@@ -0,0 +1,193 @@
+%option reentrant bison-bridge bison-locations
+%option noyywrap
+%option never-interactive
+
+
+%x STRING
+%x IND_STRING
+
+
+%{
+#include "nixexpr.hh"
+#include "parser-tab.hh"
+
+using namespace nix;
+
+namespace nix {
+
+
+static void initLoc(YYLTYPE * loc)
+{
+    loc->first_line = loc->last_line = 1;
+    loc->first_column = loc->last_column = 1;
+}
+
+
+static void adjustLoc(YYLTYPE * loc, const char * s, size_t len)
+{
+    loc->first_line = loc->last_line;
+    loc->first_column = loc->last_column;
+
+    while (len--) {
+       switch (*s++) {
+       case '\r':
+           if (*s == '\n') /* cr/lf */
+               s++;
+           /* fall through */
+       case '\n':
+           ++loc->last_line;
+           loc->last_column = 1;
+           break;
+       default:
+           ++loc->last_column;
+       }
+    }
+}
+
+
+static Expr * unescapeStr(SymbolTable & symbols, const char * s)
+{
+    string t;
+    char c;
+    while ((c = *s++)) {
+        if (c == '\\') {
+            assert(*s);
+            c = *s++;
+            if (c == 'n') t += '\n';
+            else if (c == 'r') t += '\r';
+            else if (c == 't') t += '\t';
+            else t += c;
+        }
+        else if (c == '\r') {
+            /* Normalise CR and CR/LF into LF. */
+            t += '\n';
+            if (*s == '\n') s++; /* cr/lf */
+        }
+        else t += c;
+    }
+    return new ExprString(symbols.create(t));
+}
+
+
+}
+
+#define YY_USER_INIT initLoc(yylloc)
+#define YY_USER_ACTION adjustLoc(yylloc, yytext, yyleng);
+
+%}
+
+
+ID          [a-zA-Z\_][a-zA-Z0-9\_\'\-]*
+INT         [0-9]+
+PATH        [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+
+SPATH       \<[a-zA-Z0-9\.\_\-\+]+(\/[a-zA-Z0-9\.\_\-\+]+)*\>
+URI         [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+
+
+
+%%
+
+
+if          { return IF; }
+then        { return THEN; }
+else        { return ELSE; }
+assert      { return ASSERT; }
+with        { return WITH; }
+let         { return LET; }
+in          { return IN; }
+rec         { return REC; }
+inherit     { return INHERIT; }
+or          { return OR_KW; }
+\.\.\.      { return ELLIPSIS; }
+
+\=\=        { return EQ; }
+\!\=        { return NEQ; }
+\<\=        { return LEQ; }
+\>\=        { return GEQ; }
+\&\&        { return AND; }
+\|\|        { return OR; }
+\-\>        { return IMPL; }
+\/\/        { return UPDATE; }
+\+\+        { return CONCAT; }
+
+{ID}        { yylval->id = strdup(yytext); return ID; }
+{INT}       { errno = 0;
+              yylval->n = strtol(yytext, 0, 10);
+              if (errno != 0)
+                  throw ParseError(format("invalid integer ‘%1%’") % yytext);
+              return INT;
+            }
+
+\$\{        { return DOLLAR_CURLY; }
+
+\"          { BEGIN(STRING); return '"'; }
+<STRING>([^\$\"\\]|\$[^\{\"]|\\.)+ {
+              /* !!! Not quite right: we want a follow restriction on
+                 "$", it shouldn't be followed by a "{".  Right now
+                 "$\"" will be consumed as part of a string, rather
+                 than a "$" followed by the string terminator.
+                 Disallow "$\"" for now. */
+              yylval->e = unescapeStr(data->symbols, yytext);
+              return STR;
+            }
+<STRING>\$\{  { BEGIN(INITIAL); return DOLLAR_CURLY; }
+<STRING>\"  { BEGIN(INITIAL); return '"'; }
+<STRING>.   return yytext[0]; /* just in case: shouldn't be reached */
+
+\'\'(\ *\n)?     { BEGIN(IND_STRING); return IND_STRING_OPEN; }
+<IND_STRING>([^\$\']|\$[^\{\']|\'[^\'\$])+ {
+                   yylval->e = new ExprIndStr(yytext);
+                   return IND_STR;
+                 }
+<IND_STRING>\'\'\$ {
+                   yylval->e = new ExprIndStr("$");
+                   return IND_STR;
+                 }
+<IND_STRING>\'\'\' {
+                   yylval->e = new ExprIndStr("''");
+                   return IND_STR;
+                 }
+<IND_STRING>\'\'\\. {
+                   yylval->e = unescapeStr(data->symbols, yytext + 2);
+                   return IND_STR;
+                 }
+<IND_STRING>\$\{ { BEGIN(INITIAL); return DOLLAR_CURLY; }
+<IND_STRING>\'\' { BEGIN(INITIAL); return IND_STRING_CLOSE; }
+<IND_STRING>\'   {
+                   yylval->e = new ExprIndStr("'");
+                   return IND_STR;
+                 }
+<IND_STRING>.    return yytext[0]; /* just in case: shouldn't be reached */
+
+{PATH}      { yylval->path = strdup(yytext); return PATH; }
+{SPATH}     { yylval->path = strdup(yytext); return SPATH; }
+{URI}       { yylval->uri = strdup(yytext); return URI; }
+
+[ \t\r\n]+    /* eat up whitespace */
+\#[^\r\n]*    /* single-line comments */
+\/\*([^*]|\*[^\/])*\*\/  /* long comments */
+
+.           return yytext[0];
+
+
+%%
+
+
+namespace nix {
+
+/* Horrible, disgusting hack: allow the parser to set the scanner
+   start condition back to STRING.  Necessary in interpolations like
+   "foo${expr}bar"; after the close brace we have to go back to the
+   STRING state. */
+void backToString(yyscan_t scanner)
+{
+    struct yyguts_t * yyg = (struct yyguts_t *) scanner;
+    BEGIN(STRING);
+}
+
+void backToIndString(yyscan_t scanner)
+{
+    struct yyguts_t * yyg = (struct yyguts_t *) scanner;
+    BEGIN(IND_STRING);
+}
+
+}
diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk
new file mode 100644
index 000000000000..75a0e185e369
--- /dev/null
+++ b/src/libexpr/local.mk
@@ -0,0 +1,30 @@
+libraries += libexpr
+
+libexpr_NAME = libnixexpr
+
+libexpr_DIR := $(d)
+
+libexpr_SOURCES := $(wildcard $(d)/*.cc) $(d)/lexer-tab.cc $(d)/parser-tab.cc
+
+libexpr_LIBS = libutil libstore libformat
+
+libexpr_LDFLAGS = -ldl
+
+# The dependency on libgc must be propagated (i.e. programs/libraries
+# that use libexpr must explicitly pass -lgc), because inline functions
+# in libexpr's header files call libgc.
+libexpr_LDFLAGS_PROPAGATED = $(BDW_GC_LIBS)
+
+$(d)/parser-tab.cc $(d)/parser-tab.hh: $(d)/parser.y
+	$(trace-gen) bison -v -o $(libexpr_DIR)/parser-tab.cc $< -d
+
+$(d)/lexer-tab.cc $(d)/lexer-tab.hh: $(d)/lexer.l
+	$(trace-gen) flex --outfile $(libexpr_DIR)/lexer-tab.cc --header-file=$(libexpr_DIR)/lexer-tab.hh $<
+
+$(d)/lexer-tab.o: $(d)/lexer-tab.hh $(d)/parser-tab.hh
+
+$(d)/parser-tab.o: $(d)/lexer-tab.hh $(d)/parser-tab.hh
+
+clean-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh
+
+dist-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh
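+
+# Note (an assumption about the mk/ build system, not stated above): the
+# _LDFLAGS_PROPAGATED mechanism should add $(BDW_GC_LIBS) to the link line
+# of any program or library that lists libexpr in its _LIBS, so consumers
+# do not have to name libgc themselves.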
diff --git a/src/libexpr/names.cc b/src/libexpr/names.cc
new file mode 100644
index 000000000000..781c2b6468f9
--- /dev/null
+++ b/src/libexpr/names.cc
@@ -0,0 +1,104 @@
+#include "names.hh"
+#include "util.hh"
+
+
+namespace nix {
+
+
+DrvName::DrvName() : hits(0)
+{
+    name = "";
+}
+
+
+/* Parse a derivation name.  The `name' part of a derivation name is
+   everything up to but not including the first dash *not* followed by
+   a letter.  The `version' part is the rest (excluding the separating
+   dash).  E.g., `apache-httpd-2.0.48' is parsed to (`apache-httpd',
+   `2.0.48'). */
+DrvName::DrvName(const string & s) : hits(0)
+{
+    name = fullName = s;
+    for (unsigned int i = 0; i < s.size(); ++i) {
+        /* !!! isalpha/isdigit are affected by the locale. */
+        if (s[i] == '-' && i + 1 < s.size() && !isalpha(s[i + 1])) {
+            name = string(s, 0, i);
+            version = string(s, i + 1);
+            break;
+        }
+    }
+}
+
+
+bool DrvName::matches(DrvName & n)
+{
+    if (name != "*" && name != n.name) return false;
+    if (version != "" && version != n.version) return false;
+    return true;
+}
+
+
+static string nextComponent(string::const_iterator & p,
+    const string::const_iterator end)
+{
+    /* Skip any dots and dashes (component separators). */
+    while (p != end && (*p == '.' || *p == '-')) ++p;
+
+    if (p == end) return "";
+
+    /* If the first character is a digit, consume the longest sequence
+       of digits.  Otherwise, consume the longest sequence of
+       non-digit, non-separator characters. */
+    string s;
+    if (isdigit(*p))
+        while (p != end && isdigit(*p)) s += *p++;
+    else
+        while (p != end && (!isdigit(*p) && *p != '.' && *p != '-'))
+            s += *p++;
+
+    return s;
+}
+
+
+static bool componentsLT(const string & c1, const string & c2)
+{
+    int n1, n2;
+    bool c1Num = string2Int(c1, n1), c2Num = string2Int(c2, n2);
+
+    if (c1Num && c2Num) return n1 < n2;
+    else if (c1 == "" && c2Num) return true;
+    else if (c1 == "pre" && c2 != "pre") return true;
+    else if (c2 == "pre") return false;
+    /* Assume that `2.3a' < `2.3.1'. */
+    else if (c2Num) return true;
+    else if (c1Num) return false;
+    else return c1 < c2;
+}
+
+
+int compareVersions(const string & v1, const string & v2)
+{
+    string::const_iterator p1 = v1.begin();
+    string::const_iterator p2 = v2.begin();
+
+    while (p1 != v1.end() || p2 != v2.end()) {
+        string c1 = nextComponent(p1, v1.end());
+        string c2 = nextComponent(p2, v2.end());
+        if (componentsLT(c1, c2)) return -1;
+        else if (componentsLT(c2, c1)) return 1;
+    }
+
+    return 0;
+}
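+
+/* Some examples of the resulting ordering:
+
+     compareVersions("2.0.48", "2.0.49")   < 0
+     compareVersions("2.3a", "2.3.1")      < 0   (cf. the rule above)
+     compareVersions("2.3pre1", "2.3")     < 0   ("pre" sorts before anything)
+     compareVersions("1.0", "1.0")        == 0
+*/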
+
+
+DrvNames drvNamesFromArgs(const Strings & opArgs)
+{
+    DrvNames result;
+    foreach (Strings::const_iterator, i, opArgs)
+        result.push_back(DrvName(*i));
+    return result;
+}
+
+
+}
diff --git a/src/libexpr/names.hh b/src/libexpr/names.hh
new file mode 100644
index 000000000000..ebe113e82ac1
--- /dev/null
+++ b/src/libexpr/names.hh
@@ -0,0 +1,24 @@
+#pragma once
+
+#include "types.hh"
+
+namespace nix {
+
+struct DrvName
+{
+    string fullName;
+    string name;
+    string version;
+    unsigned int hits;
+
+    DrvName();
+    DrvName(const string & s);
+    bool matches(DrvName & n);
+};
+
+typedef list<DrvName> DrvNames;
+
+int compareVersions(const string & v1, const string & v2);
+DrvNames drvNamesFromArgs(const Strings & opArgs);
+
+}
diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc
new file mode 100644
index 000000000000..c8521718b82a
--- /dev/null
+++ b/src/libexpr/nixexpr.cc
@@ -0,0 +1,383 @@
+#include "nixexpr.hh"
+#include "derivations.hh"
+#include "util.hh"
+
+#include <cstdlib>
+
+
+namespace nix {
+
+
+/* Displaying abstract syntax trees. */
+
+std::ostream & operator << (std::ostream & str, Expr & e)
+{
+    e.show(str);
+    return str;
+}
+
+void Expr::show(std::ostream & str)
+{
+    abort();
+}
+
+void ExprInt::show(std::ostream & str)
+{
+    str << n;
+}
+
+void ExprString::show(std::ostream & str)
+{
+    str << "\"" << s << "\""; // !!! escaping
+}
+
+void ExprPath::show(std::ostream & str)
+{
+    str << s;
+}
+
+void ExprVar::show(std::ostream & str)
+{
+    str << name;
+}
+
+void ExprSelect::show(std::ostream & str)
+{
+    str << "(" << *e << ")." << showAttrPath(attrPath);
+    if (def) str << " or " << *def;
+}
+
+void ExprOpHasAttr::show(std::ostream & str)
+{
+    str << "(" << *e << ") ? " << showAttrPath(attrPath);
+}
+
+void ExprAttrs::show(std::ostream & str)
+{
+    if (recursive) str << "rec ";
+    str << "{ ";
+    foreach (AttrDefs::iterator, i, attrs)
+        if (i->second.inherited)
+            str << "inherit " << i->first << " " << "; ";
+        else
+            str << i->first << " = " << *i->second.e << "; ";
+    foreach (DynamicAttrDefs::iterator, i, dynamicAttrs)
+        str << "\"${" << *i->nameExpr << "}\" = " << *i->valueExpr << "; ";
+    str << "}";
+}
+
+void ExprList::show(std::ostream & str)
+{
+    str << "[ ";
+    foreach (vector<Expr *>::iterator, i, elems)
+        str << "(" << **i << ") ";
+    str << "]";
+}
+
+void ExprLambda::show(std::ostream & str)
+{
+    str << "(";
+    if (matchAttrs) {
+        str << "{ ";
+        bool first = true;
+        foreach (Formals::Formals_::iterator, i, formals->formals) {
+            if (first) first = false; else str << ", ";
+            str << i->name;
+            if (i->def) str << " ? " << *i->def;
+        }
+        str << " }";
+        if (!arg.empty()) str << " @ ";
+    }
+    if (!arg.empty()) str << arg;
+    str << ": " << *body << ")";
+}
+
+void ExprLet::show(std::ostream & str)
+{
+    str << "let ";
+    foreach (ExprAttrs::AttrDefs::iterator, i, attrs->attrs)
+        if (i->second.inherited)
+            str << "inherit " << i->first << "; ";
+        else
+            str << i->first << " = " << *i->second.e << "; ";
+    str << "in " << *body;
+}
+
+void ExprWith::show(std::ostream & str)
+{
+    str << "with " << *attrs << "; " << *body;
+}
+
+void ExprIf::show(std::ostream & str)
+{
+    str << "if " << *cond << " then " << *then << " else " << *else_;
+}
+
+void ExprAssert::show(std::ostream & str)
+{
+    str << "assert " << *cond << "; " << *body;
+}
+
+void ExprOpNot::show(std::ostream & str)
+{
+    str << "! " << *e;
+}
+
+void ExprConcatStrings::show(std::ostream & str)
+{
+    bool first = true;
+    foreach (vector<Expr *>::iterator, i, *es) {
+        if (first) first = false; else str << " + ";
+        str << **i;
+    }
+}
+
+void ExprPos::show(std::ostream & str)
+{
+    str << "__curPos";
+}
+
+
+std::ostream & operator << (std::ostream & str, const Pos & pos)
+{
+    if (!pos)
+        str << "undefined position";
+    else
+        str << (format(ANSI_BOLD "%1%" ANSI_NORMAL ":%2%:%3%") % pos.file % pos.line % pos.column).str();
+    return str;
+}
+
+
+string showAttrPath(const AttrPath & attrPath)
+{
+    std::ostringstream out;
+    bool first = true;
+    foreach (AttrPath::const_iterator, i, attrPath) {
+        if (!first)
+            out << '.';
+        else
+            first = false;
+        if (i->symbol.set())
+            out << i->symbol;
+        else
+            out << "\"${" << *i->expr << "}\"";
+    }
+    return out.str();
+}
+
+
+Pos noPos;
+
+
+/* Computing levels/displacements for variables. */
+
+void Expr::bindVars(const StaticEnv & env)
+{
+    abort();
+}
+
+void ExprInt::bindVars(const StaticEnv & env)
+{
+}
+
+void ExprString::bindVars(const StaticEnv & env)
+{
+}
+
+void ExprPath::bindVars(const StaticEnv & env)
+{
+}
+
+void ExprVar::bindVars(const StaticEnv & env)
+{
+    /* Check whether the variable appears in the environment.  If so,
+       set its level and displacement. */
+    const StaticEnv * curEnv;
+    unsigned int level;
+    int withLevel = -1;
+    for (curEnv = &env, level = 0; curEnv; curEnv = curEnv->up, level++) {
+        if (curEnv->isWith) {
+            if (withLevel == -1) withLevel = level;
+        } else {
+            StaticEnv::Vars::const_iterator i = curEnv->vars.find(name);
+            if (i != curEnv->vars.end()) {
+                fromWith = false;
+                this->level = level;
+                displ = i->second;
+                return;
+            }
+        }
+    }
+
+    /* Otherwise, the variable must be obtained from the nearest
+       enclosing `with'.  If there is no `with', then we can issue an
+       "undefined variable" error now. */
+    if (withLevel == -1) throw UndefinedVarError(format("undefined variable ‘%1%’ at %2%") % name % pos);
+
+    fromWith = true;
+    this->level = withLevel;
+}
+
+void ExprSelect::bindVars(const StaticEnv & env)
+{
+    e->bindVars(env);
+    if (def) def->bindVars(env);
+    foreach (AttrPath::iterator, i, attrPath)
+        if (!i->symbol.set())
+            i->expr->bindVars(env);
+}
+
+void ExprOpHasAttr::bindVars(const StaticEnv & env)
+{
+    e->bindVars(env);
+    foreach (AttrPath::iterator, i, attrPath)
+        if (!i->symbol.set())
+            i->expr->bindVars(env);
+}
+
+void ExprAttrs::bindVars(const StaticEnv & env)
+{
+    const StaticEnv * dynamicEnv = &env;
+    StaticEnv newEnv(false, &env);
+
+    if (recursive) {
+        dynamicEnv = &newEnv;
+
+        unsigned int displ = 0;
+        foreach (AttrDefs::iterator, i, attrs)
+            newEnv.vars[i->first] = i->second.displ = displ++;
+
+        foreach (AttrDefs::iterator, i, attrs)
+            i->second.e->bindVars(i->second.inherited ? env : newEnv);
+    }
+
+    else
+        foreach (AttrDefs::iterator, i, attrs)
+            i->second.e->bindVars(env);
+
+    foreach (DynamicAttrDefs::iterator, i, dynamicAttrs) {
+        i->nameExpr->bindVars(*dynamicEnv);
+        i->valueExpr->bindVars(*dynamicEnv);
+    }
+}
+
+void ExprList::bindVars(const StaticEnv & env)
+{
+    foreach (vector<Expr *>::iterator, i, elems)
+        (*i)->bindVars(env);
+}
+
+void ExprLambda::bindVars(const StaticEnv & env)
+{
+    StaticEnv newEnv(false, &env);
+
+    unsigned int displ = 0;
+
+    if (!arg.empty()) newEnv.vars[arg] = displ++;
+
+    if (matchAttrs) {
+        foreach (Formals::Formals_::iterator, i, formals->formals)
+            newEnv.vars[i->name] = displ++;
+
+        foreach (Formals::Formals_::iterator, i, formals->formals)
+            if (i->def) i->def->bindVars(newEnv);
+    }
+
+    body->bindVars(newEnv);
+}
+
+void ExprLet::bindVars(const StaticEnv & env)
+{
+    StaticEnv newEnv(false, &env);
+
+    unsigned int displ = 0;
+    foreach (ExprAttrs::AttrDefs::iterator, i, attrs->attrs)
+        newEnv.vars[i->first] = i->second.displ = displ++;
+
+    foreach (ExprAttrs::AttrDefs::iterator, i, attrs->attrs)
+        i->second.e->bindVars(i->second.inherited ? env : newEnv);
+
+    body->bindVars(newEnv);
+}
+
+void ExprWith::bindVars(const StaticEnv & env)
+{
+    /* Does this `with' have an enclosing `with'?  If so, record its
+       level so that `lookupVar' can look up variables in the previous
+       `with' if this one doesn't contain the desired attribute. */
+    const StaticEnv * curEnv;
+    unsigned int level;
+    prevWith = 0;
+    for (curEnv = &env, level = 1; curEnv; curEnv = curEnv->up, level++)
+        if (curEnv->isWith) {
+            prevWith = level;
+            break;
+        }
+
+    attrs->bindVars(env);
+    StaticEnv newEnv(true, &env);
+    body->bindVars(newEnv);
+}
+
+void ExprIf::bindVars(const StaticEnv & env)
+{
+    cond->bindVars(env);
+    then->bindVars(env);
+    else_->bindVars(env);
+}
+
+void ExprAssert::bindVars(const StaticEnv & env)
+{
+    cond->bindVars(env);
+    body->bindVars(env);
+}
+
+void ExprOpNot::bindVars(const StaticEnv & env)
+{
+    e->bindVars(env);
+}
+
+void ExprConcatStrings::bindVars(const StaticEnv & env)
+{
+    foreach (vector<Expr *>::iterator, i, *es)
+        (*i)->bindVars(env);
+}
+
+void ExprPos::bindVars(const StaticEnv & env)
+{
+}
+
+
+/* Storing function names. */
+
+void Expr::setName(Symbol & name)
+{
+}
+
+
+void ExprLambda::setName(Symbol & name)
+{
+    this->name = name;
+    body->setName(name);
+}
+
+
+string ExprLambda::showNamePos() const
+{
+    return (format("%1% at %2%") % (name.set() ? "‘" + (string) name + "’" : "anonymous function") % pos).str();
+}
+
+
+
+/* Symbol table. */
+
+size_t SymbolTable::totalSize() const
+{
+    size_t n = 0;
+    foreach (Symbols::const_iterator, i, symbols)
+        n += i->size();
+    return n;
+}
+
+
+}
diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh
new file mode 100644
index 000000000000..2619a7026fca
--- /dev/null
+++ b/src/libexpr/nixexpr.hh
@@ -0,0 +1,332 @@
+#pragma once
+
+#include "value.hh"
+#include "symbol-table.hh"
+
+#include <map>
+
+
+namespace nix {
+
+
+MakeError(EvalError, Error)
+MakeError(ParseError, Error)
+MakeError(AssertionError, EvalError)
+MakeError(ThrownError, AssertionError)
+MakeError(Abort, EvalError)
+MakeError(TypeError, EvalError)
+MakeError(UndefinedVarError, Error)
+
+
+/* Position objects. */
+
+struct Pos
+{
+    Symbol file;
+    unsigned int line, column;
+    Pos() : line(0), column(0) { };
+    Pos(const Symbol & file, unsigned int line, unsigned int column)
+        : file(file), line(line), column(column) { };
+    operator bool() const
+    {
+        return line != 0;
+    }
+    bool operator < (const Pos & p2) const
+    {
+        if (!line) return p2.line;
+        if (!p2.line) return false;
+        int d = ((string) file).compare((string) p2.file);
+        if (d < 0) return true;
+        if (d > 0) return false;
+        if (line < p2.line) return true;
+        if (line > p2.line) return false;
+        return column < p2.column;
+    }
+};
+
+extern Pos noPos;
+
+std::ostream & operator << (std::ostream & str, const Pos & pos);
+
+
+struct Env;
+struct Value;
+class EvalState;
+struct StaticEnv;
+struct ExprConcatStrings;
+
+
+/* An attribute path is a sequence of attribute names. */
+struct AttrName
+{
+    Symbol symbol;
+    ExprConcatStrings * expr;
+    AttrName(const Symbol & s) : symbol(s) {};
+    AttrName(ExprConcatStrings * e) : expr(e) {};
+};
+
+typedef std::vector<AttrName> AttrPath;
+
+string showAttrPath(const AttrPath & attrPath);
+
+
+/* Abstract syntax of Nix expressions. */
+
+struct Expr
+{
+    virtual ~Expr() { };
+    virtual void show(std::ostream & str);
+    virtual void bindVars(const StaticEnv & env);
+    virtual void eval(EvalState & state, Env & env, Value & v);
+    virtual Value * maybeThunk(EvalState & state, Env & env);
+    virtual void setName(Symbol & name);
+};
+
+std::ostream & operator << (std::ostream & str, Expr & e);
+
+#define COMMON_METHODS \
+    void show(std::ostream & str); \
+    void eval(EvalState & state, Env & env, Value & v); \
+    void bindVars(const StaticEnv & env);
+
+struct ExprInt : Expr
+{
+    NixInt n;
+    Value v;
+    ExprInt(NixInt n) : n(n) { mkInt(v, n); };
+    COMMON_METHODS
+    Value * maybeThunk(EvalState & state, Env & env);
+};
+
+struct ExprString : Expr
+{
+    Symbol s;
+    Value v;
+    ExprString(const Symbol & s) : s(s) { mkString(v, s); };
+    COMMON_METHODS
+    Value * maybeThunk(EvalState & state, Env & env);
+};
+
+/* Temporary class used during parsing of indented strings. */
+struct ExprIndStr : Expr
+{
+    string s;
+    ExprIndStr(const string & s) : s(s) { };
+};
+
+struct ExprPath : Expr
+{
+    string s;
+    Value v;
+    ExprPath(const string & s) : s(s) { mkPathNoCopy(v, this->s.c_str()); };
+    COMMON_METHODS
+    Value * maybeThunk(EvalState & state, Env & env);
+};
+
+struct ExprVar : Expr
+{
+    Pos pos;
+    Symbol name;
+
+    /* Whether the variable comes from an environment (e.g. a rec, let
+       or function argument) or from a "with". */
+    bool fromWith;
+
+    /* In the former case, the value is obtained by going `level'
+       levels up from the current environment and getting the
+       `displ'th value in that environment.  In the latter case, the
+       value is obtained by getting the attribute named `name' from
+       the set stored in the environment that is `level' levels up
+       from the current one.*/
+    unsigned int level;
+    unsigned int displ;
+
+    ExprVar(const Symbol & name) : name(name) { };
+    ExprVar(const Pos & pos, const Symbol & name) : pos(pos), name(name) { };
+    COMMON_METHODS
+    Value * maybeThunk(EvalState & state, Env & env);
+};
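+
+/* For instance, in ‘x: y: x’ the innermost ‘x’ has fromWith = false,
+   level = 1 (one environment up, past the one binding ‘y’) and displ = 0,
+   while in ‘with pkgs; hello’ the variable ‘hello’ has fromWith = true and
+   is looked up by name in the ‘with’ set at runtime. */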
+
+struct ExprSelect : Expr
+{
+    Pos pos;
+    Expr * e, * def;
+    AttrPath attrPath;
+    ExprSelect(const Pos & pos, Expr * e, const AttrPath & attrPath, Expr * def) : pos(pos), e(e), def(def), attrPath(attrPath) { };
+    ExprSelect(const Pos & pos, Expr * e, const Symbol & name) : pos(pos), e(e), def(0) { attrPath.push_back(AttrName(name)); };
+    COMMON_METHODS
+};
+
+struct ExprOpHasAttr : Expr
+{
+    Expr * e;
+    AttrPath attrPath;
+    ExprOpHasAttr(Expr * e, const AttrPath & attrPath) : e(e), attrPath(attrPath) { };
+    COMMON_METHODS
+};
+
+struct ExprAttrs : Expr
+{
+    bool recursive;
+    struct AttrDef {
+        bool inherited;
+        Expr * e;
+        Pos pos;
+        unsigned int displ; // displacement
+        AttrDef(Expr * e, const Pos & pos, bool inherited=false) : inherited(inherited), e(e), pos(pos) { };
+        AttrDef() { };
+    };
+    typedef std::map<Symbol, AttrDef> AttrDefs;
+    AttrDefs attrs;
+    struct DynamicAttrDef {
+        ExprConcatStrings * nameExpr;
+        Expr * valueExpr;
+        Pos pos;
+        DynamicAttrDef(ExprConcatStrings * nameExpr, Expr * valueExpr, const Pos & pos) : nameExpr(nameExpr), valueExpr(valueExpr), pos(pos) { };
+    };
+    typedef std::vector<DynamicAttrDef> DynamicAttrDefs;
+    DynamicAttrDefs dynamicAttrs;
+    ExprAttrs() : recursive(false) { };
+    COMMON_METHODS
+};
+
+struct ExprList : Expr
+{
+    std::vector<Expr *> elems;
+    ExprList() { };
+    COMMON_METHODS
+};
+
+struct Formal
+{
+    Symbol name;
+    Expr * def;
+    Formal(const Symbol & name, Expr * def) : name(name), def(def) { };
+};
+
+struct Formals
+{
+    typedef std::list<Formal> Formals_;
+    Formals_ formals;
+    std::set<Symbol> argNames; // used during parsing
+    bool ellipsis;
+};
+
+struct ExprLambda : Expr
+{
+    Pos pos;
+    Symbol name;
+    Symbol arg;
+    bool matchAttrs;
+    Formals * formals;
+    Expr * body;
+    ExprLambda(const Pos & pos, const Symbol & arg, bool matchAttrs, Formals * formals, Expr * body)
+        : pos(pos), arg(arg), matchAttrs(matchAttrs), formals(formals), body(body)
+    {
+        if (!arg.empty() && formals && formals->argNames.find(arg) != formals->argNames.end())
+            throw ParseError(format("duplicate formal function argument ‘%1%’ at %2%")
+                % arg % pos);
+    };
+    void setName(Symbol & name);
+    string showNamePos() const;
+    COMMON_METHODS
+};
+
+struct ExprLet : Expr
+{
+    ExprAttrs * attrs;
+    Expr * body;
+    ExprLet(ExprAttrs * attrs, Expr * body) : attrs(attrs), body(body) { };
+    COMMON_METHODS
+};
+
+struct ExprWith : Expr
+{
+    Pos pos;
+    Expr * attrs, * body;
+    unsigned int prevWith;
+    ExprWith(const Pos & pos, Expr * attrs, Expr * body) : pos(pos), attrs(attrs), body(body) { };
+    COMMON_METHODS
+};
+
+struct ExprIf : Expr
+{
+    Expr * cond, * then, * else_;
+    ExprIf(Expr * cond, Expr * then, Expr * else_) : cond(cond), then(then), else_(else_) { };
+    COMMON_METHODS
+};
+
+struct ExprAssert : Expr
+{
+    Pos pos;
+    Expr * cond, * body;
+    ExprAssert(const Pos & pos, Expr * cond, Expr * body) : pos(pos), cond(cond), body(body) { };
+    COMMON_METHODS
+};
+
+struct ExprOpNot : Expr
+{
+    Expr * e;
+    ExprOpNot(Expr * e) : e(e) { };
+    COMMON_METHODS
+};
+
+#define MakeBinOp(name, s) \
+    struct Expr##name : Expr \
+    { \
+        Pos pos; \
+        Expr * e1, * e2; \
+        Expr##name(Expr * e1, Expr * e2) : e1(e1), e2(e2) { }; \
+        Expr##name(const Pos & pos, Expr * e1, Expr * e2) : pos(pos), e1(e1), e2(e2) { }; \
+        void show(std::ostream & str) \
+        { \
+            str << *e1 << " " s " " << *e2; \
+        } \
+        void bindVars(const StaticEnv & env) \
+        { \
+            e1->bindVars(env); e2->bindVars(env); \
+        } \
+        void eval(EvalState & state, Env & env, Value & v); \
+    };
+
+MakeBinOp(App, "")
+MakeBinOp(OpEq, "==")
+MakeBinOp(OpNEq, "!=")
+MakeBinOp(OpAnd, "&&")
+MakeBinOp(OpOr, "||")
+MakeBinOp(OpImpl, "->")
+MakeBinOp(OpUpdate, "//")
+MakeBinOp(OpConcatLists, "++")
+
+struct ExprConcatStrings : Expr
+{
+    Pos pos;
+    bool forceString;
+    vector<Expr *> * es;
+    ExprConcatStrings(const Pos & pos, bool forceString, vector<Expr *> * es)
+        : pos(pos), forceString(forceString), es(es) { };
+    COMMON_METHODS
+};
+
+struct ExprPos : Expr
+{
+    Pos pos;
+    ExprPos(const Pos & pos) : pos(pos) { };
+    COMMON_METHODS
+};
+
+
+/* Static environments are used to map variable names onto (level,
+   displacement) pairs used to obtain the value of the variable at
+   runtime. */
+struct StaticEnv
+{
+    bool isWith;
+    const StaticEnv * up;
+    typedef std::map<Symbol, unsigned int> Vars;
+    Vars vars;
+    StaticEnv(bool isWith, const StaticEnv * up) : isWith(isWith), up(up) { };
+};
+
+
+}
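
[Editor's note] The StaticEnv chain declared above maps a variable name to a (level, displacement) pair: how many environments to walk up at runtime, and the slot index inside that environment. As a rough standalone sketch of that lookup (not part of this diff; `Scope`, `resolve` and the example variables are hypothetical stand-ins for the interned `Symbol`/`StaticEnv` machinery):

    #include <iostream>
    #include <map>
    #include <string>

    // Stand-in for nix::StaticEnv: a chain of scopes, each mapping a variable
    // name to its slot (displacement) in the corresponding runtime environment.
    struct Scope {
        const Scope * up;
        std::map<std::string, unsigned int> vars;
    };

    // Resolve a name to (level, displacement): level = how many scopes to walk
    // up at runtime, displacement = index within that scope.  Returns false if
    // the variable is undefined.
    bool resolve(const Scope & scope, const std::string & name,
        unsigned int & level, unsigned int & displ)
    {
        level = 0;
        for (const Scope * s = &scope; s; s = s->up, ++level) {
            auto i = s->vars.find(name);
            if (i != s->vars.end()) { displ = i->second; return true; }
        }
        return false;
    }

    int main()
    {
        Scope outer{nullptr, {{"x", 0}, {"y", 1}}};
        Scope inner{&outer, {{"z", 0}}};

        unsigned int level, displ;
        if (resolve(inner, "y", level, displ))
            std::cout << "level " << level << ", slot " << displ << "\n"; // level 1, slot 1
    }

The real evaluator additionally treats `with` scopes specially (the isWith flag), which this sketch ignores.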
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
new file mode 100644
index 000000000000..60a87ca94e10
--- /dev/null
+++ b/src/libexpr/parser.y
@@ -0,0 +1,656 @@
+%glr-parser
+%pure-parser
+%locations
+%error-verbose
+%defines
+/* %no-lines */
+%parse-param { void * scanner }
+%parse-param { nix::ParseData * data }
+%lex-param { void * scanner }
+%lex-param { nix::ParseData * data }
+%expect 1
+%expect-rr 1
+
+%code requires {
+
+#ifndef BISON_HEADER
+#define BISON_HEADER
+
+#include "util.hh"
+
+#include "nixexpr.hh"
+#include "eval.hh"
+
+namespace nix {
+
+    struct ParseData
+    {
+        EvalState & state;
+        SymbolTable & symbols;
+        Expr * result;
+        Path basePath;
+        Symbol path;
+        string error;
+        Symbol sLetBody;
+        ParseData(EvalState & state)
+            : state(state)
+            , symbols(state.symbols)
+            , sLetBody(symbols.create("<let-body>"))
+            { };
+    };
+
+}
+
+#define YY_DECL int yylex \
+    (YYSTYPE * yylval_param, YYLTYPE * yylloc_param, yyscan_t yyscanner, nix::ParseData * data)
+
+#endif
+
+}
+
+%{
+
+#include "parser-tab.hh"
+#include "lexer-tab.hh"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+YY_DECL;
+
+using namespace nix;
+
+
+namespace nix {
+
+
+static void dupAttr(const AttrPath & attrPath, const Pos & pos, const Pos & prevPos)
+{
+    throw ParseError(format("attribute ‘%1%’ at %2% already defined at %3%")
+        % showAttrPath(attrPath) % pos % prevPos);
+}
+
+
+static void dupAttr(Symbol attr, const Pos & pos, const Pos & prevPos)
+{
+    throw ParseError(format("attribute ‘%1%’ at %2% already defined at %3%")
+        % attr % pos % prevPos);
+}
+
+
+static void addAttr(ExprAttrs * attrs, AttrPath & attrPath,
+    Expr * e, const Pos & pos)
+{
+    AttrPath::iterator i;
+    // All attrpaths have at least one attr
+    assert(!attrPath.empty());
+    for (i = attrPath.begin(); i + 1 < attrPath.end(); i++) {
+        if (i->symbol.set()) {
+            ExprAttrs::AttrDefs::iterator j = attrs->attrs.find(i->symbol);
+            if (j != attrs->attrs.end()) {
+                if (!j->second.inherited) {
+                    ExprAttrs * attrs2 = dynamic_cast<ExprAttrs *>(j->second.e);
+                    if (!attrs2) dupAttr(attrPath, pos, j->second.pos);
+                    attrs = attrs2;
+                } else
+                    dupAttr(attrPath, pos, j->second.pos);
+            } else {
+                ExprAttrs * nested = new ExprAttrs;
+                attrs->attrs[i->symbol] = ExprAttrs::AttrDef(nested, pos);
+                attrs = nested;
+            }
+        } else {
+            ExprAttrs *nested = new ExprAttrs;
+            attrs->dynamicAttrs.push_back(ExprAttrs::DynamicAttrDef(i->expr, nested, pos));
+            attrs = nested;
+        }
+    }
+    if (i->symbol.set()) {
+        ExprAttrs::AttrDefs::iterator j = attrs->attrs.find(i->symbol);
+        if (j != attrs->attrs.end()) {
+            dupAttr(attrPath, pos, j->second.pos);
+        } else {
+            attrs->attrs[i->symbol] = ExprAttrs::AttrDef(e, pos);
+            e->setName(i->symbol);
+        }
+    } else {
+        attrs->dynamicAttrs.push_back(ExprAttrs::DynamicAttrDef(i->expr, e, pos));
+    }
+}
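
[Editor's note] addAttr() walks an attribute path such as `a.b.c`, reusing or creating an intermediate attribute set for every component except the last, and only then binds the final component to the expression. A standalone sketch of the same create-or-reuse walk over a plain tree of string-keyed maps (the `Node` type and the dump helper are hypothetical, not the real ExprAttrs):

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for ExprAttrs: each node holds a leaf value and/or
    // nested children.
    struct Node {
        std::string value;                   // set on leaf nodes only
        std::map<std::string, Node> children;
    };

    // Insert `value` at the given dotted path (assumed non-empty), creating
    // intermediate nodes as needed, mirroring the loop in addAttr above.
    void insert(Node & root, const std::vector<std::string> & path, const std::string & value)
    {
        Node * cur = &root;
        for (size_t i = 0; i + 1 < path.size(); ++i)
            cur = &cur->children[path[i]];   // creates the intermediate node if absent
        cur->children[path.back()].value = value;
    }

    void dump(const Node & n, std::string prefix = "")
    {
        for (auto & c : n.children) {
            if (!c.second.value.empty())
                std::cout << prefix << c.first << " = " << c.second.value << "\n";
            dump(c.second, prefix + c.first + ".");
        }
    }

    int main()
    {
        Node root;
        insert(root, {"a", "b", "c"}, "1");   // like `a.b.c = 1;`
        insert(root, {"a", "b", "d"}, "2");   // reuses the existing a.b node
        dump(root);                           // prints a.b.c = 1 and a.b.d = 2
    }

Unlike the sketch, the real addAttr also rejects duplicate definitions (via dupAttr) and handles dynamic attribute names.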
+
+
+static void addFormal(const Pos & pos, Formals * formals, const Formal & formal)
+{
+    if (formals->argNames.find(formal.name) != formals->argNames.end())
+        throw ParseError(format("duplicate formal function argument ‘%1%’ at %2%")
+            % formal.name % pos);
+    formals->formals.push_front(formal);
+    formals->argNames.insert(formal.name);
+}
+
+
+static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vector<Expr *> & es)
+{
+    if (es.empty()) return new ExprString(symbols.create(""));
+
+    /* Figure out the minimum indentation.  Note that by design
+       whitespace-only final lines are not taken into account.  (So
+       the " " in "\n ''" is ignored, but the " " in "\n foo''" is.) */
+    bool atStartOfLine = true; /* = seen only whitespace in the current line */
+    unsigned int minIndent = 1000000;
+    unsigned int curIndent = 0;
+    foreach (vector<Expr *>::iterator, i, es) {
+        ExprIndStr * e = dynamic_cast<ExprIndStr *>(*i);
+        if (!e) {
+            /* Anti-quotations end the current start-of-line whitespace. */
+            if (atStartOfLine) {
+                atStartOfLine = false;
+                if (curIndent < minIndent) minIndent = curIndent;
+            }
+            continue;
+        }
+        for (unsigned int j = 0; j < e->s.size(); ++j) {
+            if (atStartOfLine) {
+                if (e->s[j] == ' ')
+                    curIndent++;
+                else if (e->s[j] == '\n') {
+                    /* Empty line, doesn't influence minimum
+                       indentation. */
+                    curIndent = 0;
+                } else {
+                    atStartOfLine = false;
+                    if (curIndent < minIndent) minIndent = curIndent;
+                }
+            } else if (e->s[j] == '\n') {
+                atStartOfLine = true;
+                curIndent = 0;
+            }
+        }
+    }
+
+    /* Strip spaces from each line. */
+    vector<Expr *> * es2 = new vector<Expr *>;
+    atStartOfLine = true;
+    unsigned int curDropped = 0;
+    unsigned int n = es.size();
+    for (vector<Expr *>::iterator i = es.begin(); i != es.end(); ++i, --n) {
+        ExprIndStr * e = dynamic_cast<ExprIndStr *>(*i);
+        if (!e) {
+            atStartOfLine = false;
+            curDropped = 0;
+            es2->push_back(*i);
+            continue;
+        }
+
+        string s2;
+        for (unsigned int j = 0; j < e->s.size(); ++j) {
+            if (atStartOfLine) {
+                if (e->s[j] == ' ') {
+                    if (curDropped++ >= minIndent)
+                        s2 += e->s[j];
+                }
+                else if (e->s[j] == '\n') {
+                    curDropped = 0;
+                    s2 += e->s[j];
+                } else {
+                    atStartOfLine = false;
+                    curDropped = 0;
+                    s2 += e->s[j];
+                }
+            } else {
+                s2 += e->s[j];
+                if (e->s[j] == '\n') atStartOfLine = true;
+            }
+        }
+
+        /* Remove the last line if it is empty and consists only of
+           spaces. */
+        if (n == 1) {
+            string::size_type p = s2.find_last_of('\n');
+            if (p != string::npos && s2.find_first_not_of(' ', p + 1) == string::npos)
+                s2 = string(s2, 0, p + 1);
+        }
+
+        es2->push_back(new ExprString(symbols.create(s2)));
+    }
+
+    /* If this is a single string, then don't do a concatenation. */
+    return es2->size() == 1 && dynamic_cast<ExprString *>((*es2)[0]) ? (*es2)[0] : new ExprConcatStrings(pos, true, es2);
+}
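
[Editor's note] stripIndentation() makes two passes over the pieces of an indented string (''...''): the first computes the minimum indentation over all lines that contain non-whitespace, the second drops exactly that many leading spaces from each line. A rough standalone equivalent for a plain string, ignoring antiquotations and the trailing-line special case (function name and behaviour are illustrative only):

    #include <algorithm>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Strip the common leading indentation from a multi-line string, roughly
    // the way '' ... '' strings are handled above.
    std::string stripIndentation(const std::string & s)
    {
        std::vector<std::string> lines;
        std::istringstream in(s);
        for (std::string line; std::getline(in, line); )
            lines.push_back(line);

        // Pass 1: minimum indentation over lines with non-whitespace content.
        size_t minIndent = std::string::npos;
        for (auto & line : lines) {
            size_t i = line.find_first_not_of(' ');
            if (i != std::string::npos) minIndent = std::min(minIndent, i);
        }
        if (minIndent == std::string::npos) minIndent = 0; // all-blank input

        // Pass 2: drop that many spaces (or the whole line, if shorter).
        std::string res;
        for (auto & line : lines)
            res += (line.size() > minIndent ? line.substr(minIndent) : "") + "\n";
        return res;
    }

    int main()
    {
        std::cout << stripIndentation("    foo\n      bar\n    baz\n");
        // prints "foo\n  bar\nbaz\n"
    }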
+
+
+void backToString(yyscan_t scanner);
+void backToIndString(yyscan_t scanner);
+
+
+static inline Pos makeCurPos(const YYLTYPE & loc, ParseData * data)
+{
+    return Pos(data->path, loc.first_line, loc.first_column);
+}
+
+#define CUR_POS makeCurPos(*yylocp, data)
+
+
+}
+
+
+void yyerror(YYLTYPE * loc, yyscan_t scanner, ParseData * data, const char * error)
+{
+    data->error = (format("%1%, at %2%")
+        % error % makeCurPos(*loc, data)).str();
+}
+
+
+%}
+
+%union {
+  // !!! We're probably leaking stuff here.
+  nix::Expr * e;
+  nix::ExprList * list;
+  nix::ExprAttrs * attrs;
+  nix::Formals * formals;
+  nix::Formal * formal;
+  nix::NixInt n;
+  const char * id; // !!! -> Symbol
+  char * path;
+  char * uri;
+  std::vector<nix::AttrName> * attrNames;
+  std::vector<nix::Expr *> * string_parts;
+}
+
+%type <e> start expr expr_function expr_if expr_op
+%type <e> expr_app expr_select expr_simple
+%type <list> expr_list
+%type <attrs> binds
+%type <formals> formals
+%type <formal> formal
+%type <attrNames> attrs attrpath
+%type <string_parts> string_parts_interpolated ind_string_parts
+%type <e> string_parts string_attr
+%type <id> attr
+%token <id> ID ATTRPATH
+%token <e> STR IND_STR
+%token <n> INT
+%token <path> PATH SPATH
+%token <uri> URI
+%token IF THEN ELSE ASSERT WITH LET IN REC INHERIT EQ NEQ AND OR IMPL OR_KW
+%token DOLLAR_CURLY /* == ${ */
+%token IND_STRING_OPEN IND_STRING_CLOSE
+%token ELLIPSIS
+
+%nonassoc IMPL
+%left OR
+%left AND
+%nonassoc EQ NEQ
+%left '<' '>' LEQ GEQ
+%right UPDATE
+%left NOT
+%left '+' '-'
+%left '*' '/'
+%right CONCAT
+%nonassoc '?'
+%nonassoc '~'
+%nonassoc NEGATE
+
+%%
+
+start: expr { data->result = $1; };
+
+expr: expr_function;
+
+expr_function
+  : ID ':' expr_function
+    { $$ = new ExprLambda(CUR_POS, data->symbols.create($1), false, 0, $3); }
+  | '{' formals '}' ':' expr_function
+    { $$ = new ExprLambda(CUR_POS, data->symbols.create(""), true, $2, $5); }
+  | '{' formals '}' '@' ID ':' expr_function
+    { $$ = new ExprLambda(CUR_POS, data->symbols.create($5), true, $2, $7); }
+  | ID '@' '{' formals '}' ':' expr_function
+    { $$ = new ExprLambda(CUR_POS, data->symbols.create($1), true, $4, $7); }
+  | ASSERT expr ';' expr_function
+    { $$ = new ExprAssert(CUR_POS, $2, $4); }
+  | WITH expr ';' expr_function
+    { $$ = new ExprWith(CUR_POS, $2, $4); }
+  | LET binds IN expr_function
+    { if (!$2->dynamicAttrs.empty())
+        throw ParseError(format("dynamic attributes not allowed in let at %1%")
+            % CUR_POS);
+      $$ = new ExprLet($2, $4);
+    }
+  | expr_if
+  ;
+
+expr_if
+  : IF expr THEN expr ELSE expr { $$ = new ExprIf($2, $4, $6); }
+  | expr_op
+  ;
+
+expr_op
+  : '!' expr_op %prec NOT { $$ = new ExprOpNot($2); }
+  | '-' expr_op %prec NEGATE { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__sub")), new ExprInt(0)), $2); }
+  | expr_op EQ expr_op { $$ = new ExprOpEq($1, $3); }
+  | expr_op NEQ expr_op { $$ = new ExprOpNEq($1, $3); }
+  | expr_op '<' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $1), $3); }
+  | expr_op LEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $3), $1)); }
+  | expr_op '>' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $3), $1); }
+  | expr_op GEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $1), $3)); }
+  | expr_op AND expr_op { $$ = new ExprOpAnd(CUR_POS, $1, $3); }
+  | expr_op OR expr_op { $$ = new ExprOpOr(CUR_POS, $1, $3); }
+  | expr_op IMPL expr_op { $$ = new ExprOpImpl(CUR_POS, $1, $3); }
+  | expr_op UPDATE expr_op { $$ = new ExprOpUpdate(CUR_POS, $1, $3); }
+  | expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, *$3); }
+  | expr_op '+' expr_op
+    { vector<Expr *> * l = new vector<Expr *>;
+      l->push_back($1);
+      l->push_back($3);
+      $$ = new ExprConcatStrings(CUR_POS, false, l);
+    }
+  | expr_op '-' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__sub")), $1), $3); }
+  | expr_op '*' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__mul")), $1), $3); }
+  | expr_op '/' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__div")), $1), $3); }
+  | expr_op CONCAT expr_op { $$ = new ExprOpConcatLists(CUR_POS, $1, $3); }
+  | expr_app
+  ;
+
+expr_app
+  : expr_app expr_select
+    { $$ = new ExprApp(CUR_POS, $1, $2); }
+  | expr_select { $$ = $1; }
+  ;
+
+expr_select
+  : expr_simple '.' attrpath
+    { $$ = new ExprSelect(CUR_POS, $1, *$3, 0); }
+  | expr_simple '.' attrpath OR_KW expr_select
+    { $$ = new ExprSelect(CUR_POS, $1, *$3, $5); }
+  | /* Backwards compatibility: because Nixpkgs has a rarely used
+       function named ‘or’, allow stuff like ‘map or [...]’. */
+    expr_simple OR_KW
+    { $$ = new ExprApp(CUR_POS, $1, new ExprVar(CUR_POS, data->symbols.create("or"))); }
+  | expr_simple { $$ = $1; }
+  ;
+
+expr_simple
+  : ID {
+      if (strcmp($1, "__curPos") == 0)
+          $$ = new ExprPos(CUR_POS);
+      else
+          $$ = new ExprVar(CUR_POS, data->symbols.create($1));
+  }
+  | INT { $$ = new ExprInt($1); }
+  | '"' string_parts '"' { $$ = $2; }
+  | IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE {
+      $$ = stripIndentation(CUR_POS, data->symbols, *$2);
+  }
+  | PATH { $$ = new ExprPath(absPath($1, data->basePath)); }
+  | SPATH {
+      string path($1 + 1, strlen($1) - 2);
+      $$ = new ExprApp(CUR_POS,
+          new ExprApp(new ExprVar(data->symbols.create("__findFile")),
+              new ExprVar(data->symbols.create("__nixPath"))),
+          new ExprString(data->symbols.create(path)));
+  }
+  | URI { $$ = new ExprString(data->symbols.create($1)); }
+  | '(' expr ')' { $$ = $2; }
+  /* Let expressions `let {..., body = ...}' are just desugared
+     into `(rec {..., body = ...}).body'. */
+  | LET '{' binds '}'
+    { $3->recursive = true; $$ = new ExprSelect(noPos, $3, data->symbols.create("body")); }
+  | REC '{' binds '}'
+    { $3->recursive = true; $$ = $3; }
+  | '{' binds '}'
+    { $$ = $2; }
+  | '[' expr_list ']' { $$ = $2; }
+  ;
+
+string_parts
+  : STR
+  | string_parts_interpolated { $$ = new ExprConcatStrings(CUR_POS, true, $1); }
+  | { $$ = new ExprString(data->symbols.create("")); }
+  ;
+
+string_parts_interpolated
+  : string_parts_interpolated STR { $$ = $1; $1->push_back($2); }
+  | string_parts_interpolated DOLLAR_CURLY expr '}' { backToString(scanner); $$ = $1; $1->push_back($3); }
+  | STR DOLLAR_CURLY expr '}'
+    {
+      backToString(scanner);
+      $$ = new vector<Expr *>;
+      $$->push_back($1);
+      $$->push_back($3);
+    }
+  | DOLLAR_CURLY expr '}'
+    {
+      backToString(scanner);
+      $$ = new vector<Expr *>;
+      $$->push_back($2);
+    }
+  ;
+
+ind_string_parts
+  : ind_string_parts IND_STR { $$ = $1; $1->push_back($2); }
+  | ind_string_parts DOLLAR_CURLY expr '}' { backToIndString(scanner); $$ = $1; $1->push_back($3); }
+  | { $$ = new vector<Expr *>; }
+  ;
+
+binds
+  : binds attrpath '=' expr ';' { $$ = $1; addAttr($$, *$2, $4, makeCurPos(@2, data)); }
+  | binds INHERIT attrs ';'
+    { $$ = $1;
+      foreach (AttrPath::iterator, i, *$3) {
+          if ($$->attrs.find(i->symbol) != $$->attrs.end())
+              dupAttr(i->symbol, makeCurPos(@3, data), $$->attrs[i->symbol].pos);
+          Pos pos = makeCurPos(@3, data);
+          $$->attrs[i->symbol] = ExprAttrs::AttrDef(new ExprVar(CUR_POS, i->symbol), pos, true);
+      }
+    }
+  | binds INHERIT '(' expr ')' attrs ';'
+    { $$ = $1;
+      /* !!! Should ensure sharing of the expression in $4. */
+      foreach (AttrPath::iterator, i, *$6) {
+          if ($$->attrs.find(i->symbol) != $$->attrs.end())
+              dupAttr(i->symbol, makeCurPos(@6, data), $$->attrs[i->symbol].pos);
+          $$->attrs[i->symbol] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i->symbol), makeCurPos(@6, data));
+      }
+    }
+  | { $$ = new ExprAttrs; }
+  ;
+
+attrs
+  : attrs attr { $$ = $1; $1->push_back(AttrName(data->symbols.create($2))); }
+  | attrs string_attr
+    { $$ = $1;
+      ExprString *str = dynamic_cast<ExprString *>($2);
+      if (str) {
+          $$->push_back(AttrName(str->s));
+          delete str;
+      } else
+        throw ParseError(format("dynamic attributes not allowed in inherit at %1%")
+            % makeCurPos(@2, data));
+    }
+  | { $$ = new AttrPath; }
+  ;
+
+attrpath
+  : attrpath '.' attr { $$ = $1; $1->push_back(AttrName(data->symbols.create($3))); }
+  | attrpath '.' string_attr
+    { $$ = $1;
+      ExprString *str = dynamic_cast<ExprString *>($3);
+      if (str) {
+          $$->push_back(AttrName(str->s));
+          delete str;
+      } else
+          $$->push_back(AttrName(static_cast<ExprConcatStrings *>($3)));
+    }
+  | attr { $$ = new vector<AttrName>; $$->push_back(AttrName(data->symbols.create($1))); }
+  | string_attr
+    { $$ = new vector<AttrName>;
+      ExprString *str = dynamic_cast<ExprString *>($1);
+      if (str) {
+          $$->push_back(AttrName(str->s));
+          delete str;
+      } else
+          $$->push_back(AttrName(static_cast<ExprConcatStrings *>($1)));
+    }
+  ;
+
+attr
+  : ID { $$ = $1; }
+  | OR_KW { $$ = "or"; }
+  ;
+
+string_attr
+  : '"' string_parts '"' { $$ = $2; }
+  | DOLLAR_CURLY expr '}' { $$ = new ExprConcatStrings(CUR_POS, true, new vector<Expr*>(1, $2)); }
+  ;
+
+expr_list
+  : expr_list expr_select { $$ = $1; $1->elems.push_back($2); /* !!! dangerous */ }
+  | { $$ = new ExprList; }
+  ;
+
+formals
+  : formal ',' formals
+    { $$ = $3; addFormal(CUR_POS, $$, *$1); }
+  | formal
+    { $$ = new Formals; addFormal(CUR_POS, $$, *$1); $$->ellipsis = false; }
+  |
+    { $$ = new Formals; $$->ellipsis = false; }
+  | ELLIPSIS
+    { $$ = new Formals; $$->ellipsis = true; }
+  ;
+
+formal
+  : ID { $$ = new Formal(data->symbols.create($1), 0); }
+  | ID '?' expr { $$ = new Formal(data->symbols.create($1), $3); }
+  ;
+
+%%
+
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <eval.hh>
+
+
+namespace nix {
+
+
+Expr * EvalState::parse(const char * text,
+    const Path & path, const Path & basePath, StaticEnv & staticEnv)
+{
+    yyscan_t scanner;
+    ParseData data(*this);
+    data.basePath = basePath;
+    data.path = data.symbols.create(path);
+
+    yylex_init(&scanner);
+    yy_scan_string(text, scanner);
+    int res = yyparse(scanner, &data);
+    yylex_destroy(scanner);
+
+    if (res) throw ParseError(data.error);
+
+    data.result->bindVars(staticEnv);
+
+    return data.result;
+}
+
+
+Path resolveExprPath(Path path)
+{
+    assert(path[0] == '/');
+
+    /* If `path' is a symlink, follow it.  This is so that relative
+       path references work. */
+    struct stat st;
+    while (true) {
+        if (lstat(path.c_str(), &st))
+            throw SysError(format("getting status of ‘%1%’") % path);
+        if (!S_ISLNK(st.st_mode)) break;
+        path = absPath(readLink(path), dirOf(path));
+    }
+
+    /* If `path' refers to a directory, append `/default.nix'. */
+    if (S_ISDIR(st.st_mode))
+        path = canonPath(path + "/default.nix");
+
+    return path;
+}
+
+
+Expr * EvalState::parseExprFromFile(const Path & path)
+{
+    return parseExprFromFile(path, staticBaseEnv);
+}
+
+
+Expr * EvalState::parseExprFromFile(const Path & path, StaticEnv & staticEnv)
+{
+    return parse(readFile(path).c_str(), path, dirOf(path), staticEnv);
+}
+
+
+Expr * EvalState::parseExprFromString(const string & s, const Path & basePath, StaticEnv & staticEnv)
+{
+    return parse(s.c_str(), "(string)", basePath, staticEnv);
+}
+
+
+Expr * EvalState::parseExprFromString(const string & s, const Path & basePath)
+{
+    return parseExprFromString(s, basePath, staticBaseEnv);
+}
+
+
+void EvalState::addToSearchPath(const string & s, bool warn)
+{
+    size_t pos = s.find('=');
+    string prefix;
+    Path path;
+    if (pos == string::npos) {
+        path = s;
+    } else {
+        prefix = string(s, 0, pos);
+        path = string(s, pos + 1);
+    }
+
+    path = absPath(path);
+    if (pathExists(path)) {
+        debug(format("adding path ‘%1%’ to the search path") % path);
+        searchPath.push_back(std::pair<string, Path>(prefix, path));
+    } else if (warn)
+        printMsg(lvlError, format("warning: Nix search path entry ‘%1%’ does not exist, ignoring") % path);
+}
+
+
+Path EvalState::findFile(const string & path)
+{
+    return findFile(searchPath, path);
+}
+
+
+Path EvalState::findFile(SearchPath & searchPath, const string & path)
+{
+    foreach (SearchPath::iterator, i, searchPath) {
+        Path res;
+        if (i->first.empty())
+            res = i->second + "/" + path;
+        else {
+            if (path.compare(0, i->first.size(), i->first) != 0 ||
+                (path.size() > i->first.size() && path[i->first.size()] != '/'))
+                continue;
+            res = i->second +
+                (path.size() == i->first.size() ? "" : "/" + string(path, i->first.size()));
+        }
+        if (pathExists(res)) return canonPath(res);
+    }
+    throw ThrownError(format("file ‘%1%’ was not found in the Nix search path (add it using $NIX_PATH or -I)") % path);
+}
+
+
+}
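
[Editor's note] findFile() above tries each search-path entry in order: an entry with an empty prefix simply has the requested path appended, while an entry of the form ‘prefix=dir’ only matches paths that are the prefix itself or continue with a ‘/’. A simplified standalone sketch of that matching rule (no filesystem checks or canonicalisation, and the example directories are made up):

    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <utility>
    #include <vector>

    // Simplified search-path lookup: each entry is a (prefix, directory) pair;
    // an empty prefix matches everything.  The real implementation also checks
    // that the candidate path exists and canonicalises it.
    std::string findFile(const std::vector<std::pair<std::string, std::string>> & searchPath,
        const std::string & path)
    {
        for (auto & entry : searchPath) {
            const std::string & prefix = entry.first;
            const std::string & dir = entry.second;
            if (prefix.empty())
                return dir + "/" + path;
            // `path` must be the prefix itself or continue with a '/'.
            if (path.compare(0, prefix.size(), prefix) != 0) continue;
            if (path.size() > prefix.size() && path[prefix.size()] != '/') continue;
            return dir + (path.size() == prefix.size() ? "" : "/" + path.substr(prefix.size() + 1));
        }
        throw std::runtime_error("not found in search path");
    }

    int main()
    {
        std::vector<std::pair<std::string, std::string>> sp = {
            {"nixpkgs", "/home/alice/nixpkgs"},   // like NIX_PATH=nixpkgs=/home/alice/nixpkgs
            {"", "/some/unprefixed/entry"},
        };
        std::cout << findFile(sp, "nixpkgs/lib") << "\n";  // /home/alice/nixpkgs/lib
        std::cout << findFile(sp, "nixos-config") << "\n"; // falls through to the unprefixed entry
    }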
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
new file mode 100644
index 000000000000..0c4381b11368
--- /dev/null
+++ b/src/libexpr/primops.cc
@@ -0,0 +1,1483 @@
+#include "eval.hh"
+#include "misc.hh"
+#include "globals.hh"
+#include "store-api.hh"
+#include "util.hh"
+#include "archive.hh"
+#include "value-to-xml.hh"
+#include "value-to-json.hh"
+#include "json-to-value.hh"
+#include "names.hh"
+#include "eval-inline.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <cstring>
+#include <dlfcn.h>
+
+
+namespace nix {
+
+
+/*************************************************************
+ * Miscellaneous
+ *************************************************************/
+
+
+/* Decode a context string ‘!<name>!<path>’ into a pair <path,
+   name>. */
+std::pair<string, string> decodeContext(const string & s)
+{
+    if (s.at(0) == '!') {
+        size_t index = s.find("!", 1);
+        return std::pair<string, string>(string(s, index + 1), string(s, 1, index - 1));
+    } else
+        return std::pair<string, string>(s, "");
+}
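
[Editor's note] decodeContext() splits a context string of the form ‘!<name>!<path>’ into the path and the output name, and passes anything else through with an empty name. A tiny standalone illustration (a simplified copy for demonstration, not the library function itself, and the store path in main() is made up):

    #include <iostream>
    #include <string>
    #include <utility>

    // Simplified copy of decodeContext above: "!<name>!<path>" -> (path, name),
    // anything else -> (string, "").  Assumes well-formed input.
    std::pair<std::string, std::string> decodeContext(const std::string & s)
    {
        if (!s.empty() && s[0] == '!') {
            size_t index = s.find('!', 1);
            return {s.substr(index + 1), s.substr(1, index - 1)};
        }
        return {s, ""};
    }

    int main()
    {
        auto d = decodeContext("!out!/nix/store/abc-foo.drv");
        std::cout << d.first << " / " << d.second << "\n";  // path, then output name
    }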
+
+
+struct InvalidPathError : EvalError
+{
+    Path path;
+    InvalidPathError(const Path & path) :
+        EvalError(format("path ‘%1%’ is not valid") % path), path(path) {};
+    ~InvalidPathError() throw () { };
+};
+
+
+static void realiseContext(const PathSet & context)
+{
+    PathSet drvs;
+    for (auto & i : context) {
+        std::pair<string, string> decoded = decodeContext(i);
+        Path ctx = decoded.first;
+        assert(isStorePath(ctx));
+        if (!store->isValidPath(ctx))
+            throw InvalidPathError(ctx);
+        if (isDerivation(ctx))
+            drvs.insert(decoded.first + "!" + decoded.second);
+    }
+    if (!drvs.empty()) {
+        /* For performance, prefetch all substitute info. */
+        PathSet willBuild, willSubstitute, unknown;
+        unsigned long long downloadSize, narSize;
+        queryMissing(*store, drvs,
+            willBuild, willSubstitute, unknown, downloadSize, narSize);
+
+        store->buildPaths(drvs);
+    }
+}
+
+
+/* Load and evaluate an expression from path specified by the
+   argument. */
+static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    Path path = state.coerceToPath(pos, *args[1], context);
+
+    try {
+        realiseContext(context);
+    } catch (InvalidPathError & e) {
+        throw EvalError(format("cannot import ‘%1%’, since path ‘%2%’ is not valid, at %3%")
+            % path % e.path % pos);
+    }
+
+    if (isStorePath(path) && store->isValidPath(path) && isDerivation(path)) {
+        Derivation drv = readDerivation(path);
+        Value & w = *state.allocValue();
+        state.mkAttrs(w, 1 + drv.outputs.size());
+        mkString(*state.allocAttr(w, state.sDrvPath), path, singleton<PathSet>("=" + path));
+        state.mkList(*state.allocAttr(w, state.symbols.create("outputs")), drv.outputs.size());
+        unsigned int outputs_index = 0;
+
+        Value * outputsVal = w.attrs->find(state.symbols.create("outputs"))->value;
+        foreach (DerivationOutputs::iterator, i, drv.outputs) {
+            mkString(*state.allocAttr(w, state.symbols.create(i->first)),
+                i->second.path, singleton<PathSet>("!" + i->first + "!" + path));
+            mkString(*(outputsVal->list.elems[outputs_index++] = state.allocValue()),
+                i->first);
+        }
+        w.attrs->sort();
+        Value fun;
+        state.evalFile(state.findFile("nix/imported-drv-to-derivation.nix"), fun);
+        state.forceFunction(fun, pos);
+        mkApp(v, fun, w);
+        state.forceAttrs(v, pos);
+    } else {
+        state.forceAttrs(*args[0]);
+        if (args[0]->attrs->empty())
+            state.evalFile(path, v);
+        else {
+            Env * env = &state.allocEnv(args[0]->attrs->size());
+            env->up = &state.baseEnv;
+
+            StaticEnv staticEnv(false, &state.staticBaseEnv);
+
+            unsigned int displ = 0;
+            for (auto & attr : *args[0]->attrs) {
+                staticEnv.vars[attr.name] = displ;
+                env->values[displ++] = attr.value;
+            }
+
+            startNest(nest, lvlTalkative, format("evaluating file ‘%1%’") % path);
+            Expr * e = state.parseExprFromFile(resolveExprPath(path), staticEnv);
+
+            e->eval(state, *env, v);
+        }
+    }
+}
+
+
+/* Want reasonable symbol names, so extern C */
+/* !!! Should we pass the Pos or the file name too? */
+extern "C" typedef void (*ValueInitializer)(EvalState & state, Value & v);
+
+/* Load a ValueInitializer from a dso and return whatever it initializes */
+static void prim_importNative(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    Path path = state.coerceToPath(pos, *args[0], context);
+
+    try {
+        realiseContext(context);
+    } catch (InvalidPathError & e) {
+        throw EvalError(format("cannot import ‘%1%’, since path ‘%2%’ is not valid, at %3%")
+            % path % e.path % pos);
+    }
+
+    string sym = state.forceStringNoCtx(*args[1], pos);
+
+    void *handle = dlopen(path.c_str(), RTLD_LAZY | RTLD_LOCAL);
+    if (!handle)
+        throw EvalError(format("could not open ‘%1%’: %2%") % path % dlerror());
+
+    dlerror();
+    ValueInitializer func = (ValueInitializer) dlsym(handle, sym.c_str());
+    if(!func) {
+        char *message = dlerror();
+        if (message)
+            throw EvalError(format("could not load symbol ‘%1%’ from ‘%2%’: %3%") % sym % path % message);
+        else
+            throw EvalError(format("symbol ‘%1%’ from ‘%2%’ resolved to NULL when a function pointer was expected")
+                    % sym % path);
+    }
+
+    (func)(state, v);
+
+    /* We don't dlclose because v may be a primop referencing a function in the shared object file */
+}
+
+
+/* Return a string representing the type of the expression. */
+static void prim_typeOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    string t;
+    switch (args[0]->type) {
+        case tInt: t = "int"; break;
+        case tBool: t = "bool"; break;
+        case tString: t = "string"; break;
+        case tPath: t = "path"; break;
+        case tNull: t = "null"; break;
+        case tAttrs: t = "set"; break;
+        case tList: t = "list"; break;
+        case tLambda:
+        case tPrimOp:
+        case tPrimOpApp:
+            t = "lambda";
+            break;
+        default: abort();
+    }
+    mkString(v, state.symbols.create(t));
+}
+
+
+/* Determine whether the argument is the null value. */
+static void prim_isNull(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    mkBool(v, args[0]->type == tNull);
+}
+
+
+/* Determine whether the argument is a function. */
+static void prim_isFunction(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    mkBool(v, args[0]->type == tLambda);
+}
+
+
+/* Determine whether the argument is an integer. */
+static void prim_isInt(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    mkBool(v, args[0]->type == tInt);
+}
+
+
+/* Determine whether the argument is a string. */
+static void prim_isString(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    mkBool(v, args[0]->type == tString);
+}
+
+
+/* Determine whether the argument is a Boolean. */
+static void prim_isBool(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    mkBool(v, args[0]->type == tBool);
+}
+
+
+struct CompareValues
+{
+    bool operator () (const Value * v1, const Value * v2) const
+    {
+        if (v1->type != v2->type)
+            throw EvalError("cannot compare values of different types");
+        switch (v1->type) {
+            case tInt:
+                return v1->integer < v2->integer;
+            case tString:
+                return strcmp(v1->string.s, v2->string.s) < 0;
+            case tPath:
+                return strcmp(v1->path, v2->path) < 0;
+            default:
+                throw EvalError(format("cannot compare %1% with %2%") % showType(*v1) % showType(*v2));
+        }
+    }
+};
+
+
+#if HAVE_BOEHMGC
+typedef list<Value *, gc_allocator<Value *> > ValueList;
+#else
+typedef list<Value *> ValueList;
+#endif
+
+
+static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    startNest(nest, lvlDebug, "finding dependencies");
+
+    state.forceAttrs(*args[0], pos);
+
+    /* Get the start set. */
+    Bindings::iterator startSet =
+        args[0]->attrs->find(state.symbols.create("startSet"));
+    if (startSet == args[0]->attrs->end())
+        throw EvalError(format("attribute ‘startSet’ required, at %1%") % pos);
+    state.forceList(*startSet->value, pos);
+
+    ValueList workSet;
+    for (unsigned int n = 0; n < startSet->value->list.length; ++n)
+        workSet.push_back(startSet->value->list.elems[n]);
+
+    /* Get the operator. */
+    Bindings::iterator op =
+        args[0]->attrs->find(state.symbols.create("operator"));
+    if (op == args[0]->attrs->end())
+        throw EvalError(format("attribute ‘operator’ required, at %1%") % pos);
+    state.forceValue(*op->value);
+
+    /* Construct the closure by applying the operator to each element
+       of `workSet', adding the result to `workSet', and continuing
+       until no new elements are found. */

+    ValueList res;
+    // `doneKeys' doesn't need to be a GC root, because its values are
+    // reachable from res.
+    set<Value *, CompareValues> doneKeys;
+    while (!workSet.empty()) {
+        Value * e = *(workSet.begin());
+        workSet.pop_front();
+
+        state.forceAttrs(*e, pos);
+
+        Bindings::iterator key =
+            e->attrs->find(state.symbols.create("key"));
+        if (key == e->attrs->end())
+            throw EvalError(format("attribute ‘key’ required, at %1%") % pos);
+        state.forceValue(*key->value);
+
+        if (doneKeys.find(key->value) != doneKeys.end()) continue;
+        doneKeys.insert(key->value);
+        res.push_back(e);
+
+        /* Call the `operator' function with `e' as argument. */
+        Value call;
+        mkApp(call, *op->value, *e);
+        state.forceList(call, pos);
+
+        /* Add the values returned by the operator to the work set. */
+        for (unsigned int n = 0; n < call.list.length; ++n) {
+            state.forceValue(*call.list.elems[n]);
+            workSet.push_back(call.list.elems[n]);
+        }
+    }
+
+    /* Create the result list. */
+    state.mkList(v, res.size());
+    unsigned int n = 0;
+    foreach (ValueList::iterator, i, res)
+        v.list.elems[n++] = *i;
+}
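
[Editor's note] prim_genericClosure implements a classic work-list closure: start from startSet, apply the operator to every element not yet seen (deduplicated by its key attribute), and append the results to the work list until nothing new turns up. The same pattern over plain integers, as a hedged standalone sketch (names and the example successor function are invented):

    #include <deque>
    #include <iostream>
    #include <set>
    #include <vector>

    // Generic work-list closure over integers: `op` returns the successors of
    // an element; elements are deduplicated by value (the analogue of the
    // `key` attribute above).
    template<typename Op>
    std::vector<int> genericClosure(std::vector<int> startSet, Op op)
    {
        std::deque<int> workSet(startSet.begin(), startSet.end());
        std::set<int> doneKeys;
        std::vector<int> res;

        while (!workSet.empty()) {
            int e = workSet.front();
            workSet.pop_front();
            if (!doneKeys.insert(e).second) continue;   // already processed
            res.push_back(e);
            for (int succ : op(e)) workSet.push_back(succ);
        }
        return res;
    }

    int main()
    {
        // Closure of {6} under "halve if even, and subtract 1 if positive".
        auto op = [](int n) {
            std::vector<int> out;
            if (n % 2 == 0) out.push_back(n / 2);
            if (n > 0) out.push_back(n - 1);
            return out;
        };
        for (int n : genericClosure({6}, op)) std::cout << n << " ";
        std::cout << "\n";   // prints "6 3 5 2 4 1 0"
    }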
+
+
+static void prim_abort(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    throw Abort(format("evaluation aborted with the following error message: ‘%1%’") %
+        state.coerceToString(pos, *args[0], context));
+}
+
+
+static void prim_throw(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    throw ThrownError(format("%1%") % state.coerceToString(pos, *args[0], context));
+}
+
+
+static void prim_addErrorContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    try {
+        state.forceValue(*args[1]);
+        v = *args[1];
+    } catch (Error & e) {
+        PathSet context;
+        e.addPrefix(format("%1%\n") % state.coerceToString(pos, *args[0], context));
+        throw;
+    }
+}
+
+
+/* Try evaluating the argument. Success => {success=true; value=something;},
+ * else => {success=false; value=false;} */
+static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.mkAttrs(v, 2);
+    try {
+        state.forceValue(*args[0]);
+        v.attrs->push_back(Attr(state.sValue, args[0]));
+        mkBool(*state.allocAttr(v, state.symbols.create("success")), true);
+    } catch (AssertionError & e) {
+        mkBool(*state.allocAttr(v, state.sValue), false);
+        mkBool(*state.allocAttr(v, state.symbols.create("success")), false);
+    }
+    v.attrs->sort();
+}
+
+
+/* Return an environment variable.  Use with care. */
+static void prim_getEnv(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    string name = state.forceStringNoCtx(*args[0], pos);
+    mkString(v, getEnv(name));
+}
+
+
+/* Evaluate the first expression and print it on standard error.  Then
+   return the second expression.  Useful for debugging. */
+static void prim_trace(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    if (args[0]->type == tString)
+        printMsg(lvlError, format("trace: %1%") % args[0]->string.s);
+    else
+        printMsg(lvlError, format("trace: %1%") % *args[0]);
+    state.forceValue(*args[1]);
+    v = *args[1];
+}
+
+
+/*************************************************************
+ * Derivations
+ *************************************************************/
+
+
+/* Construct (as an unobservable side effect) a Nix derivation
+   expression that performs the derivation described by the argument
+   set.  Returns the original set extended with the following
+   attributes: `outPath' containing the primary output path of the
+   derivation; `drvPath' containing the path of the Nix expression;
+   and `type' set to `derivation' to indicate that this is a
+   derivation. */
+static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    startNest(nest, lvlVomit, "evaluating derivation");
+
+    state.forceAttrs(*args[0], pos);
+
+    /* Figure out the name first (for stack backtraces). */
+    Bindings::iterator attr = args[0]->attrs->find(state.sName);
+    if (attr == args[0]->attrs->end())
+        throw EvalError(format("required attribute ‘name’ missing, at %1%") % pos);
+    string drvName;
+    Pos & posDrvName(*attr->pos);
+    try {
+        drvName = state.forceStringNoCtx(*attr->value, pos);
+    } catch (Error & e) {
+        e.addPrefix(format("while evaluating the derivation attribute ‘name’ at %1%:\n") % posDrvName);
+        throw;
+    }
+
+    /* Check whether null attributes should be ignored. */
+    bool ignoreNulls = false;
+    attr = args[0]->attrs->find(state.sIgnoreNulls);
+    if (attr != args[0]->attrs->end())
+        ignoreNulls = state.forceBool(*attr->value);
+
+    /* Build the derivation expression by processing the attributes. */
+    Derivation drv;
+
+    PathSet context;
+
+    string outputHash, outputHashAlgo;
+    bool outputHashRecursive = false;
+
+    StringSet outputs;
+    outputs.insert("out");
+
+    foreach (Bindings::iterator, i, *args[0]->attrs) {
+        if (i->name == state.sIgnoreNulls) continue;
+        string key = i->name;
+        startNest(nest, lvlVomit, format("processing attribute ‘%1%’") % key);
+
+        try {
+
+            if (ignoreNulls) {
+                state.forceValue(*i->value);
+                if (i->value->type == tNull) continue;
+            }
+
+            /* The `args' attribute is special: it supplies the
+               command-line arguments to the builder. */
+            if (key == "args") {
+                state.forceList(*i->value, pos);
+                for (unsigned int n = 0; n < i->value->list.length; ++n) {
+                    string s = state.coerceToString(posDrvName, *i->value->list.elems[n], context, true);
+                    drv.args.push_back(s);
+                }
+            }
+
+            /* All other attributes are passed to the builder through
+               the environment. */
+            else {
+                string s = state.coerceToString(posDrvName, *i->value, context, true);
+                drv.env[key] = s;
+                if (key == "builder") drv.builder = s;
+                else if (i->name == state.sSystem) drv.platform = s;
+                else if (i->name == state.sName) {
+                    drvName = s;
+                    printMsg(lvlVomit, format("derivation name is ‘%1%’") % drvName);
+                }
+                else if (key == "outputHash") outputHash = s;
+                else if (key == "outputHashAlgo") outputHashAlgo = s;
+                else if (key == "outputHashMode") {
+                    if (s == "recursive") outputHashRecursive = true;
+                    else if (s == "flat") outputHashRecursive = false;
+                    else throw EvalError(format("invalid value ‘%1%’ for ‘outputHashMode’ attribute, at %2%") % s % posDrvName);
+                }
+                else if (key == "outputs") {
+                    Strings tmp = tokenizeString<Strings>(s);
+                    outputs.clear();
+                    foreach (Strings::iterator, j, tmp) {
+                        if (outputs.find(*j) != outputs.end())
+                            throw EvalError(format("duplicate derivation output ‘%1%’, at %2%") % *j % posDrvName);
+                        /* !!! Check whether *j is a valid attribute
+                           name. */
+                        /* Derivations cannot be named ‘drv’, because
+                           then we'd have an attribute ‘drvPath’ in
+                           the resulting set. */
+                        if (*j == "drv")
+                            throw EvalError(format("invalid derivation output name ‘drv’, at %1%") % posDrvName);
+                        outputs.insert(*j);
+                    }
+                    if (outputs.empty())
+                        throw EvalError(format("derivation cannot have an empty set of outputs, at %1%") % posDrvName);
+                }
+            }
+
+        } catch (Error & e) {
+            e.addPrefix(format("while evaluating the attribute ‘%1%’ of the derivation ‘%2%’ at %3%:\n")
+                % key % drvName % posDrvName);
+            throw;
+        }
+    }
+
+    /* Everything in the context of the strings in the derivation
+       attributes should be added as dependencies of the resulting
+       derivation. */
+    foreach (PathSet::iterator, i, context) {
+        Path path = *i;
+
+        /* Paths marked with `=' denote that the path of a derivation
+           is explicitly passed to the builder.  Since that allows the
+           builder to gain access to every path in the dependency
+           graph of the derivation (including all outputs), all paths
+           in the graph must be added to this derivation's list of
+           inputs to ensure that they are available when the builder
+           runs. */
+        if (path.at(0) == '=') {
+            /* !!! This doesn't work if readOnlyMode is set. */
+            PathSet refs; computeFSClosure(*store, string(path, 1), refs);
+            foreach (PathSet::iterator, j, refs) {
+                drv.inputSrcs.insert(*j);
+                if (isDerivation(*j))
+                    drv.inputDrvs[*j] = store->queryDerivationOutputNames(*j);
+            }
+        }
+
+        /* See prim_unsafeDiscardOutputDependency. */
+        else if (path.at(0) == '~')
+            drv.inputSrcs.insert(string(path, 1));
+
+        /* Handle derivation outputs of the form ‘!<name>!<path>’. */
+        else if (path.at(0) == '!') {
+            std::pair<string, string> ctx = decodeContext(path);
+            drv.inputDrvs[ctx.first].insert(ctx.second);
+        }
+
+        /* Handle derivation contexts returned by
+           ‘builtins.storePath’. */
+        else if (isDerivation(path))
+            drv.inputDrvs[path] = store->queryDerivationOutputNames(path);
+
+        /* Otherwise it's a source file. */
+        else
+            drv.inputSrcs.insert(path);
+    }
+
+    /* Do we have all required attributes? */
+    if (drv.builder == "")
+        throw EvalError(format("required attribute ‘builder’ missing, at %1%") % posDrvName);
+    if (drv.platform == "")
+        throw EvalError(format("required attribute ‘system’ missing, at %1%") % posDrvName);
+
+    /* Check whether the derivation name is valid. */
+    checkStoreName(drvName);
+    if (isDerivation(drvName))
+        throw EvalError(format("derivation names are not allowed to end in ‘%1%’, at %2%")
+            % drvExtension % posDrvName);
+
+    if (outputHash != "") {
+        /* Handle fixed-output derivations. */
+        if (outputs.size() != 1 || *(outputs.begin()) != "out")
+            throw Error(format("multiple outputs are not supported in fixed-output derivations, at %1%") % posDrvName);
+
+        HashType ht = parseHashType(outputHashAlgo);
+        if (ht == htUnknown)
+            throw EvalError(format("unknown hash algorithm ‘%1%’, at %2%") % outputHashAlgo % posDrvName);
+        Hash h = parseHash16or32(ht, outputHash);
+        outputHash = printHash(h);
+        if (outputHashRecursive) outputHashAlgo = "r:" + outputHashAlgo;
+
+        Path outPath = makeFixedOutputPath(outputHashRecursive, ht, h, drvName);
+        drv.env["out"] = outPath;
+        drv.outputs["out"] = DerivationOutput(outPath, outputHashAlgo, outputHash);
+    }
+
+    else {
+        /* Construct the "masked" store derivation, which is the final
+           one except that in the list of outputs, the output paths
+           are empty, and the corresponding environment variables have
+           an empty value.  This ensures that changes in the set of
+           output names do get reflected in the hash. */
+        foreach (StringSet::iterator, i, outputs) {
+            drv.env[*i] = "";
+            drv.outputs[*i] = DerivationOutput("", "", "");
+        }
+
+        /* Use the masked derivation expression to compute the output
+           path. */
+        Hash h = hashDerivationModulo(*store, drv);
+
+        foreach (DerivationOutputs::iterator, i, drv.outputs)
+            if (i->second.path == "") {
+                Path outPath = makeOutputPath(i->first, h, drvName);
+                drv.env[i->first] = outPath;
+                i->second.path = outPath;
+            }
+    }
+
+    /* Write the resulting term into the Nix store directory. */
+    Path drvPath = writeDerivation(*store, drv, drvName, state.repair);
+
+    printMsg(lvlChatty, format("instantiated ‘%1%’ -> ‘%2%’")
+        % drvName % drvPath);
+
+    /* Optimisation, but also required in read-only mode, because in
+       that case we don't actually write store derivations, so we
+       can't read them later. */
+    drvHashes[drvPath] = hashDerivationModulo(*store, drv);
+
+    state.mkAttrs(v, 1 + drv.outputs.size());
+    mkString(*state.allocAttr(v, state.sDrvPath), drvPath, singleton<PathSet>("=" + drvPath));
+    foreach (DerivationOutputs::iterator, i, drv.outputs) {
+        mkString(*state.allocAttr(v, state.symbols.create(i->first)),
+            i->second.path, singleton<PathSet>("!" + i->first + "!" + drvPath));
+    }
+    v.attrs->sort();
+}
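
[Editor's note] The loop over `context` in prim_derivationStrict dispatches on a one-character marker: ‘=’ means the whole dependency closure of the named path becomes an input, ‘~’ marks a discarded output dependency treated as a plain source, ‘!’ encodes a single derivation output, and anything else is either a .drv (all outputs) or an ordinary source path. A compressed standalone sketch of just that dispatch, with no store operations and purely illustrative return strings:

    #include <iostream>
    #include <string>

    // Classify a context string the way the loop above does, minus the actual
    // store queries.  Purely illustrative.
    std::string classifyContext(const std::string & path)
    {
        auto isDrv = [](const std::string & p) {
            return p.size() >= 4 && p.compare(p.size() - 4, 4, ".drv") == 0;
        };
        if (path.at(0) == '=') return "whole closure of " + path.substr(1);
        if (path.at(0) == '~') return "source input " + path.substr(1);
        if (path.at(0) == '!') return "single derivation output: " + path;
        if (isDrv(path))       return "all outputs of " + path;
        return "source input " + path;
    }

    int main()
    {
        std::cout << classifyContext("!out!/nix/store/abc-foo.drv") << "\n";
        std::cout << classifyContext("/nix/store/abc-bar.tar.gz") << "\n";
    }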
+
+
+/*************************************************************
+ * Paths
+ *************************************************************/
+
+
+/* Convert the argument to a path.  !!! obsolete? */
+static void prim_toPath(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    Path path = state.coerceToPath(pos, *args[0], context);
+    mkString(v, canonPath(path), context);
+}
+
+
+/* Allow a valid store path to be used in an expression.  This is
+   useful in some generated expressions such as in nix-push, which
+   generates a call to a function with an already existing store path
+   as argument.  You don't want to use `toPath' here because it copies
+   the path to the Nix store, which yields a copy like
+   /nix/store/newhash-oldhash-oldname.  In the past, `toPath' had
+   special case behaviour for store paths, but that created weird
+   corner cases. */
+static void prim_storePath(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    Path path = state.coerceToPath(pos, *args[0], context);
+    /* Resolve symlinks in ‘path’, unless ‘path’ itself is a symlink
+       directly in the store.  The latter condition is necessary so
+       e.g. nix-push does the right thing. */
+    if (!isStorePath(path)) path = canonPath(path, true);
+    if (!isInStore(path))
+        throw EvalError(format("path ‘%1%’ is not in the Nix store, at %2%") % path % pos);
+    Path path2 = toStorePath(path);
+    if (!settings.readOnlyMode)
+        store->ensurePath(path2);
+    context.insert(path2);
+    mkString(v, path, context);
+}
+
+
+static void prim_pathExists(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    Path path = state.coerceToPath(pos, *args[0], context);
+    if (!context.empty())
+        throw EvalError(format("string ‘%1%’ cannot refer to other paths, at %2%") % path % pos);
+    mkBool(v, pathExists(path));
+}
+
+
+/* Return the base name of the given string, i.e., everything
+   following the last slash. */
+static void prim_baseNameOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    mkString(v, baseNameOf(state.coerceToString(pos, *args[0], context)), context);
+}
+
+
+/* Return the directory of the given path, i.e., everything before the
+   last slash.  Return either a path or a string depending on the type
+   of the argument. */
+static void prim_dirOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    Path dir = dirOf(state.coerceToPath(pos, *args[0], context));
+    if (args[0]->type == tPath) mkPath(v, dir.c_str()); else mkString(v, dir, context);
+}
+
+
+/* Return the contents of a file as a string. */
+static void prim_readFile(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    Path path = state.coerceToPath(pos, *args[0], context);
+    if (!context.empty())
+        throw EvalError(format("string ‘%1%’ cannot refer to other paths, at %2%") % path % pos);
+    mkString(v, readFile(path).c_str());
+}
+
+
+/* Find a file in the Nix search path. Used to implement <x> paths,
+   which are desugared to ‘findFile __nixPath "x"’. */
+static void prim_findFile(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceList(*args[0], pos);
+
+    SearchPath searchPath;
+
+    PathSet context;
+    for (unsigned int n = 0; n < args[0]->list.length; ++n) {
+        Value & v2(*args[0]->list.elems[n]);
+        state.forceAttrs(v2, pos);
+
+        string prefix;
+        Bindings::iterator i = v2.attrs->find(state.symbols.create("prefix"));
+        if (i != v2.attrs->end())
+            prefix = state.forceStringNoCtx(*i->value, pos);
+
+        i = v2.attrs->find(state.symbols.create("path"));
+        if (i == v2.attrs->end())
+            throw EvalError(format("attribute ‘path’ missing, at %1%") % pos);
+        string path = state.coerceToPath(pos, *i->value, context);
+
+        searchPath.push_back(std::pair<string, Path>(prefix, path));
+    }
+
+    string path = state.forceStringNoCtx(*args[1], pos);
+
+    try {
+        realiseContext(context);
+    } catch (InvalidPathError & e) {
+        throw EvalError(format("cannot find ‘%1%’, since path ‘%2%’ is not valid, at %3%")
+            % path % e.path % pos);
+    }
+
+    mkPath(v, state.findFile(searchPath, path).c_str());
+}
+
+
+/*************************************************************
+ * Creating files
+ *************************************************************/
+
+
+/* Convert the argument (which can be any Nix expression) to an XML
+   representation returned in a string.  Not all Nix expressions can
+   be sensibly or completely represented (e.g., functions). */
+static void prim_toXML(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    std::ostringstream out;
+    PathSet context;
+    printValueAsXML(state, true, false, *args[0], out, context);
+    mkString(v, out.str(), context);
+}
+
+
+/* Convert the argument (which can be any Nix expression) to a JSON
+   string.  Not all Nix expressions can be sensibly or completely
+   represented (e.g., functions). */
+static void prim_toJSON(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    std::ostringstream out;
+    PathSet context;
+    printValueAsJSON(state, true, *args[0], out, context);
+    mkString(v, out.str(), context);
+}
+
+
+/* Parse a JSON string to a value. */
+static void prim_fromJSON(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    string s = state.forceStringNoCtx(*args[0], pos);
+    parseJSON(state, s, v);
+}
+
+
+/* Store a string in the Nix store as a source file that can be used
+   as an input by derivations. */
+static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    string name = state.forceStringNoCtx(*args[0], pos);
+    string contents = state.forceString(*args[1], context);
+
+    PathSet refs;
+
+    foreach (PathSet::iterator, i, context) {
+        Path path = *i;
+        if (path.at(0) == '=') path = string(path, 1);
+        if (isDerivation(path))
+            throw EvalError(format("in ‘toFile’: the file ‘%1%’ cannot refer to derivation outputs, at %2%") % name % pos);
+        refs.insert(path);
+    }
+
+    Path storePath = settings.readOnlyMode
+        ? computeStorePathForText(name, contents, refs)
+        : store->addTextToStore(name, contents, refs, state.repair);
+
+    /* Note: we don't need to add `context' to the context of the
+       result, since `storePath' itself has references to the paths
+       used in args[1]. */
+
+    mkString(v, storePath, singleton<PathSet>(storePath));
+}
+
+
+struct FilterFromExpr : PathFilter
+{
+    EvalState & state;
+    Value & filter;
+
+    FilterFromExpr(EvalState & state, Value & filter)
+        : state(state), filter(filter)
+    {
+    }
+
+    bool operator () (const Path & path)
+    {
+        struct stat st;
+        if (lstat(path.c_str(), &st))
+            throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+        /* Call the filter function.  The first argument is the path,
+           the second is a string indicating the type of the file. */
+        Value arg1;
+        mkString(arg1, path);
+
+        Value fun2;
+        state.callFunction(filter, arg1, fun2, noPos);
+
+        Value arg2;
+        mkString(arg2,
+            S_ISREG(st.st_mode) ? "regular" :
+            S_ISDIR(st.st_mode) ? "directory" :
+            S_ISLNK(st.st_mode) ? "symlink" :
+            "unknown" /* not supported, will fail! */);
+
+        Value res;
+        state.callFunction(fun2, arg2, res, noPos);
+
+        return state.forceBool(res);
+    }
+};
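
[Editor's note] FilterFromExpr adapts a Nix-level predicate to the PathFilter callback: for every path it passes the path itself plus a coarse file-type string derived from lstat(). A hedged standalone sketch of only the type-string part (function name invented; throws a plain exception instead of the library's SysError):

    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <sys/stat.h>

    // Map a filesystem entry to the same coarse type strings used above.
    std::string fileType(const std::string & path)
    {
        struct stat st;
        if (lstat(path.c_str(), &st) != 0)
            throw std::runtime_error("cannot stat " + path);
        if (S_ISREG(st.st_mode)) return "regular";
        if (S_ISDIR(st.st_mode)) return "directory";
        if (S_ISLNK(st.st_mode)) return "symlink";
        return "unknown";
    }

    int main()
    {
        std::cout << fileType(".") << "\n";   // "directory"
    }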
+
+
+static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    Path path = state.coerceToPath(pos, *args[1], context);
+    if (!context.empty())
+        throw EvalError(format("string ‘%1%’ cannot refer to other paths, at %2%") % path % pos);
+
+    state.forceValue(*args[0]);
+    if (args[0]->type != tLambda)
+        throw TypeError(format("first argument in call to ‘filterSource’ is not a function but %1%, at %2%") % showType(*args[0]) % pos);
+
+    FilterFromExpr filter(state, *args[0]);
+
+    Path dstPath = settings.readOnlyMode
+        ? computeStorePathForPath(path, true, htSHA256, filter).first
+        : store->addToStore(path, true, htSHA256, filter, state.repair);
+
+    mkString(v, dstPath, singleton<PathSet>(dstPath));
+}
+
+
+/*************************************************************
+ * Sets
+ *************************************************************/
+
+
+/* Return the names of the attributes in a set as a sorted list of
+   strings. */
+static void prim_attrNames(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceAttrs(*args[0], pos);
+
+    state.mkList(v, args[0]->attrs->size());
+
+    StringSet names;
+    foreach (Bindings::iterator, i, *args[0]->attrs)
+        names.insert(i->name);
+
+    unsigned int n = 0;
+    foreach (StringSet::iterator, i, names)
+        mkString(*(v.list.elems[n++] = state.allocValue()), *i);
+}
+
+
+/* Dynamic version of the `.' operator. */
+void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    string attr = state.forceStringNoCtx(*args[0], pos);
+    state.forceAttrs(*args[1], pos);
+    // !!! Should we create a symbol here or just do a lookup?
+    Bindings::iterator i = args[1]->attrs->find(state.symbols.create(attr));
+    if (i == args[1]->attrs->end())
+        throw EvalError(format("attribute ‘%1%’ missing, at %2%") % attr % pos);
+    // !!! add to stack trace?
+    if (state.countCalls && i->pos) state.attrSelects[*i->pos]++;
+    state.forceValue(*i->value);
+    v = *i->value;
+}
+
+
+/* Return position information of the specified attribute. */
+void prim_unsafeGetAttrPos(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    string attr = state.forceStringNoCtx(*args[0], pos);
+    state.forceAttrs(*args[1], pos);
+    Bindings::iterator i = args[1]->attrs->find(state.symbols.create(attr));
+    if (i == args[1]->attrs->end())
+        mkNull(v);
+    else
+        state.mkPos(v, i->pos);
+}
+
+
+/* Dynamic version of the `?' operator. */
+static void prim_hasAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    string attr = state.forceStringNoCtx(*args[0], pos);
+    state.forceAttrs(*args[1], pos);
+    mkBool(v, args[1]->attrs->find(state.symbols.create(attr)) != args[1]->attrs->end());
+}
+
+
+/* Determine whether the argument is a set. */
+static void prim_isAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    mkBool(v, args[0]->type == tAttrs);
+}
+
+
+static void prim_removeAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceAttrs(*args[0], pos);
+    state.forceList(*args[1], pos);
+
+    /* Get the attribute names to be removed. */
+    std::set<Symbol> names;
+    for (unsigned int i = 0; i < args[1]->list.length; ++i) {
+        state.forceStringNoCtx(*args[1]->list.elems[i], pos);
+        names.insert(state.symbols.create(args[1]->list.elems[i]->string.s));
+    }
+
+    /* Copy all attributes not in that set.  Note that we don't need
+       to sort v.attrs because it's a subset of an already sorted
+       vector. */
+    state.mkAttrs(v, args[0]->attrs->size());
+    foreach (Bindings::iterator, i, *args[0]->attrs) {
+        if (names.find(i->name) == names.end())
+            v.attrs->push_back(*i);
+    }
+}
+
+
+/* Builds a set from a list specifying (name, value) pairs.  To be
+   precise, a list [{name = "name1"; value = value1;} ... {name =
+   "nameN"; value = valueN;}] is transformed to {name1 = value1;
+   ... nameN = valueN;}.  In case of duplicate occurrences of the same
+   name, the first takes precedence. */
+static void prim_listToAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceList(*args[0], pos);
+
+    state.mkAttrs(v, args[0]->list.length);
+
+    std::set<Symbol> seen;
+
+    for (unsigned int i = 0; i < args[0]->list.length; ++i) {
+        Value & v2(*args[0]->list.elems[i]);
+        state.forceAttrs(v2, pos);
+
+        Bindings::iterator j = v2.attrs->find(state.sName);
+        if (j == v2.attrs->end())
+            throw TypeError(format("‘name’ attribute missing in a call to ‘listToAttrs’, at %1%") % pos);
+        string name = state.forceStringNoCtx(*j->value, pos);
+
+        Symbol sym = state.symbols.create(name);
+        if (seen.find(sym) == seen.end()) {
+            Bindings::iterator j2 = v2.attrs->find(state.symbols.create(state.sValue));
+            if (j2 == v2.attrs->end())
+                throw TypeError(format("‘value’ attribute missing in a call to ‘listToAttrs’, at %1%") % pos);
+
+            v.attrs->push_back(Attr(sym, j2->value, j2->pos));
+            seen.insert(sym);
+        }
+    }
+
+    v.attrs->sort();
+}
+
+
+/* Return the right-biased intersection of two sets as1 and as2,
+   i.e. a set that contains every attribute from as2 that is also a
+   member of as1. */
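+/* For illustration:
+     builtins.intersectAttrs { a = 1; b = 2; } { b = 20; c = 30; }
+   evaluates to { b = 20; }: only `b' occurs in both sets, and its
+   value is taken from as2. */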
+static void prim_intersectAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceAttrs(*args[0], pos);
+    state.forceAttrs(*args[1], pos);
+
+    state.mkAttrs(v, std::min(args[0]->attrs->size(), args[1]->attrs->size()));
+
+    foreach (Bindings::iterator, i, *args[0]->attrs) {
+        Bindings::iterator j = args[1]->attrs->find(i->name);
+        if (j != args[1]->attrs->end())
+            v.attrs->push_back(*j);
+    }
+}
+
+
+/* Return a set containing the names of the formal arguments expected
+   by the function `f'.  The value of each attribute is a Boolean
+   denoting whether it has a default value.  For instance,
+
+      functionArgs ({ x, y ? 123}: ...)
+   => { x = false; y = true; }
+
+   "Formal argument" here refers to the attributes pattern-matched by
+   the function.  Plain lambdas are not included, e.g.
+
+      functionArgs (x: ...)
+   => { }
+*/
+static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    if (args[0]->type != tLambda)
+        throw TypeError(format("‘functionArgs’ requires a function, at %1%") % pos);
+
+    if (!args[0]->lambda.fun->matchAttrs) {
+        state.mkAttrs(v, 0);
+        return;
+    }
+
+    state.mkAttrs(v, args[0]->lambda.fun->formals->formals.size());
+    foreach (Formals::Formals_::iterator, i, args[0]->lambda.fun->formals->formals)
+        // !!! should optimise booleans (allocate only once)
+        mkBool(*state.allocAttr(v, i->name), i->def);
+    v.attrs->sort();
+}
+
+
+/*************************************************************
+ * Lists
+ *************************************************************/
+
+
+/* Determine whether the argument is a list. */
+static void prim_isList(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    mkBool(v, args[0]->type == tList);
+}
+
+
+static void elemAt(EvalState & state, const Pos & pos, Value & list, int n, Value & v)
+{
+    state.forceList(list, pos);
+    if (n < 0 || (unsigned int) n >= list.list.length)
+        throw Error(format("list index %1% is out of bounds, at %2%") % n % pos);
+    state.forceValue(*list.list.elems[n]);
+    v = *list.list.elems[n];
+}
+
+
+/* Return the element at index n (counting from 0) of a list. */
+static void prim_elemAt(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    elemAt(state, pos, *args[0], state.forceInt(*args[1], pos), v);
+}
+
+
+/* Return the first element of a list. */
+static void prim_head(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    elemAt(state, pos, *args[0], 0, v);
+}
+
+
+/* Return a list consisting of everything but the first element of
+   a list.  Warning: this function takes O(n) time, so you probably
+   don't want to use it!  */
+static void prim_tail(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceList(*args[0], pos);
+    if (args[0]->list.length == 0)
+        throw Error(format("‘tail’ called on an empty list, at %1%") % pos);
+    state.mkList(v, args[0]->list.length - 1);
+    for (unsigned int n = 0; n < v.list.length; ++n)
+        v.list.elems[n] = args[0]->list.elems[n + 1];
+}
+
+
+/* Apply a function to every element of a list. */
+static void prim_map(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceFunction(*args[0], pos);
+    state.forceList(*args[1], pos);
+
+    state.mkList(v, args[1]->list.length);
+
+    for (unsigned int n = 0; n < v.list.length; ++n)
+        mkApp(*(v.list.elems[n] = state.allocValue()),
+            *args[0], *args[1]->list.elems[n]);
+}
+
+
+/* Filter a list using a predicate; that is, return a list containing
+   every element from the list for which the predicate function
+   returns true. */
+static void prim_filter(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceFunction(*args[0], pos);
+    state.forceList(*args[1], pos);
+
+    // FIXME: putting this on the stack is risky.
+    Value * vs[args[1]->list.length];
+    unsigned int k = 0;
+
+    bool same = true;
+    for (unsigned int n = 0; n < args[1]->list.length; ++n) {
+        Value res;
+        state.callFunction(*args[0], *args[1]->list.elems[n], res, noPos);
+        if (state.forceBool(res))
+            vs[k++] = args[1]->list.elems[n];
+        else
+            same = false;
+    }
+
+    if (same)
+        v = *args[1];
+    else {
+        state.mkList(v, k);
+        for (unsigned int n = 0; n < k; ++n) v.list.elems[n] = vs[n];
+    }
+}
+
+
+/* Return true if a list contains a given element. */
+static void prim_elem(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    bool res = false;
+    state.forceList(*args[1], pos);
+    for (unsigned int n = 0; n < args[1]->list.length; ++n)
+        if (state.eqValues(*args[0], *args[1]->list.elems[n])) {
+            res = true;
+            break;
+        }
+    mkBool(v, res);
+}
+
+
+/* Concatenate a list of lists. */
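+/* For illustration: builtins.concatLists [ [ 1 2 ] [ ] [ 3 ] ]
+   evaluates to [ 1 2 3 ]. */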
+static void prim_concatLists(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceList(*args[0], pos);
+    state.concatLists(v, args[0]->list.length, args[0]->list.elems, pos);
+}
+
+
+/* Return the length of a list.  This is an O(1) time operation. */
+static void prim_length(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceList(*args[0], pos);
+    mkInt(v, args[0]->list.length);
+}
+
+
+/*************************************************************
+ * Integer arithmetic
+ *************************************************************/
+
+
+static void prim_add(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    mkInt(v, state.forceInt(*args[0], pos) + state.forceInt(*args[1], pos));
+}
+
+
+static void prim_sub(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    mkInt(v, state.forceInt(*args[0], pos) - state.forceInt(*args[1], pos));
+}
+
+
+static void prim_mul(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    mkInt(v, state.forceInt(*args[0], pos) * state.forceInt(*args[1], pos));
+}
+
+
+static void prim_div(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    NixInt i2 = state.forceInt(*args[1], pos);
+    if (i2 == 0) throw EvalError(format("division by zero, at %1%") % pos);
+    mkInt(v, state.forceInt(*args[0], pos) / i2);
+}
+
+
+static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    state.forceValue(*args[0]);
+    state.forceValue(*args[1]);
+    CompareValues comp;
+    mkBool(v, comp(args[0], args[1]));
+}
+
+
+/*************************************************************
+ * String manipulation
+ *************************************************************/
+
+
+/* Convert the argument to a string.  Paths are *not* copied to the
+   store, so `toString /foo/bar' yields `"/foo/bar"', not
+   `"/nix/store/whatever..."'. */
+static void prim_toString(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    string s = state.coerceToString(pos, *args[0], context, true, false);
+    mkString(v, s, context);
+}
+
+
+/* `substring start len str' returns the substring of `str' starting
+   at character position `min(start, stringLength str)' inclusive and
+   ending at `min(start + len, stringLength str)'.  `start' must be
+   non-negative. */
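+/* For illustration: builtins.substring 1 2 "nix" evaluates to "ix",
+   and builtins.substring 5 2 "nix" to "" (the start position is
+   clamped to the string length). */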
+static void prim_substring(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    int start = state.forceInt(*args[0], pos);
+    int len = state.forceInt(*args[1], pos);
+    PathSet context;
+    string s = state.coerceToString(pos, *args[2], context);
+
+    if (start < 0) throw EvalError(format("negative start position in ‘substring’, at %1%") % pos);
+
+    mkString(v, (unsigned int) start >= s.size() ? "" : string(s, start, len), context);
+}
+
+
+static void prim_stringLength(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    string s = state.coerceToString(pos, *args[0], context);
+    mkInt(v, s.size());
+}
+
+
+static void prim_unsafeDiscardStringContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    string s = state.coerceToString(pos, *args[0], context);
+    mkString(v, s, PathSet());
+}
+
+
+/* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a
+   builder without causing the derivation to be built (for instance,
+   in the derivation that builds NARs in nix-push, when doing
+   source-only deployment).  This primop marks the string context so
+   that builtins.derivation adds the path to drv.inputSrcs rather than
+   drv.inputDrvs. */
+static void prim_unsafeDiscardOutputDependency(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    PathSet context;
+    string s = state.coerceToString(pos, *args[0], context);
+
+    PathSet context2;
+    foreach (PathSet::iterator, i, context) {
+        Path p = *i;
+        if (p.at(0) == '=') p = "~" + string(p, 1);
+        context2.insert(p);
+    }
+
+    mkString(v, s, context2);
+}
+
+
+/* Return the cryptographic hash of a string in base-16. */
+static void prim_hashString(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    string type = state.forceStringNoCtx(*args[0], pos);
+    HashType ht = parseHashType(type);
+    if (ht == htUnknown)
+      throw Error(format("unknown hash type ‘%1%’, at %2%") % type % pos);
+
+    PathSet context; // discarded
+    string s = state.forceString(*args[1], context);
+
+    mkString(v, printHash(hashString(ht, s)), context);
+};
+
+
+/*************************************************************
+ * Versions
+ *************************************************************/
+
+
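+/* Split a package name like "hello-2.10" into its name and version
+   components, e.g.
+     builtins.parseDrvName "hello-2.10"  =>  { name = "hello"; version = "2.10"; } */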
+static void prim_parseDrvName(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    string name = state.forceStringNoCtx(*args[0], pos);
+    DrvName parsed(name);
+    state.mkAttrs(v, 2);
+    mkString(*state.allocAttr(v, state.sName), parsed.name);
+    mkString(*state.allocAttr(v, state.symbols.create("version")), parsed.version);
+    v.attrs->sort();
+}
+
+
+static void prim_compareVersions(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    string version1 = state.forceStringNoCtx(*args[0], pos);
+    string version2 = state.forceStringNoCtx(*args[1], pos);
+    mkInt(v, compareVersions(version1, version2));
+}
+
+
+/*************************************************************
+ * Primop registration
+ *************************************************************/
+
+
+void EvalState::createBaseEnv()
+{
+    baseEnv.up = 0;
+
+    /* Add global constants such as `true' to the base environment. */
+    Value v;
+
+    /* `builtins' must be first! */
+    mkAttrs(v, 128);
+    addConstant("builtins", v);
+
+    mkBool(v, true);
+    addConstant("true", v);
+
+    mkBool(v, false);
+    addConstant("false", v);
+
+    mkNull(v);
+    addConstant("null", v);
+
+    mkInt(v, time(0));
+    addConstant("__currentTime", v);
+
+    mkString(v, settings.thisSystem.c_str());
+    addConstant("__currentSystem", v);
+
+    mkString(v, nixVersion.c_str());
+    addConstant("__nixVersion", v);
+
+    /* Language version.  This should be increased every time a new
+       language feature gets added.  It's not necessary to increase it
+       when primops get added, because you can just use `builtins ?
+       primOp' to check. */
+    mkInt(v, 3);
+    addConstant("__langVersion", v);
+
+    // Miscellaneous
+    addPrimOp("scopedImport", 2, prim_scopedImport);
+    Value * v2 = allocValue();
+    mkAttrs(*v2, 0);
+    mkApp(v, *baseEnv.values[baseEnvDispl - 1], *v2);
+    forceValue(v);
+    addConstant("import", v);
+    if (settings.enableImportNative)
+        addPrimOp("__importNative", 2, prim_importNative);
+    addPrimOp("__typeOf", 1, prim_typeOf);
+    addPrimOp("isNull", 1, prim_isNull);
+    addPrimOp("__isFunction", 1, prim_isFunction);
+    addPrimOp("__isString", 1, prim_isString);
+    addPrimOp("__isInt", 1, prim_isInt);
+    addPrimOp("__isBool", 1, prim_isBool);
+    addPrimOp("__genericClosure", 1, prim_genericClosure);
+    addPrimOp("abort", 1, prim_abort);
+    addPrimOp("throw", 1, prim_throw);
+    addPrimOp("__addErrorContext", 2, prim_addErrorContext);
+    addPrimOp("__tryEval", 1, prim_tryEval);
+    addPrimOp("__getEnv", 1, prim_getEnv);
+    addPrimOp("__trace", 2, prim_trace);
+
+    // Paths
+    addPrimOp("__toPath", 1, prim_toPath);
+    addPrimOp("__storePath", 1, prim_storePath);
+    addPrimOp("__pathExists", 1, prim_pathExists);
+    addPrimOp("baseNameOf", 1, prim_baseNameOf);
+    addPrimOp("dirOf", 1, prim_dirOf);
+    addPrimOp("__readFile", 1, prim_readFile);
+    addPrimOp("__findFile", 2, prim_findFile);
+
+    // Creating files
+    addPrimOp("__toXML", 1, prim_toXML);
+    addPrimOp("__toJSON", 1, prim_toJSON);
+    addPrimOp("__fromJSON", 1, prim_fromJSON);
+    addPrimOp("__toFile", 2, prim_toFile);
+    addPrimOp("__filterSource", 2, prim_filterSource);
+
+    // Sets
+    addPrimOp("__attrNames", 1, prim_attrNames);
+    addPrimOp("__getAttr", 2, prim_getAttr);
+    addPrimOp("__unsafeGetAttrPos", 2, prim_unsafeGetAttrPos);
+    addPrimOp("__hasAttr", 2, prim_hasAttr);
+    addPrimOp("__isAttrs", 1, prim_isAttrs);
+    addPrimOp("removeAttrs", 2, prim_removeAttrs);
+    addPrimOp("__listToAttrs", 1, prim_listToAttrs);
+    addPrimOp("__intersectAttrs", 2, prim_intersectAttrs);
+    addPrimOp("__functionArgs", 1, prim_functionArgs);
+
+    // Lists
+    addPrimOp("__isList", 1, prim_isList);
+    addPrimOp("__elemAt", 2, prim_elemAt);
+    addPrimOp("__head", 1, prim_head);
+    addPrimOp("__tail", 1, prim_tail);
+    addPrimOp("map", 2, prim_map);
+    addPrimOp("__filter", 2, prim_filter);
+    addPrimOp("__elem", 2, prim_elem);
+    addPrimOp("__concatLists", 1, prim_concatLists);
+    addPrimOp("__length", 1, prim_length);
+
+    // Integer arithmetic
+    addPrimOp("__add", 2, prim_add);
+    addPrimOp("__sub", 2, prim_sub);
+    addPrimOp("__mul", 2, prim_mul);
+    addPrimOp("__div", 2, prim_div);
+    addPrimOp("__lessThan", 2, prim_lessThan);
+
+    // String manipulation
+    addPrimOp("toString", 1, prim_toString);
+    addPrimOp("__substring", 3, prim_substring);
+    addPrimOp("__stringLength", 1, prim_stringLength);
+    addPrimOp("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext);
+    addPrimOp("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency);
+    addPrimOp("__hashString", 2, prim_hashString);
+
+    // Versions
+    addPrimOp("__parseDrvName", 1, prim_parseDrvName);
+    addPrimOp("__compareVersions", 2, prim_compareVersions);
+
+    // Derivations
+    addPrimOp("derivationStrict", 1, prim_derivationStrict);
+
+    /* Add a wrapper around the derivation primop that computes the
+       `drvPath' and `outPath' attributes lazily. */
+    string path = findFile("nix/derivation.nix");
+    assert(!path.empty());
+    sDerivationNix = symbols.create(path);
+    evalFile(path, v);
+    addConstant("derivation", v);
+
+    /* Add a value containing the current Nix expression search path. */
+    mkList(v, searchPath.size());
+    int n = 0;
+    for (auto & i : searchPath) {
+        v2 = v.list.elems[n++] = allocValue();
+        mkAttrs(*v2, 2);
+        mkString(*allocAttr(*v2, symbols.create("path")), i.second);
+        mkString(*allocAttr(*v2, symbols.create("prefix")), i.first);
+        v2->attrs->sort();
+    }
+    addConstant("__nixPath", v);
+
+    /* Now that we've added all primops, sort the `builtins' set,
+       because attribute lookups expect it to be sorted. */
+    baseEnv.values[0]->attrs->sort();
+}
+
+
+}
diff --git a/src/libexpr/symbol-table.hh b/src/libexpr/symbol-table.hh
new file mode 100644
index 000000000000..140662b51501
--- /dev/null
+++ b/src/libexpr/symbol-table.hh
@@ -0,0 +1,88 @@
+#pragma once
+
+#include "config.h"
+
+#include <map>
+#include <unordered_set>
+
+#include "types.hh"
+
+namespace nix {
+
+/* Symbol table used by the parser and evaluator to represent and look
+   up identifiers and attributes efficiently.  SymbolTable::create()
+   converts a string into a symbol.  Symbols have the property that
+   they can be compared efficiently (using a pointer equality test),
+   because the symbol table stores only one copy of each string. */
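+/* Illustrative sketch of the intended use:
+
+     SymbolTable table;
+     Symbol a = table.create("foo");
+     Symbol b = table.create("foo");
+     assert(a == b);   // both refer to the same interned string
+*/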
+
+class Symbol
+{
+private:
+    const string * s; // pointer into SymbolTable
+    Symbol(const string * s) : s(s) { };
+    friend class SymbolTable;
+
+public:
+    Symbol() : s(0) { };
+
+    bool operator == (const Symbol & s2) const
+    {
+        return s == s2.s;
+    }
+
+    bool operator != (const Symbol & s2) const
+    {
+        return s != s2.s;
+    }
+
+    bool operator < (const Symbol & s2) const
+    {
+        return s < s2.s;
+    }
+
+    operator const string & () const
+    {
+        return *s;
+    }
+
+    bool set() const
+    {
+        return s;
+    }
+
+    bool empty() const
+    {
+        return s->empty();
+    }
+
+    friend std::ostream & operator << (std::ostream & str, const Symbol & sym);
+};
+
+inline std::ostream & operator << (std::ostream & str, const Symbol & sym)
+{
+    str << *sym.s;
+    return str;
+}
+
+class SymbolTable
+{
+private:
+    typedef std::unordered_set<string> Symbols;
+    Symbols symbols;
+
+public:
+    Symbol create(const string & s)
+    {
+        std::pair<Symbols::iterator, bool> res = symbols.insert(s);
+        return Symbol(&*res.first);
+    }
+
+    unsigned int size() const
+    {
+        return symbols.size();
+    }
+
+    size_t totalSize() const;
+};
+
+}
diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc
new file mode 100644
index 000000000000..a2004df5c8c5
--- /dev/null
+++ b/src/libexpr/value-to-json.cc
@@ -0,0 +1,86 @@
+#include "value-to-json.hh"
+#include "eval-inline.hh"
+#include "util.hh"
+
+#include <cstdlib>
+
+
+namespace nix {
+
+
+void escapeJSON(std::ostream & str, const string & s)
+{
+    str << "\"";
+    foreach (string::const_iterator, i, s)
+        if (*i == '\"' || *i == '\\') str << "\\" << *i;
+        else if (*i == '\n') str << "\\n";
+        else if (*i == '\r') str << "\\r";
+        else if (*i == '\t') str << "\\t";
+        else str << *i;
+    str << "\"";
+}
+
+
+void printValueAsJSON(EvalState & state, bool strict,
+    Value & v, std::ostream & str, PathSet & context)
+{
+    checkInterrupt();
+
+    if (strict) state.forceValue(v);
+
+    switch (v.type) {
+
+        case tInt:
+            str << v.integer;
+            break;
+
+        case tBool:
+            str << (v.boolean ? "true" : "false");
+            break;
+
+        case tString:
+            copyContext(v, context);
+            escapeJSON(str, v.string.s);
+            break;
+
+        case tPath:
+            escapeJSON(str, state.copyPathToStore(context, v.path));
+            break;
+
+        case tNull:
+            str << "null";
+            break;
+
+        case tAttrs: {
+            Bindings::iterator i = v.attrs->find(state.sOutPath);
+            if (i == v.attrs->end()) {
+                JSONObject json(str);
+                StringSet names;
+                foreach (Bindings::iterator, i, *v.attrs)
+                    names.insert(i->name);
+                foreach (StringSet::iterator, i, names) {
+                    Attr & a(*v.attrs->find(state.symbols.create(*i)));
+                    json.attr(*i);
+                    printValueAsJSON(state, strict, *a.value, str, context);
+                }
+            } else
+                printValueAsJSON(state, strict, *i->value, str, context);
+            break;
+        }
+
+        case tList: {
+            JSONList json(str);
+            for (unsigned int n = 0; n < v.list.length; ++n) {
+                json.elem();
+                printValueAsJSON(state, strict, *v.list.elems[n], str, context);
+            }
+            break;
+        }
+
+        default:
+            throw TypeError(format("cannot convert %1% to JSON") % showType(v));
+    }
+}
+
+
+}
diff --git a/src/libexpr/value-to-json.hh b/src/libexpr/value-to-json.hh
new file mode 100644
index 000000000000..e3a97efe4269
--- /dev/null
+++ b/src/libexpr/value-to-json.hh
@@ -0,0 +1,64 @@
+#pragma once
+
+#include "nixexpr.hh"
+#include "eval.hh"
+
+#include <string>
+#include <map>
+
+namespace nix {
+
+void printValueAsJSON(EvalState & state, bool strict,
+    Value & v, std::ostream & out, PathSet & context);
+
+void escapeJSON(std::ostream & str, const string & s);
+
+struct JSONObject
+{
+    std::ostream & str;
+    bool first;
+    JSONObject(std::ostream & str) : str(str), first(true)
+    {
+        str << "{";
+    }
+    ~JSONObject()
+    {
+        str << "}";
+    }
+    void attr(const string & s)
+    {
+        if (!first) str << ","; else first = false;
+        escapeJSON(str, s);
+        str << ":";
+    }
+    void attr(const string & s, const string & t)
+    {
+        attr(s);
+        escapeJSON(str, t);
+    }
+};
+
+struct JSONList
+{
+    std::ostream & str;
+    bool first;
+    JSONList(std::ostream & str) : str(str), first(true)
+    {
+        str << "[";
+    }
+    ~JSONList()
+    {
+        str << "]";
+    }
+    void elem()
+    {
+        if (!first) str << ","; else first = false;
+    }
+    void elem(const string & s)
+    {
+        elem();
+        escapeJSON(str, s);
+    }
+};
+
+}
diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc
new file mode 100644
index 000000000000..3934a83eec25
--- /dev/null
+++ b/src/libexpr/value-to-xml.cc
@@ -0,0 +1,163 @@
+#include "value-to-xml.hh"
+#include "xml-writer.hh"
+#include "eval-inline.hh"
+#include "util.hh"
+
+#include <cstdlib>
+
+
+namespace nix {
+
+    
+static XMLAttrs singletonAttrs(const string & name, const string & value)
+{
+    XMLAttrs attrs;
+    attrs[name] = value;
+    return attrs;
+}
+
+
+static void printValueAsXML(EvalState & state, bool strict, bool location,
+    Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen);
+
+
+static void posToXML(XMLAttrs & xmlAttrs, const Pos & pos)
+{
+    xmlAttrs["path"] = pos.file;
+    xmlAttrs["line"] = (format("%1%") % pos.line).str();
+    xmlAttrs["column"] = (format("%1%") % pos.column).str();
+}
+
+
+static void showAttrs(EvalState & state, bool strict, bool location,
+    Bindings & attrs, XMLWriter & doc, PathSet & context, PathSet & drvsSeen)
+{
+    StringSet names;
+    
+    foreach (Bindings::iterator, i, attrs)
+        names.insert(i->name);
+    
+    foreach (StringSet::iterator, i, names) {
+        Attr & a(*attrs.find(state.symbols.create(*i)));
+        
+        XMLAttrs xmlAttrs;
+        xmlAttrs["name"] = *i;
+        if (location && a.pos != &noPos) posToXML(xmlAttrs, *a.pos);
+        
+        XMLOpenElement _(doc, "attr", xmlAttrs);
+        printValueAsXML(state, strict, location,
+            *a.value, doc, context, drvsSeen);
+    }
+}
+
+
+static void printValueAsXML(EvalState & state, bool strict, bool location,
+    Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen)
+{
+    checkInterrupt();
+
+    if (strict) state.forceValue(v);
+        
+    switch (v.type) {
+
+        case tInt:
+            doc.writeEmptyElement("int", singletonAttrs("value", (format("%1%") % v.integer).str()));
+            break;
+
+        case tBool:
+            doc.writeEmptyElement("bool", singletonAttrs("value", v.boolean ? "true" : "false"));
+            break;
+
+        case tString:
+            /* !!! show the context? */
+            copyContext(v, context);
+            doc.writeEmptyElement("string", singletonAttrs("value", v.string.s));
+            break;
+
+        case tPath:
+            doc.writeEmptyElement("path", singletonAttrs("value", v.path));
+            break;
+
+        case tNull:
+            doc.writeEmptyElement("null");
+            break;
+
+        case tAttrs:
+            if (state.isDerivation(v)) {
+                XMLAttrs xmlAttrs;
+            
+                Bindings::iterator a = v.attrs->find(state.symbols.create("derivation"));
+
+                Path drvPath;
+                a = v.attrs->find(state.sDrvPath);
+                if (a != v.attrs->end()) {
+                    if (strict) state.forceValue(*a->value);
+                    if (a->value->type == tString)
+                        xmlAttrs["drvPath"] = drvPath = a->value->string.s;
+                }
+        
+                a = v.attrs->find(state.sOutPath);
+                if (a != v.attrs->end()) {
+                    if (strict) state.forceValue(*a->value);
+                    if (a->value->type == tString)
+                        xmlAttrs["outPath"] = a->value->string.s;
+                }
+
+                XMLOpenElement _(doc, "derivation", xmlAttrs);
+
+                if (drvPath != "" && drvsSeen.find(drvPath) == drvsSeen.end()) {
+                    drvsSeen.insert(drvPath);
+                    showAttrs(state, strict, location, *v.attrs, doc, context, drvsSeen);
+                } else
+                    doc.writeEmptyElement("repeated");
+            }
+
+            else {
+                XMLOpenElement _(doc, "attrs");
+                showAttrs(state, strict, location, *v.attrs, doc, context, drvsSeen);
+            }
+            
+            break;
+
+        case tList: {
+            XMLOpenElement _(doc, "list");
+            for (unsigned int n = 0; n < v.list.length; ++n)
+                printValueAsXML(state, strict, location, *v.list.elems[n], doc, context, drvsSeen);
+            break;
+        }
+
+        case tLambda: {
+            XMLAttrs xmlAttrs;
+            if (location) posToXML(xmlAttrs, v.lambda.fun->pos);
+            XMLOpenElement _(doc, "function", xmlAttrs);
+            
+            if (v.lambda.fun->matchAttrs) {
+                XMLAttrs attrs;
+                if (!v.lambda.fun->arg.empty()) attrs["name"] = v.lambda.fun->arg;
+                if (v.lambda.fun->formals->ellipsis) attrs["ellipsis"] = "1";
+                XMLOpenElement _(doc, "attrspat", attrs);
+                foreach (Formals::Formals_::iterator, i, v.lambda.fun->formals->formals)
+                    doc.writeEmptyElement("attr", singletonAttrs("name", i->name));
+            } else
+                doc.writeEmptyElement("varpat", singletonAttrs("name", v.lambda.fun->arg));
+            
+            break;
+        }
+
+        default:
+            doc.writeEmptyElement("unevaluated");
+    }
+}
+
+
+void printValueAsXML(EvalState & state, bool strict, bool location,
+    Value & v, std::ostream & out, PathSet & context)
+{
+    XMLWriter doc(true, out);
+    XMLOpenElement root(doc, "expr");
+    PathSet drvsSeen;    
+    printValueAsXML(state, strict, location, v, doc, context, drvsSeen);
+}
+
+ 
+}
diff --git a/src/libexpr/value-to-xml.hh b/src/libexpr/value-to-xml.hh
new file mode 100644
index 000000000000..97657327edba
--- /dev/null
+++ b/src/libexpr/value-to-xml.hh
@@ -0,0 +1,14 @@
+#pragma once
+
+#include "nixexpr.hh"
+#include "eval.hh"
+
+#include <string>
+#include <map>
+
+namespace nix {
+
+void printValueAsXML(EvalState & state, bool strict, bool location,
+    Value & v, std::ostream & out, PathSet & context);
+    
+}
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
new file mode 100644
index 000000000000..2feb2f9492ca
--- /dev/null
+++ b/src/libexpr/value.hh
@@ -0,0 +1,162 @@
+#pragma once
+
+#include "symbol-table.hh"
+
+namespace nix {
+
+
+typedef enum {
+    tInt = 1,
+    tBool,
+    tString,
+    tPath,
+    tNull,
+    tAttrs,
+    tList,
+    tThunk,
+    tApp,
+    tLambda,
+    tBlackhole,
+    tPrimOp,
+    tPrimOpApp,
+} ValueType;
+
+
+class Bindings;
+struct Env;
+struct Expr;
+struct ExprLambda;
+struct PrimOp;
+class Symbol;
+
+
+typedef long NixInt;
+
+
+struct Value
+{
+    ValueType type;
+    union
+    {
+        NixInt integer;
+        bool boolean;
+
+        /* Strings in the evaluator carry a so-called `context' which
+           is a list of strings representing store paths.  This is to
+           allow users to write things like
+
+             "--with-freetype2-library=" + freetype + "/lib"
+
+           where `freetype' is a derivation (or a source to be copied
+           to the store).  If we just concatenated the strings without
+           keeping track of the referenced store paths, then if the
+           string is used as a derivation attribute, the derivation
+           will not have the correct dependencies in its inputDrvs and
+           inputSrcs.
+
+           The semantics of the context is as follows: when a string
+           with context C is used as a derivation attribute, then the
+           derivations in C will be added to the inputDrvs of the
+           derivation, and the other store paths in C will be added to
+           the inputSrcs of the derivation.
+
+           For canonicity, the store paths should be in sorted order. */
+        struct {
+            const char * s;
+            const char * * context; // must be in sorted order
+        } string;
+
+        const char * path;
+        Bindings * attrs;
+        struct {
+            unsigned int length;
+            Value * * elems;
+        } list;
+        struct {
+            Env * env;
+            Expr * expr;
+        } thunk;
+        struct {
+            Value * left, * right;
+        } app;
+        struct {
+            Env * env;
+            ExprLambda * fun;
+        } lambda;
+        PrimOp * primOp;
+        struct {
+            Value * left, * right;
+        } primOpApp;
+    };
+};
+
+
+/* After overwriting an app node, be sure to clear pointers in the
+   Value to ensure that the target isn't kept alive unnecessarily. */
+static inline void clearValue(Value & v)
+{
+    v.app.right = 0;
+}
+
+
+static inline void mkInt(Value & v, NixInt n)
+{
+    clearValue(v);
+    v.type = tInt;
+    v.integer = n;
+}
+
+
+static inline void mkBool(Value & v, bool b)
+{
+    clearValue(v);
+    v.type = tBool;
+    v.boolean = b;
+}
+
+
+static inline void mkNull(Value & v)
+{
+    v.type = tNull;
+    v.app.left = v.app.right = 0; // scrub
+}
+
+
+static inline void mkApp(Value & v, Value & left, Value & right)
+{
+    v.type = tApp;
+    v.app.left = &left;
+    v.app.right = &right;
+}
+
+
+static inline void mkStringNoCopy(Value & v, const char * s)
+{
+    v.type = tString;
+    v.string.s = s;
+    v.string.context = 0;
+}
+
+
+static inline void mkString(Value & v, const Symbol & s)
+{
+    mkStringNoCopy(v, ((const string &) s).c_str());
+}
+
+
+void mkString(Value & v, const char * s);
+
+
+static inline void mkPathNoCopy(Value & v, const char * s)
+{
+    clearValue(v);
+    v.type = tPath;
+    v.path = s;
+}
+
+
+void mkPath(Value & v, const char * s);
+
+
+}
diff --git a/src/libmain/local.mk b/src/libmain/local.mk
new file mode 100644
index 000000000000..71a07d1979ab
--- /dev/null
+++ b/src/libmain/local.mk
@@ -0,0 +1,11 @@
+libraries += libmain
+
+libmain_NAME = libnixmain
+
+libmain_DIR := $(d)
+
+libmain_SOURCES := $(wildcard $(d)/*.cc)
+
+libmain_LIBS = libstore libutil libformat
+
+libmain_ALLOW_UNDEFINED = 1
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
new file mode 100644
index 000000000000..263e1744c089
--- /dev/null
+++ b/src/libmain/shared.cc
@@ -0,0 +1,327 @@
+#include "config.h"
+
+#include "shared.hh"
+#include "globals.hh"
+#include "store-api.hh"
+#include "util.hh"
+#include "misc.hh"
+
+#include <iostream>
+#include <cctype>
+#include <exception>
+
+#include <sys/time.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <signal.h>
+
+
+namespace nix {
+
+
+volatile sig_atomic_t blockInt = 0;
+
+
+static void sigintHandler(int signo)
+{
+    if (!blockInt) {
+        _isInterrupted = 1;
+        blockInt = 1;
+    }
+}
+
+
+static bool gcWarning = true;
+
+void printGCWarning()
+{
+    if (!gcWarning) return;
+    static bool haveWarned = false;
+    warnOnce(haveWarned,
+        "you did not specify ‘--add-root’; "
+        "the result might be removed by the garbage collector");
+}
+
+
+void printMissing(StoreAPI & store, const PathSet & paths)
+{
+    unsigned long long downloadSize, narSize;
+    PathSet willBuild, willSubstitute, unknown;
+    queryMissing(store, paths, willBuild, willSubstitute, unknown, downloadSize, narSize);
+    printMissing(willBuild, willSubstitute, unknown, downloadSize, narSize);
+}
+
+
+void printMissing(const PathSet & willBuild,
+    const PathSet & willSubstitute, const PathSet & unknown,
+    unsigned long long downloadSize, unsigned long long narSize)
+{
+    if (!willBuild.empty()) {
+        printMsg(lvlInfo, format("these derivations will be built:"));
+        foreach (PathSet::iterator, i, willBuild)
+            printMsg(lvlInfo, format("  %1%") % *i);
+    }
+
+    if (!willSubstitute.empty()) {
+        printMsg(lvlInfo, format("these paths will be fetched (%.2f MiB download, %.2f MiB unpacked):")
+            % (downloadSize / (1024.0 * 1024.0))
+            % (narSize / (1024.0 * 1024.0)));
+        foreach (PathSet::iterator, i, willSubstitute)
+            printMsg(lvlInfo, format("  %1%") % *i);
+    }
+
+    if (!unknown.empty()) {
+        printMsg(lvlInfo, format("don't know how to build these paths%1%:")
+            % (settings.readOnlyMode ? " (may be caused by read-only store access)" : ""));
+        foreach (PathSet::iterator, i, unknown)
+            printMsg(lvlInfo, format("  %1%") % *i);
+    }
+}
+
+
+static void setLogType(string lt)
+{
+    if (lt == "pretty") logType = ltPretty;
+    else if (lt == "escapes") logType = ltEscapes;
+    else if (lt == "flat") logType = ltFlat;
+    else throw UsageError("unknown log type");
+}
+
+
+string getArg(const string & opt,
+    Strings::iterator & i, const Strings::iterator & end)
+{
+    ++i;
+    if (i == end) throw UsageError(format("‘%1%’ requires an argument") % opt);
+    return *i;
+}
+
+
+void detectStackOverflow();
+
+
+void initNix()
+{
+    /* Turn on buffering for cerr. */
+#if HAVE_PUBSETBUF
+    static char buf[1024];
+    std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf));
+#endif
+
+    std::ios::sync_with_stdio(false);
+
+    settings.processEnvironment();
+    settings.loadConfFile();
+
+    /* Catch SIGINT. */
+    struct sigaction act;
+    act.sa_handler = sigintHandler;
+    sigemptyset(&act.sa_mask);
+    act.sa_flags = 0;
+    if (sigaction(SIGINT, &act, 0))
+        throw SysError("installing handler for SIGINT");
+    if (sigaction(SIGTERM, &act, 0))
+        throw SysError("installing handler for SIGTERM");
+    if (sigaction(SIGHUP, &act, 0))
+        throw SysError("installing handler for SIGHUP");
+
+    /* Ignore SIGPIPE. */
+    act.sa_handler = SIG_IGN;
+    act.sa_flags = 0;
+    if (sigaction(SIGPIPE, &act, 0))
+        throw SysError("ignoring SIGPIPE");
+
+    /* Reset SIGCHLD to its default. */
+    act.sa_handler = SIG_DFL;
+    act.sa_flags = 0;
+    if (sigaction(SIGCHLD, &act, 0))
+        throw SysError("resetting SIGCHLD");
+
+    /* Register a SIGSEGV handler to detect stack overflows. */
+    detectStackOverflow();
+
+    /* There is no privacy in the Nix system ;-)  At least not for
+       now.  In particular, store objects should be readable by
+       everybody. */
+    umask(0022);
+
+    /* Initialise the PRNG. */
+    struct timeval tv;
+    gettimeofday(&tv, 0);
+    srandom(tv.tv_usec);
+
+    if (char *pack = getenv("_NIX_OPTIONS"))
+        settings.unpack(pack);
+}
+
+
+void parseCmdLine(int argc, char * * argv,
+    std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg)
+{
+    /* Put the arguments in a vector. */
+    Strings args;
+    argc--; argv++;
+    while (argc--) args.push_back(*argv++);
+
+    /* Process default options. */
+    for (Strings::iterator i = args.begin(); i != args.end(); ++i) {
+        string arg = *i;
+
+        /* Expand compound dash options (i.e., `-qlf' -> `-q -l -f'). */
+        if (arg.length() > 2 && arg[0] == '-' && arg[1] != '-' && isalpha(arg[1])) {
+            *i = (string) "-" + arg[1];
+            auto next = i; ++next;
+            for (unsigned int j = 2; j < arg.length(); j++)
+                if (isalpha(arg[j]))
+                    args.insert(next, (string) "-" + arg[j]);
+                else {
+                    args.insert(next, string(arg, j));
+                    break;
+                }
+            arg = *i;
+        }
+
+        if (arg == "--verbose" || arg == "-v") verbosity = (Verbosity) (verbosity + 1);
+        else if (arg == "--quiet") verbosity = verbosity > lvlError ? (Verbosity) (verbosity - 1) : lvlError;
+        else if (arg == "--log-type") {
+            string s = getArg(arg, i, args.end());
+            setLogType(s);
+        }
+        else if (arg == "--no-build-output" || arg == "-Q")
+            settings.buildVerbosity = lvlVomit;
+        else if (arg == "--print-build-trace")
+            settings.printBuildTrace = true;
+        else if (arg == "--keep-failed" || arg == "-K")
+            settings.keepFailed = true;
+        else if (arg == "--keep-going" || arg == "-k")
+            settings.keepGoing = true;
+        else if (arg == "--fallback")
+            settings.set("build-fallback", "true");
+        else if (arg == "--max-jobs" || arg == "-j")
+            settings.set("build-max-jobs", getArg(arg, i, args.end()));
+        else if (arg == "--cores")
+            settings.set("build-cores", getArg(arg, i, args.end()));
+        else if (arg == "--readonly-mode")
+            settings.readOnlyMode = true;
+        else if (arg == "--max-silent-time")
+            settings.set("build-max-silent-time", getArg(arg, i, args.end()));
+        else if (arg == "--timeout")
+            settings.set("build-timeout", getArg(arg, i, args.end()));
+        else if (arg == "--no-build-hook")
+            settings.useBuildHook = false;
+        else if (arg == "--show-trace")
+            settings.showTrace = true;
+        else if (arg == "--no-gc-warning")
+            gcWarning = false;
+        else if (arg == "--option") {
+            ++i; if (i == args.end()) throw UsageError("‘--option’ requires two arguments");
+            string name = *i;
+            ++i; if (i == args.end()) throw UsageError("‘--option’ requires two arguments");
+            string value = *i;
+            settings.set(name, value);
+        }
+        else {
+            if (!parseArg(i, args.end()))
+                throw UsageError(format("unrecognised option ‘%1%’") % *i);
+        }
+    }
+
+    settings.update();
+}
+
+
+void printVersion(const string & programName)
+{
+    std::cout << format("%1% (Nix) %2%") % programName % nixVersion << std::endl;
+    throw Exit();
+}
+
+
+void showManPage(const string & name)
+{
+    restoreSIGPIPE();
+    execlp("man", "man", name.c_str(), NULL);
+    throw SysError(format("command ‘man %1%’ failed") % name.c_str());
+}
+
+
+int handleExceptions(const string & programName, std::function<void()> fun)
+{
+    string error = ANSI_RED "error:" ANSI_NORMAL " ";
+    try {
+        try {
+            fun();
+        } catch (...) {
+            /* Subtle: we have to make sure that any `interrupted'
+               condition is discharged before we reach printMsg()
+               below, since otherwise it will throw an (uncaught)
+               exception. */
+            blockInt = 1; /* ignore further SIGINTs */
+            _isInterrupted = 0;
+            throw;
+        }
+    } catch (Exit & e) {
+        return e.status;
+    } catch (UsageError & e) {
+        printMsg(lvlError,
+            format(error + " %1%\nTry ‘%2% --help’ for more information.")
+            % e.what() % programName);
+        return 1;
+    } catch (BaseError & e) {
+        printMsg(lvlError, format(error + "%1%%2%") % (settings.showTrace ? e.prefix() : "") % e.msg());
+        if (e.prefix() != "" && !settings.showTrace)
+            printMsg(lvlError, "(use ‘--show-trace’ to show detailed location information)");
+        return e.status;
+    } catch (std::bad_alloc & e) {
+        printMsg(lvlError, error + "out of memory");
+        return 1;
+    } catch (std::exception & e) {
+        printMsg(lvlError, error + e.what());
+        return 1;
+    }
+
+    return 0;
+}
+
+
+RunPager::RunPager()
+{
+    string pager = getEnv("PAGER");
+    if (!isatty(STDOUT_FILENO) || pager.empty()) return;
+
+    /* Ignore SIGINT. The pager will handle it (and we'll get
+       SIGPIPE). */
+    struct sigaction act;
+    act.sa_handler = SIG_IGN;
+    act.sa_flags = 0;
+    sigemptyset(&act.sa_mask);
+    if (sigaction(SIGINT, &act, 0)) throw SysError("ignoring SIGINT");
+
+    restoreSIGPIPE();
+
+    Pipe toPager;
+    toPager.create();
+
+    pid = startProcess([&]() {
+        if (dup2(toPager.readSide, STDIN_FILENO) == -1)
+            throw SysError("dupping stdin");
+        execl("/bin/sh", "sh", "-c", pager.c_str(), NULL);
+        throw SysError(format("executing ‘%1%’") % pager);
+    });
+
+    if (dup2(toPager.writeSide, STDOUT_FILENO) == -1)
+        throw SysError("dupping stdout");
+
+}
+
+
+RunPager::~RunPager()
+{
+    if (pid != -1) {
+        close(STDOUT_FILENO);
+        pid.wait(true);
+    }
+}
+
+
+}
diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh
new file mode 100644
index 000000000000..b662c6ccea96
--- /dev/null
+++ b/src/libmain/shared.hh
@@ -0,0 +1,86 @@
+#pragma once
+
+#include "util.hh"
+
+#include <signal.h>
+
+#include <locale>
+
+
+namespace nix {
+
+MakeError(UsageError, nix::Error);
+
+class Exit : public std::exception
+{
+public:
+    int status;
+    Exit() : status(0) { }
+    Exit(int status) : status(status) { }
+};
+
+class StoreAPI;
+
+int handleExceptions(const string & programName, std::function<void()> fun);
+
+void initNix();
+
+void parseCmdLine(int argc, char * * argv,
+    std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg);
+
+void printVersion(const string & programName);
+
+/* Ugh.  No better place to put this. */
+void printGCWarning();
+
+void printMissing(StoreAPI & store, const PathSet & paths);
+
+void printMissing(const PathSet & willBuild,
+    const PathSet & willSubstitute, const PathSet & unknown,
+    unsigned long long downloadSize, unsigned long long narSize);
+
+string getArg(const string & opt,
+    Strings::iterator & i, const Strings::iterator & end);
+
+template<class N> N getIntArg(const string & opt,
+    Strings::iterator & i, const Strings::iterator & end, bool allowUnit)
+{
+    ++i;
+    if (i == end) throw UsageError(format("‘%1%’ requires an argument") % opt);
+    string s = *i;
+    N multiplier = 1;
+    if (allowUnit && !s.empty()) {
+        char u = std::toupper(*s.rbegin());
+        if (std::isalpha(u)) {
+            if (u == 'K') multiplier = 1ULL << 10;
+            else if (u == 'M') multiplier = 1ULL << 20;
+            else if (u == 'G') multiplier = 1ULL << 30;
+            else if (u == 'T') multiplier = 1ULL << 40;
+            else throw UsageError(format("invalid unit specifier ‘%1%’") % u);
+            s.resize(s.size() - 1);
+        }
+    }
+    N n;
+    if (!string2Int(s, n))
+        throw UsageError(format("‘%1%’ requires an integer argument") % opt);
+    return n * multiplier;
+}
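+/* For example, with `allowUnit' set, an argument of "100M" is parsed
+   as 100 * 2^20; without a unit suffix the number is used as-is. */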
+
+/* Show the manual page for the specified program. */
+void showManPage(const string & name);
+
+/* The constructor of this class starts a pager if stdout is a
+   terminal and $PAGER is set. Stdout is redirected to the pager. */
+class RunPager
+{
+public:
+    RunPager();
+    ~RunPager();
+
+private:
+    Pid pid;
+};
+
+extern volatile ::sig_atomic_t blockInt;
+
+}
diff --git a/src/libmain/stack.cc b/src/libmain/stack.cc
new file mode 100644
index 000000000000..64df95547e0b
--- /dev/null
+++ b/src/libmain/stack.cc
@@ -0,0 +1,72 @@
+#include "config.h"
+
+#include "types.hh"
+
+#include <cstring>
+#include <cstddef>
+#include <cstdlib>
+
+#include <unistd.h>
+#include <signal.h>
+
+namespace nix {
+
+
+static void sigsegvHandler(int signo, siginfo_t * info, void * ctx)
+{
+    /* Detect stack overflows by comparing the faulting address with
+       the stack pointer.  Unfortunately, getting the stack pointer is
+       not portable. */
+    bool haveSP = true;
+    char * sp;
+#if defined(__x86_64__) && defined(REG_RSP)
+    sp = (char *) ((ucontext *) ctx)->uc_mcontext.gregs[REG_RSP];
+#elif defined(REG_ESP)
+    sp = (char *) ((ucontext *) ctx)->uc_mcontext.gregs[REG_ESP];
+#else
+    haveSP = false;
+#endif
+
+    if (haveSP) {
+        ptrdiff_t diff = (char *) info->si_addr - sp;
+        if (diff < 0) diff = -diff;
+        if (diff < 4096) {
+            char msg[] = "error: stack overflow (possible infinite recursion)\n";
+            write(2, msg, strlen(msg));
+            _exit(1); // maybe abort instead?
+        }
+    }
+
+    /* Restore default behaviour (i.e. segfault and dump core). */
+    struct sigaction act;
+    sigfillset(&act.sa_mask);
+    act.sa_handler = SIG_DFL;
+    act.sa_flags = 0;
+    if (sigaction(SIGSEGV, &act, 0)) abort();
+}
+
+
+void detectStackOverflow()
+{
+#if defined(SA_SIGINFO) && defined (SA_ONSTACK)
+    /* Install a SIGSEGV handler to detect stack overflows.  This
+       requires an alternative stack, otherwise the signal cannot be
+       delivered when we're out of stack space. */
+    stack_t stack;
+    stack.ss_size = 4096 * 4 + MINSIGSTKSZ;
+    stack.ss_sp = new char[stack.ss_size];
+    if (!stack.ss_sp) throw Error("cannot allocate alternative stack");
+    stack.ss_flags = 0;
+    if (sigaltstack(&stack, 0) == -1) throw SysError("cannot set alternative stack");
+
+    struct sigaction act;
+    sigfillset(&act.sa_mask);
+    act.sa_sigaction = sigsegvHandler;
+    act.sa_flags = SA_SIGINFO | SA_ONSTACK;
+    if (sigaction(SIGSEGV, &act, 0))
+        throw SysError("resetting SIGCHLD");
+#endif
+}
+
+
+}
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
new file mode 100644
index 000000000000..856e5f8200d6
--- /dev/null
+++ b/src/libstore/build.cc
@@ -0,0 +1,3327 @@
+#include "config.h"
+
+#include "references.hh"
+#include "pathlocks.hh"
+#include "misc.hh"
+#include "globals.hh"
+#include "local-store.hh"
+#include "util.hh"
+#include "archive.hh"
+#include "affinity.hh"
+
+#include <map>
+#include <sstream>
+#include <algorithm>
+
+#include <limits.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/utsname.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <cstring>
+
+#include <pwd.h>
+#include <grp.h>
+
+#include <bzlib.h>
+
+/* Includes required for chroot support. */
+#if HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#if HAVE_SYS_MOUNT_H
+#include <sys/mount.h>
+#endif
+#if HAVE_SCHED_H
+#include <sched.h>
+#endif
+
+/* In GNU libc 2.11, <sys/mount.h> does not define `MS_PRIVATE', but
+   <linux/fs.h> does.  */
+#if !defined MS_PRIVATE && defined HAVE_LINUX_FS_H
+#include <linux/fs.h>
+#endif
+
+#define CHROOT_ENABLED HAVE_CHROOT && HAVE_UNSHARE && HAVE_SYS_MOUNT_H && defined(MS_BIND) && defined(MS_PRIVATE) && defined(CLONE_NEWNS)
+
+#if CHROOT_ENABLED
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <netinet/ip.h>
+#endif
+
+#if HAVE_SYS_PERSONALITY_H
+#include <sys/personality.h>
+#define CAN_DO_LINUX32_BUILDS
+#endif
+
+#if HAVE_STATVFS
+#include <sys/statvfs.h>
+#endif
+
+
+namespace nix {
+
+using std::map;
+
+
+static string pathNullDevice = "/dev/null";
+
+
+/* Forward definition. */
+class Worker;
+struct HookInstance;
+
+
+/* A pointer to a goal. */
+class Goal;
+typedef std::shared_ptr<Goal> GoalPtr;
+typedef std::weak_ptr<Goal> WeakGoalPtr;
+
+/* Set of goals. */
+typedef set<GoalPtr> Goals;
+typedef list<WeakGoalPtr> WeakGoals;
+
+/* A map of paths to goals (and the other way around). */
+typedef map<Path, WeakGoalPtr> WeakGoalMap;
+
+
+
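+/* A goal represents a unit of work known to the worker, such as
+   building a derivation or substituting a store path.  Goals form a
+   dependency graph: a goal waits for its `waitees' and notifies its
+   `waiters' when it reaches a final exit code. */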
+class Goal : public std::enable_shared_from_this<Goal>
+{
+public:
+    typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode;
+
+protected:
+
+    /* Backlink to the worker. */
+    Worker & worker;
+
+    /* Goals that this goal is waiting for. */
+    Goals waitees;
+
+    /* Goals waiting for this one to finish.  Must use weak pointers
+       here to prevent cycles. */
+    WeakGoals waiters;
+
+    /* Number of goals we are/were waiting for that have failed. */
+    unsigned int nrFailed;
+
+    /* Number of substitution goals we are/were waiting for that
+       failed because there are no substituters. */
+    unsigned int nrNoSubstituters;
+
+    /* Number of substitution goals we are/were waiting for that
+       failed because they had unsubstitutable references. */
+    unsigned int nrIncompleteClosure;
+
+    /* Name of this goal for debugging purposes. */
+    string name;
+
+    /* Whether the goal is finished. */
+    ExitCode exitCode;
+
+    Goal(Worker & worker) : worker(worker)
+    {
+        nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
+        exitCode = ecBusy;
+    }
+
+    virtual ~Goal()
+    {
+        trace("goal destroyed");
+    }
+
+public:
+    virtual void work() = 0;
+
+    void addWaitee(GoalPtr waitee);
+
+    virtual void waiteeDone(GoalPtr waitee, ExitCode result);
+
+    virtual void handleChildOutput(int fd, const string & data)
+    {
+        abort();
+    }
+
+    virtual void handleEOF(int fd)
+    {
+        abort();
+    }
+
+    void trace(const format & f);
+
+    string getName()
+    {
+        return name;
+    }
+
+    ExitCode getExitCode()
+    {
+        return exitCode;
+    }
+
+    /* Cancel the goal.  It should wake up its waiters, get rid of any
+       running child processes that are being monitored by the worker
+       (important!), etc. */
+    virtual void cancel(bool timeout) = 0;
+
+protected:
+    void amDone(ExitCode result);
+};
+
+
+/* A mapping used to remember for each child process to what goal it
+   belongs, and file descriptors for receiving log data and output
+   path creation commands. */
+struct Child
+{
+    WeakGoalPtr goal;
+    set<int> fds;
+    bool respectTimeouts;
+    bool inBuildSlot;
+    time_t lastOutput; /* time we last got output on stdout/stderr */
+    time_t timeStarted;
+};
+
+typedef map<pid_t, Child> Children;
+
+
+/* The worker class. */
+class Worker
+{
+private:
+
+    /* Note: the worker should only have strong pointers to the
+       top-level goals. */
+
+    /* The top-level goals of the worker. */
+    Goals topGoals;
+
+    /* Goals that are ready to do some work. */
+    WeakGoals awake;
+
+    /* Goals waiting for a build slot. */
+    WeakGoals wantingToBuild;
+
+    /* Child processes currently running. */
+    Children children;
+
+    /* Number of build slots occupied.  This includes local builds and
+       substitutions but not remote builds via the build hook. */
+    unsigned int nrLocalBuilds;
+
+    /* Maps used to prevent multiple instantiations of a goal for the
+       same derivation / path. */
+    WeakGoalMap derivationGoals;
+    WeakGoalMap substitutionGoals;
+
+    /* Goals waiting for busy paths to be unlocked. */
+    WeakGoals waitingForAnyGoal;
+
+    /* Goals sleeping for a few seconds (polling a lock). */
+    WeakGoals waitingForAWhile;
+
+    /* Last time the goals in `waitingForAWhile' were woken up. */
+    time_t lastWokenUp;
+
+public:
+
+    /* Set if at least one derivation had a BuildError (i.e. permanent
+       failure). */
+    bool permanentFailure;
+
+    /* Set if at least one derivation had a timeout. */
+    bool timedOut;
+
+    LocalStore & store;
+
+    std::shared_ptr<HookInstance> hook;
+
+    Worker(LocalStore & store);
+    ~Worker();
+
+    /* Make a goal (with caching). */
+    GoalPtr makeDerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
+    GoalPtr makeSubstitutionGoal(const Path & storePath, bool repair = false);
+
+    /* Remove a dead goal. */
+    void removeGoal(GoalPtr goal);
+
+    /* Wake up a goal (i.e., there is something for it to do). */
+    void wakeUp(GoalPtr goal);
+
+    /* Return the number of local build and substitution processes
+       currently running (but not remote builds via the build
+       hook). */
+    unsigned int getNrLocalBuilds();
+
+    /* Registers a running child process.  `inBuildSlot' means that
+       the process counts towards the jobs limit. */
+    void childStarted(GoalPtr goal, pid_t pid,
+        const set<int> & fds, bool inBuildSlot, bool respectTimeouts);
+
+    /* Unregisters a running child process.  `wakeSleepers' should be
+       false if there is no sense in waking up goals that are sleeping
+       because they can't run yet (e.g., there is no free build slot,
+       or the hook would still say `postpone'). */
+    void childTerminated(pid_t pid, bool wakeSleepers = true);
+
+    /* Put `goal' to sleep until a build slot becomes available (which
+       might be right away). */
+    void waitForBuildSlot(GoalPtr goal);
+
+    /* Wait for any goal to finish.  Pretty indiscriminate way to
+       wait for some resource that some other goal is holding. */
+    void waitForAnyGoal(GoalPtr goal);
+
+    /* Wait for a few seconds and then retry this goal.  Used when
+       waiting for a lock held by another process.  This kind of
+       polling is inefficient, but POSIX doesn't really provide a way
+       to wait for multiple locks in the main select() loop. */
+    void waitForAWhile(GoalPtr goal);
+
+    /* Loop until the specified top-level goals have finished. */
+    void run(const Goals & topGoals);
+
+    /* Wait for input to become available. */
+    void waitForInput();
+
+    unsigned int exitStatus();
+};
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+void addToWeakGoals(WeakGoals & goals, GoalPtr p)
+{
+    // FIXME: necessary?
+    foreach (WeakGoals::iterator, i, goals)
+        if (i->lock() == p) return;
+    goals.push_back(p);
+}
+
+
+void Goal::addWaitee(GoalPtr waitee)
+{
+    waitees.insert(waitee);
+    addToWeakGoals(waitee->waiters, shared_from_this());
+}
+
+
+void Goal::waiteeDone(GoalPtr waitee, ExitCode result)
+{
+    assert(waitees.find(waitee) != waitees.end());
+    waitees.erase(waitee);
+
+    trace(format("waitee ‘%1%’ done; %2% left") %
+        waitee->name % waitees.size());
+
+    if (result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure) ++nrFailed;
+
+    if (result == ecNoSubstituters) ++nrNoSubstituters;
+
+    if (result == ecIncompleteClosure) ++nrIncompleteClosure;
+
+    if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) {
+
+        /* If we failed and keepGoing is not set, we remove all
+           remaining waitees. */
+        foreach (Goals::iterator, i, waitees) {
+            GoalPtr goal = *i;
+            WeakGoals waiters2;
+            foreach (WeakGoals::iterator, j, goal->waiters)
+                if (j->lock() != shared_from_this()) waiters2.push_back(*j);
+            goal->waiters = waiters2;
+        }
+        waitees.clear();
+
+        worker.wakeUp(shared_from_this());
+    }
+}
+
+
+void Goal::amDone(ExitCode result)
+{
+    trace("done");
+    assert(exitCode == ecBusy);
+    assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure);
+    exitCode = result;
+    foreach (WeakGoals::iterator, i, waiters) {
+        GoalPtr goal = i->lock();
+        if (goal) goal->waiteeDone(shared_from_this(), result);
+    }
+    waiters.clear();
+    worker.removeGoal(shared_from_this());
+}
+
+
+void Goal::trace(const format & f)
+{
+    debug(format("%1%: %2%") % name % f);
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+/* Common initialisation performed in child processes. */
+static void commonChildInit(Pipe & logPipe)
+{
+    /* Put the child in a separate session (and thus a separate
+       process group) so that it has no controlling terminal (meaning
+       that e.g. ssh cannot open /dev/tty) and it doesn't receive
+       terminal signals. */
+    if (setsid() == -1)
+        throw SysError(format("creating a new session"));
+
+    /* Dup the write side of the logger pipe into stderr. */
+    if (dup2(logPipe.writeSide, STDERR_FILENO) == -1)
+        throw SysError("cannot pipe standard error into log file");
+
+    /* Dup stderr to stdout. */
+    if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1)
+        throw SysError("cannot dup stderr into stdout");
+
+    /* Reroute stdin to /dev/null. */
+    int fdDevNull = open(pathNullDevice.c_str(), O_RDWR);
+    if (fdDevNull == -1)
+        throw SysError(format("cannot open ‘%1%’") % pathNullDevice);
+    if (dup2(fdDevNull, STDIN_FILENO) == -1)
+        throw SysError("cannot dup null device into stdin");
+    close(fdDevNull);
+}
+
+
+/* Convert a string list to an array of char pointers.  Careful: the
+   string list should outlive the array. */
+const char * * strings2CharPtrs(const Strings & ss)
+{
+    const char * * arr = new const char * [ss.size() + 1];
+    const char * * p = arr;
+    foreach (Strings::const_iterator, i, ss) *p++ = i->c_str();
+    *p = 0;
+    return arr;
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
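+/* A UserLock acquires exclusive use of one of the accounts in the
+   build-users-group, so that concurrent builds run under different
+   (non-root) uids. */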
+class UserLock
+{
+private:
+    /* POSIX locks suck.  If we have a lock on a file, and we open and
+       close that file again (without closing the original file
+       descriptor), we lose the lock.  So we have to be *very* careful
+       not to open a lock file on which we are holding a lock. */
+    static PathSet lockedPaths; /* !!! not thread-safe */
+
+    Path fnUserLock;
+    AutoCloseFD fdUserLock;
+
+    string user;
+    uid_t uid;
+    gid_t gid;
+
+public:
+    UserLock();
+    ~UserLock();
+
+    void acquire();
+    void release();
+
+    void kill();
+
+    string getUser() { return user; }
+    uid_t getUID() { return uid; }
+    gid_t getGID() { return gid; }
+
+    bool enabled() { return uid != 0; }
+
+};
+
+
+PathSet UserLock::lockedPaths;
+
+
+UserLock::UserLock()
+{
+    uid = gid = 0;
+}
+
+
+UserLock::~UserLock()
+{
+    release();
+}
+
+
+void UserLock::acquire()
+{
+    assert(uid == 0);
+
+    assert(settings.buildUsersGroup != "");
+
+    /* Get the members of the build-users-group. */
+    struct group * gr = getgrnam(settings.buildUsersGroup.c_str());
+    if (!gr)
+        throw Error(format("the group ‘%1%’ specified in ‘build-users-group’ does not exist")
+            % settings.buildUsersGroup);
+    gid = gr->gr_gid;
+
+    /* Copy the result of getgrnam. */
+    Strings users;
+    for (char * * p = gr->gr_mem; *p; ++p) {
+        debug(format("found build user ‘%1%’") % *p);
+        users.push_back(*p);
+    }
+
+    if (users.empty())
+        throw Error(format("the build users group ‘%1%’ has no members")
+            % settings.buildUsersGroup);
+
+    /* Find a user account that isn't currently in use for another
+       build. */
+    foreach (Strings::iterator, i, users) {
+        debug(format("trying user ‘%1%’") % *i);
+
+        struct passwd * pw = getpwnam(i->c_str());
+        if (!pw)
+            throw Error(format("the user ‘%1%’ in the group ‘%2%’ does not exist")
+                % *i % settings.buildUsersGroup);
+
+        createDirs(settings.nixStateDir + "/userpool");
+
+        fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
+
+        if (lockedPaths.find(fnUserLock) != lockedPaths.end())
+            /* We already have a lock on this one. */
+            continue;
+
+        AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT, 0600);
+        if (fd == -1)
+            throw SysError(format("opening user lock ‘%1%’") % fnUserLock);
+        closeOnExec(fd);
+
+        if (lockFile(fd, ltWrite, false)) {
+            fdUserLock = fd.borrow();
+            lockedPaths.insert(fnUserLock);
+            user = *i;
+            uid = pw->pw_uid;
+
+            /* Sanity check... */
+            if (uid == getuid() || uid == geteuid())
+                throw Error(format("the Nix user should not be a member of ‘%1%’")
+                    % settings.buildUsersGroup);
+
+            return;
+        }
+    }
+
+    throw Error(format("all build users are currently in use; "
+        "consider creating additional users and adding them to the ‘%1%’ group")
+        % settings.buildUsersGroup);
+}
+
+
+void UserLock::release()
+{
+    if (uid == 0) return;
+    fdUserLock.close(); /* releases lock */
+    assert(lockedPaths.find(fnUserLock) != lockedPaths.end());
+    lockedPaths.erase(fnUserLock);
+    fnUserLock = "";
+    uid = 0;
+}
+
+
+void UserLock::kill()
+{
+    assert(enabled());
+    killUser(uid);
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
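+/* A running instance of the external build hook, together with the
+   pipes used to talk to it. */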
+struct HookInstance
+{
+    /* Pipes for talking to the build hook. */
+    Pipe toHook;
+
+    /* Pipe for the hook's standard output/error. */
+    Pipe fromHook;
+
+    /* Pipe for the builder's standard output/error. */
+    Pipe builderOut;
+
+    /* The process ID of the hook. */
+    Pid pid;
+
+    HookInstance();
+
+    ~HookInstance();
+};
+
+
+HookInstance::HookInstance()
+{
+    debug("starting build hook");
+
+    Path buildHook = getEnv("NIX_BUILD_HOOK");
+    if (string(buildHook, 0, 1) != "/") buildHook = settings.nixLibexecDir + "/nix/" + buildHook;
+    buildHook = canonPath(buildHook);
+
+    /* Create a pipe to get the output of the child. */
+    fromHook.create();
+
+    /* Create the communication pipes. */
+    toHook.create();
+
+    /* Create a pipe to get the output of the builder. */
+    builderOut.create();
+
+    /* Fork the hook. */
+    pid = startProcess([&]() {
+
+        commonChildInit(fromHook);
+
+        if (chdir("/") == -1) throw SysError("changing into /");
+
+        /* Dup the communication pipes. */
+        if (dup2(toHook.readSide, STDIN_FILENO) == -1)
+            throw SysError("dupping to-hook read side");
+
+        /* Use fd 4 for the builder's stdout/stderr. */
+        if (dup2(builderOut.writeSide, 4) == -1)
+            throw SysError("dupping builder's stdout/stderr");
+
+        execl(buildHook.c_str(), buildHook.c_str(), settings.thisSystem.c_str(),
+            (format("%1%") % settings.maxSilentTime).str().c_str(),
+            (format("%1%") % settings.printBuildTrace).str().c_str(),
+            (format("%1%") % settings.buildTimeout).str().c_str(),
+            NULL);
+
+        throw SysError(format("executing ‘%1%’") % buildHook);
+    });
+
+    pid.setSeparatePG(true);
+    fromHook.writeSide.close();
+    toHook.readSide.close();
+}
+
+
+HookInstance::~HookInstance()
+{
+    try {
+        toHook.writeSide.close();
+        pid.kill(true);
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+typedef map<string, string> HashRewrites;
+
+
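+/* Replace every occurrence of each key of `rewrites' in `s' by the
+   corresponding value.  Keys and values must have equal length so
+   that offsets within `s' are preserved. */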
+string rewriteHashes(string s, const HashRewrites & rewrites)
+{
+    foreach (HashRewrites::const_iterator, i, rewrites) {
+        assert(i->first.size() == i->second.size());
+        size_t j = 0;
+        while ((j = s.find(i->first, j)) != string::npos) {
+            debug(format("rewriting @ %1%") % j);
+            s.replace(j, i->second.size(), i->second);
+        }
+    }
+    return s;
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
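+/* Possible replies from the build hook: accept the job, decline it,
+   or postpone it until a later attempt. */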
+typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
+
+class SubstitutionGoal;
+
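+/* A goal for building (or substituting) the outputs of a derivation. */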
+class DerivationGoal : public Goal
+{
+private:
+    /* The path of the derivation. */
+    Path drvPath;
+
+    /* The specific outputs that we need to build.  Empty means all of
+       them. */
+    StringSet wantedOutputs;
+
+    /* Whether additional wanted outputs have been added. */
+    bool needRestart;
+
+    /* Whether to retry substituting the outputs after building the
+       inputs. */
+    bool retrySubstitution;
+
+    /* The derivation stored at drvPath. */
+    Derivation drv;
+
+    /* The remainder is state held during the build. */
+
+    /* Locks on the output paths. */
+    PathLocks outputLocks;
+
+    /* All input paths (that is, the union of FS closures of the
+       immediate input paths). */
+    PathSet inputPaths;
+
+    /* Referenceable paths (i.e., input and output paths). */
+    PathSet allPaths;
+
+    /* Outputs that are already valid.  If we're repairing, these are
+       the outputs that are valid *and* not corrupt. */
+    PathSet validPaths;
+
+    /* Outputs that are corrupt or not valid. */
+    PathSet missingPaths;
+
+    /* User selected for running the builder. */
+    UserLock buildUser;
+
+    /* The process ID of the builder. */
+    Pid pid;
+
+    /* The temporary directory. */
+    Path tmpDir;
+
+    /* File handles for writing the log file. */
+    FILE * fLogFile;
+    BZFILE * bzLogFile;
+    AutoCloseFD fdLogFile;
+
+    /* Number of bytes received from the builder's stdout/stderr. */
+    unsigned long logSize;
+
+    /* Pipe for the builder's standard output/error. */
+    Pipe builderOut;
+
+    /* The build hook. */
+    std::shared_ptr<HookInstance> hook;
+
+    /* Whether we're currently doing a chroot build. */
+    bool useChroot;
+
+    Path chrootRootDir;
+
+    /* RAII object to delete the chroot directory. */
+    std::shared_ptr<AutoDelete> autoDelChroot;
+
+    /* All inputs that are regular files. */
+    PathSet regularInputPaths;
+
+    /* Whether this is a fixed-output derivation. */
+    bool fixedOutput;
+
+    typedef void (DerivationGoal::*GoalState)();
+    GoalState state;
+
+    /* Stuff we need to pass to initChild(). */
+    typedef map<Path, Path> DirsInChroot; // maps target path to source path
+    DirsInChroot dirsInChroot;
+    typedef map<string, string> Environment;
+    Environment env;
+
+    /* Hash rewriting. */
+    HashRewrites rewritesToTmp, rewritesFromTmp;
+    typedef map<Path, Path> RedirectedOutputs;
+    RedirectedOutputs redirectedOutputs;
+
+    BuildMode buildMode;
+
+    /* If we're repairing without a chroot, there may be outputs that
+       are valid but corrupt.  So we redirect these outputs to
+       temporary paths. */
+    PathSet redirectedBadOutputs;
+
+    /* Set of inodes seen during calls to canonicalisePathMetaData()
+       for this build's outputs.  This needs to be shared between
+       outputs to allow hard links between outputs. */
+    InodesSeen inodesSeen;
+
+public:
+    DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode = bmNormal);
+    ~DerivationGoal();
+
+    void cancel(bool timeout);
+
+    void work();
+
+    Path getDrvPath()
+    {
+        return drvPath;
+    }
+
+    /* Add wanted outputs to an already existing derivation goal. */
+    void addWantedOutputs(const StringSet & outputs);
+
+private:
+    /* The states. */
+    void init();
+    void haveDerivation();
+    void outputsSubstituted();
+    void closureRepaired();
+    void inputsRealised();
+    void tryToBuild();
+    void buildDone();
+
+    /* Is the build hook willing to perform the build? */
+    HookReply tryBuildHook();
+
+    /* Start building a derivation. */
+    void startBuilder();
+
+    /* Initialise the builder's process. */
+    void initChild();
+
+    friend int childEntry(void *);
+
+    /* Check that the derivation outputs all exist and register them
+       as valid. */
+    void registerOutputs();
+
+    /* Open a log file and a pipe to it. */
+    Path openLogFile();
+
+    /* Close the log file. */
+    void closeLogFile();
+
+    /* Delete the temporary directory, if we have one. */
+    void deleteTmpDir(bool force);
+
+    /* Callback used by the worker to write to the log. */
+    void handleChildOutput(int fd, const string & data);
+    void handleEOF(int fd);
+
+    /* Return the set of (in)valid paths. */
+    PathSet checkPathValidity(bool returnValid, bool checkHash);
+
+    /* Abort the goal if `path' failed to build. */
+    bool pathFailed(const Path & path);
+
+    /* Forcibly kill the child process, if any. */
+    void killChild();
+
+    Path addHashRewrite(const Path & path);
+
+    void repairClosure();
+};
+
+
+DerivationGoal::DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode)
+    : Goal(worker)
+    , wantedOutputs(wantedOutputs)
+    , needRestart(false)
+    , retrySubstitution(false)
+    , fLogFile(0)
+    , bzLogFile(0)
+    , useChroot(false)
+    , buildMode(buildMode)
+{
+    this->drvPath = drvPath;
+    state = &DerivationGoal::init;
+    name = (format("building of ‘%1%’") % drvPath).str();
+    trace("created");
+}
+
+
+DerivationGoal::~DerivationGoal()
+{
+    /* Careful: we should never ever throw an exception from a
+       destructor. */
+    try {
+        killChild();
+        deleteTmpDir(false);
+        closeLogFile();
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+
+void DerivationGoal::killChild()
+{
+    if (pid != -1) {
+        worker.childTerminated(pid);
+
+        if (buildUser.enabled()) {
+            /* If we're using a build user, then there is a tricky
+               race condition: if we kill the build user before the
+               child has done its setuid() to the build user uid, then
+               it won't be killed, and we'll potentially lock up in
+               pid.wait().  So also send a conventional kill to the
+               child. */
+            ::kill(-pid, SIGKILL); /* ignore the result */
+            buildUser.kill();
+            pid.wait(true);
+        } else
+            pid.kill();
+
+        assert(pid == -1);
+    }
+
+    hook.reset();
+}
+
+
+void DerivationGoal::cancel(bool timeout)
+{
+    if (settings.printBuildTrace && timeout)
+        printMsg(lvlError, format("@ build-failed %1% - timeout") % drvPath);
+    killChild();
+    amDone(ecFailed);
+}
+
+
+void DerivationGoal::work()
+{
+    (this->*state)();
+}
+
+
+void DerivationGoal::addWantedOutputs(const StringSet & outputs)
+{
+    /* If we already want all outputs, there is nothing to do. */
+    if (wantedOutputs.empty()) return;
+
+    if (outputs.empty()) {
+        wantedOutputs.clear();
+        needRestart = true;
+    } else
+        foreach (StringSet::const_iterator, i, outputs)
+            if (wantedOutputs.find(*i) == wantedOutputs.end()) {
+                wantedOutputs.insert(*i);
+                needRestart = true;
+            }
+}
+
+
+void DerivationGoal::init()
+{
+    trace("init");
+
+    if (settings.readOnlyMode)
+        throw Error(format("cannot build derivation ‘%1%’ - no write access to the Nix store") % drvPath);
+
+    /* The first thing to do is to make sure that the derivation
+       exists.  If it doesn't, it may be created through a
+       substitute. */
+    addWaitee(worker.makeSubstitutionGoal(drvPath));
+
+    state = &DerivationGoal::haveDerivation;
+}
+
+
+void DerivationGoal::haveDerivation()
+{
+    trace("loading derivation");
+
+    if (nrFailed != 0) {
+        printMsg(lvlError, format("cannot build missing derivation ‘%1%’") % drvPath);
+        amDone(ecFailed);
+        return;
+    }
+
+    /* `drvPath' should already be a root, but let's be on the safe
+       side: if the user forgot to make it a root, we wouldn't want
+       things being garbage collected while we're busy. */
+    worker.store.addTempRoot(drvPath);
+
+    assert(worker.store.isValidPath(drvPath));
+
+    /* Get the derivation. */
+    drv = derivationFromPath(worker.store, drvPath);
+
+    foreach (DerivationOutputs::iterator, i, drv.outputs)
+        worker.store.addTempRoot(i->second.path);
+
+    /* Check which output paths are not already valid. */
+    PathSet invalidOutputs = checkPathValidity(false, buildMode == bmRepair);
+
+    /* If they are all valid, then we're done. */
+    if (invalidOutputs.size() == 0 && buildMode == bmNormal) {
+        amDone(ecSuccess);
+        return;
+    }
+
+    /* Check whether any output previously failed to build.  If so,
+       don't bother. */
+    foreach (PathSet::iterator, i, invalidOutputs)
+        if (pathFailed(*i)) return;
+
+    /* We are first going to try to create the invalid output paths
+       through substitutes.  If that doesn't work, we'll build
+       them. */
+    if (settings.useSubstitutes && !willBuildLocally(drv))
+        foreach (PathSet::iterator, i, invalidOutputs)
+            addWaitee(worker.makeSubstitutionGoal(*i, buildMode == bmRepair));
+
+    if (waitees.empty()) /* to prevent hang (no wake-up event) */
+        outputsSubstituted();
+    else
+        state = &DerivationGoal::outputsSubstituted;
+}
+
+
+void DerivationGoal::outputsSubstituted()
+{
+    trace("all outputs substituted (maybe)");
+
+    if (nrFailed > 0 && nrFailed > nrNoSubstituters + nrIncompleteClosure && !settings.tryFallback)
+        throw Error(format("some substitutes for the outputs of derivation ‘%1%’ failed (usually happens due to networking issues); try ‘--fallback’ to build derivation from source ") % drvPath);
+
+    /*  If the substitutes form an incomplete closure, then we should
+        build the dependencies of this derivation, but after that, we
+        can still use the substitutes for this derivation itself. */
+    if (nrIncompleteClosure > 0 && !retrySubstitution) retrySubstitution = true;
+
+    nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
+
+    if (needRestart) {
+        needRestart = false;
+        haveDerivation();
+        return;
+    }
+
+    unsigned int nrInvalid = checkPathValidity(false, buildMode == bmRepair).size();
+    if (buildMode == bmNormal && nrInvalid == 0) {
+        amDone(ecSuccess);
+        return;
+    }
+    if (buildMode == bmRepair && nrInvalid == 0) {
+        repairClosure();
+        return;
+    }
+    if (buildMode == bmCheck && nrInvalid > 0)
+        throw Error(format("some outputs of ‘%1%’ are not valid, so checking is not possible") % drvPath);
+
+    /* Otherwise, at least one of the output paths could not be
+       produced using a substitute.  So we have to build instead. */
+
+    /* Make sure checkPathValidity() from now on checks all
+       outputs. */
+    wantedOutputs = PathSet();
+
+    /* The inputs must be built before we can build this goal. */
+    foreach (DerivationInputs::iterator, i, drv.inputDrvs)
+        addWaitee(worker.makeDerivationGoal(i->first, i->second, buildMode == bmRepair ? bmRepair : bmNormal));
+
+    foreach (PathSet::iterator, i, drv.inputSrcs)
+        addWaitee(worker.makeSubstitutionGoal(*i));
+
+    if (waitees.empty()) /* to prevent hang (no wake-up event) */
+        inputsRealised();
+    else
+        state = &DerivationGoal::inputsRealised;
+}
+
+
+void DerivationGoal::repairClosure()
+{
+    /* If we're repairing, we now know that our own outputs are valid.
+       Now check whether the other paths in the outputs closure are
+       good.  If not, then start derivation goals for the derivations
+       that produced those outputs. */
+
+    /* Get the output closure. */
+    PathSet outputClosure;
+    foreach (DerivationOutputs::iterator, i, drv.outputs)
+        computeFSClosure(worker.store, i->second.path, outputClosure);
+
+    /* Filter out our own outputs (which we have already checked). */
+    foreach (DerivationOutputs::iterator, i, drv.outputs)
+        outputClosure.erase(i->second.path);
+
+    /* Get all dependencies of this derivation so that we know which
+       derivation is responsible for which path in the output
+       closure. */
+    PathSet inputClosure;
+    computeFSClosure(worker.store, drvPath, inputClosure);
+    std::map<Path, Path> outputsToDrv;
+    foreach (PathSet::iterator, i, inputClosure)
+        if (isDerivation(*i)) {
+            Derivation drv = derivationFromPath(worker.store, *i);
+            foreach (DerivationOutputs::iterator, j, drv.outputs)
+                outputsToDrv[j->second.path] = *i;
+        }
+
+    /* Check each path (slow!). */
+    PathSet broken;
+    foreach (PathSet::iterator, i, outputClosure) {
+        if (worker.store.pathContentsGood(*i)) continue;
+        printMsg(lvlError, format("found corrupted or missing path ‘%1%’ in the output closure of ‘%2%’") % *i % drvPath);
+        Path drvPath2 = outputsToDrv[*i];
+        if (drvPath2 == "")
+            addWaitee(worker.makeSubstitutionGoal(*i, true));
+        else
+            addWaitee(worker.makeDerivationGoal(drvPath2, PathSet(), bmRepair));
+    }
+
+    if (waitees.empty()) {
+        amDone(ecSuccess);
+        return;
+    }
+
+    state = &DerivationGoal::closureRepaired;
+}
+
+
+void DerivationGoal::closureRepaired()
+{
+    trace("closure repaired");
+    if (nrFailed > 0)
+        throw Error(format("some paths in the output closure of derivation ‘%1%’ could not be repaired") % drvPath);
+    amDone(ecSuccess);
+}
+
+
+void DerivationGoal::inputsRealised()
+{
+    trace("all inputs realised");
+
+    if (nrFailed != 0) {
+        printMsg(lvlError,
+            format("cannot build derivation ‘%1%’: %2% dependencies couldn't be built")
+            % drvPath % nrFailed);
+        amDone(ecFailed);
+        return;
+    }
+
+    if (retrySubstitution) {
+        haveDerivation();
+        return;
+    }
+
+    /* Gather information necessary for computing the closure and/or
+       running the build hook. */
+
+    /* The outputs are referenceable paths. */
+    foreach (DerivationOutputs::iterator, i, drv.outputs) {
+        debug(format("building path ‘%1%’") % i->second.path);
+        allPaths.insert(i->second.path);
+    }
+
+    /* Determine the full set of input paths. */
+
+    /* First, the input derivations. */
+    foreach (DerivationInputs::iterator, i, drv.inputDrvs) {
+        /* Add the relevant output closures of the input derivation
+           `*i' as input paths.  Only add the closures of output paths
+           that are specified as inputs. */
+        assert(worker.store.isValidPath(i->first));
+        Derivation inDrv = derivationFromPath(worker.store, i->first);
+        foreach (StringSet::iterator, j, i->second)
+            if (inDrv.outputs.find(*j) != inDrv.outputs.end())
+                computeFSClosure(worker.store, inDrv.outputs[*j].path, inputPaths);
+            else
+                throw Error(
+                    format("derivation ‘%1%’ requires non-existent output ‘%2%’ from input derivation ‘%3%’")
+                    % drvPath % *j % i->first);
+    }
+
+    /* Second, the input sources. */
+    foreach (PathSet::iterator, i, drv.inputSrcs)
+        computeFSClosure(worker.store, *i, inputPaths);
+
+    debug(format("added input paths %1%") % showPaths(inputPaths));
+
+    allPaths.insert(inputPaths.begin(), inputPaths.end());
+
+    /* Is this a fixed-output derivation? */
+    fixedOutput = true;
+    foreach (DerivationOutputs::iterator, i, drv.outputs)
+        if (i->second.hash == "") fixedOutput = false;
+
+    /* Okay, try to build.  Note that here we don't wait for a build
+       slot to become available, since we don't need one if there is a
+       build hook. */
+    state = &DerivationGoal::tryToBuild;
+    worker.wakeUp(shared_from_this());
+}
+
+
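+/* Return the set of store paths produced by the given derivation
+   outputs. */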
+PathSet outputPaths(const DerivationOutputs & outputs)
+{
+    PathSet paths;
+    foreach (DerivationOutputs::const_iterator, i, outputs)
+        paths.insert(i->second.path);
+    return paths;
+}
+
+
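+/* Look up `key' in `map', returning the empty string if it is absent. */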
+static string get(const StringPairs & map, const string & key)
+{
+    StringPairs::const_iterator i = map.find(key);
+    return i == map.end() ? (string) "" : i->second;
+}
+
+
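+/* Whether a derivation for `platform' can be built on this machine. */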
+static bool canBuildLocally(const string & platform)
+{
+    return platform == settings.thisSystem
+#ifdef CAN_DO_LINUX32_BUILDS
+        || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux")
+#endif
+        ;
+}
+
+
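+/* Whether the derivation prefers to be built locally and this machine
+   is able to do so. */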
+bool willBuildLocally(const Derivation & drv)
+{
+    return get(drv.env, "preferLocalBuild") == "1" && canBuildLocally(drv.platform);
+}
+
+
+void DerivationGoal::tryToBuild()
+{
+    trace("trying to build");
+
+    /* Check for the possibility that some other goal in this process
+       has locked the output since we checked in haveDerivation().
+       (It can't happen between here and the lockPaths() call below
+       because we're not allowing multi-threading.)  If so, put this
+       goal to sleep until another goal finishes, then try again. */
+    foreach (DerivationOutputs::iterator, i, drv.outputs)
+        if (pathIsLockedByMe(i->second.path)) {
+            debug(format("putting derivation ‘%1%’ to sleep because ‘%2%’ is locked by another goal")
+                % drvPath % i->second.path);
+            worker.waitForAnyGoal(shared_from_this());
+            return;
+        }
+
+    /* Obtain locks on all output paths.  The locks are automatically
+       released when we exit this function or Nix crashes.  If we
+       can't acquire the lock, then continue; hopefully some other
+       goal can start a build, and if not, the main loop will sleep a
+       few seconds and then retry this goal. */
+    if (!outputLocks.lockPaths(outputPaths(drv.outputs), "", false)) {
+        worker.waitForAWhile(shared_from_this());
+        return;
+    }
+
+    /* Now check again whether the outputs are valid.  This is because
+       another process may have started building in parallel.  After
+       it has finished and released the locks, we can (and should)
+       reuse its results.  (Strictly speaking the first check can be
+       omitted, but that would be less efficient.)  Note that since we
+       now hold the locks on the output paths, no other process can
+       build this derivation, so no further checks are necessary. */
+    validPaths = checkPathValidity(true, buildMode == bmRepair);
+    assert(buildMode != bmCheck || validPaths.size() == drv.outputs.size());
+    if (buildMode != bmCheck && validPaths.size() == drv.outputs.size()) {
+        debug(format("skipping build of derivation ‘%1%’, someone beat us to it") % drvPath);
+        outputLocks.setDeletion(true);
+        amDone(ecSuccess);
+        return;
+    }
+
+    missingPaths = outputPaths(drv.outputs);
+    if (buildMode != bmCheck)
+        foreach (PathSet::iterator, i, validPaths) missingPaths.erase(*i);
+
+    /* If any of the outputs already exist but are not valid, delete
+       them. */
+    foreach (DerivationOutputs::iterator, i, drv.outputs) {
+        Path path = i->second.path;
+        if (worker.store.isValidPath(path)) continue;
+        if (!pathExists(path)) continue;
+        debug(format("removing invalid path ‘%1%’") % path);
+        deletePath(path);
+    }
+
+    /* Check again whether any output previously failed to build,
+       because some other process may have tried and failed before we
+       acquired the lock. */
+    foreach (DerivationOutputs::iterator, i, drv.outputs)
+        if (pathFailed(i->second.path)) return;
+
+    /* Don't do a remote build if the derivation has the attribute
+       `preferLocalBuild' set.  Also, check and repair modes are only
+       supported for local builds. */
+    bool buildLocally = buildMode != bmNormal || willBuildLocally(drv);
+
+    /* Is the build hook willing to accept this job? */
+    if (!buildLocally) {
+        switch (tryBuildHook()) {
+            case rpAccept:
+                /* Yes, it has started doing so.  Wait until we get
+                   EOF from the hook. */
+                state = &DerivationGoal::buildDone;
+                return;
+            case rpPostpone:
+                /* Not now; wait until at least one child finishes or
+                   the wake-up timeout expires. */
+                worker.waitForAWhile(shared_from_this());
+                outputLocks.unlock();
+                return;
+            case rpDecline:
+                /* We should do it ourselves. */
+                break;
+        }
+    }
+
+    /* Make sure that we are allowed to start a build.  If this
+       derivation prefers to be done locally, do it even if
+       maxBuildJobs is 0. */
+    unsigned int curBuilds = worker.getNrLocalBuilds();
+    if (curBuilds >= settings.maxBuildJobs && !(buildLocally && curBuilds == 0)) {
+        worker.waitForBuildSlot(shared_from_this());
+        outputLocks.unlock();
+        return;
+    }
+
+    try {
+
+        /* Okay, we have to build. */
+        startBuilder();
+
+    } catch (BuildError & e) {
+        printMsg(lvlError, e.msg());
+        outputLocks.unlock();
+        buildUser.release();
+        if (settings.printBuildTrace)
+            printMsg(lvlError, format("@ build-failed %1% - %2% %3%")
+                % drvPath % 0 % e.msg());
+        worker.permanentFailure = true;
+        amDone(ecFailed);
+        return;
+    }
+
+    /* This state will be reached when we get EOF on the child's
+       log pipe. */
+    state = &DerivationGoal::buildDone;
+}
+
+
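+/* Replace the valid store path `storePath' by the contents of
+   `tmpPath', moving the old contents out of the way first. */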
+void replaceValidPath(const Path & storePath, const Path tmpPath)
+{
+    /* We can't atomically replace storePath (the original) with
+       tmpPath (the replacement), so we have to move it out of the
+       way first.  We'd better not be interrupted here, because if
+       we're repairing (say) Glibc, we end up with a broken system. */
+    Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % rand()).str();
+    if (pathExists(storePath))
+        rename(storePath.c_str(), oldPath.c_str());
+    if (rename(tmpPath.c_str(), storePath.c_str()) == -1)
+        throw SysError(format("moving ‘%1%’ to ‘%2%’") % tmpPath % storePath);
+    if (pathExists(oldPath))
+        deletePath(oldPath);
+}
+
+
+void DerivationGoal::buildDone()
+{
+    trace("build done");
+
+    /* Since we got an EOF on the logger pipe, the builder is presumed
+       to have terminated.  In fact, the builder could also simply
+       have closed its end of the pipe --- just don't do that :-) */
+    int status;
+    pid_t savedPid;
+    if (hook) {
+        savedPid = hook->pid;
+        status = hook->pid.wait(true);
+    } else {
+        /* !!! this could block! security problem! solution: kill the
+           child */
+        savedPid = pid;
+        status = pid.wait(true);
+    }
+
+    debug(format("builder process for ‘%1%’ finished") % drvPath);
+
+    /* So the child is gone now. */
+    worker.childTerminated(savedPid);
+
+    /* Close the read side of the logger pipe. */
+    if (hook) {
+        hook->builderOut.readSide.close();
+        hook->fromHook.readSide.close();
+    }
+    else builderOut.readSide.close();
+
+    /* Close the log file. */
+    closeLogFile();
+
+    /* When running under a build user, make sure that all processes
+       running under that uid are gone.  This is to prevent a
+       malicious user from leaving behind a process that keeps files
+       open and modifies them after they have been chown'ed to
+       root. */
+    if (buildUser.enabled()) buildUser.kill();
+
+    bool diskFull = false;
+
+    try {
+
+        /* Check the exit status. */
+        if (!statusOk(status)) {
+
+            /* Heuristically check whether the build failure may have
+               been caused by a disk full condition.  We have no way
+               of knowing whether the build actually got an ENOSPC.
+               So instead, check if the disk is (nearly) full now.  If
+               so, we don't mark this build as a permanent failure. */
+#if HAVE_STATVFS
+            unsigned long long required = 8ULL * 1024 * 1024; // FIXME: make configurable
+            struct statvfs st;
+            if (statvfs(settings.nixStore.c_str(), &st) == 0 &&
+                (unsigned long long) st.f_bavail * st.f_bsize < required)
+                diskFull = true;
+            if (statvfs(tmpDir.c_str(), &st) == 0 &&
+                (unsigned long long) st.f_bavail * st.f_bsize < required)
+                diskFull = true;
+#endif
+
+            deleteTmpDir(false);
+
+            /* Move paths out of the chroot for easier debugging of
+               build failures. */
+            if (useChroot && buildMode == bmNormal)
+                foreach (PathSet::iterator, i, missingPaths)
+                    if (pathExists(chrootRootDir + *i))
+                        rename((chrootRootDir + *i).c_str(), i->c_str());
+
+            if (diskFull)
+                printMsg(lvlError, "note: build failure may have been caused by lack of free disk space");
+
+            throw BuildError(format("builder for ‘%1%’ %2%")
+                % drvPath % statusToString(status));
+        }
+
+        /* Compute the FS closure of the outputs and register them as
+           being valid. */
+        registerOutputs();
+
+        if (buildMode == bmCheck) {
+            amDone(ecSuccess);
+            return;
+        }
+
+        /* Delete unused redirected outputs (when doing hash rewriting). */
+        foreach (RedirectedOutputs::iterator, i, redirectedOutputs)
+            if (pathExists(i->second)) deletePath(i->second);
+
+        /* Delete the chroot (if we were using one). */
+        autoDelChroot.reset(); /* this runs the destructor */
+
+        deleteTmpDir(true);
+
+        /* It is now safe to delete the lock files, since all future
+           lockers will see that the output paths are valid; they will
+           not create new lock files with the same names as the old
+           (unlinked) lock files. */
+        outputLocks.setDeletion(true);
+        outputLocks.unlock();
+
+    } catch (BuildError & e) {
+        if (!hook)
+            printMsg(lvlError, e.msg());
+        outputLocks.unlock();
+        buildUser.release();
+
+        if (hook && WIFEXITED(status) && WEXITSTATUS(status) == 101) {
+            if (settings.printBuildTrace)
+                printMsg(lvlError, format("@ build-failed %1% - timeout") % drvPath);
+            worker.timedOut = true;
+        }
+
+        else if (hook && (!WIFEXITED(status) || WEXITSTATUS(status) != 100)) {
+            if (settings.printBuildTrace)
+                printMsg(lvlError, format("@ hook-failed %1% - %2% %3%")
+                    % drvPath % status % e.msg());
+        }
+
+        else {
+            if (settings.printBuildTrace)
+                printMsg(lvlError, format("@ build-failed %1% - %2% %3%")
+                    % drvPath % 1 % e.msg());
+            worker.permanentFailure = !fixedOutput && !diskFull;
+
+            /* Register the outputs of this build as "failed" so we
+               won't try to build them again (negative caching).
+               However, don't do this for fixed-output derivations,
+               since they're likely to fail for transient reasons
+               (e.g., fetchurl not being able to access the network).
+               Hook errors (like communication problems with the
+               remote machine) shouldn't be cached either. */
+            if (settings.cacheFailure && !fixedOutput && !diskFull)
+                foreach (DerivationOutputs::iterator, i, drv.outputs)
+                    worker.store.registerFailedPath(i->second.path);
+        }
+
+        amDone(ecFailed);
+        return;
+    }
+
+    /* Release the build user, if applicable. */
+    buildUser.release();
+
+    if (settings.printBuildTrace)
+        printMsg(lvlError, format("@ build-succeeded %1% -") % drvPath);
+
+    amDone(ecSuccess);
+}
+
+
+HookReply DerivationGoal::tryBuildHook()
+{
+    if (!settings.useBuildHook || getEnv("NIX_BUILD_HOOK") == "") return rpDecline;
+
+    if (!worker.hook)
+        worker.hook = std::shared_ptr<HookInstance>(new HookInstance);
+
+    /* Tell the hook about system features (beyond the system type)
+       required from the build machine.  (The hook could parse the
+       drv file itself, but this is easier.) */
+    Strings features = tokenizeString<Strings>(get(drv.env, "requiredSystemFeatures"));
+    foreach (Strings::iterator, i, features) checkStoreName(*i); /* !!! abuse */
+
+    /* Send the request to the hook. */
+    writeLine(worker.hook->toHook.writeSide, (format("%1% %2% %3% %4%")
+        % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0")
+        % drv.platform % drvPath % concatStringsSep(",", features)).str());
+
+    /* Read the first line of input, which should be a word indicating
+       whether the hook wishes to perform the build. */
+    string reply;
+    while (true) {
+        string s = readLine(worker.hook->fromHook.readSide);
+        if (string(s, 0, 2) == "# ") {
+            reply = string(s, 2);
+            break;
+        }
+        s += "\n";
+        writeToStderr(s);
+    }
+
+    debug(format("hook reply is ‘%1%’") % reply);
+
+    if (reply == "decline" || reply == "postpone")
+        return reply == "decline" ? rpDecline : rpPostpone;
+    else if (reply != "accept")
+        throw Error(format("bad hook reply ‘%1%’") % reply);
+
+    printMsg(lvlTalkative, format("using hook to build path(s) %1%") % showPaths(missingPaths));
+
+    hook = worker.hook;
+    worker.hook.reset();
+
+    /* Tell the hook all the inputs that have to be copied to the
+       remote system.  This unfortunately has to contain the entire
+       derivation closure to ensure that the validity invariant holds
+       on the remote system.  (I.e., it's unfortunate that we have to
+       list it since the remote system *probably* already has it.) */
+    PathSet allInputs;
+    allInputs.insert(inputPaths.begin(), inputPaths.end());
+    computeFSClosure(worker.store, drvPath, allInputs);
+
+    string s;
+    foreach (PathSet::iterator, i, allInputs) { s += *i; s += ' '; }
+    writeLine(hook->toHook.writeSide, s);
+
+    /* Tell the hooks the missing outputs that have to be copied back
+       from the remote system. */
+    s = "";
+    foreach (PathSet::iterator, i, missingPaths) { s += *i; s += ' '; }
+    writeLine(hook->toHook.writeSide, s);
+
+    hook->toHook.writeSide.close();
+
+    /* Create the log file and pipe. */
+    Path logFile = openLogFile();
+
+    set<int> fds;
+    fds.insert(hook->fromHook.readSide);
+    fds.insert(hook->builderOut.readSide);
+    worker.childStarted(shared_from_this(), hook->pid, fds, false, false);
+
+    if (settings.printBuildTrace)
+        printMsg(lvlError, format("@ build-started %1% - %2% %3%")
+            % drvPath % drv.platform % logFile);
+
+    return rpAccept;
+}
+
+
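+/* Like chmod(), but throws a SysError on failure. */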
+void chmod_(const Path & path, mode_t mode)
+{
+    if (chmod(path.c_str(), mode) == -1)
+        throw SysError(format("setting permissions on ‘%1%’") % path);
+}
+
+
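+/* Entry point passed to clone() for chroot builds: runs the child-side
+   initialisation of the builder process. */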
+int childEntry(void * arg)
+{
+    ((DerivationGoal *) arg)->initChild();
+    return 1;
+}
+
+
+void DerivationGoal::startBuilder()
+{
+    startNest(nest, lvlInfo, format(
+            buildMode == bmRepair ? "repairing path(s) %1%" :
+            buildMode == bmCheck ? "checking path(s) %1%" :
+            "building path(s) %1%") % showPaths(missingPaths));
+
+    /* Right platform? */
+    if (!canBuildLocally(drv.platform)) {
+        if (settings.printBuildTrace)
+            printMsg(lvlError, format("@ unsupported-platform %1% %2%") % drvPath % drv.platform);
+        throw Error(
+            format("a ‘%1%’ is required to build ‘%3%’, but I am a ‘%2%’")
+            % drv.platform % settings.thisSystem % drvPath);
+    }
+
+    /* Construct the environment passed to the builder. */
+
+    /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when
+       PATH is not set.  We don't want this, so we fill it in with some dummy
+       value. */
+    env["PATH"] = "/path-not-set";
+
+    /* Set HOME to a non-existing path to prevent certain programs from using
+       /etc/passwd (or NIS, or whatever) to locate the home directory (for
+       example, wget looks for ~/.wgetrc).  I.e., these tools use /etc/passwd
+       if HOME is not set, but they will just assume that the settings file
+       they are looking for does not exist if HOME is set but points to some
+       non-existing path. */
+    Path homeDir = "/homeless-shelter";
+    env["HOME"] = homeDir;
+
+    /* Tell the builder where the Nix store is.  Usually they
+       shouldn't care, but this is useful for purity checking (e.g.,
+       the compiler or linker might only want to accept paths to files
+       in the store or in the build directory). */
+    env["NIX_STORE"] = settings.nixStore;
+
+    /* The maximum number of cores to utilize for parallel building. */
+    env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str();
+
+    /* Add all bindings specified in the derivation. */
+    foreach (StringPairs::iterator, i, drv.env)
+        env[i->first] = i->second;
+
+    /* Create a temporary directory where the build will take
+       place. */
+    tmpDir = createTempDir("", "nix-build-" + storePathToName(drvPath), false, false, 0700);
+
+    /* For convenience, set an environment variable pointing to the top
+       build directory. */
+    env["NIX_BUILD_TOP"] = tmpDir;
+
+    /* Also set TMPDIR and variants to point to this directory. */
+    env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDir;
+
+    /* Explicitly set PWD to prevent problems with chroot builds.  In
+       particular, dietlibc cannot figure out the cwd because the
+       inode of the current directory doesn't appear in .. (because
+       getdents returns the inode of the mount point). */
+    env["PWD"] = tmpDir;
+
+    /* Compatibility hack with Nix <= 0.7: if this is a fixed-output
+       derivation, tell the builder, so that for instance `fetchurl'
+       can skip checking the output.  On older Nixes, this environment
+       variable won't be set, so `fetchurl' will do the check. */
+    if (fixedOutput) env["NIX_OUTPUT_CHECKED"] = "1";
+
+    /* *Only* if this is a fixed-output derivation, propagate the
+       values of the environment variables specified in the
+       `impureEnvVars' attribute to the builder.  This allows for
+       instance environment variables for proxy configuration such as
+       `http_proxy' to be easily passed to downloaders like
+       `fetchurl'.  Passing such environment variables from the caller
+       to the builder is generally impure, but the output of
+       fixed-output derivations is by definition pure (since we
+       already know the cryptographic hash of the output). */
+    if (fixedOutput) {
+        Strings varNames = tokenizeString<Strings>(get(drv.env, "impureEnvVars"));
+        foreach (Strings::iterator, i, varNames) env[*i] = getEnv(*i);
+    }
+
+    /* The `exportReferencesGraph' feature allows the references graph
+       to be passed to a builder.  This attribute should be a list of
+       pairs [name1 path1 name2 path2 ...].  The references graph of
+       each `pathN' will be stored in a text file `nameN' in the
+       temporary build directory.  The text files have the format used
+       by `nix-store --register-validity'.  However, the deriver
+       fields are left empty. */
+    string s = get(drv.env, "exportReferencesGraph");
+    Strings ss = tokenizeString<Strings>(s);
+    if (ss.size() % 2 != 0)
+        throw BuildError(format("odd number of tokens in ‘exportReferencesGraph’: ‘%1%’") % s);
+    for (Strings::iterator i = ss.begin(); i != ss.end(); ) {
+        string fileName = *i++;
+        checkStoreName(fileName); /* !!! abuse of this function */
+
+        /* Check that the store path is valid. */
+        Path storePath = *i++;
+        if (!isInStore(storePath))
+            throw BuildError(format("‘exportReferencesGraph’ contains a non-store path ‘%1%’")
+                % storePath);
+        storePath = toStorePath(storePath);
+        if (!worker.store.isValidPath(storePath))
+            throw BuildError(format("‘exportReferencesGraph’ contains an invalid path ‘%1%’")
+                % storePath);
+
+        /* If there are derivations in the graph, then include their
+           outputs as well.  This is useful if you want to do things
+           like passing all build-time dependencies of some path to a
+           derivation that builds a NixOS DVD image. */
+        PathSet paths, paths2;
+        computeFSClosure(worker.store, storePath, paths);
+        paths2 = paths;
+
+        foreach (PathSet::iterator, j, paths2) {
+            if (isDerivation(*j)) {
+                Derivation drv = derivationFromPath(worker.store, *j);
+                foreach (DerivationOutputs::iterator, k, drv.outputs)
+                    computeFSClosure(worker.store, k->second.path, paths);
+            }
+        }
+
+        /* Write closure info to `fileName'. */
+        writeFile(tmpDir + "/" + fileName,
+            worker.store.makeValidityRegistration(paths, false, false));
+    }
+
+
+    /* If `build-users-group' is not empty, then we have to build as
+       one of the members of that group. */
+    if (settings.buildUsersGroup != "") {
+        buildUser.acquire();
+        assert(buildUser.getUID() != 0);
+        assert(buildUser.getGID() != 0);
+
+        /* Make sure that no other processes are executing under this
+           uid. */
+        buildUser.kill();
+
+        /* Change ownership of the temporary build directory. */
+        if (chown(tmpDir.c_str(), buildUser.getUID(), buildUser.getGID()) == -1)
+            throw SysError(format("cannot change ownership of ‘%1%’") % tmpDir);
+
+        /* Check that the Nix store has the appropriate permissions,
+           i.e., owned by root and mode 1775 (sticky bit on so that
+           the builder can create its output but not mess with the
+           outputs of other processes). */
+        struct stat st;
+        if (stat(settings.nixStore.c_str(), &st) == -1)
+            throw SysError(format("cannot stat ‘%1%’") % settings.nixStore);
+        if (!(st.st_mode & S_ISVTX) ||
+            ((st.st_mode & S_IRWXG) != S_IRWXG) ||
+            (st.st_gid != buildUser.getGID()))
+            throw Error(format(
+                "builder does not have write permission to ‘%2%’; "
+                "try ‘chgrp %1% %2%; chmod 1775 %2%’")
+                % buildUser.getGID() % settings.nixStore);
+    }
+
+
+    /* Are we doing a chroot build?  Note that fixed-output
+       derivations are never done in a chroot, mainly so that
+       functions like fetchurl (which needs a proper /etc/resolv.conf)
+       work properly.  Purity checking for fixed-output derivations
+       is somewhat pointless anyway. */
+    useChroot = settings.useChroot;
+
+    if (fixedOutput) useChroot = false;
+
+    /* Hack to allow derivations to disable chroot builds. */
+    if (get(drv.env, "__noChroot") == "1") useChroot = false;
+
+    if (useChroot) {
+#if CHROOT_ENABLED
+        /* Create a temporary directory in which we set up the chroot
+           environment using bind-mounts.  We put it in the Nix store
+           to ensure that we can create hard-links to non-directory
+           inputs in the fake Nix store in the chroot (see below). */
+        chrootRootDir = drvPath + ".chroot";
+        if (pathExists(chrootRootDir)) deletePath(chrootRootDir);
+
+        /* Clean up the chroot directory automatically. */
+        autoDelChroot = std::shared_ptr<AutoDelete>(new AutoDelete(chrootRootDir));
+
+        printMsg(lvlChatty, format("setting up chroot environment in ‘%1%’") % chrootRootDir);
+
+        /* Create a writable /tmp in the chroot.  Many builders need
+           this.  (Of course they should really respect $TMPDIR
+           instead.) */
+        Path chrootTmpDir = chrootRootDir + "/tmp";
+        createDirs(chrootTmpDir);
+        chmod_(chrootTmpDir, 01777);
+
+        /* Create a /etc/passwd with entries for the build user and the
+           nobody account.  The latter is kind of a hack to support
+           Samba-in-QEMU. */
+        createDirs(chrootRootDir + "/etc");
+
+        writeFile(chrootRootDir + "/etc/passwd",
+            (format(
+                "nixbld:x:%1%:%2%:Nix build user:/:/noshell\n"
+                "nobody:x:65534:65534:Nobody:/:/noshell\n")
+                % (buildUser.enabled() ? buildUser.getUID() : getuid())
+                % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
+
+        /* Declare the build user's group so that programs get a consistent
+           view of the system (e.g., "id -gn"). */
+        writeFile(chrootRootDir + "/etc/group",
+            (format("nixbld:!:%1%:\n")
+                % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
+
+        /* Create /etc/hosts with localhost entry. */
+        writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n");
+
+        /* Bind-mount a user-configurable set of directories from the
+           host file system. */
+        PathSet dirs = tokenizeString<StringSet>(settings.get("build-chroot-dirs", DEFAULT_CHROOT_DIRS));
+        PathSet dirs2 = tokenizeString<StringSet>(settings.get("build-extra-chroot-dirs", ""));
+        dirs.insert(dirs2.begin(), dirs2.end());
+        for (auto & i : dirs) {
+            size_t p = i.find('=');
+            if (p == string::npos)
+                dirsInChroot[i] = i;
+            else
+                dirsInChroot[string(i, 0, p)] = string(i, p + 1);
+        }
+        dirsInChroot[tmpDir] = tmpDir;
+
+        /* Make the closure of the inputs available in the chroot,
+           rather than the whole Nix store.  This prevents any access
+           to undeclared dependencies.  Directories are bind-mounted,
+           while other inputs are hard-linked (since only directories
+           can be bind-mounted).  !!! As an extra security
+           precaution, make the fake Nix store only writable by the
+           build user. */
+        createDirs(chrootRootDir + settings.nixStore);
+        chmod_(chrootRootDir + settings.nixStore, 01777);
+
+        foreach (PathSet::iterator, i, inputPaths) {
+            struct stat st;
+            if (lstat(i->c_str(), &st))
+                throw SysError(format("getting attributes of path ‘%1%’") % *i);
+            if (S_ISDIR(st.st_mode))
+                dirsInChroot[*i] = *i;
+            else {
+                Path p = chrootRootDir + *i;
+                if (link(i->c_str(), p.c_str()) == -1) {
+                    /* Hard-linking fails if we exceed the maximum
+                       link count on a file (e.g. 32000 of ext3),
+                       which is quite possible after a `nix-store
+                       --optimise'. */
+                    if (errno != EMLINK)
+                        throw SysError(format("linking ‘%1%’ to ‘%2%’") % p % *i);
+                    StringSink sink;
+                    dumpPath(*i, sink);
+                    StringSource source(sink.s);
+                    restorePath(p, source);
+                }
+
+                regularInputPaths.insert(*i);
+            }
+        }
+
+        /* If we're repairing or checking, it's possible that we're
+           rebuilding a path that is in settings.dirsInChroot
+           (typically the dependencies of /bin/sh).  Throw them
+           out. */
+        if (buildMode != bmNormal)
+            foreach (DerivationOutputs::iterator, i, drv.outputs)
+                dirsInChroot.erase(i->second.path);
+
+#else
+        throw Error("chroot builds are not supported on this platform");
+#endif
+    }
+
+    else {
+
+        if (pathExists(homeDir))
+            throw Error(format("directory ‘%1%’ exists; please remove it") % homeDir);
+
+        /* We're not doing a chroot build, but we have some valid
+           output paths.  Since we can't just overwrite or delete
+           them, we have to do hash rewriting: i.e. in the
+           environment/arguments passed to the build, we replace the
+           hashes of the valid outputs with unique dummy strings;
+           after the build, we discard the redirected outputs
+           corresponding to the valid outputs, and rewrite the
+           contents of the new outputs to replace the dummy strings
+           with the actual hashes. */
+        if (validPaths.size() > 0)
+            foreach (PathSet::iterator, i, validPaths)
+                addHashRewrite(*i);
+
+        /* If we're repairing, then we don't want to delete the
+           corrupt outputs in advance.  So rewrite them as well. */
+        if (buildMode == bmRepair)
+            foreach (PathSet::iterator, i, missingPaths)
+                if (worker.store.isValidPath(*i) && pathExists(*i)) {
+                    addHashRewrite(*i);
+                    redirectedBadOutputs.insert(*i);
+                }
+    }
+
+
+    /* Run the builder. */
+    printMsg(lvlChatty, format("executing builder ‘%1%’") % drv.builder);
+
+    /* Create the log file. */
+    Path logFile = openLogFile();
+
+    /* Create a pipe to get the output of the builder. */
+    builderOut.create();
+
+    /* Fork a child to build the package.  Note that while we
+       currently use forks to run and wait for the children, it
+       shouldn't be hard to use threads for this on systems where
+       fork() is unavailable or inefficient.
+
+       If we're building in a chroot, then also set up private
+       namespaces for the build:
+
+       - The PID namespace causes the build to start as PID 1.
+         Processes outside of the chroot are not visible to those on
+         the inside, but processes inside the chroot are visible from
+         the outside (though with different PIDs).
+
+       - The private mount namespace ensures that all the bind mounts
+         we do will only show up in this process and its children, and
+         will disappear automatically when we're done.
+
+       - The private network namespace ensures that the builder cannot
+         talk to the outside world (or vice versa).  It only has a
+         private loopback interface.
+
+       - The IPC namespace prevents the builder from communicating
+         with outside processes using SysV IPC mechanisms (shared
+         memory, message queues, semaphores).  It also ensures that
+         all IPC objects are destroyed when the builder exits.
+
+       - The UTS namespace ensures that builders see a hostname of
+         localhost rather than the actual hostname.
+    */
+#if CHROOT_ENABLED
+    if (useChroot) {
+        char stack[32 * 1024];
+        pid = clone(childEntry, stack + sizeof(stack) - 8,
+            CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWNET | CLONE_NEWIPC | CLONE_NEWUTS | SIGCHLD, this);
+    } else
+#endif
+    {
+        pid = fork();
+        if (pid == 0) initChild();
+    }
+
+    if (pid == -1) throw SysError("unable to fork");
+
+    /* parent */
+    pid.setSeparatePG(true);
+    builderOut.writeSide.close();
+    worker.childStarted(shared_from_this(), pid,
+        singleton<set<int> >(builderOut.readSide), true, true);
+
+    /* Check if setting up the build environment failed. */
+    string msg = readLine(builderOut.readSide);
+    if (!msg.empty()) throw Error(msg);
+
+    if (settings.printBuildTrace) {
+        printMsg(lvlError, format("@ build-started %1% - %2% %3%")
+            % drvPath % drv.platform % logFile);
+    }
+
+}
+
+
+void DerivationGoal::initChild()
+{
+    /* Warning: in the child we should absolutely not make any SQLite
+       calls! */
+
+    try { /* child */
+
+        _writeToStderr = 0;
+
+        restoreAffinity();
+
+        commonChildInit(builderOut);
+
+#if CHROOT_ENABLED
+        if (useChroot) {
+            /* Initialise the loopback interface. */
+            AutoCloseFD fd(socket(PF_INET, SOCK_DGRAM, IPPROTO_IP));
+            if (fd == -1) throw SysError("cannot open IP socket");
+
+            struct ifreq ifr;
+            strcpy(ifr.ifr_name, "lo");
+            ifr.ifr_flags = IFF_UP | IFF_LOOPBACK | IFF_RUNNING;
+            if (ioctl(fd, SIOCSIFFLAGS, &ifr) == -1)
+                throw SysError("cannot set loopback interface flags");
+
+            fd.close();
+
+            /* Set the hostname etc. to fixed values. */
+            char hostname[] = "localhost";
+            sethostname(hostname, sizeof(hostname));
+            char domainname[] = "(none)"; // kernel default
+            setdomainname(domainname, sizeof(domainname));
+
+            /* Make all filesystems private.  This is necessary
+               because subtrees may have been mounted as "shared"
+               (MS_SHARED).  (Systemd does this, for instance.)  Even
+               though we have a private mount namespace, mounting
+               filesystems on top of a shared subtree still propagates
+               outside of the namespace.  Making a subtree private is
+               local to the namespace, though, so setting MS_PRIVATE
+               does not affect the outside world. */
+            Strings mounts = tokenizeString<Strings>(readFile("/proc/self/mountinfo", true), "\n");
+            foreach (Strings::iterator, i, mounts) {
+                vector<string> fields = tokenizeString<vector<string> >(*i, " ");
+                string fs = decodeOctalEscaped(fields.at(4));
+                if (mount(0, fs.c_str(), 0, MS_PRIVATE, 0) == -1)
+                    throw SysError(format("unable to make filesystem ‘%1%’ private") % fs);
+            }
+
+            /* Set up a nearly empty /dev, unless the user asked to
+               bind-mount the host /dev. */
+            if (dirsInChroot.find("/dev") == dirsInChroot.end()) {
+                createDirs(chrootRootDir + "/dev/shm");
+                createDirs(chrootRootDir + "/dev/pts");
+                Strings ss;
+                ss.push_back("/dev/full");
+#ifdef __linux__
+                if (pathExists("/dev/kvm"))
+                    ss.push_back("/dev/kvm");
+#endif
+                ss.push_back("/dev/null");
+                ss.push_back("/dev/random");
+                ss.push_back("/dev/tty");
+                ss.push_back("/dev/urandom");
+                ss.push_back("/dev/zero");
+                foreach (Strings::iterator, i, ss) dirsInChroot[*i] = *i;
+                createSymlink("/proc/self/fd", chrootRootDir + "/dev/fd");
+                createSymlink("/proc/self/fd/0", chrootRootDir + "/dev/stdin");
+                createSymlink("/proc/self/fd/1", chrootRootDir + "/dev/stdout");
+                createSymlink("/proc/self/fd/2", chrootRootDir + "/dev/stderr");
+            }
+
+            /* Bind-mount all the directories from the "host"
+               filesystem that we want in the chroot
+               environment. */
+            foreach (DirsInChroot::iterator, i, dirsInChroot) {
+                struct stat st;
+                Path source = i->second;
+                Path target = chrootRootDir + i->first;
+                if (source == "/proc") continue; // backwards compatibility
+                debug(format("bind mounting ‘%1%’ to ‘%2%’") % source % target);
+                if (stat(source.c_str(), &st) == -1)
+                    throw SysError(format("getting attributes of path ‘%1%’") % source);
+                if (S_ISDIR(st.st_mode))
+                    createDirs(target);
+                else {
+                    createDirs(dirOf(target));
+                    writeFile(target, "");
+                }
+                if (mount(source.c_str(), target.c_str(), "", MS_BIND, 0) == -1)
+                    throw SysError(format("bind mount from ‘%1%’ to ‘%2%’ failed") % source % target);
+            }
+
+            /* Bind a new instance of procfs on /proc to reflect our
+               private PID namespace. */
+            createDirs(chrootRootDir + "/proc");
+            if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1)
+                throw SysError("mounting /proc");
+
+            /* Mount a new tmpfs on /dev/shm to ensure that whatever
+               the builder puts in /dev/shm is cleaned up automatically. */
+            if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0, 0) == -1)
+                throw SysError("mounting /dev/shm");
+
+            /* Mount a new devpts on /dev/pts.  Note that this
+               requires the kernel to be compiled with
+               CONFIG_DEVPTS_MULTIPLE_INSTANCES=y (which is the case
+               if /dev/pts/ptmx exists). */
+            if (pathExists("/dev/pts/ptmx") &&
+                !pathExists(chrootRootDir + "/dev/ptmx")
+                && dirsInChroot.find("/dev/pts") == dirsInChroot.end())
+            {
+                if (mount("none", (chrootRootDir + "/dev/pts").c_str(), "devpts", 0, "newinstance,mode=0620") == -1)
+                    throw SysError("mounting /dev/pts");
+                createSymlink("/dev/pts/ptmx", chrootRootDir + "/dev/ptmx");
+
+                /* Make sure /dev/pts/ptmx is world-writable.  With some
+                   Linux versions, it is created with permissions 0.  */
+                chmod_(chrootRootDir + "/dev/pts/ptmx", 0666);
+            }
+
+            /* Do the chroot().  Below we do a chdir() to the
+               temporary build directory to make sure the current
+               directory is in the chroot.  (Actually the order
+               doesn't matter, since due to the bind mount tmpDir and
+               chrootRootDir/tmpDir are the same directory.) */
+            if (chroot(chrootRootDir.c_str()) == -1)
+                throw SysError(format("cannot change root directory to ‘%1%’") % chrootRootDir);
+        }
+#endif
+
+        if (chdir(tmpDir.c_str()) == -1)
+            throw SysError(format("changing into ‘%1%’") % tmpDir);
+
+        /* Close all other file descriptors. */
+        closeMostFDs(set<int>());
+
+#ifdef CAN_DO_LINUX32_BUILDS
+        /* Change the personality to 32-bit if we're doing an
+           i686-linux build on an x86_64-linux machine. */
+        struct utsname utsbuf;
+        uname(&utsbuf);
+        if (drv.platform == "i686-linux" &&
+            (settings.thisSystem == "x86_64-linux" ||
+             (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64")))) {
+            if (personality(0x0008 | 0x8000000 /* == PER_LINUX32_3GB */) == -1)
+                throw SysError("cannot set i686-linux personality");
+        }
+
+        /* Impersonate a Linux 2.6 machine to get some determinism in
+           builds that depend on the kernel version. */
+        if ((drv.platform == "i686-linux" || drv.platform == "x86_64-linux") && settings.impersonateLinux26) {
+            int cur = personality(0xffffffff);
+            if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
+        }
+#endif
+
+        /* Fill in the environment. */
+        Strings envStrs;
+        foreach (Environment::const_iterator, i, env)
+            envStrs.push_back(rewriteHashes(i->first + "=" + i->second, rewritesToTmp));
+        const char * * envArr = strings2CharPtrs(envStrs);
+
+        Path program = drv.builder.c_str();
+        std::vector<const char *> args; /* careful with c_str()! */
+        string user; /* must be here for its c_str()! */
+
+        /* If we are running in `build-users' mode, then switch to the
+           user we allocated above.  Make sure that we drop all root
+           privileges.  Note that above we have closed all file
+           descriptors except std*, so that's safe.  Also note that
+           setuid() when run as root sets the real, effective and
+           saved UIDs. */
+        if (buildUser.enabled()) {
+            printMsg(lvlChatty, format("switching to user ‘%1%’") % buildUser.getUser());
+
+            if (setgroups(0, 0) == -1)
+                throw SysError("cannot clear the set of supplementary groups");
+
+            if (setgid(buildUser.getGID()) == -1 ||
+                getgid() != buildUser.getGID() ||
+                getegid() != buildUser.getGID())
+                throw SysError("setgid failed");
+
+            if (setuid(buildUser.getUID()) == -1 ||
+                getuid() != buildUser.getUID() ||
+                geteuid() != buildUser.getUID())
+                throw SysError("setuid failed");
+        }
+
+        /* Fill in the arguments. */
+        string builderBasename = baseNameOf(drv.builder);
+        args.push_back(builderBasename.c_str());
+        foreach (Strings::iterator, i, drv.args)
+            args.push_back(rewriteHashes(*i, rewritesToTmp).c_str());
+        args.push_back(0);
+
+        restoreSIGPIPE();
+
+        /* Indicate that we managed to set up the build environment. */
+        writeToStderr("\n");
+
+        /* Execute the program.  This should not return. */
+        execve(program.c_str(), (char * *) &args[0], (char * *) envArr);
+
+        throw SysError(format("executing ‘%1%’") % drv.builder);
+
+    } catch (std::exception & e) {
+        writeToStderr("while setting up the build environment: " + string(e.what()) + "\n");
+        _exit(1);
+    }
+
+    abort(); /* never reached */
+}
+
+
+/* Parse a list of reference specifiers.  Each element must either be
+   a store path, or the symbolic name of the output of the derivation
+   (such as `out'). */
+PathSet parseReferenceSpecifiers(const Derivation & drv, string attr)
+{
+    PathSet result;
+    Paths paths = tokenizeString<Paths>(attr);
+    foreach (Strings::iterator, i, paths) {
+        if (isStorePath(*i))
+            result.insert(*i);
+        else if (drv.outputs.find(*i) != drv.outputs.end())
+            result.insert(drv.outputs.find(*i)->second.path);
+        else throw BuildError(
+            format("derivation contains an illegal reference specifier ‘%1%’")
+            % *i);
+    }
+    return result;
+}
+
+
+void DerivationGoal::registerOutputs()
+{
+    /* When using a build hook, the build hook can register the output
+       as valid (by doing `nix-store --import').  If so we don't have
+       to do anything here. */
+    if (hook) {
+        bool allValid = true;
+        foreach (DerivationOutputs::iterator, i, drv.outputs)
+            if (!worker.store.isValidPath(i->second.path)) allValid = false;
+        if (allValid) return;
+    }
+
+    ValidPathInfos infos;
+
+    /* Check whether the output paths were created, and grep each
+       output path to determine what other paths it references.  Also make all
+       output paths read-only. */
+    foreach (DerivationOutputs::iterator, i, drv.outputs) {
+        Path path = i->second.path;
+        if (missingPaths.find(path) == missingPaths.end()) continue;
+
+        Path actualPath = path;
+        if (useChroot) {
+            actualPath = chrootRootDir + path;
+            if (pathExists(actualPath)) {
+                /* Move output paths from the chroot to the Nix store. */
+                if (buildMode == bmRepair)
+                    replaceValidPath(path, actualPath);
+                else
+                    if (buildMode != bmCheck && rename(actualPath.c_str(), path.c_str()) == -1)
+                        throw SysError(format("moving build output ‘%1%’ from the chroot to the Nix store") % path);
+            }
+            if (buildMode != bmCheck) actualPath = path;
+        } else {
+            Path redirected = redirectedOutputs[path];
+            if (buildMode == bmRepair
+                && redirectedBadOutputs.find(path) != redirectedBadOutputs.end()
+                && pathExists(redirected))
+                replaceValidPath(path, redirected);
+            if (buildMode == bmCheck)
+                actualPath = redirected;
+        }
+
+        struct stat st;
+        if (lstat(actualPath.c_str(), &st) == -1) {
+            if (errno == ENOENT)
+                throw BuildError(
+                    format("builder for ‘%1%’ failed to produce output path ‘%2%’")
+                    % drvPath % path);
+            throw SysError(format("getting attributes of path ‘%1%’") % actualPath);
+        }
+
+#ifndef __CYGWIN__
+        /* Check that the output is not group or world writable, as
+           that means that someone else can have interfered with the
+           build.  Also, the output should be owned by the build
+           user. */
+        if ((!S_ISLNK(st.st_mode) && (st.st_mode & (S_IWGRP | S_IWOTH))) ||
+            (buildUser.enabled() && st.st_uid != buildUser.getUID()))
+            throw BuildError(format("suspicious ownership or permission on ‘%1%’; rejecting this build output") % path);
+#endif
+
+        /* Apply hash rewriting if necessary. */
+        bool rewritten = false;
+        if (!rewritesFromTmp.empty()) {
+            printMsg(lvlError, format("warning: rewriting hashes in ‘%1%’; cross fingers") % path);
+
+            /* Canonicalise first.  This ensures that the path we're
+               rewriting doesn't contain a hard link to /etc/shadow or
+               something like that. */
+            canonicalisePathMetaData(actualPath, buildUser.enabled() ? buildUser.getUID() : -1, inodesSeen);
+
+            /* FIXME: this is in-memory. */
+            StringSink sink;
+            dumpPath(actualPath, sink);
+            deletePath(actualPath);
+            sink.s = rewriteHashes(sink.s, rewritesFromTmp);
+            StringSource source(sink.s);
+            restorePath(actualPath, source);
+
+            rewritten = true;
+        }
+
+        startNest(nest, lvlTalkative,
+            format("scanning for references inside ‘%1%’") % path);
+
+        /* Check that fixed-output derivations produced the right
+           outputs (i.e., the content hash should match the specified
+           hash). */
+        if (i->second.hash != "") {
+
+            bool recursive; HashType ht; Hash h;
+            i->second.parseHashInfo(recursive, ht, h);
+
+            if (!recursive) {
+                /* The output path should be a regular file without
+                   execute permission. */
+                if (!S_ISREG(st.st_mode) || (st.st_mode & S_IXUSR) != 0)
+                    throw BuildError(
+                        format("output path ‘%1%’ should be a non-executable regular file") % path);
+            }
+
+            /* Check the hash. */
+            Hash h2 = recursive ? hashPath(ht, actualPath).first : hashFile(ht, actualPath);
+            if (h != h2)
+                throw BuildError(
+                    format("output path ‘%1%’ should have %2% hash ‘%3%’, instead has ‘%4%’")
+                    % path % i->second.hashAlgo % printHash16or32(h) % printHash16or32(h2));
+        }
+
+        /* Get rid of all weird permissions.  This also checks that
+           all files are owned by the build user, if applicable. */
+        canonicalisePathMetaData(actualPath,
+            buildUser.enabled() && !rewritten ? buildUser.getUID() : -1, inodesSeen);
+
+        /* For this output path, find the references to other paths
+           contained in it.  Compute the SHA-256 NAR hash at the same
+           time.  The hash is stored in the database so that we can
+           verify later on whether nobody has messed with the store. */
+        HashResult hash;
+        PathSet references = scanForReferences(actualPath, allPaths, hash);
+
+        if (buildMode == bmCheck) {
+            ValidPathInfo info = worker.store.queryPathInfo(path);
+            if (hash.first != info.hash)
+                throw Error(format("derivation ‘%1%’ may not be deterministic: hash mismatch in output ‘%2%’") % drvPath % path);
+            continue;
+        }
+
+        /* For debugging, print out the referenced and unreferenced
+           paths. */
+        foreach (PathSet::iterator, i, inputPaths) {
+            PathSet::iterator j = references.find(*i);
+            if (j == references.end())
+                debug(format("unreferenced input: ‘%1%’") % *i);
+            else
+                debug(format("referenced input: ‘%1%’") % *i);
+        }
+
+        /* If the derivation specifies an `allowedReferences'
+           attribute (containing a list of paths that the output may
+           refer to), check that all references are in that list.  !!!
+           allowedReferences should really be per-output. */
+        if (drv.env.find("allowedReferences") != drv.env.end()) {
+            PathSet allowed = parseReferenceSpecifiers(drv, get(drv.env, "allowedReferences"));
+            foreach (PathSet::iterator, i, references)
+                if (allowed.find(*i) == allowed.end())
+                    throw BuildError(format("output is not allowed to refer to path ‘%1%’") % *i);
+        }
+
+        worker.store.optimisePath(path); // FIXME: combine with scanForReferences()
+
+        worker.store.markContentsGood(path);
+
+        ValidPathInfo info;
+        info.path = path;
+        info.hash = hash.first;
+        info.narSize = hash.second;
+        info.references = references;
+        info.deriver = drvPath;
+        infos.push_back(info);
+    }
+
+    if (buildMode == bmCheck) return;
+
+    /* Register each output path as valid, and register the sets of
+       paths referenced by each of them.  If there are cycles in the
+       outputs, this will fail. */
+    worker.store.registerValidPaths(infos);
+}
+
+
+string drvsLogDir = "drvs";
+
+
+Path DerivationGoal::openLogFile()
+{
+    logSize = 0;
+
+    if (!settings.keepLog) return "";
+
+    string baseName = baseNameOf(drvPath);
+
+    /* Create a log file in <nixLogDir>/drvs/<first two characters of
+       the derivation basename>/; the rest of the basename is the file
+       name (with ‘.bz2’ appended if the log is compressed). */
+    Path dir = (format("%1%/%2%/%3%/") % settings.nixLogDir % drvsLogDir % string(baseName, 0, 2)).str();
+    createDirs(dir);
+
+    if (settings.compressLog) {
+
+        Path logFileName = (format("%1%/%2%.bz2") % dir % string(baseName, 2)).str();
+        AutoCloseFD fd = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC, 0666);
+        if (fd == -1) throw SysError(format("creating log file ‘%1%’") % logFileName);
+        closeOnExec(fd);
+
+        if (!(fLogFile = fdopen(fd.borrow(), "w")))
+            throw SysError(format("opening file ‘%1%’") % logFileName);
+
+        int err;
+        if (!(bzLogFile = BZ2_bzWriteOpen(&err, fLogFile, 9, 0, 0)))
+            throw Error(format("cannot open compressed log file ‘%1%’") % logFileName);
+
+        return logFileName;
+
+    } else {
+        Path logFileName = (format("%1%/%2%") % dir % string(baseName, 2)).str();
+        fdLogFile = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC, 0666);
+        if (fdLogFile == -1) throw SysError(format("creating log file ‘%1%’") % logFileName);
+        closeOnExec(fdLogFile);
+        return logFileName;
+    }
+}
+
+
+void DerivationGoal::closeLogFile()
+{
+    if (bzLogFile) {
+        int err;
+        BZ2_bzWriteClose(&err, bzLogFile, 0, 0, 0);
+        bzLogFile = 0;
+        if (err != BZ_OK) throw Error(format("cannot close compressed log file (BZip2 error = %1%)") % err);
+    }
+
+    if (fLogFile) {
+        fclose(fLogFile);
+        fLogFile = 0;
+    }
+
+    fdLogFile.close();
+}
+
+
+void DerivationGoal::deleteTmpDir(bool force)
+{
+    if (tmpDir != "") {
+        if (settings.keepFailed && !force) {
+            printMsg(lvlError,
+                format("note: keeping build directory ‘%2%’")
+                % drvPath % tmpDir);
+            chmod(tmpDir.c_str(), 0755);
+        }
+        else
+            deletePath(tmpDir);
+        tmpDir = "";
+    }
+}
+
+
+void DerivationGoal::handleChildOutput(int fd, const string & data)
+{
+    if ((hook && fd == hook->builderOut.readSide) ||
+        (!hook && fd == builderOut.readSide))
+    {
+        logSize += data.size();
+        if (settings.maxLogSize && logSize > settings.maxLogSize) {
+            printMsg(lvlError,
+                format("%1% killed after writing more than %2% bytes of log output")
+                % getName() % settings.maxLogSize);
+            cancel(true); // not really a timeout, but close enough
+            return;
+        }
+        if (verbosity >= settings.buildVerbosity)
+            writeToStderr(filterANSIEscapes(data, true));
+        if (bzLogFile) {
+            int err;
+            BZ2_bzWrite(&err, bzLogFile, (unsigned char *) data.data(), data.size());
+            if (err != BZ_OK) throw Error(format("cannot write to compressed log file (BZip2 error = %1%)") % err);
+        } else if (fdLogFile != -1)
+            writeFull(fdLogFile, (unsigned char *) data.data(), data.size());
+    }
+
+    if (hook && fd == hook->fromHook.readSide)
+        writeToStderr(data);
+}
+
+
+void DerivationGoal::handleEOF(int fd)
+{
+    worker.wakeUp(shared_from_this());
+}
+
+
+PathSet DerivationGoal::checkPathValidity(bool returnValid, bool checkHash)
+{
+    PathSet result;
+    foreach (DerivationOutputs::iterator, i, drv.outputs) {
+        if (!wantOutput(i->first, wantedOutputs)) continue;
+        bool good =
+            worker.store.isValidPath(i->second.path) &&
+            (!checkHash || worker.store.pathContentsGood(i->second.path));
+        if (good == returnValid) result.insert(i->second.path);
+    }
+    return result;
+}
+
+
+bool DerivationGoal::pathFailed(const Path & path)
+{
+    if (!settings.cacheFailure) return false;
+
+    if (!worker.store.hasPathFailed(path)) return false;
+
+    printMsg(lvlError, format("builder for ‘%1%’ failed previously (cached)") % path);
+
+    if (settings.printBuildTrace)
+        printMsg(lvlError, format("@ build-failed %1% - cached") % drvPath);
+
+    worker.permanentFailure = true;
+    amDone(ecFailed);
+
+    return true;
+}
+
+
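+/* Redirect ‘path’ to an alternative store path whose hash part is
+   derived from the derivation path and the original output path, and
+   record the corresponding hash rewrites: rewritesToTmp is applied to
+   the environment and arguments of the build (see initChild()), and
+   rewritesFromTmp is applied to the produced outputs afterwards (see
+   registerOutputs()). */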
+Path DerivationGoal::addHashRewrite(const Path & path)
+{
+    string h1 = string(path, settings.nixStore.size() + 1, 32);
+    string h2 = string(printHash32(hashString(htSHA256, "rewrite:" + drvPath + ":" + path)), 0, 32);
+    Path p = settings.nixStore + "/" + h2 + string(path, settings.nixStore.size() + 33);
+    if (pathExists(p)) deletePath(p);
+    assert(path.size() == p.size());
+    rewritesToTmp[h1] = h2;
+    rewritesFromTmp[h2] = h1;
+    redirectedOutputs[path] = p;
+    return p;
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+class SubstitutionGoal : public Goal
+{
+    friend class Worker;
+
+private:
+    /* The store path that should be realised through a substitute. */
+    Path storePath;
+
+    /* The remaining substituters. */
+    Paths subs;
+
+    /* The current substituter. */
+    Path sub;
+
+    /* Whether any substituter can realise this path */
+    bool hasSubstitute;
+
+    /* Path info returned by the substituter's query info operation. */
+    SubstitutablePathInfo info;
+
+    /* Pipe for the substituter's standard output. */
+    Pipe outPipe;
+
+    /* Pipe for the substituter's standard error. */
+    Pipe logPipe;
+
+    /* The process ID of the substituter. */
+    Pid pid;
+
+    /* Lock on the store path. */
+    std::shared_ptr<PathLocks> outputLock;
+
+    /* Whether to try to repair a valid path. */
+    bool repair;
+
+    /* Location where we're downloading the substitute.  Differs from
+       storePath when doing a repair. */
+    Path destPath;
+
+    typedef void (SubstitutionGoal::*GoalState)();
+    GoalState state;
+
+public:
+    SubstitutionGoal(const Path & storePath, Worker & worker, bool repair = false);
+    ~SubstitutionGoal();
+
+    void cancel(bool timeout);
+
+    void work();
+
+    /* The states. */
+    void init();
+    void tryNext();
+    void gotInfo();
+    void referencesValid();
+    void tryToRun();
+    void finished();
+
+    /* Callback used by the worker to write to the log. */
+    void handleChildOutput(int fd, const string & data);
+    void handleEOF(int fd);
+
+    Path getStorePath() { return storePath; }
+};
+
+
+SubstitutionGoal::SubstitutionGoal(const Path & storePath, Worker & worker, bool repair)
+    : Goal(worker)
+    , hasSubstitute(false)
+    , repair(repair)
+{
+    this->storePath = storePath;
+    state = &SubstitutionGoal::init;
+    name = (format("substitution of ‘%1%’") % storePath).str();
+    trace("created");
+}
+
+
+SubstitutionGoal::~SubstitutionGoal()
+{
+    if (pid != -1) worker.childTerminated(pid);
+}
+
+
+void SubstitutionGoal::cancel(bool timeout)
+{
+    if (settings.printBuildTrace && timeout)
+        printMsg(lvlError, format("@ substituter-failed %1% timeout") % storePath);
+    if (pid != -1) {
+        pid_t savedPid = pid;
+        pid.kill();
+        worker.childTerminated(savedPid);
+    }
+    amDone(ecFailed);
+}
+
+
+void SubstitutionGoal::work()
+{
+    (this->*state)();
+}
+
+
+void SubstitutionGoal::init()
+{
+    trace("init");
+
+    worker.store.addTempRoot(storePath);
+
+    /* If the path already exists we're done. */
+    if (!repair && worker.store.isValidPath(storePath)) {
+        amDone(ecSuccess);
+        return;
+    }
+
+    if (settings.readOnlyMode)
+        throw Error(format("cannot substitute path ‘%1%’ - no write access to the Nix store") % storePath);
+
+    subs = settings.substituters;
+
+    tryNext();
+}
+
+
+void SubstitutionGoal::tryNext()
+{
+    trace("trying next substituter");
+
+    if (subs.size() == 0) {
+        /* None left.  Terminate this goal and let someone else deal
+           with it. */
+        debug(format("path ‘%1%’ is required, but there is no substituter that can build it") % storePath);
+        /* Hack: don't indicate failure if there were no substituters.
+           In that case the calling derivation should just do a
+           build. */
+        amDone(hasSubstitute ? ecFailed : ecNoSubstituters);
+        return;
+    }
+
+    sub = subs.front();
+    subs.pop_front();
+
+    SubstitutablePathInfos infos;
+    PathSet dummy(singleton<PathSet>(storePath));
+    worker.store.querySubstitutablePathInfos(sub, dummy, infos);
+    SubstitutablePathInfos::iterator k = infos.find(storePath);
+    if (k == infos.end()) { tryNext(); return; }
+    info = k->second;
+    hasSubstitute = true;
+
+    /* To maintain the closure invariant, we first have to realise the
+       paths referenced by this one. */
+    foreach (PathSet::iterator, i, info.references)
+        if (*i != storePath) /* ignore self-references */
+            addWaitee(worker.makeSubstitutionGoal(*i));
+
+    if (waitees.empty()) /* to prevent hang (no wake-up event) */
+        referencesValid();
+    else
+        state = &SubstitutionGoal::referencesValid;
+}
+
+
+void SubstitutionGoal::referencesValid()
+{
+    trace("all references realised");
+
+    if (nrFailed > 0) {
+        debug(format("some references of path ‘%1%’ could not be realised") % storePath);
+        amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
+        return;
+    }
+
+    foreach (PathSet::iterator, i, info.references)
+        if (*i != storePath) /* ignore self-references */
+            assert(worker.store.isValidPath(*i));
+
+    state = &SubstitutionGoal::tryToRun;
+    worker.wakeUp(shared_from_this());
+}
+
+
+void SubstitutionGoal::tryToRun()
+{
+    trace("trying to run");
+
+    /* Make sure that we are allowed to start a build.  Note that even
+       if maxBuildJobs == 0 (no local builds allowed), we still allow
+       a substituter to run.  This is because substitutions cannot be
+       distributed to another machine via the build hook. */
+    if (worker.getNrLocalBuilds() >= (settings.maxBuildJobs == 0 ? 1 : settings.maxBuildJobs)) {
+        worker.waitForBuildSlot(shared_from_this());
+        return;
+    }
+
+    /* Maybe a derivation goal has already locked this path
+       (exceedingly unlikely, since it should have used a substitute
+       first, but let's be defensive). */
+    outputLock.reset(); // make sure this goal's lock is gone
+    if (pathIsLockedByMe(storePath)) {
+        debug(format("restarting substitution of ‘%1%’ because it's locked by another goal")
+            % storePath);
+        worker.waitForAnyGoal(shared_from_this());
+        return; /* restart in the tryToRun() state when another goal finishes */
+    }
+
+    /* Acquire a lock on the output path. */
+    outputLock = std::shared_ptr<PathLocks>(new PathLocks);
+    if (!outputLock->lockPaths(singleton<PathSet>(storePath), "", false)) {
+        worker.waitForAWhile(shared_from_this());
+        return;
+    }
+
+    /* Check again whether the path is invalid. */
+    if (!repair && worker.store.isValidPath(storePath)) {
+        debug(format("store path ‘%1%’ has become valid") % storePath);
+        outputLock->setDeletion(true);
+        amDone(ecSuccess);
+        return;
+    }
+
+    printMsg(lvlInfo, format("fetching path ‘%1%’...") % storePath);
+
+    outPipe.create();
+    logPipe.create();
+
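+    /* When repairing, fetch into a temporary location next to the
+       store path; finished() later moves the result into place with
+       replaceValidPath(). */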
+    destPath = repair ? storePath + ".tmp" : storePath;
+
+    /* Remove the (stale) output path if it exists. */
+    if (pathExists(destPath))
+        deletePath(destPath);
+
+    worker.store.setSubstituterEnv();
+
+    /* Fill in the arguments. */
+    Strings args;
+    args.push_back(baseNameOf(sub));
+    args.push_back("--substitute");
+    args.push_back(storePath);
+    args.push_back(destPath);
+    const char * * argArr = strings2CharPtrs(args);
+
+    /* Fork the substitute program. */
+    pid = startProcess([&]() {
+
+        commonChildInit(logPipe);
+
+        if (dup2(outPipe.writeSide, STDOUT_FILENO) == -1)
+            throw SysError("cannot dup output pipe into stdout");
+
+        execv(sub.c_str(), (char * *) argArr);
+
+        throw SysError(format("executing ‘%1%’") % sub);
+    });
+
+    pid.setSeparatePG(true);
+    pid.setKillSignal(SIGTERM);
+    outPipe.writeSide.close();
+    logPipe.writeSide.close();
+    worker.childStarted(shared_from_this(),
+        pid, singleton<set<int> >(logPipe.readSide), true, true);
+
+    state = &SubstitutionGoal::finished;
+
+    if (settings.printBuildTrace)
+        printMsg(lvlError, format("@ substituter-started %1% %2%") % storePath % sub);
+}
+
+
+void SubstitutionGoal::finished()
+{
+    trace("substitute finished");
+
+    /* Since we got an EOF on the logger pipe, the substitute is
+       presumed to have terminated.  */
+    pid_t savedPid = pid;
+    int status = pid.wait(true);
+
+    /* So the child is gone now. */
+    worker.childTerminated(savedPid);
+
+    /* Close the read side of the logger pipe. */
+    logPipe.readSide.close();
+
+    /* Get the hash info from the substituter's standard output: the
+       first line is ignored here; on success, the second line contains
+       the expected hash in the form ‘<hashAlgo>:<hash>’. */
+    string dummy = readLine(outPipe.readSide);
+    string expectedHashStr = statusOk(status) ? readLine(outPipe.readSide) : "";
+    outPipe.readSide.close();
+
+    /* Check the exit status and the build result. */
+    HashResult hash;
+    try {
+
+        if (!statusOk(status))
+            throw SubstError(format("fetching path ‘%1%’ %2%")
+                % storePath % statusToString(status));
+
+        if (!pathExists(destPath))
+            throw SubstError(format("substitute did not produce path ‘%1%’") % destPath);
+
+        hash = hashPath(htSHA256, destPath);
+
+        /* Verify the expected hash we got from the substituter. */
+        if (expectedHashStr != "") {
+            size_t n = expectedHashStr.find(':');
+            if (n == string::npos)
+                throw Error(format("bad hash from substituter: %1%") % expectedHashStr);
+            HashType hashType = parseHashType(string(expectedHashStr, 0, n));
+            if (hashType == htUnknown)
+                throw Error(format("unknown hash algorithm in ‘%1%’") % expectedHashStr);
+            Hash expectedHash = parseHash16or32(hashType, string(expectedHashStr, n + 1));
+            Hash actualHash = hashType == htSHA256 ? hash.first : hashPath(hashType, destPath).first;
+            if (expectedHash != actualHash)
+                throw SubstError(format("hash mismatch in downloaded path ‘%1%’: expected %2%, got %3%")
+                    % storePath % printHash(expectedHash) % printHash(actualHash));
+        }
+
+    } catch (SubstError & e) {
+
+        printMsg(lvlInfo, e.msg());
+
+        if (settings.printBuildTrace) {
+            printMsg(lvlError, format("@ substituter-failed %1% %2% %3%")
+                % storePath % status % e.msg());
+        }
+
+        /* Try the next substitute. */
+        state = &SubstitutionGoal::tryNext;
+        worker.wakeUp(shared_from_this());
+        return;
+    }
+
+    if (repair) replaceValidPath(storePath, destPath);
+
+    canonicalisePathMetaData(storePath, -1);
+
+    worker.store.optimisePath(storePath); // FIXME: combine with hashPath()
+
+    ValidPathInfo info2;
+    info2.path = storePath;
+    info2.hash = hash.first;
+    info2.narSize = hash.second;
+    info2.references = info.references;
+    info2.deriver = info.deriver;
+    worker.store.registerValidPath(info2);
+
+    outputLock->setDeletion(true);
+    outputLock.reset();
+
+    worker.store.markContentsGood(storePath);
+
+    printMsg(lvlChatty,
+        format("substitution of path ‘%1%’ succeeded") % storePath);
+
+    if (settings.printBuildTrace)
+        printMsg(lvlError, format("@ substituter-succeeded %1%") % storePath);
+
+    amDone(ecSuccess);
+}
+
+
+void SubstitutionGoal::handleChildOutput(int fd, const string & data)
+{
+    assert(fd == logPipe.readSide);
+    if (verbosity >= settings.buildVerbosity) writeToStderr(data);
+    /* Don't write substitution output to a log file for now.  We
+       probably should, though. */
+}
+
+
+void SubstitutionGoal::handleEOF(int fd)
+{
+    if (fd == logPipe.readSide) worker.wakeUp(shared_from_this());
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+static bool working = false;
+
+
+Worker::Worker(LocalStore & store)
+    : store(store)
+{
+    /* Debugging: prevent recursive workers. */
+    if (working) abort();
+    working = true;
+    nrLocalBuilds = 0;
+    lastWokenUp = 0;
+    permanentFailure = false;
+    timedOut = false;
+}
+
+
+Worker::~Worker()
+{
+    working = false;
+
+    /* Explicitly get rid of all strong pointers now.  After this all
+       goals that refer to this worker should be gone.  (Otherwise we
+       are in trouble, since goals may call childTerminated() etc. in
+       their destructors). */
+    topGoals.clear();
+}
+
+
+GoalPtr Worker::makeDerivationGoal(const Path & path, const StringSet & wantedOutputs, BuildMode buildMode)
+{
+    GoalPtr goal = derivationGoals[path].lock();
+    if (!goal) {
+        goal = GoalPtr(new DerivationGoal(path, wantedOutputs, *this, buildMode));
+        derivationGoals[path] = goal;
+        wakeUp(goal);
+    } else
+        (dynamic_cast<DerivationGoal *>(goal.get()))->addWantedOutputs(wantedOutputs);
+    return goal;
+}
+
+
+GoalPtr Worker::makeSubstitutionGoal(const Path & path, bool repair)
+{
+    GoalPtr goal = substitutionGoals[path].lock();
+    if (!goal) {
+        goal = GoalPtr(new SubstitutionGoal(path, *this, repair));
+        substitutionGoals[path] = goal;
+        wakeUp(goal);
+    }
+    return goal;
+}
+
+
+static void removeGoal(GoalPtr goal, WeakGoalMap & goalMap)
+{
+    /* !!! inefficient */
+    for (WeakGoalMap::iterator i = goalMap.begin();
+         i != goalMap.end(); )
+        if (i->second.lock() == goal) {
+            WeakGoalMap::iterator j = i; ++j;
+            goalMap.erase(i);
+            i = j;
+        }
+        else ++i;
+}
+
+
+void Worker::removeGoal(GoalPtr goal)
+{
+    nix::removeGoal(goal, derivationGoals);
+    nix::removeGoal(goal, substitutionGoals);
+    if (topGoals.find(goal) != topGoals.end()) {
+        topGoals.erase(goal);
+        /* If a top-level goal failed, then kill all other goals
+           (unless keepGoing was set). */
+        if (goal->getExitCode() == Goal::ecFailed && !settings.keepGoing)
+            topGoals.clear();
+    }
+
+    /* Wake up goals waiting for any goal to finish. */
+    foreach (WeakGoals::iterator, i, waitingForAnyGoal) {
+        GoalPtr goal = i->lock();
+        if (goal) wakeUp(goal);
+    }
+
+    waitingForAnyGoal.clear();
+}
+
+
+void Worker::wakeUp(GoalPtr goal)
+{
+    goal->trace("woken up");
+    addToWeakGoals(awake, goal);
+}
+
+
+unsigned Worker::getNrLocalBuilds()
+{
+    return nrLocalBuilds;
+}
+
+
+void Worker::childStarted(GoalPtr goal,
+    pid_t pid, const set<int> & fds, bool inBuildSlot,
+    bool respectTimeouts)
+{
+    Child child;
+    child.goal = goal;
+    child.fds = fds;
+    child.timeStarted = child.lastOutput = time(0);
+    child.inBuildSlot = inBuildSlot;
+    child.respectTimeouts = respectTimeouts;
+    children[pid] = child;
+    if (inBuildSlot) nrLocalBuilds++;
+}
+
+
+void Worker::childTerminated(pid_t pid, bool wakeSleepers)
+{
+    assert(pid != -1); /* common mistake */
+
+    Children::iterator i = children.find(pid);
+    assert(i != children.end());
+
+    if (i->second.inBuildSlot) {
+        assert(nrLocalBuilds > 0);
+        nrLocalBuilds--;
+    }
+
+    children.erase(pid);
+
+    if (wakeSleepers) {
+
+        /* Wake up goals waiting for a build slot. */
+        foreach (WeakGoals::iterator, i, wantingToBuild) {
+            GoalPtr goal = i->lock();
+            if (goal) wakeUp(goal);
+        }
+
+        wantingToBuild.clear();
+    }
+}
+
+
+void Worker::waitForBuildSlot(GoalPtr goal)
+{
+    debug("wait for build slot");
+    if (getNrLocalBuilds() < settings.maxBuildJobs)
+        wakeUp(goal); /* we can do it right away */
+    else
+        addToWeakGoals(wantingToBuild, goal);
+}
+
+
+void Worker::waitForAnyGoal(GoalPtr goal)
+{
+    debug("wait for any goal");
+    addToWeakGoals(waitingForAnyGoal, goal);
+}
+
+
+void Worker::waitForAWhile(GoalPtr goal)
+{
+    debug("wait for a while");
+    addToWeakGoals(waitingForAWhile, goal);
+}
+
+
+void Worker::run(const Goals & _topGoals)
+{
+    foreach (Goals::iterator, i,  _topGoals) topGoals.insert(*i);
+
+    startNest(nest, lvlDebug, format("entered goal loop"));
+
+    while (1) {
+
+        checkInterrupt();
+
+        /* Call every wake goal. */
+        while (!awake.empty() && !topGoals.empty()) {
+            WeakGoals awake2(awake);
+            awake.clear();
+            foreach (WeakGoals::iterator, i, awake2) {
+                checkInterrupt();
+                GoalPtr goal = i->lock();
+                if (goal) goal->work();
+                if (topGoals.empty()) break;
+            }
+        }
+
+        if (topGoals.empty()) break;
+
+        /* Wait for input. */
+        if (!children.empty() || !waitingForAWhile.empty())
+            waitForInput();
+        else {
+            if (awake.empty() && settings.maxBuildJobs == 0) throw Error(
+                "unable to start any build; either increase ‘--max-jobs’ "
+                "or enable distributed builds");
+            assert(!awake.empty());
+        }
+    }
+
+    /* If --keep-going is not set, it's possible that the main goal
+       exited while some of its subgoals were still active.  But if
+       --keep-going *is* set, then they must all be finished now. */
+    assert(!settings.keepGoing || awake.empty());
+    assert(!settings.keepGoing || wantingToBuild.empty());
+    assert(!settings.keepGoing || children.empty());
+}
+
+
+void Worker::waitForInput()
+{
+    printMsg(lvlVomit, "waiting for children");
+
+    /* Process output from the file descriptors attached to the
+       children, namely log output and output path creation commands.
+       We also use this to detect child termination: if we get EOF on
+       the logger pipe of a build, we assume that the builder has
+       terminated. */
+
+    bool useTimeout = false;
+    struct timeval timeout;
+    timeout.tv_usec = 0;
+    time_t before = time(0);
+
+    /* If we're monitoring for silence on stdout/stderr, or if there
+       is a build timeout, then wait for input until the first
+       deadline for any child. */
+    assert(sizeof(time_t) >= sizeof(long));
+    time_t nearest = LONG_MAX; // nearest deadline
+    foreach (Children::iterator, i, children) {
+        if (!i->second.respectTimeouts) continue;
+        if (settings.maxSilentTime != 0)
+            nearest = std::min(nearest, i->second.lastOutput + settings.maxSilentTime);
+        if (settings.buildTimeout != 0)
+            nearest = std::min(nearest, i->second.timeStarted + settings.buildTimeout);
+    }
+    if (nearest != LONG_MAX) {
+        timeout.tv_sec = std::max((time_t) 1, nearest - before);
+        useTimeout = true;
+        printMsg(lvlVomit, format("sleeping %1% seconds") % timeout.tv_sec);
+    }
+
+    /* If we are polling goals that are waiting for a lock, then wake
+       up after a few seconds at most. */
+    if (!waitingForAWhile.empty()) {
+        useTimeout = true;
+        if (lastWokenUp == 0)
+            printMsg(lvlError, "waiting for locks or build slots...");
+        if (lastWokenUp == 0 || lastWokenUp > before) lastWokenUp = before;
+        timeout.tv_sec = std::max((time_t) 1, (time_t) (lastWokenUp + settings.pollInterval - before));
+    } else lastWokenUp = 0;
+
+    using namespace std;
+    /* Use select() to wait for the input side of any logger pipe to
+       become `available'.  Note that `available' (i.e., non-blocking)
+       includes EOF. */
+    fd_set fds;
+    FD_ZERO(&fds);
+    int fdMax = 0;
+    foreach (Children::iterator, i, children) {
+        foreach (set<int>::iterator, j, i->second.fds) {
+            FD_SET(*j, &fds);
+            if (*j >= fdMax) fdMax = *j + 1;
+        }
+    }
+
+    if (select(fdMax, &fds, 0, 0, useTimeout ? &timeout : 0) == -1) {
+        if (errno == EINTR) return;
+        throw SysError("waiting for input");
+    }
+
+    time_t after = time(0);
+
+    /* Process all available file descriptors. */
+
+    /* Since goals may be canceled from inside the loop below (causing
+       them to be erased from the `children' map), we have to be
+       careful that we don't keep iterators alive across calls to
+       cancel(). */
+    set<pid_t> pids;
+    foreach (Children::iterator, i, children) pids.insert(i->first);
+
+    foreach (set<pid_t>::iterator, i, pids) {
+        checkInterrupt();
+        Children::iterator j = children.find(*i);
+        if (j == children.end()) continue; // child destroyed
+        GoalPtr goal = j->second.goal.lock();
+        assert(goal);
+
+        set<int> fds2(j->second.fds);
+        foreach (set<int>::iterator, k, fds2) {
+            if (FD_ISSET(*k, &fds)) {
+                unsigned char buffer[4096];
+                ssize_t rd = read(*k, buffer, sizeof(buffer));
+                if (rd == -1) {
+                    if (errno != EINTR)
+                        throw SysError(format("reading from %1%")
+                            % goal->getName());
+                } else if (rd == 0) {
+                    debug(format("%1%: got EOF") % goal->getName());
+                    goal->handleEOF(*k);
+                    j->second.fds.erase(*k);
+                } else {
+                    printMsg(lvlVomit, format("%1%: read %2% bytes")
+                        % goal->getName() % rd);
+                    string data((char *) buffer, rd);
+                    j->second.lastOutput = after;
+                    goal->handleChildOutput(*k, data);
+                }
+            }
+        }
+
+        if (goal->getExitCode() == Goal::ecBusy &&
+            settings.maxSilentTime != 0 &&
+            j->second.respectTimeouts &&
+            after - j->second.lastOutput >= (time_t) settings.maxSilentTime)
+        {
+            printMsg(lvlError,
+                format("%1% timed out after %2% seconds of silence")
+                % goal->getName() % settings.maxSilentTime);
+            goal->cancel(true);
+            timedOut = true;
+        }
+
+        else if (goal->getExitCode() == Goal::ecBusy &&
+            settings.buildTimeout != 0 &&
+            j->second.respectTimeouts &&
+            after - j->second.timeStarted >= (time_t) settings.buildTimeout)
+        {
+            printMsg(lvlError,
+                format("%1% timed out after %2% seconds")
+                % goal->getName() % settings.buildTimeout);
+            goal->cancel(true);
+            timedOut = true;
+        }
+    }
+
+    if (!waitingForAWhile.empty() && lastWokenUp + settings.pollInterval <= after) {
+        lastWokenUp = after;
+        foreach (WeakGoals::iterator, i, waitingForAWhile) {
+            GoalPtr goal = i->lock();
+            if (goal) wakeUp(goal);
+        }
+        waitingForAWhile.clear();
+    }
+}
+
+
+unsigned int Worker::exitStatus()
+{
+    return timedOut ? 101 : (permanentFailure ? 100 : 1);
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+void LocalStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
+{
+    startNest(nest, lvlDebug,
+        format("building %1%") % showPaths(drvPaths));
+
+    Worker worker(*this);
+
+    Goals goals;
+    foreach (PathSet::const_iterator, i, drvPaths) {
+        DrvPathWithOutputs i2 = parseDrvPathWithOutputs(*i);
+        if (isDerivation(i2.first))
+            goals.insert(worker.makeDerivationGoal(i2.first, i2.second, buildMode));
+        else
+            goals.insert(worker.makeSubstitutionGoal(*i, buildMode == bmRepair));
+    }
+
+    worker.run(goals);
+
+    PathSet failed;
+    foreach (Goals::iterator, i, goals)
+        if ((*i)->getExitCode() == Goal::ecFailed) {
+            DerivationGoal * i2 = dynamic_cast<DerivationGoal *>(i->get());
+            if (i2) failed.insert(i2->getDrvPath());
+            else failed.insert(dynamic_cast<SubstitutionGoal *>(i->get())->getStorePath());
+        }
+
+    if (!failed.empty())
+        throw Error(format("build of %1% failed") % showPaths(failed), worker.exitStatus());
+}
+
+
+void LocalStore::ensurePath(const Path & path)
+{
+    /* If the path is already valid, we're done. */
+    if (isValidPath(path)) return;
+
+    Worker worker(*this);
+    GoalPtr goal = worker.makeSubstitutionGoal(path);
+    Goals goals = singleton<Goals>(goal);
+
+    worker.run(goals);
+
+    if (goal->getExitCode() != Goal::ecSuccess)
+        throw Error(format("path ‘%1%’ does not exist and cannot be created") % path, worker.exitStatus());
+}
+
+
+void LocalStore::repairPath(const Path & path)
+{
+    Worker worker(*this);
+    GoalPtr goal = worker.makeSubstitutionGoal(path, true);
+    Goals goals = singleton<Goals>(goal);
+
+    worker.run(goals);
+
+    if (goal->getExitCode() != Goal::ecSuccess)
+        throw Error(format("cannot repair path ‘%1%’") % path, worker.exitStatus());
+}
+
+
+}
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
new file mode 100644
index 000000000000..7234ae5630ed
--- /dev/null
+++ b/src/libstore/derivations.cc
@@ -0,0 +1,288 @@
+#include "derivations.hh"
+#include "store-api.hh"
+#include "globals.hh"
+#include "util.hh"
+#include "misc.hh"
+
+
+namespace nix {
+
+
+void DerivationOutput::parseHashInfo(bool & recursive, HashType & hashType, Hash & hash) const
+{
+    recursive = false;
+    string algo = hashAlgo;
+
+    if (string(algo, 0, 2) == "r:") {
+        recursive = true;
+        algo = string(algo, 2);
+    }
+
+    hashType = parseHashType(algo);
+    if (hashType == htUnknown)
+        throw Error(format("unknown hash algorithm ‘%1%’") % algo);
+
+    hash = parseHash(hashType, this->hash);
+}
+
+
+Path writeDerivation(StoreAPI & store,
+    const Derivation & drv, const string & name, bool repair)
+{
+    PathSet references;
+    references.insert(drv.inputSrcs.begin(), drv.inputSrcs.end());
+    foreach (DerivationInputs::const_iterator, i, drv.inputDrvs)
+        references.insert(i->first);
+    /* Note that the outputs of a derivation are *not* references
+       (they may not exist yet, and need not be kept alive by the
+       garbage collector). */
+    string suffix = name + drvExtension;
+    string contents = unparseDerivation(drv);
+    return settings.readOnlyMode
+        ? computeStorePathForText(suffix, contents, references)
+        : store.addTextToStore(suffix, contents, references, repair);
+}
+
+
+static Path parsePath(std::istream & str)
+{
+    string s = parseString(str);
+    if (s.size() == 0 || s[0] != '/')
+        throw FormatError(format("bad path ‘%1%’ in derivation") % s);
+    return s;
+}
+
+
+static StringSet parseStrings(std::istream & str, bool arePaths)
+{
+    StringSet res;
+    while (!endOfList(str))
+        res.insert(arePaths ? parsePath(str) : parseString(str));
+    return res;
+}
+
+
+static Derivation parseDerivation(const string & s)
+{
+    Derivation drv;
+    std::istringstream str(s);
+    expect(str, "Derive([");
+
+    /* Parse the list of outputs. */
+    while (!endOfList(str)) {
+        DerivationOutput out;
+        expect(str, "("); string id = parseString(str);
+        expect(str, ","); out.path = parsePath(str);
+        expect(str, ","); out.hashAlgo = parseString(str);
+        expect(str, ","); out.hash = parseString(str);
+        expect(str, ")");
+        drv.outputs[id] = out;
+    }
+
+    /* Parse the list of input derivations. */
+    expect(str, ",[");
+    while (!endOfList(str)) {
+        expect(str, "(");
+        Path drvPath = parsePath(str);
+        expect(str, ",[");
+        drv.inputDrvs[drvPath] = parseStrings(str, false);
+        expect(str, ")");
+    }
+
+    expect(str, ",["); drv.inputSrcs = parseStrings(str, true);
+    expect(str, ","); drv.platform = parseString(str);
+    expect(str, ","); drv.builder = parseString(str);
+
+    /* Parse the builder arguments. */
+    expect(str, ",[");
+    while (!endOfList(str))
+        drv.args.push_back(parseString(str));
+
+    /* Parse the environment variables. */
+    expect(str, ",[");
+    while (!endOfList(str)) {
+        expect(str, "("); string name = parseString(str);
+        expect(str, ","); string value = parseString(str);
+        expect(str, ")");
+        drv.env[name] = value;
+    }
+
+    expect(str, ")");
+    return drv;
+}
+
+
+Derivation readDerivation(const Path & drvPath)
+{
+    try {
+        return parseDerivation(readFile(drvPath));
+    } catch (FormatError & e) {
+        throw Error(format("error parsing derivation ‘%1%’: %2%") % drvPath % e.msg());
+    }
+}
+
+
+static void printString(string & res, const string & s)
+{
+    res += '"';
+    for (const char * i = s.c_str(); *i; i++)
+        if (*i == '\"' || *i == '\\') { res += "\\"; res += *i; }
+        else if (*i == '\n') res += "\\n";
+        else if (*i == '\r') res += "\\r";
+        else if (*i == '\t') res += "\\t";
+        else res += *i;
+    res += '"';
+}
+
+
+template<class ForwardIterator>
+static void printStrings(string & res, ForwardIterator i, ForwardIterator j)
+{
+    res += '[';
+    bool first = true;
+    for ( ; i != j; ++i) {
+        if (first) first = false; else res += ',';
+        printString(res, *i);
+    }
+    res += ']';
+}
+
+
+string unparseDerivation(const Derivation & drv)
+{
+    string s;
+    s.reserve(65536);
+    s += "Derive([";
+
+    bool first = true;
+    foreach (DerivationOutputs::const_iterator, i, drv.outputs) {
+        if (first) first = false; else s += ',';
+        s += '('; printString(s, i->first);
+        s += ','; printString(s, i->second.path);
+        s += ','; printString(s, i->second.hashAlgo);
+        s += ','; printString(s, i->second.hash);
+        s += ')';
+    }
+
+    s += "],[";
+    first = true;
+    foreach (DerivationInputs::const_iterator, i, drv.inputDrvs) {
+        if (first) first = false; else s += ',';
+        s += '('; printString(s, i->first);
+        s += ','; printStrings(s, i->second.begin(), i->second.end());
+        s += ')';
+    }
+
+    s += "],";
+    printStrings(s, drv.inputSrcs.begin(), drv.inputSrcs.end());
+
+    s += ','; printString(s, drv.platform);
+    s += ','; printString(s, drv.builder);
+    s += ','; printStrings(s, drv.args.begin(), drv.args.end());
+
+    s += ",[";
+    first = true;
+    foreach (StringPairs::const_iterator, i, drv.env) {
+        if (first) first = false; else s += ',';
+        s += '('; printString(s, i->first);
+        s += ','; printString(s, i->second);
+        s += ')';
+    }
+
+    s += "])";
+
+    return s;
+}
+
+
+bool isDerivation(const string & fileName)
+{
+    return hasSuffix(fileName, drvExtension);
+}
+
+
+bool isFixedOutputDrv(const Derivation & drv)
+{
+    return drv.outputs.size() == 1 &&
+        drv.outputs.begin()->first == "out" &&
+        drv.outputs.begin()->second.hash != "";
+}
+
+
+DrvHashes drvHashes;
+
+
+/* Returns the hash of a derivation modulo fixed-output
+   subderivations.  A fixed-output derivation is a derivation with one
+   output (`out') for which an expected hash and hash algorithm are
+   specified (using the `outputHash' and `outputHashAlgo'
+   attributes).  We don't want changes to such derivations to
+   propagate upwards through the dependency graph, changing output
+   paths everywhere.
+
+   For instance, if we change the url in a call to the `fetchurl'
+   function, we do not want to rebuild everything depending on it
+   (after all, (the hash of) the file being downloaded is unchanged).
+   So the *output paths* should not change.  On the other hand, the
+   *derivation paths* should change to reflect the new dependency
+   graph.
+
+   That's what this function does: it returns a hash which is just the
+   hash of the derivation ATerm, except that any input derivation
+   paths have been replaced by the result of a recursive call to this
+   function, and that for fixed-output derivations we return a hash of
+   its output path. */
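+
+/* For example (with hypothetical store paths): if ‘drv’ has an input
+   derivation /nix/store/…-foo.tar.gz.drv that is fixed-output, then the
+   key ‘/nix/store/…-foo.tar.gz.drv’ in inputDrvs is replaced by
+   printHash(hashString(htSHA256, "fixed:out:<hashAlgo>:<hash>:<outPath>"))
+   before the modified derivation is unparsed and hashed.  Changing the
+   URL (but not the output hash) of that download therefore does not
+   change the result of this function. */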
+Hash hashDerivationModulo(StoreAPI & store, Derivation drv)
+{
+    /* Return a fixed hash for fixed-output derivations. */
+    if (isFixedOutputDrv(drv)) {
+        DerivationOutputs::const_iterator i = drv.outputs.begin();
+        return hashString(htSHA256, "fixed:out:"
+            + i->second.hashAlgo + ":"
+            + i->second.hash + ":"
+            + i->second.path);
+    }
+
+    /* For other derivations, replace the inputs paths with recursive
+       calls to this function.*/
+    DerivationInputs inputs2;
+    foreach (DerivationInputs::const_iterator, i, drv.inputDrvs) {
+        Hash h = drvHashes[i->first];
+        if (h.type == htUnknown) {
+            assert(store.isValidPath(i->first));
+            Derivation drv2 = readDerivation(i->first);
+            h = hashDerivationModulo(store, drv2);
+            drvHashes[i->first] = h;
+        }
+        inputs2[printHash(h)] = i->second;
+    }
+    drv.inputDrvs = inputs2;
+
+    return hashString(htSHA256, unparseDerivation(drv));
+}
+
+
+DrvPathWithOutputs parseDrvPathWithOutputs(const string & s)
+{
+    size_t n = s.find("!");
+    return n == s.npos
+        ? DrvPathWithOutputs(s, std::set<string>())
+        : DrvPathWithOutputs(string(s, 0, n), tokenizeString<std::set<string> >(string(s, n + 1), ","));
+}
+
+
+Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs)
+{
+    return outputs.empty()
+        ? drvPath
+        : drvPath + "!" + concatStringsSep(",", outputs);
+}
+
+
+bool wantOutput(const string & output, const std::set<string> & wanted)
+{
+    return wanted.empty() || wanted.find(output) != wanted.end();
+}
+
+
+}
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
new file mode 100644
index 000000000000..04b64dfc88a7
--- /dev/null
+++ b/src/libstore/derivations.hh
@@ -0,0 +1,93 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+
+#include <map>
+
+
+namespace nix {
+
+
+/* Extension of derivations in the Nix store. */
+const string drvExtension = ".drv";
+
+
+/* Abstract syntax of derivations. */
+
+struct DerivationOutput
+{
+    Path path;
+    string hashAlgo; /* hash used for expected hash computation */
+    string hash; /* expected hash, may be null */
+    DerivationOutput()
+    {
+    }
+    DerivationOutput(Path path, string hashAlgo, string hash)
+    {
+        this->path = path;
+        this->hashAlgo = hashAlgo;
+        this->hash = hash;
+    }
+    void parseHashInfo(bool & recursive, HashType & hashType, Hash & hash) const;
+};
+
+typedef std::map<string, DerivationOutput> DerivationOutputs;
+
+/* For inputs that are sub-derivations, we specify exactly which
+   output IDs we are interested in. */
+typedef std::map<Path, StringSet> DerivationInputs;
+
+typedef std::map<string, string> StringPairs;
+
+struct Derivation
+{
+    DerivationOutputs outputs; /* keyed on symbolic IDs */
+    DerivationInputs inputDrvs; /* inputs that are sub-derivations */
+    PathSet inputSrcs; /* inputs that are sources */
+    string platform;
+    Path builder;
+    Strings args;
+    StringPairs env;
+};
+
+
+class StoreAPI;
+
+
+/* Write a derivation to the Nix store, and return its path. */
+Path writeDerivation(StoreAPI & store,
+    const Derivation & drv, const string & name, bool repair = false);
+
+/* Read a derivation from a file. */
+Derivation readDerivation(const Path & drvPath);
+
+/* Print a derivation. */
+string unparseDerivation(const Derivation & drv);
+
+/* Check whether a file name ends with the extensions for
+   derivations. */
+bool isDerivation(const string & fileName);
+
+/* Return true iff this is a fixed-output derivation. */
+bool isFixedOutputDrv(const Derivation & drv);
+
+Hash hashDerivationModulo(StoreAPI & store, Derivation drv);
+
+/* Memoisation of hashDerivationModulo(). */
+typedef std::map<Path, Hash> DrvHashes;
+
+extern DrvHashes drvHashes;
+
+/* Split a string specifying a derivation and a set of outputs
+   (/nix/store/hash-foo!out1,out2,...) into the derivation path and
+   the outputs. */
+typedef std::pair<string, std::set<string> > DrvPathWithOutputs;
+DrvPathWithOutputs parseDrvPathWithOutputs(const string & s);
+
+Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs);
+
+bool wantOutput(const string & output, const std::set<string> & wanted);
+
+
+}
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
new file mode 100644
index 000000000000..481f5a7c35a8
--- /dev/null
+++ b/src/libstore/gc.cc
@@ -0,0 +1,760 @@
+#include "globals.hh"
+#include "misc.hh"
+#include "local-store.hh"
+
+#include <functional>
+#include <queue>
+#include <algorithm>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+
+namespace nix {
+
+
+static string gcLockName = "gc.lock";
+static string tempRootsDir = "temproots";
+static string gcRootsDir = "gcroots";
+
+
+/* Acquire the global GC lock.  This is used to prevent new Nix
+   processes from starting after the temporary root files have been
+   read.  To be precise: when they try to create a new temporary root
+   file, they will block until the garbage collector has finished /
+   yielded the GC lock. */
+int LocalStore::openGCLock(LockType lockType)
+{
+    Path fnGCLock = (format("%1%/%2%")
+        % settings.nixStateDir % gcLockName).str();
+
+    debug(format("acquiring global GC lock ‘%1%’") % fnGCLock);
+
+    AutoCloseFD fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT, 0600);
+    if (fdGCLock == -1)
+        throw SysError(format("opening global GC lock ‘%1%’") % fnGCLock);
+    closeOnExec(fdGCLock);
+
+    if (!lockFile(fdGCLock, lockType, false)) {
+        printMsg(lvlError, format("waiting for the big garbage collector lock..."));
+        lockFile(fdGCLock, lockType, true);
+    }
+
+    /* !!! Restrict read permission on the GC lock.  Otherwise any
+       process that can open the file for reading can DoS the
+       collector. */
+
+    return fdGCLock.borrow();
+}
+
+
+static void makeSymlink(const Path & link, const Path & target)
+{
+    /* Create directories up to `link'. */
+    createDirs(dirOf(link));
+
+    /* Create the new symlink. */
+    Path tempLink = (format("%1%.tmp-%2%-%3%")
+        % link % getpid() % rand()).str();
+    createSymlink(target, tempLink);
+
+    /* Atomically replace the old one. */
+    if (rename(tempLink.c_str(), link.c_str()) == -1)
+        throw SysError(format("cannot rename ‘%1%’ to ‘%2%’")
+            % tempLink % link);
+}
+
+
+void LocalStore::syncWithGC()
+{
+    AutoCloseFD fdGCLock = openGCLock(ltRead);
+}
+
+
+void LocalStore::addIndirectRoot(const Path & path)
+{
+    string hash = printHash32(hashString(htSHA1, path));
+    Path realRoot = canonPath((format("%1%/%2%/auto/%3%")
+        % settings.nixStateDir % gcRootsDir % hash).str());
+    makeSymlink(realRoot, path);
+}
+
+
+Path addPermRoot(StoreAPI & store, const Path & _storePath,
+    const Path & _gcRoot, bool indirect, bool allowOutsideRootsDir)
+{
+    Path storePath(canonPath(_storePath));
+    Path gcRoot(canonPath(_gcRoot));
+    assertStorePath(storePath);
+
+    if (isInStore(gcRoot))
+        throw Error(format(
+                "creating a garbage collector root (%1%) in the Nix store is forbidden "
+                "(are you running nix-build inside the store?)") % gcRoot);
+
+    if (indirect) {
+        /* Don't clobber the link if it already exists and doesn't
+           point to the Nix store. */
+        if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
+            throw Error(format("cannot create symlink ‘%1%’; already exists") % gcRoot);
+        makeSymlink(gcRoot, storePath);
+        store.addIndirectRoot(gcRoot);
+    }
+
+    else {
+        if (!allowOutsideRootsDir) {
+            Path rootsDir = canonPath((format("%1%/%2%") % settings.nixStateDir % gcRootsDir).str());
+
+            if (string(gcRoot, 0, rootsDir.size() + 1) != rootsDir + "/")
+                throw Error(format(
+                    "path ‘%1%’ is not a valid garbage collector root; "
+                    "it's not in the directory ‘%2%’")
+                    % gcRoot % rootsDir);
+        }
+
+        if (baseNameOf(gcRoot) == baseNameOf(storePath))
+            writeFile(gcRoot, "");
+        else
+            makeSymlink(gcRoot, storePath);
+    }
+
+    /* Check that the root can be found by the garbage collector.
+       !!! This can be very slow on machines that have many roots.
+       Instead of reading all the roots, it would be more efficient to
+       check if the root is in a directory in or linked from the
+       gcroots directory. */
+    if (settings.checkRootReachability) {
+        Roots roots = store.findRoots();
+        if (roots.find(gcRoot) == roots.end())
+            printMsg(lvlError,
+                format(
+                    "warning: ‘%1%’ is not in a directory where the garbage collector looks for roots; "
+                    "therefore, ‘%2%’ might be removed by the garbage collector")
+                % gcRoot % storePath);
+    }
+
+    /* Grab the global GC lock, causing us to block while a GC is in
+       progress.  This prevents the set of permanent roots from
+       increasing while a GC is in progress. */
+    store.syncWithGC();
+
+    return gcRoot;
+}
+
+
+/* The file to which we write our temporary roots. */
+static Path fnTempRoots;
+static AutoCloseFD fdTempRoots;
+
+
+void LocalStore::addTempRoot(const Path & path)
+{
+    /* Create the temporary roots file for this process. */
+    if (fdTempRoots == -1) {
+
+        while (1) {
+            Path dir = (format("%1%/%2%") % settings.nixStateDir % tempRootsDir).str();
+            createDirs(dir);
+
+            fnTempRoots = (format("%1%/%2%")
+                % dir % getpid()).str();
+
+            AutoCloseFD fdGCLock = openGCLock(ltRead);
+
+            if (pathExists(fnTempRoots))
+                /* It *must* be stale, since no two running processes
+                   can have the same pid. */
+                unlink(fnTempRoots.c_str());
+
+            fdTempRoots = openLockFile(fnTempRoots, true);
+
+            fdGCLock.close();
+
+            debug(format("acquiring read lock on ‘%1%’") % fnTempRoots);
+            lockFile(fdTempRoots, ltRead, true);
+
+            /* Check whether the garbage collector didn't get in our
+               way. */
+            struct stat st;
+            if (fstat(fdTempRoots, &st) == -1)
+                throw SysError(format("statting ‘%1%’") % fnTempRoots);
+            if (st.st_size == 0) break;
+
+            /* The garbage collector deleted this file before we could
+               get a lock.  (It won't delete the file after we get a
+               lock.)  Try again. */
+        }
+
+    }
+
+    /* Upgrade the lock to a write lock.  This will cause us to block
+       if the garbage collector is holding our lock. */
+    debug(format("acquiring write lock on ‘%1%’") % fnTempRoots);
+    lockFile(fdTempRoots, ltWrite, true);
+
+    string s = path + '\0';
+    writeFull(fdTempRoots, (const unsigned char *) s.data(), s.size());
+
+    /* Downgrade to a read lock. */
+    debug(format("downgrading to read lock on ‘%1%’") % fnTempRoots);
+    lockFile(fdTempRoots, ltRead, true);
+}
+
+
+void removeTempRoots()
+{
+    if (fdTempRoots != -1) {
+        fdTempRoots.close();
+        unlink(fnTempRoots.c_str());
+    }
+}
+
+
+/* Automatically clean up the temporary roots file when we exit. */
+struct RemoveTempRoots
+{
+    ~RemoveTempRoots()
+    {
+        removeTempRoots();
+    }
+};
+
+static RemoveTempRoots autoRemoveTempRoots __attribute__((unused));
+
+
+typedef std::shared_ptr<AutoCloseFD> FDPtr;
+typedef list<FDPtr> FDs;
+
+
+static void readTempRoots(PathSet & tempRoots, FDs & fds)
+{
+    /* Read the `temproots' directory for per-process temporary root
+       files. */
+    DirEntries tempRootFiles = readDirectory(
+        (format("%1%/%2%") % settings.nixStateDir % tempRootsDir).str());
+
+    for (auto & i : tempRootFiles) {
+        Path path = (format("%1%/%2%/%3%") % settings.nixStateDir % tempRootsDir % i.name).str();
+
+        debug(format("reading temporary root file ‘%1%’") % path);
+        FDPtr fd(new AutoCloseFD(open(path.c_str(), O_RDWR, 0666)));
+        if (*fd == -1) {
+            /* It's okay if the file has disappeared. */
+            if (errno == ENOENT) continue;
+            throw SysError(format("opening temporary roots file ‘%1%’") % path);
+        }
+
+        /* This should work, but doesn't, for some reason. */
+        //FDPtr fd(new AutoCloseFD(openLockFile(path, false)));
+        //if (*fd == -1) continue;
+
+        /* Try to acquire a write lock without blocking.  This can
+           only succeed if the owning process has died.  In that case
+           we don't care about its temporary roots. */
+        if (lockFile(*fd, ltWrite, false)) {
+            printMsg(lvlError, format("removing stale temporary roots file ‘%1%’") % path);
+            unlink(path.c_str());
+            writeFull(*fd, (const unsigned char *) "d", 1);
+            continue;
+        }
+
+        /* Acquire a read lock.  This will prevent the owning process
+           from upgrading to a write lock, therefore it will block in
+           addTempRoot(). */
+        debug(format("waiting for read lock on ‘%1%’") % path);
+        lockFile(*fd, ltRead, true);
+
+        /* Read the entire file. */
+        string contents = readFile(*fd);
+
+        /* Extract the roots. */
+        string::size_type pos = 0, end;
+
+        while ((end = contents.find((char) 0, pos)) != string::npos) {
+            Path root(contents, pos, end - pos);
+            debug(format("got temporary root ‘%1%’") % root);
+            assertStorePath(root);
+            tempRoots.insert(root);
+            pos = end + 1;
+        }
+
+        fds.push_back(fd); /* keep open */
+    }
+}
+
+
+static void foundRoot(StoreAPI & store,
+    const Path & path, const Path & target, Roots & roots)
+{
+    Path storePath = toStorePath(target);
+    if (store.isValidPath(storePath))
+        roots[path] = storePath;
+    else
+        printMsg(lvlInfo, format("skipping invalid root from ‘%1%’ to ‘%2%’") % path % storePath);
+}
+
+
+static void findRoots(StoreAPI & store, const Path & path, unsigned char type, Roots & roots)
+{
+    try {
+
+        if (type == DT_UNKNOWN) {
+            struct stat st = lstat(path);
+            if (S_ISDIR(st.st_mode)) type = DT_DIR;
+            else if (S_ISLNK(st.st_mode)) type = DT_LNK;
+            else if (S_ISREG(st.st_mode)) type = DT_REG;
+        }
+
+        if (type == DT_DIR) {
+            for (auto & i : readDirectory(path))
+                findRoots(store, path + "/" + i.name, i.type, roots);
+        }
+
+        else if (type == DT_LNK) {
+            Path target = readLink(path);
+            if (isInStore(target))
+                foundRoot(store, path, target, roots);
+
+            /* Handle indirect roots. */
+            else {
+                target = absPath(target, dirOf(path));
+                if (!pathExists(target)) {
+                    if (isInDir(path, settings.nixStateDir + "/" + gcRootsDir + "/auto")) {
+                        printMsg(lvlInfo, format("removing stale link from ‘%1%’ to ‘%2%’") % path % target);
+                        unlink(path.c_str());
+                    }
+                } else {
+                    struct stat st2 = lstat(target);
+                    if (!S_ISLNK(st2.st_mode)) return;
+                    Path target2 = readLink(target);
+                    if (isInStore(target2)) foundRoot(store, target, target2, roots);
+                }
+            }
+        }
+
+        else if (type == DT_REG) {
+            Path storePath = settings.nixStore + "/" + baseNameOf(path);
+            if (store.isValidPath(storePath))
+                roots[path] = storePath;
+        }
+
+    }
+
+    catch (SysError & e) {
+        /* We only ignore permanent failures. */
+        if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR)
+            printMsg(lvlInfo, format("cannot read potential root ‘%1%’") % path);
+        else
+            throw;
+    }
+}
+
+
+Roots LocalStore::findRoots()
+{
+    Roots roots;
+
+    /* Process direct roots in {gcroots,manifests,profiles}. */
+    nix::findRoots(*this, settings.nixStateDir + "/" + gcRootsDir, DT_UNKNOWN, roots);
+    nix::findRoots(*this, settings.nixStateDir + "/manifests", DT_UNKNOWN, roots);
+    nix::findRoots(*this, settings.nixStateDir + "/profiles", DT_UNKNOWN, roots);
+
+    return roots;
+}
+
+
+static void addAdditionalRoots(StoreAPI & store, PathSet & roots)
+{
+    Path rootFinder = getEnv("NIX_ROOT_FINDER",
+        settings.nixLibexecDir + "/nix/find-runtime-roots.pl");
+
+    if (rootFinder.empty()) return;
+
+    debug(format("executing ‘%1%’ to find additional roots") % rootFinder);
+
+    string result = runProgram(rootFinder);
+
+    StringSet paths = tokenizeString<StringSet>(result, "\n");
+
+    foreach (StringSet::iterator, i, paths) {
+        if (isInStore(*i)) {
+            Path path = toStorePath(*i);
+            if (roots.find(path) == roots.end() && store.isValidPath(path)) {
+                debug(format("got additional root ‘%1%’") % path);
+                roots.insert(path);
+            }
+        }
+    }
+}
+
+
+struct GCLimitReached { };
+
+
+struct LocalStore::GCState
+{
+    GCOptions options;
+    GCResults & results;
+    PathSet roots;
+    PathSet tempRoots;
+    PathSet dead;
+    PathSet alive;
+    bool gcKeepOutputs;
+    bool gcKeepDerivations;
+    unsigned long long bytesInvalidated;
+    Path trashDir;
+    bool shouldDelete;
+    GCState(GCResults & results_) : results(results_), bytesInvalidated(0) { }
+};
+
+
+bool LocalStore::isActiveTempFile(const GCState & state,
+    const Path & path, const string & suffix)
+{
+    return hasSuffix(path, suffix)
+        && state.tempRoots.find(string(path, 0, path.size() - suffix.size())) != state.tempRoots.end();
+}
+
+
+void LocalStore::deleteGarbage(GCState & state, const Path & path)
+{
+    unsigned long long bytesFreed;
+    deletePath(path, bytesFreed);
+    state.results.bytesFreed += bytesFreed;
+}
+
+
+void LocalStore::deletePathRecursive(GCState & state, const Path & path)
+{
+    checkInterrupt();
+
+    unsigned long long size = 0;
+
+    if (isValidPath(path)) {
+        PathSet referrers;
+        queryReferrers(path, referrers);
+        foreach (PathSet::iterator, i, referrers)
+            if (*i != path) deletePathRecursive(state, *i);
+        size = queryPathInfo(path).narSize;
+        invalidatePathChecked(path);
+    }
+
+    struct stat st;
+    if (lstat(path.c_str(), &st)) {
+        if (errno == ENOENT) return;
+        throw SysError(format("getting status of %1%") % path);
+    }
+
+    printMsg(lvlInfo, format("deleting ‘%1%’") % path);
+
+    state.results.paths.insert(path);
+
+    /* If the path is not a regular file or symlink, move it to the
+       trash directory.  The move is to ensure that later (when we're
+       not holding the global GC lock) we can delete the path without
+       being afraid that the path has become alive again.  Otherwise
+       delete it right away. */
+    if (S_ISDIR(st.st_mode)) {
+        // Estimate the amount freed using the narSize field.  FIXME:
+        // if the path was not valid, need to determine the actual
+        // size.
+        state.bytesInvalidated += size;
+        if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
+            throw SysError(format("making ‘%1%’ writable") % path);
+        Path tmp = state.trashDir + "/" + baseNameOf(path);
+        if (rename(path.c_str(), tmp.c_str()))
+            throw SysError(format("unable to rename ‘%1%’ to ‘%2%’") % path % tmp);
+    } else
+        deleteGarbage(state, path);
+
+    if (state.results.bytesFreed + state.bytesInvalidated > state.options.maxFreed) {
+        printMsg(lvlInfo, format("deleted or invalidated more than %1% bytes; stopping") % state.options.maxFreed);
+        throw GCLimitReached();
+    }
+}
+
+
+bool LocalStore::canReachRoot(GCState & state, PathSet & visited, const Path & path)
+{
+    if (visited.find(path) != visited.end()) return false;
+
+    if (state.alive.find(path) != state.alive.end()) {
+        return true;
+    }
+
+    if (state.dead.find(path) != state.dead.end()) {
+        return false;
+    }
+
+    if (state.roots.find(path) != state.roots.end()) {
+        printMsg(lvlDebug, format("cannot delete ‘%1%’ because it's a root") % path);
+        state.alive.insert(path);
+        return true;
+    }
+
+    visited.insert(path);
+
+    if (!isValidPath(path)) return false;
+
+    PathSet incoming;
+
+    /* Don't delete this path if any of its referrers are alive. */
+    queryReferrers(path, incoming);
+
+    /* If gc-keep-derivations is set and this is a derivation, then
+       don't delete the derivation if any of the outputs are alive. */
+    if (state.gcKeepDerivations && isDerivation(path)) {
+        PathSet outputs = queryDerivationOutputs(path);
+        foreach (PathSet::iterator, i, outputs)
+            if (isValidPath(*i) && queryDeriver(*i) == path)
+                incoming.insert(*i);
+    }
+
+    /* If gc-keep-outputs is set, then don't delete this path if there
+       are derivers of this path that are not garbage. */
+    if (state.gcKeepOutputs) {
+        PathSet derivers = queryValidDerivers(path);
+        foreach (PathSet::iterator, i, derivers)
+            incoming.insert(*i);
+    }
+
+    foreach (PathSet::iterator, i, incoming)
+        if (*i != path)
+            if (canReachRoot(state, visited, *i)) {
+                state.alive.insert(path);
+                return true;
+            }
+
+    return false;
+}
+
+
+void LocalStore::tryToDelete(GCState & state, const Path & path)
+{
+    checkInterrupt();
+
+    if (path == linksDir || path == state.trashDir) return;
+
+    startNest(nest, lvlDebug, format("considering whether to delete ‘%1%’") % path);
+
+    if (!isValidPath(path)) {
+        /* A lock file belonging to a path that we're building right
+           now isn't garbage. */
+        if (isActiveTempFile(state, path, ".lock")) return;
+
+        /* Don't delete .chroot directories for derivations that are
+           currently being built. */
+        if (isActiveTempFile(state, path, ".chroot")) return;
+    }
+
+    PathSet visited;
+
+    if (canReachRoot(state, visited, path)) {
+        printMsg(lvlDebug, format("cannot delete ‘%1%’ because it's still reachable") % path);
+    } else {
+        /* No path we visited was a root, so everything is garbage.
+           But we only delete ‘path’ and its referrers here so that
+           ‘nix-store --delete’ doesn't have the unexpected effect of
+           recursing into derivations and outputs. */
+        state.dead.insert(visited.begin(), visited.end());
+        if (state.shouldDelete)
+            deletePathRecursive(state, path);
+    }
+}
+
+
+/* Unlink all files in /nix/store/.links that have a link count of 1,
+   which indicates that there are no other links and so they can be
+   safely deleted.  FIXME: race condition with optimisePath(): we
+   might see a link count of 1 just before optimisePath() increases
+   the link count. */
+void LocalStore::removeUnusedLinks(const GCState & state)
+{
+    AutoCloseDir dir = opendir(linksDir.c_str());
+    if (!dir) throw SysError(format("opening directory ‘%1%’") % linksDir);
+
+    long long actualSize = 0, unsharedSize = 0;
+
+    struct dirent * dirent;
+    while (errno = 0, dirent = readdir(dir)) {
+        checkInterrupt();
+        string name = dirent->d_name;
+        if (name == "." || name == "..") continue;
+        Path path = linksDir + "/" + name;
+
+        struct stat st;
+        if (lstat(path.c_str(), &st) == -1)
+            throw SysError(format("statting ‘%1%’") % path);
+
+        if (st.st_nlink != 1) {
+            unsigned long long size = st.st_blocks * 512ULL;
+            actualSize += size;
+            unsharedSize += (st.st_nlink - 1) * size;
+            continue;
+        }
+
+        printMsg(lvlTalkative, format("deleting unused link ‘%1%’") % path);
+
+        if (unlink(path.c_str()) == -1)
+            throw SysError(format("deleting ‘%1%’") % path);
+
+        state.results.bytesFreed += st.st_blocks * 512;
+    }
+
+    struct stat st;
+    if (stat(linksDir.c_str(), &st) == -1)
+        throw SysError(format("statting ‘%1%’") % linksDir);
+    long long overhead = st.st_blocks * 512ULL;
+
+    printMsg(lvlInfo, format("note: currently hard linking saves %.2f MiB")
+        % ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
+}
+
+
+void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
+{
+    GCState state(results);
+    state.options = options;
+    state.trashDir = settings.nixStore + "/trash";
+    state.gcKeepOutputs = settings.gcKeepOutputs;
+    state.gcKeepDerivations = settings.gcKeepDerivations;
+
+    /* Using `--ignore-liveness' with `--delete' can have unintended
+       consequences if `gc-keep-outputs' or `gc-keep-derivations' are
+       true (the garbage collector will recurse into deleting the
+       outputs or derivers, respectively).  So disable them. */
+    if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
+        state.gcKeepOutputs = false;
+        state.gcKeepDerivations = false;
+    }
+
+    state.shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
+
+    /* Acquire the global GC lock.  This prevents
+       a) New roots from being added.
+       b) Processes from creating new temporary root files. */
+    AutoCloseFD fdGCLock = openGCLock(ltWrite);
+
+    /* Find the roots.  Since we've grabbed the GC lock, the set of
+       permanent roots cannot increase now. */
+    printMsg(lvlError, format("finding garbage collector roots..."));
+    Roots rootMap = options.ignoreLiveness ? Roots() : findRoots();
+
+    foreach (Roots::iterator, i, rootMap) state.roots.insert(i->second);
+
+    /* Add additional roots returned by the program specified by the
+       NIX_ROOT_FINDER environment variable.  This is typically used
+       to add running programs to the set of roots (to prevent them
+       from being garbage collected). */
+    if (!options.ignoreLiveness)
+        addAdditionalRoots(*this, state.roots);
+
+    /* Read the temporary roots.  This acquires read locks on all
+       per-process temporary root files.  So after this point no paths
+       can be added to the set of temporary roots. */
+    FDs fds;
+    readTempRoots(state.tempRoots, fds);
+    state.roots.insert(state.tempRoots.begin(), state.tempRoots.end());
+
+    /* After this point the set of roots or temporary roots cannot
+       increase, since we hold locks on everything.  So everything
+       that is not reachable from `roots' is garbage. */
+
+    if (state.shouldDelete) {
+        if (pathExists(state.trashDir)) deleteGarbage(state, state.trashDir);
+        createDirs(state.trashDir);
+    }
+
+    /* Now either delete all garbage paths, or just the specified
+       paths (for gcDeleteSpecific). */
+
+    if (options.action == GCOptions::gcDeleteSpecific) {
+
+        foreach (PathSet::iterator, i, options.pathsToDelete) {
+            assertStorePath(*i);
+            tryToDelete(state, *i);
+            if (state.dead.find(*i) == state.dead.end())
+                throw Error(format("cannot delete path ‘%1%’ since it is still alive") % *i);
+        }
+
+    } else if (options.maxFreed > 0) {
+
+        if (state.shouldDelete)
+            printMsg(lvlError, format("deleting garbage..."));
+        else
+            printMsg(lvlError, format("determining live/dead paths..."));
+
+        try {
+
+            AutoCloseDir dir = opendir(settings.nixStore.c_str());
+            if (!dir) throw SysError(format("opening directory ‘%1%’") % settings.nixStore);
+
+            /* Read the store and immediately delete all paths that
+               aren't valid.  When using --max-freed etc., deleting
+               invalid paths is preferred over deleting unreachable
+               paths, since unreachable paths could become reachable
+               again.  We don't use readDirectory() here so that GCing
+               can start faster. */
+            Paths entries;
+            struct dirent * dirent;
+            while (errno = 0, dirent = readdir(dir)) {
+                checkInterrupt();
+                string name = dirent->d_name;
+                if (name == "." || name == "..") continue;
+                Path path = settings.nixStore + "/" + name;
+                if (isValidPath(path))
+                    entries.push_back(path);
+                else
+                    tryToDelete(state, path);
+            }
+
+            dir.close();
+
+            /* Now delete the unreachable valid paths.  Randomise the
+               order in which we delete entries to make the collector
+               less biased towards deleting paths that come
+               alphabetically first (e.g. /nix/store/000...).  This
+               matters when using --max-freed etc. */
+            vector<Path> entries_(entries.begin(), entries.end());
+            random_shuffle(entries_.begin(), entries_.end());
+
+            foreach (vector<Path>::iterator, i, entries_)
+                tryToDelete(state, *i);
+
+        } catch (GCLimitReached & e) {
+        }
+    }
+
+    if (state.options.action == GCOptions::gcReturnLive) {
+        state.results.paths = state.alive;
+        return;
+    }
+
+    if (state.options.action == GCOptions::gcReturnDead) {
+        state.results.paths = state.dead;
+        return;
+    }
+
+    /* Allow other processes to add to the store from here on. */
+    fdGCLock.close();
+    fds.clear();
+
+    /* Delete the trash directory. */
+    printMsg(lvlInfo, format("deleting ‘%1%’") % state.trashDir);
+    deleteGarbage(state, state.trashDir);
+
+    /* Clean up the links directory. */
+    if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
+        printMsg(lvlError, format("deleting unused links..."));
+        removeUnusedLinks(state);
+    }
+
+    /* While we're at it, vacuum the database. */
+    if (options.action == GCOptions::gcDeleteDead) vacuumDB();
+}
+
+
+}
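
The per-process temporary-roots files written by addTempRoot() and read back
by readTempRoots() above are simply sequences of NUL-terminated store paths.
A minimal standalone sketch of the extraction loop (plain standard-library
C++, hypothetical paths) looks like this:

    // Sketch of the NUL-separated extraction performed in readTempRoots().
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        // Hypothetical file contents, as addTempRoot() would have written them:
        // each root is followed by a single '\0' byte.
        std::string contents = std::string("/nix/store/aaa-foo") + '\0'
                             + "/nix/store/bbb-bar" + '\0';

        std::vector<std::string> roots;
        std::string::size_type pos = 0, end;
        while ((end = contents.find('\0', pos)) != std::string::npos) {
            roots.push_back(contents.substr(pos, end - pos));
            pos = end + 1;
        }

        for (auto & r : roots) std::cout << r << "\n";
        return 0;
    }
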
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
new file mode 100644
index 000000000000..b410cea2ee76
--- /dev/null
+++ b/src/libstore/globals.cc
@@ -0,0 +1,266 @@
+#include "config.h"
+
+#include "globals.hh"
+#include "util.hh"
+#include "archive.hh"
+
+#include <map>
+#include <algorithm>
+#include <unistd.h>
+
+
+namespace nix {
+
+
+/* The default location of the daemon socket, relative to nixStateDir.
+   The socket is in a directory to allow you to control access to the
+   Nix daemon by setting the mode/ownership of the directory
+   appropriately.  (This wouldn't work on the socket itself since it
+   must be deleted and recreated on startup.) */
+#define DEFAULT_SOCKET_PATH "/daemon-socket/socket"
+
+
+Settings settings;
+
+
+Settings::Settings()
+{
+    keepFailed = false;
+    keepGoing = false;
+    tryFallback = false;
+    buildVerbosity = lvlError;
+    maxBuildJobs = 1;
+    buildCores = 1;
+#ifdef _SC_NPROCESSORS_ONLN
+    long res = sysconf(_SC_NPROCESSORS_ONLN);
+    if (res > 0) buildCores = res;
+#endif
+    readOnlyMode = false;
+    thisSystem = SYSTEM;
+    maxSilentTime = 0;
+    buildTimeout = 0;
+    useBuildHook = true;
+    printBuildTrace = false;
+    reservedSize = 1024 * 1024;
+    fsyncMetadata = true;
+    useSQLiteWAL = true;
+    syncBeforeRegistering = false;
+    useSubstitutes = true;
+    buildUsersGroup = getuid() == 0 ? "nixbld" : "";
+    useChroot = false;
+    useSshSubstituter = true;
+    impersonateLinux26 = false;
+    keepLog = true;
+    compressLog = true;
+    maxLogSize = 0;
+    cacheFailure = false;
+    pollInterval = 5;
+    checkRootReachability = false;
+    gcKeepOutputs = false;
+    gcKeepDerivations = true;
+    autoOptimiseStore = false;
+    envKeepDerivations = false;
+    lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";
+    showTrace = false;
+    enableImportNative = false;
+}
+
+
+void Settings::processEnvironment()
+{
+    nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
+    nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
+    nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
+    nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
+    nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
+    nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
+    nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
+    nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
+    nixDaemonSocketFile = canonPath(nixStateDir + DEFAULT_SOCKET_PATH);
+}
+
+
+void Settings::loadConfFile()
+{
+    Path settingsFile = (format("%1%/%2%") % nixConfDir % "nix.conf").str();
+    if (!pathExists(settingsFile)) return;
+    string contents = readFile(settingsFile);
+
+    unsigned int pos = 0;
+
+    while (pos < contents.size()) {
+        string line;
+        while (pos < contents.size() && contents[pos] != '\n')
+            line += contents[pos++];
+        pos++;
+
+        string::size_type hash = line.find('#');
+        if (hash != string::npos)
+            line = string(line, 0, hash);
+
+        vector<string> tokens = tokenizeString<vector<string> >(line);
+        if (tokens.empty()) continue;
+
+        if (tokens.size() < 2 || tokens[1] != "=")
+            throw Error(format("illegal configuration line ‘%1%’ in ‘%2%’") % line % settingsFile);
+
+        string name = tokens[0];
+
+        vector<string>::iterator i = tokens.begin();
+        advance(i, 2);
+        settings[name] = concatStringsSep(" ", Strings(i, tokens.end())); // FIXME: slow
+    }
+}
+
+
+void Settings::set(const string & name, const string & value)
+{
+    settings[name] = value;
+    overrides[name] = value;
+}
+
+
+string Settings::get(const string & name, const string & def)
+{
+    auto i = settings.find(name);
+    if (i == settings.end()) return def;
+    return i->second;
+}
+
+
+Strings Settings::get(const string & name, const Strings & def)
+{
+    auto i = settings.find(name);
+    if (i == settings.end()) return def;
+    return tokenizeString<Strings>(i->second);
+}
+
+
+void Settings::update()
+{
+    _get(tryFallback, "build-fallback");
+    _get(maxBuildJobs, "build-max-jobs");
+    _get(buildCores, "build-cores");
+    _get(thisSystem, "system");
+    _get(maxSilentTime, "build-max-silent-time");
+    _get(buildTimeout, "build-timeout");
+    _get(reservedSize, "gc-reserved-space");
+    _get(fsyncMetadata, "fsync-metadata");
+    _get(useSQLiteWAL, "use-sqlite-wal");
+    _get(syncBeforeRegistering, "sync-before-registering");
+    _get(useSubstitutes, "build-use-substitutes");
+    _get(buildUsersGroup, "build-users-group");
+    _get(useChroot, "build-use-chroot");
+    _get(impersonateLinux26, "build-impersonate-linux-26");
+    _get(keepLog, "build-keep-log");
+    _get(compressLog, "build-compress-log");
+    _get(maxLogSize, "build-max-log-size");
+    _get(cacheFailure, "build-cache-failure");
+    _get(pollInterval, "build-poll-interval");
+    _get(checkRootReachability, "gc-check-reachability");
+    _get(gcKeepOutputs, "gc-keep-outputs");
+    _get(gcKeepDerivations, "gc-keep-derivations");
+    _get(autoOptimiseStore, "auto-optimise-store");
+    _get(envKeepDerivations, "env-keep-derivations");
+    _get(sshSubstituterHosts, "ssh-substituter-hosts");
+    _get(useSshSubstituter, "use-ssh-substituter");
+    _get(logServers, "log-servers");
+    _get(enableImportNative, "allow-unsafe-native-code-during-evaluation");
+    _get(useCaseHack, "use-case-hack");
+
+    string subs = getEnv("NIX_SUBSTITUTERS", "default");
+    if (subs == "default") {
+        substituters.clear();
+#if 0
+        if (getEnv("NIX_OTHER_STORES") != "")
+            substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
+#endif
+        substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
+        substituters.push_back(nixLibexecDir + "/nix/substituters/download-from-binary-cache.pl");
+        if (useSshSubstituter && !sshSubstituterHosts.empty())
+            substituters.push_back(nixLibexecDir + "/nix/substituters/download-via-ssh");
+    } else
+        substituters = tokenizeString<Strings>(subs, ":");
+}
+
+
+void Settings::_get(string & res, const string & name)
+{
+    SettingsMap::iterator i = settings.find(name);
+    if (i == settings.end()) return;
+    res = i->second;
+}
+
+
+void Settings::_get(bool & res, const string & name)
+{
+    SettingsMap::iterator i = settings.find(name);
+    if (i == settings.end()) return;
+    if (i->second == "true") res = true;
+    else if (i->second == "false") res = false;
+    else throw Error(format("configuration option ‘%1%’ should be either ‘true’ or ‘false’, not ‘%2%’")
+        % name % i->second);
+}
+
+
+void Settings::_get(StringSet & res, const string & name)
+{
+    SettingsMap::iterator i = settings.find(name);
+    if (i == settings.end()) return;
+    res.clear();
+    Strings ss = tokenizeString<Strings>(i->second);
+    res.insert(ss.begin(), ss.end());
+}
+
+void Settings::_get(Strings & res, const string & name)
+{
+    SettingsMap::iterator i = settings.find(name);
+    if (i == settings.end()) return;
+    res = tokenizeString<Strings>(i->second);
+}
+
+
+template<class N> void Settings::_get(N & res, const string & name)
+{
+    SettingsMap::iterator i = settings.find(name);
+    if (i == settings.end()) return;
+    if (!string2Int(i->second, res))
+        throw Error(format("configuration setting ‘%1%’ should have an integer value") % name);
+}
+
+
+string Settings::pack()
+{
+    string s;
+    foreach (SettingsMap::iterator, i, settings) {
+        if (i->first.find('\n') != string::npos ||
+            i->first.find('=') != string::npos ||
+            i->second.find('\n') != string::npos)
+            throw Error("illegal option name/value");
+        s += i->first; s += '='; s += i->second; s += '\n';
+    }
+    return s;
+}
+
+
+void Settings::unpack(const string & pack) {
+    Strings lines = tokenizeString<Strings>(pack, "\n");
+    foreach (Strings::iterator, i, lines) {
+        string::size_type eq = i->find('=');
+        if (eq == string::npos)
+            throw Error("illegal option name/value");
+        set(i->substr(0, eq), i->substr(eq + 1));
+    }
+}
+
+
+Settings::SettingsMap Settings::getOverrides()
+{
+    return overrides;
+}
+
+
+const string nixVersion = PACKAGE_VERSION;
+
+
+}
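
The configuration file parsed by Settings::loadConfFile() above is a plain
text format: one ‘name = value...’ setting per line, with ‘#’ starting a
comment. A minimal standalone sketch of that per-line parse (standard-library
C++ only; the setting shown is one of the names registered in
Settings::update()) is:

    // Sketch of the per-line parse in Settings::loadConfFile(): strip the
    // '#' comment, tokenize on whitespace, expect "name = value...".
    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>
    #include <vector>

    int main()
    {
        std::map<std::string, std::string> settings;
        std::string line = "gc-keep-outputs = true   # keep build-time dependencies";

        auto hash = line.find('#');
        if (hash != std::string::npos) line.erase(hash);

        std::istringstream in(line);
        std::vector<std::string> tokens;
        for (std::string t; in >> t; ) tokens.push_back(t);

        if (tokens.size() >= 2 && tokens[1] == "=") {
            std::string value;
            for (size_t i = 2; i < tokens.size(); ++i) {
                if (i > 2) value += " ";
                value += tokens[i];
            }
            settings[tokens[0]] = value;
        }

        std::cout << settings["gc-keep-outputs"] << "\n";  // prints "true"
        return 0;
    }
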
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
new file mode 100644
index 000000000000..55f082e56e63
--- /dev/null
+++ b/src/libstore/globals.hh
@@ -0,0 +1,224 @@
+#pragma once
+
+#include "types.hh"
+
+#include <map>
+#include <sys/types.h>
+
+
+namespace nix {
+
+
+struct Settings {
+
+    typedef std::map<string, string> SettingsMap;
+
+    Settings();
+
+    void processEnvironment();
+
+    void loadConfFile();
+
+    void set(const string & name, const string & value);
+
+    string get(const string & name, const string & def);
+
+    Strings get(const string & name, const Strings & def);
+
+    void update();
+
+    string pack();
+
+    void unpack(const string & pack);
+
+    SettingsMap getOverrides();
+
+    /* The directory where we store sources and derived files. */
+    Path nixStore;
+
+    Path nixDataDir; /* !!! fix */
+
+    /* The directory where we log various operations. */
+    Path nixLogDir;
+
+    /* The directory where state is stored. */
+    Path nixStateDir;
+
+    /* The directory where we keep the SQLite database. */
+    Path nixDBPath;
+
+    /* The directory where configuration files are stored. */
+    Path nixConfDir;
+
+    /* The directory where internal helper programs are stored. */
+    Path nixLibexecDir;
+
+    /* The directory where the main programs are stored. */
+    Path nixBinDir;
+
+    /* File name of the socket the daemon listens to.  */
+    Path nixDaemonSocketFile;
+
+    /* Whether to keep temporary directories of failed builds. */
+    bool keepFailed;
+
+    /* Whether to keep building subgoals when a sibling (another
+       subgoal of the same goal) fails. */
+    bool keepGoing;
+
+    /* Whether, if we cannot realise the known closure corresponding
+       to a derivation, we should try to normalise the derivation
+       instead. */
+    bool tryFallback;
+
+    /* Verbosity level for build output. */
+    Verbosity buildVerbosity;
+
+    /* Maximum number of parallel build jobs.  0 means unlimited. */
+    unsigned int maxBuildJobs;
+
+    /* Number of CPU cores to utilize in parallel within a build,
+       i.e. by passing this number to Make via '-j'. 0 means that the
+       number of actual CPU cores on the local host ought to be
+       auto-detected. */
+    unsigned int buildCores;
+
+    /* Read-only mode.  Don't copy stuff to the store, don't change
+       the database. */
+    bool readOnlyMode;
+
+    /* The canonical system name, as returned by config.guess. */
+    string thisSystem;
+
+    /* The maximum time in seconds that a builder can go without
+       producing any output on stdout/stderr before it is killed.  0
+       means infinity. */
+    time_t maxSilentTime;
+
+    /* The maximum duration in seconds that a builder can run.  0
+       means infinity.  */
+    time_t buildTimeout;
+
+    /* The substituters.  There are programs that can somehow realise
+       a store path without building, e.g., by downloading it or
+       copying it from a CD. */
+    Paths substituters;
+
+    /* Whether to use build hooks (for distributed builds).  Sometimes
+       users want to disable this from the command-line. */
+    bool useBuildHook;
+
+    /* Whether buildDerivations() should print out lines on stderr in
+       a fixed format to allow its progress to be monitored.  Each
+       line starts with a "@".  The following are defined:
+
+       @ build-started <drvpath> <outpath> <system> <logfile>
+       @ build-failed <drvpath> <outpath> <exitcode> <error text>
+       @ build-succeeded <drvpath> <outpath>
+       @ substituter-started <outpath> <substituter>
+       @ substituter-failed <outpath> <exitcode> <error text>
+       @ substituter-succeeded <outpath>
+
+       Best combined with --no-build-output, otherwise stderr might
+       conceivably contain lines in this format printed by the
+       builders. */
+    bool printBuildTrace;
+
+    /* Amount of reserved space for the garbage collector
+       (/nix/var/nix/db/reserved). */
+    off_t reservedSize;
+
+    /* Whether SQLite should use fsync. */
+    bool fsyncMetadata;
+
+    /* Whether SQLite should use WAL mode. */
+    bool useSQLiteWAL;
+
+    /* Whether to call sync() before registering a path as valid. */
+    bool syncBeforeRegistering;
+
+    /* Whether to use substitutes. */
+    bool useSubstitutes;
+
+    /* The Unix group that contains the build users. */
+    string buildUsersGroup;
+
+    /* Whether to build in chroot. */
+    bool useChroot;
+
+    /* Set of ssh connection strings for the ssh substituter */
+    Strings sshSubstituterHosts;
+
+    /* Whether to use the ssh substituter at all */
+    bool useSshSubstituter;
+
+    /* Whether to impersonate a Linux 2.6 machine on newer kernels. */
+    bool impersonateLinux26;
+
+    /* Whether to store build logs. */
+    bool keepLog;
+
+    /* Whether to compress logs. */
+    bool compressLog;
+
+    /* Maximum number of bytes a builder can write to stdout/stderr
+       before being killed (0 means no limit). */
+    unsigned long maxLogSize;
+
+    /* Whether to cache build failures. */
+    bool cacheFailure;
+
+    /* How often (in seconds) to poll for locks. */
+    unsigned int pollInterval;
+
+    /* Whether to check if new GC roots can in fact be found by the
+       garbage collector. */
+    bool checkRootReachability;
+
+    /* Whether the garbage collector should keep outputs of live
+       derivations. */
+    bool gcKeepOutputs;
+
+    /* Whether the garbage collector should keep derivers of live
+       paths. */
+    bool gcKeepDerivations;
+
+    /* Whether to automatically replace files with identical contents
+       with hard links. */
+    bool autoOptimiseStore;
+
+    /* Whether to add derivations as a dependency of user environments
+       (to prevent them from being GCed). */
+    bool envKeepDerivations;
+
+    /* Whether to lock the Nix client and worker to the same CPU. */
+    bool lockCPU;
+
+    /* Whether to show a stack trace if Nix evaluation fails. */
+    bool showTrace;
+
+    /* A list of URL prefixes that can return Nix build logs. */
+    Strings logServers;
+
+    /* Whether the importNative primop should be enabled */
+    bool enableImportNative;
+
+private:
+    SettingsMap settings, overrides;
+
+    void _get(string & res, const string & name);
+    void _get(bool & res, const string & name);
+    void _get(StringSet & res, const string & name);
+    void _get(Strings & res, const string & name);
+    template<class N> void _get(N & res, const string & name);
+};
+
+
+// FIXME: don't use a global variable.
+extern Settings settings;
+
+
+extern const string nixVersion;
+
+
+}
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
new file mode 100644
index 000000000000..f91a0889250a
--- /dev/null
+++ b/src/libstore/local-store.cc
@@ -0,0 +1,2019 @@
+#include "config.h"
+#include "local-store.hh"
+#include "globals.hh"
+#include "archive.hh"
+#include "pathlocks.hh"
+#include "worker-protocol.hh"
+#include "derivations.hh"
+#include "affinity.hh"
+
+#include <iostream>
+#include <algorithm>
+#include <cstring>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <utime.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+#include <grp.h>
+
+#if HAVE_UNSHARE && HAVE_STATVFS && HAVE_SYS_MOUNT_H
+#include <sched.h>
+#include <sys/statvfs.h>
+#include <sys/mount.h>
+#endif
+
+#if HAVE_LINUX_FS_H
+#include <linux/fs.h>
+#include <sys/ioctl.h>
+#include <errno.h>
+#endif
+
+#include <sqlite3.h>
+
+
+namespace nix {
+
+
+MakeError(SQLiteError, Error);
+MakeError(SQLiteBusy, SQLiteError);
+
+
+static void throwSQLiteError(sqlite3 * db, const format & f)
+    __attribute__ ((noreturn));
+
+static void throwSQLiteError(sqlite3 * db, const format & f)
+{
+    int err = sqlite3_errcode(db);
+    if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
+        if (err == SQLITE_PROTOCOL)
+            printMsg(lvlError, "warning: SQLite database is busy (SQLITE_PROTOCOL)");
+        else {
+            static bool warned = false;
+            if (!warned) {
+                printMsg(lvlError, "warning: SQLite database is busy");
+                warned = true;
+            }
+        }
+        /* Sleep for a while since retrying the transaction right away
+           is likely to fail again. */
+#if HAVE_NANOSLEEP
+        struct timespec t;
+        t.tv_sec = 0;
+        t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */
+        nanosleep(&t, 0);
+#else
+        sleep(1);
+#endif
+        throw SQLiteBusy(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
+    }
+    else
+        throw SQLiteError(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
+}
+
+
+/* Convenience macros for retrying a SQLite transaction. */
+#define retry_sqlite while (1) { try {
+#define end_retry_sqlite break; } catch (SQLiteBusy & e) { } }
+
+
+SQLite::~SQLite()
+{
+    try {
+        if (db && sqlite3_close(db) != SQLITE_OK)
+            throwSQLiteError(db, "closing database");
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+
+void SQLiteStmt::create(sqlite3 * db, const string & s)
+{
+    checkInterrupt();
+    assert(!stmt);
+    if (sqlite3_prepare_v2(db, s.c_str(), -1, &stmt, 0) != SQLITE_OK)
+        throwSQLiteError(db, "creating statement");
+    this->db = db;
+}
+
+
+void SQLiteStmt::reset()
+{
+    assert(stmt);
+    /* Note: sqlite3_reset() returns the error code for the most
+       recent call to sqlite3_step().  So ignore it. */
+    sqlite3_reset(stmt);
+    curArg = 1;
+}
+
+
+SQLiteStmt::~SQLiteStmt()
+{
+    try {
+        if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
+            throwSQLiteError(db, "finalizing statement");
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+
+void SQLiteStmt::bind(const string & value)
+{
+    if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
+        throwSQLiteError(db, "binding argument");
+}
+
+
+void SQLiteStmt::bind(int value)
+{
+    if (sqlite3_bind_int(stmt, curArg++, value) != SQLITE_OK)
+        throwSQLiteError(db, "binding argument");
+}
+
+
+void SQLiteStmt::bind64(long long value)
+{
+    if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK)
+        throwSQLiteError(db, "binding argument");
+}
+
+
+void SQLiteStmt::bind()
+{
+    if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK)
+        throwSQLiteError(db, "binding argument");
+}
+
+
+/* Helper class to ensure that prepared statements are reset when
+   leaving the scope that uses them.  Unfinished prepared statements
+   prevent transactions from being aborted, and can cause locks to be
+   kept when they should be released. */
+struct SQLiteStmtUse
+{
+    SQLiteStmt & stmt;
+    SQLiteStmtUse(SQLiteStmt & stmt) : stmt(stmt)
+    {
+        stmt.reset();
+    }
+    ~SQLiteStmtUse()
+    {
+        try {
+            stmt.reset();
+        } catch (...) {
+            ignoreException();
+        }
+    }
+};
+
+
+struct SQLiteTxn
+{
+    bool active;
+    sqlite3 * db;
+
+    SQLiteTxn(sqlite3 * db) : active(false) {
+        this->db = db;
+        if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
+            throwSQLiteError(db, "starting transaction");
+        active = true;
+    }
+
+    void commit()
+    {
+        if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
+            throwSQLiteError(db, "committing transaction");
+        active = false;
+    }
+
+    ~SQLiteTxn()
+    {
+        try {
+            if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
+                throwSQLiteError(db, "aborting transaction");
+        } catch (...) {
+            ignoreException();
+        }
+    }
+};
+
+
+void checkStoreNotSymlink()
+{
+    if (getEnv("NIX_IGNORE_SYMLINK_STORE") == "1") return;
+    Path path = settings.nixStore;
+    struct stat st;
+    while (path != "/") {
+        if (lstat(path.c_str(), &st))
+            throw SysError(format("getting status of ‘%1%’") % path);
+        if (S_ISLNK(st.st_mode))
+            throw Error(format(
+                "the path ‘%1%’ is a symlink; "
+                "this is not allowed for the Nix store and its parent directories")
+                % path);
+        path = dirOf(path);
+    }
+}
+
+
+LocalStore::LocalStore(bool reserveSpace)
+    : didSetSubstituterEnv(false)
+{
+    schemaPath = settings.nixDBPath + "/schema";
+
+    if (settings.readOnlyMode) {
+        openDB(false);
+        return;
+    }
+
+    /* Create the state directories if they don't already exist. */
+    createDirs(settings.nixStore);
+    makeStoreWritable();
+    createDirs(linksDir = settings.nixStore + "/.links");
+    Path profilesDir = settings.nixStateDir + "/profiles";
+    createDirs(profilesDir);
+    createDirs(settings.nixStateDir + "/temproots");
+    createDirs(settings.nixDBPath);
+    Path gcRootsDir = settings.nixStateDir + "/gcroots";
+    if (!pathExists(gcRootsDir)) {
+        createDirs(gcRootsDir);
+        createSymlink(profilesDir, gcRootsDir + "/profiles");
+    }
+
+    /* Optionally, create directories and set permissions for a
+       multi-user install. */
+    if (getuid() == 0 && settings.buildUsersGroup != "") {
+
+        Path perUserDir = profilesDir + "/per-user";
+        createDirs(perUserDir);
+        if (chmod(perUserDir.c_str(), 01777) == -1)
+            throw SysError(format("could not set permissions on ‘%1%’ to 1777") % perUserDir);
+
+        struct group * gr = getgrnam(settings.buildUsersGroup.c_str());
+        if (!gr)
+            throw Error(format("the group ‘%1%’ specified in ‘build-users-group’ does not exist")
+                % settings.buildUsersGroup);
+
+        struct stat st;
+        if (stat(settings.nixStore.c_str(), &st))
+            throw SysError(format("getting attributes of path ‘%1%’") % settings.nixStore);
+
+        if (st.st_uid != 0 || st.st_gid != gr->gr_gid || (st.st_mode & ~S_IFMT) != 01775) {
+            if (chown(settings.nixStore.c_str(), 0, gr->gr_gid) == -1)
+                throw SysError(format("changing ownership of path ‘%1%’") % settings.nixStore);
+            if (chmod(settings.nixStore.c_str(), 01775) == -1)
+                throw SysError(format("changing permissions on path ‘%1%’") % settings.nixStore);
+        }
+    }
+
+    checkStoreNotSymlink();
+
+    /* We can't open a SQLite database if the disk is full.  Since
+       this prevents the garbage collector from running when it's most
+       needed, we reserve some dummy space that we can free just
+       before doing a garbage collection. */
+    try {
+        Path reservedPath = settings.nixDBPath + "/reserved";
+        if (reserveSpace) {
+            struct stat st;
+            if (stat(reservedPath.c_str(), &st) == -1 ||
+                st.st_size != settings.reservedSize)
+                writeFile(reservedPath, string(settings.reservedSize, 'X'));
+        }
+        else
+            deletePath(reservedPath);
+    } catch (SysError & e) { /* don't care about errors */
+    }
+
+    /* Acquire the big fat lock in shared mode to make sure that no
+       schema upgrade is in progress. */
+    try {
+        Path globalLockPath = settings.nixDBPath + "/big-lock";
+        globalLock = openLockFile(globalLockPath.c_str(), true);
+    } catch (SysError & e) {
+        if (e.errNo != EACCES) throw;
+        settings.readOnlyMode = true;
+        openDB(false);
+        return;
+    }
+
+    if (!lockFile(globalLock, ltRead, false)) {
+        printMsg(lvlError, "waiting for the big Nix store lock...");
+        lockFile(globalLock, ltRead, true);
+    }
+
+    /* Check the current database schema and if necessary do an
+       upgrade.  */
+    int curSchema = getSchema();
+    if (curSchema > nixSchemaVersion)
+        throw Error(format("current Nix store schema is version %1%, but I only support %2%")
+            % curSchema % nixSchemaVersion);
+
+    else if (curSchema == 0) { /* new store */
+        curSchema = nixSchemaVersion;
+        openDB(true);
+        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
+    }
+
+    else if (curSchema < nixSchemaVersion) {
+        if (curSchema < 5)
+            throw Error(
+                "Your Nix store has a database in Berkeley DB format,\n"
+                "which is no longer supported. To convert to the new format,\n"
+                "please upgrade Nix to version 0.12 first.");
+
+        if (!lockFile(globalLock, ltWrite, false)) {
+            printMsg(lvlError, "waiting for exclusive access to the Nix store...");
+            lockFile(globalLock, ltWrite, true);
+        }
+
+        /* Get the schema version again, because another process may
+           have performed the upgrade already. */
+        curSchema = getSchema();
+
+        if (curSchema < 6) upgradeStore6();
+        else if (curSchema < 7) { upgradeStore7(); openDB(true); }
+
+        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
+
+        lockFile(globalLock, ltRead, true);
+    }
+
+    else openDB(false);
+}
+
+
+LocalStore::~LocalStore()
+{
+    try {
+        foreach (RunningSubstituters::iterator, i, runningSubstituters) {
+            if (i->second.disabled) continue;
+            i->second.to.close();
+            i->second.from.close();
+            i->second.error.close();
+            i->second.pid.wait(true);
+        }
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+
+int LocalStore::getSchema()
+{
+    int curSchema = 0;
+    if (pathExists(schemaPath)) {
+        string s = readFile(schemaPath);
+        if (!string2Int(s, curSchema))
+            throw Error(format("‘%1%’ is corrupt") % schemaPath);
+    }
+    return curSchema;
+}
+
+
+void LocalStore::openDB(bool create)
+{
+    if (access(settings.nixDBPath.c_str(), R_OK | W_OK))
+        throw SysError(format("Nix database directory ‘%1%’ is not writable") % settings.nixDBPath);
+
+    /* Open the Nix database. */
+    string dbPath = settings.nixDBPath + "/db.sqlite";
+    if (sqlite3_open_v2(dbPath.c_str(), &db.db,
+            SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
+        throw Error(format("cannot open Nix database ‘%1%’") % dbPath);
+
+    if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
+        throwSQLiteError(db, "setting timeout");
+
+    if (sqlite3_exec(db, "pragma foreign_keys = 1;", 0, 0, 0) != SQLITE_OK)
+        throwSQLiteError(db, "enabling foreign keys");
+
+    /* !!! check whether sqlite has been built with foreign key
+       support */
+
+    /* Whether SQLite should fsync().  "Normal" synchronous mode
+       should be safe enough.  If the user asks for it, don't sync at
+       all.  This can cause database corruption if the system
+       crashes. */
+    string syncMode = settings.fsyncMetadata ? "normal" : "off";
+    if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
+        throwSQLiteError(db, "setting synchronous mode");
+
+    /* Set the SQLite journal mode.  WAL mode is fastest, so it's the
+       default. */
+    string mode = settings.useSQLiteWAL ? "wal" : "truncate";
+    string prevMode;
+    {
+        SQLiteStmt stmt;
+        stmt.create(db, "pragma main.journal_mode;");
+        if (sqlite3_step(stmt) != SQLITE_ROW)
+            throwSQLiteError(db, "querying journal mode");
+        prevMode = string((const char *) sqlite3_column_text(stmt, 0));
+    }
+    if (prevMode != mode &&
+        sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
+        throwSQLiteError(db, "setting journal mode");
+
+    /* Increase the auto-checkpoint interval to 40000 pages.  This
+       seems enough to ensure that instantiating the NixOS system
+       derivation is done in a single fsync(). */
+    if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 40000;", 0, 0, 0) != SQLITE_OK)
+        throwSQLiteError(db, "setting autocheckpoint interval");
+
+    /* Initialise the database schema, if necessary. */
+    if (create) {
+        const char * schema =
+#include "schema.sql.hh"
+            ;
+        if (sqlite3_exec(db, (const char *) schema, 0, 0, 0) != SQLITE_OK)
+            throwSQLiteError(db, "initialising database schema");
+    }
+
+    /* Prepare SQL statements. */
+    stmtRegisterValidPath.create(db,
+        "insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);");
+    stmtUpdatePathInfo.create(db,
+        "update ValidPaths set narSize = ?, hash = ? where path = ?;");
+    stmtAddReference.create(db,
+        "insert or replace into Refs (referrer, reference) values (?, ?);");
+    stmtQueryPathInfo.create(db,
+        "select id, hash, registrationTime, deriver, narSize from ValidPaths where path = ?;");
+    stmtQueryReferences.create(db,
+        "select path from Refs join ValidPaths on reference = id where referrer = ?;");
+    stmtQueryReferrers.create(db,
+        "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
+    stmtInvalidatePath.create(db,
+        "delete from ValidPaths where path = ?;");
+    stmtRegisterFailedPath.create(db,
+        "insert or ignore into FailedPaths (path, time) values (?, ?);");
+    stmtHasPathFailed.create(db,
+        "select time from FailedPaths where path = ?;");
+    stmtQueryFailedPaths.create(db,
+        "select path from FailedPaths;");
+    // If the path is a derivation, then clear its outputs.
+    stmtClearFailedPath.create(db,
+        "delete from FailedPaths where ?1 = '*' or path = ?1 "
+        "or path in (select d.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where v.path = ?1);");
+    stmtAddDerivationOutput.create(db,
+        "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
+    stmtQueryValidDerivers.create(db,
+        "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where d.path = ?;");
+    stmtQueryDerivationOutputs.create(db,
+        "select id, path from DerivationOutputs where drv = ?;");
+    // Use "path >= ?" with limit 1 rather than "path like '?%'" to
+    // ensure efficient lookup.
+    stmtQueryPathFromHashPart.create(db,
+        "select path from ValidPaths where path >= ? limit 1;");
+}
+
+
+/* To improve purity, users may want to make the Nix store a read-only
+   bind mount.  So make the Nix store writable for this process. */
+void LocalStore::makeStoreWritable()
+{
+#if HAVE_UNSHARE && HAVE_STATVFS && HAVE_SYS_MOUNT_H && defined(MS_BIND) && defined(MS_REMOUNT)
+    if (getuid() != 0) return;
+    /* Check if /nix/store is on a read-only mount. */
+    struct statvfs stat;
+    if (statvfs(settings.nixStore.c_str(), &stat) != 0)
+        throw SysError("getting info about the Nix store mount point");
+
+    if (stat.f_flag & ST_RDONLY) {
+        if (unshare(CLONE_NEWNS) == -1)
+            throw SysError("setting up a private mount namespace");
+
+        if (mount(0, settings.nixStore.c_str(), 0, MS_REMOUNT | MS_BIND, 0) == -1)
+            throw SysError(format("remounting %1% writable") % settings.nixStore);
+    }
+#endif
+}
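+
+/* For illustration, such a set-up is typically a bind mount of the store
+   onto itself, remounted read-only, e.g. (as root, outside of Nix):
+
+       mount --bind /nix/store /nix/store
+       mount -o remount,ro,bind /nix/store
+
+   makeStoreWritable() then drops the read-only flag again, but only inside
+   a private mount namespace, so the rest of the system keeps seeing a
+   read-only store. */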
+
+
+const time_t mtimeStore = 1; /* 1 second into the epoch */
+
+
+static void canonicaliseTimestampAndPermissions(const Path & path, const struct stat & st)
+{
+    if (!S_ISLNK(st.st_mode)) {
+
+        /* Mask out all type related bits. */
+        mode_t mode = st.st_mode & ~S_IFMT;
+
+        if (mode != 0444 && mode != 0555) {
+            mode = (st.st_mode & S_IFMT)
+                 | 0444
+                 | (st.st_mode & S_IXUSR ? 0111 : 0);
+            if (chmod(path.c_str(), mode) == -1)
+                throw SysError(format("changing mode of ‘%1%’ to %2$o") % path % mode);
+        }
+
+    }
+
+    if (st.st_mtime != mtimeStore) {
+        struct timeval times[2];
+        times[0].tv_sec = st.st_atime;
+        times[0].tv_usec = 0;
+        times[1].tv_sec = mtimeStore;
+        times[1].tv_usec = 0;
+#if HAVE_LUTIMES
+        if (lutimes(path.c_str(), times) == -1)
+            if (errno != ENOSYS ||
+                (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1))
+#else
+        if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1)
+#endif
+            throw SysError(format("changing modification time of ‘%1%’") % path);
+    }
+}
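+
+/* For example, a file left behind by a builder as mode 0755 ends up as
+   0555 and a plain 0644 file ends up as 0444; setuid, setgid and sticky
+   bits are dropped.  In every case the modification time is forced to
+   1970-01-01 00:00:01 UTC. */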
+
+
+void canonicaliseTimestampAndPermissions(const Path & path)
+{
+    struct stat st;
+    if (lstat(path.c_str(), &st))
+        throw SysError(format("getting attributes of path ‘%1%’") % path);
+    canonicaliseTimestampAndPermissions(path, st);
+}
+
+
+static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
+{
+    checkInterrupt();
+
+    struct stat st;
+    if (lstat(path.c_str(), &st))
+        throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+    /* Really make sure that the path is of a supported type.  This
+       has already been checked in dumpPath(). */
+    assert(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode));
+
+    /* Fail if the file is not owned by the build user.  This prevents
+       us from messing up the ownership/permissions of files
+       hard-linked into the output (e.g. "ln /etc/shadow $out/foo").
+       However, ignore files that we chown'ed ourselves previously to
+       ensure that we don't fail on hard links within the same build
+       (i.e. "touch $out/foo; ln $out/foo $out/bar"). */
+    if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
+        assert(!S_ISDIR(st.st_mode));
+        if (inodesSeen.find(Inode(st.st_dev, st.st_ino)) == inodesSeen.end())
+            throw BuildError(format("invalid ownership on file ‘%1%’") % path);
+        mode_t mode = st.st_mode & ~S_IFMT;
+        assert(S_ISLNK(st.st_mode) || (st.st_uid == geteuid() && (mode == 0444 || mode == 0555) && st.st_mtime == mtimeStore));
+        return;
+    }
+
+    inodesSeen.insert(Inode(st.st_dev, st.st_ino));
+
+    canonicaliseTimestampAndPermissions(path, st);
+
+    /* Change ownership to the current uid.  If it's a symlink, use
+       lchown if available, otherwise don't bother.  Wrong ownership
+       of a symlink doesn't matter, since the owning user can't change
+       the symlink and can't delete it because the directory is not
+       writable.  The only exception is top-level paths in the Nix
+       store (since that directory is group-writable for the Nix build
+       users group); we check for this case below. */
+    if (st.st_uid != geteuid()) {
+#if HAVE_LCHOWN
+        if (lchown(path.c_str(), geteuid(), (gid_t) -1) == -1)
+#else
+        if (!S_ISLNK(st.st_mode) &&
+            chown(path.c_str(), geteuid(), (gid_t) -1) == -1)
+#endif
+            throw SysError(format("changing owner of ‘%1%’ to %2%")
+                % path % geteuid());
+    }
+
+    if (S_ISDIR(st.st_mode)) {
+        DirEntries entries = readDirectory(path);
+        for (auto & i : entries)
+            canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen);
+    }
+}
+
+
+void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
+{
+    canonicalisePathMetaData_(path, fromUid, inodesSeen);
+
+    /* On platforms that don't have lchown(), the top-level path can't
+       be a symlink, since we can't change its ownership. */
+    struct stat st;
+    if (lstat(path.c_str(), &st))
+        throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+    if (st.st_uid != geteuid()) {
+        assert(S_ISLNK(st.st_mode));
+        throw Error(format("wrong ownership of top-level store path ‘%1%’") % path);
+    }
+}
+
+
+void canonicalisePathMetaData(const Path & path, uid_t fromUid)
+{
+    InodesSeen inodesSeen;
+    canonicalisePathMetaData(path, fromUid, inodesSeen);
+}
+
+
+void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & drv)
+{
+    string drvName = storePathToName(drvPath);
+    assert(isDerivation(drvName));
+    drvName = string(drvName, 0, drvName.size() - drvExtension.size());
+
+    if (isFixedOutputDrv(drv)) {
+        DerivationOutputs::const_iterator out = drv.outputs.find("out");
+        if (out == drv.outputs.end())
+            throw Error(format("derivation ‘%1%’ does not have an output named ‘out’") % drvPath);
+
+        bool recursive; HashType ht; Hash h;
+        out->second.parseHashInfo(recursive, ht, h);
+        Path outPath = makeFixedOutputPath(recursive, ht, h, drvName);
+
+        StringPairs::const_iterator j = drv.env.find("out");
+        if (out->second.path != outPath || j == drv.env.end() || j->second != outPath)
+            throw Error(format("derivation ‘%1%’ has incorrect output ‘%2%’, should be ‘%3%’")
+                % drvPath % out->second.path % outPath);
+    }
+
+    else {
+        Derivation drvCopy(drv);
+        foreach (DerivationOutputs::iterator, i, drvCopy.outputs) {
+            i->second.path = "";
+            drvCopy.env[i->first] = "";
+        }
+
+        Hash h = hashDerivationModulo(*this, drvCopy);
+
+        foreach (DerivationOutputs::const_iterator, i, drv.outputs) {
+            Path outPath = makeOutputPath(i->first, h, drvName);
+            StringPairs::const_iterator j = drv.env.find(i->first);
+            if (i->second.path != outPath || j == drv.env.end() || j->second != outPath)
+                throw Error(format("derivation ‘%1%’ has incorrect output ‘%2%’, should be ‘%3%’")
+                    % drvPath % i->second.path % outPath);
+        }
+    }
+}
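+
+/* In other words: a fixed-output derivation must have an ‘out’ path equal
+   to makeFixedOutputPath() applied to its declared hash, while every
+   output of an ordinary derivation must equal makeOutputPath() applied to
+   the hash of the derivation computed with its output paths (and the
+   corresponding environment variables) blanked out, since otherwise an
+   output path would have to depend on itself. */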
+
+
+unsigned long long LocalStore::addValidPath(const ValidPathInfo & info, bool checkOutputs)
+{
+    SQLiteStmtUse use(stmtRegisterValidPath);
+    stmtRegisterValidPath.bind(info.path);
+    stmtRegisterValidPath.bind("sha256:" + printHash(info.hash));
+    stmtRegisterValidPath.bind(info.registrationTime == 0 ? time(0) : info.registrationTime);
+    if (info.deriver != "")
+        stmtRegisterValidPath.bind(info.deriver);
+    else
+        stmtRegisterValidPath.bind(); // null
+    if (info.narSize != 0)
+        stmtRegisterValidPath.bind64(info.narSize);
+    else
+        stmtRegisterValidPath.bind(); // null
+    if (sqlite3_step(stmtRegisterValidPath) != SQLITE_DONE)
+        throwSQLiteError(db, format("registering valid path ‘%1%’ in database") % info.path);
+    unsigned long long id = sqlite3_last_insert_rowid(db);
+
+    /* If this is a derivation, then store the derivation outputs in
+       the database.  This is useful for the garbage collector: it can
+       efficiently query whether a path is an output of some
+       derivation. */
+    if (isDerivation(info.path)) {
+        Derivation drv = readDerivation(info.path);
+
+        /* Verify that the output paths in the derivation are correct
+           (i.e., follow the scheme for computing output paths from
+           derivations).  Note that if this throws an error, then the
+           DB transaction is rolled back, so the path validity
+           registration above is undone. */
+        if (checkOutputs) checkDerivationOutputs(info.path, drv);
+
+        foreach (DerivationOutputs::iterator, i, drv.outputs) {
+            SQLiteStmtUse use(stmtAddDerivationOutput);
+            stmtAddDerivationOutput.bind(id);
+            stmtAddDerivationOutput.bind(i->first);
+            stmtAddDerivationOutput.bind(i->second.path);
+            if (sqlite3_step(stmtAddDerivationOutput) != SQLITE_DONE)
+                throwSQLiteError(db, format("adding derivation output for ‘%1%’ in database") % info.path);
+        }
+    }
+
+    return id;
+}
+
+
+void LocalStore::addReference(unsigned long long referrer, unsigned long long reference)
+{
+    SQLiteStmtUse use(stmtAddReference);
+    stmtAddReference.bind(referrer);
+    stmtAddReference.bind(reference);
+    if (sqlite3_step(stmtAddReference) != SQLITE_DONE)
+        throwSQLiteError(db, "adding reference to database");
+}
+
+
+void LocalStore::registerFailedPath(const Path & path)
+{
+    retry_sqlite {
+        SQLiteStmtUse use(stmtRegisterFailedPath);
+        stmtRegisterFailedPath.bind(path);
+        stmtRegisterFailedPath.bind(time(0));
+        if (sqlite3_step(stmtRegisterFailedPath) != SQLITE_DONE)
+            throwSQLiteError(db, format("registering failed path ‘%1%’") % path);
+    } end_retry_sqlite;
+}
+
+
+bool LocalStore::hasPathFailed(const Path & path)
+{
+    retry_sqlite {
+        SQLiteStmtUse use(stmtHasPathFailed);
+        stmtHasPathFailed.bind(path);
+        int res = sqlite3_step(stmtHasPathFailed);
+        if (res != SQLITE_DONE && res != SQLITE_ROW)
+            throwSQLiteError(db, "querying whether path failed");
+        return res == SQLITE_ROW;
+    } end_retry_sqlite;
+}
+
+
+PathSet LocalStore::queryFailedPaths()
+{
+    retry_sqlite {
+        SQLiteStmtUse use(stmtQueryFailedPaths);
+
+        PathSet res;
+        int r;
+        while ((r = sqlite3_step(stmtQueryFailedPaths)) == SQLITE_ROW) {
+            const char * s = (const char *) sqlite3_column_text(stmtQueryFailedPaths, 0);
+            assert(s);
+            res.insert(s);
+        }
+
+        if (r != SQLITE_DONE)
+            throwSQLiteError(db, "error querying failed paths");
+
+        return res;
+    } end_retry_sqlite;
+}
+
+
+void LocalStore::clearFailedPaths(const PathSet & paths)
+{
+    retry_sqlite {
+        SQLiteTxn txn(db);
+
+        foreach (PathSet::const_iterator, i, paths) {
+            SQLiteStmtUse use(stmtClearFailedPath);
+            stmtClearFailedPath.bind(*i);
+            if (sqlite3_step(stmtClearFailedPath) != SQLITE_DONE)
+                throwSQLiteError(db, format("clearing failed path ‘%1%’ in database") % *i);
+        }
+
+        txn.commit();
+    } end_retry_sqlite;
+}
+
+
+Hash parseHashField(const Path & path, const string & s)
+{
+    string::size_type colon = s.find(':');
+    if (colon == string::npos)
+        throw Error(format("corrupt hash ‘%1%’ in valid-path entry for ‘%2%’")
+            % s % path);
+    HashType ht = parseHashType(string(s, 0, colon));
+    if (ht == htUnknown)
+        throw Error(format("unknown hash type ‘%1%’ in valid-path entry for ‘%2%’")
+            % string(s, 0, colon) % path);
+    return parseHash(ht, string(s, colon + 1));
+}
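+
+/* The string parsed here is the one stored by addValidPath() above,
+   i.e. "<hashAlgo>:<hash>", e.g. "sha256:" followed by the base-16
+   rendering produced by printHash(). */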
+
+
+ValidPathInfo LocalStore::queryPathInfo(const Path & path)
+{
+    ValidPathInfo info;
+    info.path = path;
+
+    assertStorePath(path);
+
+    retry_sqlite {
+
+        /* Get the path info. */
+        SQLiteStmtUse use1(stmtQueryPathInfo);
+
+        stmtQueryPathInfo.bind(path);
+
+        int r = sqlite3_step(stmtQueryPathInfo);
+        if (r == SQLITE_DONE) throw Error(format("path ‘%1%’ is not valid") % path);
+        if (r != SQLITE_ROW) throwSQLiteError(db, "querying path in database");
+
+        info.id = sqlite3_column_int(stmtQueryPathInfo, 0);
+
+        const char * s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 1);
+        assert(s);
+        info.hash = parseHashField(path, s);
+
+        info.registrationTime = sqlite3_column_int(stmtQueryPathInfo, 2);
+
+        s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 3);
+        if (s) info.deriver = s;
+
+        /* Note that narSize = NULL yields 0. */
+        info.narSize = sqlite3_column_int64(stmtQueryPathInfo, 4);
+
+        /* Get the references. */
+        SQLiteStmtUse use2(stmtQueryReferences);
+
+        stmtQueryReferences.bind(info.id);
+
+        while ((r = sqlite3_step(stmtQueryReferences)) == SQLITE_ROW) {
+            s = (const char *) sqlite3_column_text(stmtQueryReferences, 0);
+            assert(s);
+            info.references.insert(s);
+        }
+
+        if (r != SQLITE_DONE)
+            throwSQLiteError(db, format("error getting references of ‘%1%’") % path);
+
+        return info;
+    } end_retry_sqlite;
+}
+
+
+/* Update path info in the database.  Currently only updates the
+   narSize and hash fields. */
+void LocalStore::updatePathInfo(const ValidPathInfo & info)
+{
+    SQLiteStmtUse use(stmtUpdatePathInfo);
+    if (info.narSize != 0)
+        stmtUpdatePathInfo.bind64(info.narSize);
+    else
+        stmtUpdatePathInfo.bind(); // null
+    stmtUpdatePathInfo.bind("sha256:" + printHash(info.hash));
+    stmtUpdatePathInfo.bind(info.path);
+    if (sqlite3_step(stmtUpdatePathInfo) != SQLITE_DONE)
+        throwSQLiteError(db, format("updating info of path ‘%1%’ in database") % info.path);
+}
+
+
+unsigned long long LocalStore::queryValidPathId(const Path & path)
+{
+    SQLiteStmtUse use(stmtQueryPathInfo);
+    stmtQueryPathInfo.bind(path);
+    int res = sqlite3_step(stmtQueryPathInfo);
+    if (res == SQLITE_ROW) return sqlite3_column_int(stmtQueryPathInfo, 0);
+    if (res == SQLITE_DONE) throw Error(format("path ‘%1%’ is not valid") % path);
+    throwSQLiteError(db, "querying path in database");
+}
+
+
+bool LocalStore::isValidPath_(const Path & path)
+{
+    SQLiteStmtUse use(stmtQueryPathInfo);
+    stmtQueryPathInfo.bind(path);
+    int res = sqlite3_step(stmtQueryPathInfo);
+    if (res != SQLITE_DONE && res != SQLITE_ROW)
+        throwSQLiteError(db, "querying path in database");
+    return res == SQLITE_ROW;
+}
+
+
+bool LocalStore::isValidPath(const Path & path)
+{
+    retry_sqlite {
+        return isValidPath_(path);
+    } end_retry_sqlite;
+}
+
+
+PathSet LocalStore::queryValidPaths(const PathSet & paths)
+{
+    retry_sqlite {
+        PathSet res;
+        foreach (PathSet::const_iterator, i, paths)
+            if (isValidPath_(*i)) res.insert(*i);
+        return res;
+    } end_retry_sqlite;
+}
+
+
+PathSet LocalStore::queryAllValidPaths()
+{
+    retry_sqlite {
+        SQLiteStmt stmt;
+        stmt.create(db, "select path from ValidPaths");
+
+        PathSet res;
+        int r;
+        while ((r = sqlite3_step(stmt)) == SQLITE_ROW) {
+            const char * s = (const char *) sqlite3_column_text(stmt, 0);
+            assert(s);
+            res.insert(s);
+        }
+
+        if (r != SQLITE_DONE)
+            throwSQLiteError(db, "error getting valid paths");
+
+        return res;
+    } end_retry_sqlite;
+}
+
+
+void LocalStore::queryReferences(const Path & path,
+    PathSet & references)
+{
+    ValidPathInfo info = queryPathInfo(path);
+    references.insert(info.references.begin(), info.references.end());
+}
+
+
+void LocalStore::queryReferrers_(const Path & path, PathSet & referrers)
+{
+    SQLiteStmtUse use(stmtQueryReferrers);
+
+    stmtQueryReferrers.bind(path);
+
+    int r;
+    while ((r = sqlite3_step(stmtQueryReferrers)) == SQLITE_ROW) {
+        const char * s = (const char *) sqlite3_column_text(stmtQueryReferrers, 0);
+        assert(s);
+        referrers.insert(s);
+    }
+
+    if (r != SQLITE_DONE)
+        throwSQLiteError(db, format("error getting references of ‘%1%’") % path);
+}
+
+
+void LocalStore::queryReferrers(const Path & path, PathSet & referrers)
+{
+    assertStorePath(path);
+    retry_sqlite {
+        queryReferrers_(path, referrers);
+    } end_retry_sqlite;
+}
+
+
+Path LocalStore::queryDeriver(const Path & path)
+{
+    return queryPathInfo(path).deriver;
+}
+
+
+PathSet LocalStore::queryValidDerivers(const Path & path)
+{
+    assertStorePath(path);
+
+    retry_sqlite {
+        SQLiteStmtUse use(stmtQueryValidDerivers);
+        stmtQueryValidDerivers.bind(path);
+
+        PathSet derivers;
+        int r;
+        while ((r = sqlite3_step(stmtQueryValidDerivers)) == SQLITE_ROW) {
+            const char * s = (const char *) sqlite3_column_text(stmtQueryValidDerivers, 1);
+            assert(s);
+            derivers.insert(s);
+        }
+
+        if (r != SQLITE_DONE)
+            throwSQLiteError(db, format("error getting valid derivers of ‘%1%’") % path);
+
+        return derivers;
+    } end_retry_sqlite;
+}
+
+
+PathSet LocalStore::queryDerivationOutputs(const Path & path)
+{
+    retry_sqlite {
+        SQLiteStmtUse use(stmtQueryDerivationOutputs);
+        stmtQueryDerivationOutputs.bind(queryValidPathId(path));
+
+        PathSet outputs;
+        int r;
+        while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) {
+            const char * s = (const char *) sqlite3_column_text(stmtQueryDerivationOutputs, 1);
+            assert(s);
+            outputs.insert(s);
+        }
+
+        if (r != SQLITE_DONE)
+            throwSQLiteError(db, format("error getting outputs of ‘%1%’") % path);
+
+        return outputs;
+    } end_retry_sqlite;
+}
+
+
+StringSet LocalStore::queryDerivationOutputNames(const Path & path)
+{
+    retry_sqlite {
+        SQLiteStmtUse use(stmtQueryDerivationOutputs);
+        stmtQueryDerivationOutputs.bind(queryValidPathId(path));
+
+        StringSet outputNames;
+        int r;
+        while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) {
+            const char * s = (const char *) sqlite3_column_text(stmtQueryDerivationOutputs, 0);
+            assert(s);
+            outputNames.insert(s);
+        }
+
+        if (r != SQLITE_DONE)
+            throwSQLiteError(db, format("error getting output names of ‘%1%’") % path);
+
+        return outputNames;
+    } end_retry_sqlite;
+}
+
+
+Path LocalStore::queryPathFromHashPart(const string & hashPart)
+{
+    if (hashPart.size() != 32) throw Error("invalid hash part");
+
+    Path prefix = settings.nixStore + "/" + hashPart;
+
+    retry_sqlite {
+        SQLiteStmtUse use(stmtQueryPathFromHashPart);
+        stmtQueryPathFromHashPart.bind(prefix);
+
+        int res = sqlite3_step(stmtQueryPathFromHashPart);
+        if (res == SQLITE_DONE) return "";
+        if (res != SQLITE_ROW) throwSQLiteError(db, "finding path in database");
+
+        const char * s = (const char *) sqlite3_column_text(stmtQueryPathFromHashPart, 0);
+        return s && prefix.compare(0, prefix.size(), s, prefix.size()) == 0 ? s : "";
+    } end_retry_sqlite;
+}
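+
+/* A minimal usage sketch (assuming ‘store’ is a LocalStore; the hash part
+   below is made up): given the 32-character hash part of a store path,
+   this resolves it to the full path, or returns "" if no valid path
+   matches:
+
+       Path p = store.queryPathFromHashPart("b6gvzjyb2pg0kjfwrjmg1vfhh54ad73z");
+       if (p != "") printMsg(lvlInfo, format("resolved to ‘%1%’") % p);
+
+   Thanks to the "path >= ? limit 1" statement prepared in openDB(), this
+   is a single index seek on ValidPaths rather than a LIKE scan. */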
+
+
+void LocalStore::setSubstituterEnv()
+{
+    if (didSetSubstituterEnv) return;
+
+    /* Pass configuration options (including those overridden with
+       --option) to substituters. */
+    setenv("_NIX_OPTIONS", settings.pack().c_str(), 1);
+
+    didSetSubstituterEnv = true;
+}
+
+
+void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & run)
+{
+    if (run.disabled || run.pid != -1) return;
+
+    debug(format("starting substituter program ‘%1%’") % substituter);
+
+    Pipe toPipe, fromPipe, errorPipe;
+
+    toPipe.create();
+    fromPipe.create();
+    errorPipe.create();
+
+    setSubstituterEnv();
+
+    run.pid = startProcess([&]() {
+        if (dup2(toPipe.readSide, STDIN_FILENO) == -1)
+            throw SysError("dupping stdin");
+        if (dup2(fromPipe.writeSide, STDOUT_FILENO) == -1)
+            throw SysError("dupping stdout");
+        if (dup2(errorPipe.writeSide, STDERR_FILENO) == -1)
+            throw SysError("dupping stderr");
+        execl(substituter.c_str(), substituter.c_str(), "--query", NULL);
+        throw SysError(format("executing ‘%1%’") % substituter);
+    });
+
+    run.program = baseNameOf(substituter);
+    run.to = toPipe.writeSide.borrow();
+    run.from = run.fromBuf.fd = fromPipe.readSide.borrow();
+    run.error = errorPipe.readSide.borrow();
+
+    toPipe.readSide.close();
+    fromPipe.writeSide.close();
+    errorPipe.writeSide.close();
+
+    /* The substituter may exit right away if it's disabled in any way
+       (e.g. copy-from-other-stores.pl will exit if no other stores
+       are configured). */
+    try {
+        getLineFromSubstituter(run);
+    } catch (EndOfFile & e) {
+        run.to.close();
+        run.from.close();
+        run.error.close();
+        run.disabled = true;
+        if (run.pid.wait(true) != 0) throw;
+    }
+}
+
+
+/* Read a line from the substituter's stdout, while also processing
+   its stderr. */
+string LocalStore::getLineFromSubstituter(RunningSubstituter & run)
+{
+    string res, err;
+
+    /* We might have stdout data left over from the last time. */
+    if (run.fromBuf.hasData()) goto haveData;
+
+    while (1) {
+        checkInterrupt();
+
+        fd_set fds;
+        FD_ZERO(&fds);
+        FD_SET(run.from, &fds);
+        FD_SET(run.error, &fds);
+
+        /* Wait for data to appear on the substituter's stdout or
+           stderr. */
+        if (select(run.from > run.error ? run.from + 1 : run.error + 1, &fds, 0, 0, 0) == -1) {
+            if (errno == EINTR) continue;
+            throw SysError("waiting for input from the substituter");
+        }
+
+        /* Completely drain stderr before dealing with stdout. */
+        if (FD_ISSET(run.error, &fds)) {
+            char buf[4096];
+            ssize_t n = read(run.error, (unsigned char *) buf, sizeof(buf));
+            if (n == -1) {
+                if (errno == EINTR) continue;
+                throw SysError("reading from substituter's stderr");
+            }
+            if (n == 0) throw EndOfFile(format("substituter ‘%1%’ died unexpectedly") % run.program);
+            err.append(buf, n);
+            string::size_type p;
+            while ((p = err.find('\n')) != string::npos) {
+                printMsg(lvlError, run.program + ": " + string(err, 0, p));
+                err = string(err, p + 1);
+            }
+        }
+
+        /* Read from stdout until we get a newline or the buffer is empty. */
+        else if (run.fromBuf.hasData() || FD_ISSET(run.from, &fds)) {
+        haveData:
+            do {
+                unsigned char c;
+                run.fromBuf(&c, 1);
+                if (c == '\n') {
+                    if (!err.empty()) printMsg(lvlError, run.program + ": " + err);
+                    return res;
+                }
+                res += c;
+            } while (run.fromBuf.hasData());
+        }
+    }
+}
+
+
+template<class T> T LocalStore::getIntLineFromSubstituter(RunningSubstituter & run)
+{
+    string s = getLineFromSubstituter(run);
+    T res;
+    if (!string2Int(s, res)) throw Error("integer expected from stream");
+    return res;
+}
+
+
+PathSet LocalStore::querySubstitutablePaths(const PathSet & paths)
+{
+    PathSet res;
+    foreach (Paths::iterator, i, settings.substituters) {
+        if (res.size() == paths.size()) break;
+        RunningSubstituter & run(runningSubstituters[*i]);
+        startSubstituter(*i, run);
+        if (run.disabled) continue;
+        string s = "have ";
+        foreach (PathSet::const_iterator, j, paths)
+            if (res.find(*j) == res.end()) { s += *j; s += " "; }
+        writeLine(run.to, s);
+        while (true) {
+            /* FIXME: we only read stderr when an error occurs, so
+               substituters should only write (short) messages to
+               stderr when they fail.  I.e. they shouldn't write debug
+               output. */
+            Path path = getLineFromSubstituter(run);
+            if (path == "") break;
+            res.insert(path);
+        }
+    }
+    return res;
+}
+
+
+void LocalStore::querySubstitutablePathInfos(const Path & substituter,
+    PathSet & paths, SubstitutablePathInfos & infos)
+{
+    RunningSubstituter & run(runningSubstituters[substituter]);
+    startSubstituter(substituter, run);
+    if (run.disabled) return;
+
+    string s = "info ";
+    foreach (PathSet::const_iterator, i, paths)
+        if (infos.find(*i) == infos.end()) { s += *i; s += " "; }
+    writeLine(run.to, s);
+
+    while (true) {
+        Path path = getLineFromSubstituter(run);
+        if (path == "") break;
+        if (paths.find(path) == paths.end())
+            throw Error(format("got unexpected path ‘%1%’ from substituter") % path);
+        paths.erase(path);
+        SubstitutablePathInfo & info(infos[path]);
+        info.deriver = getLineFromSubstituter(run);
+        if (info.deriver != "") assertStorePath(info.deriver);
+        int nrRefs = getIntLineFromSubstituter<int>(run);
+        while (nrRefs--) {
+            Path p = getLineFromSubstituter(run);
+            assertStorePath(p);
+            info.references.insert(p);
+        }
+        info.downloadSize = getIntLineFromSubstituter<long long>(run);
+        info.narSize = getIntLineFromSubstituter<long long>(run);
+    }
+}
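+
+/* The conversation with a substituter is a simple line-based protocol on
+   its stdin/stdout.  A hypothetical exchange (store paths abbreviated):
+
+       to substituter:   info /nix/store/aaaa…-hello-2.9 /nix/store/bbbb…-foo-1.0
+       from substituter: /nix/store/aaaa…-hello-2.9
+                         /nix/store/cccc…-hello-2.9.drv     (deriver, may be empty)
+                         1                                  (number of references)
+                         /nix/store/dddd…-glibc-2.19
+                         49152                              (download size in bytes)
+                         131072                             (NAR size in bytes)
+                                                            (empty line: no more paths)
+
+   "have <paths…>" works the same way but just returns the subset of the
+   given paths that the substituter can supply, terminated by an empty
+   line. */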
+
+
+void LocalStore::querySubstitutablePathInfos(const PathSet & paths,
+    SubstitutablePathInfos & infos)
+{
+    PathSet todo = paths;
+    foreach (Paths::iterator, i, settings.substituters) {
+        if (todo.empty()) break;
+        querySubstitutablePathInfos(*i, todo, infos);
+    }
+}
+
+
+Hash LocalStore::queryPathHash(const Path & path)
+{
+    return queryPathInfo(path).hash;
+}
+
+
+void LocalStore::registerValidPath(const ValidPathInfo & info)
+{
+    ValidPathInfos infos;
+    infos.push_back(info);
+    registerValidPaths(infos);
+}
+
+
+void LocalStore::registerValidPaths(const ValidPathInfos & infos)
+{
+    /* SQLite will fsync by default, but the contents of the newly
+       valid paths may not have been fsynced to disk yet.  Optionally
+       call sync() first, at the cost of making path registration
+       slower. */
+    if (settings.syncBeforeRegistering) sync();
+
+    retry_sqlite {
+        SQLiteTxn txn(db);
+        PathSet paths;
+
+        foreach (ValidPathInfos::const_iterator, i, infos) {
+            assert(i->hash.type == htSHA256);
+            if (isValidPath_(i->path))
+                updatePathInfo(*i);
+            else
+                addValidPath(*i, false);
+            paths.insert(i->path);
+        }
+
+        foreach (ValidPathInfos::const_iterator, i, infos) {
+            unsigned long long referrer = queryValidPathId(i->path);
+            foreach (PathSet::iterator, j, i->references)
+                addReference(referrer, queryValidPathId(*j));
+        }
+
+        /* Check that the derivation outputs are correct.  We can't do
+           this in addValidPath() above, because the references might
+           not be valid yet. */
+        foreach (ValidPathInfos::const_iterator, i, infos)
+            if (isDerivation(i->path)) {
+                // FIXME: inefficient; we already loaded the
+                // derivation in addValidPath().
+                Derivation drv = readDerivation(i->path);
+                checkDerivationOutputs(i->path, drv);
+            }
+
+        /* Do a topological sort of the paths.  This will throw an
+           error if a cycle is detected and roll back the
+           transaction.  Cycles can only occur when a derivation
+           has multiple outputs. */
+        topoSortPaths(*this, paths);
+
+        txn.commit();
+    } end_retry_sqlite;
+}
+
+
+/* Invalidate a path.  The caller is responsible for checking that
+   there are no referrers. */
+void LocalStore::invalidatePath(const Path & path)
+{
+    debug(format("invalidating path ‘%1%’") % path);
+
+    drvHashes.erase(path);
+
+    SQLiteStmtUse use(stmtInvalidatePath);
+
+    stmtInvalidatePath.bind(path);
+
+    if (sqlite3_step(stmtInvalidatePath) != SQLITE_DONE)
+        throwSQLiteError(db, format("invalidating path ‘%1%’ in database") % path);
+
+    /* Note that the foreign key constraints on the Refs table take
+       care of deleting the references entries for `path'. */
+}
+
+
+Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
+    bool recursive, HashType hashAlgo, bool repair)
+{
+    Hash h = hashString(hashAlgo, dump);
+
+    Path dstPath = makeFixedOutputPath(recursive, hashAlgo, h, name);
+
+    addTempRoot(dstPath);
+
+    if (repair || !isValidPath(dstPath)) {
+
+        /* The first check above is an optimisation to prevent
+           unnecessary lock acquisition. */
+
+        PathLocks outputLock(singleton<PathSet, Path>(dstPath));
+
+        if (repair || !isValidPath(dstPath)) {
+
+            if (pathExists(dstPath)) deletePath(dstPath);
+
+            if (recursive) {
+                StringSource source(dump);
+                restorePath(dstPath, source);
+            } else
+                writeFile(dstPath, dump);
+
+            canonicalisePathMetaData(dstPath, -1);
+
+            /* Register the SHA-256 hash of the NAR serialisation of
+               the path in the database.  We may just have computed it
+               above (if called with recursive == true and hashAlgo ==
+               sha256); otherwise, compute it here. */
+            HashResult hash;
+            if (recursive) {
+                hash.first = hashAlgo == htSHA256 ? h : hashString(htSHA256, dump);
+                hash.second = dump.size();
+            } else
+                hash = hashPath(htSHA256, dstPath);
+
+            optimisePath(dstPath); // FIXME: combine with hashPath()
+
+            ValidPathInfo info;
+            info.path = dstPath;
+            info.hash = hash.first;
+            info.narSize = hash.second;
+            registerValidPath(info);
+        }
+
+        outputLock.setDeletion(true);
+    }
+
+    return dstPath;
+}
+
+
+Path LocalStore::addToStore(const Path & _srcPath,
+    bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
+{
+    Path srcPath(absPath(_srcPath));
+    debug(format("adding ‘%1%’ to the store") % srcPath);
+
+    /* Read the whole path into memory.  This is not a very scalable
+       method for very large paths, but this function is mainly used
+       for small files. */
+    StringSink sink;
+    if (recursive)
+        dumpPath(srcPath, sink, filter);
+    else
+        sink.s = readFile(srcPath);
+
+    return addToStoreFromDump(sink.s, baseNameOf(srcPath), recursive, hashAlgo, repair);
+}
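+
+/* A minimal usage sketch (assuming ‘store’ is a LocalStore and the source
+   path exists):
+
+       Path p = store.addToStore("/tmp/my-sources");
+       // p is now of the form /nix/store/<hash>-my-sources
+
+   With the default arguments this serialises the path as a NAR, hashes it
+   with SHA-256 and registers the result as a valid path. */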
+
+
+Path LocalStore::addTextToStore(const string & name, const string & s,
+    const PathSet & references, bool repair)
+{
+    Path dstPath = computeStorePathForText(name, s, references);
+
+    addTempRoot(dstPath);
+
+    if (repair || !isValidPath(dstPath)) {
+
+        PathLocks outputLock(singleton<PathSet, Path>(dstPath));
+
+        if (repair || !isValidPath(dstPath)) {
+
+            if (pathExists(dstPath)) deletePath(dstPath);
+
+            writeFile(dstPath, s);
+
+            canonicalisePathMetaData(dstPath, -1);
+
+            HashResult hash = hashPath(htSHA256, dstPath);
+
+            optimisePath(dstPath);
+
+            ValidPathInfo info;
+            info.path = dstPath;
+            info.hash = hash.first;
+            info.narSize = hash.second;
+            info.references = references;
+            registerValidPath(info);
+        }
+
+        outputLock.setDeletion(true);
+    }
+
+    return dstPath;
+}
+
+
+struct HashAndWriteSink : Sink
+{
+    Sink & writeSink;
+    HashSink hashSink;
+    HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
+    {
+    }
+    virtual void operator () (const unsigned char * data, size_t len)
+    {
+        writeSink(data, len);
+        hashSink(data, len);
+    }
+    Hash currentHash()
+    {
+        return hashSink.currentHash().first;
+    }
+};
+
+
+#define EXPORT_MAGIC 0x4558494e
+
+
+static void checkSecrecy(const Path & path)
+{
+    struct stat st;
+    if (stat(path.c_str(), &st))
+        throw SysError(format("getting status of ‘%1%’") % path);
+    if ((st.st_mode & (S_IRWXG | S_IRWXO)) != 0)
+        throw Error(format("file ‘%1%’ should be secret (inaccessible to everybody else)!") % path);
+}
+
+
+void LocalStore::exportPath(const Path & path, bool sign,
+    Sink & sink)
+{
+    assertStorePath(path);
+
+    printMsg(lvlInfo, format("exporting path ‘%1%’") % path);
+
+    if (!isValidPath(path))
+        throw Error(format("path ‘%1%’ is not valid") % path);
+
+    HashAndWriteSink hashAndWriteSink(sink);
+
+    dumpPath(path, hashAndWriteSink);
+
+    /* Refuse to export paths that have changed.  This prevents
+       filesystem corruption from spreading to other machines.
+       Don't complain if the stored hash is zero (unknown). */
+    Hash hash = hashAndWriteSink.currentHash();
+    Hash storedHash = queryPathHash(path);
+    if (hash != storedHash && storedHash != Hash(storedHash.type))
+        throw Error(format("hash of path ‘%1%’ has changed from ‘%2%’ to ‘%3%’!") % path
+            % printHash(storedHash) % printHash(hash));
+
+    writeInt(EXPORT_MAGIC, hashAndWriteSink);
+
+    writeString(path, hashAndWriteSink);
+
+    PathSet references;
+    queryReferences(path, references);
+    writeStrings(references, hashAndWriteSink);
+
+    Path deriver = queryDeriver(path);
+    writeString(deriver, hashAndWriteSink);
+
+    if (sign) {
+        Hash hash = hashAndWriteSink.currentHash();
+
+        writeInt(1, hashAndWriteSink);
+
+        Path tmpDir = createTempDir();
+        AutoDelete delTmp(tmpDir);
+        Path hashFile = tmpDir + "/hash";
+        writeFile(hashFile, printHash(hash));
+
+        Path secretKey = settings.nixConfDir + "/signing-key.sec";
+        checkSecrecy(secretKey);
+
+        Strings args;
+        args.push_back("rsautl");
+        args.push_back("-sign");
+        args.push_back("-inkey");
+        args.push_back(secretKey);
+        args.push_back("-in");
+        args.push_back(hashFile);
+        string signature = runProgram(OPENSSL_PATH, true, args);
+
+        writeString(signature, hashAndWriteSink);
+
+    } else
+        writeInt(0, hashAndWriteSink);
+}
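+
+/* The stream written above thus consists of, in order:
+
+       <NAR dump of the path's contents>
+       EXPORT_MAGIC
+       <store path>
+       <references (a list of store paths)>
+       <deriver (possibly the empty string)>
+       1 followed by <signature>, or just 0
+
+   where the signature, if requested, is an RSA signature (made with the
+   openssl ‘rsautl’ command) over the SHA-256 hash of everything up to and
+   including the deriver field.  importPath() below reads the same fields
+   back in the same order. */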
+
+
+struct HashAndReadSource : Source
+{
+    Source & readSource;
+    HashSink hashSink;
+    bool hashing;
+    HashAndReadSource(Source & readSource) : readSource(readSource), hashSink(htSHA256)
+    {
+        hashing = true;
+    }
+    size_t read(unsigned char * data, size_t len)
+    {
+        size_t n = readSource.read(data, len);
+        if (hashing) hashSink(data, n);
+        return n;
+    }
+};
+
+
+/* Create a temporary directory in the store that won't be
+   garbage-collected. */
+Path LocalStore::createTempDirInStore()
+{
+    Path tmpDir;
+    do {
+        /* There is a slight possibility that `tmpDir' gets deleted by
+           the GC between createTempDir() and addTempRoot(), so repeat
+           until `tmpDir' exists. */
+        tmpDir = createTempDir(settings.nixStore);
+        addTempRoot(tmpDir);
+    } while (!pathExists(tmpDir));
+    return tmpDir;
+}
+
+
+Path LocalStore::importPath(bool requireSignature, Source & source)
+{
+    HashAndReadSource hashAndReadSource(source);
+
+    /* We don't yet know what store path this archive contains (the
+       store path follows the archive data proper), and besides, we
+       don't know yet whether the signature is valid. */
+    Path tmpDir = createTempDirInStore();
+    AutoDelete delTmp(tmpDir);
+    Path unpacked = tmpDir + "/unpacked";
+
+    restorePath(unpacked, hashAndReadSource);
+
+    unsigned int magic = readInt(hashAndReadSource);
+    if (magic != EXPORT_MAGIC)
+        throw Error("Nix archive cannot be imported; wrong format");
+
+    Path dstPath = readStorePath(hashAndReadSource);
+
+    PathSet references = readStorePaths<PathSet>(hashAndReadSource);
+
+    Path deriver = readString(hashAndReadSource);
+    if (deriver != "") assertStorePath(deriver);
+
+    Hash hash = hashAndReadSource.hashSink.finish().first;
+    hashAndReadSource.hashing = false;
+
+    bool haveSignature = readInt(hashAndReadSource) == 1;
+
+    if (requireSignature && !haveSignature)
+        throw Error(format("imported archive of ‘%1%’ lacks a signature") % dstPath);
+
+    if (haveSignature) {
+        string signature = readString(hashAndReadSource);
+
+        if (requireSignature) {
+            Path sigFile = tmpDir + "/sig";
+            writeFile(sigFile, signature);
+
+            Strings args;
+            args.push_back("rsautl");
+            args.push_back("-verify");
+            args.push_back("-inkey");
+            args.push_back(settings.nixConfDir + "/signing-key.pub");
+            args.push_back("-pubin");
+            args.push_back("-in");
+            args.push_back(sigFile);
+            string hash2 = runProgram(OPENSSL_PATH, true, args);
+
+            /* Note: runProgram() throws an exception if the signature
+               is invalid. */
+
+            if (printHash(hash) != hash2)
+                throw Error(
+                    "signed hash doesn't match actual contents of imported "
+                    "archive; archive could be corrupt, or someone is trying "
+                    "to import a Trojan horse");
+        }
+    }
+
+    /* Do the actual import. */
+
+    /* !!! way too much code duplication with addTextToStore() etc. */
+    addTempRoot(dstPath);
+
+    if (!isValidPath(dstPath)) {
+
+        PathLocks outputLock;
+
+        /* Lock the output path.  But don't lock if we're being called
+           from a build hook (whose parent process already acquired a
+           lock on this path). */
+        Strings locksHeld = tokenizeString<Strings>(getEnv("NIX_HELD_LOCKS"));
+        if (find(locksHeld.begin(), locksHeld.end(), dstPath) == locksHeld.end())
+            outputLock.lockPaths(singleton<PathSet, Path>(dstPath));
+
+        if (!isValidPath(dstPath)) {
+
+            if (pathExists(dstPath)) deletePath(dstPath);
+
+            if (rename(unpacked.c_str(), dstPath.c_str()) == -1)
+                throw SysError(format("cannot move ‘%1%’ to ‘%2%’")
+                    % unpacked % dstPath);
+
+            canonicalisePathMetaData(dstPath, -1);
+
+            /* !!! if we were clever, we could prevent the hashPath()
+               here. */
+            HashResult hash = hashPath(htSHA256, dstPath);
+
+            optimisePath(dstPath); // FIXME: combine with hashPath()
+
+            ValidPathInfo info;
+            info.path = dstPath;
+            info.hash = hash.first;
+            info.narSize = hash.second;
+            info.references = references;
+            info.deriver = deriver != "" && isValidPath(deriver) ? deriver : "";
+            registerValidPath(info);
+        }
+
+        outputLock.setDeletion(true);
+    }
+
+    return dstPath;
+}
+
+
+Paths LocalStore::importPaths(bool requireSignature, Source & source)
+{
+    Paths res;
+    while (true) {
+        unsigned long long n = readLongLong(source);
+        if (n == 0) break;
+        if (n != 1) throw Error("input doesn't look like something created by ‘nix-store --export’");
+        res.push_back(importPath(requireSignature, source));
+    }
+    return res;
+}
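+
+/* So a complete ‘nix-store --export’ stream is a sequence of records, each
+   consisting of the integer 1 followed by one exported path as laid out
+   above, and terminated by a single 0. */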
+
+
+void LocalStore::invalidatePathChecked(const Path & path)
+{
+    assertStorePath(path);
+
+    retry_sqlite {
+        SQLiteTxn txn(db);
+
+        if (isValidPath_(path)) {
+            PathSet referrers; queryReferrers_(path, referrers);
+            referrers.erase(path); /* ignore self-references */
+            if (!referrers.empty())
+                throw PathInUse(format("cannot delete path ‘%1%’ because it is in use by %2%")
+                    % path % showPaths(referrers));
+            invalidatePath(path);
+        }
+
+        txn.commit();
+    } end_retry_sqlite;
+}
+
+
+bool LocalStore::verifyStore(bool checkContents, bool repair)
+{
+    printMsg(lvlError, format("reading the Nix store..."));
+
+    bool errors = false;
+
+    /* Acquire the global GC lock to prevent a garbage collection. */
+    AutoCloseFD fdGCLock = openGCLock(ltWrite);
+
+    PathSet store;
+    for (auto & i : readDirectory(settings.nixStore)) store.insert(i.name);
+
+    /* Check whether all valid paths actually exist. */
+    printMsg(lvlInfo, "checking path existence...");
+
+    PathSet validPaths2 = queryAllValidPaths(), validPaths, done;
+
+    foreach (PathSet::iterator, i, validPaths2)
+        verifyPath(*i, store, done, validPaths, repair, errors);
+
+    /* Release the GC lock so that checking content hashes (which can
+       take ages) doesn't block the GC or builds. */
+    fdGCLock.close();
+
+    /* Optionally, check the content hashes (slow). */
+    if (checkContents) {
+        printMsg(lvlInfo, "checking hashes...");
+
+        Hash nullHash(htSHA256);
+
+        foreach (PathSet::iterator, i, validPaths) {
+            try {
+                ValidPathInfo info = queryPathInfo(*i);
+
+                /* Check the content hash (optionally - slow). */
+                printMsg(lvlTalkative, format("checking contents of ‘%1%’") % *i);
+                HashResult current = hashPath(info.hash.type, *i);
+
+                if (info.hash != nullHash && info.hash != current.first) {
+                    printMsg(lvlError, format("path ‘%1%’ was modified! "
+                            "expected hash ‘%2%’, got ‘%3%’")
+                        % *i % printHash(info.hash) % printHash(current.first));
+                    if (repair) repairPath(*i); else errors = true;
+                } else {
+
+                    bool update = false;
+
+                    /* Fill in missing hashes. */
+                    if (info.hash == nullHash) {
+                        printMsg(lvlError, format("fixing missing hash on ‘%1%’") % *i);
+                        info.hash = current.first;
+                        update = true;
+                    }
+
+                    /* Fill in missing narSize fields (from old stores). */
+                    if (info.narSize == 0) {
+                        printMsg(lvlError, format("updating size field on ‘%1%’ to %2%") % *i % current.second);
+                        info.narSize = current.second;
+                        update = true;
+                    }
+
+                    if (update) updatePathInfo(info);
+
+                }
+
+            } catch (Error & e) {
+                /* It's possible that the path got GC'ed, so ignore
+                   errors on invalid paths. */
+                if (isValidPath(*i))
+                    printMsg(lvlError, format("error: %1%") % e.msg());
+                else
+                    printMsg(lvlError, format("warning: %1%") % e.msg());
+                errors = true;
+            }
+        }
+    }
+
+    return errors;
+}
+
+
+void LocalStore::verifyPath(const Path & path, const PathSet & store,
+    PathSet & done, PathSet & validPaths, bool repair, bool & errors)
+{
+    checkInterrupt();
+
+    if (done.find(path) != done.end()) return;
+    done.insert(path);
+
+    if (!isStorePath(path)) {
+        printMsg(lvlError, format("path ‘%1%’ is not in the Nix store") % path);
+        invalidatePath(path);
+        return;
+    }
+
+    if (store.find(baseNameOf(path)) == store.end()) {
+        /* Check any referrers first.  If we can invalidate them
+           first, then we can invalidate this path as well. */
+        bool canInvalidate = true;
+        PathSet referrers; queryReferrers(path, referrers);
+        foreach (PathSet::iterator, i, referrers)
+            if (*i != path) {
+                verifyPath(*i, store, done, validPaths, repair, errors);
+                if (validPaths.find(*i) != validPaths.end())
+                    canInvalidate = false;
+            }
+
+        if (canInvalidate) {
+            printMsg(lvlError, format("path ‘%1%’ disappeared, removing from database...") % path);
+            invalidatePath(path);
+        } else {
+            printMsg(lvlError, format("path ‘%1%’ disappeared, but it still has valid referrers!") % path);
+            if (repair)
+                try {
+                    repairPath(path);
+                } catch (Error & e) {
+                    printMsg(lvlError, format("warning: %1%") % e.msg());
+                    errors = true;
+                }
+            else errors = true;
+        }
+
+        return;
+    }
+
+    validPaths.insert(path);
+}
+
+
+bool LocalStore::pathContentsGood(const Path & path)
+{
+    std::map<Path, bool>::iterator i = pathContentsGoodCache.find(path);
+    if (i != pathContentsGoodCache.end()) return i->second;
+    printMsg(lvlInfo, format("checking path ‘%1%’...") % path);
+    ValidPathInfo info = queryPathInfo(path);
+    bool res;
+    if (!pathExists(path))
+        res = false;
+    else {
+        HashResult current = hashPath(info.hash.type, path);
+        Hash nullHash(htSHA256);
+        res = info.hash == nullHash || info.hash == current.first;
+    }
+    pathContentsGoodCache[path] = res;
+    if (!res) printMsg(lvlError, format("path ‘%1%’ is corrupted or missing!") % path);
+    return res;
+}
+
+
+void LocalStore::markContentsGood(const Path & path)
+{
+    pathContentsGoodCache[path] = true;
+}
+
+
+/* Functions for upgrading from the pre-SQLite database. */
+
+PathSet LocalStore::queryValidPathsOld()
+{
+    PathSet paths;
+    for (auto & i : readDirectory(settings.nixDBPath + "/info"))
+        if (i.name.at(0) != '.') paths.insert(settings.nixStore + "/" + i.name);
+    return paths;
+}
+
+
+ValidPathInfo LocalStore::queryPathInfoOld(const Path & path)
+{
+    ValidPathInfo res;
+    res.path = path;
+
+    /* Read the info file. */
+    string baseName = baseNameOf(path);
+    Path infoFile = (format("%1%/info/%2%") % settings.nixDBPath % baseName).str();
+    if (!pathExists(infoFile))
+        throw Error(format("path ‘%1%’ is not valid") % path);
+    string info = readFile(infoFile);
+
+    /* Parse it. */
+    Strings lines = tokenizeString<Strings>(info, "\n");
+
+    foreach (Strings::iterator, i, lines) {
+        string::size_type p = i->find(':');
+        if (p == string::npos)
+            throw Error(format("corrupt line in ‘%1%’: %2%") % infoFile % *i);
+        string name(*i, 0, p);
+        string value(*i, p + 2);
+        if (name == "References") {
+            Strings refs = tokenizeString<Strings>(value, " ");
+            res.references = PathSet(refs.begin(), refs.end());
+        } else if (name == "Deriver") {
+            res.deriver = value;
+        } else if (name == "Hash") {
+            res.hash = parseHashField(path, value);
+        } else if (name == "Registered-At") {
+            int n = 0;
+            string2Int(value, n);
+            res.registrationTime = n;
+        }
+    }
+
+    return res;
+}
+
+
+/* Upgrade from schema 5 (Nix 0.12) to schema 6 (Nix >= 0.15). */
+void LocalStore::upgradeStore6()
+{
+    printMsg(lvlError, "upgrading Nix store to new schema (this may take a while)...");
+
+    openDB(true);
+
+    PathSet validPaths = queryValidPathsOld();
+
+    SQLiteTxn txn(db);
+
+    foreach (PathSet::iterator, i, validPaths) {
+        addValidPath(queryPathInfoOld(*i), false);
+        std::cerr << ".";
+    }
+
+    std::cerr << "|";
+
+    foreach (PathSet::iterator, i, validPaths) {
+        ValidPathInfo info = queryPathInfoOld(*i);
+        unsigned long long referrer = queryValidPathId(*i);
+        foreach (PathSet::iterator, j, info.references)
+            addReference(referrer, queryValidPathId(*j));
+        std::cerr << ".";
+    }
+
+    std::cerr << "\n";
+
+    txn.commit();
+}
+
+
+#if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL)
+
+static void makeMutable(const Path & path)
+{
+    checkInterrupt();
+
+    struct stat st = lstat(path);
+
+    if (!S_ISDIR(st.st_mode) && !S_ISREG(st.st_mode)) return;
+
+    if (S_ISDIR(st.st_mode)) {
+        for (auto & i : readDirectory(path))
+            makeMutable(path + "/" + i.name);
+    }
+
+    /* The O_NOFOLLOW is important to prevent us from clearing the
+       immutable bit on the target of a symlink (which would be a
+       security hole). */
+    AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_NOFOLLOW);
+    if (fd == -1) {
+        if (errno == ELOOP) return; // it's a symlink
+        throw SysError(format("opening file ‘%1%’") % path);
+    }
+
+    unsigned int flags = 0, old;
+
+    /* Silently ignore errors getting/setting the immutable flag so
+       that we work correctly on filesystems that don't support it. */
+    if (ioctl(fd, FS_IOC_GETFLAGS, &flags)) return;
+    old = flags;
+    flags &= ~FS_IMMUTABLE_FL;
+    if (old == flags) return;
+    if (ioctl(fd, FS_IOC_SETFLAGS, &flags)) return;
+}
+
+/* Upgrade from schema 6 (Nix 0.15) to schema 7 (Nix >= 1.3). */
+void LocalStore::upgradeStore7()
+{
+    if (getuid() != 0) return;
+    printMsg(lvlError, "removing immutable bits from the Nix store (this may take a while)...");
+    makeMutable(settings.nixStore);
+}
+
+#else
+
+void LocalStore::upgradeStore7()
+{
+}
+
+#endif
+
+
+void LocalStore::vacuumDB()
+{
+    if (sqlite3_exec(db, "vacuum;", 0, 0, 0) != SQLITE_OK)
+        throwSQLiteError(db, "vacuuming SQLite database");
+}
+
+
+}
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
new file mode 100644
index 000000000000..e58e6563f1cf
--- /dev/null
+++ b/src/libstore/local-store.hh
@@ -0,0 +1,336 @@
+#pragma once
+
+#include <string>
+#include <unordered_set>
+
+#include "store-api.hh"
+#include "util.hh"
+#include "pathlocks.hh"
+
+
+class sqlite3;
+class sqlite3_stmt;
+
+
+namespace nix {
+
+
+/* Nix store and database schema version.  Version 1 (or 0) was Nix <=
+   0.7.  Version 2 was Nix 0.8 and 0.9.  Version 3 is Nix 0.10.
+   Version 4 is Nix 0.11.  Version 5 is Nix 0.12-0.16.  Version 6 is
+   Nix 1.0.  Version 7 is Nix 1.3. */
+const int nixSchemaVersion = 7;
+
+
+extern string drvsLogDir;
+
+
+struct Derivation;
+
+
+struct OptimiseStats
+{
+    unsigned long filesLinked;
+    unsigned long long bytesFreed;
+    unsigned long long blocksFreed;
+    OptimiseStats()
+    {
+        filesLinked = 0;
+        bytesFreed = blocksFreed = 0;
+    }
+};
+
+
+struct RunningSubstituter
+{
+    Path program;
+    Pid pid;
+    AutoCloseFD to, from, error;
+    FdSource fromBuf;
+    bool disabled;
+    RunningSubstituter() : disabled(false) { };
+};
+
+
+/* Wrapper object to close the SQLite database automatically. */
+struct SQLite
+{
+    sqlite3 * db;
+    SQLite() { db = 0; }
+    ~SQLite();
+    operator sqlite3 * () { return db; }
+};
+
+
+/* Wrapper object to create and destroy SQLite prepared statements. */
+struct SQLiteStmt
+{
+    sqlite3 * db;
+    sqlite3_stmt * stmt;
+    unsigned int curArg;
+    SQLiteStmt() { stmt = 0; }
+    void create(sqlite3 * db, const string & s);
+    void reset();
+    ~SQLiteStmt();
+    operator sqlite3_stmt * () { return stmt; }
+    void bind(const string & value);
+    void bind(int value);
+    void bind64(long long value);
+    void bind();
+};
+
+
+class LocalStore : public StoreAPI
+{
+private:
+    typedef std::map<Path, RunningSubstituter> RunningSubstituters;
+    RunningSubstituters runningSubstituters;
+
+    Path linksDir;
+
+public:
+
+    /* Initialise the local store, upgrading the schema if
+       necessary. */
+    LocalStore(bool reserveSpace = true);
+
+    ~LocalStore();
+
+    /* Implementations of abstract store API methods. */
+
+    bool isValidPath(const Path & path);
+
+    PathSet queryValidPaths(const PathSet & paths);
+
+    PathSet queryAllValidPaths();
+
+    ValidPathInfo queryPathInfo(const Path & path);
+
+    Hash queryPathHash(const Path & path);
+
+    void queryReferences(const Path & path, PathSet & references);
+
+    void queryReferrers(const Path & path, PathSet & referrers);
+
+    Path queryDeriver(const Path & path);
+
+    PathSet queryValidDerivers(const Path & path);
+
+    PathSet queryDerivationOutputs(const Path & path);
+
+    StringSet queryDerivationOutputNames(const Path & path);
+
+    Path queryPathFromHashPart(const string & hashPart);
+
+    PathSet querySubstitutablePaths(const PathSet & paths);
+
+    void querySubstitutablePathInfos(const Path & substituter,
+        PathSet & paths, SubstitutablePathInfos & infos);
+
+    void querySubstitutablePathInfos(const PathSet & paths,
+        SubstitutablePathInfos & infos);
+
+    Path addToStore(const Path & srcPath,
+        bool recursive = true, HashType hashAlgo = htSHA256,
+        PathFilter & filter = defaultPathFilter, bool repair = false);
+
+    /* Like addToStore(), but the contents of the path are contained
+       in `dump', which is either a NAR serialisation (if recursive ==
+       true) or simply the contents of a regular file (if recursive ==
+       false). */
+    Path addToStoreFromDump(const string & dump, const string & name,
+        bool recursive = true, HashType hashAlgo = htSHA256, bool repair = false);
+
+    Path addTextToStore(const string & name, const string & s,
+        const PathSet & references, bool repair = false);
+
+    void exportPath(const Path & path, bool sign,
+        Sink & sink);
+
+    Paths importPaths(bool requireSignature, Source & source);
+
+    void buildPaths(const PathSet & paths, BuildMode buildMode);
+
+    void ensurePath(const Path & path);
+
+    void addTempRoot(const Path & path);
+
+    void addIndirectRoot(const Path & path);
+
+    void syncWithGC();
+
+    Roots findRoots();
+
+    void collectGarbage(const GCOptions & options, GCResults & results);
+
+    /* Optimise the disk space usage of the Nix store by hard-linking
+       files with the same contents. */
+    void optimiseStore(OptimiseStats & stats);
+
+    /* Optimise a single store path. */
+    void optimisePath(const Path & path);
+
+    /* Check the integrity of the Nix store.  Returns true if errors
+       remain. */
+    bool verifyStore(bool checkContents, bool repair);
+
+    /* Register the validity of a path, i.e., that `path' exists, that
+       the paths referenced by it exist, and in the case of an output
+       path of a derivation, that it has been produced by a successful
+       execution of the derivation (or something equivalent).  Also
+       register the hash of the file system contents of the path.  The
+       hash must be a SHA-256 hash. */
+    void registerValidPath(const ValidPathInfo & info);
+
+    void registerValidPaths(const ValidPathInfos & infos);
+
+    /* Register that the build of a derivation with output `path' has
+       failed. */
+    void registerFailedPath(const Path & path);
+
+    /* Query whether `path' previously failed to build. */
+    bool hasPathFailed(const Path & path);
+
+    PathSet queryFailedPaths();
+
+    void clearFailedPaths(const PathSet & paths);
+
+    void vacuumDB();
+
+    /* Repair the contents of the given path by redownloading it using
+       a substituter (if available). */
+    void repairPath(const Path & path);
+
+    /* Check whether the given valid path exists and has the right
+       contents. */
+    bool pathContentsGood(const Path & path);
+
+    void markContentsGood(const Path & path);
+
+    void setSubstituterEnv();
+
+private:
+
+    Path schemaPath;
+
+    /* Lock file used for upgrading. */
+    AutoCloseFD globalLock;
+
+    /* The SQLite database object. */
+    SQLite db;
+
+    /* Some precompiled SQLite statements. */
+    SQLiteStmt stmtRegisterValidPath;
+    SQLiteStmt stmtUpdatePathInfo;
+    SQLiteStmt stmtAddReference;
+    SQLiteStmt stmtQueryPathInfo;
+    SQLiteStmt stmtQueryReferences;
+    SQLiteStmt stmtQueryReferrers;
+    SQLiteStmt stmtInvalidatePath;
+    SQLiteStmt stmtRegisterFailedPath;
+    SQLiteStmt stmtHasPathFailed;
+    SQLiteStmt stmtQueryFailedPaths;
+    SQLiteStmt stmtClearFailedPath;
+    SQLiteStmt stmtAddDerivationOutput;
+    SQLiteStmt stmtQueryValidDerivers;
+    SQLiteStmt stmtQueryDerivationOutputs;
+    SQLiteStmt stmtQueryPathFromHashPart;
+
+    /* Cache for pathContentsGood(). */
+    std::map<Path, bool> pathContentsGoodCache;
+
+    bool didSetSubstituterEnv;
+
+    int getSchema();
+
+    void openDB(bool create);
+
+    void makeStoreWritable();
+
+    unsigned long long queryValidPathId(const Path & path);
+
+    unsigned long long addValidPath(const ValidPathInfo & info, bool checkOutputs = true);
+
+    void addReference(unsigned long long referrer, unsigned long long reference);
+
+    void appendReferrer(const Path & from, const Path & to, bool lock);
+
+    void rewriteReferrers(const Path & path, bool purge, PathSet referrers);
+
+    void invalidatePath(const Path & path);
+
+    /* Delete a path from the Nix store. */
+    void invalidatePathChecked(const Path & path);
+
+    void verifyPath(const Path & path, const PathSet & store,
+        PathSet & done, PathSet & validPaths, bool repair, bool & errors);
+
+    void updatePathInfo(const ValidPathInfo & info);
+
+    void upgradeStore6();
+    void upgradeStore7();
+    PathSet queryValidPathsOld();
+    ValidPathInfo queryPathInfoOld(const Path & path);
+
+    struct GCState;
+
+    void deleteGarbage(GCState & state, const Path & path);
+
+    void tryToDelete(GCState & state, const Path & path);
+
+    bool canReachRoot(GCState & state, PathSet & visited, const Path & path);
+
+    void deletePathRecursive(GCState & state, const Path & path);
+
+    bool isActiveTempFile(const GCState & state,
+        const Path & path, const string & suffix);
+
+    int openGCLock(LockType lockType);
+
+    void removeUnusedLinks(const GCState & state);
+
+    void startSubstituter(const Path & substituter,
+        RunningSubstituter & runningSubstituter);
+
+    string getLineFromSubstituter(RunningSubstituter & run);
+
+    template<class T> T getIntLineFromSubstituter(RunningSubstituter & run);
+
+    Path createTempDirInStore();
+
+    Path importPath(bool requireSignature, Source & source);
+
+    void checkDerivationOutputs(const Path & drvPath, const Derivation & drv);
+
+    typedef std::unordered_set<ino_t> InodeHash;
+
+    InodeHash loadInodeHash();
+    Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
+    void optimisePath_(OptimiseStats & stats, const Path & path, InodeHash & inodeHash);
+
+    // Internal versions that are not wrapped in retry_sqlite.
+    bool isValidPath_(const Path & path);
+    void queryReferrers_(const Path & path, PathSet & referrers);
+};
+
+
+typedef std::pair<dev_t, ino_t> Inode;
+typedef set<Inode> InodesSeen;
+
+
+/* "Fix", or canonicalise, the meta-data of the files in a store path
+   after it has been built.  In particular:
+   - the last modification date on each file is set to 1 (i.e.,
+     00:00:01 1/1/1970 UTC)
+   - the permissions are set to 444 or 555 (i.e., read-only with or
+     without execute permission; setuid bits etc. are cleared)
+   - the owner and group are set to the Nix user and group, if we're
+     running as root. */
+void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
+void canonicalisePathMetaData(const Path & path, uid_t fromUid);
+
+void canonicaliseTimestampAndPermissions(const Path & path);
+
+MakeError(PathInUse, Error);
+
+}
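
The canonicalisation contract described above (modification time forced to 1, permissions reduced to 444 or 555, setuid/setgid bits cleared) can be sketched for a single non-symlink file with plain POSIX calls. This is only an illustration of the rules, not the implementation behind canonicalisePathMetaData(), which also handles symlinks, ownership and hard-link sharing; the helper name is made up.

    #include <sys/stat.h>
    #include <sys/time.h>
    #include <stdexcept>
    #include <string>

    /* Illustrative helper (hypothetical): apply the timestamp and
       permission rules described in local-store.hh to one file. */
    static void canonicaliseOneFile(const std::string & path)
    {
        struct stat st;
        if (lstat(path.c_str(), &st) == -1)
            throw std::runtime_error("cannot stat " + path);

        if (!S_ISLNK(st.st_mode)) {
            /* Keep read access for everyone, plus execute for everyone if
               any execute bit was set: the result is 0444 or 0555, and the
               setuid/setgid bits are dropped because an absolute mode is set. */
            mode_t mode = (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ? 0555 : 0444;
            if (chmod(path.c_str(), mode) == -1)
                throw std::runtime_error("cannot change mode of " + path);
        }

        /* Keep the access time, but force the modification time to
           1 second after the epoch. */
        struct timeval times[2] = { { st.st_atime, 0 }, { 1, 0 } };
        if (lutimes(path.c_str(), times) == -1)
            throw std::runtime_error("cannot change timestamps of " + path);
    }
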
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
new file mode 100644
index 000000000000..180088d3df8c
--- /dev/null
+++ b/src/libstore/local.mk
@@ -0,0 +1,32 @@
+libraries += libstore
+
+libstore_NAME = libnixstore
+
+libstore_DIR := $(d)
+
+libstore_SOURCES := $(wildcard $(d)/*.cc)
+
+libstore_LIBS = libutil libformat
+
+libstore_LDFLAGS = -lsqlite3 -lbz2
+
+ifeq ($(OS), SunOS)
+	libstore_LDFLAGS += -lsocket
+endif
+
+libstore_CXXFLAGS = \
+ -DNIX_STORE_DIR=\"$(storedir)\" \
+ -DNIX_DATA_DIR=\"$(datadir)\" \
+ -DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
+ -DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
+ -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
+ -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
+ -DNIX_BIN_DIR=\"$(bindir)\" \
+ -DDEFAULT_CHROOT_DIRS="\"$(DEFAULT_CHROOT_DIRS)\""
+
+$(d)/local-store.cc: $(d)/schema.sql.hh
+
+%.sql.hh: %.sql
+	$(trace-gen) sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $< > $@ || (rm $@ && exit 1)
+
+clean-files += $(d)/schema.sql.hh
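
The %.sql.hh rule above escapes double quotes and wraps every line of the .sql file in a C string literal ending in \n, so the generated schema.sql.hh is nothing but a sequence of adjacent string literals that the compiler concatenates into one constant. A plausible way for local-store.cc to consume it is sketched below; this is an illustration, not necessarily the verbatim code.

    /* The first lines of the generated schema.sql.hh would look like:

       "create table if not exists ValidPaths (\n"
       "    id               integer primary key autoincrement not null,\n"
       ...
    */

    /* Adjacent literals concatenate into a single SQL string that can be
       executed when initialising the database. */
    static const char * schema =
    #include "schema.sql.hh"
        ;
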
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
new file mode 100644
index 000000000000..2facda81583d
--- /dev/null
+++ b/src/libstore/misc.cc
@@ -0,0 +1,220 @@
+#include "misc.hh"
+#include "store-api.hh"
+#include "local-store.hh"
+#include "globals.hh"
+
+
+namespace nix {
+
+
+Derivation derivationFromPath(StoreAPI & store, const Path & drvPath)
+{
+    assertStorePath(drvPath);
+    store.ensurePath(drvPath);
+    return readDerivation(drvPath);
+}
+
+
+void computeFSClosure(StoreAPI & store, const Path & path,
+    PathSet & paths, bool flipDirection, bool includeOutputs, bool includeDerivers)
+{
+    if (paths.find(path) != paths.end()) return;
+    paths.insert(path);
+
+    PathSet edges;
+
+    if (flipDirection) {
+        store.queryReferrers(path, edges);
+
+        if (includeOutputs) {
+            PathSet derivers = store.queryValidDerivers(path);
+            foreach (PathSet::iterator, i, derivers)
+                edges.insert(*i);
+        }
+
+        if (includeDerivers && isDerivation(path)) {
+            PathSet outputs = store.queryDerivationOutputs(path);
+            foreach (PathSet::iterator, i, outputs)
+                if (store.isValidPath(*i) && store.queryDeriver(*i) == path)
+                    edges.insert(*i);
+        }
+
+    } else {
+        store.queryReferences(path, edges);
+
+        if (includeOutputs && isDerivation(path)) {
+            PathSet outputs = store.queryDerivationOutputs(path);
+            foreach (PathSet::iterator, i, outputs)
+                if (store.isValidPath(*i)) edges.insert(*i);
+        }
+
+        if (includeDerivers) {
+            Path deriver = store.queryDeriver(path);
+            if (store.isValidPath(deriver)) edges.insert(deriver);
+        }
+    }
+
+    foreach (PathSet::iterator, i, edges)
+        computeFSClosure(store, *i, paths, flipDirection, includeOutputs, includeDerivers);
+}
+
+
+Path findOutput(const Derivation & drv, string id)
+{
+    foreach (DerivationOutputs::const_iterator, i, drv.outputs)
+        if (i->first == id) return i->second.path;
+    throw Error(format("derivation has no output ‘%1%’") % id);
+}
+
+
+void queryMissing(StoreAPI & store, const PathSet & targets,
+    PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+    unsigned long long & downloadSize, unsigned long long & narSize)
+{
+    downloadSize = narSize = 0;
+
+    PathSet todo(targets.begin(), targets.end()), done;
+
+    /* Getting substitute info has high latency when using the binary
+       cache substituter.  Thus it's essential to do substitute
+       queries in parallel as much as possible.  To accomplish this
+       we do the following:
+
+       - For all paths still to be processed (‘todo’), we add all
+         paths for which we need info to the set ‘query’.  For an
+         unbuilt derivation this is the output paths; otherwise, it's
+         the path itself.
+
+       - We get info about all paths in ‘query’ in parallel.
+
+       - We process the results and add new items to ‘todo’ if
+         necessary.  E.g. if a path is substitutable, then we need to
+         get info on its references.
+
+       - Repeat until ‘todo’ is empty.
+    */
+
+    while (!todo.empty()) {
+
+        PathSet query, todoDrv, todoNonDrv;
+
+        foreach (PathSet::iterator, i, todo) {
+            if (done.find(*i) != done.end()) continue;
+            done.insert(*i);
+
+            DrvPathWithOutputs i2 = parseDrvPathWithOutputs(*i);
+
+            if (isDerivation(i2.first)) {
+                if (!store.isValidPath(i2.first)) {
+                    // FIXME: we could try to substitute p.
+                    unknown.insert(*i);
+                    continue;
+                }
+                Derivation drv = derivationFromPath(store, i2.first);
+
+                PathSet invalid;
+                foreach (DerivationOutputs::iterator, j, drv.outputs)
+                    if (wantOutput(j->first, i2.second)
+                        && !store.isValidPath(j->second.path))
+                        invalid.insert(j->second.path);
+                if (invalid.empty()) continue;
+
+                todoDrv.insert(*i);
+                if (settings.useSubstitutes && !willBuildLocally(drv))
+                    query.insert(invalid.begin(), invalid.end());
+            }
+
+            else {
+                if (store.isValidPath(*i)) continue;
+                query.insert(*i);
+                todoNonDrv.insert(*i);
+            }
+        }
+
+        todo.clear();
+
+        SubstitutablePathInfos infos;
+        store.querySubstitutablePathInfos(query, infos);
+
+        foreach (PathSet::iterator, i, todoDrv) {
+            DrvPathWithOutputs i2 = parseDrvPathWithOutputs(*i);
+
+            // FIXME: cache this
+            Derivation drv = derivationFromPath(store, i2.first);
+
+            PathSet outputs;
+            bool mustBuild = false;
+            if (settings.useSubstitutes && !willBuildLocally(drv)) {
+                foreach (DerivationOutputs::iterator, j, drv.outputs) {
+                    if (!wantOutput(j->first, i2.second)) continue;
+                    if (!store.isValidPath(j->second.path)) {
+                        if (infos.find(j->second.path) == infos.end())
+                            mustBuild = true;
+                        else
+                            outputs.insert(j->second.path);
+                    }
+                }
+            } else
+                mustBuild = true;
+
+            if (mustBuild) {
+                willBuild.insert(i2.first);
+                todo.insert(drv.inputSrcs.begin(), drv.inputSrcs.end());
+                foreach (DerivationInputs::iterator, j, drv.inputDrvs)
+                    todo.insert(makeDrvPathWithOutputs(j->first, j->second));
+            } else
+                todoNonDrv.insert(outputs.begin(), outputs.end());
+        }
+
+        foreach (PathSet::iterator, i, todoNonDrv) {
+            done.insert(*i);
+            SubstitutablePathInfos::iterator info = infos.find(*i);
+            if (info != infos.end()) {
+                willSubstitute.insert(*i);
+                downloadSize += info->second.downloadSize;
+                narSize += info->second.narSize;
+                todo.insert(info->second.references.begin(), info->second.references.end());
+            } else
+                unknown.insert(*i);
+        }
+    }
+}
+
+
+static void dfsVisit(StoreAPI & store, const PathSet & paths,
+    const Path & path, PathSet & visited, Paths & sorted,
+    PathSet & parents)
+{
+    if (parents.find(path) != parents.end())
+        throw BuildError(format("cycle detected in the references of ‘%1%’") % path);
+
+    if (visited.find(path) != visited.end()) return;
+    visited.insert(path);
+    parents.insert(path);
+
+    PathSet references;
+    if (store.isValidPath(path))
+        store.queryReferences(path, references);
+
+    foreach (PathSet::iterator, i, references)
+        /* Don't traverse into paths that don't exist.  That can
+           happen due to substitutes for non-existent paths. */
+        if (*i != path && paths.find(*i) != paths.end())
+            dfsVisit(store, paths, *i, visited, sorted, parents);
+
+    sorted.push_front(path);
+    parents.erase(path);
+}
+
+
+Paths topoSortPaths(StoreAPI & store, const PathSet & paths)
+{
+    Paths sorted;
+    PathSet visited, parents;
+    foreach (PathSet::const_iterator, i, paths)
+        dfsVisit(store, paths, *i, visited, sorted, parents);
+    return sorted;
+}
+
+
+}
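
As a usage sketch only (this helper is not part of the change): computeFSClosure() above collects everything a store path depends on at run time; passing flipDirection = true walks the referrers relation instead.

    #include "misc.hh"
    #include "store-api.hh"

    #include <iostream>

    namespace nix {

    /* Hypothetical helper: print the closure of `path' under the
       `references' relation, i.e. all paths it directly or indirectly
       refers to. */
    static void printClosure(StoreAPI & store, const Path & path)
    {
        PathSet closure;
        computeFSClosure(store, path, closure);
        foreach (PathSet::iterator, i, closure)
            std::cout << *i << std::endl;
    }

    }
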
diff --git a/src/libstore/misc.hh b/src/libstore/misc.hh
new file mode 100644
index 000000000000..144cb7f457cd
--- /dev/null
+++ b/src/libstore/misc.hh
@@ -0,0 +1,38 @@
+#pragma once
+
+#include "derivations.hh"
+
+
+namespace nix {
+
+
+/* Read a derivation, after ensuring its existence through
+   ensurePath(). */
+Derivation derivationFromPath(StoreAPI & store, const Path & drvPath);
+
+/* Place in `paths' the set of all store paths in the file system
+   closure of `storePath'; that is, all paths that can be directly or
+   indirectly reached from it.  `paths' is not cleared.  If
+   `flipDirection' is true, the set of paths that can reach
+   `storePath' is returned; that is, the closure under the
+   `referrers' relation instead of the `references' relation is
+   returned. */
+void computeFSClosure(StoreAPI & store, const Path & path,
+    PathSet & paths, bool flipDirection = false,
+    bool includeOutputs = false, bool includeDerivers = false);
+
+/* Return the path corresponding to the output identifier `id' in the
+   given derivation. */
+Path findOutput(const Derivation & drv, string id);
+
+/* Given a set of paths that are to be built, return the set of
+   derivations that will be built, and the set of output paths that
+   will be substituted. */
+void queryMissing(StoreAPI & store, const PathSet & targets,
+    PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+    unsigned long long & downloadSize, unsigned long long & narSize);
+
+bool willBuildLocally(const Derivation & drv);
+
+
+}
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
new file mode 100644
index 000000000000..208d9688ed98
--- /dev/null
+++ b/src/libstore/optimise-store.cc
@@ -0,0 +1,238 @@
+#include "config.h"
+
+#include "util.hh"
+#include "local-store.hh"
+#include "globals.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+
+
+namespace nix {
+
+
+static void makeWritable(const Path & path)
+{
+    struct stat st;
+    if (lstat(path.c_str(), &st))
+        throw SysError(format("getting attributes of path ‘%1%’") % path);
+    if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
+        throw SysError(format("changing writability of ‘%1%’") % path);
+}
+
+
+struct MakeReadOnly
+{
+    Path path;
+    MakeReadOnly(const Path & path) : path(path) { }
+    ~MakeReadOnly()
+    {
+        try {
+            /* This will make the path read-only. */
+            if (path != "") canonicaliseTimestampAndPermissions(path);
+        } catch (...) {
+            ignoreException();
+        }
+    }
+};
+
+
+LocalStore::InodeHash LocalStore::loadInodeHash()
+{
+    printMsg(lvlDebug, "loading hash inodes in memory");
+    InodeHash inodeHash;
+
+    AutoCloseDir dir = opendir(linksDir.c_str());
+    if (!dir) throw SysError(format("opening directory ‘%1%’") % linksDir);
+
+    struct dirent * dirent;
+    while (errno = 0, dirent = readdir(dir)) { /* sic */
+        checkInterrupt();
+        // We don't care if we hit non-hash files, anything goes
+        inodeHash.insert(dirent->d_ino);
+    }
+    if (errno) throw SysError(format("reading directory ‘%1%’") % linksDir);
+
+    printMsg(lvlTalkative, format("loaded %1% hash inodes") % inodeHash.size());
+
+    return inodeHash;
+}
+
+
+Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash)
+{
+    Strings names;
+
+    AutoCloseDir dir = opendir(path.c_str());
+    if (!dir) throw SysError(format("opening directory ‘%1%’") % path);
+
+    struct dirent * dirent;
+    while (errno = 0, dirent = readdir(dir)) { /* sic */
+        checkInterrupt();
+
+        if (inodeHash.count(dirent->d_ino)) {
+            printMsg(lvlDebug, format("‘%1%’ is already linked") % dirent->d_name);
+            continue;
+        }
+
+        string name = dirent->d_name;
+        if (name == "." || name == "..") continue;
+        names.push_back(name);
+    }
+    if (errno) throw SysError(format("reading directory ‘%1%’") % path);
+
+    return names;
+}
+
+
+void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHash & inodeHash)
+{
+    checkInterrupt();
+
+    struct stat st;
+    if (lstat(path.c_str(), &st))
+        throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+    if (S_ISDIR(st.st_mode)) {
+        Strings names = readDirectoryIgnoringInodes(path, inodeHash);
+        foreach (Strings::iterator, i, names)
+            optimisePath_(stats, path + "/" + *i, inodeHash);
+        return;
+    }
+
+    /* We can hard link regular files and maybe symlinks. */
+    if (!S_ISREG(st.st_mode)
+#if CAN_LINK_SYMLINK
+        && !S_ISLNK(st.st_mode)
+#endif
+        ) return;
+
+    /* Sometimes SNAFUs can cause files in the Nix store to be
+       modified, in particular when running programs as root under
+       NixOS (example: $fontconfig/var/cache being modified).  Skip
+       those files.  FIXME: check the modification time. */
+    if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
+        printMsg(lvlError, format("skipping suspicious writable file ‘%1%’") % path);
+        return;
+    }
+
+    /* This can still happen on top-level files */
+    if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
+        printMsg(lvlDebug, format("‘%1%’ is already linked, with %2% other file(s).") % path % (st.st_nlink - 2));
+        return;
+    }
+
+    /* Hash the file.  Note that hashPath() returns the hash over the
+       NAR serialisation, which includes the execute bit on the file.
+       Thus, executable and non-executable files with the same
+       contents *won't* be linked (which is good because otherwise the
+       permissions would be screwed up).
+
+       Also note that if `path' is a symlink, then we're hashing the
+       contents of the symlink (i.e. the result of readlink()), not
+       the contents of the target (which may not even exist). */
+    Hash hash = hashPath(htSHA256, path).first;
+    printMsg(lvlDebug, format("‘%1%’ has hash ‘%2%’") % path % printHash(hash));
+
+    /* Check if this is a known hash. */
+    Path linkPath = linksDir + "/" + printHash32(hash);
+
+    if (!pathExists(linkPath)) {
+        /* Nope, create a hard link in the links directory. */
+        if (link(path.c_str(), linkPath.c_str()) == 0) {
+            inodeHash.insert(st.st_ino);
+            return;
+        }
+        if (errno != EEXIST)
+            throw SysError(format("cannot link ‘%1%’ to ‘%2%’") % linkPath % path);
+        /* Fall through if another process created ‘linkPath’ before
+           we did. */
+    }
+
+    /* Yes!  We've seen a file with the same contents.  Replace the
+       current file with a hard link to that file. */
+    struct stat stLink;
+    if (lstat(linkPath.c_str(), &stLink))
+        throw SysError(format("getting attributes of path ‘%1%’") % linkPath);
+
+    if (st.st_ino == stLink.st_ino) {
+        printMsg(lvlDebug, format("‘%1%’ is already linked to ‘%2%’") % path % linkPath);
+        return;
+    }
+
+    printMsg(lvlTalkative, format("linking ‘%1%’ to ‘%2%’") % path % linkPath);
+
+    /* Make the containing directory writable, but only if it's not
+       the store itself (we don't want or need to mess with its
+       permissions). */
+    bool mustToggle = !isStorePath(path);
+    if (mustToggle) makeWritable(dirOf(path));
+
+    /* When we're done, make the directory read-only again and reset
+       its timestamp back to 1. */
+    MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : "");
+
+    Path tempLink = (format("%1%/.tmp-link-%2%-%3%")
+        % settings.nixStore % getpid() % rand()).str();
+
+    if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
+        if (errno == EMLINK) {
+            /* Too many links to the same file (>= 32000 on most file
+               systems).  This is likely to happen with empty files.
+               Just shrug and ignore. */
+            if (st.st_size)
+                printMsg(lvlInfo, format("‘%1%’ has maximum number of links") % linkPath);
+            return;
+        }
+        throw SysError(format("cannot link ‘%1%’ to ‘%2%’") % tempLink % linkPath);
+    }
+
+    /* Atomically replace the old file with the new hard link. */
+    if (rename(tempLink.c_str(), path.c_str()) == -1) {
+        if (unlink(tempLink.c_str()) == -1)
+            printMsg(lvlError, format("unable to unlink ‘%1%’") % tempLink);
+        if (errno == EMLINK) {
+            /* Some filesystems generate too many links on the rename,
+               rather than on the original link.  (Probably it
+               temporarily increases the st_nlink field before
+               decreasing it again.) */
+            if (st.st_size)
+                printMsg(lvlInfo, format("‘%1%’ has maximum number of links") % linkPath);
+            return;
+        }
+        throw SysError(format("cannot rename ‘%1%’ to ‘%2%’") % tempLink % path);
+    }
+
+    stats.filesLinked++;
+    stats.bytesFreed += st.st_size;
+    stats.blocksFreed += st.st_blocks;
+}
+
+
+void LocalStore::optimiseStore(OptimiseStats & stats)
+{
+    PathSet paths = queryAllValidPaths();
+    InodeHash inodeHash = loadInodeHash();
+
+    foreach (PathSet::iterator, i, paths) {
+        addTempRoot(*i);
+        if (!isValidPath(*i)) continue; /* path was GC'ed, probably */
+        startNest(nest, lvlChatty, format("hashing files in ‘%1%’") % *i);
+        optimisePath_(stats, *i, inodeHash);
+    }
+}
+
+
+void LocalStore::optimisePath(const Path & path)
+{
+    OptimiseStats stats;
+    InodeHash inodeHash;
+
+    if (settings.autoOptimiseStore) optimisePath_(stats, path, inodeHash);
+}
+
+
+}
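
For context, a usage sketch (not part of the change): optimiseStore() above is roughly what the `nix-store --optimise' command runs, and the stats object it fills in carries the counters incremented in optimisePath_(). The stats object is default-constructed here in the same way optimisePath() above does it.

    #include "local-store.hh"

    #include <iostream>

    /* Hypothetical helper: deduplicate an already-opened local store and
       report the savings. */
    static void deduplicateStore(nix::LocalStore & store)
    {
        nix::OptimiseStats stats;
        store.optimiseStore(stats);
        std::cout << stats.filesLinked << " files hard-linked, "
                  << stats.bytesFreed << " bytes freed" << std::endl;
    }
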
diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc
new file mode 100644
index 000000000000..f26684afacb9
--- /dev/null
+++ b/src/libstore/pathlocks.cc
@@ -0,0 +1,199 @@
+#include "pathlocks.hh"
+#include "util.hh"
+
+#include <cerrno>
+#include <cstdlib>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+
+namespace nix {
+
+
+int openLockFile(const Path & path, bool create)
+{
+    AutoCloseFD fd;
+
+    fd = open(path.c_str(), O_RDWR | (create ? O_CREAT : 0), 0600);
+    if (fd == -1 && (create || errno != ENOENT))
+        throw SysError(format("opening lock file ‘%1%’") % path);
+
+    closeOnExec(fd);
+
+    return fd.borrow();
+}
+
+
+void deleteLockFile(const Path & path, int fd)
+{
+    /* Get rid of the lock file.  Have to be careful not to introduce
+       races.  Write a (meaningless) token to the file to indicate to
+       other processes waiting on this lock that the lock is stale
+       (deleted). */
+    unlink(path.c_str());
+    writeFull(fd, (const unsigned char *) "d", 1);
+    /* Note that the result of unlink() is ignored; removing the lock
+       file is an optimisation, not a necessity. */
+}
+
+
+bool lockFile(int fd, LockType lockType, bool wait)
+{
+    struct flock lock;
+    if (lockType == ltRead) lock.l_type = F_RDLCK;
+    else if (lockType == ltWrite) lock.l_type = F_WRLCK;
+    else if (lockType == ltNone) lock.l_type = F_UNLCK;
+    else abort();
+    lock.l_whence = SEEK_SET;
+    lock.l_start = 0;
+    lock.l_len = 0; /* entire file */
+
+    if (wait) {
+        while (fcntl(fd, F_SETLKW, &lock) != 0) {
+            checkInterrupt();
+            if (errno != EINTR)
+                throw SysError(format("acquiring/releasing lock"));
+        }
+    } else {
+        while (fcntl(fd, F_SETLK, &lock) != 0) {
+            checkInterrupt();
+            if (errno == EACCES || errno == EAGAIN) return false;
+            if (errno != EINTR) 
+                throw SysError(format("acquiring/releasing lock"));
+        }
+    }
+
+    return true;
+}
+
+
+/* This enables us to check whether we are already holding a lock on
+   a file ourselves.  POSIX locks (fcntl) suck in this respect: if we
+   close a descriptor, the previous lock will be closed as well.  And
+   there is no way to query whether we already have a lock (F_GETLK
+   only works on locks held by other processes). */
+static StringSet lockedPaths; /* !!! not thread-safe */
+
+
+PathLocks::PathLocks()
+    : deletePaths(false)
+{
+}
+
+
+PathLocks::PathLocks(const PathSet & paths, const string & waitMsg)
+    : deletePaths(false)
+{
+    lockPaths(paths, waitMsg);
+}
+
+
+bool PathLocks::lockPaths(const PathSet & _paths,
+    const string & waitMsg, bool wait)
+{
+    assert(fds.empty());
+    
+    /* Note that `fds' is built incrementally so that the destructor
+       will only release those locks that we have already acquired. */
+
+    /* Sort the paths.  This assures that locks are always acquired in
+       the same order, thus preventing deadlocks. */
+    Paths paths(_paths.begin(), _paths.end());
+    paths.sort();
+    
+    /* Acquire the lock for each path. */
+    foreach (Paths::iterator, i, paths) {
+        checkInterrupt();
+        Path path = *i;
+        Path lockPath = path + ".lock";
+
+        debug(format("locking path ‘%1%’") % path);
+
+        if (lockedPaths.find(lockPath) != lockedPaths.end())
+            throw Error("deadlock: trying to re-acquire self-held lock");
+
+        AutoCloseFD fd;
+        
+        while (1) {
+
+            /* Open/create the lock file. */
+	    fd = openLockFile(lockPath, true);
+
+            /* Acquire an exclusive lock. */
+            if (!lockFile(fd, ltWrite, false)) {
+                if (wait) {
+                    if (waitMsg != "") printMsg(lvlError, waitMsg);
+                    lockFile(fd, ltWrite, true);
+                } else {
+                    /* Failed to lock this path; release all other
+                       locks. */
+                    unlock();
+                    return false;
+                }
+            }
+
+            debug(format("lock acquired on ‘%1%’") % lockPath);
+
+            /* Check that the lock file hasn't become stale (i.e.,
+               hasn't been unlinked). */
+            struct stat st;
+            if (fstat(fd, &st) == -1)
+                throw SysError(format("statting lock file ‘%1%’") % lockPath);
+            if (st.st_size != 0)
+                /* This lock file has been unlinked, so we're holding
+                   a lock on a deleted file.  This means that other
+                   processes may create and acquire a lock on
+                   `lockPath', and proceed.  So we must retry. */
+                debug(format("open lock file ‘%1%’ has become stale") % lockPath);
+            else
+                break;
+        }
+
+        /* Use borrow so that the descriptor isn't closed. */
+        fds.push_back(FDPair(fd.borrow(), lockPath));
+        lockedPaths.insert(lockPath);
+    }
+
+    return true;
+}
+
+
+PathLocks::~PathLocks()
+{
+    unlock();
+}
+
+
+void PathLocks::unlock()
+{
+    foreach (list<FDPair>::iterator, i, fds) {
+        if (deletePaths) deleteLockFile(i->second, i->first);
+
+        lockedPaths.erase(i->second);
+        if (close(i->first) == -1)
+            printMsg(lvlError,
+                format("error (ignored): cannot close lock file on ‘%1%’") % i->second);
+
+        debug(format("lock released on ‘%1%’") % i->second);
+    }
+
+    fds.clear();
+}
+
+
+void PathLocks::setDeletion(bool deletePaths)
+{
+    this->deletePaths = deletePaths;
+}
+
+
+bool pathIsLockedByMe(const Path & path)
+{
+    Path lockPath = path + ".lock";
+    return lockedPaths.find(lockPath) != lockedPaths.end();
+}
+
+ 
+}
diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh
new file mode 100644
index 000000000000..8a6b1450da2a
--- /dev/null
+++ b/src/libstore/pathlocks.hh
@@ -0,0 +1,45 @@
+#pragma once
+
+#include "types.hh"
+
+
+namespace nix {
+
+
+/* Open (possibly create) a lock file and return the file descriptor.
+   -1 is returned if create is false and the lock could not be opened
+   because it doesn't exist.  Any other error throws an exception. */
+int openLockFile(const Path & path, bool create);
+
+/* Delete an open lock file. */
+void deleteLockFile(const Path & path, int fd);
+
+enum LockType { ltRead, ltWrite, ltNone };
+
+bool lockFile(int fd, LockType lockType, bool wait);
+
+
+class PathLocks 
+{
+private:
+    typedef std::pair<int, Path> FDPair;
+    list<FDPair> fds;
+    bool deletePaths;
+
+public:
+    PathLocks();
+    PathLocks(const PathSet & paths,
+        const string & waitMsg = "");
+    bool lockPaths(const PathSet & _paths,
+        const string & waitMsg = "",
+        bool wait = true);
+    ~PathLocks();
+    void unlock();
+    void setDeletion(bool deletePaths);
+};
+
+
+bool pathIsLockedByMe(const Path & path);
+
+
+}
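
A short usage sketch (not part of the change): PathLocks is meant to be used as an RAII guard. lockPaths() sorts the paths before acquiring the `.lock' files, so concurrent users that lock the same set in different orders cannot deadlock, and the destructor releases whatever was acquired. The store paths below are made up.

    #include "pathlocks.hh"

    namespace nix {

    /* Hypothetical example: hold exclusive locks on two store paths for
       the duration of a scope. */
    static void withLockedOutputs()
    {
        PathSet outputs;
        outputs.insert("/nix/store/aaaaaaaa-example-1.0");   /* made-up paths */
        outputs.insert("/nix/store/bbbbbbbb-example-1.0");

        PathLocks locks(outputs, "waiting for locks on example outputs...");

        /* ... work on the locked paths here ... */

    }   /* destructor releases the locks; the lock files are kept unless
           setDeletion(true) was called */

    }
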
diff --git a/src/libstore/references.cc b/src/libstore/references.cc
new file mode 100644
index 000000000000..521244a31377
--- /dev/null
+++ b/src/libstore/references.cc
@@ -0,0 +1,122 @@
+#include "references.hh"
+#include "hash.hh"
+#include "util.hh"
+#include "archive.hh"
+
+#include <map>
+#include <cstdlib>
+
+
+namespace nix {
+
+
+static unsigned int refLength = 32; /* characters */
+
+
+static void search(const unsigned char * s, unsigned int len, 
+    StringSet & hashes, StringSet & seen)
+{
+    static bool initialised = false;
+    static bool isBase32[256];
+    if (!initialised) {
+        for (unsigned int i = 0; i < 256; ++i) isBase32[i] = false;
+        for (unsigned int i = 0; i < base32Chars.size(); ++i)
+            isBase32[(unsigned char) base32Chars[i]] = true;
+        initialised = true;
+    }
+    
+    for (unsigned int i = 0; i + refLength <= len; ) {
+        int j;
+        bool match = true;
+        for (j = refLength - 1; j >= 0; --j)
+            if (!isBase32[(unsigned char) s[i + j]]) {
+                i += j + 1;
+                match = false;
+                break;
+            }
+        if (!match) continue;
+        string ref((const char *) s + i, refLength);
+        if (hashes.find(ref) != hashes.end()) {
+            debug(format("found reference to ‘%1%’ at offset ‘%2%’")
+                  % ref % i);
+            seen.insert(ref);
+            hashes.erase(ref);
+        }
+        ++i;
+    }
+}
+
+
+struct RefScanSink : Sink
+{
+    HashSink hashSink;
+    StringSet hashes;
+    StringSet seen;
+
+    string tail;
+
+    RefScanSink() : hashSink(htSHA256) { }
+    
+    void operator () (const unsigned char * data, size_t len);
+};
+
+
+void RefScanSink::operator () (const unsigned char * data, size_t len)
+{
+    hashSink(data, len);
+
+    /* It's possible that a reference spans the previous and current
+       fragment, so search in the concatenation of the tail of the
+       previous fragment and the start of the current fragment. */
+    string s = tail + string((const char *) data, len > refLength ? refLength : len);
+    search((const unsigned char *) s.data(), s.size(), hashes, seen);
+
+    search(data, len, hashes, seen);
+
+    unsigned int tailLen = len <= refLength ? len : refLength;
+    tail =
+        string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen)) +
+        string((const char *) data + len - tailLen, tailLen);
+}
+
+
+PathSet scanForReferences(const string & path,
+    const PathSet & refs, HashResult & hash)
+{
+    RefScanSink sink;
+    std::map<string, Path> backMap;
+
+    /* For efficiency (and a higher hit rate), just search for the
+       hash part of the file name.  (This assumes that all references
+       have the form `HASH-bla'). */
+    foreach (PathSet::const_iterator, i, refs) {
+        string baseName = baseNameOf(*i);
+        string::size_type pos = baseName.find('-');
+        if (pos == string::npos)
+            throw Error(format("bad reference ‘%1%’") % *i);
+        string s = string(baseName, 0, pos);
+        assert(s.size() == refLength);
+        assert(backMap.find(s) == backMap.end());
+        // parseHash(htSHA256, s);
+        sink.hashes.insert(s);
+        backMap[s] = *i;
+    }
+
+    /* Look for the hashes in the NAR dump of the path. */
+    dumpPath(path, sink);
+
+    /* Map the hashes found back to their store paths. */
+    PathSet found;
+    foreach (StringSet::iterator, i, sink.seen) {
+        std::map<string, Path>::iterator j;
+        if ((j = backMap.find(*i)) == backMap.end()) abort();
+        found.insert(j->second);
+    }
+
+    hash = sink.hashSink.finish();
+        
+    return found;
+}
+
+
+}
diff --git a/src/libstore/references.hh b/src/libstore/references.hh
new file mode 100644
index 000000000000..013809d122f3
--- /dev/null
+++ b/src/libstore/references.hh
@@ -0,0 +1,11 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+
+namespace nix {
+
+PathSet scanForReferences(const Path & path, const PathSet & refs,
+    HashResult & hash);
+    
+}
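
A usage sketch (not part of the change): scanForReferences() is how a builder can decide which of a set of candidate inputs actually ended up in an output. It scans the NAR serialisation of the path for the 32-character base-32 hash parts of the candidates and returns the ones found, computing the output's SHA-256 NAR hash in the same pass. The helper below is hypothetical.

    #include "references.hh"

    #include <iostream>

    namespace nix {

    /* Hypothetical helper: report which candidate paths are referenced
       by the build output at `output'. */
    static PathSet runtimeReferences(const Path & output, const PathSet & candidates)
    {
        HashResult narHash;
        PathSet refs = scanForReferences(output, candidates, narHash);
        foreach (PathSet::iterator, i, refs)
            std::cout << "found reference to " << *i << std::endl;
        return refs;
    }

    }
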
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
new file mode 100644
index 000000000000..8ef673783ec5
--- /dev/null
+++ b/src/libstore/remote-store.cc
@@ -0,0 +1,617 @@
+#include "serialise.hh"
+#include "util.hh"
+#include "remote-store.hh"
+#include "worker-protocol.hh"
+#include "archive.hh"
+#include "affinity.hh"
+#include "globals.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <fcntl.h>
+
+#include <iostream>
+#include <unistd.h>
+#include <cstring>
+
+namespace nix {
+
+
+Path readStorePath(Source & from)
+{
+    Path path = readString(from);
+    assertStorePath(path);
+    return path;
+}
+
+
+template<class T> T readStorePaths(Source & from)
+{
+    T paths = readStrings<T>(from);
+    foreach (typename T::iterator, i, paths) assertStorePath(*i);
+    return paths;
+}
+
+template PathSet readStorePaths(Source & from);
+
+
+RemoteStore::RemoteStore()
+{
+    initialised = false;
+}
+
+
+void RemoteStore::openConnection(bool reserveSpace)
+{
+    if (initialised) return;
+    initialised = true;
+
+    string remoteMode = getEnv("NIX_REMOTE");
+
+    if (remoteMode == "daemon")
+        /* Connect to a daemon that does the privileged work for
+           us. */
+        connectToDaemon();
+    else
+        throw Error(format("invalid setting for NIX_REMOTE, ‘%1%’") % remoteMode);
+
+    from.fd = fdSocket;
+    to.fd = fdSocket;
+
+    /* Send the magic greeting, check for the reply. */
+    try {
+        writeInt(WORKER_MAGIC_1, to);
+        to.flush();
+        unsigned int magic = readInt(from);
+        if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
+
+        daemonVersion = readInt(from);
+        if (GET_PROTOCOL_MAJOR(daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
+            throw Error("Nix daemon protocol version not supported");
+        writeInt(PROTOCOL_VERSION, to);
+
+        if (GET_PROTOCOL_MINOR(daemonVersion) >= 14) {
+            int cpu = settings.lockCPU ? lockToCurrentCPU() : -1;
+            if (cpu != -1) {
+                writeInt(1, to);
+                writeInt(cpu, to);
+            } else
+                writeInt(0, to);
+        }
+
+        if (GET_PROTOCOL_MINOR(daemonVersion) >= 11)
+            writeInt(reserveSpace, to);
+
+        processStderr();
+    }
+    catch (Error & e) {
+        throw Error(format("cannot start worker (%1%)")
+            % e.msg());
+    }
+
+    setOptions();
+}
+
+
+void RemoteStore::connectToDaemon()
+{
+    fdSocket = socket(PF_UNIX, SOCK_STREAM, 0);
+    if (fdSocket == -1)
+        throw SysError("cannot create Unix domain socket");
+    closeOnExec(fdSocket);
+
+    string socketPath = settings.nixDaemonSocketFile;
+
+    /* Urgh, sockaddr_un allows path names of only 108 characters.  So
+       chdir to the socket directory so that we can pass a relative
+       path name.  !!! this is probably a bad idea in multi-threaded
+       applications... */
+    AutoCloseFD fdPrevDir = open(".", O_RDONLY);
+    if (fdPrevDir == -1) throw SysError("couldn't open current directory");
+    chdir(dirOf(socketPath).c_str());
+    Path socketPathRel = "./" + baseNameOf(socketPath);
+
+    struct sockaddr_un addr;
+    addr.sun_family = AF_UNIX;
+    if (socketPathRel.size() >= sizeof(addr.sun_path))
+        throw Error(format("socket path ‘%1%’ is too long") % socketPathRel);
+    using namespace std;
+    strcpy(addr.sun_path, socketPathRel.c_str());
+
+    if (connect(fdSocket, (struct sockaddr *) &addr, sizeof(addr)) == -1)
+        throw SysError(format("cannot connect to daemon at ‘%1%’") % socketPath);
+
+    if (fchdir(fdPrevDir) == -1)
+        throw SysError("couldn't change back to previous directory");
+}
+
+
+RemoteStore::~RemoteStore()
+{
+    try {
+        to.flush();
+        fdSocket.close();
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+
+void RemoteStore::setOptions()
+{
+    writeInt(wopSetOptions, to);
+
+    writeInt(settings.keepFailed, to);
+    writeInt(settings.keepGoing, to);
+    writeInt(settings.tryFallback, to);
+    writeInt(verbosity, to);
+    writeInt(settings.maxBuildJobs, to);
+    writeInt(settings.maxSilentTime, to);
+    if (GET_PROTOCOL_MINOR(daemonVersion) >= 2)
+        writeInt(settings.useBuildHook, to);
+    if (GET_PROTOCOL_MINOR(daemonVersion) >= 4) {
+        writeInt(settings.buildVerbosity, to);
+        writeInt(logType, to);
+        writeInt(settings.printBuildTrace, to);
+    }
+    if (GET_PROTOCOL_MINOR(daemonVersion) >= 6)
+        writeInt(settings.buildCores, to);
+    if (GET_PROTOCOL_MINOR(daemonVersion) >= 10)
+        writeInt(settings.useSubstitutes, to);
+
+    if (GET_PROTOCOL_MINOR(daemonVersion) >= 12) {
+        Settings::SettingsMap overrides = settings.getOverrides();
+        if (overrides["ssh-auth-sock"] == "")
+            overrides["ssh-auth-sock"] = getEnv("SSH_AUTH_SOCK");
+        writeInt(overrides.size(), to);
+        foreach (Settings::SettingsMap::iterator, i, overrides) {
+            writeString(i->first, to);
+            writeString(i->second, to);
+        }
+    }
+
+    processStderr();
+}
+
+
+bool RemoteStore::isValidPath(const Path & path)
+{
+    openConnection();
+    writeInt(wopIsValidPath, to);
+    writeString(path, to);
+    processStderr();
+    unsigned int reply = readInt(from);
+    return reply != 0;
+}
+
+
+PathSet RemoteStore::queryValidPaths(const PathSet & paths)
+{
+    openConnection();
+    if (GET_PROTOCOL_MINOR(daemonVersion) < 12) {
+        PathSet res;
+        foreach (PathSet::const_iterator, i, paths)
+            if (isValidPath(*i)) res.insert(*i);
+        return res;
+    } else {
+        writeInt(wopQueryValidPaths, to);
+        writeStrings(paths, to);
+        processStderr();
+        return readStorePaths<PathSet>(from);
+    }
+}
+
+
+PathSet RemoteStore::queryAllValidPaths()
+{
+    openConnection();
+    writeInt(wopQueryAllValidPaths, to);
+    processStderr();
+    return readStorePaths<PathSet>(from);
+}
+
+
+PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths)
+{
+    openConnection();
+    if (GET_PROTOCOL_MINOR(daemonVersion) < 12) {
+        PathSet res;
+        foreach (PathSet::const_iterator, i, paths) {
+            writeInt(wopHasSubstitutes, to);
+            writeString(*i, to);
+            processStderr();
+            if (readInt(from)) res.insert(*i);
+        }
+        return res;
+    } else {
+        writeInt(wopQuerySubstitutablePaths, to);
+        writeStrings(paths, to);
+        processStderr();
+        return readStorePaths<PathSet>(from);
+    }
+}
+
+
+void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
+    SubstitutablePathInfos & infos)
+{
+    if (paths.empty()) return;
+
+    openConnection();
+
+    if (GET_PROTOCOL_MINOR(daemonVersion) < 3) return;
+
+    if (GET_PROTOCOL_MINOR(daemonVersion) < 12) {
+
+        foreach (PathSet::const_iterator, i, paths) {
+            SubstitutablePathInfo info;
+            writeInt(wopQuerySubstitutablePathInfo, to);
+            writeString(*i, to);
+            processStderr();
+            unsigned int reply = readInt(from);
+            if (reply == 0) continue;
+            info.deriver = readString(from);
+            if (info.deriver != "") assertStorePath(info.deriver);
+            info.references = readStorePaths<PathSet>(from);
+            info.downloadSize = readLongLong(from);
+            info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0;
+            infos[*i] = info;
+        }
+
+    } else {
+
+        writeInt(wopQuerySubstitutablePathInfos, to);
+        writeStrings(paths, to);
+        processStderr();
+        unsigned int count = readInt(from);
+        for (unsigned int n = 0; n < count; n++) {
+            Path path = readStorePath(from);
+            SubstitutablePathInfo & info(infos[path]);
+            info.deriver = readString(from);
+            if (info.deriver != "") assertStorePath(info.deriver);
+            info.references = readStorePaths<PathSet>(from);
+            info.downloadSize = readLongLong(from);
+            info.narSize = readLongLong(from);
+        }
+
+    }
+}
+
+
+ValidPathInfo RemoteStore::queryPathInfo(const Path & path)
+{
+    openConnection();
+    writeInt(wopQueryPathInfo, to);
+    writeString(path, to);
+    processStderr();
+    ValidPathInfo info;
+    info.path = path;
+    info.deriver = readString(from);
+    if (info.deriver != "") assertStorePath(info.deriver);
+    info.hash = parseHash(htSHA256, readString(from));
+    info.references = readStorePaths<PathSet>(from);
+    info.registrationTime = readInt(from);
+    info.narSize = readLongLong(from);
+    return info;
+}
+
+
+Hash RemoteStore::queryPathHash(const Path & path)
+{
+    openConnection();
+    writeInt(wopQueryPathHash, to);
+    writeString(path, to);
+    processStderr();
+    string hash = readString(from);
+    return parseHash(htSHA256, hash);
+}
+
+
+void RemoteStore::queryReferences(const Path & path,
+    PathSet & references)
+{
+    openConnection();
+    writeInt(wopQueryReferences, to);
+    writeString(path, to);
+    processStderr();
+    PathSet references2 = readStorePaths<PathSet>(from);
+    references.insert(references2.begin(), references2.end());
+}
+
+
+void RemoteStore::queryReferrers(const Path & path,
+    PathSet & referrers)
+{
+    openConnection();
+    writeInt(wopQueryReferrers, to);
+    writeString(path, to);
+    processStderr();
+    PathSet referrers2 = readStorePaths<PathSet>(from);
+    referrers.insert(referrers2.begin(), referrers2.end());
+}
+
+
+Path RemoteStore::queryDeriver(const Path & path)
+{
+    openConnection();
+    writeInt(wopQueryDeriver, to);
+    writeString(path, to);
+    processStderr();
+    Path drvPath = readString(from);
+    if (drvPath != "") assertStorePath(drvPath);
+    return drvPath;
+}
+
+
+PathSet RemoteStore::queryValidDerivers(const Path & path)
+{
+    openConnection();
+    writeInt(wopQueryValidDerivers, to);
+    writeString(path, to);
+    processStderr();
+    return readStorePaths<PathSet>(from);
+}
+
+
+PathSet RemoteStore::queryDerivationOutputs(const Path & path)
+{
+    openConnection();
+    writeInt(wopQueryDerivationOutputs, to);
+    writeString(path, to);
+    processStderr();
+    return readStorePaths<PathSet>(from);
+}
+
+
+PathSet RemoteStore::queryDerivationOutputNames(const Path & path)
+{
+    openConnection();
+    writeInt(wopQueryDerivationOutputNames, to);
+    writeString(path, to);
+    processStderr();
+    return readStrings<PathSet>(from);
+}
+
+
+Path RemoteStore::queryPathFromHashPart(const string & hashPart)
+{
+    openConnection();
+    writeInt(wopQueryPathFromHashPart, to);
+    writeString(hashPart, to);
+    processStderr();
+    Path path = readString(from);
+    if (!path.empty()) assertStorePath(path);
+    return path;
+}
+
+
+Path RemoteStore::addToStore(const Path & _srcPath,
+    bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
+{
+    if (repair) throw Error("repairing is not supported when building through the Nix daemon");
+
+    openConnection();
+
+    Path srcPath(absPath(_srcPath));
+
+    writeInt(wopAddToStore, to);
+    writeString(baseNameOf(srcPath), to);
+    /* backwards compatibility hack */
+    writeInt((hashAlgo == htSHA256 && recursive) ? 0 : 1, to);
+    writeInt(recursive ? 1 : 0, to);
+    writeString(printHashType(hashAlgo), to);
+
+    try {
+        to.written = 0;
+        to.warn = true;
+        dumpPath(srcPath, to, filter);
+        to.warn = false;
+        processStderr();
+    } catch (SysError & e) {
+        /* Daemon closed while we were sending the path. Probably OOM
+           or I/O error. */
+        if (e.errNo == EPIPE)
+            try {
+                processStderr();
+            } catch (EndOfFile & e) { }
+        throw;
+    }
+
+    return readStorePath(from);
+}
+
+
+Path RemoteStore::addTextToStore(const string & name, const string & s,
+    const PathSet & references, bool repair)
+{
+    if (repair) throw Error("repairing is not supported when building through the Nix daemon");
+
+    openConnection();
+    writeInt(wopAddTextToStore, to);
+    writeString(name, to);
+    writeString(s, to);
+    writeStrings(references, to);
+
+    processStderr();
+    return readStorePath(from);
+}
+
+
+void RemoteStore::exportPath(const Path & path, bool sign,
+    Sink & sink)
+{
+    openConnection();
+    writeInt(wopExportPath, to);
+    writeString(path, to);
+    writeInt(sign ? 1 : 0, to);
+    processStderr(&sink); /* sink receives the actual data */
+    readInt(from);
+}
+
+
+Paths RemoteStore::importPaths(bool requireSignature, Source & source)
+{
+    openConnection();
+    writeInt(wopImportPaths, to);
+    /* We ignore requireSignature, since the worker forces it to true
+       anyway. */
+    processStderr(0, &source);
+    return readStorePaths<Paths>(from);
+}
+
+
+void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
+{
+    if (buildMode != bmNormal) throw Error("repairing or checking is not supported when building through the Nix daemon");
+    openConnection();
+    writeInt(wopBuildPaths, to);
+    if (GET_PROTOCOL_MINOR(daemonVersion) >= 13)
+        writeStrings(drvPaths, to);
+    else {
+        /* For backwards compatibility with old daemons, strip output
+           identifiers. */
+        PathSet drvPaths2;
+        foreach (PathSet::const_iterator, i, drvPaths)
+            drvPaths2.insert(string(*i, 0, i->find('!')));
+        writeStrings(drvPaths2, to);
+    }
+    processStderr();
+    readInt(from);
+}
+
+
+void RemoteStore::ensurePath(const Path & path)
+{
+    openConnection();
+    writeInt(wopEnsurePath, to);
+    writeString(path, to);
+    processStderr();
+    readInt(from);
+}
+
+
+void RemoteStore::addTempRoot(const Path & path)
+{
+    openConnection();
+    writeInt(wopAddTempRoot, to);
+    writeString(path, to);
+    processStderr();
+    readInt(from);
+}
+
+
+void RemoteStore::addIndirectRoot(const Path & path)
+{
+    openConnection();
+    writeInt(wopAddIndirectRoot, to);
+    writeString(path, to);
+    processStderr();
+    readInt(from);
+}
+
+
+void RemoteStore::syncWithGC()
+{
+    openConnection();
+    writeInt(wopSyncWithGC, to);
+    processStderr();
+    readInt(from);
+}
+
+
+Roots RemoteStore::findRoots()
+{
+    openConnection();
+    writeInt(wopFindRoots, to);
+    processStderr();
+    unsigned int count = readInt(from);
+    Roots result;
+    while (count--) {
+        Path link = readString(from);
+        Path target = readStorePath(from);
+        result[link] = target;
+    }
+    return result;
+}
+
+
+void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
+{
+    openConnection(false);
+
+    writeInt(wopCollectGarbage, to);
+    writeInt(options.action, to);
+    writeStrings(options.pathsToDelete, to);
+    writeInt(options.ignoreLiveness, to);
+    writeLongLong(options.maxFreed, to);
+    writeInt(0, to);
+    if (GET_PROTOCOL_MINOR(daemonVersion) >= 5) {
+        /* removed options */
+        writeInt(0, to);
+        writeInt(0, to);
+    }
+
+    processStderr();
+
+    results.paths = readStrings<PathSet>(from);
+    results.bytesFreed = readLongLong(from);
+    readLongLong(from); // obsolete
+}
+
+
+PathSet RemoteStore::queryFailedPaths()
+{
+    openConnection();
+    writeInt(wopQueryFailedPaths, to);
+    processStderr();
+    return readStorePaths<PathSet>(from);
+}
+
+
+void RemoteStore::clearFailedPaths(const PathSet & paths)
+{
+    openConnection();
+    writeInt(wopClearFailedPaths, to);
+    writeStrings(paths, to);
+    processStderr();
+    readInt(from);
+}
+
+
+void RemoteStore::processStderr(Sink * sink, Source * source)
+{
+    to.flush();
+    unsigned int msg;
+    while ((msg = readInt(from)) == STDERR_NEXT
+        || msg == STDERR_READ || msg == STDERR_WRITE) {
+        if (msg == STDERR_WRITE) {
+            string s = readString(from);
+            if (!sink) throw Error("no sink");
+            (*sink)((const unsigned char *) s.data(), s.size());
+        }
+        else if (msg == STDERR_READ) {
+            if (!source) throw Error("no source");
+            size_t len = readInt(from);
+            unsigned char * buf = new unsigned char[len];
+            AutoDeleteArray<unsigned char> d(buf);
+            writeString(buf, source->read(buf, len), to);
+            to.flush();
+        }
+        else {
+            string s = readString(from);
+            writeToStderr(s);
+        }
+    }
+    if (msg == STDERR_ERROR) {
+        string error = readString(from);
+        unsigned int status = GET_PROTOCOL_MINOR(daemonVersion) >= 8 ? readInt(from) : 1;
+        throw Error(format("%1%") % error, status);
+    }
+    else if (msg != STDERR_LAST)
+        throw Error("protocol error processing standard error");
+}
+
+
+}
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
new file mode 100644
index 000000000000..b0101476434b
--- /dev/null
+++ b/src/libstore/remote-store.hh
@@ -0,0 +1,103 @@
+#pragma once
+
+#include <string>
+
+#include "store-api.hh"
+
+
+namespace nix {
+
+
+class Pipe;
+class Pid;
+struct FdSink;
+struct FdSource;
+
+
+class RemoteStore : public StoreAPI
+{
+public:
+
+    RemoteStore();
+
+    ~RemoteStore();
+    
+    /* Implementations of abstract store API methods. */
+    
+    bool isValidPath(const Path & path);
+
+    PathSet queryValidPaths(const PathSet & paths);
+    
+    PathSet queryAllValidPaths();
+    
+    ValidPathInfo queryPathInfo(const Path & path);
+
+    Hash queryPathHash(const Path & path);
+
+    void queryReferences(const Path & path, PathSet & references);
+
+    void queryReferrers(const Path & path, PathSet & referrers);
+
+    Path queryDeriver(const Path & path);
+    
+    PathSet queryValidDerivers(const Path & path);
+
+    PathSet queryDerivationOutputs(const Path & path);
+    
+    StringSet queryDerivationOutputNames(const Path & path);
+
+    Path queryPathFromHashPart(const string & hashPart);
+    
+    PathSet querySubstitutablePaths(const PathSet & paths);
+    
+    void querySubstitutablePathInfos(const PathSet & paths,
+        SubstitutablePathInfos & infos);
+    
+    Path addToStore(const Path & srcPath,
+        bool recursive = true, HashType hashAlgo = htSHA256,
+        PathFilter & filter = defaultPathFilter, bool repair = false);
+
+    Path addTextToStore(const string & name, const string & s,
+        const PathSet & references, bool repair = false);
+
+    void exportPath(const Path & path, bool sign,
+        Sink & sink);
+
+    Paths importPaths(bool requireSignature, Source & source);
+    
+    void buildPaths(const PathSet & paths, BuildMode buildMode);
+
+    void ensurePath(const Path & path);
+
+    void addTempRoot(const Path & path);
+
+    void addIndirectRoot(const Path & path);
+    
+    void syncWithGC();
+    
+    Roots findRoots();
+
+    void collectGarbage(const GCOptions & options, GCResults & results);
+    
+    PathSet queryFailedPaths();
+
+    void clearFailedPaths(const PathSet & paths);
+    
+private:
+    AutoCloseFD fdSocket;
+    FdSink to;
+    FdSource from;
+    unsigned int daemonVersion;
+    bool initialised;
+
+    void openConnection(bool reserveSpace = true);
+
+    void processStderr(Sink * sink = 0, Source * source = 0);
+
+    void connectToDaemon();
+
+    void setOptions();
+};
+
+
+}
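
A usage sketch (not part of the change): RemoteStore implements the same StoreAPI as LocalStore, but forwards every operation over the daemon's Unix domain socket. The connection is opened lazily on the first call and requires NIX_REMOTE=daemon in the environment; otherwise openConnection() throws.

    #include "remote-store.hh"

    #include <iostream>

    /* Hypothetical helper: ask the daemon whether a path is valid instead
       of opening the SQLite database directly. */
    static void checkPath(const nix::Path & path)
    {
        nix::RemoteStore store;
        std::cout << path
                  << (store.isValidPath(path) ? " is" : " is not")
                  << " a valid store path" << std::endl;
    }
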
diff --git a/src/libstore/schema.sql b/src/libstore/schema.sql
new file mode 100644
index 000000000000..c1b4a689afcb
--- /dev/null
+++ b/src/libstore/schema.sql
@@ -0,0 +1,44 @@
+create table if not exists ValidPaths (
+    id               integer primary key autoincrement not null,
+    path             text unique not null,
+    hash             text not null,
+    registrationTime integer not null,
+    deriver          text,
+    narSize          integer
+);
+
+create table if not exists Refs (
+    referrer  integer not null,
+    reference integer not null,
+    primary key (referrer, reference),
+    foreign key (referrer) references ValidPaths(id) on delete cascade,
+    foreign key (reference) references ValidPaths(id) on delete restrict
+);
+
+create index if not exists IndexReferrer on Refs(referrer);
+create index if not exists IndexReference on Refs(reference);
+
+-- Paths can refer to themselves, causing a tuple (N, N) in the Refs
+-- table.  This would cause deletion of the corresponding row in
+-- ValidPaths to trigger a foreign key constraint violation (due to `on
+-- delete restrict' on the `reference' column).  Therefore, explicitly
+-- get rid of self-references.
+create trigger if not exists DeleteSelfRefs before delete on ValidPaths
+  begin
+    delete from Refs where referrer = old.id and reference = old.id;
+  end;
+
+create table if not exists DerivationOutputs (
+    drv  integer not null,
+    id   text not null, -- symbolic output id, usually "out"
+    path text not null,
+    primary key (drv, id),
+    foreign key (drv) references ValidPaths(id) on delete cascade
+);
+
+create index if not exists IndexDerivationOutputs on DerivationOutputs(path);
+
+create table if not exists FailedPaths (
+    path text primary key not null,
+    time integer not null
+);
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
new file mode 100644
index 000000000000..d3cbd1e7dee2
--- /dev/null
+++ b/src/libstore/store-api.cc
@@ -0,0 +1,331 @@
+#include "store-api.hh"
+#include "globals.hh"
+#include "util.hh"
+
+#include <climits>
+
+
+namespace nix {
+
+
+GCOptions::GCOptions()
+{
+    action = gcDeleteDead;
+    ignoreLiveness = false;
+    maxFreed = ULLONG_MAX;
+}
+
+
+bool isInStore(const Path & path)
+{
+    return isInDir(path, settings.nixStore);
+}
+
+
+bool isStorePath(const Path & path)
+{
+    return isInStore(path)
+        && path.find('/', settings.nixStore.size() + 1) == Path::npos;
+}
+
+
+void assertStorePath(const Path & path)
+{
+    if (!isStorePath(path))
+        throw Error(format("path ‘%1%’ is not in the Nix store") % path);
+}
+
+
+Path toStorePath(const Path & path)
+{
+    if (!isInStore(path))
+        throw Error(format("path ‘%1%’ is not in the Nix store") % path);
+    Path::size_type slash = path.find('/', settings.nixStore.size() + 1);
+    if (slash == Path::npos)
+        return path;
+    else
+        return Path(path, 0, slash);
+}
+
+
+Path followLinksToStore(const Path & _path)
+{
+    Path path = absPath(_path);
+    while (!isInStore(path)) {
+        if (!isLink(path)) break;
+        string target = readLink(path);
+        path = absPath(target, dirOf(path));
+    }
+    if (!isInStore(path))
+        throw Error(format("path ‘%1%’ is not in the Nix store") % path);
+    return path;
+}
+
+
+Path followLinksToStorePath(const Path & path)
+{
+    return toStorePath(followLinksToStore(path));
+}
+
+
+string storePathToName(const Path & path)
+{
+    assertStorePath(path);
+    return string(path, settings.nixStore.size() + 34);
+}
+
+
+void checkStoreName(const string & name)
+{
+    string validChars = "+-._?=";
+    /* Disallow names starting with a dot for possible security
+       reasons (e.g., "." and ".."). */
+    if (string(name, 0, 1) == ".")
+        throw Error(format("illegal name: ‘%1%’") % name);
+    foreach (string::const_iterator, i, name)
+        if (!((*i >= 'A' && *i <= 'Z') ||
+              (*i >= 'a' && *i <= 'z') ||
+              (*i >= '0' && *i <= '9') ||
+              validChars.find(*i) != string::npos))
+        {
+            throw Error(format("invalid character ‘%1%’ in name ‘%2%’")
+                % *i % name);
+        }
+}
+
+
+/* Store paths have the following form:
+
+   <store>/<h>-<name>
+
+   where
+
+   <store> = the location of the Nix store, usually /nix/store
+   
+   <name> = a human readable name for the path, typically obtained
+     from the name attribute of the derivation, or the name of the
+     source file from which the store path is created.  For derivation
+     outputs other than the default "out" output, the string "-<id>"
+     is suffixed to <name>.
+     
+   <h> = base-32 representation of the first 160 bits of a SHA-256
+     hash of <s>; the hash part of the store name
+     
+   <s> = the string "<type>:sha256:<h2>:<store>:<name>";
+     note that it includes the location of the store as well as the
+     name to make sure that changes to either of those are reflected
+     in the hash (e.g. you won't get /nix/store/<h>-name1 and
+     /nix/store/<h>-name2 with equal hash parts).
+     
+   <type> = one of:
+     "text:<r1>:<r2>:...<rN>"
+       for plain text files written to the store using
+       addTextToStore(); <r1> ... <rN> are the references of the
+       path.
+     "source"
+       for paths copied to the store using addToStore() when recursive
+       = true and hashAlgo = "sha256"
+     "output:<id>"
+       for either the outputs created by derivations, OR paths copied
+       to the store using addToStore() with recursive != true or
+       hashAlgo != "sha256" (in that case "source" is used; it's
+       silly, but it's done that way for compatibility).  <id> is the
+       name of the output (usually, "out").
+
+   <h2> = base-16 representation of a SHA-256 hash of:
+     if <type> = "text:...":
+       the string written to the resulting store path
+     if <type> = "source":
+       the serialisation of the path from which this store path is
+       copied, as returned by hashPath()
+     if <type> = "output:out":
+       for non-fixed derivation outputs:
+         the derivation (see hashDerivationModulo() in
+         primops.cc)
+       for paths copied by addToStore() or produced by fixed-output
+       derivations:
+         the string "fixed:out:<rec><algo>:<hash>:", where
+           <rec> = "r:" for recursive (path) hashes, or "" for flat
+             (file) hashes
+           <algo> = "md5", "sha1" or "sha256"
+           <hash> = base-16 representation of the path or flat hash of
+             the contents of the path (or expected contents of the
+             path for fixed-output derivations)
+
+   It would have been nicer to handle fixed-output derivations under
+   "source", e.g. have something like "source:<rec><algo>", but we're
+   stuck with this for now...
+
+   The main reason for this way of computing names is to prevent name
+   collisions (for security).  For instance, it shouldn't be feasible
+   to come up with a derivation whose output path collides with the
+   path for a copied source.  The former would have a <s> starting with
+   "output:out:", while the latter would have a <2> starting with
+   "source:".
+*/
+
+
+Path makeStorePath(const string & type,
+    const Hash & hash, const string & name)
+{
+    /* e.g., "source:sha256:1abc...:/nix/store:foo.tar.gz" */
+    string s = type + ":sha256:" + printHash(hash) + ":"
+        + settings.nixStore + ":" + name;
+
+    checkStoreName(name);
+
+    return settings.nixStore + "/"
+        + printHash32(compressHash(hashString(htSHA256, s), 20))
+        + "-" + name;
+}
+
+
+Path makeOutputPath(const string & id,
+    const Hash & hash, const string & name)
+{
+    return makeStorePath("output:" + id, hash,
+        name + (id == "out" ? "" : "-" + id));
+}
+
+
+Path makeFixedOutputPath(bool recursive,
+    HashType hashAlgo, Hash hash, string name)
+{
+    return hashAlgo == htSHA256 && recursive
+        ? makeStorePath("source", hash, name)
+        : makeStorePath("output:out", hashString(htSHA256,
+                "fixed:out:" + (recursive ? (string) "r:" : "") +
+                printHashType(hashAlgo) + ":" + printHash(hash) + ":"),
+            name);
+}
+
+
+std::pair<Path, Hash> computeStorePathForPath(const Path & srcPath,
+    bool recursive, HashType hashAlgo, PathFilter & filter)
+{
+    HashType ht(hashAlgo);
+    Hash h = recursive ? hashPath(ht, srcPath, filter).first : hashFile(ht, srcPath);
+    string name = baseNameOf(srcPath);
+    Path dstPath = makeFixedOutputPath(recursive, hashAlgo, h, name);
+    return std::pair<Path, Hash>(dstPath, h);
+}
+
+
+Path computeStorePathForText(const string & name, const string & s,
+    const PathSet & references)
+{
+    Hash hash = hashString(htSHA256, s);
+    /* Stuff the references (if any) into the type.  This is a bit
+       hacky, but we can't put them in `s' since that would be
+       ambiguous. */
+    string type = "text";
+    foreach (PathSet::const_iterator, i, references) {
+        type += ":";
+        type += *i;
+    }
+    return makeStorePath(type, hash, name);
+}
+
+
+/* Return a string accepted by decodeValidPathInfo() that
+   registers the specified paths as valid.  Note: it's the
+   responsibility of the caller to provide a closure. */
+string StoreAPI::makeValidityRegistration(const PathSet & paths,
+    bool showDerivers, bool showHash)
+{
+    string s = "";
+    
+    foreach (PathSet::iterator, i, paths) {
+        s += *i + "\n";
+
+        ValidPathInfo info = queryPathInfo(*i);
+
+        if (showHash) {
+            s += printHash(info.hash) + "\n";
+            s += (format("%1%\n") % info.narSize).str();
+        }
+
+        Path deriver = showDerivers ? info.deriver : "";
+        s += deriver + "\n";
+
+        s += (format("%1%\n") % info.references.size()).str();
+
+        foreach (PathSet::iterator, j, info.references)
+            s += *j + "\n";
+    }
+
+    return s;
+}
+
+
+ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven)
+{
+    ValidPathInfo info;
+    getline(str, info.path);
+    if (str.eof()) { info.path = ""; return info; }
+    if (hashGiven) {
+        string s;
+        getline(str, s);
+        info.hash = parseHash(htSHA256, s);
+        getline(str, s);
+        if (!string2Int(s, info.narSize)) throw Error("number expected");
+    }
+    getline(str, info.deriver);
+    string s; int n;
+    getline(str, s);
+    if (!string2Int(s, n)) throw Error("number expected");
+    while (n--) {
+        getline(str, s);
+        info.references.insert(s);
+    }
+    if (!str || str.eof()) throw Error("missing input");
+    return info;
+}
+
+
+string showPaths(const PathSet & paths)
+{
+    string s;
+    foreach (PathSet::const_iterator, i, paths) {
+        if (s.size() != 0) s += ", ";
+        s += "‘" + *i + "’";
+    }
+    return s;
+}
+
+
+void exportPaths(StoreAPI & store, const Paths & paths,
+    bool sign, Sink & sink)
+{
+    foreach (Paths::const_iterator, i, paths) {
+        writeInt(1, sink);
+        store.exportPath(*i, sign, sink);
+    }
+    writeInt(0, sink);
+}
+
+
+}
+
+
+#include "local-store.hh"
+#include "serialise.hh"
+#include "remote-store.hh"
+
+
+namespace nix {
+
+
+std::shared_ptr<StoreAPI> store;
+
+
+std::shared_ptr<StoreAPI> openStore(bool reserveSpace)
+{
+    if (getEnv("NIX_REMOTE") == "")
+        return std::shared_ptr<StoreAPI>(new LocalStore(reserveSpace));
+    else
+        return std::shared_ptr<StoreAPI>(new RemoteStore());
+}
+
+
+}
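The long comment above makeStorePath() is easiest to follow with a concrete call. A minimal sketch (not part of the commit) that computes a fixed-output path, assuming the usual /nix/store prefix from globals.hh and the hash helpers from hash.hh:

    #include "store-api.hh"
    #include "hash.hh"
    #include <iostream>

    int main()
    {
        using namespace nix;
        // Hypothetical fixed-output hash of a downloaded file.
        Hash h = hashString(htSHA256, "example contents");
        // recursive = false, so this takes the "output:out" / "fixed:out:..." branch.
        Path p = makeFixedOutputPath(false, htSHA256, h, "example.tar.gz");
        std::cout << p << std::endl;  // /nix/store/<32-char base-32 hash>-example.tar.gz
    }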
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
new file mode 100644
index 000000000000..b635fee2cf1a
--- /dev/null
+++ b/src/libstore/store-api.hh
@@ -0,0 +1,366 @@
+#pragma once
+
+#include "hash.hh"
+#include "serialise.hh"
+
+#include <string>
+#include <map>
+#include <memory>
+
+
+namespace nix {
+
+
+typedef std::map<Path, Path> Roots;
+
+
+struct GCOptions
+{
+    /* Garbage collector operation:
+
+       - `gcReturnLive': return the set of paths reachable from
+         (i.e. in the closure of) the roots.
+
+       - `gcReturnDead': return the set of paths not reachable from
+         the roots.
+
+       - `gcDeleteDead': actually delete the latter set.
+
+       - `gcDeleteSpecific': delete the paths listed in
+          `pathsToDelete', insofar as they are not reachable.
+    */
+    typedef enum {
+        gcReturnLive,
+        gcReturnDead,
+        gcDeleteDead,
+        gcDeleteSpecific,
+    } GCAction;
+
+    GCAction action;
+
+    /* If `ignoreLiveness' is set, then reachability from the roots is
+       ignored (dangerous!).  However, the paths must still be
+       unreferenced *within* the store (i.e., there can be no other
+       store paths that depend on them). */
+    bool ignoreLiveness;
+
+    /* For `gcDeleteSpecific', the paths to delete. */
+    PathSet pathsToDelete;
+
+    /* Stop after at least `maxFreed' bytes have been freed. */
+    unsigned long long maxFreed;
+
+    GCOptions();
+};
+
+
+struct GCResults 
+{
+    /* Depending on the action, the GC roots, or the paths that would
+       be or have been deleted. */
+    PathSet paths;
+
+    /* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the
+       number of bytes that would be or was freed. */
+    unsigned long long bytesFreed;
+
+    GCResults()
+    {
+        bytesFreed = 0;
+    }
+};
+
+
+struct SubstitutablePathInfo
+{
+    Path deriver;
+    PathSet references;
+    unsigned long long downloadSize; /* 0 = unknown or inapplicable */
+    unsigned long long narSize; /* 0 = unknown */
+};
+
+typedef std::map<Path, SubstitutablePathInfo> SubstitutablePathInfos;
+
+
+struct ValidPathInfo 
+{
+    Path path;
+    Path deriver;
+    Hash hash;
+    PathSet references;
+    time_t registrationTime;
+    unsigned long long narSize; // 0 = unknown
+    unsigned long long id; // internal use only
+    ValidPathInfo() : registrationTime(0), narSize(0) { }
+};
+
+typedef list<ValidPathInfo> ValidPathInfos;
+
+
+enum BuildMode { bmNormal, bmRepair, bmCheck };
+
+
+class StoreAPI 
+{
+public:
+
+    virtual ~StoreAPI() { }
+
+    /* Check whether a path is valid. */ 
+    virtual bool isValidPath(const Path & path) = 0;
+
+    /* Query which of the given paths is valid. */
+    virtual PathSet queryValidPaths(const PathSet & paths) = 0;
+
+    /* Query the set of all valid paths. */
+    virtual PathSet queryAllValidPaths() = 0;
+
+    /* Query information about a valid path. */
+    virtual ValidPathInfo queryPathInfo(const Path & path) = 0;
+
+    /* Query the hash of a valid path. */ 
+    virtual Hash queryPathHash(const Path & path) = 0;
+
+    /* Query the set of outgoing FS references for a store path.  The
+       result is not cleared. */
+    virtual void queryReferences(const Path & path,
+        PathSet & references) = 0;
+
+    /* Queries the set of incoming FS references for a store path.
+       The result is not cleared. */
+    virtual void queryReferrers(const Path & path,
+        PathSet & referrers) = 0;
+
+    /* Query the deriver of a store path.  Return the empty string if
+       no deriver has been set. */
+    virtual Path queryDeriver(const Path & path) = 0;
+
+    /* Return all currently valid derivations that have `path' as an
+       output.  (Note that the result of `queryDeriver()' is the
+       derivation that was actually used to produce `path', which may
+       not exist anymore.) */
+    virtual PathSet queryValidDerivers(const Path & path) = 0;
+
+    /* Query the outputs of the derivation denoted by `path'. */
+    virtual PathSet queryDerivationOutputs(const Path & path) = 0;
+
+    /* Query the output names of the derivation denoted by `path'. */
+    virtual StringSet queryDerivationOutputNames(const Path & path) = 0;
+
+    /* Query the full store path given the hash part of a valid store
+       path, or "" if the path doesn't exist. */
+    virtual Path queryPathFromHashPart(const string & hashPart) = 0;
+    
+    /* Query which of the given paths have substitutes. */
+    virtual PathSet querySubstitutablePaths(const PathSet & paths) = 0;
+
+    /* Query substitute info (i.e. references, derivers and download
+       sizes) of a set of paths.  If a path does not have substitute
+       info, it's omitted from the resulting ‘infos’ map. */
+    virtual void querySubstitutablePathInfos(const PathSet & paths,
+        SubstitutablePathInfos & infos) = 0;
+    
+    /* Copy the contents of a path to the store and register the
+       validity of the resulting path.  The resulting path is returned.
+       The function object `filter' can be used to exclude files (see
+       libutil/archive.hh). */
+    virtual Path addToStore(const Path & srcPath,
+        bool recursive = true, HashType hashAlgo = htSHA256,
+        PathFilter & filter = defaultPathFilter, bool repair = false) = 0;
+
+    /* Like addToStore, but the resulting store path is a regular file
+       containing the given string. */
+    virtual Path addTextToStore(const string & name, const string & s,
+        const PathSet & references, bool repair = false) = 0;
+
+    /* Export a store path, that is, create a NAR dump of the store
+       path and append its references and its deriver.  Optionally, a
+       cryptographic signature (created by OpenSSL) of the preceding
+       data is attached. */
+    virtual void exportPath(const Path & path, bool sign,
+        Sink & sink) = 0;
+
+    /* Import a sequence of NAR dumps created by exportPaths() into
+       the Nix store. */
+    virtual Paths importPaths(bool requireSignature, Source & source) = 0;
+
+    /* For each path, if it's a derivation, build it.  Building a
+       derivation means ensuring that the output paths are valid.  If
+       they are already valid, this is a no-op.  Otherwise, validity
+       can be reached in two ways.  First, if the output paths are
+       substitutable, then build the path that way.  Second, the
+       output paths can be created by running the builder, after
+       recursively building any sub-derivations. For inputs that are
+       not derivations, substitute them. */
+    virtual void buildPaths(const PathSet & paths, BuildMode buildMode = bmNormal) = 0;
+
+    /* Ensure that a path is valid.  If it is not currently valid, it
+       may be made valid by running a substitute (if defined for the
+       path). */
+    virtual void ensurePath(const Path & path) = 0;
+
+    /* Add a store path as a temporary root of the garbage collector.
+       The root disappears as soon as we exit. */
+    virtual void addTempRoot(const Path & path) = 0;
+
+    /* Add an indirect root, which is merely a symlink to `path' from
+       /nix/var/nix/gcroots/auto/<hash of `path'>.  `path' is supposed
+       to be a symlink to a store path.  The garbage collector will
+       automatically remove the indirect root when it finds that
+       `path' has disappeared. */
+    virtual void addIndirectRoot(const Path & path) = 0;
+
+    /* Acquire the global GC lock, then immediately release it.  This
+       function must be called after registering a new permanent root,
+       but before exiting.  Otherwise, it is possible that a running
+       garbage collector doesn't see the new root and deletes the
+       stuff we've just built.  By acquiring the lock briefly, we
+       ensure that either:
+
+       - The collector is already running, and so we block until the
+         collector is finished.  The collector will know about our
+         *temporary* locks, which should include whatever it is we
+         want to register as a permanent lock.
+
+       - The collector isn't running, or it's just started but hasn't
+         acquired the GC lock yet.  In that case we get and release
+         the lock right away, then exit.  The collector scans the
+         permanent root and sees ours.
+
+       In either case the permanent root is seen by the collector. */
+    virtual void syncWithGC() = 0;
+
+    /* Find the roots of the garbage collector.  Each root is a pair
+       (link, storePath) where `link' is the path of the symlink
+       outside of the Nix store that points to `storePath'.  */
+    virtual Roots findRoots() = 0;
+
+    /* Perform a garbage collection. */
+    virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
+
+    /* Return the set of paths that have failed to build.*/
+    virtual PathSet queryFailedPaths() = 0;
+
+    /* Clear the "failed" status of the given paths.  The special
+       value `*' causes all failed paths to be cleared. */
+    virtual void clearFailedPaths(const PathSet & paths) = 0;
+
+    /* Return a string representing information about the path that
+       can be loaded into the database using `nix-store --load-db' or
+       `nix-store --register-validity'. */
+    string makeValidityRegistration(const PathSet & paths,
+        bool showDerivers, bool showHash);
+};
+
+
+/* !!! These should be part of the store API, I guess. */
+
+/* Throw an exception if `path' is not directly in the Nix store. */
+void assertStorePath(const Path & path);
+
+bool isInStore(const Path & path);
+bool isStorePath(const Path & path);
+
+/* Extract the name part of the given store path. */
+string storePathToName(const Path & path);
+    
+void checkStoreName(const string & name);
+
+
+/* Chop off the parts after the top-level store name, e.g.,
+   /nix/store/abcd-foo/bar => /nix/store/abcd-foo. */
+Path toStorePath(const Path & path);
+
+
+/* Follow symlinks until we end up with a path in the Nix store. */
+Path followLinksToStore(const Path & path);
+
+
+/* Same as followLinksToStore(), but apply toStorePath() to the
+   result. */
+Path followLinksToStorePath(const Path & path);
+
+
+/* Constructs a unique store path name. */
+Path makeStorePath(const string & type,
+    const Hash & hash, const string & name);
+    
+Path makeOutputPath(const string & id,
+    const Hash & hash, const string & name);
+
+Path makeFixedOutputPath(bool recursive,
+    HashType hashAlgo, Hash hash, string name);
+
+
+/* This is the preparatory part of addToStore() and addToStoreFixed();
+   it computes the store path to which srcPath is to be copied.
+   Returns the store path and the cryptographic hash of the
+   contents of srcPath. */
+std::pair<Path, Hash> computeStorePathForPath(const Path & srcPath,
+    bool recursive = true, HashType hashAlgo = htSHA256,
+    PathFilter & filter = defaultPathFilter);
+
+/* Preparatory part of addTextToStore().
+
+   !!! Computation of the path should take the references given to
+   addTextToStore() into account, otherwise we have a (relatively
+   minor) security hole: a caller can register a source file with
+   bogus references.  If there are too many references, the path may
+   not be garbage collected when it has to be (not really a problem,
+   the caller could create a root anyway), or it may be garbage
+   collected when it shouldn't be (more serious).
+
+   Hashing the references would solve this (bogus references would
+   simply yield a different store path, so other users wouldn't be
+   affected), but it has some backwards compatibility issues (the
+   hashing scheme changes), so I'm not doing that for now. */
+Path computeStorePathForText(const string & name, const string & s,
+    const PathSet & references);
+
+
+/* Remove the temporary roots file for this process.  Any temporary
+   root becomes garbage after this point unless it has been registered
+   as a (permanent) root. */
+void removeTempRoots();
+
+
+/* Register a permanent GC root. */
+Path addPermRoot(StoreAPI & store, const Path & storePath,
+    const Path & gcRoot, bool indirect, bool allowOutsideRootsDir = false);
+
+
+/* Sort a set of paths topologically under the references relation.
+   If p refers to q, then p precedes q in this list. */
+Paths topoSortPaths(StoreAPI & store, const PathSet & paths);
+
+
+/* For now, there is a single global store API object, but we'll
+   purify that in the future. */
+extern std::shared_ptr<StoreAPI> store;
+
+
+/* Factory method: open the Nix database, either through the local or
+   remote implementation. */
+std::shared_ptr<StoreAPI> openStore(bool reserveSpace = true);
+
+
+/* Display a set of paths in human-readable form (i.e., between quotes
+   and separated by commas). */
+string showPaths(const PathSet & paths);
+
+
+ValidPathInfo decodeValidPathInfo(std::istream & str,
+    bool hashGiven = false);
+
+
+/* Export multiple paths in the format expected by ‘nix-store
+   --import’. */
+void exportPaths(StoreAPI & store, const Paths & paths,
+    bool sign, Sink & sink);
+
+
+MakeError(SubstError, Error)
+MakeError(BuildError, Error) /* denotes a permanent build failure */
+
+
+}
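A sketch of how a client is expected to drive the StoreAPI interface declared above: open the store via the factory (local or daemon, depending on NIX_REMOTE) and then go through the global `store' object. The example path is made up, and the initialisation a real program performs before opening the store is omitted:

    #include "store-api.hh"
    #include <iostream>

    int main()
    {
        using namespace nix;
        store = openStore();   // factory declared above

        // Hypothetical store path.
        Path p = "/nix/store/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-example";
        if (store->isValidPath(p)) {
            ValidPathInfo info = store->queryPathInfo(p);
            std::cout << showPaths(info.references) << std::endl;
        }
    }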
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
new file mode 100644
index 000000000000..c7d3a726ab4c
--- /dev/null
+++ b/src/libstore/worker-protocol.hh
@@ -0,0 +1,59 @@
+#pragma once
+
+namespace nix {
+
+
+#define WORKER_MAGIC_1 0x6e697863
+#define WORKER_MAGIC_2 0x6478696f
+
+#define PROTOCOL_VERSION 0x10e
+#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
+#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
+
+
+typedef enum {
+    wopIsValidPath = 1,
+    wopHasSubstitutes = 3,
+    wopQueryPathHash = 4,
+    wopQueryReferences = 5,
+    wopQueryReferrers = 6,
+    wopAddToStore = 7,
+    wopAddTextToStore = 8,
+    wopBuildPaths = 9,
+    wopEnsurePath = 10,
+    wopAddTempRoot = 11,
+    wopAddIndirectRoot = 12,
+    wopSyncWithGC = 13,
+    wopFindRoots = 14,
+    wopExportPath = 16,
+    wopQueryDeriver = 18,
+    wopSetOptions = 19,
+    wopCollectGarbage = 20,
+    wopQuerySubstitutablePathInfo = 21,
+    wopQueryDerivationOutputs = 22,
+    wopQueryAllValidPaths = 23,
+    wopQueryFailedPaths = 24,
+    wopClearFailedPaths = 25,
+    wopQueryPathInfo = 26,
+    wopImportPaths = 27,
+    wopQueryDerivationOutputNames = 28,
+    wopQueryPathFromHashPart = 29,
+    wopQuerySubstitutablePathInfos = 30,
+    wopQueryValidPaths = 31,
+    wopQuerySubstitutablePaths = 32,
+    wopQueryValidDerivers = 33,
+} WorkerOp;
+
+
+#define STDERR_NEXT  0x6f6c6d67
+#define STDERR_READ  0x64617461 // data needed from source
+#define STDERR_WRITE 0x64617416 // data for sink
+#define STDERR_LAST  0x616c7473
+#define STDERR_ERROR 0x63787470
+
+
+Path readStorePath(Source & from);
+template<class T> T readStorePaths(Source & from);
+
+
+}
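A quick check of how the version macros above pack the protocol major/minor into a single word:

    #include "worker-protocol.hh"
    #include <cstdio>

    int main()
    {
        printf("major = 0x%x, minor = %d\n",
            GET_PROTOCOL_MAJOR(PROTOCOL_VERSION),   // 0x100
            GET_PROTOCOL_MINOR(PROTOCOL_VERSION));  // 14 (0x0e)
    }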
diff --git a/src/libutil/affinity.cc b/src/libutil/affinity.cc
new file mode 100644
index 000000000000..3e21f43a2e9d
--- /dev/null
+++ b/src/libutil/affinity.cc
@@ -0,0 +1,55 @@
+#include "types.hh"
+#include "util.hh"
+#include "affinity.hh"
+
+#if HAVE_SCHED_H
+#include <sched.h>
+#endif
+
+namespace nix {
+
+
+#if HAVE_SCHED_SETAFFINITY
+static bool didSaveAffinity = false;
+static cpu_set_t savedAffinity;
+#endif
+
+
+void setAffinityTo(int cpu)
+{
+#if HAVE_SCHED_SETAFFINITY
+    if (sched_getaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1) return;
+    didSaveAffinity = true;
+    printMsg(lvlDebug, format("locking this thread to CPU %1%") % cpu);
+    cpu_set_t newAffinity;
+    CPU_ZERO(&newAffinity);
+    CPU_SET(cpu, &newAffinity);
+    if (sched_setaffinity(0, sizeof(cpu_set_t), &newAffinity) == -1)
+        printMsg(lvlError, format("failed to lock thread to CPU %1%") % cpu);
+#endif
+}
+
+
+int lockToCurrentCPU()
+{
+#if HAVE_SCHED_SETAFFINITY
+    int cpu = sched_getcpu();
+    if (cpu != -1) setAffinityTo(cpu);
+    return cpu;
+#else
+    return -1;
+#endif
+}
+
+
+void restoreAffinity()
+{
+#if HAVE_SCHED_SETAFFINITY
+    if (!didSaveAffinity) return;
+    if (sched_setaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1)
+        printMsg(lvlError, "failed to restore affinity");
+#endif
+}
+
+
+}
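Typical use of the helpers above, as a sketch: pin the calling thread to whatever CPU it is currently on (a no-op on platforms without sched_setaffinity), do some CPU-bound work, then undo it.

    #include "affinity.hh"

    void doPinnedWork()
    {
        int cpu = nix::lockToCurrentCPU();  // returns -1 if unsupported
        /* ... CPU-bound work ... */
        nix::restoreAffinity();             // no-op unless an affinity was saved
        (void) cpu;
    }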
diff --git a/src/libutil/affinity.hh b/src/libutil/affinity.hh
new file mode 100644
index 000000000000..c1bd28e1367a
--- /dev/null
+++ b/src/libutil/affinity.hh
@@ -0,0 +1,9 @@
+#pragma once
+
+namespace nix {
+
+void setAffinityTo(int cpu);
+int lockToCurrentCPU();
+void restoreAffinity();
+
+}
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
new file mode 100644
index 000000000000..9e16e04ae4b5
--- /dev/null
+++ b/src/libutil/archive.cc
@@ -0,0 +1,362 @@
+#define _XOPEN_SOURCE 600
+
+#include "config.h"
+
+#include <cerrno>
+#include <algorithm>
+#include <vector>
+#include <map>
+
+#include <strings.h> // for strcasecmp
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+
+#include "archive.hh"
+#include "util.hh"
+
+
+namespace nix {
+
+
+bool useCaseHack =
+#if __APPLE__
+    true;
+#else
+    false;
+#endif
+
+static string archiveVersion1 = "nix-archive-1";
+
+static string caseHackSuffix = "~nix~case~hack~";
+
+PathFilter defaultPathFilter;
+
+
+static void dumpContents(const Path & path, size_t size,
+    Sink & sink)
+{
+    writeString("contents", sink);
+    writeLongLong(size, sink);
+
+    AutoCloseFD fd = open(path.c_str(), O_RDONLY);
+    if (fd == -1) throw SysError(format("opening file ‘%1%’") % path);
+
+    unsigned char buf[65536];
+    size_t left = size;
+
+    while (left > 0) {
+        size_t n = left > sizeof(buf) ? sizeof(buf) : left;
+        readFull(fd, buf, n);
+        left -= n;
+        sink(buf, n);
+    }
+
+    writePadding(size, sink);
+}
+
+
+static void dump(const Path & path, Sink & sink, PathFilter & filter)
+{
+    struct stat st;
+    if (lstat(path.c_str(), &st))
+        throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+    writeString("(", sink);
+
+    if (S_ISREG(st.st_mode)) {
+        writeString("type", sink);
+        writeString("regular", sink);
+        if (st.st_mode & S_IXUSR) {
+            writeString("executable", sink);
+            writeString("", sink);
+        }
+        dumpContents(path, (size_t) st.st_size, sink);
+    }
+
+    else if (S_ISDIR(st.st_mode)) {
+        writeString("type", sink);
+        writeString("directory", sink);
+
+        /* If we're on a case-insensitive system like Mac OS X, undo
+           the case hack applied by restorePath(). */
+        std::map<string, string> unhacked;
+        for (auto & i : readDirectory(path))
+            if (useCaseHack) {
+                string name(i.name);
+                size_t pos = i.name.find(caseHackSuffix);
+                if (pos != string::npos) {
+                    printMsg(lvlDebug, format("removing case hack suffix from ‘%1%’") % (path + "/" + i.name));
+                    name.erase(pos);
+                }
+                if (unhacked.find(name) != unhacked.end())
+                    throw Error(format("file name collision in between ‘%1%’ and ‘%2%’")
+                        % (path + "/" + unhacked[name]) % (path + "/" + i.name));
+                unhacked[name] = i.name;
+            } else
+                unhacked[i.name] = i.name;
+
+        for (auto & i : unhacked)
+            if (filter(path + "/" + i.first)) {
+                writeString("entry", sink);
+                writeString("(", sink);
+                writeString("name", sink);
+                writeString(i.first, sink);
+                writeString("node", sink);
+                dump(path + "/" + i.second, sink, filter);
+                writeString(")", sink);
+            }
+    }
+
+    else if (S_ISLNK(st.st_mode)) {
+        writeString("type", sink);
+        writeString("symlink", sink);
+        writeString("target", sink);
+        writeString(readLink(path), sink);
+    }
+
+    else throw Error(format("file ‘%1%’ has an unsupported type") % path);
+
+    writeString(")", sink);
+}
+
+
+void dumpPath(const Path & path, Sink & sink, PathFilter & filter)
+{
+    writeString(archiveVersion1, sink);
+    dump(path, sink, filter);
+}
+
+
+static SerialisationError badArchive(string s)
+{
+    return SerialisationError("bad archive: " + s);
+}
+
+
+#if 0
+static void skipGeneric(Source & source)
+{
+    if (readString(source) == "(") {
+        while (readString(source) != ")")
+            skipGeneric(source);
+    }
+}
+#endif
+
+
+static void parseContents(ParseSink & sink, Source & source, const Path & path)
+{
+    unsigned long long size = readLongLong(source);
+
+    sink.preallocateContents(size);
+
+    unsigned long long left = size;
+    unsigned char buf[65536];
+
+    while (left) {
+        checkInterrupt();
+        unsigned int n = sizeof(buf);
+        if ((unsigned long long) n > left) n = left;
+        source(buf, n);
+        sink.receiveContents(buf, n);
+        left -= n;
+    }
+
+    readPadding(size, source);
+}
+
+
+struct CaseInsensitiveCompare
+{
+    bool operator() (const string & a, const string & b) const
+    {
+        return strcasecmp(a.c_str(), b.c_str()) < 0;
+    }
+};
+
+
+static void parse(ParseSink & sink, Source & source, const Path & path)
+{
+    string s;
+
+    s = readString(source);
+    if (s != "(") throw badArchive("expected open tag");
+
+    enum { tpUnknown, tpRegular, tpDirectory, tpSymlink } type = tpUnknown;
+
+    std::map<Path, int, CaseInsensitiveCompare> names;
+
+    while (1) {
+        checkInterrupt();
+
+        s = readString(source);
+
+        if (s == ")") {
+            break;
+        }
+
+        else if (s == "type") {
+            if (type != tpUnknown)
+                throw badArchive("multiple type fields");
+            string t = readString(source);
+
+            if (t == "regular") {
+                type = tpRegular;
+                sink.createRegularFile(path);
+            }
+
+            else if (t == "directory") {
+                sink.createDirectory(path);
+                type = tpDirectory;
+            }
+
+            else if (t == "symlink") {
+                type = tpSymlink;
+            }
+
+            else throw badArchive("unknown file type " + t);
+
+        }
+
+        else if (s == "contents" && type == tpRegular) {
+            parseContents(sink, source, path);
+        }
+
+        else if (s == "executable" && type == tpRegular) {
+            readString(source);
+            sink.isExecutable();
+        }
+
+        else if (s == "entry" && type == tpDirectory) {
+            string name, prevName;
+
+            s = readString(source);
+            if (s != "(") throw badArchive("expected open tag");
+
+            while (1) {
+                checkInterrupt();
+
+                s = readString(source);
+
+                if (s == ")") {
+                    break;
+                } else if (s == "name") {
+                    name = readString(source);
+                    if (name.empty() || name == "." || name == ".." || name.find('/') != string::npos || name.find((char) 0) != string::npos)
+                        throw Error(format("NAR contains invalid file name ‘%1%’") % name);
+                    if (name <= prevName)
+                        throw Error("NAR directory is not sorted");
+                    prevName = name;
+                    if (useCaseHack) {
+                        auto i = names.find(name);
+                        if (i != names.end()) {
+                            printMsg(lvlDebug, format("case collision between ‘%1%’ and ‘%2%’") % i->first % name);
+                            name += caseHackSuffix;
+                            name += int2String(++i->second);
+                        } else
+                            names[name] = 0;
+                    }
+                } else if (s == "node") {
+                    if (name.empty()) throw badArchive("entry name missing");
+                    parse(sink, source, path + "/" + name);
+                } else
+                    throw badArchive("unknown field " + s);
+            }
+        }
+
+        else if (s == "target" && type == tpSymlink) {
+            string target = readString(source);
+            sink.createSymlink(path, target);
+        }
+
+        else
+            throw badArchive("unknown field " + s);
+    }
+}
+
+
+void parseDump(ParseSink & sink, Source & source)
+{
+    string version;
+    try {
+        version = readString(source);
+    } catch (SerialisationError & e) {
+        /* This generally means the integer at the start couldn't be
+           decoded.  Ignore and throw the exception below. */
+    }
+    if (version != archiveVersion1)
+        throw badArchive("input doesn't look like a Nix archive");
+    parse(sink, source, "");
+}
+
+
+struct RestoreSink : ParseSink
+{
+    Path dstPath;
+    AutoCloseFD fd;
+
+    void createDirectory(const Path & path)
+    {
+        Path p = dstPath + path;
+        if (mkdir(p.c_str(), 0777) == -1)
+            throw SysError(format("creating directory ‘%1%’") % p);
+    };
+
+    void createRegularFile(const Path & path)
+    {
+        Path p = dstPath + path;
+        fd.close();
+        fd = open(p.c_str(), O_CREAT | O_EXCL | O_WRONLY, 0666);
+        if (fd == -1) throw SysError(format("creating file ‘%1%’") % p);
+    }
+
+    void isExecutable()
+    {
+        struct stat st;
+        if (fstat(fd, &st) == -1)
+            throw SysError("fstat");
+        if (fchmod(fd, st.st_mode | (S_IXUSR | S_IXGRP | S_IXOTH)) == -1)
+            throw SysError("fchmod");
+    }
+
+    void preallocateContents(unsigned long long len)
+    {
+#if HAVE_POSIX_FALLOCATE
+        if (len) {
+            errno = posix_fallocate(fd, 0, len);
+            /* Note that EINVAL may indicate that the underlying
+               filesystem doesn't support preallocation (e.g. on
+               OpenSolaris).  Since preallocation is just an
+               optimisation, ignore it. */
+            if (errno && errno != EINVAL)
+                throw SysError(format("preallocating file of %1% bytes") % len);
+        }
+#endif
+    }
+
+    void receiveContents(unsigned char * data, unsigned int len)
+    {
+        writeFull(fd, data, len);
+    }
+
+    void createSymlink(const Path & path, const string & target)
+    {
+        Path p = dstPath + path;
+        nix::createSymlink(target, p);
+    }
+};
+
+
+void restorePath(const Path & path, Source & source)
+{
+    RestoreSink sink;
+    sink.dstPath = path;
+    parseDump(sink, source);
+}
+
+
+}
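A sketch (not part of the commit) of round-tripping a directory tree through the NAR format with dumpPath()/restorePath(). FdSink and FdSource are assumed to be the fd-backed Sink/Source implementations from serialise.hh; the paths are made up:

    #include "archive.hh"
    #include "serialise.hh"
    #include <fcntl.h>
    #include <unistd.h>

    int main()
    {
        using namespace nix;

        int out = open("tree.nar", O_WRONLY | O_CREAT | O_TRUNC, 0666);
        FdSink sink(out);
        dumpPath("/some/source/tree", sink);   // serialise
        sink.flush();
        close(out);

        int in = open("tree.nar", O_RDONLY);
        FdSource source(in);
        restorePath("/some/copy", source);     // deserialise into a fresh path
        close(in);
    }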
diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh
new file mode 100644
index 000000000000..c216e9768fd1
--- /dev/null
+++ b/src/libutil/archive.hh
@@ -0,0 +1,79 @@
+#pragma once
+
+#include "types.hh"
+#include "serialise.hh"
+
+
+namespace nix {
+
+
+/* dumpPath creates a Nix archive of the specified path.  The format
+   is as follows:
+
+   IF path points to a REGULAR FILE:
+     dump(path) = attrs(
+       [ ("type", "regular")
+       , ("contents", contents(path))
+       ])
+
+   IF path points to a DIRECTORY:
+     dump(path) = attrs(
+       [ ("type", "directory")
+       , ("entries", concat(map(f, sort(entries(path)))))
+       ])
+       where f(fn) = attrs(
+         [ ("name", fn)
+         , ("file", dump(path + "/" + fn))
+         ])
+
+   where:
+
+     attrs(as) = concat(map(attr, as)) + encN(0)
+     attrs((a, b)) = encS(a) + encS(b)
+
+     encS(s) = encN(len(s)) + s + (padding until next 64-bit boundary)
+
+     encN(n) = 64-bit little-endian encoding of n.
+
+     contents(path) = the contents of a regular file.
+
+     sort(strings) = lexicographic sort by 8-bit value (strcmp).
+
+     entries(path) = the entries of a directory, without `.' and
+     `..'.
+
+     `+' denotes string concatenation. */
+
+struct PathFilter
+{
+    virtual ~PathFilter() { }
+    virtual bool operator () (const Path & path) { return true; }
+};
+
+extern PathFilter defaultPathFilter;
+
+void dumpPath(const Path & path, Sink & sink,
+    PathFilter & filter = defaultPathFilter);
+
+struct ParseSink
+{
+    virtual void createDirectory(const Path & path) { };
+
+    virtual void createRegularFile(const Path & path) { };
+    virtual void isExecutable() { };
+    virtual void preallocateContents(unsigned long long size) { };
+    virtual void receiveContents(unsigned char * data, unsigned int len) { };
+
+    virtual void createSymlink(const Path & path, const string & target) { };
+};
+
+void parseDump(ParseSink & sink, Source & source);
+
+void restorePath(const Path & path, Source & source);
+
+
+// FIXME: global variables are bad m'kay.
+extern bool useCaseHack;
+
+
+}
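The ParseSink callbacks above don't have to recreate files on disk. As an illustrative sketch, a sink that merely prints the structure of a NAR stream while parseDump() drives it:

    #include "archive.hh"
    #include <iostream>

    struct ListingSink : nix::ParseSink
    {
        void createDirectory(const nix::Path & path)
        { std::cout << "dir  " << path << "\n"; }
        void createRegularFile(const nix::Path & path)
        { std::cout << "file " << path << "\n"; }
        void createSymlink(const nix::Path & path, const nix::string & target)
        { std::cout << "link " << path << " -> " << target << "\n"; }
    };

    // usage, given any nix::Source yielding a NAR:
    //     ListingSink sink;
    //     nix::parseDump(sink, source);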
diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc
new file mode 100644
index 000000000000..965f3ed47701
--- /dev/null
+++ b/src/libutil/hash.cc
@@ -0,0 +1,382 @@
+#include "config.h"
+
+#include <iostream>
+#include <cstring>
+
+#ifdef HAVE_OPENSSL
+#include <openssl/md5.h>
+#include <openssl/sha.h>
+#else
+extern "C" {
+#include "md5.h"
+#include "sha1.h"
+#include "sha256.h"
+}
+#endif
+
+#include "hash.hh"
+#include "archive.hh"
+#include "util.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+
+namespace nix {
+
+
+Hash::Hash()
+{
+    type = htUnknown;
+    hashSize = 0;
+    memset(hash, 0, maxHashSize);
+}
+
+
+Hash::Hash(HashType type)
+{
+    this->type = type;
+    if (type == htMD5) hashSize = md5HashSize;
+    else if (type == htSHA1) hashSize = sha1HashSize;
+    else if (type == htSHA256) hashSize = sha256HashSize;
+    else throw Error("unknown hash type");
+    assert(hashSize <= maxHashSize);
+    memset(hash, 0, maxHashSize);
+}
+
+
+bool Hash::operator == (const Hash & h2) const
+{
+    if (hashSize != h2.hashSize) return false;
+    for (unsigned int i = 0; i < hashSize; i++)
+        if (hash[i] != h2.hash[i]) return false;
+    return true;
+}
+
+
+bool Hash::operator != (const Hash & h2) const
+{
+    return !(*this == h2);
+}
+
+
+bool Hash::operator < (const Hash & h) const
+{
+    for (unsigned int i = 0; i < hashSize; i++) {
+        if (hash[i] < h.hash[i]) return true;
+        if (hash[i] > h.hash[i]) return false;
+    }
+    return false;
+}
+
+
+const string base16Chars = "0123456789abcdef";
+
+
+string printHash(const Hash & hash)
+{
+    char buf[hash.hashSize * 2];
+    for (unsigned int i = 0; i < hash.hashSize; i++) {
+        buf[i * 2] = base16Chars[hash.hash[i] >> 4];
+        buf[i * 2 + 1] = base16Chars[hash.hash[i] & 0x0f];
+    }
+    return string(buf, hash.hashSize * 2);
+}
+
+    
+Hash parseHash(HashType ht, const string & s)
+{
+    Hash hash(ht);
+    if (s.length() != hash.hashSize * 2)
+        throw Error(format("invalid hash ‘%1%’") % s);
+    for (unsigned int i = 0; i < hash.hashSize; i++) {
+        string s2(s, i * 2, 2);
+        if (!isxdigit(s2[0]) || !isxdigit(s2[1])) 
+            throw Error(format("invalid hash ‘%1%’") % s);
+        std::istringstream str(s2);
+        int n;
+        str >> std::hex >> n;
+        hash.hash[i] = n;
+    }
+    return hash;
+}
+
+
+static unsigned char divMod(unsigned char * bytes, unsigned char y)
+{
+    unsigned int borrow = 0;
+
+    int pos = Hash::maxHashSize - 1;
+    while (pos >= 0 && !bytes[pos]) --pos;
+
+    for ( ; pos >= 0; --pos) {
+        unsigned int s = bytes[pos] + (borrow << 8);
+        unsigned int d = s / y;
+        borrow = s % y;
+        bytes[pos] = d;
+    }
+
+    return borrow;
+}
+
+
+unsigned int hashLength32(const Hash & hash)
+{
+    return (hash.hashSize * 8 - 1) / 5 + 1;
+}
+
+
+// omitted: E O U T
+const string base32Chars = "0123456789abcdfghijklmnpqrsvwxyz";
+
+
+string printHash32(const Hash & hash)
+{
+    Hash hash2(hash);
+    unsigned int len = hashLength32(hash);
+
+    const char * chars = base32Chars.data();
+    
+    string s(len, '0');
+
+    int pos = len - 1;
+    while (pos >= 0) {
+        unsigned char digit = divMod(hash2.hash, 32);
+        s[pos--] = chars[digit];
+    }
+
+    for (unsigned int i = 0; i < hash2.maxHashSize; ++i)
+        assert(hash2.hash[i] == 0);
+
+    return s;
+}
+
+
+string printHash16or32(const Hash & hash)
+{
+    return hash.type == htMD5 ? printHash(hash) : printHash32(hash);
+}
+
+
+static bool mul(unsigned char * bytes, unsigned char y, int maxSize)
+{
+    unsigned char carry = 0;
+
+    for (int pos = 0; pos < maxSize; ++pos) {
+        unsigned int m = bytes[pos] * y + carry;
+        bytes[pos] = m & 0xff;
+        carry = m >> 8;
+    }
+
+    return carry;
+}
+
+
+static bool add(unsigned char * bytes, unsigned char y, int maxSize)
+{
+    unsigned char carry = y;
+
+    for (int pos = 0; pos < maxSize; ++pos) {
+        unsigned int m = bytes[pos] + carry;
+        bytes[pos] = m & 0xff;
+        carry = m >> 8;
+        if (carry == 0) break;
+    }
+
+    return carry;
+}
+
+
+Hash parseHash32(HashType ht, const string & s)
+{
+    Hash hash(ht);
+
+    const char * chars = base32Chars.data();
+
+    for (unsigned int i = 0; i < s.length(); ++i) {
+        char c = s[i];
+        unsigned char digit;
+        for (digit = 0; digit < base32Chars.size(); ++digit) /* !!! slow */
+            if (chars[digit] == c) break;
+        if (digit >= 32)
+            throw Error(format("invalid base-32 hash ‘%1%’") % s);
+        if (mul(hash.hash, 32, hash.hashSize) ||
+            add(hash.hash, digit, hash.hashSize))
+            throw Error(format("base-32 hash ‘%1%’ is too large") % s);
+    }
+
+    return hash;
+}
+
+
+Hash parseHash16or32(HashType ht, const string & s)
+{
+    Hash hash(ht);
+    if (s.size() == hash.hashSize * 2)
+        /* hexadecimal representation */
+        hash = parseHash(ht, s);
+    else if (s.size() == hashLength32(hash))
+        /* base-32 representation */
+        hash = parseHash32(ht, s);
+    else
+        throw Error(format("hash ‘%1%’ has wrong length for hash type ‘%2%’")
+            % s % printHashType(ht));
+    return hash;
+}
+
+
+bool isHash(const string & s)
+{
+    if (s.length() != 32) return false;
+    for (int i = 0; i < 32; i++) {
+        char c = s[i];
+        if (!((c >= '0' && c <= '9') ||
+              (c >= 'a' && c <= 'f')))
+            return false;
+    }
+    return true;
+}
+
+
+union Ctx
+{
+    MD5_CTX md5;
+    SHA_CTX sha1;
+    SHA256_CTX sha256;
+};
+
+
+static void start(HashType ht, Ctx & ctx)
+{
+    if (ht == htMD5) MD5_Init(&ctx.md5);
+    else if (ht == htSHA1) SHA1_Init(&ctx.sha1);
+    else if (ht == htSHA256) SHA256_Init(&ctx.sha256);
+}
+
+
+static void update(HashType ht, Ctx & ctx,
+    const unsigned char * bytes, unsigned int len)
+{
+    if (ht == htMD5) MD5_Update(&ctx.md5, bytes, len);
+    else if (ht == htSHA1) SHA1_Update(&ctx.sha1, bytes, len);
+    else if (ht == htSHA256) SHA256_Update(&ctx.sha256, bytes, len);
+}
+
+
+static void finish(HashType ht, Ctx & ctx, unsigned char * hash)
+{
+    if (ht == htMD5) MD5_Final(hash, &ctx.md5);
+    else if (ht == htSHA1) SHA1_Final(hash, &ctx.sha1);
+    else if (ht == htSHA256) SHA256_Final(hash, &ctx.sha256);
+}
+
+
+Hash hashString(HashType ht, const string & s)
+{
+    Ctx ctx;
+    Hash hash(ht);
+    start(ht, ctx);
+    update(ht, ctx, (const unsigned char *) s.data(), s.length());
+    finish(ht, ctx, hash.hash);
+    return hash;
+}
+
+
+Hash hashFile(HashType ht, const Path & path)
+{
+    Ctx ctx;
+    Hash hash(ht);
+    start(ht, ctx);
+
+    AutoCloseFD fd = open(path.c_str(), O_RDONLY);
+    if (fd == -1) throw SysError(format("opening file ‘%1%’") % path);
+
+    unsigned char buf[8192];
+    ssize_t n;
+    while ((n = read(fd, buf, sizeof(buf)))) {
+        checkInterrupt();
+        if (n == -1) throw SysError(format("reading file ‘%1%’") % path);
+        update(ht, ctx, buf, n);
+    }
+    
+    finish(ht, ctx, hash.hash);
+    return hash;
+}
+
+
+HashSink::HashSink(HashType ht) : ht(ht)
+{
+    ctx = new Ctx;
+    bytes = 0;
+    start(ht, *ctx);
+}
+    
+HashSink::~HashSink()
+{
+    bufPos = 0;
+    delete ctx;
+}
+
+void HashSink::write(const unsigned char * data, size_t len)
+{
+    bytes += len;
+    update(ht, *ctx, data, len);
+}
+
+HashResult HashSink::finish()
+{
+    flush();
+    Hash hash(ht);
+    nix::finish(ht, *ctx, hash.hash);
+    return HashResult(hash, bytes);
+}
+
+HashResult HashSink::currentHash()
+{
+    flush();
+    Ctx ctx2 = *ctx;
+    Hash hash(ht);
+    nix::finish(ht, ctx2, hash.hash);
+    return HashResult(hash, bytes);
+}
+
+
+HashResult hashPath(
+    HashType ht, const Path & path, PathFilter & filter)
+{
+    HashSink sink(ht);
+    dumpPath(path, sink, filter);
+    return sink.finish();
+}
+
+
+Hash compressHash(const Hash & hash, unsigned int newSize)
+{
+    Hash h;
+    h.hashSize = newSize;
+    for (unsigned int i = 0; i < hash.hashSize; ++i)
+        h.hash[i % newSize] ^= hash.hash[i];
+    return h;
+}
+
+
+HashType parseHashType(const string & s)
+{
+    if (s == "md5") return htMD5;
+    else if (s == "sha1") return htSHA1;
+    else if (s == "sha256") return htSHA256;
+    else return htUnknown;
+}
+
+ 
+string printHashType(HashType ht)
+{
+    if (ht == htMD5) return "md5";
+    else if (ht == htSHA1) return "sha1";
+    else if (ht == htSHA256) return "sha256";
+    else throw Error("cannot print unknown hash type");
+}
+
+ 
+}
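A small sketch of the representations implemented above: a SHA-256 hash prints as 64 base-16 or 52 base-32 characters, and compressed to 20 bytes (as in store paths) it prints as 32 base-32 characters.

    #include "hash.hh"
    #include <iostream>

    int main()
    {
        using namespace nix;
        Hash h = hashString(htSHA256, "hello");
        std::cout << printHash(h)   << "\n";                     // 64 chars, base-16
        std::cout << printHash32(h) << "\n";                     // 52 chars, base-32
        std::cout << printHash32(compressHash(h, 20)) << "\n";   // 32 chars, base-32
    }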
diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh
new file mode 100644
index 000000000000..2c6f176ec74c
--- /dev/null
+++ b/src/libutil/hash.hh
@@ -0,0 +1,113 @@
+#pragma once
+
+#include "types.hh"
+#include "serialise.hh"
+
+
+namespace nix {
+
+
+typedef enum { htUnknown, htMD5, htSHA1, htSHA256 } HashType;
+
+
+const int md5HashSize = 16;
+const int sha1HashSize = 20;
+const int sha256HashSize = 32;
+
+extern const string base32Chars;
+
+
+struct Hash
+{
+    static const unsigned int maxHashSize = 32;
+    unsigned int hashSize;
+    unsigned char hash[maxHashSize];
+
+    HashType type;
+
+    /* Create an unusable hash object. */
+    Hash();
+
+    /* Create a zero-filled hash object. */
+    Hash(HashType type);
+
+    /* Check whether two hashes are equal. */
+    bool operator == (const Hash & h2) const;
+
+    /* Check whether two hashes are not equal. */
+    bool operator != (const Hash & h2) const;
+
+    /* For sorting. */
+    bool operator < (const Hash & h) const;
+};
+
+
+/* Convert a hash to a hexadecimal representation. */
+string printHash(const Hash & hash);
+
+/* Parse a hexadecimal representation of a hash code. */
+Hash parseHash(HashType ht, const string & s);
+
+/* Returns the length of a base-32 hash representation. */
+unsigned int hashLength32(const Hash & hash);
+
+/* Convert a hash to a base-32 representation. */
+string printHash32(const Hash & hash);
+
+/* Print a hash in base-16 if it's MD5, or base-32 otherwise. */
+string printHash16or32(const Hash & hash);
+
+/* Parse a base-32 representation of a hash code. */
+Hash parseHash32(HashType ht, const string & s);
+
+/* Parse a base-16 or base-32 representation of a hash code. */
+Hash parseHash16or32(HashType ht, const string & s);
+
+/* Verify that the given string is a valid hash code. */
+bool isHash(const string & s);
+
+/* Compute the hash of the given string. */
+Hash hashString(HashType ht, const string & s);
+
+/* Compute the hash of the given file. */
+Hash hashFile(HashType ht, const Path & path);
+
+/* Compute the hash of the given path.  The hash is defined as
+   (essentially) hashString(ht, dumpPath(path)). */
+struct PathFilter;
+extern PathFilter defaultPathFilter;
+typedef std::pair<Hash, unsigned long long> HashResult;
+HashResult hashPath(HashType ht, const Path & path,
+    PathFilter & filter = defaultPathFilter);
+
+/* Compress a hash to the specified number of bytes by cyclically
+   XORing bytes together. */
+Hash compressHash(const Hash & hash, unsigned int newSize);
+
+/* Parse a string representing a hash type. */
+HashType parseHashType(const string & s);
+
+/* And the reverse. */
+string printHashType(HashType ht);
+
+
+union Ctx;
+
+class HashSink : public BufferedSink
+{
+private:
+    HashType ht;
+    Ctx * ctx;
+    unsigned long long bytes;
+
+public:
+    HashSink(HashType ht);
+    HashSink(const HashSink & h);
+    ~HashSink();
+    void write(const unsigned char * data, size_t len);
+    HashResult finish();
+    HashResult currentHash();
+};
+
+
+}
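HashSink hashes data incrementally; finish() returns both the hash and the number of bytes fed in (the HashResult pair). A sketch, assuming the Sink call operator from serialise.hh:

    #include "hash.hh"

    nix::HashResult hashTwoChunks()
    {
        nix::HashSink sink(nix::htSHA256);
        sink((const unsigned char *) "chunk one, ", 11);
        sink((const unsigned char *) "chunk two", 9);
        return sink.finish();   // {sha256 of the 20 bytes, 20}
    }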
diff --git a/src/libutil/local.mk b/src/libutil/local.mk
new file mode 100644
index 000000000000..8af2e78d9ce4
--- /dev/null
+++ b/src/libutil/local.mk
@@ -0,0 +1,15 @@
+libraries += libutil
+
+libutil_NAME = libnixutil
+
+libutil_DIR := $(d)
+
+libutil_SOURCES := $(wildcard $(d)/*.cc)
+
+ifeq ($(HAVE_OPENSSL), 1)
+  libutil_LDFLAGS = $(OPENSSL_LIBS)
+else
+  libutil_SOURCES += $(d)/md5.c $(d)/sha1.c $(d)/sha256.c
+endif
+
+libutil_LIBS = libformat
diff --git a/src/libutil/md32_common.h b/src/libutil/md32_common.h
new file mode 100644
index 000000000000..0cbcfaf8a20b
--- /dev/null
+++ b/src/libutil/md32_common.h
@@ -0,0 +1,620 @@
+/* crypto/md32_common.h */
+/* ====================================================================
+ * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    licensing@OpenSSL.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com).  This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+/*
+ * This is a generic 32 bit "collector" for message digest algorithms.
+ * Whenever needed it collects input character stream into chunks of
+ * 32 bit values and invokes a block function that performs actual hash
+ * calculations.
+ *
+ * Porting guide.
+ *
+ * Obligatory macros:
+ *
+ * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
+ *	this macro defines byte order of input stream.
+ * HASH_CBLOCK
+ *	size of a unit chunk HASH_BLOCK operates on.
+ * HASH_LONG
+ *	has to be at least 32 bit wide, if it's wider, then
+ *	HASH_LONG_LOG2 *has to* be defined along
+ * HASH_CTX
+ *	context structure that at least contains following
+ *	members:
+ *		typedef struct {
+ *			...
+ *			HASH_LONG	Nl,Nh;
+ *			HASH_LONG	data[HASH_LBLOCK];
+ *			unsigned int	num;
+ *			...
+ *			} HASH_CTX;
+ * HASH_UPDATE
+ *	name of "Update" function, implemented here.
+ * HASH_TRANSFORM
+ *	name of "Transform" function, implemented here.
+ * HASH_FINAL
+ *	name of "Final" function, implemented here.
+ * HASH_BLOCK_HOST_ORDER
+ *	name of "block" function treating *aligned* input message
+ *	in host byte order, implemented externally.
+ * HASH_BLOCK_DATA_ORDER
+ *	name of "block" function treating *unaligned* input message
+ *	in original (data) byte order, implemented externally (it
+ *	actually is optional if data and host are of the same
+ *	"endianess").
+ * HASH_MAKE_STRING
+ *	macro converting context variables to an ASCII hash string.
+ *
+ * Optional macros:
+ *
+ * B_ENDIAN or L_ENDIAN
+ *	defines host byte-order.
+ * HASH_LONG_LOG2
+ *	defaults to 2 if not stated otherwise.
+ * HASH_LBLOCK
+ *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
+ * HASH_BLOCK_DATA_ORDER_ALIGNED
+ *	alternative "block" function capable of treating
+ *	aligned input message in original (data) order,
+ *	implemented externally.
+ *
+ * MD5 example:
+ *
+ *	#define DATA_ORDER_IS_LITTLE_ENDIAN
+ *
+ *	#define HASH_LONG		MD5_LONG
+ *	#define HASH_LONG_LOG2		MD5_LONG_LOG2
+ *	#define HASH_CTX		MD5_CTX
+ *	#define HASH_CBLOCK		MD5_CBLOCK
+ *	#define HASH_LBLOCK		MD5_LBLOCK
+ *	#define HASH_UPDATE		MD5_Update
+ *	#define HASH_TRANSFORM		MD5_Transform
+ *	#define HASH_FINAL		MD5_Final
+ *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
+ *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
+ *
+ *					<appro@fy.chalmers.se>
+ */
+
+#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
+#error "DATA_ORDER must be defined!"
+#endif
+
+#ifndef HASH_CBLOCK
+#error "HASH_CBLOCK must be defined!"
+#endif
+#ifndef HASH_LONG
+#error "HASH_LONG must be defined!"
+#endif
+#ifndef HASH_CTX
+#error "HASH_CTX must be defined!"
+#endif
+
+#ifndef HASH_UPDATE
+#error "HASH_UPDATE must be defined!"
+#endif
+#ifndef HASH_TRANSFORM
+#error "HASH_TRANSFORM must be defined!"
+#endif
+#ifndef HASH_FINAL
+#error "HASH_FINAL must be defined!"
+#endif
+
+#ifndef HASH_BLOCK_HOST_ORDER
+#error "HASH_BLOCK_HOST_ORDER must be defined!"
+#endif
+
+#if 0
+/*
+ * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
+ * isn't defined.
+ */
+#ifndef HASH_BLOCK_DATA_ORDER
+#error "HASH_BLOCK_DATA_ORDER must be defined!"
+#endif
+#endif
+
+#ifndef HASH_LBLOCK
+#define HASH_LBLOCK	(HASH_CBLOCK/4)
+#endif
+
+#ifndef HASH_LONG_LOG2
+#define HASH_LONG_LOG2	2
+#endif
+
+/*
+ * Engage compiler specific rotate intrinsic function if available.
+ */
+#undef ROTATE
+#ifndef PEDANTIC
+# if defined(_MSC_VER) || defined(__ICC)
+#  define ROTATE(a,n)	_lrotl(a,n)
+# elif defined(__MWERKS__)
+#  if defined(__POWERPC__)
+#   define ROTATE(a,n)	__rlwinm(a,n,0,31)
+#  elif defined(__MC68K__)
+    /* Motorola specific tweak. <appro@fy.chalmers.se> */
+#   define ROTATE(a,n)	( n<24 ? __rol(a,n) : __ror(a,32-n) )
+#  else
+#   define ROTATE(a,n)	__rol(a,n)
+#  endif
+# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+  /*
+   * Some GNU C inline assembler templates. Note that these are
+   * rotates by *constant* number of bits! But that's exactly
+   * what we need here...
+   * 					<appro@fy.chalmers.se>
+   */
+#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+#   define ROTATE(a,n)	({ register unsigned int ret;	\
+				asm (			\
+				"roll %1,%0"		\
+				: "=r"(ret)		\
+				: "I"(n), "0"(a)	\
+				: "cc");		\
+			   ret;				\
+			})
+#  elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
+#   define ROTATE(a,n)	({ register unsigned int ret;	\
+				asm (			\
+				"rlwinm %0,%1,%2,0,31"	\
+				: "=r"(ret)		\
+				: "r"(a), "I"(n));	\
+			   ret;				\
+			})
+#  endif
+# endif
+#endif /* PEDANTIC */
+
+#if HASH_LONG_LOG2==2	/* Engage only if sizeof(HASH_LONG)== 4 */
+/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
+#ifdef ROTATE
+/* 5 instructions with rotate instruction, else 9 */
+#define REVERSE_FETCH32(a,l)	(					\
+		l=*(const HASH_LONG *)(a),				\
+		((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))	\
+				)
+#else
+/* 6 instructions with rotate instruction, else 8 */
+#define REVERSE_FETCH32(a,l)	(				\
+		l=*(const HASH_LONG *)(a),			\
+		l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),	\
+		ROTATE(l,16)					\
+				)
+/*
+ * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
+ * It's rewritten as above for two reasons:
+ *	- RISCs aren't good at long constants and have to explicitly
+ *	  compose 'em with several (well, usually 2) instructions in a
+ *	  register before performing the actual operation and (as you
+ *	  already realized:-) having same constant should inspire the
+ *	  compiler to permanently allocate the only register for it;
+ *	- most modern CPUs have two ALUs, but usually only one has
+ *	  circuitry for shifts:-( this minor tweak inspires compiler
+ *	  to schedule shift instructions in a better way...
+ *
+ *				<appro@fy.chalmers.se>
+ */
+#endif
+#endif
+
+#ifndef ROTATE
+#define ROTATE(a,n)     (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
+#endif
+
+/*
+ * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
+ * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
+ * and host are of the same "endianness". It's possible to mask
+ * this with blank #define HASH_BLOCK_DATA_ORDER though...
+ *
+ *				<appro@fy.chalmers.se>
+ */
+#if defined(B_ENDIAN)
+#  if defined(DATA_ORDER_IS_BIG_ENDIAN)
+#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
+#      define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
+#    endif
+#  endif
+#elif defined(L_ENDIAN)
+#  if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
+#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
+#      define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
+#    endif
+#  endif
+#endif
+
+#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
+#ifndef HASH_BLOCK_DATA_ORDER
+#error "HASH_BLOCK_DATA_ORDER must be defined!"
+#endif
+#endif
+
+#if defined(DATA_ORDER_IS_BIG_ENDIAN)
+
+#ifndef PEDANTIC
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+    /*
+     * This gives ~30-40% performance improvement in SHA-256 compiled
+     * with gcc [on P4]. Well, first macro to be frank. We can pull
+     * this trick on x86* platforms only, because these CPUs can fetch
+     * unaligned data without raising an exception.
+     */
+#   define HOST_c2l(c,l)	({ unsigned int r=*((const unsigned int *)(c));	\
+				   asm ("bswapl %0":"=r"(r):"0"(r));	\
+				   (c)+=4; (l)=r;			})
+#   define HOST_l2c(l,c)	({ unsigned int r=(l);			\
+				   asm ("bswapl %0":"=r"(r):"0"(r));	\
+				   *((unsigned int *)(c))=r; (c)+=4; r;	})
+#  endif
+# endif
+#endif
+
+#ifndef HOST_c2l
+#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),		\
+			 l|=(((unsigned long)(*((c)++)))<<16),		\
+			 l|=(((unsigned long)(*((c)++)))<< 8),		\
+			 l|=(((unsigned long)(*((c)++)))    ),		\
+			 l)
+#endif
+#define HOST_p_c2l(c,l,n)	{					\
+			switch (n) {					\
+			case 0: l =((unsigned long)(*((c)++)))<<24;	\
+			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
+			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
+			case 3: l|=((unsigned long)(*((c)++)));		\
+				} }
+#define HOST_p_c2l_p(c,l,sc,len) {					\
+			switch (sc) {					\
+			case 0: l =((unsigned long)(*((c)++)))<<24;	\
+				if (--len == 0) break;			\
+			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
+				if (--len == 0) break;			\
+			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
+				} }
+/* NOTE the pointer is not incremented at the end of this */
+#define HOST_c2l_p(c,l,n)	{					\
+			l=0; (c)+=n;					\
+			switch (n) {					\
+			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
+			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
+			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
+				} }
+#ifndef HOST_l2c
+#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
+			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
+			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
+			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
+			 l)
+#endif
+
+#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
+
+#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+  /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
+# define HOST_c2l(c,l)	((l)=*((const unsigned int *)(c)), (c)+=4, l)
+# define HOST_l2c(l,c)	(*((unsigned int *)(c))=(l), (c)+=4, l)
+#endif
+
+#ifndef HOST_c2l
+#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))    ),		\
+			 l|=(((unsigned long)(*((c)++)))<< 8),		\
+			 l|=(((unsigned long)(*((c)++)))<<16),		\
+			 l|=(((unsigned long)(*((c)++)))<<24),		\
+			 l)
+#endif
+#define HOST_p_c2l(c,l,n)	{					\
+			switch (n) {					\
+			case 0: l =((unsigned long)(*((c)++)));		\
+			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
+			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
+			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
+				} }
+#define HOST_p_c2l_p(c,l,sc,len) {					\
+			switch (sc) {					\
+			case 0: l =((unsigned long)(*((c)++)));		\
+				if (--len == 0) break;			\
+			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
+				if (--len == 0) break;			\
+			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
+				} }
+/* NOTE the pointer is not incremented at the end of this */
+#define HOST_c2l_p(c,l,n)	{					\
+			l=0; (c)+=n;					\
+			switch (n) {					\
+			case 3: l =((unsigned long)(*(--(c))))<<16;	\
+			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
+			case 1: l|=((unsigned long)(*(--(c))));		\
+				} }
+#ifndef HOST_l2c
+#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
+			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
+			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
+			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
+			 l)
+#endif
+
+#endif
+
+/*
+ * Time for some action:-)
+ */
+
+int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
+	{
+	const unsigned char *data=data_;
+	register HASH_LONG * p;
+	register HASH_LONG l;
+	size_t sw,sc,ew,ec;
+
+	if (len==0) return 1;
+
+	l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
+	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
+	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
+	if (l < c->Nl) /* overflow */
+		c->Nh++;
+	c->Nh+=(len>>29);	/* might cause compiler warning on 16-bit */
+	c->Nl=l;
+
+	if (c->num != 0)
+		{
+		p=c->data;
+		sw=c->num>>2;
+		sc=c->num&0x03;
+
+		if ((c->num+len) >= HASH_CBLOCK)
+			{
+			l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
+			for (; sw<HASH_LBLOCK; sw++)
+				{
+				HOST_c2l(data,l); p[sw]=l;
+				}
+			HASH_BLOCK_HOST_ORDER (c,p,1);
+			len-=(HASH_CBLOCK-c->num);
+			c->num=0;
+			/* drop through and do the rest */
+			}
+		else
+			{
+			c->num+=(unsigned int)len;
+			if ((sc+len) < 4) /* ugly, add char's to a word */
+				{
+				l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
+				}
+			else
+				{
+				ew=(c->num>>2);
+				ec=(c->num&0x03);
+				if (sc)
+					l=p[sw];
+				HOST_p_c2l(data,l,sc);
+				p[sw++]=l;
+				for (; sw < ew; sw++)
+					{
+					HOST_c2l(data,l); p[sw]=l;
+					}
+				if (ec)
+					{
+					HOST_c2l_p(data,l,ec); p[sw]=l;
+					}
+				}
+			return 1;
+			}
+		}
+
+	sw=len/HASH_CBLOCK;
+	if (sw > 0)
+		{
+#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
+		/*
+		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
+		 * only if sizeof(HASH_LONG)==4.
+		 */
+		if ((((size_t)data)%4) == 0)
+			{
+			/* data is properly aligned so that we can cast it: */
+			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
+			sw*=HASH_CBLOCK;
+			data+=sw;
+			len-=sw;
+			}
+		else
+#if !defined(HASH_BLOCK_DATA_ORDER)
+			while (sw--)
+				{
+				memcpy (p=c->data,data,HASH_CBLOCK);
+				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
+				data+=HASH_CBLOCK;
+				len-=HASH_CBLOCK;
+				}
+#endif
+#endif
+#if defined(HASH_BLOCK_DATA_ORDER)
+			{
+			HASH_BLOCK_DATA_ORDER(c,data,sw);
+			sw*=HASH_CBLOCK;
+			data+=sw;
+			len-=sw;
+			}
+#endif
+		}
+
+	if (len!=0)
+		{
+		p = c->data;
+		c->num = len;
+		ew=len>>2;	/* words to copy */
+		ec=len&0x03;
+		for (; ew; ew--,p++)
+			{
+			HOST_c2l(data,l); *p=l;
+			}
+		HOST_c2l_p(data,l,ec);
+		*p=l;
+		}
+	return 1;
+	}
+
+
+void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
+	{
+#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
+	if ((((size_t)data)%4) == 0)
+		/* data is properly aligned so that we can cast it: */
+		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
+	else
+#if !defined(HASH_BLOCK_DATA_ORDER)
+		{
+		memcpy (c->data,data,HASH_CBLOCK);
+		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
+		}
+#endif
+#endif
+#if defined(HASH_BLOCK_DATA_ORDER)
+	HASH_BLOCK_DATA_ORDER (c,data,1);
+#endif
+	}
+
+
+int HASH_FINAL (unsigned char *md, HASH_CTX *c)
+	{
+	register HASH_LONG *p;
+	register unsigned long l;
+	register int i,j;
+	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
+	const unsigned char *cp=end;
+
+	/* c->num should definitely have room for at least one more byte. */
+	p=c->data;
+	i=c->num>>2;
+	j=c->num&0x03;
+
+#if 0
+	/* purify often complains about the following line as an
+	 * Uninitialized Memory Read.  While this can be true, the
+	 * following p_c2l macro will reset l when that case is true.
+	 * This is because j&0x03 contains the number of 'valid' bytes
+	 * already in p[i].  If and only if j&0x03 == 0, the UMR will
+	 * occur but this is also the only time p_c2l will do
+	 * l= *(cp++) instead of l|= *(cp++)
+	 * Many thanks to Alex Tang <altitude@cic.net> for picking up this
+	 * 'potential bug' */
+#ifdef PURIFY
+	if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
+#endif
+	l=p[i];
+#else
+	l = (j==0) ? 0 : p[i];
+#endif
+	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */
+
+	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
+		{
+		if (i<HASH_LBLOCK) p[i]=0;
+		HASH_BLOCK_HOST_ORDER (c,p,1);
+		i=0;
+		}
+	for (; i<(HASH_LBLOCK-2); i++)
+		p[i]=0;
+
+#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
+	p[HASH_LBLOCK-2]=c->Nh;
+	p[HASH_LBLOCK-1]=c->Nl;
+#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
+	p[HASH_LBLOCK-2]=c->Nl;
+	p[HASH_LBLOCK-1]=c->Nh;
+#endif
+	HASH_BLOCK_HOST_ORDER (c,p,1);
+
+#ifndef HASH_MAKE_STRING
+#error "HASH_MAKE_STRING must be defined!"
+#else
+	HASH_MAKE_STRING(c,md);
+#endif
+
+	c->num=0;
+	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
+	 * but I'm not worried :-)
+	OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
+	 */
+	return 1;
+	}
+
+#ifndef MD32_REG_T
+#define MD32_REG_T long
+/*
+ * This comment was originally written for MD5, which is why it
+ * discusses A-D. But it basically applies to all 32-bit digests,
+ * which is why it was moved to common header file.
+ *
+ * In case you wonder why A-D are declared as long and not
+ * as MD5_LONG. Doing so results in slight performance
+ * boost on LP64 architectures. The catch is we don't
+ * really care if 32 MSBs of a 64-bit register get polluted
+ * with eventual overflows as we *save* only 32 LSBs in
+ * *either* case. Now declaring 'em long excuses the compiler
+ * from keeping 32 MSBs zeroed resulting in 13% performance
+ * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
+ * Well, to be honest it should say that this *prevents* 
+ * performance degradation.
+ *				<appro@fy.chalmers.se>
+ * Apparently there're LP64 compilers that generate better
+ * code if A-D are declared int. Most notably GCC-x86_64
+ * generates better code.
+ *				<appro@fy.chalmers.se>
+ */
+#endif
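
The portable fallbacks above are easier to follow with concrete values. Below is a minimal standalone sketch of what the non-assembler ROTATE and the big-endian HOST_c2l/HOST_l2c macros compute; the helper names (rotl32, load_be32, store_be32) are illustrative and not part of this header.

    #include <cassert>
    #include <cstdint>

    /* Left rotate, equivalent to the fallback ROTATE macro (0 < n < 32). */
    static uint32_t rotl32(uint32_t a, unsigned n)
    {
        return (a << n) | (a >> (32 - n));
    }

    /* Big-endian load/store, equivalent to the fallback HOST_c2l / HOST_l2c
       (minus the pointer advancing). */
    static uint32_t load_be32(const unsigned char *c)
    {
        return ((uint32_t) c[0] << 24) | ((uint32_t) c[1] << 16)
             | ((uint32_t) c[2] << 8)  |  (uint32_t) c[3];
    }

    static void store_be32(uint32_t l, unsigned char *c)
    {
        c[0] = (unsigned char) (l >> 24);
        c[1] = (unsigned char) (l >> 16);
        c[2] = (unsigned char) (l >> 8);
        c[3] = (unsigned char) l;
    }

    int main()
    {
        unsigned char buf[4] = { 0x12, 0x34, 0x56, 0x78 };
        assert(load_be32(buf) == 0x12345678u);
        assert(rotl32(0x80000001u, 1) == 0x00000003u);
        unsigned char out[4];
        store_be32(0x12345678u, out);
        assert(out[0] == 0x12 && out[3] == 0x78);
        return 0;
    }
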
diff --git a/src/libutil/md5.c b/src/libutil/md5.c
new file mode 100644
index 000000000000..b31640cdcced
--- /dev/null
+++ b/src/libutil/md5.c
@@ -0,0 +1,365 @@
+/* Functions to compute MD5 message digest of files or memory blocks
+   according to the definition of MD5 in RFC 1321 from April 1992.
+   Copyright (C) 1995,1996,1997,1999,2000,2001 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Written by Ulrich Drepper <drepper@gnu.ai.mit.edu>, 1995.  */
+
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "md5.h"
+
+
+static md5_uint32 SWAP(md5_uint32 n)
+{
+  static int checked = 0;
+  static int bigendian = 0;
+  static md5_uint32 test;
+
+  if (!checked) {
+    test = 1;
+    if (* (char *) &test == 0)
+      bigendian = 1;
+    checked = 1;
+  }
+
+  if (bigendian)
+    return (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24));
+  else
+    return n;
+}
+
+
+/* This array contains the bytes used to pad the buffer to the next
+   64-byte boundary.  (RFC 1321, 3.1: Step 1)  */
+static const unsigned char fillbuf[64] = { 0x80, 0 /* , 0, 0, ...  */ };
+
+
+/* Initialize structure containing state of computation.
+   (RFC 1321, 3.3: Step 3)  */
+void
+MD5_Init (ctx)
+     struct MD5_CTX *ctx;
+{
+  ctx->A = 0x67452301;
+  ctx->B = 0xefcdab89;
+  ctx->C = 0x98badcfe;
+  ctx->D = 0x10325476;
+
+  ctx->total[0] = ctx->total[1] = 0;
+  ctx->buflen = 0;
+}
+
+/* Put result from CTX in first 16 bytes following RESBUF.  The result
+   must be in little endian byte order.
+
+   IMPORTANT: On some systems it is required that RESBUF is correctly
+   aligned for a 32 bits value.  */
+void *
+md5_read_ctx (ctx, resbuf)
+     const struct MD5_CTX *ctx;
+     void *resbuf;
+{
+  ((md5_uint32 *) resbuf)[0] = SWAP (ctx->A);
+  ((md5_uint32 *) resbuf)[1] = SWAP (ctx->B);
+  ((md5_uint32 *) resbuf)[2] = SWAP (ctx->C);
+  ((md5_uint32 *) resbuf)[3] = SWAP (ctx->D);
+
+  return resbuf;
+}
+
+/* Process the remaining bytes in the internal buffer and the usual
+   prolog according to the standard and write the result to RESBUF.
+
+   IMPORTANT: On some systems it is required that RESBUF is correctly
+   aligned for a 32 bits value.  */
+void *
+MD5_Final (resbuf, ctx)
+     void *resbuf;
+     struct MD5_CTX *ctx;
+{
+  /* Take yet unprocessed bytes into account.  */
+  md5_uint32 bytes = ctx->buflen;
+  size_t pad;
+
+  /* Now count remaining bytes.  */
+  ctx->total[0] += bytes;
+  if (ctx->total[0] < bytes)
+    ++ctx->total[1];
+
+  pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
+  memcpy (&ctx->buffer[bytes], fillbuf, pad);
+
+  /* Put the 64-bit file length in *bits* at the end of the buffer.  */
+  *(md5_uint32 *) &ctx->buffer[bytes + pad] = SWAP (ctx->total[0] << 3);
+  *(md5_uint32 *) &ctx->buffer[bytes + pad + 4] = SWAP ((ctx->total[1] << 3) |
+							(ctx->total[0] >> 29));
+
+  /* Process last bytes.  */
+  md5_process_block (ctx->buffer, bytes + pad + 8, ctx);
+
+  return md5_read_ctx (ctx, resbuf);
+}
+
+void
+MD5_Update (ctx, buffer, len)
+     struct MD5_CTX *ctx;
+     const void *buffer;
+     size_t len;
+{
+  /* When we already have some bits in our internal buffer concatenate
+     both inputs first.  */
+  if (ctx->buflen != 0)
+    {
+      size_t left_over = ctx->buflen;
+      size_t add = 128 - left_over > len ? len : 128 - left_over;
+
+      memcpy (&ctx->buffer[left_over], buffer, add);
+      ctx->buflen += add;
+
+      if (ctx->buflen > 64)
+	{
+	  md5_process_block (ctx->buffer, ctx->buflen & ~63, ctx);
+
+	  ctx->buflen &= 63;
+	  /* The regions in the following copy operation cannot overlap.  */
+	  memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
+		  ctx->buflen);
+	}
+
+      buffer = (const char *) buffer + add;
+      len -= add;
+    }
+
+  /* Process available complete blocks.  */
+  if (len >= 64)
+    {
+#if !_STRING_ARCH_unaligned
+/* To check alignment gcc has an appropriate operator.  Other
+   compilers don't.  */
+# if __GNUC__ >= 2
+#  define UNALIGNED_P(p) (((md5_uintptr) p) % __alignof__ (md5_uint32) != 0)
+# else
+#  define UNALIGNED_P(p) (((md5_uintptr) p) % sizeof (md5_uint32) != 0)
+# endif
+      if (UNALIGNED_P (buffer))
+	while (len > 64)
+	  {
+	    md5_process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx);
+	    buffer = (const char *) buffer + 64;
+	    len -= 64;
+	  }
+      else
+#endif
+	{
+	  md5_process_block (buffer, len & ~63, ctx);
+	  buffer = (const char *) buffer + (len & ~63);
+	  len &= 63;
+	}
+    }
+
+  /* Move remaining bytes in internal buffer.  */
+  if (len > 0)
+    {
+      size_t left_over = ctx->buflen;
+
+      memcpy (&ctx->buffer[left_over], buffer, len);
+      left_over += len;
+      if (left_over >= 64)
+	{
+	  md5_process_block (ctx->buffer, 64, ctx);
+	  left_over -= 64;
+	  memcpy (ctx->buffer, &ctx->buffer[64], left_over);
+	}
+      ctx->buflen = left_over;
+    }
+}
+
+
+/* These are the four functions used in the four steps of the MD5 algorithm
+   and defined in RFC 1321.  The first function is a little bit optimized
+   (as found in Colin Plumb's public domain implementation).  */
+/* #define FF(b, c, d) ((b & c) | (~b & d)) */
+#define FF(b, c, d) (d ^ (b & (c ^ d)))
+#define FG(b, c, d) FF (d, b, c)
+#define FH(b, c, d) (b ^ c ^ d)
+#define FI(b, c, d) (c ^ (b | ~d))
+
+/* Process LEN bytes of BUFFER, accumulating context into CTX.
+   It is assumed that LEN % 64 == 0.  */
+
+void
+md5_process_block (buffer, len, ctx)
+     const void *buffer;
+     size_t len;
+     struct MD5_CTX *ctx;
+{
+  md5_uint32 correct_words[16];
+  const md5_uint32 *words = buffer;
+  size_t nwords = len / sizeof (md5_uint32);
+  const md5_uint32 *endp = words + nwords;
+  md5_uint32 A = ctx->A;
+  md5_uint32 B = ctx->B;
+  md5_uint32 C = ctx->C;
+  md5_uint32 D = ctx->D;
+
+  /* First increment the byte count.  RFC 1321 specifies the possible
+     length of the file up to 2^64 bits.  Here we only compute the
+     number of bytes.  Do a double word increment.  */
+  ctx->total[0] += len;
+  if (ctx->total[0] < len)
+    ++ctx->total[1];
+
+  /* Process all bytes in the buffer with 64 bytes in each round of
+     the loop.  */
+  while (words < endp)
+    {
+      md5_uint32 *cwp = correct_words;
+      md5_uint32 A_save = A;
+      md5_uint32 B_save = B;
+      md5_uint32 C_save = C;
+      md5_uint32 D_save = D;
+
+      /* First round: using the given function, the context and a constant
+	 the next context is computed.  Because the algorithm's processing
+	 unit is a 32-bit word and it is determined to work on words in
+	 little endian byte order we perhaps have to change the byte order
+	 before the computation.  To reduce the work for the next steps
+	 we store the swapped words in the array CORRECT_WORDS.  */
+
+#define OP(a, b, c, d, s, T)						\
+      do								\
+        {								\
+	  a += FF (b, c, d) + (*cwp++ = SWAP (*words)) + T;		\
+	  ++words;							\
+	  CYCLIC (a, s);						\
+	  a += b;							\
+        }								\
+      while (0)
+
+      /* It is unfortunate that C does not provide an operator for
+	 cyclic rotation.  Hope the C compiler is smart enough.  */
+#define CYCLIC(w, s) (w = (w << s) | (w >> (32 - s)))
+
+      /* Before we start, one word about the strange constants.
+	 They are defined in RFC 1321 as
+
+	 T[i] = (int) (4294967296.0 * fabs (sin (i))), i=1..64
+       */
+
+      /* Round 1.  */
+      OP (A, B, C, D,  7, 0xd76aa478);
+      OP (D, A, B, C, 12, 0xe8c7b756);
+      OP (C, D, A, B, 17, 0x242070db);
+      OP (B, C, D, A, 22, 0xc1bdceee);
+      OP (A, B, C, D,  7, 0xf57c0faf);
+      OP (D, A, B, C, 12, 0x4787c62a);
+      OP (C, D, A, B, 17, 0xa8304613);
+      OP (B, C, D, A, 22, 0xfd469501);
+      OP (A, B, C, D,  7, 0x698098d8);
+      OP (D, A, B, C, 12, 0x8b44f7af);
+      OP (C, D, A, B, 17, 0xffff5bb1);
+      OP (B, C, D, A, 22, 0x895cd7be);
+      OP (A, B, C, D,  7, 0x6b901122);
+      OP (D, A, B, C, 12, 0xfd987193);
+      OP (C, D, A, B, 17, 0xa679438e);
+      OP (B, C, D, A, 22, 0x49b40821);
+
+      /* For the second to fourth round we have the possibly swapped words
+	 in CORRECT_WORDS.  Redefine the macro to take an additional first
+	 argument specifying the function to use.  */
+#undef OP
+#define OP(f, a, b, c, d, k, s, T)					\
+      do 								\
+	{								\
+	  a += f (b, c, d) + correct_words[k] + T;			\
+	  CYCLIC (a, s);						\
+	  a += b;							\
+	}								\
+      while (0)
+
+      /* Round 2.  */
+      OP (FG, A, B, C, D,  1,  5, 0xf61e2562);
+      OP (FG, D, A, B, C,  6,  9, 0xc040b340);
+      OP (FG, C, D, A, B, 11, 14, 0x265e5a51);
+      OP (FG, B, C, D, A,  0, 20, 0xe9b6c7aa);
+      OP (FG, A, B, C, D,  5,  5, 0xd62f105d);
+      OP (FG, D, A, B, C, 10,  9, 0x02441453);
+      OP (FG, C, D, A, B, 15, 14, 0xd8a1e681);
+      OP (FG, B, C, D, A,  4, 20, 0xe7d3fbc8);
+      OP (FG, A, B, C, D,  9,  5, 0x21e1cde6);
+      OP (FG, D, A, B, C, 14,  9, 0xc33707d6);
+      OP (FG, C, D, A, B,  3, 14, 0xf4d50d87);
+      OP (FG, B, C, D, A,  8, 20, 0x455a14ed);
+      OP (FG, A, B, C, D, 13,  5, 0xa9e3e905);
+      OP (FG, D, A, B, C,  2,  9, 0xfcefa3f8);
+      OP (FG, C, D, A, B,  7, 14, 0x676f02d9);
+      OP (FG, B, C, D, A, 12, 20, 0x8d2a4c8a);
+
+      /* Round 3.  */
+      OP (FH, A, B, C, D,  5,  4, 0xfffa3942);
+      OP (FH, D, A, B, C,  8, 11, 0x8771f681);
+      OP (FH, C, D, A, B, 11, 16, 0x6d9d6122);
+      OP (FH, B, C, D, A, 14, 23, 0xfde5380c);
+      OP (FH, A, B, C, D,  1,  4, 0xa4beea44);
+      OP (FH, D, A, B, C,  4, 11, 0x4bdecfa9);
+      OP (FH, C, D, A, B,  7, 16, 0xf6bb4b60);
+      OP (FH, B, C, D, A, 10, 23, 0xbebfbc70);
+      OP (FH, A, B, C, D, 13,  4, 0x289b7ec6);
+      OP (FH, D, A, B, C,  0, 11, 0xeaa127fa);
+      OP (FH, C, D, A, B,  3, 16, 0xd4ef3085);
+      OP (FH, B, C, D, A,  6, 23, 0x04881d05);
+      OP (FH, A, B, C, D,  9,  4, 0xd9d4d039);
+      OP (FH, D, A, B, C, 12, 11, 0xe6db99e5);
+      OP (FH, C, D, A, B, 15, 16, 0x1fa27cf8);
+      OP (FH, B, C, D, A,  2, 23, 0xc4ac5665);
+
+      /* Round 4.  */
+      OP (FI, A, B, C, D,  0,  6, 0xf4292244);
+      OP (FI, D, A, B, C,  7, 10, 0x432aff97);
+      OP (FI, C, D, A, B, 14, 15, 0xab9423a7);
+      OP (FI, B, C, D, A,  5, 21, 0xfc93a039);
+      OP (FI, A, B, C, D, 12,  6, 0x655b59c3);
+      OP (FI, D, A, B, C,  3, 10, 0x8f0ccc92);
+      OP (FI, C, D, A, B, 10, 15, 0xffeff47d);
+      OP (FI, B, C, D, A,  1, 21, 0x85845dd1);
+      OP (FI, A, B, C, D,  8,  6, 0x6fa87e4f);
+      OP (FI, D, A, B, C, 15, 10, 0xfe2ce6e0);
+      OP (FI, C, D, A, B,  6, 15, 0xa3014314);
+      OP (FI, B, C, D, A, 13, 21, 0x4e0811a1);
+      OP (FI, A, B, C, D,  4,  6, 0xf7537e82);
+      OP (FI, D, A, B, C, 11, 10, 0xbd3af235);
+      OP (FI, C, D, A, B,  2, 15, 0x2ad7d2bb);
+      OP (FI, B, C, D, A,  9, 21, 0xeb86d391);
+
+      /* Add the starting values of the context.  */
+      A += A_save;
+      B += B_save;
+      C += C_save;
+      D += D_save;
+    }
+
+  /* Put checksum in context given as argument.  */
+  ctx->A = A;
+  ctx->B = B;
+  ctx->C = C;
+  ctx->D = D;
+}
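
The magic numbers in the rounds above are the sine-derived table from RFC 1321: T[i] = floor(2^32 * |sin(i)|), with i in radians. A standalone sketch checking the first four constants against that formula (md5_T is an illustrative helper, not part of this file):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    /* T[i] = floor(2^32 * |sin(i)|), i = 1..64, as cited in the comment above. */
    static uint32_t md5_T(int i)
    {
        return (uint32_t) (uint64_t) (4294967296.0 * std::fabs(std::sin((double) i)));
    }

    int main()
    {
        /* The values used in round 1 of md5_process_block. */
        assert(md5_T(1) == 0xd76aa478u);
        assert(md5_T(2) == 0xe8c7b756u);
        assert(md5_T(3) == 0x242070dbu);
        assert(md5_T(4) == 0xc1bdceeeu);
        printf("T[1..4] match the table\n");
        return 0;
    }
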
diff --git a/src/libutil/md5.h b/src/libutil/md5.h
new file mode 100644
index 000000000000..228d4972320f
--- /dev/null
+++ b/src/libutil/md5.h
@@ -0,0 +1,82 @@
+/* Declaration of functions and data types used for MD5 sum computing
+   library functions.
+   Copyright (C) 1995,1996,1997,1999,2000,2001 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifndef _MD5_H
+#define _MD5_H 1
+
+#include <inttypes.h>
+typedef uint32_t md5_uint32;
+typedef uintptr_t md5_uintptr;
+
+/* Structure to save state of computation between the single steps.  */
+struct MD5_CTX
+{
+  md5_uint32 A;
+  md5_uint32 B;
+  md5_uint32 C;
+  md5_uint32 D;
+
+  md5_uint32 total[2];
+  md5_uint32 buflen;
+  char buffer[128] __attribute__ ((__aligned__ (__alignof__ (md5_uint32))));
+};
+
+/*
+ * The following three functions build up the low-level interface used in
+ * the functions `md5_stream' and `md5_buffer'.
+ */
+
+/* Initialize structure containing state of computation.
+   (RFC 1321, 3.3: Step 3)  */
+extern void MD5_Init (struct MD5_CTX *ctx);
+
+/* Starting with the result of former calls of this function (or the
+   initialization function) update the context for the next LEN bytes
+   starting at BUFFER.
+   It is necessary that LEN is a multiple of 64!!! */
+extern void md5_process_block (const void *buffer, size_t len,
+				      struct MD5_CTX *ctx);
+
+/* Starting with the result of former calls of this function (or the
+   initialization function) update the context for the next LEN bytes
+   starting at BUFFER.
+   It is NOT required that LEN is a multiple of 64.  */
+extern void MD5_Update (struct MD5_CTX *ctx, const void *buffer, size_t len);
+
+/* Process the remaining bytes in the buffer and put result from CTX
+   in first 16 bytes following RESBUF.  The result is always in little
+   endian byte order, so that a byte-wise output yields the wanted
+   ASCII representation of the message digest.
+
+   IMPORTANT: On some systems it is required that RESBUF is correctly
+   aligned for a 32 bits value.  */
+extern void *MD5_Final (void *resbuf, struct MD5_CTX *ctx);
+
+
+/* Put result from CTX in first 16 bytes following RESBUF.  The result is
+   always in little endian byte order, so that a byte-wise output yields
+   the wanted ASCII representation of the message digest.
+
+   IMPORTANT: On some systems it is required that RESBUF is correctly
+   aligned for a 32 bits value.  */
+extern void *md5_read_ctx (const struct MD5_CTX *ctx, void *resbuf);
+
+
+#endif /* md5.h */
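
Putting the declarations together, a minimal caller looks roughly like this; the expected digest in the comment is the RFC 1321 test vector for "abc".

    #include <cstdio>
    #include "md5.h"

    int main()
    {
        struct MD5_CTX ctx;
        unsigned char digest[16];

        MD5_Init(&ctx);
        MD5_Update(&ctx, "abc", 3);
        MD5_Final(digest, &ctx);        /* note: the result buffer comes first */

        /* RFC 1321: MD5("abc") = 900150983cd24fb0d6963f7d28e17f72 */
        for (int i = 0; i < 16; i++)
            printf("%02x", digest[i]);
        printf("\n");
        return 0;
    }
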
diff --git a/src/libutil/monitor-fd.hh b/src/libutil/monitor-fd.hh
new file mode 100644
index 000000000000..72d23fb6934c
--- /dev/null
+++ b/src/libutil/monitor-fd.hh
@@ -0,0 +1,42 @@
+#pragma once
+
+#include <thread>
+#include <atomic>
+
+#include <poll.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <signal.h>
+
+namespace nix {
+
+
+class MonitorFdHup
+{
+private:
+    std::thread thread;
+
+public:
+    MonitorFdHup(int fd)
+    {
+        thread = std::thread([fd]() {
+            /* Wait indefinitely until a POLLHUP occurs. */
+            struct pollfd fds[1];
+            fds[0].fd = fd;
+            fds[0].events = 0;
+            if (poll(fds, 1, -1) == -1) abort(); // can't happen
+            assert(fds[0].revents & POLLHUP);
+            /* We got POLLHUP, so send an INT signal to the main thread. */
+            kill(getpid(), SIGINT);
+        });
+    };
+
+    ~MonitorFdHup()
+    {
+        pthread_cancel(thread.native_handle());
+        thread.join();
+    }
+};
+
+
+}
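
MonitorFdHup relies on poll() reporting POLLHUP even when no events were requested. The following standalone sketch demonstrates just that behaviour on Linux, using a socketpair in place of the daemon's client connection:

    #include <cassert>
    #include <poll.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main()
    {
        /* A connected pair; closing one end hangs up the other. */
        int fds[2];
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) return 1;
        close(fds[1]);

        /* Even with events == 0, poll() still reports POLLHUP. */
        struct pollfd pfd;
        pfd.fd = fds[0];
        pfd.events = 0;
        int n = poll(&pfd, 1, -1);
        assert(n == 1 && (pfd.revents & POLLHUP));

        close(fds[0]);
        return 0;
    }
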
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
new file mode 100644
index 000000000000..92417507508a
--- /dev/null
+++ b/src/libutil/serialise.cc
@@ -0,0 +1,286 @@
+#include "serialise.hh"
+#include "util.hh"
+
+#include <cstring>
+#include <cerrno>
+
+
+namespace nix {
+
+
+BufferedSink::~BufferedSink()
+{
+    /* We can't call flush() here, because a virtual call made from a
+       destructor doesn't dispatch to the derived class's write(). */
+    assert(!bufPos);
+    delete[] buffer;
+}
+
+    
+void BufferedSink::operator () (const unsigned char * data, size_t len)
+{
+    if (!buffer) buffer = new unsigned char[bufSize];
+    
+    while (len) {
+        /* Optimisation: bypass the buffer if the data exceeds the
+           buffer size. */
+        if (bufPos + len >= bufSize) {
+            flush();
+            write(data, len);
+            break;
+        }
+        /* Otherwise, copy the bytes to the buffer.  Flush the buffer
+           when it's full. */
+        size_t n = bufPos + len > bufSize ? bufSize - bufPos : len;
+        memcpy(buffer + bufPos, data, n);
+        data += n; bufPos += n; len -= n;
+        if (bufPos == bufSize) flush();
+    }
+}
+
+
+void BufferedSink::flush()
+{
+    if (bufPos == 0) return;
+    size_t n = bufPos;
+    bufPos = 0; // don't trigger the assert() in ~BufferedSink()
+    write(buffer, n);
+}
+
+
+FdSink::~FdSink()
+{
+    try { flush(); } catch (...) { ignoreException(); }
+}
+
+
+size_t threshold = 256 * 1024 * 1024;
+
+static void warnLargeDump()
+{
+    printMsg(lvlError, "warning: dumping very large path (> 256 MiB); this may run out of memory");
+}
+
+
+void FdSink::write(const unsigned char * data, size_t len)
+{
+    static bool warned = false;
+    if (warn && !warned) {
+        written += len;
+        if (written > threshold) {
+            warnLargeDump();
+            warned = true;
+        }
+    }
+    writeFull(fd, data, len);
+}
+
+
+void Source::operator () (unsigned char * data, size_t len)
+{
+    while (len) {
+        size_t n = read(data, len);
+        data += n; len -= n;
+    }
+}
+
+
+BufferedSource::~BufferedSource()
+{
+    delete[] buffer;
+}
+
+
+size_t BufferedSource::read(unsigned char * data, size_t len)
+{
+    if (!buffer) buffer = new unsigned char[bufSize];
+
+    if (!bufPosIn) bufPosIn = readUnbuffered(buffer, bufSize);
+            
+    /* Copy out the data in the buffer. */
+    size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len;
+    memcpy(data, buffer + bufPosOut, n);
+    bufPosOut += n;
+    if (bufPosIn == bufPosOut) bufPosIn = bufPosOut = 0;
+    return n;
+}
+
+
+bool BufferedSource::hasData()
+{
+    return bufPosOut < bufPosIn;
+}
+
+
+size_t FdSource::readUnbuffered(unsigned char * data, size_t len)
+{
+    ssize_t n;
+    do {
+        checkInterrupt();
+        n = ::read(fd, (char *) data, bufSize);
+    } while (n == -1 && errno == EINTR);
+    if (n == -1) throw SysError("reading from file");
+    if (n == 0) throw EndOfFile("unexpected end-of-file");
+    return n;
+}
+
+
+size_t StringSource::read(unsigned char * data, size_t len)
+{
+    if (pos == s.size()) throw EndOfFile("end of string reached");
+    size_t n = s.copy((char *) data, len, pos);
+    pos += n;
+    return n;
+}
+
+
+void writePadding(size_t len, Sink & sink)
+{
+    if (len % 8) {
+        unsigned char zero[8];
+        memset(zero, 0, sizeof(zero));
+        sink(zero, 8 - (len % 8));
+    }
+}
+
+
+void writeInt(unsigned int n, Sink & sink)
+{
+    unsigned char buf[8];
+    memset(buf, 0, sizeof(buf));
+    buf[0] = n & 0xff;
+    buf[1] = (n >> 8) & 0xff;
+    buf[2] = (n >> 16) & 0xff;
+    buf[3] = (n >> 24) & 0xff;
+    sink(buf, sizeof(buf));
+}
+
+
+void writeLongLong(unsigned long long n, Sink & sink)
+{
+    unsigned char buf[8];
+    buf[0] = n & 0xff;
+    buf[1] = (n >> 8) & 0xff;
+    buf[2] = (n >> 16) & 0xff;
+    buf[3] = (n >> 24) & 0xff;
+    buf[4] = (n >> 32) & 0xff;
+    buf[5] = (n >> 40) & 0xff;
+    buf[6] = (n >> 48) & 0xff;
+    buf[7] = (n >> 56) & 0xff;
+    sink(buf, sizeof(buf));
+}
+
+
+void writeString(const unsigned char * buf, size_t len, Sink & sink)
+{
+    writeInt(len, sink);
+    sink(buf, len);
+    writePadding(len, sink);
+}
+
+
+void writeString(const string & s, Sink & sink)
+{
+    writeString((const unsigned char *) s.data(), s.size(), sink);
+}
+
+
+template<class T> void writeStrings(const T & ss, Sink & sink)
+{
+    writeInt(ss.size(), sink);
+    foreach (typename T::const_iterator, i, ss)
+        writeString(*i, sink);
+}
+
+template void writeStrings(const Paths & ss, Sink & sink);
+template void writeStrings(const PathSet & ss, Sink & sink);
+
+
+void readPadding(size_t len, Source & source)
+{
+    if (len % 8) {
+        unsigned char zero[8];
+        size_t n = 8 - (len % 8);
+        source(zero, n);
+        for (unsigned int i = 0; i < n; i++)
+            if (zero[i]) throw SerialisationError("non-zero padding");
+    }
+}
+
+
+unsigned int readInt(Source & source)
+{
+    unsigned char buf[8];
+    source(buf, sizeof(buf));
+    if (buf[4] || buf[5] || buf[6] || buf[7])
+        throw SerialisationError("implementation cannot deal with > 32-bit integers");
+    return
+        buf[0] |
+        (buf[1] << 8) |
+        (buf[2] << 16) |
+        (buf[3] << 24);
+}
+
+
+unsigned long long readLongLong(Source & source)
+{
+    unsigned char buf[8];
+    source(buf, sizeof(buf));
+    return
+        ((unsigned long long) buf[0]) |
+        ((unsigned long long) buf[1] << 8) |
+        ((unsigned long long) buf[2] << 16) |
+        ((unsigned long long) buf[3] << 24) |
+        ((unsigned long long) buf[4] << 32) |
+        ((unsigned long long) buf[5] << 40) |
+        ((unsigned long long) buf[6] << 48) |
+        ((unsigned long long) buf[7] << 56);
+}
+
+
+size_t readString(unsigned char * buf, size_t max, Source & source)
+{
+    size_t len = readInt(source);
+    if (len > max) throw Error("string is too long");
+    source(buf, len);
+    readPadding(len, source);
+    return len;
+}
+
+ 
+string readString(Source & source)
+{
+    size_t len = readInt(source);
+    unsigned char * buf = new unsigned char[len];
+    AutoDeleteArray<unsigned char> d(buf);
+    source(buf, len);
+    readPadding(len, source);
+    return string((char *) buf, len);
+}
+
+ 
+template<class T> T readStrings(Source & source)
+{
+    unsigned int count = readInt(source);
+    T ss;
+    while (count--)
+        ss.insert(ss.end(), readString(source));
+    return ss;
+}
+
+template Paths readStrings(Source & source);
+template PathSet readStrings(Source & source);
+
+
+void StringSink::operator () (const unsigned char * data, size_t len)
+{
+    static bool warned = false;
+    if (!warned && s.size() > threshold) {
+        warnLargeDump();
+        warned = true;
+    }
+    s.append((const char *) data, len);
+}
+
+
+}
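
Taken together, these helpers define a simple wire format: integers are 8 little-endian bytes, and strings are a length followed by the raw bytes and zero padding up to the next multiple of 8. A quick check of that layout using StringSink (a sketch that assumes the headers from this diff are on the include path):

    #include <cassert>
    #include <cstring>
    #include "serialise.hh"

    using namespace nix;

    int main()
    {
        StringSink sink;
        writeString("foo", sink);

        /* 8-byte little-endian length (3), the bytes of "foo",
           then zero padding up to the next multiple of 8. */
        const unsigned char expected[16] = {
            3, 0, 0, 0, 0, 0, 0, 0,
            'f', 'o', 'o', 0, 0, 0, 0, 0
        };
        assert(sink.s.size() == 16);
        assert(memcmp(sink.s.data(), expected, 16) == 0);
        return 0;
    }
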
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
new file mode 100644
index 000000000000..6a6f028aa652
--- /dev/null
+++ b/src/libutil/serialise.hh
@@ -0,0 +1,132 @@
+#pragma once
+
+#include "types.hh"
+
+
+namespace nix {
+
+
+/* Abstract destination of binary data. */
+struct Sink 
+{
+    virtual ~Sink() { }
+    virtual void operator () (const unsigned char * data, size_t len) = 0;
+};
+
+
+/* A buffered abstract sink. */
+struct BufferedSink : Sink
+{
+    size_t bufSize, bufPos;
+    unsigned char * buffer;
+
+    BufferedSink(size_t bufSize = 32 * 1024)
+        : bufSize(bufSize), bufPos(0), buffer(0) { }
+    ~BufferedSink();
+
+    void operator () (const unsigned char * data, size_t len);
+    
+    void flush();
+    
+    virtual void write(const unsigned char * data, size_t len) = 0;
+};
+
+
+/* Abstract source of binary data. */
+struct Source
+{
+    virtual ~Source() { }
+    
+    /* Store exactly ‘len’ bytes in the buffer pointed to by ‘data’.
+       It blocks until all the requested data is available, or throws
+       an error if it is not going to be available.   */
+    void operator () (unsigned char * data, size_t len);
+
+    /* Store up to ‘len’ bytes in the buffer pointed to by ‘data’, and
+       return the number of bytes stored.  It blocks until at least
+       one byte is available. */
+    virtual size_t read(unsigned char * data, size_t len) = 0;
+};
+
+
+/* A buffered abstract source. */
+struct BufferedSource : Source
+{
+    size_t bufSize, bufPosIn, bufPosOut;
+    unsigned char * buffer;
+
+    BufferedSource(size_t bufSize = 32 * 1024)
+        : bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(0) { }
+    ~BufferedSource();
+    
+    size_t read(unsigned char * data, size_t len);
+    
+    /* Underlying read call, to be overridden. */
+    virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0;
+
+    bool hasData();
+};
+
+
+/* A sink that writes data to a file descriptor. */
+struct FdSink : BufferedSink
+{
+    int fd;
+    bool warn;
+    size_t written;
+
+    FdSink() : fd(-1), warn(false), written(0) { }
+    FdSink(int fd) : fd(fd), warn(false), written(0) { }
+    ~FdSink();
+    
+    void write(const unsigned char * data, size_t len);
+};
+
+
+/* A source that reads data from a file descriptor. */
+struct FdSource : BufferedSource
+{
+    int fd;
+    FdSource() : fd(-1) { }
+    FdSource(int fd) : fd(fd) { }
+    size_t readUnbuffered(unsigned char * data, size_t len);
+};
+
+
+/* A sink that writes data to a string. */
+struct StringSink : Sink
+{
+    string s;
+    void operator () (const unsigned char * data, size_t len);
+};
+
+
+/* A source that reads data from a string. */
+struct StringSource : Source
+{
+    const string & s;
+    size_t pos;
+    StringSource(const string & _s) : s(_s), pos(0) { }
+    size_t read(unsigned char * data, size_t len);    
+};
+
+
+void writePadding(size_t len, Sink & sink);
+void writeInt(unsigned int n, Sink & sink);
+void writeLongLong(unsigned long long n, Sink & sink);
+void writeString(const unsigned char * buf, size_t len, Sink & sink);
+void writeString(const string & s, Sink & sink);
+template<class T> void writeStrings(const T & ss, Sink & sink);
+
+void readPadding(size_t len, Source & source);
+unsigned int readInt(Source & source);
+unsigned long long readLongLong(Source & source);
+size_t readString(unsigned char * buf, size_t max, Source & source);
+string readString(Source & source);
+template<class T> T readStrings(Source & source);
+
+
+MakeError(SerialisationError, Error)
+
+
+}
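
Sink and Source are the two extension points: anything that accepts or produces bytes plugs into the same writeInt/writeString helpers. A minimal custom sink, as an illustrative sketch (CountingSink is not part of the library):

    #include <cstdio>
    #include "serialise.hh"

    using namespace nix;

    /* A trivial Sink that just counts the bytes passing through it. */
    struct CountingSink : Sink
    {
        size_t count;
        CountingSink() : count(0) { }
        void operator () (const unsigned char *, size_t len)
        {
            count += len;
        }
    };

    int main()
    {
        CountingSink sink;
        writeString("hello", sink);   /* 8-byte length + 5 bytes + 3 padding */
        writeInt(42, sink);           /* 8 bytes */
        printf("%zu bytes written\n", sink.count);   /* prints "24 bytes written" */
        return 0;
    }
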
diff --git a/src/libutil/sha1.c b/src/libutil/sha1.c
new file mode 100644
index 000000000000..d9d294d15540
--- /dev/null
+++ b/src/libutil/sha1.c
@@ -0,0 +1,369 @@
+/* $Id$ */
+
+/* sha.c - Implementation of the Secure Hash Algorithm
+ *
+ * Copyright (C) 1995, A.M. Kuchling
+ *
+ * Distribute and use freely; there are no restrictions on further 
+ * dissemination and usage except those imposed by the laws of your 
+ * country of residence.
+ *
+ * Adapted to Pike and some cleanup by Niels Möller.
+ */
+
+/* $Id$ */
+
+/* SHA: NIST's Secure Hash Algorithm */
+
+/* Based on SHA code originally posted to sci.crypt by Peter Gutmann
+   in message <30ajo5$oe8@ccu2.auckland.ac.nz>.
+   Modified to test for endianness on creation of SHA objects by AMK.
+   Also, the original specification of SHA was found to have a weakness
+   by NSA/NIST.  This code implements the fixed version of SHA.
+*/
+
+/* Here's the first paragraph of Peter Gutmann's posting:
+   
+The following is my SHA (FIPS 180) code updated to allow use of the "fixed"
+SHA, thanks to Jim Gillogly and an anonymous contributor for the information on
+what's changed in the new version.  The fix is a simple change which involves
+adding a single rotate in the initial expansion function.  It is unknown
+whether this is an optimal solution to the problem which was discovered in the
+SHA or whether it's simply a bandaid which fixes the problem with a minimum of
+effort (for example the reengineering of a great many Capstone chips).
+*/
+
+#include "sha1.h"
+
+#include <string.h>
+
+void sha_copy(struct SHA_CTX *dest, struct SHA_CTX *src)
+{
+  unsigned int i;
+
+  dest->count_l=src->count_l;
+  dest->count_h=src->count_h;
+  for(i=0; i<SHA_DIGESTLEN; i++)
+    dest->digest[i]=src->digest[i];
+  for(i=0; i < src->index; i++)
+    dest->block[i] = src->block[i];
+  dest->index = src->index;
+}
+
+
+/* The SHA f()-functions.  The f1 and f3 functions can be optimized to
+   save one boolean operation each - thanks to Rich Schroeppel,
+   rcs@cs.arizona.edu for discovering this */
+
+/*#define f1(x,y,z) ( ( x & y ) | ( ~x & z ) )          // Rounds  0-19 */
+#define f1(x,y,z)   ( z ^ ( x & ( y ^ z ) ) )           /* Rounds  0-19 */
+#define f2(x,y,z)   ( x ^ y ^ z )                       /* Rounds 20-39 */
+/*#define f3(x,y,z) ( ( x & y ) | ( x & z ) | ( y & z ) )   // Rounds 40-59 */
+#define f3(x,y,z)   ( ( x & y ) | ( z & ( x | y ) ) )   /* Rounds 40-59 */
+#define f4(x,y,z)   ( x ^ y ^ z )                       /* Rounds 60-79 */
+
+/* The SHA Mysterious Constants */
+
+#define K1  0x5A827999L                                 /* Rounds  0-19 */
+#define K2  0x6ED9EBA1L                                 /* Rounds 20-39 */
+#define K3  0x8F1BBCDCL                                 /* Rounds 40-59 */
+#define K4  0xCA62C1D6L                                 /* Rounds 60-79 */
+
+/* SHA initial values */
+
+#define h0init  0x67452301L
+#define h1init  0xEFCDAB89L
+#define h2init  0x98BADCFEL
+#define h3init  0x10325476L
+#define h4init  0xC3D2E1F0L
+
+/* 32-bit rotate left - kludged with shifts */
+
+#define ROTL(n,X)  ( ( (X) << (n) ) | ( (X) >> ( 32 - (n) ) ) )
+
+/* The initial expanding function.  The hash function is defined over an
+   80-word expanded input array W, where the first 16 are copies of the input
+   data, and the remaining 64 are defined by
+
+        W[ i ] = W[ i - 16 ] ^ W[ i - 14 ] ^ W[ i - 8 ] ^ W[ i - 3 ]
+
+   This implementation generates these values on the fly in a circular
+   buffer - thanks to Colin Plumb, colin@nyx10.cs.du.edu for this
+   optimization.
+
+   The updated SHA changes the expanding function by adding a rotate of 1
+   bit.  Thanks to Jim Gillogly, jim@rand.org, and an anonymous contributor
+   for this information */
+
+#define expand(W,i) ( W[ i & 15 ] = \
+		      ROTL( 1, ( W[ i & 15 ] ^ W[ (i - 14) & 15 ] ^ \
+				 W[ (i - 8) & 15 ] ^ W[ (i - 3) & 15 ] ) ) )
+
+
+/* The prototype SHA sub-round.  The fundamental sub-round is:
+
+        a' = e + ROTL( 5, a ) + f( b, c, d ) + k + data;
+        b' = a;
+        c' = ROTL( 30, b );
+        d' = c;
+        e' = d;
+
+   but this is implemented by unrolling the loop 5 times and renaming the
+   variables ( e, a, b, c, d ) = ( a', b', c', d', e' ) each iteration.
+   This code is then replicated 20 times for each of the 4 functions, using
+   the next 20 values from the W[] array each time */
+
+#define subRound(a, b, c, d, e, f, k, data) \
+    ( e += ROTL( 5, a ) + f( b, c, d ) + k + data, b = ROTL( 30, b ) )
+
+/* Initialize the SHA values */
+
+void SHA1_Init(struct SHA_CTX *ctx)
+{
+  /* Set the h-vars to their initial values */
+  ctx->digest[ 0 ] = h0init;
+  ctx->digest[ 1 ] = h1init;
+  ctx->digest[ 2 ] = h2init;
+  ctx->digest[ 3 ] = h3init;
+  ctx->digest[ 4 ] = h4init;
+
+  /* Initialize bit count */
+  ctx->count_l = ctx->count_h = 0;
+  
+  /* Initialize buffer */
+  ctx->index = 0;
+}
+
+/* Perform the SHA transformation.  Note that this code, like MD5, seems to
+   break some optimizing compilers due to the complexity of the expressions
+   and the size of the basic block.  It may be necessary to split it into
+   sections, e.g. based on the four subrounds
+
+   Note that this function destroys the data area */
+
+static void sha_transform(struct SHA_CTX *ctx, uint32_t *data )
+{
+  uint32_t A, B, C, D, E;     /* Local vars */
+
+  /* Set up first buffer and local data buffer */
+  A = ctx->digest[0];
+  B = ctx->digest[1];
+  C = ctx->digest[2];
+  D = ctx->digest[3];
+  E = ctx->digest[4];
+
+  /* Heavy mangling, in 4 sub-rounds of 20 iterations each. */
+  subRound( A, B, C, D, E, f1, K1, data[ 0] );
+  subRound( E, A, B, C, D, f1, K1, data[ 1] );
+  subRound( D, E, A, B, C, f1, K1, data[ 2] );
+  subRound( C, D, E, A, B, f1, K1, data[ 3] );
+  subRound( B, C, D, E, A, f1, K1, data[ 4] );
+  subRound( A, B, C, D, E, f1, K1, data[ 5] );
+  subRound( E, A, B, C, D, f1, K1, data[ 6] );
+  subRound( D, E, A, B, C, f1, K1, data[ 7] );
+  subRound( C, D, E, A, B, f1, K1, data[ 8] );
+  subRound( B, C, D, E, A, f1, K1, data[ 9] );
+  subRound( A, B, C, D, E, f1, K1, data[10] );
+  subRound( E, A, B, C, D, f1, K1, data[11] );
+  subRound( D, E, A, B, C, f1, K1, data[12] );
+  subRound( C, D, E, A, B, f1, K1, data[13] );
+  subRound( B, C, D, E, A, f1, K1, data[14] );
+  subRound( A, B, C, D, E, f1, K1, data[15] );
+  subRound( E, A, B, C, D, f1, K1, expand( data, 16 ) );
+  subRound( D, E, A, B, C, f1, K1, expand( data, 17 ) );
+  subRound( C, D, E, A, B, f1, K1, expand( data, 18 ) );
+  subRound( B, C, D, E, A, f1, K1, expand( data, 19 ) );
+
+  subRound( A, B, C, D, E, f2, K2, expand( data, 20 ) );
+  subRound( E, A, B, C, D, f2, K2, expand( data, 21 ) );
+  subRound( D, E, A, B, C, f2, K2, expand( data, 22 ) );
+  subRound( C, D, E, A, B, f2, K2, expand( data, 23 ) );
+  subRound( B, C, D, E, A, f2, K2, expand( data, 24 ) );
+  subRound( A, B, C, D, E, f2, K2, expand( data, 25 ) );
+  subRound( E, A, B, C, D, f2, K2, expand( data, 26 ) );
+  subRound( D, E, A, B, C, f2, K2, expand( data, 27 ) );
+  subRound( C, D, E, A, B, f2, K2, expand( data, 28 ) );
+  subRound( B, C, D, E, A, f2, K2, expand( data, 29 ) );
+  subRound( A, B, C, D, E, f2, K2, expand( data, 30 ) );
+  subRound( E, A, B, C, D, f2, K2, expand( data, 31 ) );
+  subRound( D, E, A, B, C, f2, K2, expand( data, 32 ) );
+  subRound( C, D, E, A, B, f2, K2, expand( data, 33 ) );
+  subRound( B, C, D, E, A, f2, K2, expand( data, 34 ) );
+  subRound( A, B, C, D, E, f2, K2, expand( data, 35 ) );
+  subRound( E, A, B, C, D, f2, K2, expand( data, 36 ) );
+  subRound( D, E, A, B, C, f2, K2, expand( data, 37 ) );
+  subRound( C, D, E, A, B, f2, K2, expand( data, 38 ) );
+  subRound( B, C, D, E, A, f2, K2, expand( data, 39 ) );
+
+  subRound( A, B, C, D, E, f3, K3, expand( data, 40 ) );
+  subRound( E, A, B, C, D, f3, K3, expand( data, 41 ) );
+  subRound( D, E, A, B, C, f3, K3, expand( data, 42 ) );
+  subRound( C, D, E, A, B, f3, K3, expand( data, 43 ) );
+  subRound( B, C, D, E, A, f3, K3, expand( data, 44 ) );
+  subRound( A, B, C, D, E, f3, K3, expand( data, 45 ) );
+  subRound( E, A, B, C, D, f3, K3, expand( data, 46 ) );
+  subRound( D, E, A, B, C, f3, K3, expand( data, 47 ) );
+  subRound( C, D, E, A, B, f3, K3, expand( data, 48 ) );
+  subRound( B, C, D, E, A, f3, K3, expand( data, 49 ) );
+  subRound( A, B, C, D, E, f3, K3, expand( data, 50 ) );
+  subRound( E, A, B, C, D, f3, K3, expand( data, 51 ) );
+  subRound( D, E, A, B, C, f3, K3, expand( data, 52 ) );
+  subRound( C, D, E, A, B, f3, K3, expand( data, 53 ) );
+  subRound( B, C, D, E, A, f3, K3, expand( data, 54 ) );
+  subRound( A, B, C, D, E, f3, K3, expand( data, 55 ) );
+  subRound( E, A, B, C, D, f3, K3, expand( data, 56 ) );
+  subRound( D, E, A, B, C, f3, K3, expand( data, 57 ) );
+  subRound( C, D, E, A, B, f3, K3, expand( data, 58 ) );
+  subRound( B, C, D, E, A, f3, K3, expand( data, 59 ) );
+
+  subRound( A, B, C, D, E, f4, K4, expand( data, 60 ) );
+  subRound( E, A, B, C, D, f4, K4, expand( data, 61 ) );
+  subRound( D, E, A, B, C, f4, K4, expand( data, 62 ) );
+  subRound( C, D, E, A, B, f4, K4, expand( data, 63 ) );
+  subRound( B, C, D, E, A, f4, K4, expand( data, 64 ) );
+  subRound( A, B, C, D, E, f4, K4, expand( data, 65 ) );
+  subRound( E, A, B, C, D, f4, K4, expand( data, 66 ) );
+  subRound( D, E, A, B, C, f4, K4, expand( data, 67 ) );
+  subRound( C, D, E, A, B, f4, K4, expand( data, 68 ) );
+  subRound( B, C, D, E, A, f4, K4, expand( data, 69 ) );
+  subRound( A, B, C, D, E, f4, K4, expand( data, 70 ) );
+  subRound( E, A, B, C, D, f4, K4, expand( data, 71 ) );
+  subRound( D, E, A, B, C, f4, K4, expand( data, 72 ) );
+  subRound( C, D, E, A, B, f4, K4, expand( data, 73 ) );
+  subRound( B, C, D, E, A, f4, K4, expand( data, 74 ) );
+  subRound( A, B, C, D, E, f4, K4, expand( data, 75 ) );
+  subRound( E, A, B, C, D, f4, K4, expand( data, 76 ) );
+  subRound( D, E, A, B, C, f4, K4, expand( data, 77 ) );
+  subRound( C, D, E, A, B, f4, K4, expand( data, 78 ) );
+  subRound( B, C, D, E, A, f4, K4, expand( data, 79 ) );
+
+  /* Build message digest */
+  ctx->digest[0] += A;
+  ctx->digest[1] += B;
+  ctx->digest[2] += C;
+  ctx->digest[3] += D;
+  ctx->digest[4] += E;
+}
+
+#if 1
+
+#ifndef EXTRACT_UCHAR
+#define EXTRACT_UCHAR(p)  (*(unsigned char *)(p))
+#endif
+
+#define STRING2INT(s) ((((((EXTRACT_UCHAR(s) << 8)    \
+			 | EXTRACT_UCHAR(s+1)) << 8)  \
+			 | EXTRACT_UCHAR(s+2)) << 8)  \
+			 | EXTRACT_UCHAR(s+3))
+#else
+uint32_t STRING2INT(unsigned char *s)
+{
+  uint32_t r;
+  unsigned int i;
+  
+  for (i = 0, r = 0; i < 4; i++, s++)
+    r = (r << 8) | *s;
+  return r;
+}
+#endif
+
+static void sha_block(struct SHA_CTX *ctx, const unsigned char *block)
+{
+  uint32_t data[SHA_DATALEN];
+  unsigned int i;
+  
+  /* Update block count */
+  if (!++ctx->count_l)
+    ++ctx->count_h;
+
+  /* Endian independent conversion */
+  for (i = 0; i<SHA_DATALEN; i++, block += 4)
+    data[i] = STRING2INT(block);
+
+  sha_transform(ctx, data);
+}
+
+void SHA1_Update(struct SHA_CTX *ctx, const unsigned char *buffer, uint32_t len)
+{
+  if (ctx->index)
+    { /* Try to fill partial block */
+      unsigned left = SHA_DATASIZE - ctx->index;
+      if (len < left)
+	{
+	  memcpy(ctx->block + ctx->index, buffer, len);
+	  ctx->index += len;
+	  return; /* Finished */
+	}
+      else
+	{
+	  memcpy(ctx->block + ctx->index, buffer, left);
+	  sha_block(ctx, ctx->block);
+	  buffer += left;
+	  len -= left;
+	}
+    }
+  while (len >= SHA_DATASIZE)
+    {
+      sha_block(ctx, buffer);
+      buffer += SHA_DATASIZE;
+      len -= SHA_DATASIZE;
+    }
+  if ((ctx->index = len))     /* This assignment is intended */
+    /* Buffer leftovers */
+    memcpy(ctx->block, buffer, len);
+}
+	  
+/* Final wrapup - pad to SHA_DATASIZE-byte boundary with the bit pattern
+   1 0* (64-bit count of bits processed, MSB-first) */
+
+void SHA1_Final(unsigned char *s, struct SHA_CTX *ctx)
+{
+  uint32_t data[SHA_DATALEN];
+  unsigned int i;
+  unsigned int words;
+  
+  i = ctx->index;
+  /* Set the first char of padding to 0x80.  This is safe since there is
+     always at least one byte free */
+  ctx->block[i++] = 0x80;
+
+  /* Fill rest of word */
+  for( ; i & 3; i++)
+    ctx->block[i] = 0;
+
+  /* i is now a multiple of the word size 4 */
+  words = i >> 2;
+  for (i = 0; i < words; i++)
+    data[i] = STRING2INT(ctx->block + 4*i);
+  
+  if (words > (SHA_DATALEN-2))
+    { /* No room for length in this block. Process it and
+       * pad with another one */
+      for (i = words ; i < SHA_DATALEN; i++)
+	data[i] = 0;
+      sha_transform(ctx, data);
+      for (i = 0; i < (SHA_DATALEN-2); i++)
+	data[i] = 0;
+    }
+  else
+    for (i = words ; i < SHA_DATALEN - 2; i++)
+      data[i] = 0;
+  /* There are 512 = 2^9 bits in one block */
+  data[SHA_DATALEN-2] = (ctx->count_h << 9) | (ctx->count_l >> 23);
+  data[SHA_DATALEN-1] = (ctx->count_l << 9) | (ctx->index << 3);
+  sha_transform(ctx, data);
+  sha_digest(ctx, s);
+}
+
+void sha_digest(struct SHA_CTX *ctx, unsigned char *s)
+{
+  unsigned int i;
+
+  for (i = 0; i < SHA_DIGESTLEN; i++)
+    {
+      *s++ =         ctx->digest[i] >> 24;
+      *s++ = 0xff & (ctx->digest[i] >> 16);
+      *s++ = 0xff & (ctx->digest[i] >> 8);
+      *s++ = 0xff &  ctx->digest[i];
+    }
+}
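
The expand() macro above computes the 80-word message schedule in a 16-word circular buffer instead of a flat 80-word array. A standalone sketch showing the two formulations agree (the seed words are arbitrary; rotl1 mirrors ROTL(1, ...)):

    #include <cassert>
    #include <cstdint>

    static uint32_t rotl1(uint32_t x) { return (x << 1) | (x >> 31); }

    int main()
    {
        uint32_t W[80];   /* flat schedule, as written in FIPS 180-1 */
        uint32_t C[16];   /* circular buffer, as used by expand() */

        for (int i = 0; i < 16; i++)
            W[i] = C[i] = 0x01000193u * (uint32_t) (i + 1);   /* arbitrary seed */

        for (int i = 16; i < 80; i++) {
            /* Straightforward expansion... */
            W[i] = rotl1(W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16]);
            /* ...and the in-place version: slot i & 15 currently holds W[i-16]. */
            C[i & 15] = rotl1(C[(i - 3) & 15] ^ C[(i - 8) & 15]
                            ^ C[(i - 14) & 15] ^ C[(i - 16) & 15]);
            assert(C[i & 15] == W[i]);
        }
        return 0;
    }
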
diff --git a/src/libutil/sha1.h b/src/libutil/sha1.h
new file mode 100644
index 000000000000..715040dd48df
--- /dev/null
+++ b/src/libutil/sha1.h
@@ -0,0 +1,28 @@
+#ifndef _SHA_H
+#define _SHA_H
+
+#include <inttypes.h>
+
+/* The SHA block size and message digest sizes, in bytes */
+
+#define SHA_DATASIZE    64
+#define SHA_DATALEN     16
+#define SHA_DIGESTSIZE  20
+#define SHA_DIGESTLEN    5
+/* The structure for storing SHA info */
+
+struct SHA_CTX {
+  uint32_t digest[SHA_DIGESTLEN];  /* Message digest */
+  uint32_t count_l, count_h;       /* 64-bit block count */
+  uint8_t block[SHA_DATASIZE];     /* SHA data buffer */
+  unsigned int index;            /* index into buffer */
+};
+
+void SHA1_Init(struct SHA_CTX *ctx);
+void SHA1_Update(struct SHA_CTX *ctx, const unsigned char *buffer, uint32_t len);
+void SHA1_Final(unsigned char *s, struct SHA_CTX *ctx);
+void sha_digest(struct SHA_CTX *ctx, unsigned char *s);
+void sha_copy(struct SHA_CTX *dest, struct SHA_CTX *src);
+
+
+#endif /* !_SHA_H */
diff --git a/src/libutil/sha256.c b/src/libutil/sha256.c
new file mode 100644
index 000000000000..63ed0ba43011
--- /dev/null
+++ b/src/libutil/sha256.c
@@ -0,0 +1,238 @@
+/* crypto/sha/sha256.c */
+/* ====================================================================
+ * Copyright (c) 2004 The OpenSSL Project.  All rights reserved
+ * according to the OpenSSL license [found in ./md32_common.h].
+ * ====================================================================
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "sha256.h"
+
+int SHA224_Init (SHA256_CTX *c)
+	{
+	c->h[0]=0xc1059ed8UL;	c->h[1]=0x367cd507UL;
+	c->h[2]=0x3070dd17UL;	c->h[3]=0xf70e5939UL;
+	c->h[4]=0xffc00b31UL;	c->h[5]=0x68581511UL;
+	c->h[6]=0x64f98fa7UL;	c->h[7]=0xbefa4fa4UL;
+	c->Nl=0;	c->Nh=0;
+	c->num=0;	c->md_len=SHA224_DIGEST_LENGTH;
+	return 1;
+	}
+
+int SHA256_Init (SHA256_CTX *c)
+	{
+	c->h[0]=0x6a09e667UL;	c->h[1]=0xbb67ae85UL;
+	c->h[2]=0x3c6ef372UL;	c->h[3]=0xa54ff53aUL;
+	c->h[4]=0x510e527fUL;	c->h[5]=0x9b05688cUL;
+	c->h[6]=0x1f83d9abUL;	c->h[7]=0x5be0cd19UL;
+	c->Nl=0;	c->Nh=0;
+	c->num=0;	c->md_len=SHA256_DIGEST_LENGTH;
+	return 1;
+	}
+
+unsigned char *SHA224(const unsigned char *d, size_t n, unsigned char *md)
+	{
+	SHA256_CTX c;
+	static unsigned char m[SHA224_DIGEST_LENGTH];
+
+	if (md == NULL) md=m;
+	SHA224_Init(&c);
+	SHA256_Update(&c,d,n);
+	SHA256_Final(md,&c);
+	return(md);
+	}
+
+unsigned char *SHA256(const unsigned char *d, size_t n, unsigned char *md)
+	{
+	SHA256_CTX c;
+	static unsigned char m[SHA256_DIGEST_LENGTH];
+
+	if (md == NULL) md=m;
+	SHA256_Init(&c);
+	SHA256_Update(&c,d,n);
+	SHA256_Final(md,&c);
+	return(md);
+	}
+
+int SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
+{   return SHA256_Update (c,data,len);   }
+int SHA224_Final (unsigned char *md, SHA256_CTX *c)
+{   return SHA256_Final (md,c);   }
+
+#define	DATA_ORDER_IS_BIG_ENDIAN
+
+#define	HASH_LONG		uint32_t
+#define	HASH_LONG_LOG2		2
+#define	HASH_CTX		SHA256_CTX
+#define	HASH_CBLOCK		SHA_CBLOCK
+#define	HASH_LBLOCK		SHA_LBLOCK
+/*
+ * Note that FIPS180-2 discusses "Truncation of the Hash Function Output."
+ * default: case below covers for it. It's not clear however if it's
+ * permitted to truncate to an amount of bytes not divisible by 4. I bet not,
+ * but if it is, then default: case shall be extended. For reference.
+ * Idea behind separate cases for pre-defined lengths is to let the
+ * compiler decide if it's appropriate to unroll small loops.
+ */
+#define	HASH_MAKE_STRING(c,s)	do {	\
+	unsigned long ll;		\
+	unsigned int  n;		\
+	switch ((c)->md_len)		\
+	{   case SHA224_DIGEST_LENGTH:	\
+		for (n=0;n<SHA224_DIGEST_LENGTH/4;n++)	\
+		{   ll=(c)->h[n]; HOST_l2c(ll,(s));   }	\
+		break;			\
+	    case SHA256_DIGEST_LENGTH:	\
+		for (n=0;n<SHA256_DIGEST_LENGTH/4;n++)	\
+		{   ll=(c)->h[n]; HOST_l2c(ll,(s));   }	\
+		break;			\
+	    default:			\
+		if ((c)->md_len > SHA256_DIGEST_LENGTH)	\
+		    return 0;				\
+		for (n=0;n<(c)->md_len/4;n++)		\
+		{   ll=(c)->h[n]; HOST_l2c(ll,(s));   }	\
+		break;			\
+	}				\
+	} while (0)
+
+#define	HASH_UPDATE		SHA256_Update
+#define	HASH_TRANSFORM		SHA256_Transform
+#define	HASH_FINAL		SHA256_Final
+#define	HASH_BLOCK_HOST_ORDER	sha256_block_host_order
+#define	HASH_BLOCK_DATA_ORDER	sha256_block_data_order
+void sha256_block_host_order (SHA256_CTX *ctx, const void *in, size_t num);
+void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num);
+
+#include "md32_common.h"
+
+static const uint32_t K256[64] = {
+	0x428a2f98UL,0x71374491UL,0xb5c0fbcfUL,0xe9b5dba5UL,
+	0x3956c25bUL,0x59f111f1UL,0x923f82a4UL,0xab1c5ed5UL,
+	0xd807aa98UL,0x12835b01UL,0x243185beUL,0x550c7dc3UL,
+	0x72be5d74UL,0x80deb1feUL,0x9bdc06a7UL,0xc19bf174UL,
+	0xe49b69c1UL,0xefbe4786UL,0x0fc19dc6UL,0x240ca1ccUL,
+	0x2de92c6fUL,0x4a7484aaUL,0x5cb0a9dcUL,0x76f988daUL,
+	0x983e5152UL,0xa831c66dUL,0xb00327c8UL,0xbf597fc7UL,
+	0xc6e00bf3UL,0xd5a79147UL,0x06ca6351UL,0x14292967UL,
+	0x27b70a85UL,0x2e1b2138UL,0x4d2c6dfcUL,0x53380d13UL,
+	0x650a7354UL,0x766a0abbUL,0x81c2c92eUL,0x92722c85UL,
+	0xa2bfe8a1UL,0xa81a664bUL,0xc24b8b70UL,0xc76c51a3UL,
+	0xd192e819UL,0xd6990624UL,0xf40e3585UL,0x106aa070UL,
+	0x19a4c116UL,0x1e376c08UL,0x2748774cUL,0x34b0bcb5UL,
+	0x391c0cb3UL,0x4ed8aa4aUL,0x5b9cca4fUL,0x682e6ff3UL,
+	0x748f82eeUL,0x78a5636fUL,0x84c87814UL,0x8cc70208UL,
+	0x90befffaUL,0xa4506cebUL,0xbef9a3f7UL,0xc67178f2UL };
+
+/*
+ * The FIPS specification refers to right rotations, while our ROTATE macro
+ * is a left one. This is why you might notice that the rotation coefficients
+ * differ from those observed in the FIPS document by 32-N...
+ */
+#define Sigma0(x)	(ROTATE((x),30) ^ ROTATE((x),19) ^ ROTATE((x),10))
+#define Sigma1(x)	(ROTATE((x),26) ^ ROTATE((x),21) ^ ROTATE((x),7))
+#define sigma0(x)	(ROTATE((x),25) ^ ROTATE((x),14) ^ ((x)>>3))
+#define sigma1(x)	(ROTATE((x),15) ^ ROTATE((x),13) ^ ((x)>>10))
+
+#define Ch(x,y,z)	(((x) & (y)) ^ ((~(x)) & (z)))
+#define Maj(x,y,z)	(((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
+#define	ROUND_00_15(i,a,b,c,d,e,f,g,h)		do {	\
+	T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];	\
+	h = Sigma0(a) + Maj(a,b,c);			\
+	d += T1;	h += T1;		} while (0)
+
+#define	ROUND_16_63(i,a,b,c,d,e,f,g,h,X)	do {	\
+	s0 = X[(i+1)&0x0f];	s0 = sigma0(s0);	\
+	s1 = X[(i+14)&0x0f];	s1 = sigma1(s1);	\
+	T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f];	\
+	ROUND_00_15(i,a,b,c,d,e,f,g,h);		} while (0)
+
+static void sha256_block (SHA256_CTX *ctx, const void *in, size_t num, int host)
+	{
+	uint32_t a,b,c,d,e,f,g,h,s0,s1,T1;
+	uint32_t	X[16];
+	int i;
+	const unsigned char *data=in;
+
+			while (num--) {
+
+	a = ctx->h[0];	b = ctx->h[1];	c = ctx->h[2];	d = ctx->h[3];
+	e = ctx->h[4];	f = ctx->h[5];	g = ctx->h[6];	h = ctx->h[7];
+
+	if (host)
+		{
+		const uint32_t *W=(const uint32_t *)data;
+
+		T1 = X[0] = W[0];	ROUND_00_15(0,a,b,c,d,e,f,g,h);
+		T1 = X[1] = W[1];	ROUND_00_15(1,h,a,b,c,d,e,f,g);
+		T1 = X[2] = W[2];	ROUND_00_15(2,g,h,a,b,c,d,e,f);
+		T1 = X[3] = W[3];	ROUND_00_15(3,f,g,h,a,b,c,d,e);
+		T1 = X[4] = W[4];	ROUND_00_15(4,e,f,g,h,a,b,c,d);
+		T1 = X[5] = W[5];	ROUND_00_15(5,d,e,f,g,h,a,b,c);
+		T1 = X[6] = W[6];	ROUND_00_15(6,c,d,e,f,g,h,a,b);
+		T1 = X[7] = W[7];	ROUND_00_15(7,b,c,d,e,f,g,h,a);
+		T1 = X[8] = W[8];	ROUND_00_15(8,a,b,c,d,e,f,g,h);
+		T1 = X[9] = W[9];	ROUND_00_15(9,h,a,b,c,d,e,f,g);
+		T1 = X[10] = W[10];	ROUND_00_15(10,g,h,a,b,c,d,e,f);
+		T1 = X[11] = W[11];	ROUND_00_15(11,f,g,h,a,b,c,d,e);
+		T1 = X[12] = W[12];	ROUND_00_15(12,e,f,g,h,a,b,c,d);
+		T1 = X[13] = W[13];	ROUND_00_15(13,d,e,f,g,h,a,b,c);
+		T1 = X[14] = W[14];	ROUND_00_15(14,c,d,e,f,g,h,a,b);
+		T1 = X[15] = W[15];	ROUND_00_15(15,b,c,d,e,f,g,h,a);
+
+		data += SHA256_CBLOCK;
+		}
+	else
+		{
+		uint32_t l;
+
+		HOST_c2l(data,l); T1 = X[0] = l;  ROUND_00_15(0,a,b,c,d,e,f,g,h);
+		HOST_c2l(data,l); T1 = X[1] = l;  ROUND_00_15(1,h,a,b,c,d,e,f,g);
+		HOST_c2l(data,l); T1 = X[2] = l;  ROUND_00_15(2,g,h,a,b,c,d,e,f);
+		HOST_c2l(data,l); T1 = X[3] = l;  ROUND_00_15(3,f,g,h,a,b,c,d,e);
+		HOST_c2l(data,l); T1 = X[4] = l;  ROUND_00_15(4,e,f,g,h,a,b,c,d);
+		HOST_c2l(data,l); T1 = X[5] = l;  ROUND_00_15(5,d,e,f,g,h,a,b,c);
+		HOST_c2l(data,l); T1 = X[6] = l;  ROUND_00_15(6,c,d,e,f,g,h,a,b);
+		HOST_c2l(data,l); T1 = X[7] = l;  ROUND_00_15(7,b,c,d,e,f,g,h,a);
+		HOST_c2l(data,l); T1 = X[8] = l;  ROUND_00_15(8,a,b,c,d,e,f,g,h);
+		HOST_c2l(data,l); T1 = X[9] = l;  ROUND_00_15(9,h,a,b,c,d,e,f,g);
+		HOST_c2l(data,l); T1 = X[10] = l; ROUND_00_15(10,g,h,a,b,c,d,e,f);
+		HOST_c2l(data,l); T1 = X[11] = l; ROUND_00_15(11,f,g,h,a,b,c,d,e);
+		HOST_c2l(data,l); T1 = X[12] = l; ROUND_00_15(12,e,f,g,h,a,b,c,d);
+		HOST_c2l(data,l); T1 = X[13] = l; ROUND_00_15(13,d,e,f,g,h,a,b,c);
+		HOST_c2l(data,l); T1 = X[14] = l; ROUND_00_15(14,c,d,e,f,g,h,a,b);
+		HOST_c2l(data,l); T1 = X[15] = l; ROUND_00_15(15,b,c,d,e,f,g,h,a);
+		}
+
+	for (i=16;i<64;i+=8)
+		{
+		ROUND_16_63(i+0,a,b,c,d,e,f,g,h,X);
+		ROUND_16_63(i+1,h,a,b,c,d,e,f,g,X);
+		ROUND_16_63(i+2,g,h,a,b,c,d,e,f,X);
+		ROUND_16_63(i+3,f,g,h,a,b,c,d,e,X);
+		ROUND_16_63(i+4,e,f,g,h,a,b,c,d,X);
+		ROUND_16_63(i+5,d,e,f,g,h,a,b,c,X);
+		ROUND_16_63(i+6,c,d,e,f,g,h,a,b,X);
+		ROUND_16_63(i+7,b,c,d,e,f,g,h,a,X);
+		}
+
+	ctx->h[0] += a;	ctx->h[1] += b;	ctx->h[2] += c;	ctx->h[3] += d;
+	ctx->h[4] += e;	ctx->h[5] += f;	ctx->h[6] += g;	ctx->h[7] += h;
+
+			}
+	}
+
+/*
+ * The idea is to trade a couple of cycles for some space.  On IA-32 we
+ * save about 4K in the "big footprint" case; in the "small footprint"
+ * case any gain is appreciated :-)
+ */
+void HASH_BLOCK_HOST_ORDER (SHA256_CTX *ctx, const void *in, size_t num)
+{   sha256_block (ctx,in,num,1);   }
+
+void HASH_BLOCK_DATA_ORDER (SHA256_CTX *ctx, const void *in, size_t num)
+{   sha256_block (ctx,in,num,0);   }
+
+
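As an aside on the rotation macros above: because ROTATE is a left rotation, each coefficient is 32 minus the corresponding FIPS 180-2 right-rotation amount. A minimal stand-alone check of that equivalence (editorial illustration only, not part of this patch; rotl32/rotr32 are hypothetical helpers):

#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }
static uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

int main(void)
{
    uint32_t x = 0x428a2f98UL;
    /* FIPS Sigma0 uses ROTR 2,13,22; the Sigma0 macro above uses left rotations 30,19,10. */
    assert((rotl32(x, 30) ^ rotl32(x, 19) ^ rotl32(x, 10))
        == (rotr32(x,  2) ^ rotr32(x, 13) ^ rotr32(x, 22)));
    return 0;
}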
diff --git a/src/libutil/sha256.h b/src/libutil/sha256.h
new file mode 100644
index 000000000000..0686b84f0e08
--- /dev/null
+++ b/src/libutil/sha256.h
@@ -0,0 +1,35 @@
+#ifndef _SHA256_H
+#define _SHA256_H 1
+
+#include <inttypes.h>
+
+#define SHA_LBLOCK	16
+#define SHA_CBLOCK	(SHA_LBLOCK*4)	/* SHA treats input data as a
+					 * contiguous array of 32 bit
+					 * wide big-endian values. */
+
+#define SHA256_CBLOCK	(SHA_LBLOCK*4)	/* SHA-256 treats input data as a
+					 * contiguous array of 32 bit
+					 * wide big-endian values. */
+#define SHA224_DIGEST_LENGTH	28
+#define SHA256_DIGEST_LENGTH	32
+
+typedef struct SHA256state_st
+	{
+	uint32_t h[8];
+	uint32_t Nl,Nh;
+	uint32_t data[SHA_LBLOCK];
+	unsigned int num,md_len;
+	} SHA256_CTX;
+
+int SHA224_Init(SHA256_CTX *c);
+int SHA224_Update(SHA256_CTX *c, const void *data, size_t len);
+int SHA224_Final(unsigned char *md, SHA256_CTX *c);
+unsigned char *SHA224(const unsigned char *d, size_t n,unsigned char *md);
+int SHA256_Init(SHA256_CTX *c);
+int SHA256_Update(SHA256_CTX *c, const void *data, size_t len);
+int SHA256_Final(unsigned char *md, SHA256_CTX *c);
+unsigned char *SHA256(const unsigned char *d, size_t n,unsigned char *md);
+void SHA256_Transform(SHA256_CTX *c, const unsigned char *data);
+
+#endif
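For orientation, a minimal usage sketch of the API declared above (editorial illustration only, not part of this patch): hash a buffer incrementally and print the digest in hex.

#include <stdio.h>
#include "sha256.h"

int main(void)
{
    unsigned char md[SHA256_DIGEST_LENGTH];
    SHA256_CTX ctx;
    SHA256_Init(&ctx);
    SHA256_Update(&ctx, "hello ", 6);
    SHA256_Update(&ctx, "world", 5);
    SHA256_Final(md, &ctx);
    for (int i = 0; i < SHA256_DIGEST_LENGTH; i++)
        printf("%02x", md[i]);
    printf("\n");
    return 0;
}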
diff --git a/src/libutil/types.hh b/src/libutil/types.hh
new file mode 100644
index 000000000000..906a959e3079
--- /dev/null
+++ b/src/libutil/types.hh
@@ -0,0 +1,86 @@
+#pragma once
+
+#include "config.h"
+
+#include <string>
+#include <list>
+#include <set>
+
+#include <boost/format.hpp>
+
+
+namespace nix {
+
+
+/* Inherit some names from other namespaces for convenience. */
+using std::string;
+using std::list;
+using std::set;
+using std::vector;
+using boost::format;
+
+
+struct FormatOrString
+{
+    string s;
+    FormatOrString(const string & s) : s(s) { };
+    FormatOrString(const format & f) : s(f.str()) { };
+    FormatOrString(const char * s) : s(s) { };
+};
+
+
+/* BaseError should generally not be caught, as it has Interrupted as
+   a subclass. Catch Error instead. */
+class BaseError : public std::exception
+{
+protected:
+    string prefix_; // used for location traces etc.
+    string err;
+public:
+    unsigned int status; // exit status
+    BaseError(const FormatOrString & fs, unsigned int status = 1);
+    ~BaseError() throw () { };
+    const char * what() const throw () { return err.c_str(); }
+    const string & msg() const { return err; }
+    const string & prefix() const { return prefix_; }
+    BaseError & addPrefix(const FormatOrString & fs);
+};
+
+#define MakeError(newClass, superClass) \
+    class newClass : public superClass                  \
+    {                                                   \
+    public:                                             \
+        newClass(const FormatOrString & fs, unsigned int status = 1) : superClass(fs, status) { }; \
+    };
+
+MakeError(Error, BaseError)
+
+class SysError : public Error
+{
+public:
+    int errNo;
+    SysError(const FormatOrString & fs);
+};
+
+
+typedef list<string> Strings;
+typedef set<string> StringSet;
+
+
+/* Paths are just strings. */
+typedef string Path;
+typedef list<Path> Paths;
+typedef set<Path> PathSet;
+
+
+typedef enum {
+    lvlError = 0,
+    lvlInfo,
+    lvlTalkative,
+    lvlChatty,
+    lvlDebug,
+    lvlVomit
+} Verbosity;
+
+
+}
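A brief sketch of how these types are typically used elsewhere in this patch (editorial illustration only, not part of the patch; it assumes it is built and linked against libutil inside this source tree, since BaseError's constructor lives in util.cc): boost::format interpolation feeds FormatOrString, and a MakeError-defined class is thrown and caught as Error.

#include <iostream>
#include "types.hh"

using namespace nix;

MakeError(DemoError, Error)  // hypothetical error class for illustration

int main()
{
    try {
        throw DemoError(format("demo value: %1%") % 42);
    } catch (Error & e) {
        std::cerr << "caught: " << e.what() << std::endl;
    }
    return 0;
}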
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
new file mode 100644
index 000000000000..825748792a9a
--- /dev/null
+++ b/src/libutil/util.cc
@@ -0,0 +1,1144 @@
+#include "config.h"
+
+#include "util.hh"
+#include "affinity.hh"
+
+#include <iostream>
+#include <cerrno>
+#include <cstdio>
+#include <cstdlib>
+#include <sstream>
+#include <cstring>
+
+#include <sys/wait.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <limits.h>
+
+#ifdef __APPLE__
+#include <sys/syscall.h>
+#endif
+
+
+extern char * * environ;
+
+
+namespace nix {
+
+
+BaseError::BaseError(const FormatOrString & fs, unsigned int status)
+    : status(status)
+{
+    err = fs.s;
+}
+
+
+BaseError & BaseError::addPrefix(const FormatOrString & fs)
+{
+    prefix_ = fs.s + prefix_;
+    return *this;
+}
+
+
+SysError::SysError(const FormatOrString & fs)
+    : Error(format("%1%: %2%") % fs.s % strerror(errno))
+    , errNo(errno)
+{
+}
+
+
+string getEnv(const string & key, const string & def)
+{
+    char * value = getenv(key.c_str());
+    return value ? string(value) : def;
+}
+
+
+Path absPath(Path path, Path dir)
+{
+    if (path[0] != '/') {
+        if (dir == "") {
+#ifdef __GNU__
+            /* GNU (aka. GNU/Hurd) doesn't have any limitation on path
+               lengths and doesn't define `PATH_MAX'.  */
+            char *buf = getcwd(NULL, 0);
+            if (buf == NULL)
+#else
+            char buf[PATH_MAX];
+            if (!getcwd(buf, sizeof(buf)))
+#endif
+                throw SysError("cannot get cwd");
+            dir = buf;
+#ifdef __GNU__
+            free(buf);
+#endif
+        }
+        path = dir + "/" + path;
+    }
+    return canonPath(path);
+}
+
+
+Path canonPath(const Path & path, bool resolveSymlinks)
+{
+    string s;
+
+    if (path[0] != '/')
+        throw Error(format("not an absolute path: ‘%1%’") % path);
+
+    string::const_iterator i = path.begin(), end = path.end();
+    string temp;
+
+    /* Count the number of times we follow a symlink and stop at some
+       arbitrary (but high) limit to prevent infinite loops. */
+    unsigned int followCount = 0, maxFollow = 1024;
+
+    while (1) {
+
+        /* Skip slashes. */
+        while (i != end && *i == '/') i++;
+        if (i == end) break;
+
+        /* Ignore `.'. */
+        if (*i == '.' && (i + 1 == end || i[1] == '/'))
+            i++;
+
+        /* If `..', delete the last component. */
+        else if (*i == '.' && i + 1 < end && i[1] == '.' &&
+            (i + 2 == end || i[2] == '/'))
+        {
+            if (!s.empty()) s.erase(s.rfind('/'));
+            i += 2;
+        }
+
+        /* Normal component; copy it. */
+        else {
+            s += '/';
+            while (i != end && *i != '/') s += *i++;
+
+            /* If s points to a symlink, resolve it and restart (since
+               the symlink target might contain new symlinks). */
+            if (resolveSymlinks && isLink(s)) {
+                if (++followCount >= maxFollow)
+                    throw Error(format("infinite symlink recursion in path ‘%1%’") % path);
+                temp = absPath(readLink(s), dirOf(s))
+                    + string(i, end);
+                i = temp.begin(); /* restart */
+                end = temp.end();
+                s = "";
+                /* !!! potential for infinite loop */
+            }
+        }
+    }
+
+    return s.empty() ? "/" : s;
+}
+
+
+Path dirOf(const Path & path)
+{
+    Path::size_type pos = path.rfind('/');
+    if (pos == string::npos)
+        throw Error(format("invalid file name ‘%1%’") % path);
+    return pos == 0 ? "/" : Path(path, 0, pos);
+}
+
+
+string baseNameOf(const Path & path)
+{
+    Path::size_type pos = path.rfind('/');
+    if (pos == string::npos)
+        throw Error(format("invalid file name ‘%1%’") % path);
+    return string(path, pos + 1);
+}
+
+
+bool isInDir(const Path & path, const Path & dir)
+{
+    return path[0] == '/'
+        && string(path, 0, dir.size()) == dir
+        && path.size() >= dir.size() + 2
+        && path[dir.size()] == '/';
+}
+
+
+struct stat lstat(const Path & path)
+{
+    struct stat st;
+    if (lstat(path.c_str(), &st))
+        throw SysError(format("getting status of ‘%1%’") % path);
+    return st;
+}
+
+
+bool pathExists(const Path & path)
+{
+    int res;
+    struct stat st;
+    res = lstat(path.c_str(), &st);
+    if (!res) return true;
+    if (errno != ENOENT && errno != ENOTDIR)
+        throw SysError(format("getting status of %1%") % path);
+    return false;
+}
+
+
+Path readLink(const Path & path)
+{
+    checkInterrupt();
+    struct stat st = lstat(path);
+    if (!S_ISLNK(st.st_mode))
+        throw Error(format("‘%1%’ is not a symlink") % path);
+    char buf[st.st_size];
+    if (readlink(path.c_str(), buf, st.st_size) != st.st_size)
+        throw SysError(format("reading symbolic link ‘%1%’") % path);
+    return string(buf, st.st_size);
+}
+
+
+bool isLink(const Path & path)
+{
+    struct stat st = lstat(path);
+    return S_ISLNK(st.st_mode);
+}
+
+
+DirEntries readDirectory(const Path & path)
+{
+    DirEntries entries;
+    entries.reserve(64);
+
+    AutoCloseDir dir = opendir(path.c_str());
+    if (!dir) throw SysError(format("opening directory ‘%1%’") % path);
+
+    struct dirent * dirent;
+    while (errno = 0, dirent = readdir(dir)) { /* sic */
+        checkInterrupt();
+        string name = dirent->d_name;
+        if (name == "." || name == "..") continue;
+        entries.emplace_back(name, dirent->d_ino, dirent->d_type);
+    }
+    if (errno) throw SysError(format("reading directory ‘%1%’") % path);
+
+    return entries;
+}
+
+
+string readFile(int fd)
+{
+    struct stat st;
+    if (fstat(fd, &st) == -1)
+        throw SysError("statting file");
+
+    unsigned char * buf = new unsigned char[st.st_size];
+    AutoDeleteArray<unsigned char> d(buf);
+    readFull(fd, buf, st.st_size);
+
+    return string((char *) buf, st.st_size);
+}
+
+
+string readFile(const Path & path, bool drain)
+{
+    AutoCloseFD fd = open(path.c_str(), O_RDONLY);
+    if (fd == -1)
+        throw SysError(format("opening file ‘%1%’") % path);
+    return drain ? drainFD(fd) : readFile(fd);
+}
+
+
+void writeFile(const Path & path, const string & s)
+{
+    AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 0666);
+    if (fd == -1)
+        throw SysError(format("opening file ‘%1%’") % path);
+    writeFull(fd, (unsigned char *) s.data(), s.size());
+}
+
+
+string readLine(int fd)
+{
+    string s;
+    while (1) {
+        checkInterrupt();
+        char ch;
+        ssize_t rd = read(fd, &ch, 1);
+        if (rd == -1) {
+            if (errno != EINTR)
+                throw SysError("reading a line");
+        } else if (rd == 0)
+            throw EndOfFile("unexpected EOF reading a line");
+        else {
+            if (ch == '\n') return s;
+            s += ch;
+        }
+    }
+}
+
+
+void writeLine(int fd, string s)
+{
+    s += '\n';
+    writeFull(fd, (const unsigned char *) s.data(), s.size());
+}
+
+
+static void _deletePath(const Path & path, unsigned long long & bytesFreed)
+{
+    checkInterrupt();
+
+    printMsg(lvlVomit, format("%1%") % path);
+
+    struct stat st = lstat(path);
+
+    if (!S_ISDIR(st.st_mode) && st.st_nlink == 1)
+        bytesFreed += st.st_blocks * 512;
+
+    if (S_ISDIR(st.st_mode)) {
+        /* Make the directory writable. */
+        if (!(st.st_mode & S_IWUSR)) {
+            if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
+                throw SysError(format("making ‘%1%’ writable") % path);
+        }
+
+        for (auto & i : readDirectory(path))
+            _deletePath(path + "/" + i.name, bytesFreed);
+    }
+
+    if (remove(path.c_str()) == -1)
+        throw SysError(format("cannot unlink ‘%1%’") % path);
+}
+
+
+void deletePath(const Path & path)
+{
+    unsigned long long dummy;
+    deletePath(path, dummy);
+}
+
+
+void deletePath(const Path & path, unsigned long long & bytesFreed)
+{
+    startNest(nest, lvlDebug,
+        format("recursively deleting path ‘%1%’") % path);
+    bytesFreed = 0;
+    _deletePath(path, bytesFreed);
+}
+
+
+static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
+    int & counter)
+{
+    tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR", "/tmp") : tmpRoot, true);
+    if (includePid)
+        return (format("%1%/%2%-%3%-%4%") % tmpRoot % prefix % getpid() % counter++).str();
+    else
+        return (format("%1%/%2%-%3%") % tmpRoot % prefix % counter++).str();
+}
+
+
+Path createTempDir(const Path & tmpRoot, const Path & prefix,
+    bool includePid, bool useGlobalCounter, mode_t mode)
+{
+    static int globalCounter = 0;
+    int localCounter = 0;
+    int & counter(useGlobalCounter ? globalCounter : localCounter);
+
+    while (1) {
+        checkInterrupt();
+        Path tmpDir = tempName(tmpRoot, prefix, includePid, counter);
+        if (mkdir(tmpDir.c_str(), mode) == 0) {
+            /* Explicitly set the group of the directory.  This is to
+               work around problems caused by BSD's group
+               ownership semantics (directories inherit the group of
+               the parent).  For instance, the group of /tmp on
+               FreeBSD is "wheel", so all directories created in /tmp
+               will be owned by "wheel"; but if the user is not in
+               "wheel", then "tar" will fail to unpack archives that
+               have the setgid bit set on directories. */
+            if (chown(tmpDir.c_str(), (uid_t) -1, getegid()) != 0)
+                throw SysError(format("setting group of directory ‘%1%’") % tmpDir);
+            return tmpDir;
+        }
+        if (errno != EEXIST)
+            throw SysError(format("creating directory ‘%1%’") % tmpDir);
+    }
+}
+
+
+Paths createDirs(const Path & path)
+{
+    Paths created;
+    if (path == "/") return created;
+
+    struct stat st;
+    if (lstat(path.c_str(), &st) == -1) {
+        created = createDirs(dirOf(path));
+        if (mkdir(path.c_str(), 0777) == -1 && errno != EEXIST)
+            throw SysError(format("creating directory ‘%1%’") % path);
+        st = lstat(path);
+        created.push_back(path);
+    }
+
+    if (!S_ISDIR(st.st_mode)) throw Error(format("‘%1%’ is not a directory") % path);
+
+    return created;
+}
+
+
+void createSymlink(const Path & target, const Path & link)
+{
+    if (symlink(target.c_str(), link.c_str()))
+        throw SysError(format("creating symlink from ‘%1%’ to ‘%2%’") % link % target);
+}
+
+
+LogType logType = ltPretty;
+Verbosity verbosity = lvlInfo;
+
+static int nestingLevel = 0;
+
+
+Nest::Nest()
+{
+    nest = false;
+}
+
+
+Nest::~Nest()
+{
+    close();
+}
+
+
+static string escVerbosity(Verbosity level)
+{
+    return int2String((int) level);
+}
+
+
+void Nest::open(Verbosity level, const FormatOrString & fs)
+{
+    if (level <= verbosity) {
+        if (logType == ltEscapes)
+            std::cerr << "\033[" << escVerbosity(level) << "p"
+                      << fs.s << "\n";
+        else
+            printMsg_(level, fs);
+        nest = true;
+        nestingLevel++;
+    }
+}
+
+
+void Nest::close()
+{
+    if (nest) {
+        nestingLevel--;
+        if (logType == ltEscapes)
+            std::cerr << "\033[q";
+        nest = false;
+    }
+}
+
+
+void printMsg_(Verbosity level, const FormatOrString & fs)
+{
+    checkInterrupt();
+    if (level > verbosity) return;
+    string prefix;
+    if (logType == ltPretty)
+        for (int i = 0; i < nestingLevel; i++)
+            prefix += "|   ";
+    else if (logType == ltEscapes && level != lvlInfo)
+        prefix = "\033[" + escVerbosity(level) + "s";
+    string s = (format("%1%%2%\n") % prefix % fs.s).str();
+    if (!isatty(STDERR_FILENO)) s = filterANSIEscapes(s);
+    writeToStderr(s);
+}
+
+
+void warnOnce(bool & haveWarned, const FormatOrString & fs)
+{
+    if (!haveWarned) {
+        printMsg(lvlError, format("warning: %1%") % fs.s);
+        haveWarned = true;
+    }
+}
+
+
+static void defaultWriteToStderr(const unsigned char * buf, size_t count)
+{
+    writeFull(STDERR_FILENO, buf, count);
+}
+
+
+void writeToStderr(const string & s)
+{
+    try {
+        auto p = _writeToStderr;
+        if (!p) p = defaultWriteToStderr;
+        p((const unsigned char *) s.data(), s.size());
+    } catch (SysError & e) {
+        /* Ignore failing writes to stderr if we're in an exception
+           handler, otherwise throw an exception.  We need to ignore
+           write errors in exception handlers to ensure that cleanup
+           code runs to completion if the other side of stderr has
+           been closed unexpectedly. */
+        if (!std::uncaught_exception()) throw;
+    }
+}
+
+
+void (*_writeToStderr) (const unsigned char * buf, size_t count) = defaultWriteToStderr;
+
+
+void readFull(int fd, unsigned char * buf, size_t count)
+{
+    while (count) {
+        checkInterrupt();
+        ssize_t res = read(fd, (char *) buf, count);
+        if (res == -1) {
+            if (errno == EINTR) continue;
+            throw SysError("reading from file");
+        }
+        if (res == 0) throw EndOfFile("unexpected end-of-file");
+        count -= res;
+        buf += res;
+    }
+}
+
+
+void writeFull(int fd, const unsigned char * buf, size_t count)
+{
+    while (count) {
+        checkInterrupt();
+        ssize_t res = write(fd, (char *) buf, count);
+        if (res == -1) {
+            if (errno == EINTR) continue;
+            throw SysError("writing to file");
+        }
+        count -= res;
+        buf += res;
+    }
+}
+
+
+string drainFD(int fd)
+{
+    string result;
+    unsigned char buffer[4096];
+    while (1) {
+        checkInterrupt();
+        ssize_t rd = read(fd, buffer, sizeof buffer);
+        if (rd == -1) {
+            if (errno != EINTR)
+                throw SysError("reading from file");
+        }
+        else if (rd == 0) break;
+        else result.append((char *) buffer, rd);
+    }
+    return result;
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+AutoDelete::AutoDelete(const string & p, bool recursive) : path(p)
+{
+    del = true;
+    this->recursive = recursive;
+}
+
+AutoDelete::~AutoDelete()
+{
+    try {
+        if (del) {
+            if (recursive)
+                deletePath(path);
+            else {
+                if (remove(path.c_str()) == -1)
+                    throw SysError(format("cannot unlink ‘%1%’") % path);
+            }
+        }
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+void AutoDelete::cancel()
+{
+    del = false;
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+AutoCloseFD::AutoCloseFD()
+{
+    fd = -1;
+}
+
+
+AutoCloseFD::AutoCloseFD(int fd)
+{
+    this->fd = fd;
+}
+
+
+AutoCloseFD::AutoCloseFD(const AutoCloseFD & fd)
+{
+    /* Copying an AutoCloseFD isn't allowed (who should get to close
+       it?).  But as an edge case, allow copying of closed
+       AutoCloseFDs.  This is necessary due to tiresome reasons
+       involving copy constructor use on default object values in STL
+       containers (like when you do `map[value]' where value isn't in
+       the map yet). */
+    this->fd = fd.fd;
+    if (this->fd != -1) abort();
+}
+
+
+AutoCloseFD::~AutoCloseFD()
+{
+    try {
+        close();
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+
+void AutoCloseFD::operator =(int fd)
+{
+    if (this->fd != fd) close();
+    this->fd = fd;
+}
+
+
+AutoCloseFD::operator int() const
+{
+    return fd;
+}
+
+
+void AutoCloseFD::close()
+{
+    if (fd != -1) {
+        if (::close(fd) == -1)
+            /* This should never happen. */
+            throw SysError(format("closing file descriptor %1%") % fd);
+        fd = -1;
+    }
+}
+
+
+bool AutoCloseFD::isOpen()
+{
+    return fd != -1;
+}
+
+
+/* Pass responsibility for closing this fd to the caller. */
+int AutoCloseFD::borrow()
+{
+    int oldFD = fd;
+    fd = -1;
+    return oldFD;
+}
+
+
+void Pipe::create()
+{
+    int fds[2];
+    if (pipe(fds) != 0) throw SysError("creating pipe");
+    readSide = fds[0];
+    writeSide = fds[1];
+    closeOnExec(readSide);
+    closeOnExec(writeSide);
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+AutoCloseDir::AutoCloseDir()
+{
+    dir = 0;
+}
+
+
+AutoCloseDir::AutoCloseDir(DIR * dir)
+{
+    this->dir = dir;
+}
+
+
+AutoCloseDir::~AutoCloseDir()
+{
+    close();
+}
+
+
+void AutoCloseDir::operator =(DIR * dir)
+{
+    this->dir = dir;
+}
+
+
+AutoCloseDir::operator DIR *()
+{
+    return dir;
+}
+
+
+void AutoCloseDir::close()
+{
+    if (dir) {
+        closedir(dir);
+        dir = 0;
+    }
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+Pid::Pid()
+    : pid(-1), separatePG(false), killSignal(SIGKILL)
+{
+}
+
+
+Pid::Pid(pid_t pid)
+    : pid(pid), separatePG(false), killSignal(SIGKILL)
+{
+}
+
+
+Pid::~Pid()
+{
+    kill();
+}
+
+
+void Pid::operator =(pid_t pid)
+{
+    if (this->pid != pid) kill();
+    this->pid = pid;
+    killSignal = SIGKILL; // reset signal to default
+}
+
+
+Pid::operator pid_t()
+{
+    return pid;
+}
+
+
+void Pid::kill(bool quiet)
+{
+    if (pid == -1 || pid == 0) return;
+
+    if (!quiet)
+        printMsg(lvlError, format("killing process %1%") % pid);
+
+    /* Send the requested signal to the child.  If it has its own
+       process group, send the signal to every process in the child
+       process group (which hopefully includes *all* its children). */
+    if (::kill(separatePG ? -pid : pid, killSignal) != 0)
+        printMsg(lvlError, (SysError(format("killing process %1%") % pid).msg()));
+
+    /* Wait until the child dies, disregarding the exit status. */
+    int status;
+    while (waitpid(pid, &status, 0) == -1) {
+        checkInterrupt();
+        if (errno != EINTR) {
+            printMsg(lvlError,
+                (SysError(format("waiting for process %1%") % pid).msg()));
+            break;
+        }
+    }
+
+    pid = -1;
+}
+
+
+int Pid::wait(bool block)
+{
+    assert(pid != -1);
+    while (1) {
+        int status;
+        int res = waitpid(pid, &status, block ? 0 : WNOHANG);
+        if (res == pid) {
+            pid = -1;
+            return status;
+        }
+        if (res == 0 && !block) return -1;
+        if (errno != EINTR)
+            throw SysError("cannot get child exit status");
+        checkInterrupt();
+    }
+}
+
+
+void Pid::setSeparatePG(bool separatePG)
+{
+    this->separatePG = separatePG;
+}
+
+
+void Pid::setKillSignal(int signal)
+{
+    this->killSignal = signal;
+}
+
+
+void killUser(uid_t uid)
+{
+    debug(format("killing all processes running under uid ‘%1%’") % uid);
+
+    assert(uid != 0); /* just to be safe... */
+
+    /* The system call kill(-1, sig) sends the signal `sig' to all
+       users to which the current process can send signals.  So we
+       fork a process, switch to uid, and send a mass kill. */
+
+    Pid pid = startProcess([&]() {
+
+        if (setuid(uid) == -1)
+            throw SysError("setting uid");
+
+        while (true) {
+#ifdef __APPLE__
+            /* OS X's kill syscall takes a third parameter that, among
+               other things, determines whether kill(-1, signo) affects
+               the calling process.  In the OS X libc it is set to true,
+               which means "follow POSIX", which we don't want here. */
+            if (syscall(SYS_kill, -1, SIGKILL, false) == 0) break;
+#else
+            if (kill(-1, SIGKILL) == 0) break;
+#endif
+            if (errno == ESRCH) break; /* no more processes */
+            if (errno != EINTR)
+                throw SysError(format("cannot kill processes for uid ‘%1%’") % uid);
+        }
+
+        _exit(0);
+    });
+
+    int status = pid.wait(true);
+    if (status != 0)
+        throw Error(format("cannot kill processes for uid ‘%1%’: %2%") % uid % statusToString(status));
+
+    /* !!! We should really do some check to make sure that there are
+       no processes left running under `uid', but there is no portable
+       way to do so (I think).  The most reliable way may be `ps -eo
+       uid | grep -q $uid'. */
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+pid_t startProcess(std::function<void()> fun, const string & errorPrefix)
+{
+    pid_t pid = fork();
+    if (pid == -1) throw SysError("unable to fork");
+
+    if (pid == 0) {
+        _writeToStderr = 0;
+        try {
+            restoreAffinity();
+            fun();
+        } catch (std::exception & e) {
+            try {
+                std::cerr << errorPrefix << e.what() << "\n";
+            } catch (...) { }
+        } catch (...) { }
+        _exit(1);
+    }
+
+    return pid;
+}
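// Editorial illustration (not part of this patch): a stand-alone sketch of
// how startProcess() and Pid fit together; it assumes it is compiled and
// linked against libutil inside this source tree.
#include <iostream>
#include "util.hh"

int main()
{
    using namespace nix;
    Pid pid = startProcess([]() {
        std::cerr << "hello from the child\n";
        _exit(0);
    });
    int status = pid.wait(true);                        // block until the child exits
    std::cout << statusToString(status) << std::endl;   // prints "succeeded"
    return 0;
}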
+
+
+string runProgram(Path program, bool searchPath, const Strings & args)
+{
+    checkInterrupt();
+
+    std::vector<const char *> cargs; /* careful with c_str()! */
+    cargs.push_back(program.c_str());
+    for (Strings::const_iterator i = args.begin(); i != args.end(); ++i)
+        cargs.push_back(i->c_str());
+    cargs.push_back(0);
+
+    /* Create a pipe. */
+    Pipe pipe;
+    pipe.create();
+
+    /* Fork. */
+    Pid pid = startProcess([&]() {
+        if (dup2(pipe.writeSide, STDOUT_FILENO) == -1)
+            throw SysError("dupping stdout");
+
+        if (searchPath)
+            execvp(program.c_str(), (char * *) &cargs[0]);
+        else
+            execv(program.c_str(), (char * *) &cargs[0]);
+
+        throw SysError(format("executing ‘%1%’") % program);
+    });
+
+    pipe.writeSide.close();
+
+    string result = drainFD(pipe.readSide);
+
+    /* Wait for the child to finish. */
+    int status = pid.wait(true);
+    if (!statusOk(status))
+        throw ExecError(format("program ‘%1%’ %2%")
+            % program % statusToString(status));
+
+    return result;
+}
+
+
+void closeMostFDs(const set<int> & exceptions)
+{
+    int maxFD = 0;
+    maxFD = sysconf(_SC_OPEN_MAX);
+    for (int fd = 0; fd < maxFD; ++fd)
+        if (fd != STDIN_FILENO && fd != STDOUT_FILENO && fd != STDERR_FILENO
+            && exceptions.find(fd) == exceptions.end())
+            close(fd); /* ignore result */
+}
+
+
+void closeOnExec(int fd)
+{
+    int prev;
+    if ((prev = fcntl(fd, F_GETFD, 0)) == -1 ||
+        fcntl(fd, F_SETFD, prev | FD_CLOEXEC) == -1)
+        throw SysError("setting close-on-exec flag");
+}
+
+
+void restoreSIGPIPE()
+{
+    struct sigaction act;
+    act.sa_handler = SIG_DFL;
+    act.sa_flags = 0;
+    sigemptyset(&act.sa_mask);
+    if (sigaction(SIGPIPE, &act, 0)) throw SysError("resetting SIGPIPE");
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+volatile sig_atomic_t _isInterrupted = 0;
+
+void _interrupted()
+{
+    /* Block user interrupts while an exception is being handled.
+       Throwing an exception while another exception is being handled
+       kills the program! */
+    if (!std::uncaught_exception()) {
+        _isInterrupted = 0;
+        throw Interrupted("interrupted by the user");
+    }
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+template<class C> C tokenizeString(const string & s, const string & separators)
+{
+    C result;
+    string::size_type pos = s.find_first_not_of(separators, 0);
+    while (pos != string::npos) {
+        string::size_type end = s.find_first_of(separators, pos + 1);
+        if (end == string::npos) end = s.size();
+        string token(s, pos, end - pos);
+        result.insert(result.end(), token);
+        pos = s.find_first_not_of(separators, end);
+    }
+    return result;
+}
+
+template Strings tokenizeString(const string & s, const string & separators);
+template StringSet tokenizeString(const string & s, const string & separators);
+template vector<string> tokenizeString(const string & s, const string & separators);
+
+
+string concatStringsSep(const string & sep, const Strings & ss)
+{
+    string s;
+    foreach (Strings::const_iterator, i, ss) {
+        if (s.size() != 0) s += sep;
+        s += *i;
+    }
+    return s;
+}
+
+
+string concatStringsSep(const string & sep, const StringSet & ss)
+{
+    string s;
+    foreach (StringSet::const_iterator, i, ss) {
+        if (s.size() != 0) s += sep;
+        s += *i;
+    }
+    return s;
+}
+
+
+string chomp(const string & s)
+{
+    size_t i = s.find_last_not_of(" \n\r\t");
+    return i == string::npos ? "" : string(s, 0, i + 1);
+}
+
+
+string statusToString(int status)
+{
+    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+        if (WIFEXITED(status))
+            return (format("failed with exit code %1%") % WEXITSTATUS(status)).str();
+        else if (WIFSIGNALED(status)) {
+            int sig = WTERMSIG(status);
+#if HAVE_STRSIGNAL
+            const char * description = strsignal(sig);
+            return (format("failed due to signal %1% (%2%)") % sig % description).str();
+#else
+            return (format("failed due to signal %1%") % sig).str();
+#endif
+        }
+        else
+            return "died abnormally";
+    } else return "succeeded";
+}
+
+
+bool statusOk(int status)
+{
+    return WIFEXITED(status) && WEXITSTATUS(status) == 0;
+}
+
+
+bool hasSuffix(const string & s, const string & suffix)
+{
+    return s.size() >= suffix.size() && string(s, s.size() - suffix.size()) == suffix;
+}
+
+
+void expect(std::istream & str, const string & s)
+{
+    char s2[s.size()];
+    str.read(s2, s.size());
+    if (string(s2, s.size()) != s)
+        throw FormatError(format("expected string ‘%1%’") % s);
+}
+
+
+string parseString(std::istream & str)
+{
+    string res;
+    expect(str, "\"");
+    int c;
+    while ((c = str.get()) != '"')
+        if (c == '\\') {
+            c = str.get();
+            if (c == 'n') res += '\n';
+            else if (c == 'r') res += '\r';
+            else if (c == 't') res += '\t';
+            else res += c;
+        }
+        else res += c;
+    return res;
+}
+
+
+bool endOfList(std::istream & str)
+{
+    if (str.peek() == ',') {
+        str.get();
+        return false;
+    }
+    if (str.peek() == ']') {
+        str.get();
+        return true;
+    }
+    return false;
+}
+
+
+string decodeOctalEscaped(const string & s)
+{
+    string r;
+    for (string::const_iterator i = s.begin(); i != s.end(); ) {
+        if (*i != '\\') { r += *i++; continue; }
+        unsigned char c = 0;
+        ++i;
+        while (i != s.end() && *i >= '0' && *i < '8')
+            c = c * 8 + (*i++ - '0');
+        r += c;
+    }
+    return r;
+}
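// Editorial illustration (not part of this patch): decodeOctalEscaped()
// turns octal escapes such as "\040" back into the bytes they encode;
// assumes it is compiled and linked against libutil inside this tree.
#include <cassert>
#include "util.hh"

int main()
{
    // In C++ source the backslash itself must be escaped, hence "\\040".
    assert(nix::decodeOctalEscaped("foo\\040bar") == "foo bar");
    return 0;
}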
+
+
+void ignoreException()
+{
+    try {
+        throw;
+    } catch (std::exception & e) {
+        printMsg(lvlError, format("error (ignored): %1%") % e.what());
+    }
+}
+
+
+string filterANSIEscapes(const string & s, bool nixOnly)
+{
+    string t, r;
+    enum { stTop, stEscape, stCSI } state = stTop;
+    for (auto c : s) {
+        if (state == stTop) {
+            if (c == '\e') {
+                state = stEscape;
+                r = c;
+            } else
+                t += c;
+        } else if (state == stEscape) {
+            r += c;
+            if (c == '[')
+                state = stCSI;
+            else {
+                t += r;
+                state = stTop;
+            }
+        } else {
+            r += c;
+            if (c >= 0x40 && c != 0x7e) {
+                if (nixOnly && (c != 'p' && c != 'q' && c != 's' && c != 'a' && c != 'b'))
+                    t += r;
+                state = stTop;
+                r.clear();
+            }
+        }
+    }
+    t += r;
+    return t;
+}
+
+
+}
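To make the path normalisation above concrete, a small stand-alone check of canonPath() (editorial illustration only, not part of this patch; it assumes it is compiled and linked against libutil inside this source tree):

#include <cassert>
#include "util.hh"

int main()
{
    using namespace nix;
    assert(canonPath("/a/./b//../c") == "/a/c");   // '.', '..' and '//' collapse
    assert(canonPath("/a/b/..") == "/a");
    assert(canonPath("///") == "/");
    return 0;
}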
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
new file mode 100644
index 000000000000..f6f5d1b3fee7
--- /dev/null
+++ b/src/libutil/util.hh
@@ -0,0 +1,384 @@
+#pragma once
+
+#include "types.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <signal.h>
+#include <functional>
+
+#include <cstdio>
+
+
+namespace nix {
+
+
+#define foreach(it_type, it, collection)                                \
+    for (it_type it = (collection).begin(); it != (collection).end(); ++it)
+
+#define foreach_reverse(it_type, it, collection)                                \
+    for (it_type it = (collection).rbegin(); it != (collection).rend(); ++it)
+
+
+/* Return an environment variable. */
+string getEnv(const string & key, const string & def = "");
+
+/* Return an absolutized path, resolving paths relative to the
+   specified directory, or the current directory otherwise.  The path
+   is also canonicalised. */
+Path absPath(Path path, Path dir = "");
+
+/* Canonicalise a path by removing all `.' or `..' components and
+   double or trailing slashes.  Optionally resolves all symlink
+   components such that each component of the resulting path is *not*
+   a symbolic link. */
+Path canonPath(const Path & path, bool resolveSymlinks = false);
+
+/* Return the directory part of the given canonical path, i.e.,
+   everything before the final `/'.  If the path is the root or an
+   immediate child thereof (e.g., `/foo'), `/' is returned. */
+Path dirOf(const Path & path);
+
+/* Return the base name of the given canonical path, i.e., everything
+   following the final `/'. */
+string baseNameOf(const Path & path);
+
+/* Check whether a given path is a descendant of the given
+   directory. */
+bool isInDir(const Path & path, const Path & dir);
+
+/* Get status of `path'. */
+struct stat lstat(const Path & path);
+
+/* Return true iff the given path exists. */
+bool pathExists(const Path & path);
+
+/* Read the contents (target) of a symbolic link.  The result is not
+   in any way canonicalised. */
+Path readLink(const Path & path);
+
+bool isLink(const Path & path);
+
+/* Read the contents of a directory.  The entries `.' and `..' are
+   removed. */
+struct DirEntry
+{
+    string name;
+    ino_t ino;
+    unsigned char type; // one of DT_*
+    DirEntry(const string & name, ino_t ino, unsigned char type)
+        : name(name), ino(ino), type(type) { }
+};
+
+typedef vector<DirEntry> DirEntries;
+
+DirEntries readDirectory(const Path & path);
+
+/* Read the contents of a file into a string. */
+string readFile(int fd);
+string readFile(const Path & path, bool drain = false);
+
+/* Write a string to a file. */
+void writeFile(const Path & path, const string & s);
+
+/* Read a line from a file descriptor. */
+string readLine(int fd);
+
+/* Write a line to a file descriptor. */
+void writeLine(int fd, string s);
+
+/* Delete a path; i.e., in the case of a directory, it is deleted
+   recursively.  Don't use this at home, kids.  The second variant
+   returns the number of bytes freed. */
+void deletePath(const Path & path);
+
+void deletePath(const Path & path, unsigned long long & bytesFreed);
+
+/* Create a temporary directory. */
+Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix",
+    bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755);
+
+/* Create a directory and all its parents, if necessary.  Returns the
+   list of created directories, in order of creation. */
+Paths createDirs(const Path & path);
+
+/* Create a symlink. */
+void createSymlink(const Path & target, const Path & link);
+
+
+template<class T, class A>
+T singleton(const A & a)
+{
+    T t;
+    t.insert(a);
+    return t;
+}
+
+
+/* Messages. */
+
+
+typedef enum {
+    ltPretty,   /* nice, nested output */
+    ltEscapes,  /* nesting indicated using escape codes (for log2xml) */
+    ltFlat      /* no nesting */
+} LogType;
+
+extern LogType logType;
+extern Verbosity verbosity; /* suppress msgs > this */
+
+class Nest
+{
+private:
+    bool nest;
+public:
+    Nest();
+    ~Nest();
+    void open(Verbosity level, const FormatOrString & fs);
+    void close();
+};
+
+void printMsg_(Verbosity level, const FormatOrString & fs);
+
+#define startNest(varName, level, f) \
+    Nest varName; \
+    if (level <= verbosity) { \
+      varName.open(level, (f)); \
+    }
+
+#define printMsg(level, f) \
+    do { \
+        if (level <= verbosity) { \
+            printMsg_(level, (f)); \
+        } \
+    } while (0)
+
+#define debug(f) printMsg(lvlDebug, f)
+
+void warnOnce(bool & haveWarned, const FormatOrString & fs);
+
+void writeToStderr(const string & s);
+
+extern void (*_writeToStderr) (const unsigned char * buf, size_t count);
+
+
+/* Wrappers around read()/write() that read/write exactly the
+   requested number of bytes. */
+void readFull(int fd, unsigned char * buf, size_t count);
+void writeFull(int fd, const unsigned char * buf, size_t count);
+
+MakeError(EndOfFile, Error)
+
+
+/* Read a file descriptor until EOF occurs. */
+string drainFD(int fd);
+
+
+
+/* Automatic cleanup of resources. */
+
+
+template <class T>
+struct AutoDeleteArray
+{
+    T * p;
+    AutoDeleteArray(T * p) : p(p) { }
+    ~AutoDeleteArray()
+    {
+        delete [] p;
+    }
+};
+
+
+class AutoDelete
+{
+    Path path;
+    bool del;
+    bool recursive;
+public:
+    AutoDelete(const Path & p, bool recursive = true);
+    ~AutoDelete();
+    void cancel();
+};
+
+
+class AutoCloseFD
+{
+    int fd;
+public:
+    AutoCloseFD();
+    AutoCloseFD(int fd);
+    AutoCloseFD(const AutoCloseFD & fd);
+    ~AutoCloseFD();
+    void operator =(int fd);
+    operator int() const;
+    void close();
+    bool isOpen();
+    int borrow();
+};
+
+
+class Pipe
+{
+public:
+    AutoCloseFD readSide, writeSide;
+    void create();
+};
+
+
+class AutoCloseDir
+{
+    DIR * dir;
+public:
+    AutoCloseDir();
+    AutoCloseDir(DIR * dir);
+    ~AutoCloseDir();
+    void operator =(DIR * dir);
+    operator DIR *();
+    void close();
+};
+
+
+class Pid
+{
+    pid_t pid;
+    bool separatePG;
+    int killSignal;
+public:
+    Pid();
+    Pid(pid_t pid);
+    ~Pid();
+    void operator =(pid_t pid);
+    operator pid_t();
+    void kill(bool quiet = false);
+    int wait(bool block);
+    void setSeparatePG(bool separatePG);
+    void setKillSignal(int signal);
+};
+
+
+/* Kill all processes running under the specified uid by sending them
+   a SIGKILL. */
+void killUser(uid_t uid);
+
+
+/* Fork a process that runs the given function, and return the child
+   pid to the caller. */
+pid_t startProcess(std::function<void()> fun, const string & errorPrefix = "error: ");
+
+
+/* Run a program and return its stdout in a string (i.e., like the
+   shell backtick operator). */
+string runProgram(Path program, bool searchPath = false,
+    const Strings & args = Strings());
+
+MakeError(ExecError, Error)
+
+/* Close all file descriptors except stdin, stdout, stderr, and those
+   listed in the given set.  Good practice in child processes. */
+void closeMostFDs(const set<int> & exceptions);
+
+/* Set the close-on-exec flag for the given file descriptor. */
+void closeOnExec(int fd);
+
+/* Restore default handling of SIGPIPE, otherwise some programs will
+   randomly say "Broken pipe". */
+void restoreSIGPIPE();
+
+
+/* User interruption. */
+
+extern volatile sig_atomic_t _isInterrupted;
+
+void _interrupted();
+
+void inline checkInterrupt()
+{
+    if (_isInterrupted) _interrupted();
+}
+
+MakeError(Interrupted, BaseError)
+
+
+/* String tokenizer. */
+template<class C> C tokenizeString(const string & s, const string & separators = " \t\n\r");
+
+
+/* Concatenate the given strings with a separator between the
+   elements. */
+string concatStringsSep(const string & sep, const Strings & ss);
+string concatStringsSep(const string & sep, const StringSet & ss);
+
+
+/* Remove trailing whitespace from a string. */
+string chomp(const string & s);
+
+
+/* Convert the exit status of a child as returned by wait() into an
+   error string. */
+string statusToString(int status);
+
+bool statusOk(int status);
+
+
+/* Parse a string into an integer. */
+template<class N> bool string2Int(const string & s, N & n)
+{
+    std::istringstream str(s);
+    str >> n;
+    return str && str.get() == EOF;
+}
+
+template<class N> string int2String(N n)
+{
+    std::ostringstream str;
+    str << n;
+    return str.str();
+}
+
+
+/* Return true iff `s' ends in `suffix'. */
+bool hasSuffix(const string & s, const string & suffix);
+
+
+/* Read string `s' from stream `str'. */
+void expect(std::istream & str, const string & s);
+
+MakeError(FormatError, Error)
+
+
+/* Read a C-style string from stream `str'. */
+string parseString(std::istream & str);
+
+
+/* Utility function used to parse legacy ATerms. */
+bool endOfList(std::istream & str);
+
+
+/* Decode a string that contains octal-encoded escape codes such as
+   those used in /etc/fstab and /proc/mounts (e.g. "foo\040bar" decodes
+   to "foo bar"). */
+string decodeOctalEscaped(const string & s);
+
+
+/* Exception handling in destructors: print an error message, then
+   ignore the exception. */
+void ignoreException();
+
+
+/* Some ANSI escape sequences. */
+#define ANSI_NORMAL "\e[0m"
+#define ANSI_BOLD "\e[1m"
+#define ANSI_RED "\e[31;1m"
+
+
+/* Filter out ANSI escape codes from the given string. If ‘nixOnly’ is
+   set, only filter escape codes generated by Nixpkgs' stdenv (used to
+   denote nesting etc.). */
+string filterANSIEscapes(const string & s, bool nixOnly = false);
+
+
+}
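A short sketch exercising a few of the helpers declared above (editorial illustration only, not part of this patch; it assumes a Unix system with `uname' on the PATH and that it is linked against libutil, relying on the explicit tokenizeString instantiations in util.cc):

#include <iostream>
#include "util.hh"

int main()
{
    using namespace nix;
    string out = runProgram("uname", true, Strings({"-s"}));   // e.g. "Linux\n"
    Strings words = tokenizeString<Strings>("a b  c");
    std::cout << chomp(out) << ": " << concatStringsSep(",", words) << std::endl;
    return 0;
}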
diff --git a/src/libutil/xml-writer.cc b/src/libutil/xml-writer.cc
new file mode 100644
index 000000000000..01794001b2c6
--- /dev/null
+++ b/src/libutil/xml-writer.cc
@@ -0,0 +1,94 @@
+#include <assert.h>
+
+#include "xml-writer.hh"
+
+
+namespace nix {
+
+
+XMLWriter::XMLWriter(bool indent, std::ostream & output)
+    : output(output), indent(indent)
+{
+    output << "<?xml version='1.0' encoding='utf-8'?>" << std::endl;
+    closed = false;
+}
+
+
+XMLWriter::~XMLWriter()
+{
+    close();
+}
+
+
+void XMLWriter::close()
+{
+    if (closed) return;
+    while (!pendingElems.empty()) closeElement();
+    closed = true;
+}
+
+
+void XMLWriter::indent_(unsigned int depth)
+{
+    if (!indent) return;
+    output << string(depth * 2, ' ');
+}
+
+
+void XMLWriter::openElement(const string & name,
+    const XMLAttrs & attrs)
+{
+    assert(!closed);
+    indent_(pendingElems.size());
+    output << "<" << name;
+    writeAttrs(attrs);
+    output << ">";
+    if (indent) output << std::endl;
+    pendingElems.push_back(name);
+}
+
+
+void XMLWriter::closeElement()
+{
+    assert(!pendingElems.empty());
+    indent_(pendingElems.size() - 1);
+    output << "</" << pendingElems.back() << ">";
+    if (indent) output << std::endl;
+    pendingElems.pop_back();
+    if (pendingElems.empty()) closed = true;
+}
+
+
+void XMLWriter::writeEmptyElement(const string & name,
+    const XMLAttrs & attrs)
+{
+    assert(!closed);
+    indent_(pendingElems.size());
+    output << "<" << name;
+    writeAttrs(attrs);
+    output << " />";
+    if (indent) output << std::endl;
+}
+
+
+void XMLWriter::writeAttrs(const XMLAttrs & attrs)
+{
+    for (XMLAttrs::const_iterator i = attrs.begin(); i != attrs.end(); ++i) {
+        output << " " << i->first << "=\"";
+        for (unsigned int j = 0; j < i->second.size(); ++j) {
+            char c = i->second[j];
+            if (c == '"') output << "&quot;";
+            else if (c == '<') output << "&lt;";
+            else if (c == '>') output << "&gt;";
+            else if (c == '&') output << "&amp;";
+            /* Escape newlines to prevent attribute normalisation (see
+               XML spec, section 3.3.3). */
+            else if (c == '\n') output << "&#xA;";
+            else output << c;
+        }
+        output << "\"";
+    }
+}
+
+
+}
diff --git a/src/libutil/xml-writer.hh b/src/libutil/xml-writer.hh
new file mode 100644
index 000000000000..3cefe3712c08
--- /dev/null
+++ b/src/libutil/xml-writer.hh
@@ -0,0 +1,69 @@
+#pragma once
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <map>
+
+
+namespace nix {
+
+using std::string;
+using std::map;
+using std::list;
+
+
+typedef map<string, string> XMLAttrs;
+
+
+class XMLWriter
+{
+private:
+
+    std::ostream & output;
+
+    bool indent;
+    bool closed;
+
+    list<string> pendingElems;
+
+public:
+
+    XMLWriter(bool indent, std::ostream & output);
+    ~XMLWriter();
+
+    void close();
+
+    void openElement(const string & name,
+        const XMLAttrs & attrs = XMLAttrs());
+    void closeElement();
+
+    void writeEmptyElement(const string & name,
+        const XMLAttrs & attrs = XMLAttrs());
+
+private:
+    void writeAttrs(const XMLAttrs & attrs);
+
+    void indent_(unsigned int depth);
+};
+
+
+class XMLOpenElement
+{
+private:
+    XMLWriter & writer;
+public:
+    XMLOpenElement(XMLWriter & writer, const string & name,
+        const XMLAttrs & attrs = XMLAttrs())
+        : writer(writer)
+    {
+        writer.openElement(name, attrs);
+    }
+    ~XMLOpenElement()
+    {
+        writer.closeElement();
+    }
+};
+
+
+}
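A minimal sketch of the writer's intended use (editorial illustration only, not part of this patch): XMLOpenElement opens an element on construction and closes it on destruction.

#include <iostream>
#include "xml-writer.hh"

int main()
{
    using namespace nix;
    XMLWriter xml(true, std::cout);        // indent = true
    XMLOpenElement root(xml, "items");
    XMLAttrs attrs;
    attrs["name"] = "example";
    xml.writeEmptyElement("item", attrs);
    return 0;                              // destructors close <items> and the document
}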
diff --git a/src/nix-daemon/local.mk b/src/nix-daemon/local.mk
new file mode 100644
index 000000000000..e5538bada0b2
--- /dev/null
+++ b/src/nix-daemon/local.mk
@@ -0,0 +1,15 @@
+programs += nix-daemon
+
+nix-daemon_DIR := $(d)
+
+nix-daemon_SOURCES := $(d)/nix-daemon.cc
+
+nix-daemon_LIBS = libmain libstore libutil libformat
+
+nix-daemon_LDFLAGS = -pthread
+
+ifeq ($(OS), SunOS)
+        nix-daemon_LDFLAGS += -lsocket
+endif
+
+$(eval $(call install-symlink, nix-daemon, $(bindir)/nix-worker))
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
new file mode 100644
index 000000000000..6d166c427480
--- /dev/null
+++ b/src/nix-daemon/nix-daemon.cc
@@ -0,0 +1,820 @@
+#include "shared.hh"
+#include "local-store.hh"
+#include "util.hh"
+#include "serialise.hh"
+#include "worker-protocol.hh"
+#include "archive.hh"
+#include "affinity.hh"
+#include "globals.hh"
+#include "monitor-fd.hh"
+
+#include <algorithm>
+
+#include <cstring>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <errno.h>
+#include <pwd.h>
+#include <grp.h>
+
+using namespace nix;
+
+
+static FdSource from(STDIN_FILENO);
+static FdSink to(STDOUT_FILENO);
+
+bool canSendStderr;
+
+
+/* This function is called anytime we want to write something to
+   stderr.  If we're in a state where the protocol allows it (i.e.,
+   when canSendStderr), send the message to the client over the
+   socket. */
+static void tunnelStderr(const unsigned char * buf, size_t count)
+{
+    if (canSendStderr) {
+        try {
+            writeInt(STDERR_NEXT, to);
+            writeString(buf, count, to);
+            to.flush();
+        } catch (...) {
+            /* Write failed; that means that the other side is
+               gone. */
+            canSendStderr = false;
+            throw;
+        }
+    } else
+        writeFull(STDERR_FILENO, buf, count);
+}
+
+
+/* startWork() means that we're starting an operation for which we
+   want to send out stderr to the client. */
+static void startWork()
+{
+    canSendStderr = true;
+}
+
+
+/* stopWork() means that we're done; stop sending stderr to the
+   client. */
+static void stopWork(bool success = true, const string & msg = "", unsigned int status = 0)
+{
+    canSendStderr = false;
+
+    if (success)
+        writeInt(STDERR_LAST, to);
+    else {
+        writeInt(STDERR_ERROR, to);
+        writeString(msg, to);
+        if (status != 0) writeInt(status, to);
+    }
+}
+
+
+struct TunnelSink : Sink
+{
+    Sink & to;
+    TunnelSink(Sink & to) : to(to) { }
+    virtual void operator () (const unsigned char * data, size_t len)
+    {
+        writeInt(STDERR_WRITE, to);
+        writeString(data, len, to);
+    }
+};
+
+
+struct TunnelSource : BufferedSource
+{
+    Source & from;
+    TunnelSource(Source & from) : from(from) { }
+    size_t readUnbuffered(unsigned char * data, size_t len)
+    {
+        writeInt(STDERR_READ, to);
+        writeInt(len, to);
+        to.flush();
+        size_t n = readString(data, len, from);
+        if (n == 0) throw EndOfFile("unexpected end-of-file");
+        return n;
+    }
+};
+
+
+/* If the NAR archive contains a single file at top-level, then save
+   the contents of the file to `s'.  Otherwise barf. */
+struct RetrieveRegularNARSink : ParseSink
+{
+    bool regular;
+    string s;
+
+    RetrieveRegularNARSink() : regular(true) { }
+
+    void createDirectory(const Path & path)
+    {
+        regular = false;
+    }
+
+    void receiveContents(unsigned char * data, unsigned int len)
+    {
+        s.append((const char *) data, len);
+    }
+
+    void createSymlink(const Path & path, const string & target)
+    {
+        regular = false;
+    }
+};
+
+
+/* Adapter class of a Source that saves all data read to `s'. */
+struct SavingSourceAdapter : Source
+{
+    Source & orig;
+    string s;
+    SavingSourceAdapter(Source & orig) : orig(orig) { }
+    size_t read(unsigned char * data, size_t len)
+    {
+        size_t n = orig.read(data, len);
+        s.append((const char *) data, n);
+        return n;
+    }
+};
+
+
+static void performOp(bool trusted, unsigned int clientVersion,
+    Source & from, Sink & to, unsigned int op)
+{
+    switch (op) {
+
+    case wopIsValidPath: {
+        /* 'readStorePath' could raise an error leading to the connection
+           being closed.  To be able to recover from an invalid path error,
+           call 'startWork' early, and do 'assertStorePath' afterwards so
+           that the 'Error' exception handler doesn't close the
+           connection.  */
+        Path path = readString(from);
+        startWork();
+        assertStorePath(path);
+        bool result = store->isValidPath(path);
+        stopWork();
+        writeInt(result, to);
+        break;
+    }
+
+    case wopQueryValidPaths: {
+        PathSet paths = readStorePaths<PathSet>(from);
+        startWork();
+        PathSet res = store->queryValidPaths(paths);
+        stopWork();
+        writeStrings(res, to);
+        break;
+    }
+
+    case wopHasSubstitutes: {
+        Path path = readStorePath(from);
+        startWork();
+        PathSet res = store->querySubstitutablePaths(singleton<PathSet>(path));
+        stopWork();
+        writeInt(res.find(path) != res.end(), to);
+        break;
+    }
+
+    case wopQuerySubstitutablePaths: {
+        PathSet paths = readStorePaths<PathSet>(from);
+        startWork();
+        PathSet res = store->querySubstitutablePaths(paths);
+        stopWork();
+        writeStrings(res, to);
+        break;
+    }
+
+    case wopQueryPathHash: {
+        Path path = readStorePath(from);
+        startWork();
+        Hash hash = store->queryPathHash(path);
+        stopWork();
+        writeString(printHash(hash), to);
+        break;
+    }
+
+    case wopQueryReferences:
+    case wopQueryReferrers:
+    case wopQueryValidDerivers:
+    case wopQueryDerivationOutputs: {
+        Path path = readStorePath(from);
+        startWork();
+        PathSet paths;
+        if (op == wopQueryReferences)
+            store->queryReferences(path, paths);
+        else if (op == wopQueryReferrers)
+            store->queryReferrers(path, paths);
+        else if (op == wopQueryValidDerivers)
+            paths = store->queryValidDerivers(path);
+        else paths = store->queryDerivationOutputs(path);
+        stopWork();
+        writeStrings(paths, to);
+        break;
+    }
+
+    case wopQueryDerivationOutputNames: {
+        Path path = readStorePath(from);
+        startWork();
+        StringSet names;
+        names = store->queryDerivationOutputNames(path);
+        stopWork();
+        writeStrings(names, to);
+        break;
+    }
+
+    case wopQueryDeriver: {
+        Path path = readStorePath(from);
+        startWork();
+        Path deriver = store->queryDeriver(path);
+        stopWork();
+        writeString(deriver, to);
+        break;
+    }
+
+    case wopQueryPathFromHashPart: {
+        string hashPart = readString(from);
+        startWork();
+        Path path = store->queryPathFromHashPart(hashPart);
+        stopWork();
+        writeString(path, to);
+        break;
+    }
+
+    case wopAddToStore: {
+        string baseName = readString(from);
+        bool fixed = readInt(from) == 1; /* obsolete */
+        bool recursive = readInt(from) == 1;
+        string s = readString(from);
+        /* Compatibility hack. */
+        if (!fixed) {
+            s = "sha256";
+            recursive = true;
+        }
+        HashType hashAlgo = parseHashType(s);
+
+        SavingSourceAdapter savedNAR(from);
+        RetrieveRegularNARSink savedRegular;
+
+        if (recursive) {
+            /* Get the entire NAR dump from the client and save it to
+               a string so that we can pass it to
+               addToStoreFromDump(). */
+            ParseSink sink; /* null sink; just parse the NAR */
+            parseDump(sink, savedNAR);
+        } else
+            parseDump(savedRegular, from);
+
+        startWork();
+        if (!savedRegular.regular) throw Error("regular file expected");
+        Path path = dynamic_cast<LocalStore *>(store.get())
+            ->addToStoreFromDump(recursive ? savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo);
+        stopWork();
+
+        writeString(path, to);
+        break;
+    }
+
+    case wopAddTextToStore: {
+        string suffix = readString(from);
+        string s = readString(from);
+        PathSet refs = readStorePaths<PathSet>(from);
+        startWork();
+        Path path = store->addTextToStore(suffix, s, refs);
+        stopWork();
+        writeString(path, to);
+        break;
+    }
+
+    case wopExportPath: {
+        Path path = readStorePath(from);
+        bool sign = readInt(from) == 1;
+        startWork();
+        TunnelSink sink(to);
+        store->exportPath(path, sign, sink);
+        stopWork();
+        writeInt(1, to);
+        break;
+    }
+
+    case wopImportPaths: {
+        startWork();
+        TunnelSource source(from);
+        Paths paths = store->importPaths(!trusted, source);
+        stopWork();
+        writeStrings(paths, to);
+        break;
+    }
+
+    case wopBuildPaths: {
+        PathSet drvs = readStorePaths<PathSet>(from);
+        startWork();
+        store->buildPaths(drvs);
+        stopWork();
+        writeInt(1, to);
+        break;
+    }
+
+    case wopEnsurePath: {
+        Path path = readStorePath(from);
+        startWork();
+        store->ensurePath(path);
+        stopWork();
+        writeInt(1, to);
+        break;
+    }
+
+    case wopAddTempRoot: {
+        Path path = readStorePath(from);
+        startWork();
+        store->addTempRoot(path);
+        stopWork();
+        writeInt(1, to);
+        break;
+    }
+
+    case wopAddIndirectRoot: {
+        Path path = absPath(readString(from));
+        startWork();
+        store->addIndirectRoot(path);
+        stopWork();
+        writeInt(1, to);
+        break;
+    }
+
+    case wopSyncWithGC: {
+        startWork();
+        store->syncWithGC();
+        stopWork();
+        writeInt(1, to);
+        break;
+    }
+
+    case wopFindRoots: {
+        startWork();
+        Roots roots = store->findRoots();
+        stopWork();
+        writeInt(roots.size(), to);
+        for (Roots::iterator i = roots.begin(); i != roots.end(); ++i) {
+            writeString(i->first, to);
+            writeString(i->second, to);
+        }
+        break;
+    }
+
+    case wopCollectGarbage: {
+        GCOptions options;
+        options.action = (GCOptions::GCAction) readInt(from);
+        options.pathsToDelete = readStorePaths<PathSet>(from);
+        options.ignoreLiveness = readInt(from);
+        options.maxFreed = readLongLong(from);
+        readInt(from); // obsolete field
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 5) {
+            /* removed options */
+            readInt(from);
+            readInt(from);
+        }
+
+        GCResults results;
+
+        startWork();
+        if (options.ignoreLiveness)
+            throw Error("you are not allowed to ignore liveness");
+        store->collectGarbage(options, results);
+        stopWork();
+
+        writeStrings(results.paths, to);
+        writeLongLong(results.bytesFreed, to);
+        writeLongLong(0, to); // obsolete
+
+        break;
+    }
+
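+    /* Receive the client's settings.  Which fields are present depends
+       on the client's protocol version; since minor version 12 arbitrary
+       name/value settings follow, and those from untrusted clients are,
+       with a few exceptions, stored under an "untrusted-" prefix. */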
+    case wopSetOptions: {
+        settings.keepFailed = readInt(from) != 0;
+        settings.keepGoing = readInt(from) != 0;
+        settings.set("build-fallback", readInt(from) ? "true" : "false");
+        verbosity = (Verbosity) readInt(from);
+        settings.set("build-max-jobs", int2String(readInt(from)));
+        settings.set("build-max-silent-time", int2String(readInt(from)));
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
+            settings.useBuildHook = readInt(from) != 0;
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 4) {
+            settings.buildVerbosity = (Verbosity) readInt(from);
+            logType = (LogType) readInt(from);
+            settings.printBuildTrace = readInt(from) != 0;
+        }
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 6)
+            settings.set("build-cores", int2String(readInt(from)));
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 10)
+            settings.set("build-use-substitutes", readInt(from) ? "true" : "false");
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 12) {
+            unsigned int n = readInt(from);
+            for (unsigned int i = 0; i < n; i++) {
+                string name = readString(from);
+                string value = readString(from);
+                if (name == "build-timeout" || name == "use-ssh-substituter")
+                    settings.set(name, value);
+                else
+                    settings.set(trusted ? name : "untrusted-" + name, value);
+            }
+        }
+        settings.update();
+        startWork();
+        stopWork();
+        break;
+    }
+
+    case wopQuerySubstitutablePathInfo: {
+        Path path = absPath(readString(from));
+        startWork();
+        SubstitutablePathInfos infos;
+        store->querySubstitutablePathInfos(singleton<PathSet>(path), infos);
+        stopWork();
+        SubstitutablePathInfos::iterator i = infos.find(path);
+        if (i == infos.end())
+            writeInt(0, to);
+        else {
+            writeInt(1, to);
+            writeString(i->second.deriver, to);
+            writeStrings(i->second.references, to);
+            writeLongLong(i->second.downloadSize, to);
+            if (GET_PROTOCOL_MINOR(clientVersion) >= 7)
+                writeLongLong(i->second.narSize, to);
+        }
+        break;
+    }
+
+    case wopQuerySubstitutablePathInfos: {
+        PathSet paths = readStorePaths<PathSet>(from);
+        startWork();
+        SubstitutablePathInfos infos;
+        store->querySubstitutablePathInfos(paths, infos);
+        stopWork();
+        writeInt(infos.size(), to);
+        foreach (SubstitutablePathInfos::iterator, i, infos) {
+            writeString(i->first, to);
+            writeString(i->second.deriver, to);
+            writeStrings(i->second.references, to);
+            writeLongLong(i->second.downloadSize, to);
+            writeLongLong(i->second.narSize, to);
+        }
+        break;
+    }
+
+    case wopQueryAllValidPaths: {
+        startWork();
+        PathSet paths = store->queryAllValidPaths();
+        stopWork();
+        writeStrings(paths, to);
+        break;
+    }
+
+    case wopQueryFailedPaths: {
+        startWork();
+        PathSet paths = store->queryFailedPaths();
+        stopWork();
+        writeStrings(paths, to);
+        break;
+    }
+
+    case wopClearFailedPaths: {
+        PathSet paths = readStrings<PathSet>(from);
+        startWork();
+        store->clearFailedPaths(paths);
+        stopWork();
+        writeInt(1, to);
+        break;
+    }
+
+    case wopQueryPathInfo: {
+        Path path = readStorePath(from);
+        startWork();
+        ValidPathInfo info = store->queryPathInfo(path);
+        stopWork();
+        writeString(info.deriver, to);
+        writeString(printHash(info.hash), to);
+        writeStrings(info.references, to);
+        writeInt(info.registrationTime, to);
+        writeLongLong(info.narSize, to);
+        break;
+    }
+
+    default:
+        throw Error(format("invalid operation %1%") % op);
+    }
+}
+
+
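+/* Handle a single client connection: exchange the protocol greeting,
+   open the store, then execute worker operations until the client
+   disconnects. */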
+static void processConnection(bool trusted)
+{
+    MonitorFdHup monitor(from.fd);
+
+    canSendStderr = false;
+    _writeToStderr = tunnelStderr;
+
+    /* Exchange the greeting. */
+    unsigned int magic = readInt(from);
+    if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch");
+    writeInt(WORKER_MAGIC_2, to);
+    writeInt(PROTOCOL_VERSION, to);
+    to.flush();
+    unsigned int clientVersion = readInt(from);
+
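+    /* Since protocol minor version 14 the client may ask us to pin the
+       daemon process to a particular CPU. */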
+    if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from))
+        setAffinityTo(readInt(from));
+
+    bool reserveSpace = true;
+    if (GET_PROTOCOL_MINOR(clientVersion) >= 11)
+        reserveSpace = readInt(from) != 0;
+
+    /* Send startup error messages to the client. */
+    startWork();
+
+    try {
+
+        /* If we can't accept clientVersion, then throw an error
+           *here* (not above). */
+
+#if 0
+        /* Prevent users from doing something very dangerous. */
+        if (geteuid() == 0 &&
+            querySetting("build-users-group", "") == "")
+            throw Error("if you run ‘nix-daemon’ as root, then you MUST set ‘build-users-group’!");
+#endif
+
+        /* Open the store. */
+        store = std::shared_ptr<StoreAPI>(new LocalStore(reserveSpace));
+
+        stopWork();
+        to.flush();
+
+    } catch (Error & e) {
+        stopWork(false, e.msg());
+        to.flush();
+        return;
+    }
+
+    /* Process client requests. */
+    unsigned int opCount = 0;
+
+    while (true) {
+        WorkerOp op;
+        try {
+            op = (WorkerOp) readInt(from);
+        } catch (Interrupted & e) {
+            break;
+        } catch (EndOfFile & e) {
+            break;
+        }
+
+        opCount++;
+
+        try {
+            performOp(trusted, clientVersion, from, to, op);
+        } catch (Error & e) {
+            /* If we're not in a state where we can send replies, then
+               something went wrong processing the input of the
+               client.  This can happen especially if I/O errors occur
+               during addTextToStore() / importPath().  If that
+               happens, just send the error message and exit. */
+            bool errorAllowed = canSendStderr;
+            stopWork(false, e.msg(), GET_PROTOCOL_MINOR(clientVersion) >= 8 ? e.status : 0);
+            if (!errorAllowed) throw;
+        } catch (std::bad_alloc & e) {
+            stopWork(false, "Nix daemon out of memory", GET_PROTOCOL_MINOR(clientVersion) >= 8 ? 1 : 0);
+            throw;
+        }
+
+        to.flush();
+
+        assert(!canSendStderr);
+    }
+
+    printMsg(lvlDebug, format("%1% operations") % opCount);
+}
+
+
+static void sigChldHandler(int sigNo)
+{
+    /* Reap all dead children. */
+    while (waitpid(-1, 0, WNOHANG) > 0) ;
+}
+
+
+static void setSigChldAction(bool autoReap)
+{
+    struct sigaction act, oact;
+    act.sa_handler = autoReap ? sigChldHandler : SIG_DFL;
+    sigfillset(&act.sa_mask);
+    act.sa_flags = 0;
+    if (sigaction(SIGCHLD, &act, &oact))
+        throw SysError("setting SIGCHLD handler");
+}
+
+
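+/* Return true if `users' contains "*", the user name itself, or a
+   "@group" entry matching the user's primary group or a group that the
+   user is a member of. */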
+bool matchUser(const string & user, const string & group, const Strings & users)
+{
+    if (find(users.begin(), users.end(), "*") != users.end())
+        return true;
+
+    if (find(users.begin(), users.end(), user) != users.end())
+        return true;
+
+    for (auto & i : users)
+        if (string(i, 0, 1) == "@") {
+            if (group == string(i, 1)) return true;
+            struct group * gr = getgrnam(i.c_str() + 1);
+            if (!gr) continue;
+            for (char * * mem = gr->gr_mem; *mem; mem++)
+                if (user == string(*mem)) return true;
+        }
+
+    return false;
+}
+
+
+#define SD_LISTEN_FDS_START 3
+
+
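+/* Accept connections on the daemon socket (passed in by systemd or
+   created here) and fork a child process to handle each one. */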
+static void daemonLoop(char * * argv)
+{
+    chdir("/");
+
+    /* Get rid of children automatically; don't let them become
+       zombies. */
+    setSigChldAction(true);
+
+    AutoCloseFD fdSocket;
+
+    /* Handle socket-based activation by systemd. */
+    if (getEnv("LISTEN_FDS") != "") {
+        if (getEnv("LISTEN_PID") != int2String(getpid()) || getEnv("LISTEN_FDS") != "1")
+            throw Error("unexpected systemd environment variables");
+        fdSocket = SD_LISTEN_FDS_START;
+    }
+
+    /* Otherwise, create and bind to a Unix domain socket. */
+    else {
+
+        /* Create and bind to a Unix domain socket. */
+        fdSocket = socket(PF_UNIX, SOCK_STREAM, 0);
+        if (fdSocket == -1)
+            throw SysError("cannot create Unix domain socket");
+
+        string socketPath = settings.nixDaemonSocketFile;
+
+        createDirs(dirOf(socketPath));
+
+        /* Urgh, sockaddr_un allows path names of only 108 characters.
+           So chdir to the socket directory so that we can pass a
+           relative path name. */
+        chdir(dirOf(socketPath).c_str());
+        Path socketPathRel = "./" + baseNameOf(socketPath);
+
+        struct sockaddr_un addr;
+        addr.sun_family = AF_UNIX;
+        if (socketPathRel.size() >= sizeof(addr.sun_path))
+            throw Error(format("socket path ‘%1%’ is too long") % socketPathRel);
+        strcpy(addr.sun_path, socketPathRel.c_str());
+
+        unlink(socketPath.c_str());
+
+        /* Make sure that the socket is created with 0666 permission
+           (everybody can connect --- provided they have access to the
+           directory containing the socket). */
+        mode_t oldMode = umask(0111);
+        int res = bind(fdSocket, (struct sockaddr *) &addr, sizeof(addr));
+        umask(oldMode);
+        if (res == -1)
+            throw SysError(format("cannot bind to socket ‘%1%’") % socketPath);
+
+        chdir("/"); /* back to the root */
+
+        if (listen(fdSocket, 5) == -1)
+            throw SysError(format("cannot listen on socket ‘%1%’") % socketPath);
+    }
+
+    closeOnExec(fdSocket);
+
+    /* Loop accepting connections. */
+    while (1) {
+
+        try {
+            /* Important: the server process *cannot* open the SQLite
+               database itself, because SQLite does not handle being
+               carried across a fork(). */
+            assert(!store);
+
+            /* Accept a connection. */
+            struct sockaddr_un remoteAddr;
+            socklen_t remoteAddrLen = sizeof(remoteAddr);
+
+            AutoCloseFD remote = accept(fdSocket,
+                (struct sockaddr *) &remoteAddr, &remoteAddrLen);
+            checkInterrupt();
+            if (remote == -1) {
+                if (errno == EINTR) continue;
+                throw SysError("accepting connection");
+            }
+
+            closeOnExec(remote);
+
+            bool trusted = false;
+            pid_t clientPid = -1;
+
+#if defined(SO_PEERCRED)
+            /* Get the identity of the caller, if possible. */
+            ucred cred;
+            socklen_t credLen = sizeof(cred);
+            if (getsockopt(remote, SOL_SOCKET, SO_PEERCRED, &cred, &credLen) == -1)
+                throw SysError("getting peer credentials");
+
+            clientPid = cred.pid;
+
+            struct passwd * pw = getpwuid(cred.uid);
+            string user = pw ? pw->pw_name : int2String(cred.uid);
+
+            struct group * gr = getgrgid(cred.gid);
+            string group = gr ? gr->gr_name : int2String(cred.gid);
+
+            Strings trustedUsers = settings.get("trusted-users", Strings({"root"}));
+            Strings allowedUsers = settings.get("allowed-users", Strings({"*"}));
+
+            if (matchUser(user, group, trustedUsers))
+                trusted = true;
+
+            if (!trusted && !matchUser(user, group, allowedUsers))
+                throw Error(format("user ‘%1%’ is not allowed to connect to the Nix daemon") % user);
+
+            printMsg(lvlInfo, format((string) "accepted connection from pid %1%, user %2%"
+                    + (trusted ? " (trusted)" : "")) % clientPid % user);
+#endif
+
+            /* Fork a child to handle the connection. */
+            startProcess([&]() {
+                /* Background the daemon. */
+                if (setsid() == -1)
+                    throw SysError(format("creating a new session"));
+
+                /* Restore normal handling of SIGCHLD. */
+                setSigChldAction(false);
+
+                /* For debugging, stuff the pid into argv[1]. */
+                if (clientPid != -1 && argv[1]) {
+                    string processName = int2String(clientPid);
+                    strncpy(argv[1], processName.c_str(), strlen(argv[1]));
+                }
+
+                /* Handle the connection. */
+                from.fd = remote;
+                to.fd = remote;
+                processConnection(trusted);
+
+                _exit(0);
+            }, "unexpected Nix daemon error: ");
+
+        } catch (Interrupted & e) {
+            throw;
+        } catch (Error & e) {
+            printMsg(lvlError, format("error processing connection: %1%") % e.msg());
+        }
+    }
+}
+
+
+void run(Strings args)
+{
+    for (Strings::iterator i = args.begin(); i != args.end(); ) {
+        string arg = *i++;
+    }
+
+}
+
+
+int main(int argc, char * * argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+
+        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+            if (*arg == "--daemon")
+                ; /* ignored for backwards compatibility */
+            else if (*arg == "--help")
+                showManPage("nix-daemon");
+            else if (*arg == "--version")
+                printVersion("nix-daemon");
+            else return false;
+            return true;
+        });
+
+        daemonLoop(argv);
+    });
+}
diff --git a/src/nix-env/local.mk b/src/nix-env/local.mk
new file mode 100644
index 000000000000..e80719cd76f7
--- /dev/null
+++ b/src/nix-env/local.mk
@@ -0,0 +1,7 @@
+programs += nix-env
+
+nix-env_DIR := $(d)
+
+nix-env_SOURCES := $(wildcard $(d)/*.cc)
+
+nix-env_LIBS = libexpr libmain libstore libutil libformat
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
new file mode 100644
index 000000000000..325c8b928fea
--- /dev/null
+++ b/src/nix-env/nix-env.cc
@@ -0,0 +1,1439 @@
+#include "profiles.hh"
+#include "names.hh"
+#include "globals.hh"
+#include "misc.hh"
+#include "shared.hh"
+#include "eval.hh"
+#include "get-drvs.hh"
+#include "attr-path.hh"
+#include "common-opts.hh"
+#include "xml-writer.hh"
+#include "store-api.hh"
+#include "user-env.hh"
+#include "util.hh"
+#include "value-to-json.hh"
+
+#include <cerrno>
+#include <ctime>
+#include <algorithm>
+#include <iostream>
+#include <sstream>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+
+using namespace nix;
+using std::cout;
+
+
+typedef enum {
+    srcNixExprDrvs,
+    srcNixExprs,
+    srcStorePaths,
+    srcProfile,
+    srcAttrPath,
+    srcUnknown
+} InstallSourceType;
+
+
+struct InstallSourceInfo
+{
+    InstallSourceType type;
+    Path nixExprPath; /* for srcNixExprDrvs, srcNixExprs */
+    Path profile; /* for srcProfile */
+    string systemFilter; /* for srcNixExprDrvs */
+    Bindings autoArgs;
+};
+
+
+struct Globals
+{
+    InstallSourceInfo instSource;
+    Path profile;
+    std::shared_ptr<EvalState> state;
+    bool dryRun;
+    bool preserveInstalled;
+    bool removeAll;
+    string forceName;
+    bool prebuiltOnly;
+};
+
+
+typedef void (* Operation) (Globals & globals,
+    Strings opFlags, Strings opArgs);
+
+
+static string needArg(Strings::iterator & i,
+    Strings & args, const string & arg)
+{
+    if (i == args.end()) throw UsageError(
+        format("‘%1%’ requires an argument") % arg);
+    return *i++;
+}
+
+
+static bool parseInstallSourceOptions(Globals & globals,
+    Strings::iterator & i, Strings & args, const string & arg)
+{
+    if (arg == "--from-expression" || arg == "-E")
+        globals.instSource.type = srcNixExprs;
+    else if (arg == "--from-profile") {
+        globals.instSource.type = srcProfile;
+        globals.instSource.profile = needArg(i, args, arg);
+    }
+    else if (arg == "--attr" || arg == "-A")
+        globals.instSource.type = srcAttrPath;
+    else return false;
+    return true;
+}
+
+
+static bool isNixExpr(const Path & path, struct stat & st)
+{
+    return S_ISREG(st.st_mode) || (S_ISDIR(st.st_mode) && pathExists(path + "/default.nix"));
+}
+
+
+static void getAllExprs(EvalState & state,
+    const Path & path, StringSet & attrs, Value & v)
+{
+    StringSet namesSorted;
+    for (auto & i : readDirectory(path)) namesSorted.insert(i.name);
+
+    for (auto & i : namesSorted) {
+        /* Ignore the manifest.nix used by profiles.  This is
+           necessary to prevent it from showing up in channels (which
+           are implemented using profiles). */
+        if (i == "manifest.nix") continue;
+
+        Path path2 = path + "/" + i;
+
+        struct stat st;
+        if (stat(path2.c_str(), &st) == -1)
+            continue; // ignore dangling symlinks in ~/.nix-defexpr
+
+        if (isNixExpr(path2, st) && (!S_ISREG(st.st_mode) || hasSuffix(path2, ".nix"))) {
+            /* Strip off the `.nix' filename suffix (if applicable),
+               otherwise the attribute cannot be selected with the
+               `-A' option.  Useful if you want to stick a Nix
+               expression directly in ~/.nix-defexpr. */
+            string attrName = i;
+            if (hasSuffix(attrName, ".nix"))
+                attrName = string(attrName, 0, attrName.size() - 4);
+            if (attrs.find(attrName) != attrs.end()) {
+                printMsg(lvlError, format("warning: name collision in input Nix expressions, skipping ‘%1%’") % path2);
+                continue;
+            }
+            attrs.insert(attrName);
+            /* Load the expression on demand. */
+            Value & vFun(*state.allocValue());
+            Value & vArg(*state.allocValue());
+            state.getBuiltin("import", vFun);
+            mkString(vArg, path2);
+            mkApp(*state.allocAttr(v, state.symbols.create(attrName)), vFun, vArg);
+        }
+        else if (S_ISDIR(st.st_mode))
+            /* `path2' is a directory (with no default.nix in it);
+               recurse into it. */
+            getAllExprs(state, path2, attrs, v);
+    }
+}
+
+
+static void loadSourceExpr(EvalState & state, const Path & path, Value & v)
+{
+    struct stat st;
+    if (stat(path.c_str(), &st) == -1)
+        throw SysError(format("getting information about ‘%1%’") % path);
+
+    if (isNixExpr(path, st)) {
+        state.evalFile(path, v);
+        return;
+    }
+
+    /* The path is a directory.  Put the Nix expressions in the
+       directory in a set, with the file name of each expression as
+       the attribute name.  Recurse into subdirectories (but keep the
+       set flat, not nested, to make it easier for a user to have a
+       ~/.nix-defexpr directory that includes some system-wide
+       directory). */
+    if (S_ISDIR(st.st_mode)) {
+        state.mkAttrs(v, 16);
+        state.mkList(*state.allocAttr(v, state.symbols.create("_combineChannels")), 0);
+        StringSet attrs;
+        getAllExprs(state, path, attrs, v);
+        v.attrs->sort();
+    }
+}
+
+
+static void loadDerivations(EvalState & state, Path nixExprPath,
+    string systemFilter, Bindings & autoArgs,
+    const string & pathPrefix, DrvInfos & elems)
+{
+    Value vRoot;
+    loadSourceExpr(state, nixExprPath, vRoot);
+
+    Value & v(*findAlongAttrPath(state, pathPrefix, autoArgs, vRoot));
+
+    getDerivations(state, v, pathPrefix, autoArgs, elems, true);
+
+    /* Filter out all derivations not applicable to the current
+       system. */
+    for (DrvInfos::iterator i = elems.begin(), j; i != elems.end(); i = j) {
+        j = i; j++;
+        if (systemFilter != "*" && i->system != systemFilter)
+            elems.erase(i);
+    }
+}
+
+
+static Path getHomeDir()
+{
+    Path homeDir(getEnv("HOME", ""));
+    if (homeDir == "") throw Error("HOME environment variable not set");
+    return homeDir;
+}
+
+
+static Path getDefNixExprPath()
+{
+    return getHomeDir() + "/.nix-defexpr";
+}
+
+
+static int getPriority(EvalState & state, DrvInfo & drv)
+{
+    return drv.queryMetaInt("priority", 0);
+}
+
+
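+/* Return a positive value if `drv1' is preferred over `drv2', i.e. has
+   a lower numeric `priority' meta attribute (same convention as
+   compareVersions()). */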
+static int comparePriorities(EvalState & state, DrvInfo & drv1, DrvInfo & drv2)
+{
+    return getPriority(state, drv2) - getPriority(state, drv1);
+}
+
+
+// FIXME: this function is rather slow since it checks a single path
+// at a time.
+static bool isPrebuilt(EvalState & state, DrvInfo & elem)
+{
+    Path path = elem.queryOutPath();
+    if (store->isValidPath(path)) return true;
+    PathSet ps = store->querySubstitutablePaths(singleton<PathSet>(path));
+    return ps.find(path) != ps.end();
+}
+
+
+static void checkSelectorUse(DrvNames & selectors)
+{
+    /* Check that all selectors have been used. */
+    foreach (DrvNames::iterator, i, selectors)
+        if (i->hits == 0 && i->fullName != "*")
+            throw Error(format("selector ‘%1%’ matches no derivations") % i->fullName);
+}
+
+
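+/* Return the elements of `allElems' whose names match at least one of
+   the selectors in `args' (or all elements if `args' is empty).  With
+   `newestOnly', keep only the best match for each package name. */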
+static DrvInfos filterBySelector(EvalState & state, const DrvInfos & allElems,
+    const Strings & args, bool newestOnly)
+{
+    DrvNames selectors = drvNamesFromArgs(args);
+    if (selectors.empty())
+        selectors.push_back(DrvName("*"));
+
+    DrvInfos elems;
+    set<unsigned int> done;
+
+    foreach (DrvNames::iterator, i, selectors) {
+        typedef list<std::pair<DrvInfo, unsigned int> > Matches;
+        Matches matches;
+        unsigned int n = 0;
+        for (DrvInfos::const_iterator j = allElems.begin();
+             j != allElems.end(); ++j, ++n)
+        {
+            DrvName drvName(j->name);
+            if (i->matches(drvName)) {
+                i->hits++;
+                matches.push_back(std::pair<DrvInfo, unsigned int>(*j, n));
+            }
+        }
+
+        /* If `newestOnly' is set and a selector matches multiple
+           derivations with the same name, pick the one matching the
+           current system.  If there are still multiple derivations,
+           pick the one with the highest priority.  If there are still
+           multiple derivations, pick the one with the highest version.
+           Finally, if there are still multiple derivations, arbitrarily
+           pick the first one. */
+        if (newestOnly) {
+
+            /* Map from package names to derivations. */
+            typedef map<string, std::pair<DrvInfo, unsigned int> > Newest;
+            Newest newest;
+            StringSet multiple;
+
+            for (Matches::iterator j = matches.begin(); j != matches.end(); ++j) {
+                DrvName drvName(j->first.name);
+                int d = 1;
+
+                Newest::iterator k = newest.find(drvName.name);
+
+                if (k != newest.end()) {
+                    d = j->first.system == k->second.first.system ? 0 :
+                        j->first.system == settings.thisSystem ? 1 :
+                        k->second.first.system == settings.thisSystem ? -1 : 0;
+                    if (d == 0)
+                        d = comparePriorities(state, j->first, k->second.first);
+                    if (d == 0)
+                        d = compareVersions(drvName.version, DrvName(k->second.first.name).version);
+                }
+
+                if (d > 0) {
+                    newest.erase(drvName.name);
+                    newest.insert(Newest::value_type(drvName.name, *j));
+                    multiple.erase(j->first.name);
+                } else if (d == 0) {
+                    multiple.insert(j->first.name);
+                }
+            }
+
+            matches.clear();
+            for (Newest::iterator j = newest.begin(); j != newest.end(); ++j) {
+                if (multiple.find(j->second.first.name) != multiple.end())
+                    printMsg(lvlInfo,
+                        format("warning: there are multiple derivations named ‘%1%’; using the first one")
+                        % j->second.first.name);
+                matches.push_back(j->second);
+            }
+        }
+
+        /* Insert only those elements in the final list that we
+           haven't inserted before. */
+        for (Matches::iterator j = matches.begin(); j != matches.end(); ++j)
+            if (done.find(j->second) == done.end()) {
+                done.insert(j->second);
+                elems.push_back(j->first);
+            }
+    }
+
+    checkSelectorUse(selectors);
+
+    return elems;
+}
+
+
+static bool isPath(const string & s)
+{
+    return s.find('/') != string::npos;
+}
+
+
+static void queryInstSources(EvalState & state,
+    InstallSourceInfo & instSource, const Strings & args,
+    DrvInfos & elems, bool newestOnly)
+{
+    InstallSourceType type = instSource.type;
+    if (type == srcUnknown && args.size() > 0 && isPath(args.front()))
+        type = srcStorePaths;
+
+    switch (type) {
+
+        /* Get the available user environment elements from the
+           derivations specified in a Nix expression, including only
+           those with names matching any of the names in `args'. */
+        case srcUnknown:
+        case srcNixExprDrvs: {
+
+            /* Load the derivations from the (default or specified)
+               Nix expression. */
+            DrvInfos allElems;
+            loadDerivations(state, instSource.nixExprPath,
+                instSource.systemFilter, instSource.autoArgs, "", allElems);
+
+            elems = filterBySelector(state, allElems, args, newestOnly);
+
+            break;
+        }
+
+        /* Get the available user environment elements from the Nix
+           expressions specified on the command line; these should be
+           functions that take the default Nix expression file as
+           argument, e.g., if the file is `./foo.nix', then the
+           argument `x: x.bar' is equivalent to `(x: x.bar)
+           (import ./foo.nix)' = `(import ./foo.nix).bar'. */
+        case srcNixExprs: {
+
+            Value vArg;
+            loadSourceExpr(state, instSource.nixExprPath, vArg);
+
+            foreach (Strings::const_iterator, i, args) {
+                Expr * eFun = state.parseExprFromString(*i, absPath("."));
+                Value vFun, vTmp;
+                state.eval(eFun, vFun);
+                mkApp(vTmp, vFun, vArg);
+                getDerivations(state, vTmp, "", instSource.autoArgs, elems, true);
+            }
+
+            break;
+        }
+
+        /* The available user environment elements are specified as a
+           list of store paths (which may or may not be
+           derivations). */
+        case srcStorePaths: {
+
+            foreach (Strings::const_iterator, i, args) {
+                Path path = followLinksToStorePath(*i);
+
+                string name = baseNameOf(path);
+                string::size_type dash = name.find('-');
+                if (dash != string::npos)
+                    name = string(name, dash + 1);
+
+                DrvInfo elem(state, name, "", "", 0);
+
+                if (isDerivation(path)) {
+                    elem.setDrvPath(path);
+                    elem.setOutPath(findOutput(derivationFromPath(*store, path), "out"));
+                    if (name.size() >= drvExtension.size() &&
+                        string(name, name.size() - drvExtension.size()) == drvExtension)
+                        name = string(name, 0, name.size() - drvExtension.size());
+                }
+                else elem.setOutPath(path);
+
+                elems.push_back(elem);
+            }
+
+            break;
+        }
+
+        /* Get the available user environment elements from another
+           user environment.  These are then filtered as in the
+           `srcNixExprDrvs' case. */
+        case srcProfile: {
+            elems = filterBySelector(state,
+                queryInstalled(state, instSource.profile),
+                args, newestOnly);
+            break;
+        }
+
+        case srcAttrPath: {
+            Value vRoot;
+            loadSourceExpr(state, instSource.nixExprPath, vRoot);
+            foreach (Strings::const_iterator, i, args) {
+                Value & v(*findAlongAttrPath(state, *i, instSource.autoArgs, vRoot));
+                getDerivations(state, v, "", instSource.autoArgs, elems, true);
+            }
+            break;
+        }
+    }
+}
+
+
+static void printMissing(EvalState & state, DrvInfos & elems)
+{
+    PathSet targets;
+    foreach (DrvInfos::iterator, i, elems) {
+        Path drvPath = i->queryDrvPath();
+        if (drvPath != "")
+            targets.insert(drvPath);
+        else
+            targets.insert(i->queryOutPath());
+    }
+
+    printMissing(*store, targets);
+}
+
+
+static bool keep(DrvInfo & drv)
+{
+    return drv.queryMetaBool("keep", false);
+}
+
+
+static void installDerivations(Globals & globals,
+    const Strings & args, const Path & profile)
+{
+    debug(format("installing derivations"));
+
+    /* Get the set of user environment elements to be installed. */
+    DrvInfos newElems, newElemsTmp;
+    queryInstSources(*globals.state, globals.instSource, args, newElemsTmp, true);
+
+    /* If --prebuilt-only is given, filter out source-only packages. */
+    foreach (DrvInfos::iterator, i, newElemsTmp)
+        if (!globals.prebuiltOnly || isPrebuilt(*globals.state, *i))
+            newElems.push_back(*i);
+
+    StringSet newNames;
+    for (DrvInfos::iterator i = newElems.begin(); i != newElems.end(); ++i) {
+        /* `forceName' is a hack to get package names right in some
+           one-click installs, namely those where the name used in the
+           path is not the one we want (e.g., `java-front' versus
+           `java-front-0.9pre15899'). */
+        if (globals.forceName != "")
+            i->name = globals.forceName;
+        newNames.insert(DrvName(i->name).name);
+    }
+
+
+    while (true) {
+        string lockToken = optimisticLockProfile(profile);
+
+        DrvInfos allElems(newElems);
+
+        /* Add in the already installed derivations, unless they have
+           the same name as a to-be-installed element. */
+        if (!globals.removeAll) {
+            DrvInfos installedElems = queryInstalled(*globals.state, profile);
+
+            foreach (DrvInfos::iterator, i, installedElems) {
+                DrvName drvName(i->name);
+                if (!globals.preserveInstalled &&
+                    newNames.find(drvName.name) != newNames.end() &&
+                    !keep(*i))
+                    printMsg(lvlInfo, format("replacing old ‘%1%’") % i->name);
+                else
+                    allElems.push_back(*i);
+            }
+
+            foreach (DrvInfos::iterator, i, newElems)
+                printMsg(lvlInfo, format("installing ‘%1%’") % i->name);
+        }
+
+        printMissing(*globals.state, newElems);
+
+        if (globals.dryRun) return;
+
+        if (createUserEnv(*globals.state, allElems,
+                profile, settings.envKeepDerivations, lockToken)) break;
+    }
+}
+
+
+static void opInstall(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) {
+        string arg = *i++;
+        if (parseInstallSourceOptions(globals, i, opFlags, arg)) ;
+        else if (arg == "--preserve-installed" || arg == "-P")
+            globals.preserveInstalled = true;
+        else if (arg == "--remove-all" || arg == "-r")
+            globals.removeAll = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % arg);
+    }
+
+    installDerivations(globals, opArgs, globals.profile);
+}
+
+
+typedef enum { utLt, utLeq, utEq, utAlways } UpgradeType;
+
+
+static void upgradeDerivations(Globals & globals,
+    const Strings & args, UpgradeType upgradeType)
+{
+    debug(format("upgrading derivations"));
+
+    /* Upgrade works as follows: we take all currently installed
+       derivations, and for any derivation matching any selector, look
+       for a derivation in the input Nix expression that has the same
+       name and a higher version number. */
+
+    while (true) {
+        string lockToken = optimisticLockProfile(globals.profile);
+
+        DrvInfos installedElems = queryInstalled(*globals.state, globals.profile);
+
+        /* Fetch all derivations from the input file. */
+        DrvInfos availElems;
+        queryInstSources(*globals.state, globals.instSource, args, availElems, false);
+
+        /* Go through all installed derivations. */
+        DrvInfos newElems;
+        foreach (DrvInfos::iterator, i, installedElems) {
+            DrvName drvName(i->name);
+
+            try {
+
+                if (keep(*i)) {
+                    newElems.push_back(*i);
+                    continue;
+                }
+
+                /* Find the derivation in the input Nix expression
+                   with the same name that satisfies the version
+                   constraints specified by upgradeType.  If there are
+                   multiple matches, take the one with the highest
+                   priority.  If there are still multiple matches,
+                   take the one with the highest version. */
+                DrvInfos::iterator bestElem = availElems.end();
+                DrvName bestName;
+                foreach (DrvInfos::iterator, j, availElems) {
+                    DrvName newName(j->name);
+                    if (newName.name == drvName.name) {
+                        int d = comparePriorities(*globals.state, *i, *j);
+                        if (d == 0) d = compareVersions(drvName.version, newName.version);
+                        if ((upgradeType == utLt && d < 0) ||
+                            (upgradeType == utLeq && d <= 0) ||
+                            (upgradeType == utEq && d == 0) ||
+                            upgradeType == utAlways)
+                        {
+                            int d2 = -1;
+                            if (bestElem != availElems.end()) {
+                                d2 = comparePriorities(*globals.state, *bestElem, *j);
+                                if (d2 == 0) d2 = compareVersions(bestName.version, newName.version);
+                            }
+                            if (d2 < 0 && (!globals.prebuiltOnly || isPrebuilt(*globals.state, *j))) {
+                                bestElem = j;
+                                bestName = newName;
+                            }
+                        }
+                    }
+                }
+
+                if (bestElem != availElems.end() &&
+                    i->queryOutPath() !=
+                    bestElem->queryOutPath())
+                {
+                    printMsg(lvlInfo,
+                        format("upgrading ‘%1%’ to ‘%2%’")
+                        % i->name % bestElem->name);
+                    newElems.push_back(*bestElem);
+                } else newElems.push_back(*i);
+
+            } catch (Error & e) {
+                e.addPrefix(format("while trying to find an upgrade for ‘%1%’:\n") % i->name);
+                throw;
+            }
+        }
+
+        printMissing(*globals.state, newElems);
+
+        if (globals.dryRun) return;
+
+        if (createUserEnv(*globals.state, newElems,
+                globals.profile, settings.envKeepDerivations, lockToken)) break;
+    }
+}
+
+
+static void opUpgrade(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    UpgradeType upgradeType = utLt;
+    for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) {
+        string arg = *i++;
+        if (parseInstallSourceOptions(globals, i, opFlags, arg)) ;
+        else if (arg == "--lt") upgradeType = utLt;
+        else if (arg == "--leq") upgradeType = utLeq;
+        else if (arg == "--eq") upgradeType = utEq;
+        else if (arg == "--always") upgradeType = utAlways;
+        else throw UsageError(format("unknown flag ‘%1%’") % arg);
+    }
+
+    upgradeDerivations(globals, opArgs, upgradeType);
+}
+
+
+static void setMetaFlag(EvalState & state, DrvInfo & drv,
+    const string & name, const string & value)
+{
+    Value * v = state.allocValue();
+    mkString(*v, value.c_str());
+    drv.setMeta(name, v);
+}
+
+
+static void opSetFlag(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    if (opFlags.size() > 0)
+        throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+    if (opArgs.size() < 2)
+        throw UsageError("not enough arguments to ‘--set-flag’");
+
+    Strings::iterator arg = opArgs.begin();
+    string flagName = *arg++;
+    string flagValue = *arg++;
+    DrvNames selectors = drvNamesFromArgs(Strings(arg, opArgs.end()));
+
+    while (true) {
+        string lockToken = optimisticLockProfile(globals.profile);
+
+        DrvInfos installedElems = queryInstalled(*globals.state, globals.profile);
+
+        /* Update all matching derivations. */
+        foreach (DrvInfos::iterator, i, installedElems) {
+            DrvName drvName(i->name);
+            foreach (DrvNames::iterator, j, selectors)
+                if (j->matches(drvName)) {
+                    printMsg(lvlInfo, format("setting flag on ‘%1%’") % i->name);
+                    j->hits++;
+                    setMetaFlag(*globals.state, *i, flagName, flagValue);
+                    break;
+                }
+        }
+
+        checkSelectorUse(selectors);
+
+        /* Write the new user environment. */
+        if (createUserEnv(*globals.state, installedElems,
+                globals.profile, settings.envKeepDerivations, lockToken)) break;
+    }
+}
+
+
+static void opSet(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) {
+        string arg = *i++;
+        if (parseInstallSourceOptions(globals, i, opFlags, arg)) ;
+        else throw UsageError(format("unknown flag ‘%1%’") % arg);
+    }
+
+    DrvInfos elems;
+    queryInstSources(*globals.state, globals.instSource, opArgs, elems, true);
+
+    if (elems.size() != 1)
+        throw Error("--set requires exactly one derivation");
+
+    DrvInfo & drv(elems.front());
+
+    if (drv.queryDrvPath() != "") {
+        PathSet paths = singleton<PathSet>(drv.queryDrvPath());
+        printMissing(*store, paths);
+        if (globals.dryRun) return;
+        store->buildPaths(paths, globals.state->repair ? bmRepair : bmNormal);
+    }
+    else {
+        printMissing(*store, singleton<PathSet>(drv.queryOutPath()));
+        if (globals.dryRun) return;
+        store->ensurePath(drv.queryOutPath());
+    }
+
+    debug(format("switching to new user environment"));
+    Path generation = createGeneration(globals.profile, drv.queryOutPath());
+    switchLink(globals.profile, generation);
+}
+
+
+static void uninstallDerivations(Globals & globals, Strings & selectors,
+    Path & profile)
+{
+    while (true) {
+        string lockToken = optimisticLockProfile(profile);
+
+        DrvInfos installedElems = queryInstalled(*globals.state, profile);
+        DrvInfos newElems;
+
+        foreach (DrvInfos::iterator, i, installedElems) {
+            DrvName drvName(i->name);
+            bool found = false;
+            foreach (Strings::iterator, j, selectors)
+                /* !!! the repeated calls to followLinksToStorePath()
+                   are expensive; they should be pre-computed. */
+                if ((isPath(*j) && i->queryOutPath() == followLinksToStorePath(*j))
+                    || DrvName(*j).matches(drvName))
+                {
+                    printMsg(lvlInfo, format("uninstalling ‘%1%’") % i->name);
+                    found = true;
+                    break;
+                }
+            if (!found) newElems.push_back(*i);
+        }
+
+        if (globals.dryRun) return;
+
+        if (createUserEnv(*globals.state, newElems,
+                profile, settings.envKeepDerivations, lockToken)) break;
+    }
+}
+
+
+static void opUninstall(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    if (opFlags.size() > 0)
+        throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+    uninstallDerivations(globals, opArgs, globals.profile);
+}
+
+
+static bool cmpChars(char a, char b)
+{
+    return toupper(a) < toupper(b);
+}
+
+
+static bool cmpElemByName(const DrvInfo & a, const DrvInfo & b)
+{
+    return lexicographical_compare(
+        a.name.begin(), a.name.end(),
+        b.name.begin(), b.name.end(), cmpChars);
+}
+
+
+typedef list<Strings> Table;
+
+
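+/* Print the rows of `table' with left-aligned columns padded to the
+   widest entry; newlines within a cell are replaced by spaces. */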
+void printTable(Table & table)
+{
+    unsigned int nrColumns = table.size() > 0 ? table.front().size() : 0;
+
+    vector<unsigned int> widths;
+    widths.resize(nrColumns);
+
+    foreach (Table::iterator, i, table) {
+        assert(i->size() == nrColumns);
+        Strings::iterator j;
+        unsigned int column;
+        for (j = i->begin(), column = 0; j != i->end(); ++j, ++column)
+            if (j->size() > widths[column]) widths[column] = j->size();
+    }
+
+    foreach (Table::iterator, i, table) {
+        Strings::iterator j;
+        unsigned int column;
+        for (j = i->begin(), column = 0; j != i->end(); ++j, ++column) {
+            string s = *j;
+            replace(s.begin(), s.end(), '\n', ' ');
+            cout << s;
+            if (column < nrColumns - 1)
+                cout << string(widths[column] - s.size() + 2, ' ');
+        }
+        cout << std::endl;
+    }
+}
+
+
+/* This function compares the version of an element against the
+   versions in the given set of elements.  `cvLess' means that only
+   lower versions are in the set, `cvEqual' means that an equal (but no
+   higher) version is in the set, and `cvGreater' means that there is
+   at least one element with a higher version in the set.  `cvUnavail'
+   means that there are no elements with the same name in the set. */
+
+typedef enum { cvLess, cvEqual, cvGreater, cvUnavail } VersionDiff;
+
+static VersionDiff compareVersionAgainstSet(
+    const DrvInfo & elem, const DrvInfos & elems, string & version)
+{
+    DrvName name(elem.name);
+
+    VersionDiff diff = cvUnavail;
+    version = "?";
+
+    for (DrvInfos::const_iterator i = elems.begin(); i != elems.end(); ++i) {
+        DrvName name2(i->name);
+        if (name.name == name2.name) {
+            int d = compareVersions(name.version, name2.version);
+            if (d < 0) {
+                diff = cvGreater;
+                version = name2.version;
+            }
+            else if (diff != cvGreater && d == 0) {
+                diff = cvEqual;
+                version = name2.version;
+            }
+            else if (diff != cvGreater && diff != cvEqual && d > 0) {
+                diff = cvLess;
+                if (version == "" || compareVersions(version, name2.version) < 0)
+                    version = name2.version;
+            }
+        }
+    }
+
+    return diff;
+}
+
+
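+/* Emit the query results as a JSON object keyed by attribute path,
+   giving the name, system and meta attributes of each derivation. */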
+static void queryJSON(Globals & globals, vector<DrvInfo> & elems)
+{
+    JSONObject topObj(cout);
+    foreach (vector<DrvInfo>::iterator, i, elems) {
+        topObj.attr(i->attrPath);
+        JSONObject pkgObj(cout);
+
+        pkgObj.attr("name", i->name);
+        pkgObj.attr("system", i->system);
+
+        pkgObj.attr("meta");
+        JSONObject metaObj(cout);
+        StringSet metaNames = i->queryMetaNames();
+        foreach (StringSet::iterator, j, metaNames) {
+            metaObj.attr(*j);
+            Value * v = i->queryMeta(*j);
+            if (!v) {
+                printMsg(lvlError, format("derivation ‘%1%’ has invalid meta attribute ‘%2%’") % i->name % *j);
+                cout << "null";
+            } else {
+                PathSet context;
+                printValueAsJSON(*globals.state, true, *v, cout, context);
+            }
+        }
+    }
+}
+
+
+static void opQuery(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    Strings remaining;
+    string attrPath;
+
+    bool printStatus = false;
+    bool printName = true;
+    bool printAttrPath = false;
+    bool printSystem = false;
+    bool printDrvPath = false;
+    bool printOutPath = false;
+    bool printDescription = false;
+    bool printMeta = false;
+    bool compareVersions = false;
+    bool xmlOutput = false;
+    bool jsonOutput = false;
+
+    enum { sInstalled, sAvailable } source = sInstalled;
+
+    settings.readOnlyMode = true; /* makes evaluation a bit faster */
+
+    for (Strings::iterator i = opFlags.begin(); i != opFlags.end(); ) {
+        string arg = *i++;
+        if (arg == "--status" || arg == "-s") printStatus = true;
+        else if (arg == "--no-name") printName = false;
+        else if (arg == "--system") printSystem = true;
+        else if (arg == "--description") printDescription = true;
+        else if (arg == "--compare-versions" || arg == "-c") compareVersions = true;
+        else if (arg == "--drv-path") printDrvPath = true;
+        else if (arg == "--out-path") printOutPath = true;
+        else if (arg == "--meta") printMeta = true;
+        else if (arg == "--installed") source = sInstalled;
+        else if (arg == "--available" || arg == "-a") source = sAvailable;
+        else if (arg == "--xml") xmlOutput = true;
+        else if (arg == "--json") jsonOutput = true;
+        else if (arg == "--attr-path" || arg == "-P") printAttrPath = true;
+        else if (arg == "--attr" || arg == "-A")
+            attrPath = needArg(i, opFlags, arg);
+        else
+            throw UsageError(format("unknown flag ‘%1%’") % arg);
+    }
+
+
+    /* Obtain derivation information from the specified source. */
+    DrvInfos availElems, installedElems;
+
+    if (source == sInstalled || compareVersions || printStatus)
+        installedElems = queryInstalled(*globals.state, globals.profile);
+
+    if (source == sAvailable || compareVersions)
+        loadDerivations(*globals.state, globals.instSource.nixExprPath,
+            globals.instSource.systemFilter, globals.instSource.autoArgs,
+            attrPath, availElems);
+
+    DrvInfos elems_ = filterBySelector(*globals.state,
+        source == sInstalled ? installedElems : availElems,
+        opArgs, false);
+
+    DrvInfos & otherElems(source == sInstalled ? availElems : installedElems);
+
+
+    /* Sort them by name. */
+    /* !!! */
+    vector<DrvInfo> elems;
+    for (DrvInfos::iterator i = elems_.begin(); i != elems_.end(); ++i)
+        elems.push_back(*i);
+    sort(elems.begin(), elems.end(), cmpElemByName);
+
+
+    /* We only need to know the installed paths when we are querying
+       the status of the derivations. */
+    PathSet installed; /* installed paths */
+
+    if (printStatus) {
+        for (DrvInfos::iterator i = installedElems.begin();
+             i != installedElems.end(); ++i)
+            installed.insert(i->queryOutPath());
+    }
+
+
+    /* Query which paths have substitutes. */
+    PathSet validPaths, substitutablePaths;
+    if (printStatus || globals.prebuiltOnly) {
+        PathSet paths;
+        foreach (vector<DrvInfo>::iterator, i, elems)
+            try {
+                paths.insert(i->queryOutPath());
+            } catch (AssertionError & e) {
+                printMsg(lvlTalkative, format("skipping derivation named ‘%1%’ which gives an assertion failure") % i->name);
+                i->setFailed();
+            }
+        validPaths = store->queryValidPaths(paths);
+        substitutablePaths = store->querySubstitutablePaths(paths);
+    }
+
+
+    /* Print the desired columns, or XML output. */
+    if (jsonOutput) {
+        queryJSON(globals, elems);
+        return;
+    }
+
+    Table table;
+    std::ostringstream dummy;
+    XMLWriter xml(true, *(xmlOutput ? &cout : &dummy));
+    XMLOpenElement xmlRoot(xml, "items");
+
+    foreach (vector<DrvInfo>::iterator, i, elems) {
+        try {
+            if (i->hasFailed()) continue;
+
+            startNest(nest, lvlDebug, format("outputting query result ‘%1%’") % i->attrPath);
+
+            if (globals.prebuiltOnly &&
+                validPaths.find(i->queryOutPath()) == validPaths.end() &&
+                substitutablePaths.find(i->queryOutPath()) == substitutablePaths.end())
+                continue;
+
+            /* For table output. */
+            Strings columns;
+
+            /* For XML output. */
+            XMLAttrs attrs;
+
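+            /* Status column: I = installed in this profile, P = present
+               (valid) in the local store, S = substitutable. */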
+            if (printStatus) {
+                Path outPath = i->queryOutPath();
+                bool hasSubs = substitutablePaths.find(outPath) != substitutablePaths.end();
+                bool isInstalled = installed.find(outPath) != installed.end();
+                bool isValid = validPaths.find(outPath) != validPaths.end();
+                if (xmlOutput) {
+                    attrs["installed"] = isInstalled ? "1" : "0";
+                    attrs["valid"] = isValid ? "1" : "0";
+                    attrs["substitutable"] = hasSubs ? "1" : "0";
+                } else
+                    columns.push_back(
+                        (string) (isInstalled ? "I" : "-")
+                        + (isValid ? "P" : "-")
+                        + (hasSubs ? "S" : "-"));
+            }
+
+            if (xmlOutput)
+                attrs["attrPath"] = i->attrPath;
+            else if (printAttrPath)
+                columns.push_back(i->attrPath);
+
+            if (xmlOutput)
+                attrs["name"] = i->name;
+            else if (printName)
+                columns.push_back(i->name);
+
+            if (compareVersions) {
+                /* Compare this element against the versions of the
+                   same named packages in either the set of available
+                   elements, or the set of installed elements.  !!!
+                   This is O(N * M), should be O(N * lg M). */
+                string version;
+                VersionDiff diff = compareVersionAgainstSet(*i, otherElems, version);
+
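+                /* '<': a newer version exists in the other set, '=':
+                   the same version exists, '>': only older versions
+                   exist, '-': no package with this name exists. */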
+                char ch;
+                switch (diff) {
+                    case cvLess: ch = '>'; break;
+                    case cvEqual: ch = '='; break;
+                    case cvGreater: ch = '<'; break;
+                    case cvUnavail: ch = '-'; break;
+                    default: abort();
+                }
+
+                if (xmlOutput) {
+                    if (diff != cvUnavail) {
+                        attrs["versionDiff"] = ch;
+                        attrs["maxComparedVersion"] = version;
+                    }
+                } else {
+                    string column = (string) "" + ch + " " + version;
+                    if (diff == cvGreater && isatty(STDOUT_FILENO))
+                        column = ANSI_RED + column + ANSI_NORMAL;
+                    columns.push_back(column);
+                }
+            }
+
+            if (xmlOutput) {
+                if (i->system != "") attrs["system"] = i->system;
+            }
+            else if (printSystem)
+                columns.push_back(i->system);
+
+            if (printDrvPath) {
+                string drvPath = i->queryDrvPath();
+                if (xmlOutput) {
+                    if (drvPath != "") attrs["drvPath"] = drvPath;
+                } else
+                    columns.push_back(drvPath == "" ? "-" : drvPath);
+            }
+
+            if (printOutPath && !xmlOutput) {
+                DrvInfo::Outputs outputs = i->queryOutputs();
+                string s;
+                foreach (DrvInfo::Outputs::iterator, j, outputs) {
+                    if (!s.empty()) s += ';';
+                    if (j->first != "out") { s += j->first; s += "="; }
+                    s += j->second;
+                }
+                columns.push_back(s);
+            }
+
+            if (printDescription) {
+                string descr = i->queryMetaString("description");
+                if (xmlOutput) {
+                    if (descr != "") attrs["description"] = descr;
+                } else
+                    columns.push_back(descr);
+            }
+
+            if (xmlOutput) {
+                if (printOutPath || printMeta) {
+                    XMLOpenElement item(xml, "item", attrs);
+                    if (printOutPath) {
+                        DrvInfo::Outputs outputs = i->queryOutputs();
+                        foreach (DrvInfo::Outputs::iterator, j, outputs) {
+                            XMLAttrs attrs2;
+                            attrs2["name"] = j->first;
+                            attrs2["path"] = j->second;
+                            xml.writeEmptyElement("output", attrs2);
+                        }
+                    }
+                    if (printMeta) {
+                        StringSet metaNames = i->queryMetaNames();
+                        foreach (StringSet::iterator, j, metaNames) {
+                            XMLAttrs attrs2;
+                            attrs2["name"] = *j;
+                            Value * v = i->queryMeta(*j);
+                            if (!v)
+                                printMsg(lvlError, format("derivation ‘%1%’ has invalid meta attribute ‘%2%’") % i->name % *j);
+                            else {
+                                if (v->type == tString) {
+                                    attrs2["type"] = "string";
+                                    attrs2["value"] = v->string.s;
+                                    xml.writeEmptyElement("meta", attrs2);
+                                } else if (v->type == tInt) {
+                                    attrs2["type"] = "int";
+                                    attrs2["value"] = (format("%1%") % v->integer).str();
+                                    xml.writeEmptyElement("meta", attrs2);
+                                } else if (v->type == tBool) {
+                                    attrs2["type"] = "bool";
+                                    attrs2["value"] = v->boolean ? "true" : "false";
+                                    xml.writeEmptyElement("meta", attrs2);
+                                } else if (v->type == tList) {
+                                    attrs2["type"] = "strings";
+                                    XMLOpenElement m(xml, "meta", attrs2);
+                                    for (unsigned int j = 0; j < v->list.length; ++j) {
+                                        if (v->list.elems[j]->type != tString) continue;
+                                        XMLAttrs attrs3;
+                                        attrs3["value"] = v->list.elems[j]->string.s;
+                                        xml.writeEmptyElement("string", attrs3);
+                                    }
+                                }
+                            }
+                        }
+                    }
+                } else
+                    xml.writeEmptyElement("item", attrs);
+            } else
+                table.push_back(columns);
+
+            cout.flush();
+
+        } catch (AssertionError & e) {
+            printMsg(lvlTalkative, format("skipping derivation named ‘%1%’ which gives an assertion failure") % i->name);
+        } catch (Error & e) {
+            e.addPrefix(format("while querying the derivation named ‘%1%’:\n") % i->name);
+            throw;
+        }
+    }
+
+    if (!xmlOutput) printTable(table);
+}
+
+
+static void opSwitchProfile(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    if (opFlags.size() > 0)
+        throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+    if (opArgs.size() != 1)
+        throw UsageError(format("exactly one argument expected"));
+
+    Path profile = absPath(opArgs.front());
+    Path profileLink = getHomeDir() + "/.nix-profile";
+
+    switchLink(profileLink, profile);
+}
+
+
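+/* Sentinel value meaning ‘the generation preceding the current one’;
+   used by the --rollback operation. */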
+static const int prevGen = -2;
+
+
+static void switchGeneration(Globals & globals, int dstGen)
+{
+    PathLocks lock;
+    lockProfile(lock, globals.profile);
+
+    int curGen;
+    Generations gens = findGenerations(globals.profile, curGen);
+
+    Generation dst;
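+    /* The generations are sorted by number, so for prevGen this selects
+       the newest generation older than the current one. */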
+    for (Generations::iterator i = gens.begin(); i != gens.end(); ++i)
+        if ((dstGen == prevGen && i->number < curGen) ||
+            (dstGen >= 0 && i->number == dstGen))
+            dst = *i;
+
+    if (!dst) {
+        if (dstGen == prevGen)
+            throw Error(format("no generation older than the current (%1%) exists")
+                % curGen);
+        else
+            throw Error(format("generation %1% does not exist") % dstGen);
+    }
+
+    printMsg(lvlInfo, format("switching from generation %1% to %2%")
+        % curGen % dst.number);
+
+    if (globals.dryRun) return;
+
+    switchLink(globals.profile, dst.path);
+}
+
+
+static void opSwitchGeneration(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    if (opFlags.size() > 0)
+        throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+    if (opArgs.size() != 1)
+        throw UsageError(format("exactly one argument expected"));
+
+    int dstGen;
+    if (!string2Int(opArgs.front(), dstGen))
+        throw UsageError(format("expected a generation number"));
+
+    switchGeneration(globals, dstGen);
+}
+
+
+static void opRollback(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    if (opFlags.size() > 0)
+        throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+    if (opArgs.size() != 0)
+        throw UsageError(format("no arguments expected"));
+
+    switchGeneration(globals, prevGen);
+}
+
+
+static void opListGenerations(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    if (opFlags.size() > 0)
+        throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+    if (opArgs.size() != 0)
+        throw UsageError(format("no arguments expected"));
+
+    PathLocks lock;
+    lockProfile(lock, globals.profile);
+
+    int curGen;
+    Generations gens = findGenerations(globals.profile, curGen);
+
+    for (Generations::iterator i = gens.begin(); i != gens.end(); ++i) {
+        tm t;
+        if (!localtime_r(&i->creationTime, &t)) throw Error("cannot convert time");
+        cout << format("%|4|   %|4|-%|02|-%|02| %|02|:%|02|:%|02|   %||\n")
+            % i->number
+            % (t.tm_year + 1900) % (t.tm_mon + 1) % t.tm_mday
+            % t.tm_hour % t.tm_min % t.tm_sec
+            % (i->number == curGen ? "(current)" : "");
+    }
+}
+
+
+static void deleteGeneration2(Globals & globals, unsigned int gen)
+{
+    if (globals.dryRun)
+        printMsg(lvlInfo, format("would remove generation %1%") % gen);
+    else {
+        printMsg(lvlInfo, format("removing generation %1%") % gen);
+        deleteGeneration(globals.profile, gen);
+    }
+}
+
+
+static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opArgs)
+{
+    if (opFlags.size() > 0)
+        throw UsageError(format("unknown flag ‘%1%’") % opFlags.front());
+
+    PathLocks lock;
+    lockProfile(lock, globals.profile);
+
+    int curGen;
+    Generations gens = findGenerations(globals.profile, curGen);
+
+    for (Strings::iterator i = opArgs.begin(); i != opArgs.end(); ++i) {
+
+        if (*i == "old") {
+            for (Generations::iterator j = gens.begin(); j != gens.end(); ++j)
+                if (j->number != curGen)
+                    deleteGeneration2(globals, j->number);
+        } else if (i->size() >= 2 && tolower(*i->rbegin()) == 'd') {
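+            /* A specifier of the form ‘<N>d’ deletes every generation
+               older than <N> days, except the most recent generation that
+               was already active at that point in time. */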
+            time_t curTime = time(NULL);
+            time_t oldTime;
+            string strDays = string(*i, 0, i->size() - 1);
+            int days;
+
+            if (!string2Int(strDays, days) || days < 1)
+                throw UsageError(format("invalid number of days specifier ‘%1%’") % *i);
+
+            oldTime = curTime - days * 24 * 3600;
+
+            bool canDelete = false;
+            for (Generations::reverse_iterator j = gens.rbegin(); j != gens.rend(); ++j) {
+                if (canDelete) {
+                    assert(j->creationTime < oldTime);
+                    deleteGeneration2(globals, j->number);
+                } else if (j->creationTime < oldTime) {
+                    /* We may now start deleting generations, but we don't delete
+                       this generation yet, because this generation was still the
+                       one that was active at the requested point in time. */
+                    canDelete = true;
+                }
+            }
+        } else {
+            int n;
+            if (!string2Int(*i, n) || n < 0)
+                throw UsageError(format("invalid generation specifier ‘%1%’")  % *i);
+            bool found = false;
+            for (Generations::iterator j = gens.begin(); j != gens.end(); ++j) {
+                if (j->number == n) {
+                    deleteGeneration2(globals, j->number);
+                    found = true;
+                    break;
+                }
+            }
+            if (!found)
+                printMsg(lvlError, format("generation %1% does not exist") % n);
+        }
+    }
+}
+
+
+int main(int argc, char * * argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+
+        Strings opFlags, opArgs, searchPath;
+        std::map<string, string> autoArgs_;
+        Operation op = 0;
+        bool repair = false;
+        string file;
+
+        Globals globals;
+
+        globals.instSource.type = srcUnknown;
+        globals.instSource.nixExprPath = getDefNixExprPath();
+        globals.instSource.systemFilter = "*";
+
+        globals.dryRun = false;
+        globals.preserveInstalled = false;
+        globals.removeAll = false;
+        globals.prebuiltOnly = false;
+
+        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+            Operation oldOp = op;
+
+            if (*arg == "--help")
+                showManPage("nix-env");
+            else if (*arg == "--version")
+                printVersion("nix-env");
+            else if (*arg == "--install" || *arg == "-i")
+                op = opInstall;
+            else if (parseAutoArgs(arg, end, autoArgs_))
+                ;
+            else if (parseSearchPathArg(arg, end, searchPath))
+                ;
+            else if (*arg == "--force-name") // undocumented flag for nix-install-package
+                globals.forceName = getArg(*arg, arg, end);
+            else if (*arg == "--uninstall" || *arg == "-e")
+                op = opUninstall;
+            else if (*arg == "--upgrade" || *arg == "-u")
+                op = opUpgrade;
+            else if (*arg == "--set-flag")
+                op = opSetFlag;
+            else if (*arg == "--set")
+                op = opSet;
+            else if (*arg == "--query" || *arg == "-q")
+                op = opQuery;
+            else if (*arg == "--profile" || *arg == "-p")
+                globals.profile = absPath(getArg(*arg, arg, end));
+            else if (*arg == "--file" || *arg == "-f")
+                file = getArg(*arg, arg, end);
+            else if (*arg == "--switch-profile" || *arg == "-S")
+                op = opSwitchProfile;
+            else if (*arg == "--switch-generation" || *arg == "-G")
+                op = opSwitchGeneration;
+            else if (*arg == "--rollback")
+                op = opRollback;
+            else if (*arg == "--list-generations")
+                op = opListGenerations;
+            else if (*arg == "--delete-generations")
+                op = opDeleteGenerations;
+            else if (*arg == "--dry-run") {
+                printMsg(lvlInfo, "(dry run; not doing anything)");
+                globals.dryRun = true;
+            }
+            else if (*arg == "--system-filter")
+                globals.instSource.systemFilter = getArg(*arg, arg, end);
+            else if (*arg == "--prebuilt-only" || *arg == "-b")
+                globals.prebuiltOnly = true;
+            else if (*arg == "--repair")
+                repair = true;
+            else if (*arg != "" && arg->at(0) == '-') {
+                opFlags.push_back(*arg);
+                /* FIXME: hacky */
+                if (*arg == "--from-profile" ||
+                    (op == opQuery && (*arg == "--attr" || *arg == "-A")))
+                    opFlags.push_back(getArg(*arg, arg, end));
+            }
+            else
+                opArgs.push_back(*arg);
+
+            if (oldOp && oldOp != op)
+                throw UsageError("only one operation may be specified");
+
+            return true;
+        });
+
+        if (!op) throw UsageError("no operation specified");
+
+        globals.state = std::shared_ptr<EvalState>(new EvalState(searchPath));
+        globals.state->repair = repair;
+
+        if (file != "")
+            globals.instSource.nixExprPath = lookupFileArg(*globals.state, file);
+
+        evalAutoArgs(*globals.state, autoArgs_, globals.instSource.autoArgs);
+
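+        /* If no profile was specified on the command line or via
+           $NIX_PROFILE, use the target of the ~/.nix-profile symlink,
+           falling back to the default system profile. */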
+        if (globals.profile == "")
+            globals.profile = getEnv("NIX_PROFILE", "");
+
+        if (globals.profile == "") {
+            Path profileLink = getHomeDir() + "/.nix-profile";
+            globals.profile = pathExists(profileLink)
+                ? absPath(readLink(profileLink), dirOf(profileLink))
+                : canonPath(settings.nixStateDir + "/profiles/default");
+        }
+
+        store = openStore();
+
+        op(globals, opFlags, opArgs);
+
+        globals.state->printStats();
+    });
+}
diff --git a/src/nix-env/profiles.cc b/src/nix-env/profiles.cc
new file mode 100644
index 000000000000..d8eb0ef5269c
--- /dev/null
+++ b/src/nix-env/profiles.cc
@@ -0,0 +1,146 @@
+#include "profiles.hh"
+#include "store-api.hh"
+#include "util.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+
+
+namespace nix {
+
+
+static bool cmpGensByNumber(const Generation & a, const Generation & b)
+{
+    return a.number < b.number;
+}
+
+
+/* Parse a generation name of the format
+   `<profilename>-<number>-link'. */
+static int parseName(const string & profileName, const string & name)
+{
+    if (string(name, 0, profileName.size() + 1) != profileName + "-") return -1;
+    string s = string(name, profileName.size() + 1);
+    string::size_type p = s.find("-link");
+    if (p == string::npos) return -1;
+    int n;
+    if (string2Int(string(s, 0, p), n) && n >= 0)
+        return n;
+    else
+        return -1;
+}
+
+
+Generations findGenerations(Path profile, int & curGen)
+{
+    Generations gens;
+
+    Path profileDir = dirOf(profile);
+    string profileName = baseNameOf(profile);
+
+    for (auto & i : readDirectory(profileDir)) {
+        int n;
+        if ((n = parseName(profileName, i.name)) != -1) {
+            Generation gen;
+            gen.path = profileDir + "/" + i.name;
+            gen.number = n;
+            struct stat st;
+            if (lstat(gen.path.c_str(), &st) != 0)
+                throw SysError(format("statting ‘%1%’") % gen.path);
+            gen.creationTime = st.st_mtime;
+            gens.push_back(gen);
+        }
+    }
+
+    gens.sort(cmpGensByNumber);
+
+    curGen = pathExists(profile)
+        ? parseName(profileName, readLink(profile))
+        : -1;
+
+    return gens;
+}
+
+
+static void makeName(const Path & profile, unsigned int num,
+    Path & outLink)
+{
+    Path prefix = (format("%1%-%2%") % profile % num).str();
+    outLink = prefix + "-link";
+}
+
+
+Path createGeneration(Path profile, Path outPath)
+{
+    /* The new generation number should be higher than the previous
+       ones. */
+    int dummy;
+    Generations gens = findGenerations(profile, dummy);
+    unsigned int num = gens.size() > 0 ? gens.back().number : 0;
+
+    /* Create the new generation.  Note that addPermRoot() blocks if
+       the garbage collector is running to prevent the stuff we've
+       built from moving from the temporary roots (which the GC knows)
+       to the permanent roots (of which the GC would have a stale
+       view).  If we didn't do it this way, the GC might remove the
+       user environment etc. we've just built. */
+    Path generation;
+    makeName(profile, num + 1, generation);
+    addPermRoot(*store, outPath, generation, false, true);
+
+    return generation;
+}
+
+
+static void removeFile(const Path & path)
+{
+    if (remove(path.c_str()) == -1)
+        throw SysError(format("cannot unlink ‘%1%’") % path);
+}
+
+
+void deleteGeneration(const Path & profile, unsigned int gen)
+{
+    Path generation;
+    makeName(profile, gen, generation);
+    removeFile(generation);
+}
+
+
+void switchLink(Path link, Path target)
+{
+    /* Hacky. */
+    if (dirOf(target) == dirOf(link)) target = baseNameOf(target);
+
+    Path tmp = canonPath(dirOf(link) + "/.new_" + baseNameOf(link));
+    createSymlink(target, tmp);
+    /* The rename() system call is supposed to be essentially atomic
+       on Unix.  That is, if we have links `current -> X' and
+       `new_current -> Y', and we rename new_current to current, a
+       process accessing current will see X or Y, but never a
+       file-not-found or other error condition.  This is sufficient to
+       atomically switch user environments. */
+    if (rename(tmp.c_str(), link.c_str()) != 0)
+        throw SysError(format("renaming ‘%1%’ to ‘%2%’") % tmp % link);
+}
+
+
+void lockProfile(PathLocks & lock, const Path & profile)
+{
+    lock.lockPaths(singleton<PathSet>(profile),
+        (format("waiting for lock on profile ‘%1%’") % profile).str());
+    lock.setDeletion(true);
+}
+
+
+string optimisticLockProfile(const Path & profile)
+{
+    return pathExists(profile) ? readLink(profile) : "";
+}
+
+
+}
diff --git a/src/nix-env/profiles.hh b/src/nix-env/profiles.hh
new file mode 100644
index 000000000000..30d2376d998c
--- /dev/null
+++ b/src/nix-env/profiles.hh
@@ -0,0 +1,55 @@
+#pragma once
+
+#include "types.hh"
+#include "pathlocks.hh"
+
+#include <time.h>
+
+
+namespace nix {
+
+
+struct Generation
+{
+    int number;
+    Path path;
+    time_t creationTime;
+    Generation()
+    {
+        number = -1;
+    }
+    operator bool() const
+    {
+        return number != -1;
+    }
+};
+
+typedef list<Generation> Generations;
+
+
+/* Returns the list of currently present generations for the specified
+   profile, sorted by generation number. */
+Generations findGenerations(Path profile, int & curGen);
+
+Path createGeneration(Path profile, Path outPath);
+
+void deleteGeneration(const Path & profile, unsigned int gen);
+
+void switchLink(Path link, Path target);
+
+/* Ensure exclusive access to a profile.  Any command that modifies
+   the profile first acquires this lock. */
+void lockProfile(PathLocks & lock, const Path & profile);
+
+/* Optimistic locking is used by long-running operations like `nix-env
+   -i'.  Instead of acquiring the exclusive lock for the entire
+   duration of the operation, we just perform the operation
+   optimistically (without an exclusive lock), and check at the end
+   whether the profile changed while we were busy (i.e., the symlink
+   target changed).  If so, the operation is restarted.  Restarting is
+   generally cheap, since the build results are still in the Nix
+   store.  Most of the time, only the user environment has to be
+   rebuilt. */
+string optimisticLockProfile(const Path & profile);
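+
+/* Typical pattern: read the token with optimisticLockProfile(), do the
+   expensive work without holding the lock, then pass the token to
+   createUserEnv() (see user-env.cc), which re-checks it under the
+   exclusive lock and returns false if the profile changed in the
+   meantime, in which case the whole operation is retried. */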
+
+}
diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc
new file mode 100644
index 000000000000..3ebd6c1f2362
--- /dev/null
+++ b/src/nix-env/user-env.cc
@@ -0,0 +1,151 @@
+#include "user-env.hh"
+#include "util.hh"
+#include "derivations.hh"
+#include "store-api.hh"
+#include "globals.hh"
+#include "shared.hh"
+#include "eval.hh"
+#include "eval-inline.hh"
+#include "profiles.hh"
+
+
+namespace nix {
+
+
+DrvInfos queryInstalled(EvalState & state, const Path & userEnv)
+{
+    DrvInfos elems;
+    Path manifestFile = userEnv + "/manifest.nix";
+    if (pathExists(manifestFile)) {
+        Value v;
+        state.evalFile(manifestFile, v);
+        Bindings bindings;
+        getDerivations(state, v, "", bindings, elems, false);
+    }
+    return elems;
+}
+
+
+bool createUserEnv(EvalState & state, DrvInfos & elems,
+    const Path & profile, bool keepDerivations,
+    const string & lockToken)
+{
+    /* Build the components in the user environment, if they don't
+       exist already. */
+    PathSet drvsToBuild;
+    foreach (DrvInfos::iterator, i, elems)
+        if (i->queryDrvPath() != "")
+            drvsToBuild.insert(i->queryDrvPath());
+
+    debug(format("building user environment dependencies"));
+    store->buildPaths(drvsToBuild, state.repair ? bmRepair : bmNormal);
+
+    /* Construct the whole top level derivation. */
+    PathSet references;
+    Value manifest;
+    state.mkList(manifest, elems.size());
+    unsigned int n = 0;
+    foreach (DrvInfos::iterator, i, elems) {
+        /* Create a pseudo-derivation containing the name, system,
+           output paths, and optionally the derivation path, as well
+           as the meta attributes. */
+        Path drvPath = keepDerivations ? i->queryDrvPath() : "";
+
+        Value & v(*state.allocValue());
+        manifest.list.elems[n++] = &v;
+        state.mkAttrs(v, 16);
+
+        mkString(*state.allocAttr(v, state.sType), "derivation");
+        mkString(*state.allocAttr(v, state.sName), i->name);
+        if (!i->system.empty())
+            mkString(*state.allocAttr(v, state.sSystem), i->system);
+        mkString(*state.allocAttr(v, state.sOutPath), i->queryOutPath());
+        if (drvPath != "")
+            mkString(*state.allocAttr(v, state.sDrvPath), i->queryDrvPath());
+
+        // Copy each output.
+        DrvInfo::Outputs outputs = i->queryOutputs();
+        Value & vOutputs = *state.allocAttr(v, state.sOutputs);
+        state.mkList(vOutputs, outputs.size());
+        unsigned int m = 0;
+        foreach (DrvInfo::Outputs::iterator, j, outputs) {
+            mkString(*(vOutputs.list.elems[m++] = state.allocValue()), j->first);
+            Value & vOutputs = *state.allocAttr(v, state.symbols.create(j->first));
+            state.mkAttrs(vOutputs, 2);
+            mkString(*state.allocAttr(vOutputs, state.sOutPath), j->second);
+
+            /* This is only necessary when installing store paths, e.g.,
+               `nix-env -i /nix/store/abcd...-foo'. */
+            store->addTempRoot(j->second);
+            store->ensurePath(j->second);
+
+            references.insert(j->second);
+        }
+
+        // Copy the meta attributes.
+        Value & vMeta = *state.allocAttr(v, state.sMeta);
+        state.mkAttrs(vMeta, 16);
+        StringSet metaNames = i->queryMetaNames();
+        foreach (StringSet::iterator, j, metaNames) {
+            Value * v = i->queryMeta(*j);
+            if (!v) continue;
+            vMeta.attrs->push_back(Attr(state.symbols.create(*j), v));
+        }
+        vMeta.attrs->sort();
+        v.attrs->sort();
+
+        if (drvPath != "") references.insert(drvPath);
+    }
+
+    /* Also write a copy of the list of user environment elements to
+       the store; we need it for future modifications of the
+       environment. */
+    Path manifestFile = store->addTextToStore("env-manifest.nix",
+        (format("%1%") % manifest).str(), references);
+
+    /* Get the environment builder expression. */
+    Value envBuilder;
+    state.evalFile(state.findFile("nix/buildenv.nix"), envBuilder);
+
+    /* Construct a Nix expression that calls the user environment
+       builder with the manifest as argument. */
+    Value args, topLevel;
+    state.mkAttrs(args, 3);
+    mkString(*state.allocAttr(args, state.symbols.create("manifest")),
+        manifestFile, singleton<PathSet>(manifestFile));
+    args.attrs->push_back(Attr(state.symbols.create("derivations"), &manifest));
+    args.attrs->sort();
+    mkApp(topLevel, envBuilder, args);
+
+    /* Evaluate it. */
+    debug("evaluating user environment builder");
+    state.forceValue(topLevel);
+    PathSet context;
+    Attr & aDrvPath(*topLevel.attrs->find(state.sDrvPath));
+    Path topLevelDrv = state.coerceToPath(aDrvPath.pos ? *(aDrvPath.pos) : noPos, *(aDrvPath.value), context);
+    Attr & aOutPath(*topLevel.attrs->find(state.sOutPath));
+    Path topLevelOut = state.coerceToPath(aOutPath.pos ? *(aOutPath.pos) : noPos, *(aOutPath.value), context);
+
+    /* Realise the resulting store expression. */
+    debug("building user environment");
+    store->buildPaths(singleton<PathSet>(topLevelDrv), state.repair ? bmRepair : bmNormal);
+
+    /* Switch the current user environment to the output path. */
+    PathLocks lock;
+    lockProfile(lock, profile);
+
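+    /* Compare the profile's current symlink target with the token taken
+       before we started; if it changed, somebody else modified the
+       profile concurrently and the caller has to retry. */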
+    Path lockTokenCur = optimisticLockProfile(profile);
+    if (lockToken != lockTokenCur) {
+        printMsg(lvlError, format("profile ‘%1%’ changed while we were busy; restarting") % profile);
+        return false;
+    }
+
+    debug(format("switching to new user environment"));
+    Path generation = createGeneration(profile, topLevelOut);
+    switchLink(profile, generation);
+
+    return true;
+}
+
+
+}
diff --git a/src/nix-env/user-env.hh b/src/nix-env/user-env.hh
new file mode 100644
index 000000000000..f188efe9b4a9
--- /dev/null
+++ b/src/nix-env/user-env.hh
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "get-drvs.hh"
+
+namespace nix {
+
+DrvInfos queryInstalled(EvalState & state, const Path & userEnv);
+
+bool createUserEnv(EvalState & state, DrvInfos & elems,
+    const Path & profile, bool keepDerivations,
+    const string & lockToken);
+
+}
diff --git a/src/nix-hash/local.mk b/src/nix-hash/local.mk
new file mode 100644
index 000000000000..7c290ca8466e
--- /dev/null
+++ b/src/nix-hash/local.mk
@@ -0,0 +1,7 @@
+programs += nix-hash
+
+nix-hash_DIR := $(d)
+
+nix-hash_SOURCES := $(d)/nix-hash.cc
+
+nix-hash_LIBS = libmain libstore libutil libformat
diff --git a/src/nix-hash/nix-hash.cc b/src/nix-hash/nix-hash.cc
new file mode 100644
index 000000000000..8035162aea37
--- /dev/null
+++ b/src/nix-hash/nix-hash.cc
@@ -0,0 +1,63 @@
+#include "hash.hh"
+#include "shared.hh"
+
+#include <iostream>
+
+using namespace nix;
+
+
+int main(int argc, char * * argv)
+{
+    HashType ht = htMD5;
+    bool flat = false;
+    bool base32 = false;
+    bool truncate = false;
+    enum { opHash, opTo32, opTo16 } op = opHash;
+
+    Strings ss;
+
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+
+        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+            if (*arg == "--help")
+                showManPage("nix-hash");
+            else if (*arg == "--version")
+                printVersion("nix-hash");
+            else if (*arg == "--flat") flat = true;
+            else if (*arg == "--base32") base32 = true;
+            else if (*arg == "--truncate") truncate = true;
+            else if (*arg == "--type") {
+                string s = getArg(*arg, arg, end);
+                ht = parseHashType(s);
+                if (ht == htUnknown)
+                    throw UsageError(format("unknown hash type ‘%1%’") % s);
+            }
+            else if (*arg == "--to-base16") op = opTo16;
+            else if (*arg == "--to-base32") op = opTo32;
+            else if (*arg != "" && arg->at(0) == '-')
+                return false;
+            else
+                ss.push_back(*arg);
+            return true;
+        });
+
+        if (op == opHash) {
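+            /* Hash each argument: by default the NAR serialisation of the
+               path, or the flat file contents if --flat was given. */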
+            for (auto & i : ss) {
+                Hash h = flat ? hashFile(ht, i) : hashPath(ht, i).first;
+                if (truncate && h.hashSize > 20) h = compressHash(h, 20);
+                std::cout << format("%1%\n") %
+                    (base32 ? printHash32(h) : printHash(h));
+            }
+        }
+
+        else {
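+            /* --to-base16 / --to-base32: convert the given hashes between
+               base-16 and base-32 representations. */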
+            for (auto & i : ss) {
+                Hash h = parseHash16or32(ht, i);
+                std::cout << format("%1%\n") %
+                    (op == opTo16 ? printHash(h) : printHash32(h));
+            }
+        }
+    });
+}
+
diff --git a/src/nix-instantiate/local.mk b/src/nix-instantiate/local.mk
new file mode 100644
index 000000000000..7d1bc5ec9dfb
--- /dev/null
+++ b/src/nix-instantiate/local.mk
@@ -0,0 +1,7 @@
+programs += nix-instantiate
+
+nix-instantiate_DIR := $(d)
+
+nix-instantiate_SOURCES := $(d)/nix-instantiate.cc
+
+nix-instantiate_LIBS = libexpr libmain libstore libutil libformat
diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc
new file mode 100644
index 000000000000..5080160c4015
--- /dev/null
+++ b/src/nix-instantiate/nix-instantiate.cc
@@ -0,0 +1,196 @@
+#include "globals.hh"
+#include "shared.hh"
+#include "eval.hh"
+#include "eval-inline.hh"
+#include "get-drvs.hh"
+#include "attr-path.hh"
+#include "value-to-xml.hh"
+#include "value-to-json.hh"
+#include "util.hh"
+#include "store-api.hh"
+#include "common-opts.hh"
+#include "misc.hh"
+
+#include <map>
+#include <iostream>
+
+
+using namespace nix;
+
+
+static Expr * parseStdin(EvalState & state)
+{
+    startNest(nest, lvlTalkative, format("parsing standard input"));
+    return state.parseExprFromString(drainFD(0), absPath("."));
+}
+
+
+static Path gcRoot;
+static int rootNr = 0;
+static bool indirectRoot = false;
+
+
+enum OutputKind { okPlain, okXML, okJSON };
+
+
+void processExpr(EvalState & state, const Strings & attrPaths,
+    bool parseOnly, bool strict, Bindings & autoArgs,
+    bool evalOnly, OutputKind output, bool location, Expr * e)
+{
+    if (parseOnly) {
+        std::cout << format("%1%\n") % *e;
+        return;
+    }
+
+    Value vRoot;
+    state.eval(e, vRoot);
+
+    foreach (Strings::const_iterator, i, attrPaths) {
+        Value & v(*findAlongAttrPath(state, *i, autoArgs, vRoot));
+        state.forceValue(v);
+
+        PathSet context;
+        if (evalOnly) {
+            Value vRes;
+            if (autoArgs.empty())
+                vRes = v;
+            else
+                state.autoCallFunction(autoArgs, v, vRes);
+            if (output == okXML)
+                printValueAsXML(state, strict, location, vRes, std::cout, context);
+            else if (output == okJSON)
+                printValueAsJSON(state, strict, vRes, std::cout, context);
+            else {
+                if (strict) state.strictForceValue(vRes);
+                std::cout << vRes << std::endl;
+            }
+        } else {
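+            /* Instantiation proper: evaluate the expression to one or
+               more derivations and print their store derivation paths,
+               optionally registering them as GC roots. */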
+            DrvInfos drvs;
+            getDerivations(state, v, "", autoArgs, drvs, false);
+            foreach (DrvInfos::iterator, i, drvs) {
+                Path drvPath = i->queryDrvPath();
+
+                /* What output do we want? */
+                string outputName = i->queryOutputName();
+                if (outputName == "")
+                    throw Error(format("derivation ‘%1%’ lacks an ‘outputName’ attribute ") % drvPath);
+
+                if (gcRoot == "")
+                    printGCWarning();
+                else {
+                    Path rootName = gcRoot;
+                    if (++rootNr > 1) rootName += "-" + int2String(rootNr);
+                    drvPath = addPermRoot(*store, drvPath, rootName, indirectRoot);
+                }
+                std::cout << format("%1%%2%\n") % drvPath % (outputName != "out" ? "!" + outputName : "");
+            }
+        }
+    }
+}
+
+
+int main(int argc, char * * argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+
+        Strings files, searchPath;
+        bool readStdin = false;
+        bool fromArgs = false;
+        bool findFile = false;
+        bool evalOnly = false;
+        bool parseOnly = false;
+        OutputKind outputKind = okPlain;
+        bool xmlOutputSourceLocation = true;
+        bool strict = false;
+        Strings attrPaths;
+        bool wantsReadWrite = false;
+        std::map<string, string> autoArgs_;
+        bool repair = false;
+
+        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+            if (*arg == "--help")
+                showManPage("nix-instantiate");
+            else if (*arg == "--version")
+                printVersion("nix-instantiate");
+            else if (*arg == "-")
+                readStdin = true;
+            else if (*arg == "--expr" || *arg == "-E")
+                fromArgs = true;
+            else if (*arg == "--eval" || *arg == "--eval-only")
+                evalOnly = true;
+            else if (*arg == "--read-write-mode")
+                wantsReadWrite = true;
+            else if (*arg == "--parse" || *arg == "--parse-only")
+                parseOnly = evalOnly = true;
+            else if (*arg == "--find-file")
+                findFile = true;
+            else if (*arg == "--attr" || *arg == "-A")
+                attrPaths.push_back(getArg(*arg, arg, end));
+            else if (parseAutoArgs(arg, end, autoArgs_))
+                ;
+            else if (parseSearchPathArg(arg, end, searchPath))
+                ;
+            else if (*arg == "--add-root")
+                gcRoot = getArg(*arg, arg, end);
+            else if (*arg == "--indirect")
+                indirectRoot = true;
+            else if (*arg == "--xml")
+                outputKind = okXML;
+            else if (*arg == "--json")
+                outputKind = okJSON;
+            else if (*arg == "--no-location")
+                xmlOutputSourceLocation = false;
+            else if (*arg == "--strict")
+                strict = true;
+            else if (*arg == "--repair")
+                repair = true;
+            else if (*arg == "--dry-run")
+                settings.readOnlyMode = true;
+            else if (*arg != "" && arg->at(0) == '-')
+                return false;
+            else
+                files.push_back(*arg);
+            return true;
+        });
+
+        EvalState state(searchPath);
+        state.repair = repair;
+
+        Bindings autoArgs;
+        evalAutoArgs(state, autoArgs_, autoArgs);
+
+        if (evalOnly && !wantsReadWrite)
+            settings.readOnlyMode = true;
+
+        if (attrPaths.empty()) attrPaths.push_back("");
+
+        if (findFile) {
+            foreach (Strings::iterator, i, files) {
+                Path p = state.findFile(*i);
+                if (p == "") throw Error(format("unable to find ‘%1%’") % *i);
+                std::cout << p << std::endl;
+            }
+            return;
+        }
+
+        store = openStore();
+
+        if (readStdin) {
+            Expr * e = parseStdin(state);
+            processExpr(state, attrPaths, parseOnly, strict, autoArgs,
+                evalOnly, outputKind, xmlOutputSourceLocation, e);
+        } else if (files.empty() && !fromArgs)
+            files.push_back("./default.nix");
+
+        foreach (Strings::iterator, i, files) {
+            Expr * e = fromArgs
+                ? state.parseExprFromString(*i, absPath("."))
+                : state.parseExprFromFile(resolveExprPath(lookupFileArg(state, *i)));
+            processExpr(state, attrPaths, parseOnly, strict, autoArgs,
+                evalOnly, outputKind, xmlOutputSourceLocation, e);
+        }
+
+        state.printStats();
+    });
+}
diff --git a/src/nix-log2xml/local.mk b/src/nix-log2xml/local.mk
new file mode 100644
index 000000000000..09c848c17f40
--- /dev/null
+++ b/src/nix-log2xml/local.mk
@@ -0,0 +1,5 @@
+programs += nix-log2xml
+
+nix-log2xml_DIR := $(d)
+
+nix-log2xml_SOURCES := $(d)/log2xml.cc
diff --git a/src/nix-log2xml/log2xml.cc b/src/nix-log2xml/log2xml.cc
new file mode 100644
index 000000000000..31cea60c3809
--- /dev/null
+++ b/src/nix-log2xml/log2xml.cc
@@ -0,0 +1,201 @@
+#include <vector>
+#include <iostream>
+#include <cstdio>
+#include <string>
+#include <cstring>
+
+using namespace std;
+
+
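+/* Convert a Nix build log read from standard input to XML.  The log may
+   contain ANSI-like escape codes of the form ‘\e[...X’, where the final
+   character X selects the action: ‘p’ opens a nesting level (with an
+   optional priority argument), ‘q’ closes one, ‘s’ sets the priority of
+   the current line, ‘a’/‘b’ start/stop ignoring linefeeds, and ‘e’/‘w’
+   mark the current line as an error or warning. */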
+struct Decoder
+{
+    enum { stTop, stEscape, stCSI } state;
+    string line;
+    bool inHeader;
+    int level;
+    vector<int> args;
+    bool newNumber;
+    int priority;
+    bool ignoreLF;
+    int lineNo, charNo;
+    bool warning;
+    bool error;
+
+    Decoder()
+    {
+        state = stTop;
+        line = "";
+        inHeader = false;
+        level = 0;
+        priority = 1;
+        ignoreLF = false;
+        lineNo = 1;
+        charNo = 0;
+        warning = false;
+        error = false;
+    }
+
+    void pushChar(char c);
+
+    void finishLine();
+
+    void decodeFile(istream & st);
+};
+
+
+void Decoder::pushChar(char c)
+{
+    if (c == '\n') {
+        lineNo++;
+        charNo = 0;
+    } else charNo++;
+    
+    switch (state) {
+        
+        case stTop:
+            if (c == '\e') {
+                state = stEscape;
+            } else if (c == '\n' && !ignoreLF) {
+                finishLine();
+            } else line += c;
+            break;
+
+        case stEscape:
+            if (c == '[') {
+                state = stCSI;
+                args.clear();
+                newNumber = true;
+            } else
+                state = stTop; /* !!! wrong */
+            break;
+
+        case stCSI:
+            if (c >= 0x40 && c != 0x7e) {
+                state = stTop;
+                switch (c) {
+                    case 'p':
+                        if (line.size()) finishLine();
+                        level++;
+                        inHeader = true;
+                        cout << "<nest>" << endl;
+                        priority = args.size() >= 1 ? args[0] : 1;
+                        break;
+                    case 'q':
+                        if (line.size()) finishLine();
+                        if (level > 0) {
+                            level--;
+                            cout << "</nest>" << endl;
+                        } else
+                            cerr << "not enough nesting levels at line "
+                                 << lineNo << ", character " << charNo  << endl;
+                        break;
+                    case 's':
+                        if (line.size()) finishLine();
+                        priority = args.size() >= 1 ? args[0] : 1;
+                        break;
+                    case 'a':
+                        ignoreLF = true;
+                        break;
+                    case 'b':
+                        ignoreLF = false;
+                        break;
+                    case 'e':
+                        error = true;
+                        break;
+                    case 'w':
+                        warning = true;
+                        break;
+                }
+            } else if (c >= '0' && c <= '9') {
+                int n = 0;
+                if (!newNumber) {
+                    n = args.back() * 10;
+                    args.pop_back();
+                }
+                n += c - '0';
+                args.push_back(n);
+                newNumber = false;
+            }
+            break;
+            
+    }
+}
+
+
+void Decoder::finishLine()
+{
+    string storeDir = "/nix/store/";
+    int sz = storeDir.size();
+    string tag = inHeader ? "head" : "line";
+    cout << "<" << tag;
+    if (priority != 1) cout << " priority='" << priority << "'";
+    if (warning) cout << " warning='1'";
+    if (error) cout << " error='1'";
+    cout << ">";
+
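+    /* Escape XML special characters, and turn occurrences of store paths
+       (‘/nix/store/<32-character hash>-<name>’) into structured
+       <storeref> elements. */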
+    for (unsigned int i = 0; i < line.size(); i++) {
+
+        if (line[i] == '<') cout << "&lt;";
+        else if (line[i] == '&') cout << "&amp;";
+        else if (line[i] == '\r') ; /* ignore carriage return */
+        else if (line[i] == '\n') cout << "\n";
+        else if (line[i] >= 0 && line[i] < 32 && line[i] != 9) cout << "&#xfffd;";
+        else if (i + sz + 33 < line.size() &&
+            string(line, i, sz) == storeDir &&
+            line[i + sz + 32] == '-')
+        {
+            int j = i + sz + 32;
+            /* skip name */
+            while (!strchr("/\n\r\t ()[]:;?<>", line[j])) j++;
+            int k = j;
+            while (!strchr("\n\r\t ()[]:;?<>", line[k])) k++;
+            // !!! escaping
+            cout << "<storeref>"
+                 << "<storedir>"
+                 << string(line, i, sz)
+                 << "</storedir>"
+                 << "<hash>"
+                 << string(line, i + sz, 32)
+                 << "</hash>"
+                 << "<name>"
+                 << string(line, i + sz + 32, j - (i + sz + 32))
+                 << "</name>"
+                 << "<path>"
+                 << string(line, j, k - j)
+                 << "</path>"
+                 << "</storeref>";
+            i = k - 1;
+        } else cout << line[i];
+    }
+    
+    cout << "</" << tag << ">" << endl;
+    line = "";
+    inHeader = false;
+    priority = 1;
+    warning = false;
+    error = false;
+}
+
+
+void Decoder::decodeFile(istream & st)
+{
+    int c;
+    
+    cout << "<logfile>" << endl;
+    
+    while ((c = st.get()) != EOF) {
+        pushChar(c);
+    }
+
+    if (line.size()) finishLine();
+
+    while (level--) cout << "</nest>" << endl;
+    
+    cout << "</logfile>" << endl;
+}
+
+
+int main(int argc, char * * argv)
+{
+    Decoder dec;
+    dec.decodeFile(cin);
+}
diff --git a/src/nix-log2xml/logfile.css b/src/nix-log2xml/logfile.css
new file mode 100644
index 000000000000..ed390d64a9ef
--- /dev/null
+++ b/src/nix-log2xml/logfile.css
@@ -0,0 +1,86 @@
+body {
+    font-family: sans-serif;
+    background: white;
+}
+
+
+ul.nesting, ul.toplevel {
+    padding: 0;
+    margin: 0;
+}
+
+ul.toplevel {
+    list-style-type: none;
+}
+
+ul.nesting li.line, ul.nesting li.lastline {
+    position: relative;
+    list-style-type: none;
+}
+
+ul.nesting li.line {
+    padding-left: 1.1em;
+}
+
+ul.nesting li.lastline {
+    padding-left: 1.2em; /* for the 0.1em border-left in .lastline > .lineconn */
+}
+
+li.line {
+    border-left: 0.1em solid #6185a0;
+}
+
+li.line > span.lineconn, li.lastline > span.lineconn {
+    position: absolute;
+    height: 0.65em;
+    left: 0em;
+    width: 1em;
+    border-bottom: 0.1em solid #6185a0;
+}
+
+li.lastline > span.lineconn {
+    border-left: 0.1em solid #6185a0;
+}
+
+
+em.storeref {
+    color: #500000;
+    position: relative; 
+    width: 100%;
+}
+
+em.storeref:hover {
+    background-color: #eeeeee;
+}
+
+*.popup {
+    display: none;
+/*    background: url('http://losser.st-lab.cs.uu.nl/~mbravenb/menuback.png') repeat; */
+    background: #ffffcd;
+    border: solid #555555 1px;
+    position: absolute;
+    top: 0em;
+    left: 0em;
+    margin: 0;
+    padding: 0;
+    z-index: 100;
+}
+
+em.storeref:hover span.popup {
+    display: inline;
+}
+
+
+.toggle {
+    text-decoration: none;
+}
+
+.showTree, .hideTree {
+    font-family: monospace;
+    font-size: larger;
+}
+
+.error {
+    color: #ff0000;
+    font-weight: bold;
+}
\ No newline at end of file
diff --git a/src/nix-store/dotgraph.cc b/src/nix-store/dotgraph.cc
new file mode 100644
index 000000000000..a333d7351010
--- /dev/null
+++ b/src/nix-store/dotgraph.cc
@@ -0,0 +1,162 @@
+#include "dotgraph.hh"
+#include "util.hh"
+#include "store-api.hh"
+
+#include <iostream>
+
+
+using std::cout;
+
+namespace nix {
+
+
+static string dotQuote(const string & s)
+{
+    return "\"" + s + "\"";
+}
+
+
+static string nextColour()
+{
+    static int n = 0;
+    static string colours[] =
+	{ "black", "red", "green", "blue"
+	, "magenta", "burlywood" };
+    return colours[n++ % (sizeof(colours) / sizeof(string))];
+}
+
+
+static string makeEdge(const string & src, const string & dst)
+{
+    format f = format("%1% -> %2% [color = %3%];\n")
+	% dotQuote(src) % dotQuote(dst) % dotQuote(nextColour());
+    return f.str();
+}
+
+
+static string makeNode(const string & id, const string & label,
+    const string & colour)
+{
+    format f = format("%1% [label = %2%, shape = box, "
+	"style = filled, fillcolor = %3%];\n")
+	% dotQuote(id) % dotQuote(label) % dotQuote(colour);
+    return f.str();
+}
+
+
+static string symbolicName(const string & path)
+{
+    string p = baseNameOf(path);
+    int dash = p.find('-');
+    return string(p, dash + 1);
+}
+
+
+#if 0
+string pathLabel(const Path & nePath, const string & elemPath)
+{
+    return (string) nePath + "-" + elemPath;
+}
+
+
+void printClosure(const Path & nePath, const StoreExpr & fs)
+{
+    PathSet workList(fs.closure.roots);
+    PathSet doneSet;
+
+    for (PathSet::iterator i = workList.begin(); i != workList.end(); ++i) {
+	cout << makeEdge(pathLabel(nePath, *i), nePath);
+    }
+
+    while (!workList.empty()) {
+	Path path = *(workList.begin());
+	workList.erase(path);
+
+	if (doneSet.find(path) == doneSet.end()) {
+	    doneSet.insert(path);
+
+	    ClosureElems::const_iterator elem = fs.closure.elems.find(path);
+	    if (elem == fs.closure.elems.end())
+		throw Error(format("bad closure, missing path ‘%1%’") % path);
+
+	    for (StringSet::const_iterator i = elem->second.refs.begin();
+		 i != elem->second.refs.end(); ++i)
+	    {
+		workList.insert(*i);
+		cout << makeEdge(pathLabel(nePath, *i), pathLabel(nePath, path));
+	    }
+
+	    cout << makeNode(pathLabel(nePath, path), 
+		symbolicName(path), "#ff0000");
+	}
+    }
+}
+#endif
+
+
+void printDotGraph(const PathSet & roots)
+{
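+    /* Worklist traversal of the references graph starting from the given
+       roots: emit one Graphviz node per store path and one edge per
+       reference. */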
+    PathSet workList(roots);
+    PathSet doneSet;
+            
+    cout << "digraph G {\n";
+
+    while (!workList.empty()) {
+	Path path = *(workList.begin());
+	workList.erase(path);
+
+	if (doneSet.find(path) != doneSet.end()) continue;
+        doneSet.insert(path);
+
+        cout << makeNode(path, symbolicName(path), "#ff0000");
+        
+        PathSet references;
+        store->queryReferences(path, references);
+
+        for (PathSet::iterator i = references.begin();
+             i != references.end(); ++i)
+        {
+            if (*i != path) {
+                workList.insert(*i);
+                cout << makeEdge(*i, path);
+            }
+        }
+            
+
+#if 0        
+	    StoreExpr ne = storeExprFromPath(path);
+
+	    string label, colour;
+                    
+	    if (ne.type == StoreExpr::neDerivation) {
+		for (PathSet::iterator i = ne.derivation.inputs.begin();
+		     i != ne.derivation.inputs.end(); ++i)
+		{
+		    workList.insert(*i);
+		    cout << makeEdge(*i, path);
+		}
+
+		label = "derivation";
+		colour = "#00ff00";
+		for (StringPairs::iterator i = ne.derivation.env.begin();
+		     i != ne.derivation.env.end(); ++i)
+		    if (i->first == "name") label = i->second;
+	    }
+
+	    else if (ne.type == StoreExpr::neClosure) {
+		label = "<closure>";
+		colour = "#00ffff";
+		printClosure(path, ne);
+	    }
+
+	    else abort();
+
+	    cout << makeNode(path, label, colour);
+#endif
+    }
+
+    cout << "}\n";
+}
+
+
+}
diff --git a/src/nix-store/dotgraph.hh b/src/nix-store/dotgraph.hh
new file mode 100644
index 000000000000..68410d84156d
--- /dev/null
+++ b/src/nix-store/dotgraph.hh
@@ -0,0 +1,9 @@
+#pragma once
+
+#include "types.hh"
+
+namespace nix {
+
+void printDotGraph(const PathSet & roots);
+
+}
diff --git a/src/nix-store/local.mk b/src/nix-store/local.mk
new file mode 100644
index 000000000000..b887fe03389b
--- /dev/null
+++ b/src/nix-store/local.mk
@@ -0,0 +1,11 @@
+programs += nix-store
+
+nix-store_DIR := $(d)
+
+nix-store_SOURCES := $(wildcard $(d)/*.cc)
+
+nix-store_LIBS = libmain libstore libutil libformat
+
+nix-store_LDFLAGS = -lbz2 -pthread
+
+nix-store_CXXFLAGS = -DCURL=\"$(curl)\"
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
new file mode 100644
index 000000000000..24ecf84142b0
--- /dev/null
+++ b/src/nix-store/nix-store.cc
@@ -0,0 +1,1112 @@
+#include "globals.hh"
+#include "misc.hh"
+#include "archive.hh"
+#include "shared.hh"
+#include "dotgraph.hh"
+#include "xmlgraph.hh"
+#include "local-store.hh"
+#include "util.hh"
+#include "serve-protocol.hh"
+#include "worker-protocol.hh"
+#include "monitor-fd.hh"
+
+#include <iostream>
+#include <algorithm>
+#include <cstdio>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <bzlib.h>
+
+
+using namespace nix;
+using std::cin;
+using std::cout;
+
+
+typedef void (* Operation) (Strings opFlags, Strings opArgs);
+
+
+static Path gcRoot;
+static int rootNr = 0;
+static bool indirectRoot = false;
+static bool noOutput = false;
+
+
+LocalStore & ensureLocalStore()
+{
+    LocalStore * store2(dynamic_cast<LocalStore *>(store.get()));
+    if (!store2) throw Error("you don't have sufficient rights to use this command");
+    return *store2;
+}
+
+
+static Path useDeriver(Path path)
+{
+    if (!isDerivation(path)) {
+        Path drvPath = store->queryDeriver(path);
+        if (drvPath == "")
+            throw Error(format("deriver of path ‘%1%’ is not known") % path);
+        path = drvPath;
+    }
+    return path;
+}
+
+
+/* Realise the given path.  For a derivation that means build it; for
+   other paths it means ensure their validity. */
+static PathSet realisePath(Path path, bool build = true)
+{
+    DrvPathWithOutputs p = parseDrvPathWithOutputs(path);
+
+    if (isDerivation(p.first)) {
+        if (build) store->buildPaths(singleton<PathSet>(path));
+        Derivation drv = derivationFromPath(*store, p.first);
+        rootNr++;
+
+        if (p.second.empty())
+            foreach (DerivationOutputs::iterator, i, drv.outputs) p.second.insert(i->first);
+
+        PathSet outputs;
+        foreach (StringSet::iterator, j, p.second) {
+            DerivationOutputs::iterator i = drv.outputs.find(*j);
+            if (i == drv.outputs.end())
+                throw Error(format("derivation ‘%1%’ does not have an output named ‘%2%’") % p.first % *j);
+            Path outPath = i->second.path;
+            if (gcRoot == "")
+                printGCWarning();
+            else {
+                Path rootName = gcRoot;
+                if (rootNr > 1) rootName += "-" + int2String(rootNr);
+                if (i->first != "out") rootName += "-" + i->first;
+                outPath = addPermRoot(*store, outPath, rootName, indirectRoot);
+            }
+            outputs.insert(outPath);
+        }
+        return outputs;
+    }
+
+    else {
+        if (build) store->ensurePath(path);
+        else if (!store->isValidPath(path)) throw Error(format("path ‘%1%’ does not exist and cannot be created") % path);
+        if (gcRoot == "")
+            printGCWarning();
+        else {
+            Path rootName = gcRoot;
+            rootNr++;
+            if (rootNr > 1) rootName += "-" + int2String(rootNr);
+            path = addPermRoot(*store, path, rootName, indirectRoot);
+        }
+        return singleton<PathSet>(path);
+    }
+}
+
+
+/* Realise the given paths. */
+static void opRealise(Strings opFlags, Strings opArgs)
+{
+    bool dryRun = false;
+    BuildMode buildMode = bmNormal;
+    bool ignoreUnknown = false;
+
+    foreach (Strings::iterator, i, opFlags)
+        if (*i == "--dry-run") dryRun = true;
+        else if (*i == "--repair") buildMode = bmRepair;
+        else if (*i == "--check") buildMode = bmCheck;
+        else if (*i == "--ignore-unknown") ignoreUnknown = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    Paths paths;
+    foreach (Strings::iterator, i, opArgs) {
+        DrvPathWithOutputs p = parseDrvPathWithOutputs(*i);
+        paths.push_back(makeDrvPathWithOutputs(followLinksToStorePath(p.first), p.second));
+    }
+
+    unsigned long long downloadSize, narSize;
+    PathSet willBuild, willSubstitute, unknown;
+    queryMissing(*store, PathSet(paths.begin(), paths.end()),
+        willBuild, willSubstitute, unknown, downloadSize, narSize);
+
+    if (ignoreUnknown) {
+        Paths paths2;
+        foreach (Paths::iterator, i, paths)
+            if (unknown.find(*i) == unknown.end()) paths2.push_back(*i);
+        paths = paths2;
+        unknown = PathSet();
+    }
+
+    printMissing(willBuild, willSubstitute, unknown, downloadSize, narSize);
+
+    if (dryRun) return;
+
+    /* Build all paths at the same time to exploit parallelism. */
+    store->buildPaths(PathSet(paths.begin(), paths.end()), buildMode);
+
+    if (!ignoreUnknown)
+        foreach (Paths::iterator, i, paths) {
+            PathSet paths = realisePath(*i, false);
+            if (!noOutput)
+                foreach (PathSet::iterator, j, paths)
+                    cout << format("%1%\n") % *j;
+        }
+}
+
+
+/* Add files to the Nix store and print the resulting paths. */
+static void opAdd(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty()) throw UsageError("unknown flag");
+
+    for (Strings::iterator i = opArgs.begin(); i != opArgs.end(); ++i)
+        cout << format("%1%\n") % store->addToStore(*i);
+}
+
+
+/* Preload the output of a fixed-output derivation into the Nix
+   store. */
+static void opAddFixed(Strings opFlags, Strings opArgs)
+{
+    bool recursive = false;
+
+    for (Strings::iterator i = opFlags.begin();
+         i != opFlags.end(); ++i)
+        if (*i == "--recursive") recursive = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    if (opArgs.empty())
+        throw UsageError("first argument must be hash algorithm");
+
+    HashType hashAlgo = parseHashType(opArgs.front());
+    opArgs.pop_front();
+
+    for (Strings::iterator i = opArgs.begin(); i != opArgs.end(); ++i)
+        cout << format("%1%\n") % store->addToStore(*i, recursive, hashAlgo);
+}
+
+
+/* Hack to support caching in `nix-prefetch-url'. */
+static void opPrintFixedPath(Strings opFlags, Strings opArgs)
+{
+    bool recursive = false;
+
+    for (Strings::iterator i = opFlags.begin();
+         i != opFlags.end(); ++i)
+        if (*i == "--recursive") recursive = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    if (opArgs.size() != 3)
+        throw UsageError(format("‘--print-fixed-path’ requires three arguments"));
+
+    Strings::iterator i = opArgs.begin();
+    HashType hashAlgo = parseHashType(*i++);
+    string hash = *i++;
+    string name = *i++;
+
+    cout << format("%1%\n") %
+        makeFixedOutputPath(recursive, hashAlgo,
+            parseHash16or32(hashAlgo, hash), name);
+}
+
+
+static PathSet maybeUseOutputs(const Path & storePath, bool useOutput, bool forceRealise)
+{
+    if (forceRealise) realisePath(storePath);
+    if (useOutput && isDerivation(storePath)) {
+        Derivation drv = derivationFromPath(*store, storePath);
+        PathSet outputs;
+        foreach (DerivationOutputs::iterator, i, drv.outputs)
+            outputs.insert(i->second.path);
+        return outputs;
+    }
+    else return singleton<PathSet>(storePath);
+}
+
+
+/* Some code to print a tree representation of a derivation dependency
+   graph.  Topological sorting is used to keep the tree relatively
+   flat. */
+
+const string treeConn = "+---";
+const string treeLine = "|   ";
+const string treeNull = "    ";
+
+
+static void printTree(const Path & path,
+    const string & firstPad, const string & tailPad, PathSet & done)
+{
+    if (done.find(path) != done.end()) {
+        cout << format("%1%%2% [...]\n") % firstPad % path;
+        return;
+    }
+    done.insert(path);
+
+    cout << format("%1%%2%\n") % firstPad % path;
+
+    PathSet references;
+    store->queryReferences(path, references);
+
+    /* Topologically sort under the relation A < B iff A \in
+       closure(B).  That is, if derivation A is a (possibly indirect)
+       input of B, then A is printed first.  This has the effect of
+       flattening the tree, preventing deeply nested structures.  */
+    Paths sorted = topoSortPaths(*store, references);
+    reverse(sorted.begin(), sorted.end());
+
+    foreach (Paths::iterator, i, sorted) {
+        Paths::iterator j = i; ++j;
+        printTree(*i, tailPad + treeConn,
+            j == sorted.end() ? tailPad + treeNull : tailPad + treeLine,
+            done);
+    }
+}
+
+
+/* Perform various sorts of queries. */
+static void opQuery(Strings opFlags, Strings opArgs)
+{
+    enum { qOutputs, qRequisites, qReferences, qReferrers
+         , qReferrersClosure, qDeriver, qBinding, qHash, qSize
+         , qTree, qGraph, qXml, qResolve, qRoots } query = qOutputs;
+    bool useOutput = false;
+    bool includeOutputs = false;
+    bool forceRealise = false;
+    string bindingName;
+
+    foreach (Strings::iterator, i, opFlags)
+        if (*i == "--outputs") query = qOutputs;
+        else if (*i == "--requisites" || *i == "-R") query = qRequisites;
+        else if (*i == "--references") query = qReferences;
+        else if (*i == "--referrers" || *i == "--referers") query = qReferrers;
+        else if (*i == "--referrers-closure" || *i == "--referers-closure") query = qReferrersClosure;
+        else if (*i == "--deriver" || *i == "-d") query = qDeriver;
+        else if (*i == "--binding" || *i == "-b") {
+            if (opArgs.size() == 0)
+                throw UsageError("expected binding name");
+            bindingName = opArgs.front();
+            opArgs.pop_front();
+            query = qBinding;
+        }
+        else if (*i == "--hash") query = qHash;
+        else if (*i == "--size") query = qSize;
+        else if (*i == "--tree") query = qTree;
+        else if (*i == "--graph") query = qGraph;
+        else if (*i == "--xml") query = qXml;
+        else if (*i == "--resolve") query = qResolve;
+        else if (*i == "--roots") query = qRoots;
+        else if (*i == "--use-output" || *i == "-u") useOutput = true;
+        else if (*i == "--force-realise" || *i == "--force-realize" || *i == "-f") forceRealise = true;
+        else if (*i == "--include-outputs") includeOutputs = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    switch (query) {
+
+        case qOutputs: {
+            foreach (Strings::iterator, i, opArgs) {
+                *i = followLinksToStorePath(*i);
+                if (forceRealise) realisePath(*i);
+                Derivation drv = derivationFromPath(*store, *i);
+                foreach (DerivationOutputs::iterator, j, drv.outputs)
+                    cout << format("%1%\n") % j->second.path;
+            }
+            break;
+        }
+
+        case qRequisites:
+        case qReferences:
+        case qReferrers:
+        case qReferrersClosure: {
+            PathSet paths;
+            foreach (Strings::iterator, i, opArgs) {
+                PathSet ps = maybeUseOutputs(followLinksToStorePath(*i), useOutput, forceRealise);
+                foreach (PathSet::iterator, j, ps) {
+                    if (query == qRequisites) computeFSClosure(*store, *j, paths, false, includeOutputs);
+                    else if (query == qReferences) store->queryReferences(*j, paths);
+                    else if (query == qReferrers) store->queryReferrers(*j, paths);
+                    else if (query == qReferrersClosure) computeFSClosure(*store, *j, paths, true);
+                }
+            }
+            Paths sorted = topoSortPaths(*store, paths);
+            for (Paths::reverse_iterator i = sorted.rbegin();
+                 i != sorted.rend(); ++i)
+                cout << format("%s\n") % *i;
+            break;
+        }
+
+        case qDeriver:
+            foreach (Strings::iterator, i, opArgs) {
+                Path deriver = store->queryDeriver(followLinksToStorePath(*i));
+                cout << format("%1%\n") %
+                    (deriver == "" ? "unknown-deriver" : deriver);
+            }
+            break;
+
+        case qBinding:
+            foreach (Strings::iterator, i, opArgs) {
+                Path path = useDeriver(followLinksToStorePath(*i));
+                Derivation drv = derivationFromPath(*store, path);
+                StringPairs::iterator j = drv.env.find(bindingName);
+                if (j == drv.env.end())
+                    throw Error(format("derivation ‘%1%’ has no environment binding named ‘%2%’")
+                        % path % bindingName);
+                cout << format("%1%\n") % j->second;
+            }
+            break;
+
+        case qHash:
+        case qSize:
+            foreach (Strings::iterator, i, opArgs) {
+                PathSet paths = maybeUseOutputs(followLinksToStorePath(*i), useOutput, forceRealise);
+                foreach (PathSet::iterator, j, paths) {
+                    ValidPathInfo info = store->queryPathInfo(*j);
+                    if (query == qHash) {
+                        assert(info.hash.type == htSHA256);
+                        cout << format("sha256:%1%\n") % printHash32(info.hash);
+                    } else if (query == qSize)
+                        cout << format("%1%\n") % info.narSize;
+                }
+            }
+            break;
+
+        case qTree: {
+            PathSet done;
+            foreach (Strings::iterator, i, opArgs)
+                printTree(followLinksToStorePath(*i), "", "", done);
+            break;
+        }
+
+        case qGraph: {
+            PathSet roots;
+            foreach (Strings::iterator, i, opArgs) {
+                PathSet paths = maybeUseOutputs(followLinksToStorePath(*i), useOutput, forceRealise);
+                roots.insert(paths.begin(), paths.end());
+            }
+            printDotGraph(roots);
+            break;
+        }
+
+        case qXml: {
+            PathSet roots;
+            foreach (Strings::iterator, i, opArgs) {
+                PathSet paths = maybeUseOutputs(followLinksToStorePath(*i), useOutput, forceRealise);
+                roots.insert(paths.begin(), paths.end());
+            }
+            printXmlGraph(roots);
+            break;
+        }
+
+        case qResolve: {
+            foreach (Strings::iterator, i, opArgs)
+                cout << format("%1%\n") % followLinksToStorePath(*i);
+            break;
+        }
+
+        case qRoots: {
+            PathSet referrers;
+            foreach (Strings::iterator, i, opArgs) {
+                PathSet paths = maybeUseOutputs(followLinksToStorePath(*i), useOutput, forceRealise);
+                foreach (PathSet::iterator, j, paths)
+                    computeFSClosure(*store, *j, referrers, true,
+                        settings.gcKeepOutputs, settings.gcKeepDerivations);
+            }
+            Roots roots = store->findRoots();
+            foreach (Roots::iterator, i, roots)
+                if (referrers.find(i->second) != referrers.end())
+                    cout << format("%1%\n") % i->first;
+            break;
+        }
+
+        default:
+            abort();
+    }
+}
+
+
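+/* Escape a string for interpolation inside single quotes in a shell
+   command.  Illustrative example: the input it's becomes it'\''s, so
+   that wrapping the result in '...' reassembles the original string. */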
+static string shellEscape(const string & s)
+{
+    string r;
+    foreach (string::const_iterator, i, s)
+        if (*i == '\'') r += "'\\''"; else r += *i;
+    return r;
+}
+
+
+static void opPrintEnv(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty()) throw UsageError("unknown flag");
+    if (opArgs.size() != 1) throw UsageError("‘--print-env’ requires one derivation store path");
+
+    Path drvPath = opArgs.front();
+    Derivation drv = derivationFromPath(*store, drvPath);
+
+    /* Print each environment variable in the derivation in a format
+       that can be sourced by the shell. */
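+    /* Illustrative output (variable name hypothetical): one line of the
+       form export name; name='hello-2.10' per environment variable. */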
+    foreach (StringPairs::iterator, i, drv.env)
+        cout << format("export %1%; %1%='%2%'\n") % i->first % shellEscape(i->second);
+
+    /* Also output the arguments.  This doesn't preserve whitespace in
+       arguments. */
+    cout << "export _args; _args='";
+    foreach (Strings::iterator, i, drv.args) {
+        if (i != drv.args.begin()) cout << ' ';
+        cout << shellEscape(*i);
+    }
+    cout << "'\n";
+}
+
+
+static void opReadLog(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty()) throw UsageError("unknown flag");
+
+    RunPager pager;
+
+    foreach (Strings::iterator, i, opArgs) {
+        Path path = useDeriver(followLinksToStorePath(*i));
+
+        string baseName = baseNameOf(path);
+        bool found = false;
+
+        for (int j = 0; j < 2; j++) {
+
+            Path logPath =
+                j == 0
+                ? (format("%1%/%2%/%3%/%4%") % settings.nixLogDir % drvsLogDir % string(baseName, 0, 2) % string(baseName, 2)).str()
+                : (format("%1%/%2%/%3%") % settings.nixLogDir % drvsLogDir % baseName).str();
+            Path logBz2Path = logPath + ".bz2";
+
+            if (pathExists(logPath)) {
+                /* !!! Make this run in O(1) memory. */
+                string log = readFile(logPath);
+                writeFull(STDOUT_FILENO, (const unsigned char *) log.data(), log.size());
+                found = true;
+                break;
+            }
+
+            else if (pathExists(logBz2Path)) {
+                AutoCloseFD fd = open(logBz2Path.c_str(), O_RDONLY);
+                FILE * f = 0;
+                if (fd == -1 || (f = fdopen(fd.borrow(), "r")) == 0)
+                    throw SysError(format("opening file ‘%1%’") % logBz2Path);
+                int err;
+                BZFILE * bz = BZ2_bzReadOpen(&err, f, 0, 0, 0, 0);
+                if (!bz) throw Error(format("cannot open bzip2 file ‘%1%’") % logBz2Path);
+                unsigned char buf[128 * 1024];
+                do {
+                    int n = BZ2_bzRead(&err, bz, buf, sizeof(buf));
+                    if (err != BZ_OK && err != BZ_STREAM_END)
+                        throw Error(format("error reading bzip2 file ‘%1%’") % logBz2Path);
+                    writeFull(STDOUT_FILENO, buf, n);
+                } while (err != BZ_STREAM_END);
+                BZ2_bzReadClose(&err, bz);
+                found = true;
+                break;
+            }
+        }
+
+        if (!found) {
+            for (auto & i : settings.logServers) {
+                string prefix = i;
+                if (!prefix.empty() && prefix.back() != '/') prefix += '/';
+                string url = prefix + baseName;
+                try {
+                    string log = runProgram(CURL, true, {"--fail", "--location", "--silent", "--", url});
+                    std::cout << "(using build log from " << url << ")" << std::endl;
+                    std::cout << log;
+                    found = true;
+                    break;
+                } catch (ExecError & e) {
+                    /* Ignore errors from curl. FIXME: actually, might be
+                       nice to print a warning on HTTP status != 404. */
+                }
+            }
+        }
+
+        if (!found) throw Error(format("build log of derivation ‘%1%’ is not available") % path);
+    }
+}
+
+
+static void opDumpDB(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty()) throw UsageError("unknown flag");
+    if (!opArgs.empty())
+        throw UsageError("no arguments expected");
+    PathSet validPaths = store->queryAllValidPaths();
+    foreach (PathSet::iterator, i, validPaths)
+        cout << store->makeValidityRegistration(singleton<PathSet>(*i), true, true);
+}
+
+
+static void registerValidity(bool reregister, bool hashGiven, bool canonicalise)
+{
+    ValidPathInfos infos;
+
+    while (1) {
+        ValidPathInfo info = decodeValidPathInfo(cin, hashGiven);
+        if (info.path == "") break;
+        if (!store->isValidPath(info.path) || reregister) {
+            /* !!! races */
+            if (canonicalise)
+                canonicalisePathMetaData(info.path, -1);
+            if (!hashGiven) {
+                HashResult hash = hashPath(htSHA256, info.path);
+                info.hash = hash.first;
+                info.narSize = hash.second;
+            }
+            infos.push_back(info);
+        }
+    }
+
+    ensureLocalStore().registerValidPaths(infos);
+}
+
+
+static void opLoadDB(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty()) throw UsageError("unknown flag");
+    if (!opArgs.empty())
+        throw UsageError("no arguments expected");
+    registerValidity(true, true, false);
+}
+
+
+static void opRegisterValidity(Strings opFlags, Strings opArgs)
+{
+    bool reregister = false; // !!! maybe this should be the default
+    bool hashGiven = false;
+
+    for (Strings::iterator i = opFlags.begin();
+         i != opFlags.end(); ++i)
+        if (*i == "--reregister") reregister = true;
+        else if (*i == "--hash-given") hashGiven = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    if (!opArgs.empty()) throw UsageError("no arguments expected");
+
+    registerValidity(reregister, hashGiven, true);
+}
+
+
+static void opCheckValidity(Strings opFlags, Strings opArgs)
+{
+    bool printInvalid = false;
+
+    for (Strings::iterator i = opFlags.begin();
+         i != opFlags.end(); ++i)
+        if (*i == "--print-invalid") printInvalid = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    for (Strings::iterator i = opArgs.begin();
+         i != opArgs.end(); ++i)
+    {
+        Path path = followLinksToStorePath(*i);
+        if (!store->isValidPath(path)) {
+            if (printInvalid)
+                cout << format("%1%\n") % path;
+            else
+                throw Error(format("path ‘%1%’ is not valid") % path);
+        }
+    }
+}
+
+
+static string showBytes(unsigned long long bytes)
+{
+    return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str();
+}
+
+
+struct PrintFreed
+{
+    bool show;
+    const GCResults & results;
+    PrintFreed(bool show, const GCResults & results)
+        : show(show), results(results) { }
+    ~PrintFreed()
+    {
+        if (show)
+            cout << format("%1% store paths deleted, %2% freed\n")
+                % results.paths.size()
+                % showBytes(results.bytesFreed);
+    }
+};
+
+
+static void opGC(Strings opFlags, Strings opArgs)
+{
+    bool printRoots = false;
+    GCOptions options;
+    options.action = GCOptions::gcDeleteDead;
+
+    GCResults results;
+
+    /* Do what? */
+    foreach (Strings::iterator, i, opFlags)
+        if (*i == "--print-roots") printRoots = true;
+        else if (*i == "--print-live") options.action = GCOptions::gcReturnLive;
+        else if (*i == "--print-dead") options.action = GCOptions::gcReturnDead;
+        else if (*i == "--delete") options.action = GCOptions::gcDeleteDead;
+        else if (*i == "--max-freed") {
+            long long maxFreed = getIntArg<long long>(*i, i, opFlags.end(), true);
+            options.maxFreed = maxFreed >= 0 ? maxFreed : 0;
+        }
+        else throw UsageError(format("bad sub-operation ‘%1%’ in GC") % *i);
+
+    if (!opArgs.empty()) throw UsageError("no arguments expected");
+
+    if (printRoots) {
+        Roots roots = store->findRoots();
+        foreach (Roots::iterator, i, roots)
+            cout << i->first << " -> " << i->second << std::endl;
+    }
+
+    else {
+        PrintFreed freed(options.action == GCOptions::gcDeleteDead, results);
+        store->collectGarbage(options, results);
+
+        if (options.action != GCOptions::gcDeleteDead)
+            foreach (PathSet::iterator, i, results.paths)
+                cout << *i << std::endl;
+    }
+}
+
+
+/* Remove paths from the Nix store if possible (i.e., if they do not
+   have any remaining referrers and are not reachable from any GC
+   roots). */
+static void opDelete(Strings opFlags, Strings opArgs)
+{
+    GCOptions options;
+    options.action = GCOptions::gcDeleteSpecific;
+
+    foreach (Strings::iterator, i, opFlags)
+        if (*i == "--ignore-liveness") options.ignoreLiveness = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    foreach (Strings::iterator, i, opArgs)
+        options.pathsToDelete.insert(followLinksToStorePath(*i));
+
+    GCResults results;
+    PrintFreed freed(true, results);
+    store->collectGarbage(options, results);
+}
+
+
+/* Dump a path as a Nix archive.  The archive is written to standard
+   output. */
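+/* Illustrative invocation (store path hypothetical):
+     nix-store --dump /nix/store/...-foo > foo.nar */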
+static void opDump(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty()) throw UsageError("unknown flag");
+    if (opArgs.size() != 1) throw UsageError("only one argument allowed");
+
+    FdSink sink(STDOUT_FILENO);
+    string path = *opArgs.begin();
+    dumpPath(path, sink);
+}
+
+
+/* Restore a value from a Nix archive.  The archive is read from
+   standard input. */
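+/* Illustrative invocation (paths hypothetical):
+     nix-store --restore ./foo-copy < foo.nar */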
+static void opRestore(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty()) throw UsageError("unknown flag");
+    if (opArgs.size() != 1) throw UsageError("only one argument allowed");
+
+    FdSource source(STDIN_FILENO);
+    restorePath(*opArgs.begin(), source);
+}
+
+
+static void opExport(Strings opFlags, Strings opArgs)
+{
+    bool sign = false;
+    for (Strings::iterator i = opFlags.begin();
+         i != opFlags.end(); ++i)
+        if (*i == "--sign") sign = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    FdSink sink(STDOUT_FILENO);
+    Paths sorted = topoSortPaths(*store, PathSet(opArgs.begin(), opArgs.end()));
+    reverse(sorted.begin(), sorted.end());
+    exportPaths(*store, sorted, sign, sink);
+}
+
+
+static void opImport(Strings opFlags, Strings opArgs)
+{
+    bool requireSignature = false;
+    foreach (Strings::iterator, i, opFlags)
+        if (*i == "--require-signature") requireSignature = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    if (!opArgs.empty()) throw UsageError("no arguments expected");
+
+    FdSource source(STDIN_FILENO);
+    Paths paths = store->importPaths(requireSignature, source);
+
+    foreach (Paths::iterator, i, paths)
+        cout << format("%1%\n") % *i << std::flush;
+}
+
+
+/* Initialise the Nix databases. */
+static void opInit(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty()) throw UsageError("unknown flag");
+    if (!opArgs.empty())
+        throw UsageError("no arguments expected");
+    /* Doesn't do anything right now; database tables are initialised
+       automatically. */
+}
+
+
+/* Verify the consistency of the Nix environment. */
+static void opVerify(Strings opFlags, Strings opArgs)
+{
+    if (!opArgs.empty())
+        throw UsageError("no arguments expected");
+
+    bool checkContents = false;
+    bool repair = false;
+
+    for (Strings::iterator i = opFlags.begin();
+         i != opFlags.end(); ++i)
+        if (*i == "--check-contents") checkContents = true;
+        else if (*i == "--repair") repair = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    if (ensureLocalStore().verifyStore(checkContents, repair)) {
+        printMsg(lvlError, "warning: not all errors were fixed");
+        throw Exit(1);
+    }
+}
+
+
+/* Check that the contents of the given store paths have not been modified. */
+static void opVerifyPath(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty())
+        throw UsageError("no flags expected");
+
+    int status = 0;
+
+    foreach (Strings::iterator, i, opArgs) {
+        Path path = followLinksToStorePath(*i);
+        printMsg(lvlTalkative, format("checking path ‘%1%’...") % path);
+        ValidPathInfo info = store->queryPathInfo(path);
+        HashResult current = hashPath(info.hash.type, path);
+        if (current.first != info.hash) {
+            printMsg(lvlError,
+                format("path ‘%1%’ was modified! expected hash ‘%2%’, got ‘%3%’")
+                % path % printHash(info.hash) % printHash(current.first));
+            status = 1;
+        }
+    }
+
+    throw Exit(status);
+}
+
+
+/* Repair the contents of the given path by redownloading it using a
+   substituter (if available). */
+static void opRepairPath(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty())
+        throw UsageError("no flags expected");
+
+    foreach (Strings::iterator, i, opArgs) {
+        Path path = followLinksToStorePath(*i);
+        ensureLocalStore().repairPath(path);
+    }
+}
+
+
+static void showOptimiseStats(OptimiseStats & stats)
+{
+    printMsg(lvlError,
+        format("%1% freed by hard-linking %2% files")
+        % showBytes(stats.bytesFreed)
+        % stats.filesLinked);
+}
+
+
+/* Optimise the disk space usage of the Nix store by hard-linking
+   files with the same contents. */
+static void opOptimise(Strings opFlags, Strings opArgs)
+{
+    if (!opArgs.empty() || !opFlags.empty())
+        throw UsageError("no arguments expected");
+
+    OptimiseStats stats;
+    try {
+        ensureLocalStore().optimiseStore(stats);
+    } catch (...) {
+        showOptimiseStats(stats);
+        throw;
+    }
+    showOptimiseStats(stats);
+}
+
+
+static void opQueryFailedPaths(Strings opFlags, Strings opArgs)
+{
+    if (!opArgs.empty() || !opFlags.empty())
+        throw UsageError("no arguments expected");
+    PathSet failed = store->queryFailedPaths();
+    foreach (PathSet::iterator, i, failed)
+        cout << format("%1%\n") % *i;
+}
+
+
+static void opClearFailedPaths(Strings opFlags, Strings opArgs)
+{
+    if (!opFlags.empty())
+        throw UsageError("no flags expected");
+    store->clearFailedPaths(PathSet(opArgs.begin(), opArgs.end()));
+}
+
+
+/* Serve the nix store in a way usable by a restricted ssh user. */
+static void opServe(Strings opFlags, Strings opArgs)
+{
+    bool writeAllowed = false;
+    foreach (Strings::iterator, i, opFlags)
+        if (*i == "--write") writeAllowed = true;
+        else throw UsageError(format("unknown flag ‘%1%’") % *i);
+
+    if (!opArgs.empty()) throw UsageError("no arguments expected");
+
+    FdSource in(STDIN_FILENO);
+    FdSink out(STDOUT_FILENO);
+
+    /* Exchange the greeting. */
+    unsigned int magic = readInt(in);
+    if (magic != SERVE_MAGIC_1) throw Error("protocol mismatch");
+    writeInt(SERVE_MAGIC_2, out);
+    writeInt(SERVE_PROTOCOL_VERSION, out);
+    out.flush();
+    readInt(in); // Client version, unused for now
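+    /* Handshake sketch, as implemented above: the client sends
+       SERVE_MAGIC_1, the server replies with SERVE_MAGIC_2 followed by
+       SERVE_PROTOCOL_VERSION, then reads (and currently ignores) the
+       client's protocol version. */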
+
+    while (true) {
+        ServeCommand cmd;
+        try {
+            cmd = (ServeCommand) readInt(in);
+        } catch (EndOfFile & e) {
+            break;
+        }
+
+        switch (cmd) {
+
+            case cmdQueryValidPaths: {
+                bool lock = readInt(in);
+                bool substitute = readInt(in);
+                PathSet paths = readStorePaths<PathSet>(in);
+                if (lock && writeAllowed)
+                    for (auto & path : paths)
+                        store->addTempRoot(path);
+
+                /* If requested, substitute missing paths. This
+                   implements nix-copy-closure's --use-substitutes
+                   flag. */
+                if (substitute && writeAllowed) {
+                    /* Filter out .drv files (we don't want to build anything). */
+                    PathSet paths2;
+                    for (auto & path : paths)
+                        if (!isDerivation(path)) paths2.insert(path);
+                    unsigned long long downloadSize, narSize;
+                    PathSet willBuild, willSubstitute, unknown;
+                    queryMissing(*store, PathSet(paths2.begin(), paths2.end()),
+                        willBuild, willSubstitute, unknown, downloadSize, narSize);
+                    /* FIXME: should use ensurePath(), but it only
+                       does one path at a time. */
+                    if (!willSubstitute.empty())
+                        try {
+                            store->buildPaths(willSubstitute);
+                        } catch (Error & e) {
+                            printMsg(lvlError, format("warning: %1%") % e.msg());
+                        }
+                }
+
+                writeStrings(store->queryValidPaths(paths), out);
+                break;
+            }
+
+            case cmdQueryPathInfos: {
+                PathSet paths = readStorePaths<PathSet>(in);
+                // !!! Maybe we want a queryPathInfos?
+                foreach (PathSet::iterator, i, paths) {
+                    if (!store->isValidPath(*i))
+                        continue;
+                    ValidPathInfo info = store->queryPathInfo(*i);
+                    writeString(info.path, out);
+                    writeString(info.deriver, out);
+                    writeStrings(info.references, out);
+                    // !!! Maybe we want compression?
+                    writeLongLong(info.narSize, out); // downloadSize
+                    writeLongLong(info.narSize, out);
+                }
+                writeString("", out);
+                break;
+            }
+
+            case cmdDumpStorePath:
+                dumpPath(readStorePath(in), out);
+                break;
+
+            case cmdImportPaths: {
+                if (!writeAllowed) throw Error("importing paths is not allowed");
+                store->importPaths(false, in);
+                writeInt(1, out); // indicate success
+                break;
+            }
+
+            case cmdExportPaths: {
+                bool sign = readInt(in);
+                Paths sorted = topoSortPaths(*store, readStorePaths<PathSet>(in));
+                reverse(sorted.begin(), sorted.end());
+                exportPaths(*store, sorted, sign, out);
+                break;
+            }
+
+            case cmdBuildPaths: {
+
+                /* Used by build-remote.pl. */
+                if (!writeAllowed) throw Error("building paths is not allowed");
+                PathSet paths = readStorePaths<PathSet>(in);
+
+                // FIXME: changing options here doesn't work if we're
+                // building through the daemon.
+                verbosity = lvlError;
+                settings.keepLog = false;
+                settings.useSubstitutes = false;
+                settings.maxSilentTime = readInt(in);
+                settings.buildTimeout = readInt(in);
+
+                try {
+                    MonitorFdHup monitor(in.fd);
+                    store->buildPaths(paths);
+                    writeInt(0, out);
+                } catch (Error & e) {
+                    assert(e.status);
+                    writeInt(e.status, out);
+                    writeString(e.msg(), out);
+                }
+                break;
+            }
+
+            case cmdQueryClosure: {
+                bool includeOutputs = readInt(in);
+                PathSet paths = readStorePaths<PathSet>(in);
+                PathSet closure;
+                for (auto & i : paths)
+                    computeFSClosure(*store, i, closure, false, includeOutputs);
+                writeStrings(closure, out);
+                break;
+            }
+
+            default:
+                throw Error(format("unknown serve command %1%") % cmd);
+        }
+
+        out.flush();
+    }
+}
+
+
+/* Scan the arguments; find the operation, set global flags, put all
+   other flags in a list, and put all other arguments in another
+   list. */
+int main(int argc, char * * argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+
+        Strings opFlags, opArgs;
+        Operation op = 0;
+
+        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+            Operation oldOp = op;
+
+            if (*arg == "--help")
+                showManPage("nix-store");
+            else if (*arg == "--version")
+                printVersion("nix-store");
+            else if (*arg == "--realise" || *arg == "--realize" || *arg == "-r")
+                op = opRealise;
+            else if (*arg == "--add" || *arg == "-A")
+                op = opAdd;
+            else if (*arg == "--add-fixed")
+                op = opAddFixed;
+            else if (*arg == "--print-fixed-path")
+                op = opPrintFixedPath;
+            else if (*arg == "--delete")
+                op = opDelete;
+            else if (*arg == "--query" || *arg == "-q")
+                op = opQuery;
+            else if (*arg == "--print-env")
+                op = opPrintEnv;
+            else if (*arg == "--read-log" || *arg == "-l")
+                op = opReadLog;
+            else if (*arg == "--dump-db")
+                op = opDumpDB;
+            else if (*arg == "--load-db")
+                op = opLoadDB;
+            else if (*arg == "--register-validity")
+                op = opRegisterValidity;
+            else if (*arg == "--check-validity")
+                op = opCheckValidity;
+            else if (*arg == "--gc")
+                op = opGC;
+            else if (*arg == "--dump")
+                op = opDump;
+            else if (*arg == "--restore")
+                op = opRestore;
+            else if (*arg == "--export")
+                op = opExport;
+            else if (*arg == "--import")
+                op = opImport;
+            else if (*arg == "--init")
+                op = opInit;
+            else if (*arg == "--verify")
+                op = opVerify;
+            else if (*arg == "--verify-path")
+                op = opVerifyPath;
+            else if (*arg == "--repair-path")
+                op = opRepairPath;
+            else if (*arg == "--optimise" || *arg == "--optimize")
+                op = opOptimise;
+            else if (*arg == "--query-failed-paths")
+                op = opQueryFailedPaths;
+            else if (*arg == "--clear-failed-paths")
+                op = opClearFailedPaths;
+            else if (*arg == "--add-root")
+                gcRoot = absPath(getArg(*arg, arg, end));
+            else if (*arg == "--indirect")
+                indirectRoot = true;
+            else if (*arg == "--no-output")
+                noOutput = true;
+            else if (*arg == "--serve")
+                op = opServe;
+            else if (*arg != "" && arg->at(0) == '-') {
+                opFlags.push_back(*arg);
+                if (*arg == "--max-freed" || *arg == "--max-links" || *arg == "--max-atime") /* !!! hack */
+                    opFlags.push_back(getArg(*arg, arg, end));
+            }
+            else
+                opArgs.push_back(*arg);
+
+            if (oldOp && oldOp != op)
+                throw UsageError("only one operation may be specified");
+
+            return true;
+        });
+
+        if (!op) throw UsageError("no operation specified");
+
+        if (op != opDump && op != opRestore) /* !!! hack */
+            store = openStore(op != opGC);
+
+        op(opFlags, opArgs);
+    });
+}
diff --git a/src/nix-store/serve-protocol.hh b/src/nix-store/serve-protocol.hh
new file mode 100644
index 000000000000..741b622beb17
--- /dev/null
+++ b/src/nix-store/serve-protocol.hh
@@ -0,0 +1,22 @@
+#pragma once
+
+namespace nix {
+
+#define SERVE_MAGIC_1 0x390c9deb
+#define SERVE_MAGIC_2 0x5452eecb
+
+#define SERVE_PROTOCOL_VERSION 0x200
+#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
+#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
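+// Worked example: GET_PROTOCOL_MAJOR(0x200) == 0x200 and
+// GET_PROTOCOL_MINOR(0x200) == 0x00; the macros simply mask the high
+// and low byte of the version word.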
+
+typedef enum {
+    cmdQueryValidPaths = 1,
+    cmdQueryPathInfos = 2,
+    cmdDumpStorePath = 3,
+    cmdImportPaths = 4,
+    cmdExportPaths = 5,
+    cmdBuildPaths = 6,
+    cmdQueryClosure = 7,
+} ServeCommand;
+
+}
diff --git a/src/nix-store/xmlgraph.cc b/src/nix-store/xmlgraph.cc
new file mode 100644
index 000000000000..1b3ad3d28ad4
--- /dev/null
+++ b/src/nix-store/xmlgraph.cc
@@ -0,0 +1,71 @@
+#include "xmlgraph.hh"
+#include "util.hh"
+#include "store-api.hh"
+
+#include <iostream>
+
+
+using std::cout;
+
+namespace nix {
+
+
+static inline const string & xmlQuote(const string & s)
+{
+    // Luckily, store paths shouldn't contain any character that needs to be
+    // quoted.
+    return s;
+}
+
+
+static string makeEdge(const string & src, const string & dst)
+{
+    format f = format("  <edge src=\"%1%\" dst=\"%2%\"/>\n")
+      % xmlQuote(src) % xmlQuote(dst);
+    return f.str();
+}
+
+
+static string makeNode(const string & id)
+{
+    format f = format("  <node name=\"%1%\"/>\n") % xmlQuote(id);
+    return f.str();
+}
+
+
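+/* Sketch of the emitted format (store paths shortened for illustration):
+
+   <?xml version='1.0' encoding='utf-8'?>
+   <nix>
+     <node name="/nix/store/aaa-foo"/>
+     <edge src="/nix/store/bbb-dep" dst="/nix/store/aaa-foo"/>
+   </nix>
+*/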
+void printXmlGraph(const PathSet & roots)
+{
+    PathSet workList(roots);
+    PathSet doneSet;
+
+    cout << "<?xml version='1.0' encoding='utf-8'?>\n"
+	 << "<nix>\n";
+
+    while (!workList.empty()) {
+	Path path = *(workList.begin());
+	workList.erase(path);
+
+	if (doneSet.find(path) != doneSet.end()) continue;
+	doneSet.insert(path);
+
+	cout << makeNode(path);
+
+	PathSet references;
+	store->queryReferences(path, references);
+
+	for (PathSet::iterator i = references.begin();
+	     i != references.end(); ++i)
+	{
+	    if (*i != path) {
+		workList.insert(*i);
+		cout << makeEdge(*i, path);
+	    }
+	}
+
+    }
+
+    cout << "</nix>\n";
+}
+
+
+}
diff --git a/src/nix-store/xmlgraph.hh b/src/nix-store/xmlgraph.hh
new file mode 100644
index 000000000000..c2216c5a4627
--- /dev/null
+++ b/src/nix-store/xmlgraph.hh
@@ -0,0 +1,9 @@
+#pragma once
+
+#include "types.hh"
+
+namespace nix {
+
+void printXmlGraph(const PathSet & roots);
+
+}
diff --git a/tests/add.sh b/tests/add.sh
new file mode 100644
index 000000000000..e26e05843d7f
--- /dev/null
+++ b/tests/add.sh
@@ -0,0 +1,28 @@
+source common.sh
+
+path1=$(nix-store --add ./dummy)
+echo $path1
+
+path2=$(nix-store --add-fixed sha256 --recursive ./dummy)
+echo $path2
+
+if test "$path1" != "$path2"; then
+    echo "nix-store --add and --add-fixed mismatch"
+    exit 1
+fi    
+
+path3=$(nix-store --add-fixed sha256 ./dummy)
+echo $path3
+test "$path1" != "$path3" || exit 1
+
+path4=$(nix-store --add-fixed sha1 --recursive ./dummy)
+echo $path4
+test "$path1" != "$path4" || exit 1
+
+hash1=$(nix-store -q --hash $path1)
+echo $hash1
+
+hash2=$(nix-hash --type sha256 --base32 ./dummy)
+echo $hash2
+
+test "$hash1" = "sha256:$hash2"
diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh
new file mode 100644
index 000000000000..6f0c36f6300f
--- /dev/null
+++ b/tests/binary-cache.sh
@@ -0,0 +1,89 @@
+source common.sh
+
+clearStore
+clearManifests
+clearCache
+
+# Create the binary cache.
+outPath=$(nix-build dependencies.nix --no-out-link)
+
+nix-push --dest $cacheDir $outPath
+
+
+# By default, a binary cache doesn't support "nix-env -qas", but does
+# support installation.
+clearStore
+rm -f $NIX_STATE_DIR/binary-cache*
+
+export _NIX_CACHE_FILE_URLS=1
+
+nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "---"
+
+nix-store --option binary-caches "file://$cacheDir" -r $outPath
+
+[ -x $outPath/program ]
+
+
+# But with the right configuration, "nix-env -qas" should also work.
+clearStore
+rm -f $NIX_STATE_DIR/binary-cache*
+echo "WantMassQuery: 1" >> $cacheDir/nix-cache-info
+
+nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "--S"
+
+x=$(nix-env -f dependencies.nix -qas \* --prebuilt-only)
+[ -z "$x" ]
+
+nix-store --option binary-caches "file://$cacheDir" -r $outPath
+
+nix-store --check-validity $outPath
+nix-store -qR $outPath | grep input-2
+
+
+# Test whether Nix notices if the NAR doesn't match the hash in the NAR info.
+clearStore
+
+nar=$(ls $cacheDir/*.nar.xz | head -n1)
+mv $nar $nar.good
+mkdir -p $TEST_ROOT/empty
+nix-store --dump $TEST_ROOT/empty | xz > $nar
+
+nix-build --option binary-caches "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
+grep -q "hash mismatch in downloaded path" $TEST_ROOT/log
+
+mv $nar.good $nar
+
+
+# Test whether this unsigned cache is rejected if the user requires signed caches.
+clearStore
+
+rm -f $NIX_STATE_DIR/binary-cache*
+
+if nix-store --option binary-caches "file://$cacheDir" --option signed-binary-caches '*' -r $outPath; then
+    echo "unsigned binary cache incorrectly accepted"
+    exit 1
+fi
+
+
+# Test whether fallback works if we have cached info but the
+# corresponding NAR has disappeared.
+clearStore
+
+nix-build --option binary-caches "file://$cacheDir" dependencies.nix --dry-run # get info
+
+mkdir $cacheDir/tmp
+mv $cacheDir/*.nar* $cacheDir/tmp/
+
+NIX_DEBUG_SUBST=1 nix-build --option binary-caches "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result --fallback
+
+mv $cacheDir/tmp/* $cacheDir/
+
+
+# Test whether building works if the binary cache contains an
+# incomplete closure.
+clearStore
+
+rm $(grep -l "StorePath:.*dependencies-input-2" $cacheDir/*.narinfo)
+
+nix-build --option binary-caches "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
+grep -q "Downloading" $TEST_ROOT/log
diff --git a/tests/binary-patching.nix b/tests/binary-patching.nix
new file mode 100644
index 000000000000..8ed474d1f27f
--- /dev/null
+++ b/tests/binary-patching.nix
@@ -0,0 +1,18 @@
+{ version }:
+
+with import ./config.nix;
+
+mkDerivation {
+  name = "foo-${toString version}";
+  builder = builtins.toFile "builder.sh"
+    ''
+      mkdir $out
+      (for ((n = 1; n < 100000; n++)); do echo $n; done) > $out/foo
+      ${if version != 1 then ''
+        (for ((n = 100000; n < 110000; n++)); do echo $n; done) >> $out/foo
+      '' else ""}
+      ${if version == 3 then ''
+        echo foobar >> $out/foo
+      '' else ""}
+    '';
+}
diff --git a/tests/binary-patching.sh b/tests/binary-patching.sh
new file mode 100644
index 000000000000..188be109a0b5
--- /dev/null
+++ b/tests/binary-patching.sh
@@ -0,0 +1,61 @@
+source common.sh
+
+clearManifests
+
+mkdir -p $TEST_ROOT/cache2 $TEST_ROOT/patches
+
+RESULT=$TEST_ROOT/result
+
+# Build version 1 and 2 of the "foo" package.
+nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 \
+    $(nix-build -o $RESULT binary-patching.nix --arg version 1)
+mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest1 
+
+out2=$(nix-build -o $RESULT binary-patching.nix --arg version 2)
+nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 $out2
+mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest2
+    
+out3=$(nix-build -o $RESULT binary-patching.nix --arg version 3)
+nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 $out3
+mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest3
+
+rm $RESULT
+
+# Generate binary patches.
+nix-generate-patches $TEST_ROOT/cache2 $TEST_ROOT/patches \
+    file://$TEST_ROOT/patches $TEST_ROOT/manifest1 $TEST_ROOT/manifest2
+
+nix-generate-patches $TEST_ROOT/cache2 $TEST_ROOT/patches \
+    file://$TEST_ROOT/patches $TEST_ROOT/manifest2 $TEST_ROOT/manifest3
+
+grep -q "patch {" $TEST_ROOT/manifest3
+
+# Get rid of versions 2 and 3.
+nix-store --delete $out2 $out3
+
+# Pull the manifest containing the patches.
+clearManifests
+nix-pull file://$TEST_ROOT/manifest3
+
+# Make sure that the download size prediction uses the patches rather
+# than the full download.
+nix-build -o $RESULT binary-patching.nix --arg version 3 --dry-run 2>&1 | grep -q "0.01 MiB"
+
+# Now rebuild it.  This should use the two patches generated above.
+rm -f $TEST_ROOT/var/log/nix/downloads
+nix-build -o $RESULT binary-patching.nix --arg version 3
+rm $RESULT
+[ "$(grep ' patch ' $TEST_ROOT/var/log/nix/downloads | wc -l)" -eq 2 ]
+
+# Add a patch from version 1 directly to version 3.
+nix-generate-patches $TEST_ROOT/cache2 $TEST_ROOT/patches \
+    file://$TEST_ROOT/patches $TEST_ROOT/manifest1 $TEST_ROOT/manifest3
+
+# Rebuild version 3.  This should use the direct patch rather than the
+# sequence of two patches.
+nix-store --delete $out2 $out3
+clearManifests
+rm $TEST_ROOT/var/log/nix/downloads
+nix-pull file://$TEST_ROOT/manifest3
+nix-build -o $RESULT binary-patching.nix --arg version 3
+[ "$(grep ' patch ' $TEST_ROOT/var/log/nix/downloads | wc -l)" -eq 1 ]
diff --git a/tests/build-hook.hook.sh b/tests/build-hook.hook.sh
new file mode 100755
index 000000000000..ff709985d3af
--- /dev/null
+++ b/tests/build-hook.hook.sh
@@ -0,0 +1,23 @@
+#! /bin/sh
+
+#set -x
+
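+# Sketch of what this test hook does: for each derivation offered by Nix
+# it accepts only those whose output path contains "input-1"; for an
+# accepted build it reads the inputs/outputs lines, creates the output
+# directory itself and writes "BAR" into $outPath/foo.  Everything else
+# is declined, so Nix falls back to a normal local build.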
+while read x y drv rest; do
+
+    echo "HOOK for $drv" >&2
+
+    outPath=`sed 's/Derive(\[("out",\"\([^\"]*\)\".*/\1/' $drv`
+
+    echo "output path is $outPath" >&2
+
+    if `echo $outPath | grep -q input-1`; then
+        echo "# accept" >&2
+        read inputs
+        read outputs
+        mkdir $outPath
+        echo "BAR" > $outPath/foo
+    else
+        echo "# decline" >&2
+    fi
+
+done
\ No newline at end of file
diff --git a/tests/build-hook.nix b/tests/build-hook.nix
new file mode 100644
index 000000000000..666cc6ef8041
--- /dev/null
+++ b/tests/build-hook.nix
@@ -0,0 +1,22 @@
+with import ./config.nix;
+
+let
+
+  input1 = mkDerivation {
+    name = "build-hook-input-1";
+    builder = ./dependencies.builder1.sh;
+  };
+
+  input2 = mkDerivation {
+    name = "build-hook-input-2";
+    builder = ./dependencies.builder2.sh;
+  };
+
+in
+
+  mkDerivation {
+    name = "build-hook";
+    builder = ./dependencies.builder0.sh;
+    input1 = " " + input1 + "/.";
+    input2 = " ${input2}/.";
+  }
diff --git a/tests/build-hook.sh b/tests/build-hook.sh
new file mode 100644
index 000000000000..ef77a3ae5285
--- /dev/null
+++ b/tests/build-hook.sh
@@ -0,0 +1,10 @@
+source common.sh
+
+export NIX_BUILD_HOOK="$(pwd)/build-hook.hook.sh"
+
+outPath=$(nix-build build-hook.nix --no-out-link)
+
+echo "output path is $outPath"
+
+text=$(cat "$outPath"/foobar)
+if test "$text" != "BARBAR"; then exit 1; fi
diff --git a/tests/case-hack.sh b/tests/case-hack.sh
new file mode 100644
index 000000000000..ebc7cb1d5d65
--- /dev/null
+++ b/tests/case-hack.sh
@@ -0,0 +1,19 @@
+source common.sh
+
+clearStore
+
+rm -rf $TEST_ROOT/case
+
+opts="--option use-case-hack true"
+
+# Check whether restoring and dumping a NAR that contains case
+# collisions is round-tripping, even on a case-insensitive system.
+nix-store $opts  --restore $TEST_ROOT/case < case.nar
+nix-store $opts --dump $TEST_ROOT/case > $TEST_ROOT/case.nar
+cmp case.nar $TEST_ROOT/case.nar
+[ "$(nix-hash $opts --type sha256 $TEST_ROOT/case)" = "$(nix-hash --flat --type sha256 case.nar)" ]
+
+# Check whether we detect true collisions (e.g. those remaining after
+# removal of the suffix).
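+# (The use-case-hack option disambiguates colliding names by appending a
+# ~nix~case~hack~<N> suffix; the file created below simulates a leftover
+# suffixed entry that still collides once the suffix is stripped.)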
+touch "$TEST_ROOT/case/xt_CONNMARK.h~nix~case~hack~3"
+! nix-store $opts --dump $TEST_ROOT/case > /dev/null
diff --git a/tests/case.nar b/tests/case.nar
new file mode 100644
index 000000000000..22ff26db5afd
--- /dev/null
+++ b/tests/case.nar
Binary files differ
diff --git a/tests/check-refs.nix b/tests/check-refs.nix
new file mode 100644
index 000000000000..63791fe16094
--- /dev/null
+++ b/tests/check-refs.nix
@@ -0,0 +1,58 @@
+with import ./config.nix;
+
+rec {
+
+  dep = import ./dependencies.nix;
+
+  makeTest = nr: args: mkDerivation ({
+    name = "check-refs-" + toString nr;
+  } // args);
+
+  src = builtins.toFile "aux-ref" "bla bla";
+
+  test1 = makeTest 1 {
+    builder = builtins.toFile "builder.sh" "mkdir $out; ln -s $dep $out/link";
+    inherit dep;
+  };
+
+  test2 = makeTest 2 {
+    builder = builtins.toFile "builder.sh" "mkdir $out; ln -s ${src} $out/link";
+    inherit dep;
+  };
+
+  test3 = makeTest 3 {
+    builder = builtins.toFile "builder.sh" "mkdir $out; ln -s $dep $out/link";
+    allowedReferences = [];
+    inherit dep;
+  };
+
+  test4 = makeTest 4 {
+    builder = builtins.toFile "builder.sh" "mkdir $out; ln -s $dep $out/link";
+    allowedReferences = [dep];
+    inherit dep;
+  };
+
+  test5 = makeTest 5 {
+    builder = builtins.toFile "builder.sh" "mkdir $out";
+    allowedReferences = [];
+    inherit dep;
+  };
+
+  test6 = makeTest 6 {
+    builder = builtins.toFile "builder.sh" "mkdir $out; ln -s $out $out/link";
+    allowedReferences = [];
+    inherit dep;
+  };
+
+  test7 = makeTest 7 {
+    builder = builtins.toFile "builder.sh" "mkdir $out; ln -s $out $out/link";
+    allowedReferences = ["out"];
+    inherit dep;
+  };
+
+  test8 = makeTest 8 {
+    builder = builtins.toFile "builder.sh" "mkdir $out; ln -s ${test1} $out/link";
+    inherit dep;
+  };
+
+}
diff --git a/tests/check-refs.sh b/tests/check-refs.sh
new file mode 100644
index 000000000000..08fe01ec18b4
--- /dev/null
+++ b/tests/check-refs.sh
@@ -0,0 +1,36 @@
+source common.sh
+
+set -x
+
+RESULT=$TEST_ROOT/result
+
+dep=$(nix-build -o $RESULT check-refs.nix -A dep)
+
+# test1 references dep, not itself.
+test1=$(nix-build -o $RESULT check-refs.nix -A test1)
+! nix-store -q --references $test1 | grep -q $test1
+nix-store -q --references $test1 | grep -q $dep
+
+# test2 references src, not itself nor dep.
+test2=$(nix-build -o $RESULT check-refs.nix -A test2)
+! nix-store -q --references $test2 | grep -q $test2
+! nix-store -q --references $test2 | grep -q $dep
+nix-store -q --references $test2 | grep -q aux-ref
+
+# test3 should fail (unallowed ref).
+! nix-build -o $RESULT check-refs.nix -A test3
+
+# test4 should succeed.
+nix-build -o $RESULT check-refs.nix -A test4
+
+# test5 should succeed.
+nix-build -o $RESULT check-refs.nix -A test5
+
+# test6 should fail (unallowed self-ref).
+! nix-build -o $RESULT check-refs.nix -A test6
+
+# test7 should succeed (allowed self-ref).
+nix-build -o $RESULT check-refs.nix -A test7
+
+# test8 should fail (toFile depending on derivation output).
+! nix-build -o $RESULT check-refs.nix -A test8
diff --git a/tests/common.sh.in b/tests/common.sh.in
new file mode 100644
index 000000000000..8581223b3d89
--- /dev/null
+++ b/tests/common.sh.in
@@ -0,0 +1,88 @@
+set -e
+
+datadir="@datadir@"
+
+export TEST_ROOT=$(pwd)/test-tmp
+export NIX_STORE_DIR
+if ! NIX_STORE_DIR=$(readlink -f $TEST_ROOT/store 2> /dev/null); then
+    # Maybe the build directory is symlinked.
+    export NIX_IGNORE_SYMLINK_STORE=1
+    NIX_STORE_DIR=$TEST_ROOT/store
+fi
+export NIX_LOCALSTATE_DIR=$TEST_ROOT/var
+export NIX_LOG_DIR=$TEST_ROOT/var/log/nix
+export NIX_STATE_DIR=$TEST_ROOT/var/nix
+export NIX_DB_DIR=$TEST_ROOT/db
+export NIX_CONF_DIR=$TEST_ROOT/etc
+export NIX_MANIFESTS_DIR=$TEST_ROOT/var/nix/manifests
+export _NIX_TEST_SHARED=$TEST_ROOT/shared
+export NIX_REMOTE=$NIX_REMOTE_
+
+export PATH=@bindir@:$PATH
+
+export NIX_BUILD_HOOK=
+export dot=@dot@
+export xmllint="@xmllint@"
+export SHELL="@bash@"
+
+export version=@PACKAGE_VERSION@
+export system=@system@
+
+cacheDir=$TEST_ROOT/binary-cache
+
+readLink() {
+    ls -l "$1" | sed 's/.*->\ //'
+}
+
+clearProfiles() {
+    profiles="$NIX_STATE_DIR"/profiles
+    rm -rf $profiles
+}
+
+clearStore() {
+    echo "clearing store..."
+    chmod -R +w "$NIX_STORE_DIR"
+    rm -rf "$NIX_STORE_DIR"
+    mkdir "$NIX_STORE_DIR"
+    rm -rf "$NIX_DB_DIR"
+    mkdir "$NIX_DB_DIR"
+    nix-store --init
+    clearProfiles
+    rm -f "$NIX_STATE_DIR"/gcroots/auto/*
+    rm -f "$NIX_STATE_DIR"/gcroots/ref
+}
+
+clearManifests() {
+    rm -f $NIX_STATE_DIR/manifests/*
+}
+
+clearCache() {
+    rm -rf "$cacheDir"
+}
+
+startDaemon() {
+    # Start the daemon, wait for the socket to appear.  !!!
+    # ‘nix-daemon’ should have an option to fork into the background.
+    rm -f $NIX_STATE_DIR/daemon-socket/socket
+    nix-daemon &
+    for ((i = 0; i < 30; i++)); do
+        if [ -e $NIX_STATE_DIR/daemon-socket/socket ]; then break; fi
+        sleep 1
+    done
+    pidDaemon=$!
+    trap "kill -9 $pidDaemon" EXIT
+    export NIX_REMOTE=daemon
+}
+
+killDaemon() {
+    kill -9 $pidDaemon
+    wait $pidDaemon || true
+    trap "" EXIT
+}
+
+fail() {
+    echo "$1"
+    exit 1
+}
+
+set -x
diff --git a/tests/config.nix b/tests/config.nix
new file mode 100644
index 000000000000..6244a15fa48a
--- /dev/null
+++ b/tests/config.nix
@@ -0,0 +1,20 @@
+with import <nix/config.nix>;
+
+rec {
+  inherit shell;
+
+  path = coreutils;
+
+  system = builtins.currentSystem;
+
+  shared = builtins.getEnv "_NIX_TEST_SHARED";
+
+  mkDerivation = args:
+    derivation ({
+      inherit system;
+      builder = shell;
+      args = ["-e" args.builder];
+      PATH = path;
+    } // removeAttrs args ["builder" "meta"])
+    // { meta = args.meta or {}; };
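+
+  # Illustrative usage (as in the other test expressions):
+  #   mkDerivation { name = "foo"; builder = ./foo.builder.sh; }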
+}
diff --git a/tests/dependencies.builder0.sh b/tests/dependencies.builder0.sh
new file mode 100644
index 000000000000..c37bf909a5f9
--- /dev/null
+++ b/tests/dependencies.builder0.sh
@@ -0,0 +1,16 @@
+[ "${input1: -2}" = /. ]
+[ "${input2: -2}" = /. ]
+
+mkdir $out
+echo $(cat $input1/foo)$(cat $input2/bar) > $out/foobar
+
+ln -s $input2 $out/input-2
+
+# Self-reference.
+ln -s $out $out/self
+
+# Executable.
+echo program > $out/program
+chmod +x $out/program
+
+echo FOO
diff --git a/tests/dependencies.builder1.sh b/tests/dependencies.builder1.sh
new file mode 100644
index 000000000000..4b006a17d70f
--- /dev/null
+++ b/tests/dependencies.builder1.sh
@@ -0,0 +1,2 @@
+mkdir $out
+echo FOO > $out/foo
diff --git a/tests/dependencies.builder2.sh b/tests/dependencies.builder2.sh
new file mode 100644
index 000000000000..4f886fdb3a1a
--- /dev/null
+++ b/tests/dependencies.builder2.sh
@@ -0,0 +1,2 @@
+mkdir $out
+echo BAR > $out/bar
diff --git a/tests/dependencies.nix b/tests/dependencies.nix
new file mode 100644
index 000000000000..687237add820
--- /dev/null
+++ b/tests/dependencies.nix
@@ -0,0 +1,23 @@
+with import ./config.nix;
+
+let {
+
+  input1 = mkDerivation {
+    name = "dependencies-input-1";
+    builder = ./dependencies.builder1.sh;
+  };
+
+  input2 = mkDerivation {
+    name = "dependencies-input-2";
+    builder = "${./dependencies.builder2.sh}";
+  };
+
+  body = mkDerivation {
+    name = "dependencies";
+    builder = ./dependencies.builder0.sh + "/FOOBAR/../.";
+    input1 = input1 + "/.";
+    input2 = "${input2}/.";
+    meta.description = "Random test package";
+  };
+
+}
diff --git a/tests/dependencies.sh b/tests/dependencies.sh
new file mode 100644
index 000000000000..df204d185ddc
--- /dev/null
+++ b/tests/dependencies.sh
@@ -0,0 +1,52 @@
+source common.sh
+
+clearStore
+
+drvPath=$(nix-instantiate dependencies.nix)
+
+echo "derivation is $drvPath"
+
+nix-store -q --tree "$drvPath" | grep '   +---.*builder1.sh'
+
+# Test Graphviz graph generation.
+nix-store -q --graph "$drvPath" > $TEST_ROOT/graph
+if test -n "$dot"; then
+    # Does it parse?
+    $dot < $TEST_ROOT/graph
+fi
+
+outPath=$(nix-store -rvv "$drvPath") || fail "build failed"
+
+# Test Graphviz graph generation.
+nix-store -q --graph "$outPath" > $TEST_ROOT/graph
+if test -n "$dot"; then
+    # Does it parse?
+    $dot < $TEST_ROOT/graph
+fi    
+
+nix-store -q --tree "$outPath" | grep '+---.*dependencies-input-2'
+
+echo "output path is $outPath"
+
+text=$(cat "$outPath"/foobar)
+if test "$text" != "FOOBAR"; then exit 1; fi
+
+deps=$(nix-store -quR "$drvPath")
+
+echo "output closure contains $deps"
+
+# The output path should be in the closure.
+echo "$deps" | grep -q "$outPath"
+
+# Input-1 is not retained.
+if echo "$deps" | grep -q "dependencies-input-1"; then exit 1; fi
+
+# Input-2 is retained.
+input2OutPath=$(echo "$deps" | grep "dependencies-input-2")
+
+# The referrers closure of input-2 should include outPath.
+nix-store -q --referrers-closure "$input2OutPath" | grep "$outPath"
+
+# Check that the derivers are set properly.
+test $(nix-store -q --deriver "$outPath") = "$drvPath"
+nix-store -q --deriver "$input2OutPath" | grep -q -- "-input-2.drv" 
diff --git a/tests/dump-db.sh b/tests/dump-db.sh
new file mode 100644
index 000000000000..234b7ac02680
--- /dev/null
+++ b/tests/dump-db.sh
@@ -0,0 +1,20 @@
+source common.sh
+
+clearStore
+
+path=$(nix-build dependencies.nix -o $TEST_ROOT/result)
+
+deps="$(nix-store -qR $TEST_ROOT/result)"
+
+nix-store --dump-db > $TEST_ROOT/dump
+
+rm -rf $NIX_DB_DIR
+
+nix-store --load-db < $TEST_ROOT/dump
+
+deps2="$(nix-store -qR $TEST_ROOT/result)"
+
+[ "$deps" = "$deps2" ];
+
+nix-store --dump-db > $TEST_ROOT/dump2
+cmp $TEST_ROOT/dump $TEST_ROOT/dump2
diff --git a/tests/export-graph.nix b/tests/export-graph.nix
new file mode 100644
index 000000000000..fdac9583db2c
--- /dev/null
+++ b/tests/export-graph.nix
@@ -0,0 +1,29 @@
+with import ./config.nix;
+
+rec {
+
+  printRefs =
+    ''
+      echo $exportReferencesGraph
+      while read path; do
+          read drv
+          read nrRefs
+          echo "$path has $nrRefs references"
+          echo "$path" >> $out
+          for ((n = 0; n < $nrRefs; n++)); do read ref; echo "ref $ref"; test -e "$ref"; done
+      done < refs
+    '';
+
+  foo."bar.runtimeGraph" = mkDerivation {
+    name = "dependencies";
+    builder = builtins.toFile "build-graph-builder" "${printRefs}";
+    exportReferencesGraph = ["refs" (import ./dependencies.nix)];
+  };
+
+  foo."bar.buildGraph" = mkDerivation {
+    name = "dependencies";
+    builder = builtins.toFile "build-graph-builder" "${printRefs}";
+    exportReferencesGraph = ["refs" (import ./dependencies.nix).drvPath];
+  };
+
+}
diff --git a/tests/export-graph.sh b/tests/export-graph.sh
new file mode 100644
index 000000000000..a6fd69054425
--- /dev/null
+++ b/tests/export-graph.sh
@@ -0,0 +1,30 @@
+source common.sh
+
+clearStore
+clearProfiles
+
+checkRef() {
+    nix-store -q --references $TEST_ROOT/result | grep -q "$1" || fail "missing reference $1"
+}
+
+# Test the export of the runtime dependency graph.
+
+outPath=$(nix-build ./export-graph.nix -A 'foo."bar.runtimeGraph"' -o $TEST_ROOT/result)
+
+test $(nix-store -q --references $TEST_ROOT/result | wc -l) = 2 || fail "bad nr of references"
+
+checkRef input-2
+for i in $(cat $outPath); do checkRef $i; done
+
+# Test the export of the build-time dependency graph.
+
+nix-store --gc # should force rebuild of input-1
+
+outPath=$(nix-build ./export-graph.nix -A 'foo."bar.buildGraph"' -o $TEST_ROOT/result)
+
+checkRef input-1
+checkRef input-1.drv
+checkRef input-2
+checkRef input-2.drv
+
+for i in $(cat $outPath); do checkRef $i; done
diff --git a/tests/export.sh b/tests/export.sh
new file mode 100644
index 000000000000..ec7560f19728
--- /dev/null
+++ b/tests/export.sh
@@ -0,0 +1,31 @@
+source common.sh
+
+clearStore
+
+outPath=$(nix-build dependencies.nix --no-out-link)
+
+nix-store --export $outPath > $TEST_ROOT/exp
+
+nix-store --export $(nix-store -qR $outPath) > $TEST_ROOT/exp_all
+
+
+clearStore
+
+if nix-store --import < $TEST_ROOT/exp; then
+    echo "importing a non-closure should fail"
+    exit 1
+fi
+
+
+clearStore
+
+nix-store --import < $TEST_ROOT/exp_all
+
+nix-store --export $(nix-store -qR $outPath) > $TEST_ROOT/exp_all2
+
+
+clearStore
+
+# Regression test: the derivers in exp_all2 are empty, which shouldn't
+# cause a failure.
+nix-store --import < $TEST_ROOT/exp_all2
diff --git a/tests/fallback.sh b/tests/fallback.sh
new file mode 100644
index 000000000000..f3a6b50515bf
--- /dev/null
+++ b/tests/fallback.sh
@@ -0,0 +1,20 @@
+source common.sh
+
+clearStore
+
+drvPath=$(nix-instantiate simple.nix)
+echo "derivation is $drvPath"
+
+outPath=$(nix-store -q --fallback "$drvPath")
+echo "output path is $outPath"
+
+# Build with a substitute that fails.  This should fail.
+export NIX_SUBSTITUTERS=$(pwd)/substituter2.sh
+if nix-store -r "$drvPath"; then echo unexpected fallback; exit 1; fi
+
+# Build with a substitute that fails.  This should fall back to a source build.
+export NIX_SUBSTITUTERS=$(pwd)/substituter2.sh
+nix-store -r --fallback "$drvPath"
+
+text=$(cat "$outPath"/hello)
+if test "$text" != "Hello World!"; then exit 1; fi
diff --git a/tests/fetchurl.nix b/tests/fetchurl.nix
new file mode 100644
index 000000000000..2abcc039a832
--- /dev/null
+++ b/tests/fetchurl.nix
@@ -0,0 +1,6 @@
+{ filename, sha256 }:
+
+import <nix/fetchurl.nix> {
+  url = "file://${filename}";
+  inherit sha256;
+}
diff --git a/tests/fetchurl.sh b/tests/fetchurl.sh
new file mode 100644
index 000000000000..6acc87eafca8
--- /dev/null
+++ b/tests/fetchurl.sh
@@ -0,0 +1,9 @@
+source common.sh
+
+clearStore
+
+hash=$(nix-hash --flat --type sha256 ./fetchurl.nix)
+
+outPath=$(nix-build ./fetchurl.nix --argstr filename $(pwd)/fetchurl.nix --argstr sha256 $hash --no-out-link)
+
+cmp $outPath fetchurl.nix
diff --git a/tests/filter-source.nix b/tests/filter-source.nix
new file mode 100644
index 000000000000..a620f0fda5c4
--- /dev/null
+++ b/tests/filter-source.nix
@@ -0,0 +1,12 @@
+with import ./config.nix;
+
+mkDerivation {
+  name = "filter";
+  builder = builtins.toFile "builder" "ln -s $input $out";
+  input =
+    let filter = path: type:
+      type != "symlink"
+      && baseNameOf path != "foo"
+      && !((import ./lang/lib.nix).hasSuffix ".bak" (baseNameOf path));
+    in builtins.filterSource filter ./test-tmp/filterin;
+}
diff --git a/tests/filter-source.sh b/tests/filter-source.sh
new file mode 100644
index 000000000000..f7a096ed605d
--- /dev/null
+++ b/tests/filter-source.sh
@@ -0,0 +1,20 @@
+source common.sh
+
+rm -rf $TEST_ROOT/filterin
+mkdir $TEST_ROOT/filterin
+mkdir $TEST_ROOT/filterin/foo
+touch $TEST_ROOT/filterin/foo/bar
+touch $TEST_ROOT/filterin/xyzzy
+touch $TEST_ROOT/filterin/b
+touch $TEST_ROOT/filterin/bak
+touch $TEST_ROOT/filterin/bla.c.bak
+ln -s xyzzy $TEST_ROOT/filterin/link
+
+nix-build ./filter-source.nix -o $TEST_ROOT/filterout
+
+set -x
+test ! -e $TEST_ROOT/filterout/foo/bar
+test -e $TEST_ROOT/filterout/xyzzy
+test -e $TEST_ROOT/filterout/bak
+test ! -e $TEST_ROOT/filterout/bla.c.bak
+test ! -L $TEST_ROOT/filterout/link
diff --git a/tests/fixed.builder1.sh b/tests/fixed.builder1.sh
new file mode 100644
index 000000000000..c41bb2b9a611
--- /dev/null
+++ b/tests/fixed.builder1.sh
@@ -0,0 +1,3 @@
+if test "$IMPURE_VAR1" != "foo"; then exit 1; fi
+if test "$IMPURE_VAR2" != "bar"; then exit 1; fi
+echo "Hello World!" > $out
diff --git a/tests/fixed.builder2.sh b/tests/fixed.builder2.sh
new file mode 100644
index 000000000000..31ea1579a514
--- /dev/null
+++ b/tests/fixed.builder2.sh
@@ -0,0 +1,6 @@
+echo dummy: $dummy
+if test -n "$dummy"; then sleep 2; fi
+mkdir $out
+mkdir $out/bla
+echo "Hello World!" > $out/foo
+ln -s foo $out/bar
diff --git a/tests/fixed.nix b/tests/fixed.nix
new file mode 100644
index 000000000000..76580ffa19e8
--- /dev/null
+++ b/tests/fixed.nix
@@ -0,0 +1,50 @@
+with import ./config.nix;
+
+rec {
+
+  f2 = dummy: builder: mode: algo: hash: mkDerivation {
+    name = "fixed";
+    inherit builder;
+    outputHashMode = mode;
+    outputHashAlgo = algo;
+    outputHash = hash;
+    inherit dummy;
+    impureEnvVars = ["IMPURE_VAR1" "IMPURE_VAR2"];
+  };
+
+  f = f2 "";
+
+  good = [
+    (f ./fixed.builder1.sh "flat" "md5" "8ddd8be4b179a529afa5f2ffae4b9858")
+    (f ./fixed.builder1.sh "flat" "sha1" "a0b65939670bc2c010f4d5d6a0b3e4e4590fb92b")
+    (f ./fixed.builder2.sh "recursive" "md5" "3670af73070fa14077ad74e0f5ea4e42")
+    (f ./fixed.builder2.sh "recursive" "sha1" "vw46m23bizj4n8afrc0fj19wrp7mj3c0")
+  ];
+
+  good2 = [
+    # Yes, this looks fscked up: builder2 doesn't have that result.
+    # But Nix sees that an output with the desired hash already
+    # exists, and will refrain from building it.
+    (f ./fixed.builder2.sh "flat" "md5" "8ddd8be4b179a529afa5f2ffae4b9858")
+  ];
+
+  sameAsAdd =
+    f ./fixed.builder2.sh "recursive" "sha256" "1ixr6yd3297ciyp9im522dfxpqbkhcw0pylkb2aab915278fqaik";
+
+  bad = [
+    (f ./fixed.builder1.sh "flat" "md5" "0ddd8be4b179a529afa5f2ffae4b9858")
+  ];
+
+  reallyBad = [
+    # Hash too short, and not base-32 either.
+    (f ./fixed.builder1.sh "flat" "md5" "ddd8be4b179a529afa5f2ffae4b9858")
+  ];
+
+  # Test for building two derivations in parallel that produce the
+  # same output path because they're fixed-output derivations.
+  parallelSame = [
+    (f2 "foo" ./fixed.builder2.sh "recursive" "md5" "3670af73070fa14077ad74e0f5ea4e42")
+    (f2 "bar" ./fixed.builder2.sh "recursive" "md5" "3670af73070fa14077ad74e0f5ea4e42")
+  ];
+
+}
diff --git a/tests/fixed.sh b/tests/fixed.sh
new file mode 100644
index 000000000000..ed0d06dd29cb
--- /dev/null
+++ b/tests/fixed.sh
@@ -0,0 +1,52 @@
+source common.sh
+
+clearStore
+
+export IMPURE_VAR1=foo
+export IMPURE_VAR2=bar
+
+echo 'testing good...'
+nix-build fixed.nix -A good --no-out-link
+
+echo 'testing good2...'
+nix-build fixed.nix -A good2 --no-out-link
+
+echo 'testing bad...'
+nix-build fixed.nix -A bad --no-out-link && fail "should fail"
+
+echo 'testing reallyBad...'
+nix-instantiate fixed.nix -A reallyBad && fail "should fail"
+
+# While we're at it, check attribute selection a bit more.
+echo 'testing attribute selection...'
+test $(nix-instantiate fixed.nix -A good.1 | wc -l) = 1
+
+# Test parallel builds of derivations that produce the same output.
+# Only one should run at the same time.
+echo 'testing parallelSame...'
+clearStore
+nix-build fixed.nix -A parallelSame --no-out-link -j2
+
+# Fixed-output derivations with a recursive SHA-256 hash should
+# produce the same path as "nix-store --add".
+echo 'testing sameAsAdd...'
+out=$(nix-build fixed.nix -A sameAsAdd --no-out-link)
+
+# This is what fixed.builder2 produces...
+rm -rf $TEST_ROOT/fixed
+mkdir $TEST_ROOT/fixed
+mkdir $TEST_ROOT/fixed/bla
+echo "Hello World!" > $TEST_ROOT/fixed/foo
+ln -s foo $TEST_ROOT/fixed/bar
+
+out2=$(nix-store --add $TEST_ROOT/fixed)
+echo $out2
+test "$out" = "$out2" || exit 1
+
+out3=$(nix-store --add-fixed --recursive sha256 $TEST_ROOT/fixed)
+echo $out3
+test "$out" = "$out3" || exit 1
+
+out4=$(nix-store --print-fixed-path --recursive sha256 "1ixr6yd3297ciyp9im522dfxpqbkhcw0pylkb2aab915278fqaik" fixed)
+echo $out4
+test "$out" = "$out4" || exit 1
diff --git a/tests/gc-concurrent.builder.sh b/tests/gc-concurrent.builder.sh
new file mode 100644
index 000000000000..0cd67df3aeda
--- /dev/null
+++ b/tests/gc-concurrent.builder.sh
@@ -0,0 +1,13 @@
+mkdir $out
+echo $(cat $input1/foo)$(cat $input2/bar) > $out/foobar
+
+sleep 10
+
+# $out should not have been GC'ed while we were sleeping, but just in
+# case...
+mkdir -p $out
+
+# Check that the GC hasn't deleted the lock on our output.
+test -e "$out.lock"
+
+ln -s $input2 $out/input-2
diff --git a/tests/gc-concurrent.nix b/tests/gc-concurrent.nix
new file mode 100644
index 000000000000..c0595cc471b9
--- /dev/null
+++ b/tests/gc-concurrent.nix
@@ -0,0 +1,27 @@
+with import ./config.nix;
+
+rec {
+
+  input1 = mkDerivation {
+    name = "dependencies-input-1";
+    builder = ./dependencies.builder1.sh;
+  };
+
+  input2 = mkDerivation {
+    name = "dependencies-input-2";
+    builder = ./dependencies.builder2.sh;
+  };
+
+  test1 = mkDerivation {
+    name = "gc-concurrent";
+    builder = ./gc-concurrent.builder.sh;
+    inherit input1 input2;
+  };
+
+  test2 = mkDerivation {
+    name = "gc-concurrent2";
+    builder = ./gc-concurrent2.builder.sh;
+    inherit input1 input2;
+  };
+  
+}
diff --git a/tests/gc-concurrent.sh b/tests/gc-concurrent.sh
new file mode 100644
index 000000000000..0bc5a12d318f
--- /dev/null
+++ b/tests/gc-concurrent.sh
@@ -0,0 +1,58 @@
+source common.sh
+
+clearStore
+
+drvPath1=$(nix-instantiate gc-concurrent.nix -A test1)
+outPath1=$(nix-store -q $drvPath1)
+
+drvPath2=$(nix-instantiate gc-concurrent.nix -A test2)
+outPath2=$(nix-store -q $drvPath2)
+
+drvPath3=$(nix-instantiate simple.nix)
+outPath3=$(nix-store -r $drvPath3)
+
+! test -e $outPath3.lock
+touch $outPath3.lock
+
+rm -f "$NIX_STATE_DIR"/gcroots/foo*
+ln -s $drvPath2 "$NIX_STATE_DIR"/gcroots/foo
+ln -s $outPath3 "$NIX_STATE_DIR"/gcroots/foo2
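+# The stale lock on $outPath3 should be removed by the collector (checked at
+# the end), while these roots keep $drvPath2 and $outPath3 themselves alive.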
+
+# Start build #1 in the background.  It starts immediately.
+nix-store -rvv "$drvPath1" &
+pid1=$!
+
+# Start build #2 in the background after 10 seconds.
+(sleep 10 && nix-store -rvv "$drvPath2") &
+pid2=$!
+
+# Run the garbage collector while the build is running.
+sleep 6
+nix-collect-garbage
+
+# Wait for build #1/#2 to finish.
+echo waiting for pid $pid1 to finish...
+wait $pid1
+echo waiting for pid $pid2 to finish...
+wait $pid2
+
+# Check that the root of build #1 and its dependencies haven't been
+# deleted.  They should not be deleted by the GC because they were
+# being built during the GC.
+cat $outPath1/foobar
+cat $outPath1/input-2/bar
+
+# Check that build #2 has succeeded.  It should succeed because the
+# derivation is a GC root.
+cat $outPath2/foobar
+
+rm -f "$NIX_STATE_DIR"/gcroots/foo*
+
+# The collector should have deleted lock files for paths that have
+# been built previously.
+! test -e $outPath3.lock
+
+# If we run the collector now, it should delete outPath1/2.
+nix-collect-garbage
+! test -e $outPath1
+! test -e $outPath2
diff --git a/tests/gc-concurrent2.builder.sh b/tests/gc-concurrent2.builder.sh
new file mode 100644
index 000000000000..4bfb33103e73
--- /dev/null
+++ b/tests/gc-concurrent2.builder.sh
@@ -0,0 +1,7 @@
+mkdir $out
+echo $(cat $input1/foo)$(cat $input2/bar)xyzzy > $out/foobar
+
+# Check that the GC hasn't deleted the lock on our output.
+test -e "$out.lock"
+
+sleep 6
diff --git a/tests/gc-runtime.nix b/tests/gc-runtime.nix
new file mode 100644
index 000000000000..ee5980bdff98
--- /dev/null
+++ b/tests/gc-runtime.nix
@@ -0,0 +1,17 @@
+with import ./config.nix;
+
+mkDerivation {
+  name = "gc-runtime";
+  builder =
+    # Test inline source file definitions.
+    builtins.toFile "builder.sh" ''
+      mkdir $out
+
+      cat > $out/program <<EOF
+      #! ${shell}
+      sleep 10000
+      EOF
+
+      chmod +x $out/program
+    '';
+}
diff --git a/tests/gc-runtime.sh b/tests/gc-runtime.sh
new file mode 100644
index 000000000000..a44195756f52
--- /dev/null
+++ b/tests/gc-runtime.sh
@@ -0,0 +1,38 @@
+source common.sh
+
+case $system in
+    *linux*)
+        ;;
+    *)
+        exit 0;
+esac
+
+set -m # enable job control, needed for kill
+
+profiles="$NIX_STATE_DIR"/profiles
+rm -f $profiles/*
+
+nix-env -p $profiles/test -f ./gc-runtime.nix -i gc-runtime
+
+outPath=$(nix-env -p $profiles/test -q --no-name --out-path gc-runtime)
+echo $outPath
+
+echo "backgrounding program..."
+$profiles/test/program &
+sleep 2 # hack - wait for the program to get started
+child=$!
+echo PID=$child
+
+nix-env -p $profiles/test -e gc-runtime
+nix-env -p $profiles/test --delete-generations old
+
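+# The running program should act as a runtime GC root, so $outPath must
+# survive the collection (checked below).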
+nix-store --gc
+
+kill -- -$child
+
+if ! test -e $outPath; then
+    echo "running program was garbage collected!"
+    exit 1
+fi
+
+exit 0
diff --git a/tests/gc.sh b/tests/gc.sh
new file mode 100644
index 000000000000..0adb05bf173a
--- /dev/null
+++ b/tests/gc.sh
@@ -0,0 +1,40 @@
+source common.sh
+
+drvPath=$(nix-instantiate dependencies.nix)
+outPath=$(nix-store -rvv "$drvPath")
+
+# Set a GC root.
+rm -f "$NIX_STATE_DIR"/gcroots/foo
+ln -sf $outPath "$NIX_STATE_DIR"/gcroots/foo
+
+[ "$(nix-store -q --roots $outPath)" = "$NIX_STATE_DIR"/gcroots/foo ]
+
+nix-store --gc --print-roots | grep $outPath
+nix-store --gc --print-live | grep $outPath
+nix-store --gc --print-dead | grep $drvPath
+if nix-store --gc --print-dead | grep $outPath; then false; fi
+
+nix-store --gc --print-dead
+
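+# readLink (a helper expected to come from common.sh) resolves the input-2
+# symlink; that path is still reachable from the root, so deleting it must fail.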
+inUse=$(readLink $outPath/input-2)
+if nix-store --delete $inUse; then false; fi
+test -e $inUse
+
+if nix-store --delete $outPath; then false; fi
+test -e $outPath
+
+nix-collect-garbage
+
+# Check that the root and its dependencies haven't been deleted.
+cat $outPath/foobar
+cat $outPath/input-2/bar
+
+# Check that the derivation has been GC'd.
+if test -e $drvPath; then false; fi
+
+rm "$NIX_STATE_DIR"/gcroots/foo
+
+nix-collect-garbage
+
+# Check that the output has been GC'd.
+if test -e $outPath/foobar; then false; fi
diff --git a/tests/hash-check.nix b/tests/hash-check.nix
new file mode 100644
index 000000000000..4a8e9b8a8df9
--- /dev/null
+++ b/tests/hash-check.nix
@@ -0,0 +1,29 @@
+let {
+
+  input1 = derivation {
+    name = "dependencies-input-1";
+    system = "i086-msdos";
+    builder = "/bar/sh";
+    args = ["-e" "-x" ./dummy];
+  };
+
+  input2 = derivation {
+    name = "dependencies-input-2";
+    system = "i086-msdos";
+    builder = "/bar/sh";
+    args = ["-e" "-x" ./dummy];
+    outputHashMode = "recursive";
+    outputHashAlgo = "md5";
+    outputHash = "ffffffffffffffffffffffffffffffff";
+  };
+
+  body = derivation {
+    name = "dependencies";
+    system = "i086-msdos";
+    builder = "/bar/sh";
+    args = ["-e" "-x" (./dummy  + "/FOOBAR/../.")];
+    input1 = input1 + "/.";
+    inherit input2;
+  };
+
+}
\ No newline at end of file
diff --git a/tests/hash.sh b/tests/hash.sh
new file mode 100644
index 000000000000..d659bbe34e8f
--- /dev/null
+++ b/tests/hash.sh
@@ -0,0 +1,62 @@
+source common.sh
+
+try () {
+    printf "%s" "$2" > $TEST_ROOT/vector
+    hash=$(nix-hash $EXTRA --flat --type "$1" $TEST_ROOT/vector)
+    if test "$hash" != "$3"; then
+        echo "hash $1, expected $3, got $hash"
+        exit 1
+    fi
+}
+
+try md5 "" "d41d8cd98f00b204e9800998ecf8427e"
+try md5 "a" "0cc175b9c0f1b6a831c399e269772661"
+try md5 "abc" "900150983cd24fb0d6963f7d28e17f72"
+try md5 "message digest" "f96b697d7cb7938d525a2f31aaf161d0"
+try md5 "abcdefghijklmnopqrstuvwxyz" "c3fcd3d76192e4007dfb496cca67e13b"
+try md5 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" "d174ab98d277d9f5a5611c2c9f419d9f"
+try md5 "12345678901234567890123456789012345678901234567890123456789012345678901234567890" "57edf4a22be3c955ac49da2e2107b67a"
+
+try sha1 "abc" "a9993e364706816aba3e25717850c26c9cd0d89d"
+try sha1 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "84983e441c3bd26ebaae4aa1f95129e5e54670f1"
+
+try sha256 "abc" "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
+try sha256 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1"
+
+EXTRA=--base32
+try sha256 "abc" "1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s"
+EXTRA=
+
+try2 () {
+    hash=$(nix-hash --type "$1" $TEST_ROOT/hash-path)
+    if test "$hash" != "$2"; then
+        echo "hash $1, expected $2, got $hash"
+        exit 1
+    fi
+}
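+# Without --flat, nix-hash hashes the NAR serialisation of the path, which
+# records file type and the executable bit but not timestamps or other
+# permission bits -- hence the checks below.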
+
+rm -rf $TEST_ROOT/hash-path
+mkdir $TEST_ROOT/hash-path
+echo "Hello World" > $TEST_ROOT/hash-path/hello
+
+try2 md5 "ea9b55537dd4c7e104515b2ccfaf4100"
+
+# Execute bit matters.
+chmod +x $TEST_ROOT/hash-path/hello
+try2 md5 "20f3ffe011d4cfa7d72bfabef7882836"
+
+# Mtime and other bits don't.
+touch -r . $TEST_ROOT/hash-path/hello
+chmod 744 $TEST_ROOT/hash-path/hello
+try2 md5 "20f3ffe011d4cfa7d72bfabef7882836"
+
+# File type (e.g., symlink) does.
+rm $TEST_ROOT/hash-path/hello
+ln -s x $TEST_ROOT/hash-path/hello
+try2 md5 "f78b733a68f5edbdf9413899339eaa4a"
+
+# Conversion.
+test $(nix-hash --type sha256 --to-base32 "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad") = "1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s"
+test $(nix-hash --type sha256 --to-base16 "1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s") = "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
+test $(nix-hash --type sha1 --to-base32 "800d59cfcd3c05e900cb4e214be48f6b886a08df") = "vw46m23bizj4n8afrc0fj19wrp7mj3c0"
+test $(nix-hash --type sha1 --to-base16 "vw46m23bizj4n8afrc0fj19wrp7mj3c0") = "800d59cfcd3c05e900cb4e214be48f6b886a08df"
diff --git a/tests/import-derivation.nix b/tests/import-derivation.nix
new file mode 100644
index 000000000000..91adcd288f6e
--- /dev/null
+++ b/tests/import-derivation.nix
@@ -0,0 +1,23 @@
+with import ./config.nix;
+
+let
+
+  bar = mkDerivation {
+    name = "bar";
+    builder = builtins.toFile "builder.sh"
+      ''
+        echo 'builtins.add 123 456' > $out
+      '';
+  };
+
+  value = import bar;
+
+in
+
+mkDerivation {
+  name = "foo";
+  builder = builtins.toFile "builder.sh"
+    ''
+      echo -n FOO${toString value} > $out
+    '';
+}
diff --git a/tests/import-derivation.sh b/tests/import-derivation.sh
new file mode 100644
index 000000000000..98d61ef49b9c
--- /dev/null
+++ b/tests/import-derivation.sh
@@ -0,0 +1,12 @@
+source common.sh
+
+clearStore
+
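+# Importing from a derivation forces that derivation to be built first; in
+# read-only mode the store cannot be modified, so evaluation has to fail.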
+if nix-instantiate --readonly-mode ./import-derivation.nix; then
+    echo "read-only evaluation of an imported derivation unexpectedly failed"
+    exit 1
+fi
+
+outPath=$(nix-build ./import-derivation.nix --no-out-link)
+
+[ "$(cat $outPath)" = FOO579 ]
diff --git a/tests/init.sh b/tests/init.sh
new file mode 100644
index 000000000000..5be999e4b1be
--- /dev/null
+++ b/tests/init.sh
@@ -0,0 +1,33 @@
+source common.sh
+
+echo "NIX_STORE_DIR=$NIX_STORE_DIR NIX_DB_DIR=$NIX_DB_DIR"
+
+test -n "$TEST_ROOT"
+if test -d "$TEST_ROOT"; then
+    chmod -R u+w "$TEST_ROOT"
+    rm -rf "$TEST_ROOT"
+fi
+mkdir "$TEST_ROOT"
+
+mkdir "$NIX_STORE_DIR"
+mkdir "$NIX_LOCALSTATE_DIR"
+mkdir -p "$NIX_LOG_DIR"/drvs
+mkdir "$NIX_STATE_DIR"
+mkdir "$NIX_DB_DIR"
+mkdir "$NIX_CONF_DIR"
+
+cat > "$NIX_CONF_DIR"/nix.conf <<EOF
+build-users-group =
+gc-keep-outputs = false
+gc-keep-derivations = false
+env-keep-derivations = false
+fsync-metadata = false
+EOF
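+# An empty build-users-group disables the build-user machinery; the gc-keep-*
+# and env-keep-derivations settings ensure that only explicitly rooted paths
+# (and their references) survive garbage collection, and fsync-metadata =
+# false merely speeds up the test database.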
+
+# Initialise the database.
+nix-store --init
+
+# Did anything happen?
+test -e "$NIX_DB_DIR"/db.sqlite
+
+echo 'Hello World' > ./dummy
diff --git a/tests/install-package.sh b/tests/install-package.sh
new file mode 100644
index 000000000000..653dfee4c8d1
--- /dev/null
+++ b/tests/install-package.sh
@@ -0,0 +1,21 @@
+source common.sh
+
+# Note: this test expects to be run *after* nix-push.sh.
+
+drvPath=$(nix-instantiate ./dependencies.nix)
+outPath=$(nix-store -q $drvPath)
+
+clearStore
+clearProfiles
+
+cat > $TEST_ROOT/foo.nixpkg <<EOF
+NIXPKG1 file://$TEST_ROOT/cache/MANIFEST simple $system $drvPath $outPath
+EOF
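+# The fields are: magic, manifest URL, package name, system, derivation path
+# and output path.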
+
+nix-install-package --non-interactive -p $profiles/test $TEST_ROOT/foo.nixpkg
+test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 1
+
+clearProfiles
+
+nix-install-package --non-interactive -p $profiles/test --url file://$TEST_ROOT/foo.nixpkg
+test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 1
diff --git a/tests/lang.sh b/tests/lang.sh
new file mode 100644
index 000000000000..7157a68c5ceb
--- /dev/null
+++ b/tests/lang.sh
@@ -0,0 +1,70 @@
+source common.sh
+
+export TEST_VAR=foo # for eval-okay-getenv.nix
+
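+# builtins.trace always prints its message; builtins.addErrorContext only
+# surfaces its message when the wrapped expression actually throws.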
+nix-instantiate --eval -E 'builtins.trace "Hello" 123' 2>&1 | grep -q Hello
+! nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1 | grep -q Hello
+nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" (throw "Foo")' 2>&1 | grep -q Hello
+
+set +x
+
+fail=0
+
+for i in lang/parse-fail-*.nix; do
+    echo "parsing $i (should fail)";
+    i=$(basename $i .nix)
+    if nix-instantiate --parse - < lang/$i.nix; then
+        echo "FAIL: $i shouldn't parse"
+        fail=1
+    fi
+done
+
+for i in lang/parse-okay-*.nix; do
+    echo "parsing $i (should succeed)";
+    i=$(basename $i .nix)
+    if ! nix-instantiate --parse - < lang/$i.nix > lang/$i.out; then
+        echo "FAIL: $i should parse"
+        fail=1
+    fi
+done
+
+for i in lang/eval-fail-*.nix; do
+    echo "evaluating $i (should fail)";
+    i=$(basename $i .nix)
+    if nix-instantiate --eval lang/$i.nix; then
+        echo "FAIL: $i shouldn't evaluate"
+        fail=1
+    fi
+done
+
+for i in lang/eval-okay-*.nix; do
+    echo "evaluating $i (should succeed)";
+    i=$(basename $i .nix)
+
+    if test -e lang/$i.exp; then
+        flags=
+        if test -e lang/$i.flags; then
+            flags=$(cat lang/$i.flags)
+        fi
+        if ! NIX_PATH=lang/dir3:lang/dir4 nix-instantiate $flags --eval --strict lang/$i.nix > lang/$i.out; then
+            echo "FAIL: $i should evaluate"
+            fail=1
+        elif ! diff lang/$i.out lang/$i.exp; then
+            echo "FAIL: evaluation result of $i not as expected"
+            fail=1
+        fi
+    fi
+
+    if test -e lang/$i.exp.xml; then
+        if ! nix-instantiate --eval --xml --no-location --strict \
+                lang/$i.nix > lang/$i.out.xml; then
+            echo "FAIL: $i should evaluate"
+            fail=1
+        elif ! cmp -s lang/$i.out.xml lang/$i.exp.xml; then
+            echo "FAIL: XML evaluation result of $i not as expected"
+            fail=1
+        fi
+    fi
+done
+
+exit $fail
diff --git a/tests/lang/dir1/a.nix b/tests/lang/dir1/a.nix
new file mode 100644
index 000000000000..231f150c579c
--- /dev/null
+++ b/tests/lang/dir1/a.nix
@@ -0,0 +1 @@
+"a"
diff --git a/tests/lang/dir2/a.nix b/tests/lang/dir2/a.nix
new file mode 100644
index 000000000000..170df520ab68
--- /dev/null
+++ b/tests/lang/dir2/a.nix
@@ -0,0 +1 @@
+"X"
diff --git a/tests/lang/dir2/b.nix b/tests/lang/dir2/b.nix
new file mode 100644
index 000000000000..19010cc35ca6
--- /dev/null
+++ b/tests/lang/dir2/b.nix
@@ -0,0 +1 @@
+"b"
diff --git a/tests/lang/dir3/a.nix b/tests/lang/dir3/a.nix
new file mode 100644
index 000000000000..170df520ab68
--- /dev/null
+++ b/tests/lang/dir3/a.nix
@@ -0,0 +1 @@
+"X"
diff --git a/tests/lang/dir3/b.nix b/tests/lang/dir3/b.nix
new file mode 100644
index 000000000000..170df520ab68
--- /dev/null
+++ b/tests/lang/dir3/b.nix
@@ -0,0 +1 @@
+"X"
diff --git a/tests/lang/dir3/c.nix b/tests/lang/dir3/c.nix
new file mode 100644
index 000000000000..cdf158597eef
--- /dev/null
+++ b/tests/lang/dir3/c.nix
@@ -0,0 +1 @@
+"c"
diff --git a/tests/lang/dir4/a.nix b/tests/lang/dir4/a.nix
new file mode 100644
index 000000000000..170df520ab68
--- /dev/null
+++ b/tests/lang/dir4/a.nix
@@ -0,0 +1 @@
+"X"
diff --git a/tests/lang/dir4/c.nix b/tests/lang/dir4/c.nix
new file mode 100644
index 000000000000..170df520ab68
--- /dev/null
+++ b/tests/lang/dir4/c.nix
@@ -0,0 +1 @@
+"X"
diff --git a/tests/lang/eval-fail-abort.nix b/tests/lang/eval-fail-abort.nix
new file mode 100644
index 000000000000..75c51bceb540
--- /dev/null
+++ b/tests/lang/eval-fail-abort.nix
@@ -0,0 +1 @@
+if true then abort "this should fail" else 1
diff --git a/tests/lang/eval-fail-antiquoted-path.nix b/tests/lang/eval-fail-antiquoted-path.nix
new file mode 100644
index 000000000000..f2f08107b516
--- /dev/null
+++ b/tests/lang/eval-fail-antiquoted-path.nix
@@ -0,0 +1,4 @@
+# This must fail to evaluate, since ./fnord doesn't exist.  If it did
+# exist, it would produce "/nix/store/<hash>-fnord/xyzzy" (with an
+# appropriate context).
+"${./fnord}/xyzzy"
diff --git a/tests/lang/eval-fail-assert.nix b/tests/lang/eval-fail-assert.nix
new file mode 100644
index 000000000000..3b7a1e8bf0c2
--- /dev/null
+++ b/tests/lang/eval-fail-assert.nix
@@ -0,0 +1,5 @@
+let {
+  x = arg: assert arg == "y"; 123;
+
+  body = x "x";
+}
\ No newline at end of file
diff --git a/tests/lang/eval-fail-bad-antiquote-1.nix b/tests/lang/eval-fail-bad-antiquote-1.nix
new file mode 100644
index 000000000000..ffe9c983c26b
--- /dev/null
+++ b/tests/lang/eval-fail-bad-antiquote-1.nix
@@ -0,0 +1 @@
+"${x: x}"
diff --git a/tests/lang/eval-fail-bad-antiquote-2.nix b/tests/lang/eval-fail-bad-antiquote-2.nix
new file mode 100644
index 000000000000..3745235ce95e
--- /dev/null
+++ b/tests/lang/eval-fail-bad-antiquote-2.nix
@@ -0,0 +1 @@
+"${./fnord}"
diff --git a/tests/lang/eval-fail-bad-antiquote-3.nix b/tests/lang/eval-fail-bad-antiquote-3.nix
new file mode 100644
index 000000000000..65b9d4f505b1
--- /dev/null
+++ b/tests/lang/eval-fail-bad-antiquote-3.nix
@@ -0,0 +1 @@
+''${x: x}''
diff --git a/tests/lang/eval-fail-blackhole.nix b/tests/lang/eval-fail-blackhole.nix
new file mode 100644
index 000000000000..81133b511c95
--- /dev/null
+++ b/tests/lang/eval-fail-blackhole.nix
@@ -0,0 +1,5 @@
+let {
+  body = x;
+  x = y;
+  y = x;
+}
diff --git a/tests/lang/eval-fail-missing-arg.nix b/tests/lang/eval-fail-missing-arg.nix
new file mode 100644
index 000000000000..c4be9797c534
--- /dev/null
+++ b/tests/lang/eval-fail-missing-arg.nix
@@ -0,0 +1 @@
+({x, y, z}: x + y + z) {x = "foo"; z = "bar";}
diff --git a/tests/lang/eval-fail-remove.nix b/tests/lang/eval-fail-remove.nix
new file mode 100644
index 000000000000..539e0eb0a6f6
--- /dev/null
+++ b/tests/lang/eval-fail-remove.nix
@@ -0,0 +1,5 @@
+let {
+  attrs = {x = 123; y = 456;};
+
+  body = (removeAttrs attrs ["x"]).x;
+}
\ No newline at end of file
diff --git a/tests/lang/eval-fail-scope-5.nix b/tests/lang/eval-fail-scope-5.nix
new file mode 100644
index 000000000000..f89a65a99be3
--- /dev/null
+++ b/tests/lang/eval-fail-scope-5.nix
@@ -0,0 +1,10 @@
+let {
+
+  x = "a";
+  y = "b";
+
+  f = {x ? y, y ? x}: x + y;
+
+  body = f {};
+
+}
diff --git a/tests/lang/eval-fail-substring.nix b/tests/lang/eval-fail-substring.nix
new file mode 100644
index 000000000000..f37c2bc0a160
--- /dev/null
+++ b/tests/lang/eval-fail-substring.nix
@@ -0,0 +1 @@
+builtins.substring (builtins.sub 0 1) 1 "x"
diff --git a/tests/lang/eval-fail-to-path.nix b/tests/lang/eval-fail-to-path.nix
new file mode 100644
index 000000000000..5e322bc31369
--- /dev/null
+++ b/tests/lang/eval-fail-to-path.nix
@@ -0,0 +1 @@
+builtins.toPath "foo/bar"
diff --git a/tests/lang/eval-fail-undeclared-arg.nix b/tests/lang/eval-fail-undeclared-arg.nix
new file mode 100644
index 000000000000..cafdf1636272
--- /dev/null
+++ b/tests/lang/eval-fail-undeclared-arg.nix
@@ -0,0 +1 @@
+({x, z}: x + z) {x = "foo"; y = "bla"; z = "bar";}
diff --git a/tests/lang/eval-okay-arithmetic.exp b/tests/lang/eval-okay-arithmetic.exp
new file mode 100644
index 000000000000..b195055b7a09
--- /dev/null
+++ b/tests/lang/eval-okay-arithmetic.exp
@@ -0,0 +1 @@
+2188
diff --git a/tests/lang/eval-okay-arithmetic.nix b/tests/lang/eval-okay-arithmetic.nix
new file mode 100644
index 000000000000..bbbbc4691d75
--- /dev/null
+++ b/tests/lang/eval-okay-arithmetic.nix
@@ -0,0 +1,55 @@
+with import ./lib.nix;
+
+let {
+
+  /* Supposedly tail recursive version:
+
+  range_ = accum: first: last:
+    if first == last then ([first] ++ accum)
+    else range_ ([first] ++ accum) (builtins.add first 1) last;
+
+  range = range_ [];
+  */
+
+  x = 12;
+
+  err = abort "urgh";
+
+  body = sum
+    [ (sum (range 1 50))
+      (123 + 456)
+      (0 + -10 + -(-11) + -x)
+      (10 - 7 - -2)
+      (10 - (6 - -1))
+      (10 - 1 + 2)
+      (3 * 4 * 5)
+      (56088 / 123 / 2)
+      (3 + 4 * const 5 0 - 6 / id 2)
+
+      (if 3 < 7 then 1 else err)
+      (if 7 < 3 then err else 1)
+      (if 3 < 3 then err else 1)
+
+      (if 3 <= 7 then 1 else err)
+      (if 7 <= 3 then err else 1)
+      (if 3 <= 3 then 1 else err)
+
+      (if 3 > 7 then err else 1)
+      (if 7 > 3 then 1 else err)
+      (if 3 > 3 then err else 1)
+
+      (if 3 >= 7 then err else 1)
+      (if 7 >= 3 then 1 else err)
+      (if 3 >= 3 then 1 else err)
+
+      (if 2 > 1 == 1 < 2 then 1 else err)
+      (if 1 + 2 * 3 >= 7 then 1 else err)
+      (if 1 + 2 * 3 < 7 then err else 1)
+
+      # Not integer, but so what.
+      (if "aa" < "ab" then 1 else err)
+      (if "aa" < "aa" then err else 1)
+      (if "foo" < "foobar" then 1 else err)
+    ];
+
+}
diff --git a/tests/lang/eval-okay-attrnames.exp b/tests/lang/eval-okay-attrnames.exp
new file mode 100644
index 000000000000..b4aa387e07b8
--- /dev/null
+++ b/tests/lang/eval-okay-attrnames.exp
@@ -0,0 +1 @@
+"newxfoonewxy"
diff --git a/tests/lang/eval-okay-attrnames.nix b/tests/lang/eval-okay-attrnames.nix
new file mode 100644
index 000000000000..978138f0c0d9
--- /dev/null
+++ b/tests/lang/eval-okay-attrnames.nix
@@ -0,0 +1,11 @@
+with import ./lib.nix;
+
+let
+
+  attrs = {y = "y"; x = "x"; foo = "foo";} // rec {x = "newx"; bar = x;};
+
+  names = builtins.attrNames attrs;
+
+  values = map (name: builtins.getAttr name attrs) names;
+
+in concat values
diff --git a/tests/lang/eval-okay-attrs.exp b/tests/lang/eval-okay-attrs.exp
new file mode 100644
index 000000000000..45b0f829eb33
--- /dev/null
+++ b/tests/lang/eval-okay-attrs.exp
@@ -0,0 +1 @@
+987
diff --git a/tests/lang/eval-okay-attrs.nix b/tests/lang/eval-okay-attrs.nix
new file mode 100644
index 000000000000..810b31a5da96
--- /dev/null
+++ b/tests/lang/eval-okay-attrs.nix
@@ -0,0 +1,5 @@
+let {
+  as = { x = 123; y = 456; } // { z = 789; } // { z = 987; };
+
+  body = if as ? a then as.a else assert as ? z; as.z;
+}
diff --git a/tests/lang/eval-okay-attrs2.exp b/tests/lang/eval-okay-attrs2.exp
new file mode 100644
index 000000000000..45b0f829eb33
--- /dev/null
+++ b/tests/lang/eval-okay-attrs2.exp
@@ -0,0 +1 @@
+987
diff --git a/tests/lang/eval-okay-attrs2.nix b/tests/lang/eval-okay-attrs2.nix
new file mode 100644
index 000000000000..9e06b83ac1fd
--- /dev/null
+++ b/tests/lang/eval-okay-attrs2.nix
@@ -0,0 +1,10 @@
+let {
+  as = { x = 123; y = 456; } // { z = 789; } // { z = 987; };
+
+  A = "a";
+  Z = "z";
+
+  body = if builtins.hasAttr A as
+         then builtins.getAttr A as
+         else assert builtins.hasAttr Z as; builtins.getAttr Z as;
+}
diff --git a/tests/lang/eval-okay-attrs3.exp b/tests/lang/eval-okay-attrs3.exp
new file mode 100644
index 000000000000..19de4fdf79f7
--- /dev/null
+++ b/tests/lang/eval-okay-attrs3.exp
@@ -0,0 +1 @@
+"foo 22 80 itchyxac"
diff --git a/tests/lang/eval-okay-attrs3.nix b/tests/lang/eval-okay-attrs3.nix
new file mode 100644
index 000000000000..f29de11fe660
--- /dev/null
+++ b/tests/lang/eval-okay-attrs3.nix
@@ -0,0 +1,22 @@
+let
+
+  config = 
+    {
+      services.sshd.enable = true;
+      services.sshd.port = 22;
+      services.httpd.port = 80;
+      hostName = "itchy";
+      a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p.q.r.s.t.u.v.w.x.y.z = "x";
+      foo = {
+        a = "a";
+        b.c = "c";
+      };
+    };
+
+in
+  if config.services.sshd.enable
+  then "foo ${toString config.services.sshd.port} ${toString config.services.httpd.port} ${config.hostName}"
+       + "${config.a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p.q.r.s.t.u.v.w.x.y.z}"
+       + "${config.foo.a}"
+       + "${config.foo.b.c}"
+  else "bar"
diff --git a/tests/lang/eval-okay-attrs4.exp b/tests/lang/eval-okay-attrs4.exp
new file mode 100644
index 000000000000..1851731442d3
--- /dev/null
+++ b/tests/lang/eval-okay-attrs4.exp
@@ -0,0 +1 @@
+[ true false true false false true false false ]
diff --git a/tests/lang/eval-okay-attrs4.nix b/tests/lang/eval-okay-attrs4.nix
new file mode 100644
index 000000000000..43ec81210f38
--- /dev/null
+++ b/tests/lang/eval-okay-attrs4.nix
@@ -0,0 +1,7 @@
+let
+
+  as = { x.y.z = 123; a.b.c = 456; };
+
+  bs = null;
+
+in [ (as ? x) (as ? y) (as ? x.y.z) (as ? x.y.z.a) (as ? x.y.a) (as ? a.b.c) (bs ? x) (bs ? x.y.z) ]
diff --git a/tests/lang/eval-okay-attrs5.exp b/tests/lang/eval-okay-attrs5.exp
new file mode 100644
index 000000000000..ce0430d78081
--- /dev/null
+++ b/tests/lang/eval-okay-attrs5.exp
@@ -0,0 +1 @@
+[ 123 "foo" 456 456 "foo" "xyzzy" "xyzzy" true ]
diff --git a/tests/lang/eval-okay-attrs5.nix b/tests/lang/eval-okay-attrs5.nix
new file mode 100644
index 000000000000..0a98b8fdffa6
--- /dev/null
+++ b/tests/lang/eval-okay-attrs5.nix
@@ -0,0 +1,21 @@
+with import ./lib.nix;
+
+let
+
+  as = { x.y.z = 123; a.b.c = 456; };
+
+  bs = { f-o-o.bar = "foo"; };
+
+  or = x: y: x || y;
+  
+in
+  [ as.x.y.z
+    as.foo or "foo"
+    as.x.y.bla or as.a.b.c
+    as.a.b.c or as.x.y.z
+    as.x.y.bla or bs.f-o-o.bar or "xyzzy"
+    as.x.y.bla or bs.bar.foo or "xyzzy"
+    123.bla or null.foo or "xyzzy"
+    # Backwards compatibility test.
+    (fold or [] [true false false])
+  ]
diff --git a/tests/lang/eval-okay-autoargs.exp b/tests/lang/eval-okay-autoargs.exp
new file mode 100644
index 000000000000..7a8391786a09
--- /dev/null
+++ b/tests/lang/eval-okay-autoargs.exp
@@ -0,0 +1 @@
+"xyzzy!xyzzy!foobar"
diff --git a/tests/lang/eval-okay-autoargs.flags b/tests/lang/eval-okay-autoargs.flags
new file mode 100644
index 000000000000..ae3762254460
--- /dev/null
+++ b/tests/lang/eval-okay-autoargs.flags
@@ -0,0 +1 @@
+--arg lib import(lang/lib.nix) --argstr xyzzy xyzzy! -A result
diff --git a/tests/lang/eval-okay-autoargs.nix b/tests/lang/eval-okay-autoargs.nix
new file mode 100644
index 000000000000..815f51b1d67a
--- /dev/null
+++ b/tests/lang/eval-okay-autoargs.nix
@@ -0,0 +1,15 @@
+let
+
+  foobar = "foobar";
+
+in
+
+{ xyzzy2 ? xyzzy # mutually recursive args
+, xyzzy ? "blaat" # will be overridden by --argstr
+, fb ? foobar
+, lib # will be set by --arg
+}:
+
+{
+  result = lib.concat [xyzzy xyzzy2 fb];
+}
diff --git a/tests/lang/eval-okay-builtins.exp b/tests/lang/eval-okay-builtins.exp
new file mode 100644
index 000000000000..0661686d611d
--- /dev/null
+++ b/tests/lang/eval-okay-builtins.exp
@@ -0,0 +1 @@
+/foo
diff --git a/tests/lang/eval-okay-builtins.nix b/tests/lang/eval-okay-builtins.nix
new file mode 100644
index 000000000000..e9d65e88a817
--- /dev/null
+++ b/tests/lang/eval-okay-builtins.nix
@@ -0,0 +1,12 @@
+assert builtins ? currentSystem;
+assert !builtins ? __currentSystem;
+
+let {
+
+  x = if builtins ? dirOf then builtins.dirOf /foo/bar else "";
+
+  y = if builtins ? fnord then builtins.fnord "foo" else "";
+
+  body = x + y;
+  
+}
diff --git a/tests/lang/eval-okay-closure.exp.xml b/tests/lang/eval-okay-closure.exp.xml
new file mode 100644
index 000000000000..dffc03a99891
--- /dev/null
+++ b/tests/lang/eval-okay-closure.exp.xml
@@ -0,0 +1,343 @@
+<?xml version='1.0' encoding='utf-8'?>
+<expr>
+  <list>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="-13" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="-12" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="-11" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="-9" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="-8" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="-7" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="-5" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="-4" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="-3" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="key">
+        <int value="-1" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="0" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="1" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="2" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="4" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="5" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="6" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="key">
+        <int value="8" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="9" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="10" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="13" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="14" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="15" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="key">
+        <int value="17" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="18" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="19" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="22" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="23" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="key">
+        <int value="26" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="27" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="28" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="31" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="32" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="key">
+        <int value="35" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="36" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="40" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="41" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="key">
+        <int value="44" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="45" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="49" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="key">
+        <int value="53" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="54" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="58" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="key">
+        <int value="62" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="foo">
+        <bool value="true" />
+      </attr>
+      <attr name="key">
+        <int value="67" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="key">
+        <int value="71" />
+      </attr>
+    </attrs>
+    <attrs>
+      <attr name="key">
+        <int value="80" />
+      </attr>
+    </attrs>
+  </list>
+</expr>
diff --git a/tests/lang/eval-okay-closure.nix b/tests/lang/eval-okay-closure.nix
new file mode 100644
index 000000000000..cccd4dc35730
--- /dev/null
+++ b/tests/lang/eval-okay-closure.nix
@@ -0,0 +1,13 @@
+let
+
+  closure = builtins.genericClosure {
+    startSet = [{key = 80;}];
+    operator = {key, foo ? false}:
+      if builtins.lessThan key 0
+      then []
+      else [{key = builtins.sub key 9;} {key = builtins.sub key 13; foo = true;}];
+  };
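+  # genericClosure repeatedly applies `operator` to newly produced elements,
+  # de-duplicating them by their `key` attribute, until no new keys appear.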
+
+  sort = (import ./lib.nix).sortBy (a: b: builtins.lessThan a.key b.key);
+
+in sort closure
diff --git a/tests/lang/eval-okay-concat.exp b/tests/lang/eval-okay-concat.exp
new file mode 100644
index 000000000000..bb4bbd577410
--- /dev/null
+++ b/tests/lang/eval-okay-concat.exp
@@ -0,0 +1 @@
+[ 1 2 3 4 5 6 7 8 9 ]
diff --git a/tests/lang/eval-okay-concat.nix b/tests/lang/eval-okay-concat.nix
new file mode 100644
index 000000000000..d158a9bf05b9
--- /dev/null
+++ b/tests/lang/eval-okay-concat.nix
@@ -0,0 +1 @@
+[1 2 3] ++ [4 5 6] ++ [7 8 9]
diff --git a/tests/lang/eval-okay-context.exp b/tests/lang/eval-okay-context.exp
new file mode 100644
index 000000000000..2f535bdbc454
--- /dev/null
+++ b/tests/lang/eval-okay-context.exp
@@ -0,0 +1 @@
+"foo eval-okay-context.nix bar"
diff --git a/tests/lang/eval-okay-context.nix b/tests/lang/eval-okay-context.nix
new file mode 100644
index 000000000000..8cd8f2e131d8
--- /dev/null
+++ b/tests/lang/eval-okay-context.nix
@@ -0,0 +1,6 @@
+let s = "foo ${builtins.substring 33 100 (baseNameOf ./eval-okay-context.nix)} bar";
+in
+  if s != "foo eval-okay-context.nix bar"
+  then abort "context not discarded"
+  else builtins.unsafeDiscardStringContext s
+
diff --git a/tests/lang/eval-okay-curpos.exp b/tests/lang/eval-okay-curpos.exp
new file mode 100644
index 000000000000..65fd65b4d01f
--- /dev/null
+++ b/tests/lang/eval-okay-curpos.exp
@@ -0,0 +1 @@
+[ 3 7 4 9 ]
diff --git a/tests/lang/eval-okay-curpos.nix b/tests/lang/eval-okay-curpos.nix
new file mode 100644
index 000000000000..b79553df0bd3
--- /dev/null
+++ b/tests/lang/eval-okay-curpos.nix
@@ -0,0 +1,5 @@
+# Bla
+let
+  x = __curPos;
+    y = __curPos;
+in [ x.line x.column y.line y.column ]
diff --git a/tests/lang/eval-okay-delayed-with-inherit.exp b/tests/lang/eval-okay-delayed-with-inherit.exp
new file mode 100644
index 000000000000..eaacb55c1aff
--- /dev/null
+++ b/tests/lang/eval-okay-delayed-with-inherit.exp
@@ -0,0 +1 @@
+"b-overridden"
diff --git a/tests/lang/eval-okay-delayed-with-inherit.nix b/tests/lang/eval-okay-delayed-with-inherit.nix
new file mode 100644
index 000000000000..84b388c27130
--- /dev/null
+++ b/tests/lang/eval-okay-delayed-with-inherit.nix
@@ -0,0 +1,24 @@
+let
+  pkgs_ = with pkgs; {
+    a = derivation {
+      name = "a";
+      system = builtins.currentSystem;
+      builder = "/bin/sh";
+      args = [ "-c" "touch $out" ];
+      inherit b;
+    };
+
+    inherit b;
+  };
+
+  packageOverrides = p: {
+    b = derivation {
+      name = "b-overridden";
+      system = builtins.currentSystem;
+      builder = "/bin/sh";
+      args = [ "-c" "touch $out" ];
+    };
+  };
+
+  pkgs = pkgs_ // (packageOverrides pkgs_);
+in pkgs.a.b.name
diff --git a/tests/lang/eval-okay-delayed-with.exp b/tests/lang/eval-okay-delayed-with.exp
new file mode 100644
index 000000000000..8e7c61ab8e77
--- /dev/null
+++ b/tests/lang/eval-okay-delayed-with.exp
@@ -0,0 +1 @@
+"b-overridden b-overridden a"
diff --git a/tests/lang/eval-okay-delayed-with.nix b/tests/lang/eval-okay-delayed-with.nix
new file mode 100644
index 000000000000..3fb023e1cd42
--- /dev/null
+++ b/tests/lang/eval-okay-delayed-with.nix
@@ -0,0 +1,29 @@
+let
+
+  pkgs_ = with pkgs; {
+    a = derivation {
+      name = "a";
+      system = builtins.currentSystem;
+      builder = "/bin/sh";
+      args = [ "-c" "touch $out" ];
+      inherit b;
+    };
+
+    b = derivation {
+      name = "b";
+      system = builtins.currentSystem;
+      builder = "/bin/sh";
+      args = [ "-c" "touch $out" ];
+      inherit a;
+    };
+
+    c = b;
+  };
+
+  packageOverrides = pkgs: with pkgs; {
+    b = derivation (b.drvAttrs // { name = "${b.name}-overridden"; });
+  };
+
+  pkgs = pkgs_ // (packageOverrides pkgs_);
+
+in "${pkgs.a.b.name} ${pkgs.c.name} ${pkgs.b.a.name}"
diff --git a/tests/lang/eval-okay-dynamic-attrs-2.exp b/tests/lang/eval-okay-dynamic-attrs-2.exp
new file mode 100644
index 000000000000..27ba77ddaf61
--- /dev/null
+++ b/tests/lang/eval-okay-dynamic-attrs-2.exp
@@ -0,0 +1 @@
+true
diff --git a/tests/lang/eval-okay-dynamic-attrs-2.nix b/tests/lang/eval-okay-dynamic-attrs-2.nix
new file mode 100644
index 000000000000..6d57bf854908
--- /dev/null
+++ b/tests/lang/eval-okay-dynamic-attrs-2.nix
@@ -0,0 +1 @@
+{ a."${"b"}" = true; a."${"c"}" = false; }.a.b
diff --git a/tests/lang/eval-okay-dynamic-attrs-bare.exp b/tests/lang/eval-okay-dynamic-attrs-bare.exp
new file mode 100644
index 000000000000..df8750afc036
--- /dev/null
+++ b/tests/lang/eval-okay-dynamic-attrs-bare.exp
@@ -0,0 +1 @@
+{ binds = true; hasAttrs = true; multiAttrs = true; recBinds = true; selectAttrs = true; selectOrAttrs = true; }
diff --git a/tests/lang/eval-okay-dynamic-attrs-bare.nix b/tests/lang/eval-okay-dynamic-attrs-bare.nix
new file mode 100644
index 000000000000..0dbe15e6384c
--- /dev/null
+++ b/tests/lang/eval-okay-dynamic-attrs-bare.nix
@@ -0,0 +1,17 @@
+let
+  aString = "a";
+
+  bString = "b";
+in {
+  hasAttrs = { a.b = null; } ? ${aString}.b;
+
+  selectAttrs = { a.b = true; }.a.${bString};
+
+  selectOrAttrs = { }.${aString} or true;
+
+  binds = { ${aString}."${bString}c" = true; }.a.bc;
+
+  recBinds = rec { ${bString} = a; a = true; }.b;
+
+  multiAttrs = { ${aString} = true; ${bString} = false; }.a;
+}
diff --git a/tests/lang/eval-okay-dynamic-attrs.exp b/tests/lang/eval-okay-dynamic-attrs.exp
new file mode 100644
index 000000000000..df8750afc036
--- /dev/null
+++ b/tests/lang/eval-okay-dynamic-attrs.exp
@@ -0,0 +1 @@
+{ binds = true; hasAttrs = true; multiAttrs = true; recBinds = true; selectAttrs = true; selectOrAttrs = true; }
diff --git a/tests/lang/eval-okay-dynamic-attrs.nix b/tests/lang/eval-okay-dynamic-attrs.nix
new file mode 100644
index 000000000000..ee02ac7e6579
--- /dev/null
+++ b/tests/lang/eval-okay-dynamic-attrs.nix
@@ -0,0 +1,17 @@
+let
+  aString = "a";
+
+  bString = "b";
+in {
+  hasAttrs = { a.b = null; } ? "${aString}".b;
+
+  selectAttrs = { a.b = true; }.a."${bString}";
+
+  selectOrAttrs = { }."${aString}" or true;
+
+  binds = { "${aString}"."${bString}c" = true; }.a.bc;
+
+  recBinds = rec { "${bString}" = a; a = true; }.b;
+
+  multiAttrs = { "${aString}" = true; "${bString}" = false; }.a;
+}
diff --git a/tests/lang/eval-okay-elem.exp b/tests/lang/eval-okay-elem.exp
new file mode 100644
index 000000000000..3cf6c0e962f0
--- /dev/null
+++ b/tests/lang/eval-okay-elem.exp
@@ -0,0 +1 @@
+[ true false 30 ]
diff --git a/tests/lang/eval-okay-elem.nix b/tests/lang/eval-okay-elem.nix
new file mode 100644
index 000000000000..71ea7a4ed03d
--- /dev/null
+++ b/tests/lang/eval-okay-elem.nix
@@ -0,0 +1,6 @@
+with import ./lib.nix;
+
+let xs = range 10 40; in
+
+[ (builtins.elem 23 xs) (builtins.elem 42 xs) (builtins.elemAt xs 20) ]
+
diff --git a/tests/lang/eval-okay-empty-args.exp b/tests/lang/eval-okay-empty-args.exp
new file mode 100644
index 000000000000..cb5537d5d7ce
--- /dev/null
+++ b/tests/lang/eval-okay-empty-args.exp
@@ -0,0 +1 @@
+"ab"
diff --git a/tests/lang/eval-okay-empty-args.nix b/tests/lang/eval-okay-empty-args.nix
new file mode 100644
index 000000000000..78c133afdd94
--- /dev/null
+++ b/tests/lang/eval-okay-empty-args.nix
@@ -0,0 +1 @@
+({}: {x,y,}: "${x}${y}") {} {x = "a"; y = "b";}
diff --git a/tests/lang/eval-okay-eq-derivations.exp b/tests/lang/eval-okay-eq-derivations.exp
new file mode 100644
index 000000000000..ec04aab6aeec
--- /dev/null
+++ b/tests/lang/eval-okay-eq-derivations.exp
@@ -0,0 +1 @@
+[ true true true false ]
diff --git a/tests/lang/eval-okay-eq-derivations.nix b/tests/lang/eval-okay-eq-derivations.nix
new file mode 100644
index 000000000000..d526cb4a2161
--- /dev/null
+++ b/tests/lang/eval-okay-eq-derivations.nix
@@ -0,0 +1,10 @@
+let
+
+  drvA1 = derivation { name = "a"; builder = "/foo"; system = "i686-linux"; };
+  drvA2 = derivation { name = "a"; builder = "/foo"; system = "i686-linux"; };
+  drvA3 = derivation { name = "a"; builder = "/foo"; system = "i686-linux"; } // { dummy = 1; };
+  
+  drvC1 = derivation { name = "c"; builder = "/foo"; system = "i686-linux"; };
+  drvC2 = derivation { name = "c"; builder = "/bar"; system = "i686-linux"; };
+
+in [ (drvA1 == drvA1) (drvA1 == drvA2) (drvA1 == drvA3) (drvC1 == drvC2) ]
diff --git a/tests/lang/eval-okay-eq.exp.disabled b/tests/lang/eval-okay-eq.exp.disabled
new file mode 100644
index 000000000000..2015847b65e7
--- /dev/null
+++ b/tests/lang/eval-okay-eq.exp.disabled
@@ -0,0 +1 @@
+Bool(True)
diff --git a/tests/lang/eval-okay-eq.nix b/tests/lang/eval-okay-eq.nix
new file mode 100644
index 000000000000..73d200b38141
--- /dev/null
+++ b/tests/lang/eval-okay-eq.nix
@@ -0,0 +1,3 @@
+["foobar" (rec {x = 1; y = x;})]
+==
+[("foo" + "bar") ({x = 1; y = 1;})]
diff --git a/tests/lang/eval-okay-filter.exp b/tests/lang/eval-okay-filter.exp
new file mode 100644
index 000000000000..355d51c27d8f
--- /dev/null
+++ b/tests/lang/eval-okay-filter.exp
@@ -0,0 +1 @@
+[ 0 2 4 6 8 10 100 102 104 106 108 110 ]
diff --git a/tests/lang/eval-okay-filter.nix b/tests/lang/eval-okay-filter.nix
new file mode 100644
index 000000000000..85109b0d0eb8
--- /dev/null
+++ b/tests/lang/eval-okay-filter.nix
@@ -0,0 +1,5 @@
+with import ./lib.nix;
+
+builtins.filter
+  (x: x / 2 * 2 == x)
+  (builtins.concatLists [ (range 0 10) (range 100 110) ])
diff --git a/tests/lang/eval-okay-flatten.exp b/tests/lang/eval-okay-flatten.exp
new file mode 100644
index 000000000000..b979b2b8b9bc
--- /dev/null
+++ b/tests/lang/eval-okay-flatten.exp
@@ -0,0 +1 @@
+"1234567"
diff --git a/tests/lang/eval-okay-flatten.nix b/tests/lang/eval-okay-flatten.nix
new file mode 100644
index 000000000000..fe911e9683e2
--- /dev/null
+++ b/tests/lang/eval-okay-flatten.nix
@@ -0,0 +1,8 @@
+with import ./lib.nix;
+
+let {
+
+  l = ["1" "2" ["3" ["4"] ["5" "6"]] "7"];
+
+  body = concat (flatten l);
+}
diff --git a/tests/lang/eval-okay-fromjson.exp b/tests/lang/eval-okay-fromjson.exp
new file mode 100644
index 000000000000..27ba77ddaf61
--- /dev/null
+++ b/tests/lang/eval-okay-fromjson.exp
@@ -0,0 +1 @@
+true
diff --git a/tests/lang/eval-okay-fromjson.nix b/tests/lang/eval-okay-fromjson.nix
new file mode 100644
index 000000000000..5ed0c1c4395d
--- /dev/null
+++ b/tests/lang/eval-okay-fromjson.nix
@@ -0,0 +1,32 @@
+# RFC 7159, section 13.
+builtins.fromJSON
+  ''
+    {
+      "Image": {
+          "Width":  800,
+          "Height": 600,
+          "Title":  "View from 15th Floor",
+          "Thumbnail": {
+              "Url":    "http://www.example.com/image/481989943",
+              "Height": 125,
+              "Width":  100
+          },
+          "Animated" : false,
+          "IDs": [116, 943, 234, 38793, true  ,false,null, -100]
+        }
+    }
+  ''
+==
+  { Image =
+    { Width = 800;
+      Height = 600;
+      Title = "View from 15th Floor";
+      Thumbnail =
+        { Url = http://www.example.com/image/481989943;
+          Height = 125;
+          Width = 100;
+        };
+      Animated = false;
+      IDs = [ 116 943 234 38793 true false null (0-100) ];
+    };
+  }
diff --git a/tests/lang/eval-okay-functionargs.exp.xml b/tests/lang/eval-okay-functionargs.exp.xml
new file mode 100644
index 000000000000..651f54c36341
--- /dev/null
+++ b/tests/lang/eval-okay-functionargs.exp.xml
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='utf-8'?>
+<expr>
+  <list>
+    <string value="stdenv" />
+    <string value="fetchurl" />
+    <string value="aterm-stdenv" />
+    <string value="aterm-stdenv2" />
+    <string value="libX11" />
+    <string value="libXv" />
+    <string value="mplayer-stdenv2.libXv-libX11" />
+    <string value="mplayer-stdenv2.libXv-libX11_2" />
+    <string value="nix-stdenv-aterm-stdenv" />
+    <string value="nix-stdenv2-aterm2-stdenv2" />
+  </list>
+</expr>
diff --git a/tests/lang/eval-okay-functionargs.nix b/tests/lang/eval-okay-functionargs.nix
new file mode 100644
index 000000000000..68dca62ee18d
--- /dev/null
+++ b/tests/lang/eval-okay-functionargs.nix
@@ -0,0 +1,80 @@
+let
+
+  stdenvFun = { }: { name = "stdenv"; };
+  stdenv2Fun = { }: { name = "stdenv2"; };
+  fetchurlFun = { stdenv }: assert stdenv.name == "stdenv"; { name = "fetchurl"; };
+  atermFun = { stdenv, fetchurl }: { name = "aterm-${stdenv.name}"; };
+  aterm2Fun = { stdenv, fetchurl }: { name = "aterm2-${stdenv.name}"; };
+  nixFun = { stdenv, fetchurl, aterm }: { name = "nix-${stdenv.name}-${aterm.name}"; };
+  
+  mplayerFun =
+    { stdenv, fetchurl, enableX11 ? false, xorg ? null, enableFoo ? true, foo ? null  }:
+    assert stdenv.name == "stdenv2";
+    assert enableX11 -> xorg.libXv.name == "libXv";
+    assert enableFoo -> foo != null;
+    { name = "mplayer-${stdenv.name}.${xorg.libXv.name}-${xorg.libX11.name}"; };
+
+  makeOverridable = f: origArgs: f origArgs //
+    { override = newArgs:
+        makeOverridable f (origArgs // (if builtins.isFunction newArgs then newArgs origArgs else newArgs));
+    };
+    
+  callPackage_ = pkgs: f: args:
+    makeOverridable f ((builtins.intersectAttrs (builtins.functionArgs f) pkgs) // args);
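+  # callPackage_ implements the callPackage pattern: functionArgs yields f's
+  # formal argument names and intersectAttrs picks exactly those attributes
+  # out of pkgs, so callers only pass explicit overrides in `args`.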
+
+  allPackages =
+    { overrides ? (pkgs: pkgsPrev: { }) }:
+    let
+      callPackage = callPackage_ pkgs;
+      pkgs = pkgsStd // (overrides pkgs pkgsStd);
+      pkgsStd = {
+        inherit pkgs;
+        stdenv = callPackage stdenvFun { };
+        stdenv2 = callPackage stdenv2Fun { };
+        fetchurl = callPackage fetchurlFun { };
+        aterm = callPackage atermFun { };
+        xorg = callPackage xorgFun { };
+        mplayer = callPackage mplayerFun { stdenv = pkgs.stdenv2; enableFoo = false; };
+        nix = callPackage nixFun { };
+      };
+    in pkgs;
+
+  libX11Fun = { stdenv, fetchurl }: { name = "libX11"; };
+  libX11_2Fun = { stdenv, fetchurl }: { name = "libX11_2"; };
+  libXvFun = { stdenv, fetchurl, libX11 }: { name = "libXv"; };
+  
+  xorgFun =
+    { pkgs }:
+    let callPackage = callPackage_ (pkgs // pkgs.xorg); in
+    {
+      libX11 = callPackage libX11Fun { };
+      libXv = callPackage libXvFun { };
+    };
+
+in
+
+let
+
+  pkgs = allPackages { };
+  
+  pkgs2 = allPackages {
+    overrides = pkgs: pkgsPrev: {
+      stdenv = pkgs.stdenv2;
+      nix = pkgsPrev.nix.override { aterm = aterm2Fun { inherit (pkgs) stdenv fetchurl; }; };
+      xorg = pkgsPrev.xorg // { libX11 = libX11_2Fun { inherit (pkgs) stdenv fetchurl; }; };
+    };
+  };
+  
+in
+
+  [ pkgs.stdenv.name
+    pkgs.fetchurl.name
+    pkgs.aterm.name
+    pkgs2.aterm.name
+    pkgs.xorg.libX11.name
+    pkgs.xorg.libXv.name
+    pkgs.mplayer.name
+    pkgs2.mplayer.name
+    pkgs.nix.name
+    pkgs2.nix.name
+  ]
diff --git a/tests/lang/eval-okay-getattrpos.exp b/tests/lang/eval-okay-getattrpos.exp
new file mode 100644
index 000000000000..469249bbc646
--- /dev/null
+++ b/tests/lang/eval-okay-getattrpos.exp
@@ -0,0 +1 @@
+{ column = 5; file = "eval-okay-getattrpos.nix"; line = 3; }
diff --git a/tests/lang/eval-okay-getattrpos.nix b/tests/lang/eval-okay-getattrpos.nix
new file mode 100644
index 000000000000..ca6b07961547
--- /dev/null
+++ b/tests/lang/eval-okay-getattrpos.nix
@@ -0,0 +1,6 @@
+let
+  as = {
+    foo = "bar";
+  };
+  pos = builtins.unsafeGetAttrPos "foo" as;
+in { inherit (pos) column line; file = baseNameOf pos.file; }
diff --git a/tests/lang/eval-okay-getenv.exp b/tests/lang/eval-okay-getenv.exp
new file mode 100644
index 000000000000..14e24d419005
--- /dev/null
+++ b/tests/lang/eval-okay-getenv.exp
@@ -0,0 +1 @@
+"foobar"
diff --git a/tests/lang/eval-okay-getenv.nix b/tests/lang/eval-okay-getenv.nix
new file mode 100644
index 000000000000..4cfec5f553d9
--- /dev/null
+++ b/tests/lang/eval-okay-getenv.nix
@@ -0,0 +1 @@
+builtins.getEnv "TEST_VAR" + (if builtins.getEnv "NO_SUCH_VAR" == "" then "bar" else "bla")
diff --git a/tests/lang/eval-okay-hash.exp b/tests/lang/eval-okay-hash.exp
new file mode 100644
index 000000000000..7bbe452bcc01
--- /dev/null
+++ b/tests/lang/eval-okay-hash.exp
@@ -0,0 +1 @@
+[ "d41d8cd98f00b204e9800998ecf8427e" "6c69ee7f211c640419d5366cc076ae46" "bb3438fbabd460ea6dbd27d153e2233b" "da39a3ee5e6b4b0d3255bfef95601890afd80709" "cd54e8568c1b37cf1e5badb0779bcbf382212189" "6d12e10b1d331dad210e47fd25d4f260802b7e77" "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" "900a4469df00ccbfd0c145c6d1e4b7953dd0afafadd7534e3a4019e8d38fc663" "ad0387b3bd8652f730ca46d25f9c170af0fd589f42e7f23f5a9e6412d97d7e56" ]
diff --git a/tests/lang/eval-okay-hash.nix b/tests/lang/eval-okay-hash.nix
new file mode 100644
index 000000000000..2fff17f849bb
--- /dev/null
+++ b/tests/lang/eval-okay-hash.nix
@@ -0,0 +1,7 @@
+let
+  md5 = builtins.hashString "md5";
+  sha1 = builtins.hashString "sha1";
+  sha256 = builtins.hashString "sha256";
+  strings = [ "" "text 1" "text 2" ];
+in
+  (builtins.map md5 strings) ++ (builtins.map sha1 strings) ++ (builtins.map sha256 strings)
diff --git a/tests/lang/eval-okay-if.exp b/tests/lang/eval-okay-if.exp
new file mode 100644
index 000000000000..00750edc07d6
--- /dev/null
+++ b/tests/lang/eval-okay-if.exp
@@ -0,0 +1 @@
+3
diff --git a/tests/lang/eval-okay-if.nix b/tests/lang/eval-okay-if.nix
new file mode 100644
index 000000000000..23e4c74d5016
--- /dev/null
+++ b/tests/lang/eval-okay-if.nix
@@ -0,0 +1 @@
+if "foo" != "f" + "oo" then 1 else if false then 2 else 3
diff --git a/tests/lang/eval-okay-import.exp b/tests/lang/eval-okay-import.exp
new file mode 100644
index 000000000000..c508125b55be
--- /dev/null
+++ b/tests/lang/eval-okay-import.exp
@@ -0,0 +1 @@
+[ 1 2 3 4 5 6 7 8 9 10 ]
diff --git a/tests/lang/eval-okay-import.nix b/tests/lang/eval-okay-import.nix
new file mode 100644
index 000000000000..0b18d9413122
--- /dev/null
+++ b/tests/lang/eval-okay-import.nix
@@ -0,0 +1,11 @@
+let
+
+  overrides = {
+    import = fn: scopedImport overrides fn;
+
+    scopedImport = attrs: fn: scopedImport (overrides // attrs) fn;
+
+    builtins = builtins // overrides;
+  } // import ./lib.nix;
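+  # scopedImport evaluates a file with the given attribute set added to its
+  # scope, so unqualified uses of import, scopedImport and builtins in the
+  # imported file resolve to these overrides (plus everything from lib.nix).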
+
+in scopedImport overrides ./imported.nix
diff --git a/tests/lang/eval-okay-ind-string.exp b/tests/lang/eval-okay-ind-string.exp
new file mode 100644
index 000000000000..886219dcf652
--- /dev/null
+++ b/tests/lang/eval-okay-ind-string.exp
@@ -0,0 +1 @@
+"This is an indented multi-line string\nliteral.  An amount of whitespace at\nthe start of each line matching the minimum\nindentation of all lines in the string\nliteral together will be removed.  Thus,\nin this case four spaces will be\nstripped from each line, even though\n  THIS LINE is indented six spaces.\n\nAlso, empty lines don't count in the\ndetermination of the indentation level (the\nprevious empty line has indentation 0, but\nit doesn't matter).\nIf the string starts with whitespace\n  followed by a newline, it's stripped, but\n  that's not the case here. Two spaces are\n  stripped because of the \"  \" at the start. \nThis line is indented\na bit further.\nAnti-quotations, like so, are\nalso allowed.\n  The \\ is not special here.\n' can be followed by any character except another ', e.g. 'x'.\nLikewise for $, e.g. $$ or $varName.\nBut ' followed by ' is special, as is $ followed by {.\nIf you want them, use anti-quotations: '', ${.\n   Tabs are not interpreted as whitespace (since we can't guess\n   what tab settings are intended), so don't use them.\n\tThis line starts with a space and a tab, so only one\n   space will be stripped from each line.\nAlso note that if the last line (just before the closing ' ')\nconsists only of whitespace, it's ignored.  But here there is\nsome non-whitespace stuff, so the line isn't removed. \nThis shows a hacky way to preserve an empty line after the start.\nBut there's no reason to do so: you could just repeat the empty\nline.\n  Similarly you can force an indentation level,\n  in this case to 2 spaces.  This works because the anti-quote\n  is significant (not whitespace).\nstart on network-interfaces\n\nstart script\n\n  rm -f /var/run/opengl-driver\n  ln -sf 123 /var/run/opengl-driver\n\n  rm -f /var/log/slim.log\n   \nend script\n\nenv SLIM_CFGFILE=abc\nenv SLIM_THEMESDIR=def\nenv FONTCONFIG_FILE=/etc/fonts/fonts.conf  \t\t\t\t# !!! cleanup\nenv XKB_BINDIR=foo/bin         \t\t\t\t# Needed for the Xkb extension.\nenv LD_LIBRARY_PATH=libX11/lib:libXext/lib:/usr/lib/          # related to xorg-sys-opengl - needed to load libglx for (AI)GLX support (for compiz)\n\nenv XORG_DRI_DRIVER_PATH=nvidiaDrivers/X11R6/lib/modules/drivers/ \n\nexec slim/bin/slim\nEscaping of ' followed by ': ''\nEscaping of $ followed by {: ${\nAnd finally to interpret \\n etc. as in a string: \n, \r, \t.\nfoo\n'bla'\nbar\n"
diff --git a/tests/lang/eval-okay-ind-string.nix b/tests/lang/eval-okay-ind-string.nix
new file mode 100644
index 000000000000..1556aae9f54f
--- /dev/null
+++ b/tests/lang/eval-okay-ind-string.nix
@@ -0,0 +1,120 @@
+let
+
+  s1 = ''
+    This is an indented multi-line string
+    literal.  An amount of whitespace at
+    the start of each line matching the minimum
+    indentation of all lines in the string
+    literal together will be removed.  Thus,
+    in this case four spaces will be
+    stripped from each line, even though
+      THIS LINE is indented six spaces.
+
+    Also, empty lines don't count in the
+    determination of the indentation level (the
+    previous empty line has indentation 0, but
+    it doesn't matter).
+  '';
+
+  s2 = ''  If the string starts with whitespace
+    followed by a newline, it's stripped, but
+    that's not the case here. Two spaces are
+    stripped because of the "  " at the start. 
+  '';
+
+  s3 = ''
+      This line is indented
+      a bit further.
+        ''; # indentation of last line doesn't count if it's empty
+
+  s4 = ''
+    Anti-quotations, like ${if true then "so" else "not so"}, are
+    also allowed.
+  '';
+
+  s5 = ''
+      The \ is not special here.
+    ' can be followed by any character except another ', e.g. 'x'.
+    Likewise for $, e.g. $$ or $varName.
+    But ' followed by ' is special, as is $ followed by {.
+    If you want them, use anti-quotations: ${"''"}, ${"\${"}.
+  '';
+
+  s6 = ''  
+    Tabs are not interpreted as whitespace (since we can't guess
+    what tab settings are intended), so don't use them.
+ 	This line starts with a space and a tab, so only one
+    space will be stripped from each line.
+  '';
+
+  s7 = ''
+    Also note that if the last line (just before the closing ' ')
+    consists only of whitespace, it's ignored.  But here there is
+    some non-whitespace stuff, so the line isn't removed. '';
+
+  s8 = ''    ${""}
+    This shows a hacky way to preserve an empty line after the start.
+    But there's no reason to do so: you could just repeat the empty
+    line.
+  '';
+
+  s9 = ''
+  ${""}  Similarly you can force an indentation level,
+    in this case to 2 spaces.  This works because the anti-quote
+    is significant (not whitespace).
+  '';
+
+  s10 = ''
+  '';
+
+  s11 = '''';
+
+  s12 = ''   '';
+
+  s13 = ''
+    start on network-interfaces
+
+    start script
+    
+      rm -f /var/run/opengl-driver
+      ${if true
+        then "ln -sf 123 /var/run/opengl-driver"
+        else if true
+        then "ln -sf 456 /var/run/opengl-driver"
+        else ""
+      }
+
+      rm -f /var/log/slim.log
+       
+    end script
+
+    env SLIM_CFGFILE=${"abc"}
+    env SLIM_THEMESDIR=${"def"}
+    env FONTCONFIG_FILE=/etc/fonts/fonts.conf  				# !!! cleanup
+    env XKB_BINDIR=${"foo"}/bin         				# Needed for the Xkb extension.
+    env LD_LIBRARY_PATH=${"libX11"}/lib:${"libXext"}/lib:/usr/lib/          # related to xorg-sys-opengl - needed to load libglx for (AI)GLX support (for compiz)
+
+    ${if true
+      then "env XORG_DRI_DRIVER_PATH=${"nvidiaDrivers"}/X11R6/lib/modules/drivers/"
+    else if true
+      then "env XORG_DRI_DRIVER_PATH=${"mesa"}/lib/modules/dri"
+      else ""
+    } 
+
+    exec ${"slim"}/bin/slim
+  '';
+
+  s14 = ''
+    Escaping of ' followed by ': '''
+    Escaping of $ followed by {: ''${
+    And finally to interpret \n etc. as in a string: ''\n, ''\r, ''\t.
+  '';
+
+  # Regression test: antiquotation in '${x}' should work, but didn't.
+  s15 = let x = "bla"; in ''
+    foo
+    '${x}'
+    bar
+  '';
+
+in s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10 + s11 + s12 + s13 + s14 + s15
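The s1–s15 bindings above spell out the indented-string rules: the smallest indentation over all non-blank lines is stripped, a lone ' or $ is an ordinary character, and ''' , ''${ and ''\n are the escapes for '', ${ and a newline. A minimal sketch of those rules, assuming evaluation with nix-instantiate --eval (hypothetical snippet, not part of this patch):

let
  stripped = ''
      indented two spaces beyond the minimum
    the minimum indentation (four spaces) is removed
  '';
  # "  indented two spaces beyond the minimum\nthe minimum indentation (four spaces) is removed\n"
  escaped = ''
    two quotes: ''', dollar-brace: ''${, newline escape: ''\n
  '';
  # ''' stands for '', ''${ for a literal ${, and ''\n for a newline character
in stripped + escaped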
diff --git a/tests/lang/eval-okay-let.exp b/tests/lang/eval-okay-let.exp
new file mode 100644
index 000000000000..14e24d419005
--- /dev/null
+++ b/tests/lang/eval-okay-let.exp
@@ -0,0 +1 @@
+"foobar"
diff --git a/tests/lang/eval-okay-let.nix b/tests/lang/eval-okay-let.nix
new file mode 100644
index 000000000000..fe118c5282e3
--- /dev/null
+++ b/tests/lang/eval-okay-let.nix
@@ -0,0 +1,5 @@
+let {
+  x = "foo";
+  y = "bar";
+  body = x + y;
+}
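eval-okay-let.nix uses the legacy let { ...; body = ...; } form, which evaluates to whatever its body attribute is; eval-okay-new-let.nix further down covers the current syntax. The equivalent modern expression would be something like (hypothetical, not part of the patch):

let x = "foo"; y = "bar"; in x + y   # "foobar", matching eval-okay-let.exp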
diff --git a/tests/lang/eval-okay-list.exp b/tests/lang/eval-okay-list.exp
new file mode 100644
index 000000000000..f784f26d83f4
--- /dev/null
+++ b/tests/lang/eval-okay-list.exp
@@ -0,0 +1 @@
+"foobarblatest"
diff --git a/tests/lang/eval-okay-list.nix b/tests/lang/eval-okay-list.nix
new file mode 100644
index 000000000000..d433bcf908ba
--- /dev/null
+++ b/tests/lang/eval-okay-list.nix
@@ -0,0 +1,7 @@
+with import ./lib.nix;
+
+let {
+
+  body = concat ["foo" "bar" "bla" "test"];
+    
+}
\ No newline at end of file
diff --git a/tests/lang/eval-okay-listtoattrs.exp b/tests/lang/eval-okay-listtoattrs.exp
new file mode 100644
index 000000000000..74abef7bc6ed
--- /dev/null
+++ b/tests/lang/eval-okay-listtoattrs.exp
@@ -0,0 +1 @@
+"AAbar"
diff --git a/tests/lang/eval-okay-listtoattrs.nix b/tests/lang/eval-okay-listtoattrs.nix
new file mode 100644
index 000000000000..4186e029b538
--- /dev/null
+++ b/tests/lang/eval-okay-listtoattrs.nix
@@ -0,0 +1,11 @@
+# this test shows how to use listToAttrs and that evaluation is still lazy (throw isn't called)
+with import ./lib.nix;
+
+let 
+  asi = name: value : { inherit name value; };
+  list = [ ( asi "a" "A" ) ( asi "b" "B" ) ];
+  a = builtins.listToAttrs list;
+  b = builtins.listToAttrs ( list ++ list );
+  r = builtins.listToAttrs [ (asi "result" [ a b ]) ( asi "throw" (throw "this should not be thrown")) ];
+  x = builtins.listToAttrs [ (asi "foo" "bar") (asi "foo" "bla") ];
+in concat (map (x: x.a) r.result) + x.foo
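builtins.listToAttrs turns a list of { name, value } pairs into an attribute set; as the x binding above asserts, the first pair wins when a name occurs twice, and values stay unevaluated until used (hence the throw that is never triggered). A small sketch (hypothetical, not one of the test files):

builtins.listToAttrs [
  { name = "foo"; value = "bar"; }
  { name = "foo"; value = throw "dropped, never forced"; }
]
# => { foo = "bar"; }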
diff --git a/tests/lang/eval-okay-logic.exp b/tests/lang/eval-okay-logic.exp
new file mode 100644
index 000000000000..d00491fd7e5b
--- /dev/null
+++ b/tests/lang/eval-okay-logic.exp
@@ -0,0 +1 @@
+1
diff --git a/tests/lang/eval-okay-logic.nix b/tests/lang/eval-okay-logic.nix
new file mode 100644
index 000000000000..fbb12794401f
--- /dev/null
+++ b/tests/lang/eval-okay-logic.nix
@@ -0,0 +1 @@
+assert !false && (true || false) -> true; 1
diff --git a/tests/lang/eval-okay-map.exp b/tests/lang/eval-okay-map.exp
new file mode 100644
index 000000000000..dbb64f717b96
--- /dev/null
+++ b/tests/lang/eval-okay-map.exp
@@ -0,0 +1 @@
+"foobarblabarxyzzybar"
diff --git a/tests/lang/eval-okay-map.nix b/tests/lang/eval-okay-map.nix
new file mode 100644
index 000000000000..a76c1d811454
--- /dev/null
+++ b/tests/lang/eval-okay-map.nix
@@ -0,0 +1,3 @@
+with import ./lib.nix;
+
+concat (map (x: x + "bar") [ "foo" "bla" "xyzzy" ])
\ No newline at end of file
diff --git a/tests/lang/eval-okay-new-let.exp b/tests/lang/eval-okay-new-let.exp
new file mode 100644
index 000000000000..f98b388071c2
--- /dev/null
+++ b/tests/lang/eval-okay-new-let.exp
@@ -0,0 +1 @@
+"xyzzyfoobar"
diff --git a/tests/lang/eval-okay-new-let.nix b/tests/lang/eval-okay-new-let.nix
new file mode 100644
index 000000000000..738123141508
--- /dev/null
+++ b/tests/lang/eval-okay-new-let.nix
@@ -0,0 +1,14 @@
+let
+
+  f = z: 
+
+    let
+      x = "foo";
+      y = "bar";
+      body = 1; # compat test
+    in
+      z + x + y;
+
+  arg = "xyzzy";
+
+in f arg
diff --git a/tests/lang/eval-okay-null-dynamic-attrs.exp b/tests/lang/eval-okay-null-dynamic-attrs.exp
new file mode 100644
index 000000000000..27ba77ddaf61
--- /dev/null
+++ b/tests/lang/eval-okay-null-dynamic-attrs.exp
@@ -0,0 +1 @@
+true
diff --git a/tests/lang/eval-okay-null-dynamic-attrs.nix b/tests/lang/eval-okay-null-dynamic-attrs.nix
new file mode 100644
index 000000000000..b060c0bc9850
--- /dev/null
+++ b/tests/lang/eval-okay-null-dynamic-attrs.nix
@@ -0,0 +1 @@
+{ ${null} = true; } == {}
diff --git a/tests/lang/eval-okay-overrides.exp b/tests/lang/eval-okay-overrides.exp
new file mode 100644
index 000000000000..0cfbf08886fc
--- /dev/null
+++ b/tests/lang/eval-okay-overrides.exp
@@ -0,0 +1 @@
+2
diff --git a/tests/lang/eval-okay-overrides.nix b/tests/lang/eval-okay-overrides.nix
new file mode 100644
index 000000000000..358742b36e22
--- /dev/null
+++ b/tests/lang/eval-okay-overrides.nix
@@ -0,0 +1,9 @@
+let
+
+  overrides = { a = 2; };
+
+in (rec {
+  __overrides = overrides;
+  x = a;
+  a = 1;
+}).x
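The __overrides attribute of a rec set replaces bindings before the other attributes are evaluated, which is why x above ends up as 2 rather than 1. A parallel sketch, assuming the same behaviour the test relies on (hypothetical):

(rec {
  __overrides = { a = "new"; };
  a = "old";
  b = a;
}).b
# "new" — b sees the overridden a, just as x does in the test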
diff --git a/tests/lang/eval-okay-pathexists.exp b/tests/lang/eval-okay-pathexists.exp
new file mode 100644
index 000000000000..27ba77ddaf61
--- /dev/null
+++ b/tests/lang/eval-okay-pathexists.exp
@@ -0,0 +1 @@
+true
diff --git a/tests/lang/eval-okay-pathexists.nix b/tests/lang/eval-okay-pathexists.nix
new file mode 100644
index 000000000000..50c28ee0cd30
--- /dev/null
+++ b/tests/lang/eval-okay-pathexists.nix
@@ -0,0 +1,5 @@
+builtins.pathExists (builtins.toPath ./lib.nix)
+&& builtins.pathExists (builtins.toPath (builtins.toString ./lib.nix))
+&& !builtins.pathExists (builtins.toPath (builtins.toString ./bla.nix))
+&& builtins.pathExists ./lib.nix
+&& !builtins.pathExists ./bla.nix
diff --git a/tests/lang/eval-okay-patterns.exp b/tests/lang/eval-okay-patterns.exp
new file mode 100644
index 000000000000..a4304010fe80
--- /dev/null
+++ b/tests/lang/eval-okay-patterns.exp
@@ -0,0 +1 @@
+"abcxyzDDDDEFijk"
diff --git a/tests/lang/eval-okay-patterns.nix b/tests/lang/eval-okay-patterns.nix
new file mode 100644
index 000000000000..96fd25a01517
--- /dev/null
+++ b/tests/lang/eval-okay-patterns.nix
@@ -0,0 +1,16 @@
+let
+
+  f = args@{x, y, z}: x + args.y + z;
+
+  g = {x, y, z}@args: f args;
+
+  h = {x ? "d", y ? x, z ? args.x}@args: x + y + z;
+
+  j = {x, y, z, ...}: x + y + z;
+
+in
+  f {x = "a"; y = "b"; z = "c";} +
+  g {x = "x"; y = "y"; z = "z";} +
+  h {x = "D";} +
+  h {x = "D"; y = "E"; z = "F";} +
+  j {x = "i"; y = "j"; z = "k"; bla = "bla"; foo = "bar";}
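The functions above cover the set-pattern features: an @-binding on either side of the pattern, per-argument defaults that may refer to other arguments (or to the @-bound set), and an ellipsis that tolerates extra attributes. A compact sketch combining them (hypothetical, not part of the patch):

let f = { x, y ? "b", ... }@args: x + y + args.z;
in f { x = "a"; z = "c"; }
# "abc" — y falls back to its default, z is only reachable through the @-binding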
diff --git a/tests/lang/eval-okay-readfile.exp b/tests/lang/eval-okay-readfile.exp
new file mode 100644
index 000000000000..a2c87d0c439f
--- /dev/null
+++ b/tests/lang/eval-okay-readfile.exp
@@ -0,0 +1 @@
+"builtins.readFile ./eval-okay-readfile.nix\n"
diff --git a/tests/lang/eval-okay-readfile.nix b/tests/lang/eval-okay-readfile.nix
new file mode 100644
index 000000000000..82f7cb17435a
--- /dev/null
+++ b/tests/lang/eval-okay-readfile.nix
@@ -0,0 +1 @@
+builtins.readFile ./eval-okay-readfile.nix
diff --git a/tests/lang/eval-okay-redefine-builtin.exp b/tests/lang/eval-okay-redefine-builtin.exp
new file mode 100644
index 000000000000..c508d5366f70
--- /dev/null
+++ b/tests/lang/eval-okay-redefine-builtin.exp
@@ -0,0 +1 @@
+false
diff --git a/tests/lang/eval-okay-redefine-builtin.nix b/tests/lang/eval-okay-redefine-builtin.nix
new file mode 100644
index 000000000000..df9fc3f37d22
--- /dev/null
+++ b/tests/lang/eval-okay-redefine-builtin.nix
@@ -0,0 +1,3 @@
+let
+  throw = abort "Error!";
+in (builtins.tryEval <foobaz>).success
diff --git a/tests/lang/eval-okay-remove.exp b/tests/lang/eval-okay-remove.exp
new file mode 100644
index 000000000000..8d38505c1686
--- /dev/null
+++ b/tests/lang/eval-okay-remove.exp
@@ -0,0 +1 @@
+456
diff --git a/tests/lang/eval-okay-remove.nix b/tests/lang/eval-okay-remove.nix
new file mode 100644
index 000000000000..4ad5ba897fa7
--- /dev/null
+++ b/tests/lang/eval-okay-remove.nix
@@ -0,0 +1,5 @@
+let {
+  attrs = {x = 123; y = 456;};
+
+  body = (removeAttrs attrs ["x"]).y;
+}
\ No newline at end of file
diff --git a/tests/lang/eval-okay-scope-1.exp b/tests/lang/eval-okay-scope-1.exp
new file mode 100644
index 000000000000..00750edc07d6
--- /dev/null
+++ b/tests/lang/eval-okay-scope-1.exp
@@ -0,0 +1 @@
+3
diff --git a/tests/lang/eval-okay-scope-1.nix b/tests/lang/eval-okay-scope-1.nix
new file mode 100644
index 000000000000..fa38a7174e03
--- /dev/null
+++ b/tests/lang/eval-okay-scope-1.nix
@@ -0,0 +1,6 @@
+(({x}: x:
+
+  { x = 1;
+    y = x;
+  }
+) {x = 2;} 3).y
diff --git a/tests/lang/eval-okay-scope-2.exp b/tests/lang/eval-okay-scope-2.exp
new file mode 100644
index 000000000000..d00491fd7e5b
--- /dev/null
+++ b/tests/lang/eval-okay-scope-2.exp
@@ -0,0 +1 @@
+1
diff --git a/tests/lang/eval-okay-scope-2.nix b/tests/lang/eval-okay-scope-2.nix
new file mode 100644
index 000000000000..eb8b02bc4994
--- /dev/null
+++ b/tests/lang/eval-okay-scope-2.nix
@@ -0,0 +1,6 @@
+((x: {x}:
+  rec {
+    x = 1;
+    y = x;
+  }
+) 2 {x = 3;}).y
diff --git a/tests/lang/eval-okay-scope-3.exp b/tests/lang/eval-okay-scope-3.exp
new file mode 100644
index 000000000000..b8626c4cff28
--- /dev/null
+++ b/tests/lang/eval-okay-scope-3.exp
@@ -0,0 +1 @@
+4
diff --git a/tests/lang/eval-okay-scope-3.nix b/tests/lang/eval-okay-scope-3.nix
new file mode 100644
index 000000000000..10d6bc04d830
--- /dev/null
+++ b/tests/lang/eval-okay-scope-3.nix
@@ -0,0 +1,6 @@
+((x: as: {x}:
+  rec {
+    inherit (as) x;
+    y = x;
+  }
+) 2 {x = 4;} {x = 3;}).y
diff --git a/tests/lang/eval-okay-scope-4.exp b/tests/lang/eval-okay-scope-4.exp
new file mode 100644
index 000000000000..00ff03a46c9b
--- /dev/null
+++ b/tests/lang/eval-okay-scope-4.exp
@@ -0,0 +1 @@
+"ccdd"
diff --git a/tests/lang/eval-okay-scope-4.nix b/tests/lang/eval-okay-scope-4.nix
new file mode 100644
index 000000000000..dc8243bc8546
--- /dev/null
+++ b/tests/lang/eval-okay-scope-4.nix
@@ -0,0 +1,10 @@
+let {
+
+  x = "a";
+  y = "b";
+
+  f = {x ? y, y ? x}: x + y;
+
+  body = f {x = "c";} + f {y = "d";};
+
+}
diff --git a/tests/lang/eval-okay-scope-6.exp b/tests/lang/eval-okay-scope-6.exp
new file mode 100644
index 000000000000..00ff03a46c9b
--- /dev/null
+++ b/tests/lang/eval-okay-scope-6.exp
@@ -0,0 +1 @@
+"ccdd"
diff --git a/tests/lang/eval-okay-scope-6.nix b/tests/lang/eval-okay-scope-6.nix
new file mode 100644
index 000000000000..0995d4e7e7e0
--- /dev/null
+++ b/tests/lang/eval-okay-scope-6.nix
@@ -0,0 +1,7 @@
+let {
+
+  f = {x ? y, y ? x}: x + y;
+
+  body = f {x = "c";} + f {y = "d";};
+
+}
diff --git a/tests/lang/eval-okay-scope-7.exp b/tests/lang/eval-okay-scope-7.exp
new file mode 100644
index 000000000000..d00491fd7e5b
--- /dev/null
+++ b/tests/lang/eval-okay-scope-7.exp
@@ -0,0 +1 @@
+1
diff --git a/tests/lang/eval-okay-scope-7.nix b/tests/lang/eval-okay-scope-7.nix
new file mode 100644
index 000000000000..4da02968f6b7
--- /dev/null
+++ b/tests/lang/eval-okay-scope-7.nix
@@ -0,0 +1,6 @@
+rec {
+  inherit (x) y;
+  x = {
+    y = 1;
+  };
+}.y
diff --git a/tests/lang/eval-okay-search-path.exp b/tests/lang/eval-okay-search-path.exp
new file mode 100644
index 000000000000..4519bc406db5
--- /dev/null
+++ b/tests/lang/eval-okay-search-path.exp
@@ -0,0 +1 @@
+"abccX"
diff --git a/tests/lang/eval-okay-search-path.flags b/tests/lang/eval-okay-search-path.flags
new file mode 100644
index 000000000000..a28e6821004a
--- /dev/null
+++ b/tests/lang/eval-okay-search-path.flags
@@ -0,0 +1 @@
+-I lang/dir1 -I lang/dir2 -I dir5=lang/dir3
\ No newline at end of file
diff --git a/tests/lang/eval-okay-search-path.nix b/tests/lang/eval-okay-search-path.nix
new file mode 100644
index 000000000000..cca41f821f83
--- /dev/null
+++ b/tests/lang/eval-okay-search-path.nix
@@ -0,0 +1,11 @@
+with import ./lib.nix;
+with builtins;
+
+assert pathExists <nix/buildenv.nix>;
+
+assert length __nixPath == 6;
+assert length (filter (x: x.prefix == "nix") __nixPath) == 1;
+assert length (filter (x: baseNameOf x.path == "dir4") __nixPath) == 1;
+
+import <a.nix> + import <b.nix> + import <c.nix> + import <dir5/c.nix>
+  + (let __nixPath = [ { path = ./dir2; } { path = ./dir1; } ]; in import <a.nix>)
diff --git a/tests/lang/eval-okay-string.exp b/tests/lang/eval-okay-string.exp
new file mode 100644
index 000000000000..6079202470e3
--- /dev/null
+++ b/tests/lang/eval-okay-string.exp
@@ -0,0 +1 @@
+"foobar/a/b/c/d/foo/xyzzy/foo.txt/../foo/x/yescape: \"quote\" \n \\end\nof\nlinefoobarblaatfoo$bar"
diff --git a/tests/lang/eval-okay-string.nix b/tests/lang/eval-okay-string.nix
new file mode 100644
index 000000000000..839539e6c171
--- /dev/null
+++ b/tests/lang/eval-okay-string.nix
@@ -0,0 +1,10 @@
+"foo" + "bar"
+  + toString (/a/b + /c/d)
+  + toString (/foo/bar + "/../xyzzy/." + "/foo.txt")
+  + ("/../foo" + toString /x/y)
+  + "escape: \"quote\" \n \\"
+  + "end
+of
+line"
+  + "foo${if true then "b${"a" + "r"}" else "xyzzy"}blaat"
+  + "foo$bar"
diff --git a/tests/lang/eval-okay-strings-as-attrs-names.exp b/tests/lang/eval-okay-strings-as-attrs-names.exp
new file mode 100644
index 000000000000..27ba77ddaf61
--- /dev/null
+++ b/tests/lang/eval-okay-strings-as-attrs-names.exp
@@ -0,0 +1 @@
+true
diff --git a/tests/lang/eval-okay-strings-as-attrs-names.nix b/tests/lang/eval-okay-strings-as-attrs-names.nix
new file mode 100644
index 000000000000..5e40928dbe31
--- /dev/null
+++ b/tests/lang/eval-okay-strings-as-attrs-names.nix
@@ -0,0 +1,20 @@
+let
+
+  attr = {
+    "key 1" = "test";
+    "key 2" = "caseok";
+  };
+
+  t1 = builtins.getAttr "key 1" attr;
+  t2 = attr."key 2";
+  t3 = attr ? "key 1";
+  t4 = builtins.attrNames { inherit (attr) "key 1"; };
+
+  # This is permitted, but there is currently no way to reference this
+  # variable.
+  "foo bar" = 1;
+
+in t1 == "test"
+   && t2 == "caseok"
+   && t3 == true
+   && t4 == ["key 1"]
diff --git a/tests/lang/eval-okay-substring.exp b/tests/lang/eval-okay-substring.exp
new file mode 100644
index 000000000000..6aace04b0f57
--- /dev/null
+++ b/tests/lang/eval-okay-substring.exp
@@ -0,0 +1 @@
+"ooxfoobarybarzobaabbc"
diff --git a/tests/lang/eval-okay-substring.nix b/tests/lang/eval-okay-substring.nix
new file mode 100644
index 000000000000..424af00d9b3b
--- /dev/null
+++ b/tests/lang/eval-okay-substring.nix
@@ -0,0 +1,21 @@
+with builtins;
+
+let
+
+  s = "foobar";
+
+in
+
+substring 1 2 s
++ "x"
++ substring 0 (stringLength s) s
++ "y"
++ substring 3 100 s
++ "z"
++ substring 2 (sub (stringLength s) 3) s
++ "a"
++ substring 3 0 s
++ "b"
++ substring 3 1 s
++ "c"
++ substring 5 10 "perl"
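builtins.substring takes a start offset and a length and clamps both to the actual string, which is why the calls above that ask for 100 characters or start past the end are fine. For instance (hypothetical; the values follow from eval-okay-substring.exp):

with builtins;
[ (substring 1 2 "foobar") (substring 3 100 "foobar") (substring 5 10 "perl") ]
# [ "oo" "bar" "" ]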
diff --git a/tests/lang/eval-okay-tail-call-1.exp-disabled b/tests/lang/eval-okay-tail-call-1.exp-disabled
new file mode 100644
index 000000000000..f7393e847d34
--- /dev/null
+++ b/tests/lang/eval-okay-tail-call-1.exp-disabled
@@ -0,0 +1 @@
+100000
diff --git a/tests/lang/eval-okay-tail-call-1.nix b/tests/lang/eval-okay-tail-call-1.nix
new file mode 100644
index 000000000000..a3962ce3fdb5
--- /dev/null
+++ b/tests/lang/eval-okay-tail-call-1.nix
@@ -0,0 +1,3 @@
+let
+  f = n: if n == 100000 then n else f (n + 1);
+in f 0
diff --git a/tests/lang/eval-okay-tojson.exp b/tests/lang/eval-okay-tojson.exp
new file mode 100644
index 000000000000..e8164af2b66e
--- /dev/null
+++ b/tests/lang/eval-okay-tojson.exp
@@ -0,0 +1 @@
+"{\"a\":123,\"b\":-456,\"c\":\"foo\",\"d\":\"foo\\n\\\"bar\\\"\",\"e\":true,\"f\":false,\"g\":[1,2,3],\"h\":[\"a\",[\"b\",{\"foo\\nbar\":{}}]],\"i\":3}"
diff --git a/tests/lang/eval-okay-tojson.nix b/tests/lang/eval-okay-tojson.nix
new file mode 100644
index 000000000000..0d4e55b3d367
--- /dev/null
+++ b/tests/lang/eval-okay-tojson.nix
@@ -0,0 +1,11 @@
+builtins.toJSON
+  { a = 123;
+    b = -456;
+    c = "foo";
+    d = "foo\n\"bar\"";
+    e = true;
+    f = false;
+    g = [ 1 2 3 ];
+    h = [ "a" [ "b" { "foo\nbar" = {}; } ] ];
+    i = 1 + 2;
+  }
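builtins.toJSON forces its argument and serialises it to a JSON string, so the i = 1 + 2 attribute shows up as 3 in the .exp above and attribute names come out in sorted order. A one-line sketch (hypothetical):

builtins.toJSON { x = 1 + 2; ys = [ true null ]; }
# yields the string {"x":3,"ys":[true,null]}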
diff --git a/tests/lang/eval-okay-toxml.exp b/tests/lang/eval-okay-toxml.exp
new file mode 100644
index 000000000000..828220890ecd
--- /dev/null
+++ b/tests/lang/eval-okay-toxml.exp
@@ -0,0 +1 @@
+"<?xml version='1.0' encoding='utf-8'?>\n<expr>\n  <attrs>\n    <attr name=\"a\">\n      <string value=\"s\" />\n    </attr>\n  </attrs>\n</expr>\n"
diff --git a/tests/lang/eval-okay-toxml.nix b/tests/lang/eval-okay-toxml.nix
new file mode 100644
index 000000000000..068c97a6c1b3
--- /dev/null
+++ b/tests/lang/eval-okay-toxml.nix
@@ -0,0 +1,3 @@
+# Make sure the expected XML output is produced; in particular, make sure it
+# doesn't contain source location information.
+builtins.toXML { a = "s"; }
diff --git a/tests/lang/eval-okay-toxml2.exp b/tests/lang/eval-okay-toxml2.exp
new file mode 100644
index 000000000000..634a841eb190
--- /dev/null
+++ b/tests/lang/eval-okay-toxml2.exp
@@ -0,0 +1 @@
+"<?xml version='1.0' encoding='utf-8'?>\n<expr>\n  <list>\n    <string value=\"ab\" />\n    <int value=\"10\" />\n    <attrs>\n      <attr name=\"x\">\n        <string value=\"x\" />\n      </attr>\n      <attr name=\"y\">\n        <string value=\"x\" />\n      </attr>\n    </attrs>\n  </list>\n</expr>\n"
diff --git a/tests/lang/eval-okay-toxml2.nix b/tests/lang/eval-okay-toxml2.nix
new file mode 100644
index 000000000000..ff1791b30eb5
--- /dev/null
+++ b/tests/lang/eval-okay-toxml2.nix
@@ -0,0 +1 @@
+builtins.toXML [("a" + "b") 10 (rec {x = "x"; y = x;})]
diff --git a/tests/lang/eval-okay-tryeval.exp b/tests/lang/eval-okay-tryeval.exp
new file mode 100644
index 000000000000..2b2e6fa711f4
--- /dev/null
+++ b/tests/lang/eval-okay-tryeval.exp
@@ -0,0 +1 @@
+{ x = { success = true; value = "x"; }; y = { success = false; value = false; }; z = { success = false; value = false; }; }
diff --git a/tests/lang/eval-okay-tryeval.nix b/tests/lang/eval-okay-tryeval.nix
new file mode 100644
index 000000000000..629bc440a85a
--- /dev/null
+++ b/tests/lang/eval-okay-tryeval.nix
@@ -0,0 +1,5 @@
+{
+  x = builtins.tryEval "x";
+  y = builtins.tryEval (assert false; "y");
+  z = builtins.tryEval (throw "bla");
+}
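builtins.tryEval catches throw and failed assertions (though not abort) and returns a { success, value } pair, with value collapsing to false on failure, as eval-okay-tryeval.exp above shows. Sketch (hypothetical):

{
  ok  = builtins.tryEval "x";            # { success = true;  value = "x"; }
  bad = builtins.tryEval (throw "bla");  # { success = false; value = false; }
}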
diff --git a/tests/lang/eval-okay-types.exp b/tests/lang/eval-okay-types.exp
new file mode 100644
index 000000000000..82487f7100e2
--- /dev/null
+++ b/tests/lang/eval-okay-types.exp
@@ -0,0 +1 @@
+[ true false true false true false true false true false true false "int" "bool" "string" "null" "set" "list" "lambda" "lambda" "lambda" "lambda" ]
diff --git a/tests/lang/eval-okay-types.nix b/tests/lang/eval-okay-types.nix
new file mode 100644
index 000000000000..8cb225e247fb
--- /dev/null
+++ b/tests/lang/eval-okay-types.nix
@@ -0,0 +1,25 @@
+with builtins;
+
+[ (isNull null)
+  (isNull (x: x))
+  (isFunction (x: x))
+  (isFunction "fnord")
+  (isString ("foo" + "bar"))
+  (isString [ "x" ])
+  (isInt (1 + 2))
+  (isInt { x = 123; })
+  (isBool (true && false))
+  (isBool null)
+  (isAttrs { x = 123; })
+  (isAttrs null)
+  (typeOf (3 * 4))
+  (typeOf true)
+  (typeOf "xyzzy")
+  (typeOf null)
+  (typeOf { x = 456; })
+  (typeOf [ 1 2 3 ])
+  (typeOf (x: x))
+  (typeOf ((x: y: x) 1))
+  (typeOf map)
+  (typeOf (map (x: x)))
+]
diff --git a/tests/lang/eval-okay-versions.exp b/tests/lang/eval-okay-versions.exp
new file mode 100644
index 000000000000..27ba77ddaf61
--- /dev/null
+++ b/tests/lang/eval-okay-versions.exp
@@ -0,0 +1 @@
+true
diff --git a/tests/lang/eval-okay-versions.nix b/tests/lang/eval-okay-versions.nix
new file mode 100644
index 000000000000..e63c36586bb6
--- /dev/null
+++ b/tests/lang/eval-okay-versions.nix
@@ -0,0 +1,40 @@
+let
+
+  name1 = "hello-1.0.2";
+  name2 = "hello";
+  name3 = "915resolution-0.5.2";
+  name4 = "xf86-video-i810-1.7.4";
+
+  eq = 0;
+  lt = builtins.sub 0 1;
+  gt = 1;
+
+  versionTest = v1: v2: expected:
+    let d1 = builtins.compareVersions v1 v2;
+        d2 = builtins.compareVersions v2 v1;
+    in d1 == builtins.sub 0 d2 && d1 == expected;
+
+  tests = [
+    ((builtins.parseDrvName name1).name == "hello")
+    ((builtins.parseDrvName name1).version == "1.0.2")
+    ((builtins.parseDrvName name2).name == "hello")
+    ((builtins.parseDrvName name2).version == "")
+    ((builtins.parseDrvName name3).name == "915resolution")
+    ((builtins.parseDrvName name3).version == "0.5.2")
+    ((builtins.parseDrvName name4).name == "xf86-video-i810")
+    ((builtins.parseDrvName name4).version == "1.7.4")
+    (versionTest "1.0" "2.3" lt)
+    (versionTest "2.1" "2.3" lt)
+    (versionTest "2.3" "2.3" eq)
+    (versionTest "2.5" "2.3" gt)
+    (versionTest "3.1" "2.3" gt)
+    (versionTest "2.3.1" "2.3" gt)
+    (versionTest "2.3.1" "2.3a" gt)
+    (versionTest "2.3pre1" "2.3" lt)
+    (versionTest "2.3pre3" "2.3pre12" lt)
+    (versionTest "2.3a" "2.3c" lt)
+    (versionTest "2.3pre1" "2.3c" lt)
+    (versionTest "2.3pre1" "2.3q" lt)
+  ];
+
+in (import ./lib.nix).and tests
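builtins.parseDrvName splits a package name at the first dash that is not followed by a letter, and builtins.compareVersions returns -1, 0 or 1 with pre-release and letter components ordered before numeric ones, exactly as the table of tests above asserts. For example (hypothetical snippet; the values are the ones the tests check):

[ (builtins.parseDrvName "hello-1.0.2")       # { name = "hello"; version = "1.0.2"; }
  (builtins.compareVersions "2.3pre1" "2.3")  # -1: the pre-release precedes the release
  (builtins.compareVersions "2.3" "2.3")      # 0
]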
diff --git a/tests/lang/eval-okay-with.exp b/tests/lang/eval-okay-with.exp
new file mode 100644
index 000000000000..378c8dc80403
--- /dev/null
+++ b/tests/lang/eval-okay-with.exp
@@ -0,0 +1 @@
+"xyzzybarxyzzybar"
diff --git a/tests/lang/eval-okay-with.nix b/tests/lang/eval-okay-with.nix
new file mode 100644
index 000000000000..033e8d3aba57
--- /dev/null
+++ b/tests/lang/eval-okay-with.nix
@@ -0,0 +1,19 @@
+let {
+
+  a = "xyzzy";
+
+  as = {
+    a = "foo";
+    b = "bar";
+  };
+
+  bs = {
+    a = "bar";
+  };
+
+  x = with as; a + b;
+
+  y = with as; with bs; a + b;
+
+  body = x + y;
+}
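The x and y bindings above rely on lexical bindings taking precedence over attributes brought into scope by with: a stays the let-bound "xyzzy" even though both as and bs define a. A minimal sketch (hypothetical):

let a = "xyzzy";
in with { a = "foo"; b = "bar"; }; a + b
# "xyzzybar" — the let-bound a wins over the one pulled in by with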
diff --git a/tests/lang/eval-okay-xml.exp.xml b/tests/lang/eval-okay-xml.exp.xml
new file mode 100644
index 000000000000..f124f939ed48
--- /dev/null
+++ b/tests/lang/eval-okay-xml.exp.xml
@@ -0,0 +1,49 @@
+<?xml version='1.0' encoding='utf-8'?>
+<expr>
+  <attrs>
+    <attr name="a">
+      <string value="foo" />
+    </attr>
+    <attr name="at">
+      <function>
+        <attrspat name="args">
+          <attr name="x" />
+          <attr name="y" />
+          <attr name="z" />
+        </attrspat>
+      </function>
+    </attr>
+    <attr name="b">
+      <string value="bar" />
+    </attr>
+    <attr name="c">
+      <string value="foobar" />
+    </attr>
+    <attr name="ellipsis">
+      <function>
+        <attrspat ellipsis="1">
+          <attr name="x" />
+          <attr name="y" />
+          <attr name="z" />
+        </attrspat>
+      </function>
+    </attr>
+    <attr name="f">
+      <function>
+        <attrspat>
+          <attr name="z" />
+          <attr name="x" />
+          <attr name="y" />
+        </attrspat>
+      </function>
+    </attr>
+    <attr name="id">
+      <function>
+        <varpat name="x" />
+      </function>
+    </attr>
+    <attr name="x">
+      <int value="123" />
+    </attr>
+  </attrs>
+</expr>
diff --git a/tests/lang/eval-okay-xml.nix b/tests/lang/eval-okay-xml.nix
new file mode 100644
index 000000000000..b9389bfae759
--- /dev/null
+++ b/tests/lang/eval-okay-xml.nix
@@ -0,0 +1,19 @@
+rec {
+
+  x = 123;
+
+  a = "foo";
+
+  b = "bar";
+
+  c = "foo" + "bar";
+
+  f = {z, x, y}: if y then x else z;
+
+  id = x: x;
+
+  at = args@{x, y, z}: x;
+
+  ellipsis = {x, y, z, ...}: x;
+
+}
diff --git a/tests/lang/imported.nix b/tests/lang/imported.nix
new file mode 100644
index 000000000000..fb39ee4efacd
--- /dev/null
+++ b/tests/lang/imported.nix
@@ -0,0 +1,3 @@
+# The function ‘range’ comes from lib.nix and was added to the lexical
+# scope by scopedImport.
+range 1 5 ++ import ./imported2.nix
diff --git a/tests/lang/imported2.nix b/tests/lang/imported2.nix
new file mode 100644
index 000000000000..6d0a2992b732
--- /dev/null
+++ b/tests/lang/imported2.nix
@@ -0,0 +1 @@
+range 6 10
diff --git a/tests/lang/lib.nix b/tests/lang/lib.nix
new file mode 100644
index 000000000000..882005dc1b5c
--- /dev/null
+++ b/tests/lang/lib.nix
@@ -0,0 +1,61 @@
+with builtins;
+
+rec {
+
+  fold = op: nul: list:
+    if list == []
+    then nul
+    else op (head list) (fold op nul (tail list));
+
+  concat =
+    fold (x: y: x + y) "";
+
+  and = fold (x: y: x && y) true;
+
+  flatten = x:
+    if isList x
+    then fold (x: y: (flatten x) ++ y) [] x
+    else [x];
+
+  sum = fold (x: y: add x y) 0;
+
+  hasSuffix = ext: fileName:
+    let lenFileName = stringLength fileName;
+        lenExt = stringLength ext;
+    in !(lessThan lenFileName lenExt) &&
+       substring (sub lenFileName lenExt) lenFileName fileName == ext;
+
+  # Split a list at the given position.
+  splitAt = pos: list:
+    if pos == 0 then {first = []; second = list;} else
+    if list == [] then {first = []; second = [];} else
+    let res = splitAt (sub pos 1) (tail list);
+    in {first = [(head list)] ++ res.first; second = res.second;};
+
+  # Stable merge sort.
+  sortBy = comp: list:
+    if lessThan 1 (length list)
+    then
+      let
+        split = splitAt (div (length list) 2) list;
+        first = sortBy comp split.first;
+        second = sortBy comp split.second;
+      in mergeLists comp first second
+    else list;
+
+  mergeLists = comp: list1: list2:
+    if list1 == [] then list2 else
+    if list2 == [] then list1 else
+    if comp (head list2) (head list1) then [(head list2)] ++ mergeLists comp list1 (tail list2) else
+    [(head list1)] ++ mergeLists comp (tail list1) list2;
+
+  id = x: x;
+
+  const = x: y: x;
+
+  range = first: last:
+    if builtins.lessThan last first
+    then []
+    else [first] ++ range (builtins.add first 1) last;
+
+}
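lib.nix gives the language tests a tiny helper library built on fold: concat, and, sum, flatten, a stable merge sort and range, among others. A usage sketch of a few of them (hypothetical, not one of the test files):

with import ./lib.nix;
[ (concat [ "a" "b" "c" ])     # "abc"
  (sum [ 1 2 3 ])              # 6
  (flatten [ 1 [ 2 [ 3 ] ] ])  # [ 1 2 3 ]
  (range 2 4)                  # [ 2 3 4 ]
]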
diff --git a/tests/lang/parse-fail-dup-attrs-1.nix b/tests/lang/parse-fail-dup-attrs-1.nix
new file mode 100644
index 000000000000..2c02317d2a19
--- /dev/null
+++ b/tests/lang/parse-fail-dup-attrs-1.nix
@@ -0,0 +1,4 @@
+{ x = 123;
+  y = 456;
+  x = 789;
+}
diff --git a/tests/lang/parse-fail-dup-attrs-2.nix b/tests/lang/parse-fail-dup-attrs-2.nix
new file mode 100644
index 000000000000..864d9865e07d
--- /dev/null
+++ b/tests/lang/parse-fail-dup-attrs-2.nix
@@ -0,0 +1,13 @@
+let {
+
+  as = {
+    x = 123;
+    y = 456;
+  };
+
+  bs = {
+    x = 789;
+    inherit (as) x;
+  };
+  
+}
diff --git a/tests/lang/parse-fail-dup-attrs-3.nix b/tests/lang/parse-fail-dup-attrs-3.nix
new file mode 100644
index 000000000000..114d19779f86
--- /dev/null
+++ b/tests/lang/parse-fail-dup-attrs-3.nix
@@ -0,0 +1,13 @@
+let {
+
+  as = {
+    x = 123;
+    y = 456;
+  };
+
+  bs = rec {
+    x = 789;
+    inherit (as) x;
+  };
+  
+}
diff --git a/tests/lang/parse-fail-dup-attrs-4.nix b/tests/lang/parse-fail-dup-attrs-4.nix
new file mode 100644
index 000000000000..77417432b347
--- /dev/null
+++ b/tests/lang/parse-fail-dup-attrs-4.nix
@@ -0,0 +1,4 @@
+{
+  services.ssh.port = 22;
+  services.ssh.port = 23;
+}
diff --git a/tests/lang/parse-fail-dup-attrs-6.nix b/tests/lang/parse-fail-dup-attrs-6.nix
new file mode 100644
index 000000000000..ae6d7a769305
--- /dev/null
+++ b/tests/lang/parse-fail-dup-attrs-6.nix
@@ -0,0 +1,4 @@
+{
+  services.ssh.port = 23;
+  services.ssh = { enable = true; };
+}
diff --git a/tests/lang/parse-fail-dup-attrs-7.nix b/tests/lang/parse-fail-dup-attrs-7.nix
new file mode 100644
index 000000000000..bbc3eb08c0f6
--- /dev/null
+++ b/tests/lang/parse-fail-dup-attrs-7.nix
@@ -0,0 +1,9 @@
+rec {
+
+  x = 1;
+
+  as = {
+    inherit x;
+    inherit x;
+  };
+}
\ No newline at end of file
diff --git a/tests/lang/parse-fail-dup-formals.nix b/tests/lang/parse-fail-dup-formals.nix
new file mode 100644
index 000000000000..a0edd91a9666
--- /dev/null
+++ b/tests/lang/parse-fail-dup-formals.nix
@@ -0,0 +1 @@
+{x, y, x}: x
\ No newline at end of file
diff --git a/tests/lang/parse-fail-patterns-1.nix b/tests/lang/parse-fail-patterns-1.nix
new file mode 100644
index 000000000000..7b40616417b8
--- /dev/null
+++ b/tests/lang/parse-fail-patterns-1.nix
@@ -0,0 +1 @@
+args@{args, x, y, z}: x
diff --git a/tests/lang/parse-fail-regression-20060610.nix b/tests/lang/parse-fail-regression-20060610.nix
new file mode 100644
index 000000000000..b1934f7e1e82
--- /dev/null
+++ b/tests/lang/parse-fail-regression-20060610.nix
@@ -0,0 +1,11 @@
+let {
+  x =
+    {gcc}:
+    {
+      inherit gcc;
+    };
+
+  body = ({
+    inherit gcc;
+  }).gcc;
+}
diff --git a/tests/lang/parse-fail-undef-var-2.nix b/tests/lang/parse-fail-undef-var-2.nix
new file mode 100644
index 000000000000..c10a52b1ea42
--- /dev/null
+++ b/tests/lang/parse-fail-undef-var-2.nix
@@ -0,0 +1,7 @@
+let {
+
+  f = {x, y : ["baz" "bar" z "bat"]}: x + y;
+
+  body = f {x = "foo"; y = "bar";};
+
+}
diff --git a/tests/lang/parse-fail-undef-var.nix b/tests/lang/parse-fail-undef-var.nix
new file mode 100644
index 000000000000..7b63008110db
--- /dev/null
+++ b/tests/lang/parse-fail-undef-var.nix
@@ -0,0 +1 @@
+x: y
diff --git a/tests/lang/parse-okay-1.nix b/tests/lang/parse-okay-1.nix
new file mode 100644
index 000000000000..23a58ed109b1
--- /dev/null
+++ b/tests/lang/parse-okay-1.nix
@@ -0,0 +1 @@
+{x, y, z}: x + y + z
diff --git a/tests/lang/parse-okay-crlf.nix b/tests/lang/parse-okay-crlf.nix
new file mode 100644
index 000000000000..21518d4c6d80
--- /dev/null
+++ b/tests/lang/parse-okay-crlf.nix
@@ -0,0 +1,17 @@
+rec {
+
+  /* Dit is

+  een test. */
+
+  x = 
+  # Dit is een test.
y;
+  
+  y = 123;

+
+  # CR or CR/LF (but not explicit \r's) in strings should be
+  # translated to LF.
+  foo = "multi
line

+  string
+  test\r";
+
+  z = 456;
}
diff --git a/tests/lang/parse-okay-dup-attrs-5.nix b/tests/lang/parse-okay-dup-attrs-5.nix
new file mode 100644
index 000000000000..f4b9efd0c596
--- /dev/null
+++ b/tests/lang/parse-okay-dup-attrs-5.nix
@@ -0,0 +1,4 @@
+{
+  services.ssh = { enable = true; };
+  services.ssh.port = 23;
+}
diff --git a/tests/lang/parse-okay-regression-20041027.nix b/tests/lang/parse-okay-regression-20041027.nix
new file mode 100644
index 000000000000..ae2e256eeaaa
--- /dev/null
+++ b/tests/lang/parse-okay-regression-20041027.nix
@@ -0,0 +1,11 @@
+{stdenv, fetchurl /* pkgconfig, libX11 */ }:
+
+stdenv.mkDerivation {
+  name = "libXi-6.0.1";
+  src = fetchurl {
+    url = http://freedesktop.org/~xlibs/release/libXi-6.0.1.tar.bz2;
+    md5 = "7e935a42428d63a387b3c048be0f2756";
+  };
+/*  buildInputs = [pkgconfig];
+  propagatedBuildInputs = [libX11]; */
+}
diff --git a/tests/lang/parse-okay-subversion.nix b/tests/lang/parse-okay-subversion.nix
new file mode 100644
index 000000000000..356272815d26
--- /dev/null
+++ b/tests/lang/parse-okay-subversion.nix
@@ -0,0 +1,43 @@
+{ localServer ? false
+, httpServer ? false
+, sslSupport ? false
+, pythonBindings ? false
+, javaSwigBindings ? false
+, javahlBindings ? false
+, stdenv, fetchurl
+, openssl ? null, httpd ? null, db4 ? null, expat, swig ? null, j2sdk ? null
+}:
+
+assert expat != null;
+assert localServer -> db4 != null;
+assert httpServer -> httpd != null && httpd.expat == expat;
+assert sslSupport -> openssl != null && (httpServer -> httpd.openssl == openssl);
+assert pythonBindings -> swig != null && swig.pythonSupport;
+assert javaSwigBindings -> swig != null && swig.javaSupport;
+assert javahlBindings -> j2sdk != null;
+
+stdenv.mkDerivation {
+  name = "subversion-1.1.1";
+
+  builder = /foo/bar;
+  src = fetchurl {
+    url = http://subversion.tigris.org/tarballs/subversion-1.1.1.tar.bz2;
+    md5 = "a180c3fe91680389c210c99def54d9e0";
+  };
+
+  # This is a hopefully temporary fix for the problem that
+  # libsvnjavahl.so isn't linked against libstdc++, which causes
+  # loading the library into the JVM to fail.
+  patches = if javahlBindings then [/javahl.patch] else [];
+
+  openssl = if sslSupport then openssl else null;
+  httpd = if httpServer then httpd else null;
+  db4 = if localServer then db4 else null;
+  swig = if pythonBindings || javaSwigBindings then swig else null;
+  python = if pythonBindings then swig.python else null;
+  j2sdk = if javaSwigBindings then swig.j2sdk else
+          if javahlBindings then j2sdk else null;
+
+  inherit expat localServer httpServer sslSupport
+          pythonBindings javaSwigBindings javahlBindings;
+}
diff --git a/tests/lang/parse-okay-url.nix b/tests/lang/parse-okay-url.nix
new file mode 100644
index 000000000000..fce3b13ee64b
--- /dev/null
+++ b/tests/lang/parse-okay-url.nix
@@ -0,0 +1,7 @@
+[ x:x
+  https://svn.cs.uu.nl:12443/repos/trace/trunk
+  http://www2.mplayerhq.hu/MPlayer/releases/fonts/font-arial-iso-8859-1.tar.bz2
+  http://losser.st-lab.cs.uu.nl/~armijn/.nix/gcc-3.3.4-static-nix.tar.gz
+  http://fpdownload.macromedia.com/get/shockwave/flash/english/linux/7.0r25/install_flash_player_7_linux.tar.gz
+  ftp://ftp.gtk.org/pub/gtk/v1.2/gtk+-1.2.10.tar.gz
+]
diff --git a/tests/local.mk b/tests/local.mk
new file mode 100644
index 000000000000..65aa12637055
--- /dev/null
+++ b/tests/local.mk
@@ -0,0 +1,22 @@
+check:
+	@echo "Warning: Nix has no 'make check'. Please install Nix and run 'make installcheck' instead."
+
+nix_tests = \
+  init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \
+  build-hook.sh substitutes.sh substitutes2.sh \
+  fallback.sh nix-push.sh gc.sh gc-concurrent.sh nix-pull.sh \
+  referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
+  gc-runtime.sh install-package.sh check-refs.sh filter-source.sh \
+  remote-store.sh export.sh export-graph.sh negative-caching.sh \
+  binary-patching.sh timeout.sh secure-drv-outputs.sh nix-channel.sh \
+  multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \
+  binary-cache.sh nix-profile.sh repair.sh dump-db.sh case-hack.sh
+  # parallel.sh
+
+install-tests += $(foreach x, $(nix_tests), tests/$(x))
+
+tests-environment = NIX_REMOTE= $(bash) -e
+
+clean-files += $(d)/common.sh
+
+installcheck: $(d)/common.sh
diff --git a/tests/logging.sh b/tests/logging.sh
new file mode 100644
index 000000000000..0113ed11c401
--- /dev/null
+++ b/tests/logging.sh
@@ -0,0 +1,25 @@
+source common.sh
+
+clearStore
+
+# Produce an escaped log file.
+set -x
+path=$(nix-build --log-type escapes -vv dependencies.nix --no-out-link 2> $TEST_ROOT/log.esc)
+
+# Convert it to an XML representation.
+nix-log2xml < $TEST_ROOT/log.esc > $TEST_ROOT/log.xml
+
+# Is this well-formed XML?
+if test "$xmllint" != "false"; then
+    $xmllint --noout $TEST_ROOT/log.xml || fail "malformed XML"
+fi
+
+# Test nix-store -l.
+[ "$(nix-store -l $path)" = FOO ]
+
+# Test compressed logs.
+clearStore
+rm -rf $NIX_LOG_DIR
+! nix-store -l $path
+nix-build dependencies.nix --no-out-link --option build-compress-log true
+[ "$(nix-store -l $path)" = FOO ]
diff --git a/tests/misc.sh b/tests/misc.sh
new file mode 100644
index 000000000000..1b4d8f2cfc8e
--- /dev/null
+++ b/tests/misc.sh
@@ -0,0 +1,16 @@
+source common.sh
+
+# Tests miscellaneous commands.
+
+# Do all commands have help?
+#nix-env --help | grep -q install
+#nix-store --help | grep -q realise
+#nix-instantiate --help | grep -q eval
+#nix-hash --help | grep -q base32
+
+# Can we ask for the version number?
+nix-env --version | grep "$version"
+
+# Usage errors.
+nix-env --foo 2>&1 | grep "no operation"
+nix-env -q --foo 2>&1 | grep "unknown flag"
diff --git a/tests/multiple-outputs.nix b/tests/multiple-outputs.nix
new file mode 100644
index 000000000000..4a9010d1868e
--- /dev/null
+++ b/tests/multiple-outputs.nix
@@ -0,0 +1,68 @@
+with import ./config.nix;
+
+rec {
+
+  a = mkDerivation {
+    name = "multiple-outputs-a";
+    outputs = [ "first" "second" ];
+    builder = builtins.toFile "builder.sh"
+      ''
+        mkdir $first $second
+        test -z $all
+        echo "first" > $first/file
+        echo "second" > $second/file
+        ln -s $first $second/link
+      '';
+    helloString = "Hello, world!";
+  };
+
+  b = mkDerivation {
+    defaultOutput = assert a.second.helloString == "Hello, world!"; a;
+    firstOutput = assert a.outputName == "first"; a.first.first;
+    secondOutput = assert a.second.outputName == "second"; a.second.first.first.second.second.first.second;
+    allOutputs = a.all;
+    name = "multiple-outputs-b";
+    builder = builtins.toFile "builder.sh"
+      ''
+        mkdir $out
+        test "$firstOutput $secondOutput" = "$allOutputs"
+        test "$defaultOutput" = "$firstOutput"
+        test "$(cat $firstOutput/file)" = "first"
+        test "$(cat $secondOutput/file)" = "second"
+        echo "success" > $out/file
+      '';
+  };
+
+  c = mkDerivation {
+    name = "multiple-outputs-c";
+    drv = b.drvPath;
+    builder = builtins.toFile "builder.sh"
+      ''
+        mkdir $out
+        ln -s $drv $out/drv
+      '';
+  };
+
+  d = mkDerivation {
+    name = "multiple-outputs-d";
+    drv = builtins.unsafeDiscardOutputDependency b.drvPath;
+    builder = builtins.toFile "builder.sh"
+      ''
+        mkdir $out
+        echo $drv > $out/drv
+      '';
+  };
+
+  cyclic = (mkDerivation {
+    name = "cyclic-outputs";
+    outputs = [ "a" "b" "c" ];
+    builder = builtins.toFile "builder.sh"
+      ''
+        mkdir $a $b $c
+        echo $a > $b/foo
+        echo $b > $c/bar
+        echo $c > $a/baz
+      '';
+  }).a;
+
+}
diff --git a/tests/multiple-outputs.sh b/tests/multiple-outputs.sh
new file mode 100644
index 000000000000..ac622a7b4eaf
--- /dev/null
+++ b/tests/multiple-outputs.sh
@@ -0,0 +1,63 @@
+source common.sh
+
+clearStore
+
+# Test whether read-only evaluation works when referring to the
+# ‘drvPath’ attribute.
+echo "evaluating c..."
+#drvPath=$(nix-instantiate multiple-outputs.nix -A c --readonly-mode)
+
+# And check whether the resulting derivation explicitly depends on all
+# outputs.
+drvPath=$(nix-instantiate multiple-outputs.nix -A c)
+#[ "$drvPath" = "$drvPath2" ]
+grep -q 'multiple-outputs-a.drv",\["first","second"\]' $drvPath
+grep -q 'multiple-outputs-b.drv",\["out"\]' $drvPath
+
+# While we're at it, test the ‘unsafeDiscardOutputDependency’ primop.
+outPath=$(nix-build multiple-outputs.nix -A d --no-out-link)
+drvPath=$(cat $outPath/drv)
+outPath=$(nix-store -q $drvPath)
+! [ -e "$outPath" ]
+
+# Do a build of something that depends on a derivation with multiple
+# outputs.
+echo "building b..."
+outPath=$(nix-build multiple-outputs.nix -A b --no-out-link)
+echo "output path is $outPath"
+[ "$(cat "$outPath"/file)" = "success" ]
+
+# Test nix-build on a derivation with multiple outputs.
+nix-build multiple-outputs.nix -A a -o $TEST_ROOT/result
+[ -e $TEST_ROOT/result-first ]
+! [ -e $TEST_ROOT/result-second ]
+nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result
+[ "$(cat $TEST_ROOT/result-first/file)" = "first" ]
+[ "$(cat $TEST_ROOT/result-second/file)" = "second" ]
+[ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ]
+hash1=$(nix-store -q --hash $TEST_ROOT/result-second)
+
+# Delete one of the outputs and rebuild it.  This will cause a hash
+# rewrite.
+nix-store --delete $TEST_ROOT/result-second --ignore-liveness
+nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result
+[ "$(cat $TEST_ROOT/result-second/file)" = "second" ]
+[ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ]
+hash2=$(nix-store -q --hash $TEST_ROOT/result-second)
+[ "$hash1" = "$hash2" ]
+
+# Make sure that nix-build works on derivations with multiple outputs.
+echo "building a.first..."
+nix-build multiple-outputs.nix -A a.first --no-out-link
+
+# Cyclic outputs should be rejected.
+echo "building cyclic..."
+if nix-build multiple-outputs.nix -A cyclic --no-out-link; then
+    echo "Cyclic outputs incorrectly accepted!"
+    exit 1
+fi
+
+echo "collecting garbage..."
+rm $TEST_ROOT/result*
+nix-store --gc --option gc-keep-derivations true --option gc-keep-outputs true
+nix-store --gc --print-roots
diff --git a/tests/negative-caching.nix b/tests/negative-caching.nix
new file mode 100644
index 000000000000..10df67a748fc
--- /dev/null
+++ b/tests/negative-caching.nix
@@ -0,0 +1,21 @@
+with import ./config.nix;
+
+rec {
+
+  fail = mkDerivation {
+    name = "fail";
+    builder = builtins.toFile "builder.sh" "echo FAIL; exit 1";
+  };
+
+  succeed = mkDerivation {
+    name = "succeed";
+    builder = builtins.toFile "builder.sh" "echo SUCCEED; mkdir $out";
+  };
+
+  depOnFail = mkDerivation {
+    name = "dep-on-fail";
+    builder = builtins.toFile "builder.sh" "echo URGH; mkdir $out";
+    inputs = [fail succeed];
+  };
+
+}
diff --git a/tests/negative-caching.sh b/tests/negative-caching.sh
new file mode 100644
index 000000000000..4217bc38e121
--- /dev/null
+++ b/tests/negative-caching.sh
@@ -0,0 +1,22 @@
+source common.sh
+
+clearStore
+
+set +e
+
+opts="--option build-cache-failure true --print-build-trace"
+
+# This build should fail, and the failure should be cached.
+log=$(nix-build $opts negative-caching.nix -A fail --no-out-link 2>&1) && fail "should fail"
+echo "$log" | grep -q "@ build-failed" || fail "no build-failed trace"
+
+# Do it again.  The build shouldn't be tried again.
+log=$(nix-build $opts negative-caching.nix -A fail --no-out-link 2>&1) && fail "should fail"
+echo "$log" | grep -q "FAIL" && fail "failed build not cached"
+echo "$log" | grep -q "@ build-failed .* cached" || fail "trace doesn't say cached"
+
+# Check that --keep-going works properly with cached failures.
+log=$(nix-build $opts --keep-going negative-caching.nix -A depOnFail --no-out-link 2>&1) && fail "should fail"
+echo "$log" | grep -q "FAIL" && fail "failed build not cached (2)"
+echo "$log" | grep -q "@ build-failed .* cached" || fail "trace doesn't say cached (2)"
+echo "$log" | grep -q "@ build-succeeded .*-succeed" || fail "didn't keep going"
diff --git a/tests/nix-build.sh b/tests/nix-build.sh
new file mode 100644
index 000000000000..dc0e99c73621
--- /dev/null
+++ b/tests/nix-build.sh
@@ -0,0 +1,19 @@
+source common.sh
+
+clearStore
+
+nix-build dependencies.nix -o $TEST_ROOT/result
+test "$(cat $TEST_ROOT/result/foobar)" = FOOBAR
+
+# The result should be retained by a GC.
+echo A
+target=$(readLink $TEST_ROOT/result)
+echo B
+echo target is $target
+nix-store --gc
+test -e $target/foobar
+
+# But now it should be gone.
+rm $TEST_ROOT/result
+nix-store --gc
+if test -e $target/foobar; then false; fi
diff --git a/tests/nix-channel.sh b/tests/nix-channel.sh
new file mode 100644
index 000000000000..a25d56bec11e
--- /dev/null
+++ b/tests/nix-channel.sh
@@ -0,0 +1,43 @@
+source common.sh
+
+clearProfiles
+clearManifests
+
+rm -f $TEST_ROOT/.nix-channels
+
+# Override location of ~/.nix-channels.
+export HOME=$TEST_ROOT
+
+# Test add/list/remove.
+nix-channel --add http://foo/bar xyzzy
+nix-channel --list | grep -q http://foo/bar
+nix-channel --remove xyzzy
+
+[ -e $TEST_ROOT/.nix-channels ]
+[ "$(cat $TEST_ROOT/.nix-channels)" = '' ]
+
+# Create a channel.
+rm -rf $TEST_ROOT/foo
+mkdir -p $TEST_ROOT/foo
+nix-push --dest $TEST_ROOT/foo --manifest --bzip2 $(nix-store -r $(nix-instantiate dependencies.nix))
+rm -rf $TEST_ROOT/nixexprs
+mkdir -p $TEST_ROOT/nixexprs
+cp config.nix dependencies.nix dependencies.builder*.sh $TEST_ROOT/nixexprs/
+ln -s dependencies.nix $TEST_ROOT/nixexprs/default.nix
+(cd $TEST_ROOT && tar cvf - nixexprs) | bzip2 > $TEST_ROOT/foo/nixexprs.tar.bz2
+
+# Test the update action.
+nix-channel --add file://$TEST_ROOT/foo
+nix-channel --update
+
+# Do a query.
+nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml
+if [ "$xmllint" != false ]; then
+    $xmllint --noout $TEST_ROOT/meta.xml || fail "malformed XML"
+fi
+grep -q 'meta.*description.*Random test package' $TEST_ROOT/meta.xml
+grep -q 'item.*attrPath="foo".*name="dependencies"' $TEST_ROOT/meta.xml
+
+# Do an install.
+nix-env -i dependencies
+[ -e $TEST_ROOT/var/nix/profiles/default/foobar ]
diff --git a/tests/nix-copy-closure.nix b/tests/nix-copy-closure.nix
new file mode 100644
index 000000000000..1418c65897d3
--- /dev/null
+++ b/tests/nix-copy-closure.nix
@@ -0,0 +1,63 @@
+# Test ‘nix-copy-closure’.
+
+{ system, nix }:
+
+with import <nixpkgs/nixos/lib/testing.nix> { inherit system; };
+
+makeTest (let pkgA = pkgs.aterm; pkgB = pkgs.wget; pkgC = pkgs.hello; in {
+
+  nodes =
+    { client =
+        { config, pkgs, ... }:
+        { virtualisation.writableStore = true;
+          virtualisation.pathsInNixDB = [ pkgA ];
+          nix.package = nix;
+          nix.binaryCaches = [ ];
+        };
+
+      server =
+        { config, pkgs, ... }:
+        { services.openssh.enable = true;
+          virtualisation.writableStore = true;
+          virtualisation.pathsInNixDB = [ pkgB pkgC ];
+          nix.package = nix;
+        };
+    };
+
+  testScript = { nodes }:
+    ''
+      startAll;
+
+      # Create an SSH key on the client.
+      my $key = `${pkgs.openssh}/bin/ssh-keygen -t dsa -f key -N ""`;
+      $client->succeed("mkdir -m 700 /root/.ssh");
+      $client->copyFileFromHost("key", "/root/.ssh/id_dsa");
+      $client->succeed("chmod 600 /root/.ssh/id_dsa");
+
+      # Install the SSH key on the server.
+      $server->succeed("mkdir -m 700 /root/.ssh");
+      $server->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys");
+      $server->waitForUnit("sshd");
+      $client->waitForUnit("network.target");
+      $client->succeed("ssh -o StrictHostKeyChecking=no " . $server->name() . " 'echo hello world'");
+
+      # Copy the closure of package A from the client to the server.
+      $server->fail("nix-store --check-validity ${pkgA}");
+      $client->succeed("nix-copy-closure --to server --gzip ${pkgA} >&2");
+      $server->succeed("nix-store --check-validity ${pkgA}");
+
+      # Copy the closure of package B from the server to the client.
+      $client->fail("nix-store --check-validity ${pkgB}");
+      $client->succeed("nix-copy-closure --from server --gzip ${pkgB} >&2");
+      $client->succeed("nix-store --check-validity ${pkgB}");
+
+      # Copy the closure of package C via the SSH substituter.
+      $client->fail("nix-store -r ${pkgC}");
+      $client->succeed(
+        "nix-store --option use-ssh-substituter true"
+        . " --option ssh-substituter-hosts root\@server"
+        . " -r ${pkgC} >&2");
+      $client->succeed("nix-store --check-validity ${pkgC}");
+    '';
+
+})
diff --git a/tests/nix-profile.sh b/tests/nix-profile.sh
new file mode 100644
index 000000000000..3586a7efc3c8
--- /dev/null
+++ b/tests/nix-profile.sh
@@ -0,0 +1,10 @@
+source common.sh
+
+home=$TEST_ROOT/home
+rm -rf $home
+mkdir -p $home
+HOME=$home $SHELL -e -c ". ../scripts/nix-profile.sh"
+HOME=$home $SHELL -e -c ". ../scripts/nix-profile.sh" # test idempotency
+
+[ -L $home/.nix-profile ]
+[ -e $home/.nix-channels ]
diff --git a/tests/nix-pull.sh b/tests/nix-pull.sh
new file mode 100644
index 000000000000..87239948c481
--- /dev/null
+++ b/tests/nix-pull.sh
@@ -0,0 +1,33 @@
+source common.sh
+
+pullCache () {
+    echo "pulling cache..."
+    nix-pull file://$TEST_ROOT/cache/MANIFEST
+}
+
+clearStore
+clearManifests
+pullCache
+
+drvPath=$(nix-instantiate dependencies.nix)
+outPath=$(nix-store -q $drvPath)
+
+echo "building $outPath using substitutes..."
+nix-store -r $outPath
+
+cat $outPath/input-2/bar
+
+clearStore
+clearManifests
+pullCache
+
+echo "building $drvPath using substitutes..."
+nix-store -r $drvPath
+
+cat $outPath/input-2/bar
+
+# Check that the derivers are set properly.
+test $(nix-store -q --deriver "$outPath") = "$drvPath"
+nix-store -q --deriver $(readLink $outPath/input-2) | grep -q -- "-input-2.drv"
+
+clearManifests
diff --git a/tests/nix-push.sh b/tests/nix-push.sh
new file mode 100644
index 000000000000..8ea59516c62c
--- /dev/null
+++ b/tests/nix-push.sh
@@ -0,0 +1,12 @@
+source common.sh
+
+clearStore
+
+drvPath=$(nix-instantiate dependencies.nix)
+outPath=$(nix-store -r $drvPath)
+
+echo "pushing $drvPath"
+
+mkdir -p $TEST_ROOT/cache
+
+nix-push --dest $TEST_ROOT/cache --manifest $drvPath --bzip2
diff --git a/tests/optimise-store.sh b/tests/optimise-store.sh
new file mode 100644
index 000000000000..ea4478693e78
--- /dev/null
+++ b/tests/optimise-store.sh
@@ -0,0 +1,43 @@
+source common.sh
+
+clearStore
+
+outPath1=$(echo 'with import ./config.nix; mkDerivation { name = "foo1"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --option auto-optimise-store true)
+outPath2=$(echo 'with import ./config.nix; mkDerivation { name = "foo2"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --option auto-optimise-store true)
+
+inode1="$(perl -e "print ((lstat('$outPath1/foo'))[1])")"
+inode2="$(perl -e "print ((lstat('$outPath2/foo'))[1])")"
+if [ "$inode1" != "$inode2" ]; then
+    echo "inodes do not match"
+    exit 1
+fi
+
+nlink="$(perl -e "print ((lstat('$outPath1/foo'))[3])")"
+if [ "$nlink" != 3 ]; then
+    echo "link count incorrect"
+    exit 1
+fi
+
+outPath3=$(echo 'with import ./config.nix; mkDerivation { name = "foo3"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link)
+
+inode3="$(perl -e "print ((lstat('$outPath3/foo'))[1])")"
+if [ "$inode1" = "$inode3" ]; then
+    echo "inodes match unexpectedly"
+    exit 1
+fi
+
+nix-store --optimise
+
+inode1="$(perl -e "print ((lstat('$outPath1/foo'))[1])")"
+inode3="$(perl -e "print ((lstat('$outPath3/foo'))[1])")"
+if [ "$inode1" != "$inode3" ]; then
+    echo "inodes do not match"
+    exit 1
+fi
+
+nix-store --gc
+
+if [ -n "$(ls $NIX_STORE_DIR/.links)" ]; then
+    echo ".links directory not empty after GC"
+    exit 1
+fi
diff --git a/tests/parallel.builder.sh b/tests/parallel.builder.sh
new file mode 100644
index 000000000000..d092bc5a6bd4
--- /dev/null
+++ b/tests/parallel.builder.sh
@@ -0,0 +1,29 @@
+echo "DOING $text"
+
+
+# increase counter
+while ! ln -s x $shared.lock 2> /dev/null; do
+    sleep 1
+done
+test -f $shared.cur || echo 0 > $shared.cur
+test -f $shared.max || echo 0 > $shared.max
+new=$(($(cat $shared.cur) + 1))
+if test $new -gt $(cat $shared.max); then
+    echo $new > $shared.max
+fi
+echo $new > $shared.cur
+rm $shared.lock
+
+
+echo -n $(cat $inputs)$text > $out
+
+sleep $sleepTime
+
+
+# decrease counter
+while ! ln -s x $shared.lock 2> /dev/null; do
+    sleep 1
+done
+test -f $shared.cur || echo 0 > $shared.cur
+echo $(($(cat $shared.cur) - 1)) > $shared.cur
+rm $shared.lock
diff --git a/tests/parallel.nix b/tests/parallel.nix
new file mode 100644
index 000000000000..23f142059f58
--- /dev/null
+++ b/tests/parallel.nix
@@ -0,0 +1,19 @@
+{sleepTime ? 3}:
+
+with import ./config.nix;
+
+let
+
+  mkDrv = text: inputs: mkDerivation {
+    name = "parallel";
+    builder = ./parallel.builder.sh;
+    inherit text inputs shared sleepTime;
+  };
+
+  a = mkDrv "a" [];
+  b = mkDrv "b" [a];
+  c = mkDrv "c" [a];
+  d = mkDrv "d" [a];
+  e = mkDrv "e" [b c d];
+
+in e
diff --git a/tests/parallel.sh b/tests/parallel.sh
new file mode 100644
index 000000000000..3b7bbe5a2251
--- /dev/null
+++ b/tests/parallel.sh
@@ -0,0 +1,56 @@
+source common.sh
+
+
+# First, test that -jN performs builds in parallel.
+echo "testing nix-build -j..."
+
+clearStore
+
+rm -f $_NIX_TEST_SHARED.cur $_NIX_TEST_SHARED.max
+
+outPath=$(nix-build -j10000 parallel.nix --no-out-link)
+
+echo "output path is $outPath"
+
+text=$(cat "$outPath")
+if test "$text" != "abacade"; then exit 1; fi
+
+if test "$(cat $_NIX_TEST_SHARED.cur)" != 0; then fail "wrong current process count"; fi
+if test "$(cat $_NIX_TEST_SHARED.max)" != 3; then fail "not enough parallelism"; fi
+
+
+# Second, test that parallel invocations of nix-build perform builds
+# in parallel, and don't block waiting on locks held by the others.
+echo "testing multiple nix-build -j1..."
+
+clearStore
+
+rm -f $_NIX_TEST_SHARED.cur $_NIX_TEST_SHARED.max
+
+drvPath=$(nix-instantiate parallel.nix --argstr sleepTime 15)
+
+cmd="nix-store -j1 -r $drvPath"
+
+$cmd &
+pid1=$!
+echo "pid 1 is $pid1"
+
+$cmd &
+pid2=$!
+echo "pid 2 is $pid2"
+
+$cmd &
+pid3=$!
+echo "pid 3 is $pid3"
+
+$cmd &
+pid4=$!
+echo "pid 4 is $pid4"
+
+wait $pid1 || fail "instance 1 failed: $?"
+wait $pid2 || fail "instance 2 failed: $?"
+wait $pid3 || fail "instance 3 failed: $?"
+wait $pid4 || fail "instance 4 failed: $?"
+
+if test "$(cat $_NIX_TEST_SHARED.cur)" != 0; then fail "wrong current process count"; fi
+if test "$(cat $_NIX_TEST_SHARED.max)" != 3; then fail "not enough parallelism"; fi
diff --git a/tests/referrers.sh b/tests/referrers.sh
new file mode 100644
index 000000000000..0a1c86e0ab19
--- /dev/null
+++ b/tests/referrers.sh
@@ -0,0 +1,36 @@
+source common.sh
+
+clearStore
+
+max=500
+
+reference=$NIX_STORE_DIR/abcdef
+touch $reference
+(echo $reference && echo && echo 0) | nix-store --register-validity 
+
+echo "making registration..."
+
+set +x
+for ((n = 0; n < $max; n++)); do
+    storePath=$NIX_STORE_DIR/$n
+    echo -n > $storePath
+    ref2=$NIX_STORE_DIR/$((n+1))
+    if test $((n+1)) = $max; then
+        ref2=$reference
+    fi
+    echo $storePath; echo; echo 2; echo $reference; echo $ref2
+done > $TEST_ROOT/reg_info
+set -x
+
+echo "registering..."
+
+nix-store --register-validity < $TEST_ROOT/reg_info
+
+echo "collecting garbage..."
+ln -sfn $reference "$NIX_STATE_DIR"/gcroots/ref
+nix-store --gc
+
+if [ -n "$(type -p sqlite3)" -a "$(sqlite3 ./test-tmp/db/db.sqlite 'select count(*) from Refs')" -ne 0 ]; then
+    echo "referrers not cleaned up"
+    exit 1
+fi
diff --git a/tests/remote-builds.nix b/tests/remote-builds.nix
new file mode 100644
index 000000000000..81b81b87c115
--- /dev/null
+++ b/tests/remote-builds.nix
@@ -0,0 +1,103 @@
+# Test Nix's remote build feature.
+
+{ system, nix }:
+
+with import <nixpkgs/nixos/lib/testing.nix> { inherit system; };
+
+makeTest (
+
+let
+
+  # The configuration of the build slaves.
+  slave =
+    { config, pkgs, ... }:
+    { services.openssh.enable = true;
+      virtualisation.writableStore = true;
+      environment.nix = nix;
+    };
+
+  # Trivial Nix expression to build remotely.
+  expr = config: nr: pkgs.writeText "expr.nix"
+    ''
+      let utils = builtins.storePath ${config.system.build.extraUtils}; in
+      derivation {
+        name = "hello-${toString nr}";
+        system = "i686-linux";
+        PATH = "''${utils}/bin";
+        builder = "''${utils}/bin/sh";
+        args = [ "-c" "if [ ${toString nr} = 5 ]; then echo FAIL; exit 1; fi; echo Hello; mkdir $out $foo; cat /proc/sys/kernel/hostname > $out/host; ln -s $out $foo/bar; sleep 5" ];
+        outputs = [ "out" "foo" ];
+      }
+    '';
+
+in
+
+{
+
+  nodes =
+    { slave1 = slave;
+      slave2 = slave;
+
+      client =
+        { config, pkgs, ... }:
+        { nix.maxJobs = 0; # force remote building
+          nix.distributedBuilds = true;
+          nix.buildMachines =
+            [ { hostName = "slave1";
+                sshUser = "root";
+                sshKey = "/root/.ssh/id_dsa";
+                system = "i686-linux";
+                maxJobs = 1;
+              }
+              { hostName = "slave2";
+                sshUser = "root";
+                sshKey = "/root/.ssh/id_dsa";
+                system = "i686-linux";
+                maxJobs = 1;
+              }
+            ];
+          virtualisation.writableStore = true;
+          virtualisation.pathsInNixDB = [ config.system.build.extraUtils ];
+          nix.package = nix;
+          nix.binaryCaches = [ ];
+        };
+    };
+
+  testScript = { nodes }:
+    ''
+      startAll;
+
+      # Create an SSH key on the client.
+      my $key = `${pkgs.openssh}/bin/ssh-keygen -t dsa -f key -N ""`;
+      $client->succeed("mkdir -m 700 /root/.ssh");
+      $client->copyFileFromHost("key", "/root/.ssh/id_dsa");
+      $client->succeed("chmod 600 /root/.ssh/id_dsa");
+
+      # Install the SSH key on the slaves.
+      $client->waitForUnit("network.target");
+      foreach my $slave ($slave1, $slave2) {
+          $slave->succeed("mkdir -m 700 /root/.ssh");
+          $slave->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys");
+          $slave->waitForUnit("sshd");
+          $client->succeed("ssh -o StrictHostKeyChecking=no " . $slave->name() . " 'echo hello world'");
+      }
+
+      # Perform a build and check that it was performed on the slave.
+      my $out = $client->succeed("nix-build ${expr nodes.client.config 1}");
+      $slave1->succeed("test -e $out");
+
+      # And a parallel build.
+      my ($out1, $out2) = split /\s/,
+          $client->succeed('nix-store -r $(nix-instantiate ${expr nodes.client.config 2})\!out $(nix-instantiate ${expr nodes.client.config 3})\!out');
+      $slave1->succeed("test -e $out1 -o -e $out2");
+      $slave2->succeed("test -e $out1 -o -e $out2");
+
+      # And a failing build.
+      $client->fail("nix-build ${expr nodes.client.config 5}");
+
+      # Test whether the build hook automatically skips unavailable slaves.
+      $slave1->block;
+      $client->succeed("nix-build ${expr nodes.client.config 4}");
+    '';
+
+})
diff --git a/tests/remote-store.sh b/tests/remote-store.sh
new file mode 100644
index 000000000000..8312424f0ac6
--- /dev/null
+++ b/tests/remote-store.sh
@@ -0,0 +1,16 @@
+source common.sh
+
+clearStore
+clearManifests
+
+startDaemon
+
+$SHELL ./user-envs.sh
+
+nix-store --dump-db > $TEST_ROOT/d1
+NIX_REMOTE= nix-store --dump-db > $TEST_ROOT/d2
+cmp $TEST_ROOT/d1 $TEST_ROOT/d2
+
+nix-store --gc --max-freed 1K
+
+killDaemon
diff --git a/tests/repair.sh b/tests/repair.sh
new file mode 100644
index 000000000000..ae82b649c6ac
--- /dev/null
+++ b/tests/repair.sh
@@ -0,0 +1,65 @@
+source common.sh
+
+clearStore
+
+path=$(nix-build dependencies.nix -o $TEST_ROOT/result)
+path2=$(nix-store -qR $path | grep input-2)
+
+nix-store --verify --check-contents -v
+
+hash=$(nix-hash $path2)
+
+# Corrupt a path and check whether nix-build --repair can fix it.
+chmod u+w $path2
+touch $path2/bad
+
+if nix-store --verify --check-contents -v; then
+    echo "nix-store --verify succeeded unexpectedly" >&2
+    exit 1
+fi
+
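+# There is no substitute for the corrupted path yet, so --repair has
+# nothing to restore it from and verification should still fail.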
+if nix-store --verify --check-contents --repair; then
+    echo "nix-store --verify --repair succeeded unexpectedly" >&2
+    exit 1
+fi
+
+nix-build dependencies.nix -o $TEST_ROOT/result --repair
+
+if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then
+    echo "path not repaired properly" >&2
+    exit 1
+fi
+
+# Corrupt a path that has a substitute and check whether nix-store
+# --verify can fix it.
+clearCache
+
+nix-push --dest $cacheDir $path
+
+chmod u+w $path2
+rm -rf $path2
+
+nix-store --verify --check-contents --repair --option binary-caches "file://$cacheDir"
+
+if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then
+    echo "path not repaired properly" >&2
+    exit 1
+fi
+
+# Check --verify-path and --repair-path.
+nix-store --verify-path $path2
+
+chmod u+w $path2
+rm -rf $path2
+
+if nix-store --verify-path $path2; then
+    echo "nix-store --verify-path succeeded unexpectedly" >&2
+    exit 1
+fi
+
+nix-store --repair-path $path2 --option binary-caches "file://$cacheDir"
+
+if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then
+    echo "path not repaired properly" >&2
+    exit 1
+fi
diff --git a/tests/secure-drv-outputs.nix b/tests/secure-drv-outputs.nix
new file mode 100644
index 000000000000..b4ac8ff531f8
--- /dev/null
+++ b/tests/secure-drv-outputs.nix
@@ -0,0 +1,23 @@
+with import ./config.nix;
+
+{
+
+  good = mkDerivation {
+    name = "good";
+    builder = builtins.toFile "builder"
+      ''
+        mkdir $out
+        echo > $out/good
+      '';
+  };
+
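+  # Also named "good" on purpose: secure-drv-outputs.sh rewrites this
+  # derivation's output path to collide with the good derivation's.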
+  bad = mkDerivation {
+    name = "good";
+    builder = builtins.toFile "builder"
+      ''
+        mkdir $out
+        echo > $out/bad
+      '';
+  };
+
+}
diff --git a/tests/secure-drv-outputs.sh b/tests/secure-drv-outputs.sh
new file mode 100644
index 000000000000..4888123da910
--- /dev/null
+++ b/tests/secure-drv-outputs.sh
@@ -0,0 +1,37 @@
+# Test that users cannot register specially-crafted derivations that
+# produce output paths belonging to other derivations.  This could be
+# used to inject malware into the store.
+
+source common.sh
+
+clearStore
+clearManifests
+
+startDaemon
+
+# Determine the output path of the "good" derivation.
+goodOut=$(nix-store -q $(nix-instantiate ./secure-drv-outputs.nix -A good))
+
+# Instantiate the "bad" derivation.
+badDrv=$(nix-instantiate ./secure-drv-outputs.nix -A bad)
+badOut=$(nix-store -q $badDrv)
+
+# Rewrite the bad derivation to produce the output path of the good
+# derivation.
+rm -f $TEST_ROOT/bad.drv
+sed -e "s|$badOut|$goodOut|g" < $badDrv > $TEST_ROOT/bad.drv
+
+# Add the manipulated derivation to the store and try to build it.
+# This should fail; even if it somehow succeeds, it must not
+# overwrite the good derivation's output (checked below).
+if badDrv2=$(nix-store --add $TEST_ROOT/bad.drv); then
+    nix-store -r "$badDrv2"
+fi
+
+# Now build the good derivation.
+goodOut2=$(nix-build ./secure-drv-outputs.nix -A good --no-out-link)
+test "$goodOut" = "$goodOut2"
+
+if ! test -e "$goodOut"/good; then
+    echo "Bad derivation stole the output path of the good derivation!"
+    exit 1
+fi
diff --git a/tests/simple.builder.sh b/tests/simple.builder.sh
new file mode 100644
index 000000000000..569e8ca88c1e
--- /dev/null
+++ b/tests/simple.builder.sh
@@ -0,0 +1,11 @@
+echo "PATH=$PATH"
+
+# Verify that the PATH is empty.
+if mkdir foo 2> /dev/null; then exit 1; fi
+
+# Set a PATH (!!! impure).
+export PATH=$goodPath
+
+mkdir $out
+
+echo "Hello World!" > $out/hello
\ No newline at end of file
diff --git a/tests/simple.nix b/tests/simple.nix
new file mode 100644
index 000000000000..4223c0f23a5b
--- /dev/null
+++ b/tests/simple.nix
@@ -0,0 +1,8 @@
+with import ./config.nix;
+
+mkDerivation {
+  name = "simple";
+  builder = ./simple.builder.sh;
+  PATH = "";
+  goodPath = path;
+}
diff --git a/tests/simple.sh b/tests/simple.sh
new file mode 100644
index 000000000000..af8bccc2b457
--- /dev/null
+++ b/tests/simple.sh
@@ -0,0 +1,25 @@
+source common.sh
+
+drvPath=$(nix-instantiate simple.nix)
+
+test "$(nix-store -q --binding system "$drvPath")" = "$system"
+
+echo "derivation is $drvPath"
+
+outPath=$(nix-store -rvv "$drvPath")
+
+echo "output path is $outPath"
+
+text=$(cat "$outPath"/hello)
+if test "$text" != "Hello World!"; then exit 1; fi
+
+# Directed delete: $outPath is not reachable from a root, so it should
+# be deletable.
+nix-store --delete $outPath
+if test -e $outPath/hello; then false; fi
+
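+# Instantiating in read-only mode with a different store directory
+# should produce a fixed, known derivation path (this exercises
+# hashDerivationModulo).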
+outPath="$(NIX_STORE_DIR=/foo nix-instantiate --readonly-mode hash-check.nix)"
+if test "$outPath" != "/foo/lfy1s6ca46rm5r6w4gg9hc0axiakjcnm-dependencies.drv"; then
+    echo "hashDerivationModulo appears broken, got $outPath"
+    exit 1
+fi
diff --git a/tests/substituter.sh b/tests/substituter.sh
new file mode 100755
index 000000000000..9aab295de87b
--- /dev/null
+++ b/tests/substituter.sh
@@ -0,0 +1,37 @@
+#! /bin/sh -e
+echo
+echo substituter args: $* >&2
+
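+# Reply format used by this stub: for ‘have’, echo each path listed
+# in $TEST_ROOT/sub-paths, then an empty line; for ‘info’, echo per
+# path: the path, its deriver (empty), the number of references (zero
+# here), the download size and the NAR size, then an empty line.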
+if test $1 = "--query"; then
+    while read cmd args; do
+        echo "CMD = $cmd, ARGS = $args" >&2
+        if test "$cmd" = "have"; then
+            for path in $args; do
+                read path
+                if grep -q "$path" $TEST_ROOT/sub-paths; then
+                    echo $path
+                fi
+            done
+            echo
+        elif test "$cmd" = "info"; then
+            for path in $args; do
+                echo $path
+                echo "" # deriver
+                echo 0 # nr of refs
+                echo $((1 * 1024 * 1024)) # download size
+                echo $((2 * 1024 * 1024)) # nar size
+            done
+            echo
+        else
+            echo "bad command $cmd"
+            exit 1
+        fi
+    done
+elif test $1 = "--substitute"; then
+    mkdir $2
+    echo "Hallo Wereld" > $2/hello
+    echo # no expected hash
+else
+    echo "unknown substituter operation"
+    exit 1
+fi
diff --git a/tests/substituter2.sh b/tests/substituter2.sh
new file mode 100755
index 000000000000..5d1763599c25
--- /dev/null
+++ b/tests/substituter2.sh
@@ -0,0 +1,33 @@
+#! /bin/sh -e
+echo
+echo substituter2 args: $* >&2
+
+if test $1 = "--query"; then
+    while read cmd args; do
+        if test "$cmd" = have; then
+            for path in $args; do
+                if grep -q "$path" $TEST_ROOT/sub-paths; then
+                    echo $path
+                fi
+            done
+            echo
+        elif test "$cmd" = info; then
+            for path in $args; do
+                echo $path
+                echo "" # deriver
+                echo 0 # nr of refs
+                echo 0 # download size
+                echo 0 # nar size
+            done
+            echo
+        else
+            echo "bad command $cmd"
+            exit 1
+        fi
+    done
+elif test $1 = "--substitute"; then
+    exit 1
+else
+    echo "unknown substituter operation"
+    exit 1
+fi
diff --git a/tests/substitutes.sh b/tests/substitutes.sh
new file mode 100644
index 000000000000..0c6adf2601fa
--- /dev/null
+++ b/tests/substitutes.sh
@@ -0,0 +1,22 @@
+source common.sh
+
+clearStore
+
+# Instantiate.
+drvPath=$(nix-instantiate simple.nix)
+echo "derivation is $drvPath"
+
+# Find the output path.
+outPath=$(nix-store -qvv "$drvPath")
+echo "output path is $outPath"
+
+echo $outPath > $TEST_ROOT/sub-paths
+
+export NIX_SUBSTITUTERS=$(pwd)/substituter.sh
+
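+# The dry run should report the download size (1 MiB) and NAR size
+# (2 MiB) advertised by the substituter stub.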
+nix-store -r "$drvPath" --dry-run 2>&1 | grep -q "1.00 MiB.*2.00 MiB"
+
+nix-store -rvv "$drvPath"
+
+text=$(cat "$outPath"/hello)
+if test "$text" != "Hallo Wereld"; then echo "wrong substitute output: $text"; exit 1; fi
diff --git a/tests/substitutes2.sh b/tests/substitutes2.sh
new file mode 100644
index 000000000000..bd914575cca8
--- /dev/null
+++ b/tests/substitutes2.sh
@@ -0,0 +1,21 @@
+source common.sh
+
+clearStore
+
+# Instantiate.
+drvPath=$(nix-instantiate simple.nix)
+echo "derivation is $drvPath"
+
+# Find the output path.
+outPath=$(nix-store -qvvvvv "$drvPath")
+echo "output path is $outPath"
+
+echo $outPath > $TEST_ROOT/sub-paths
+
+# First try a substituter that fails, then one that succeeds
+export NIX_SUBSTITUTERS=$(pwd)/substituter2.sh:$(pwd)/substituter.sh
+
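+# -j0 forbids local builds, so the path must come from a substituter.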
+nix-store -j0 -rvv "$drvPath"
+
+text=$(cat "$outPath"/hello)
+if test "$text" != "Hallo Wereld"; then echo "wrong substitute output: $text"; exit 1; fi
diff --git a/tests/timeout.builder.sh b/tests/timeout.builder.sh
new file mode 100644
index 000000000000..3fbdd57946ad
--- /dev/null
+++ b/tests/timeout.builder.sh
@@ -0,0 +1,2 @@
+echo "‘timeout’ builder entering an infinite loop"
+while true ; do echo -n .; done
diff --git a/tests/timeout.nix b/tests/timeout.nix
new file mode 100644
index 000000000000..b41368bb38e2
--- /dev/null
+++ b/tests/timeout.nix
@@ -0,0 +1,6 @@
+with import ./config.nix;
+
+mkDerivation {
+  name = "timeout";
+  builder = ./timeout.builder.sh;
+}
diff --git a/tests/timeout.sh b/tests/timeout.sh
new file mode 100644
index 000000000000..2ebd06b9330c
--- /dev/null
+++ b/tests/timeout.sh
@@ -0,0 +1,21 @@
+# Test the `--timeout' option and the `build-max-log-size' limit.
+
+source common.sh
+
+if messages=$(nix-build -Q timeout.nix --timeout 2 2>&1); then
+    echo "error: ‘nix-build’ succeeded; should have timed out"
+    exit 1
+fi
+
+if ! echo "$messages" | grep -q "timed out"; then
+    echo "error: build may have failed for reasons other than timeout; output:"
+    echo "$messages" >&2
+    exit 1
+fi
+
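+# A build whose log grows past the 100-byte ‘build-max-log-size’
+# limit should be aborted.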
+if nix-build -Q timeout.nix --option build-max-log-size 100; then
+    echo "build should have failed"
+    exit 1
+fi
diff --git a/tests/user-envs.builder.sh b/tests/user-envs.builder.sh
new file mode 100644
index 000000000000..5fafa797f11e
--- /dev/null
+++ b/tests/user-envs.builder.sh
@@ -0,0 +1,5 @@
+mkdir $out
+mkdir $out/bin
+echo "#! $shell" > $out/bin/$progName
+echo "echo $name" >> $out/bin/$progName
+chmod +x $out/bin/$progName
diff --git a/tests/user-envs.nix b/tests/user-envs.nix
new file mode 100644
index 000000000000..1aa410cc9680
--- /dev/null
+++ b/tests/user-envs.nix
@@ -0,0 +1,29 @@
+# Some dummy arguments...
+{ foo ? "foo"
+}:
+
+with import ./config.nix;
+
+assert foo == "foo";
+
+let
+
+  makeDrv = name: progName: (mkDerivation {
+    inherit name progName system;
+    builder = ./user-envs.builder.sh;
+  } // {
+    meta = {
+      description = "A silly test package";
+    };
+  });
+
+in
+
+  [
+    (makeDrv "foo-1.0" "foo")
+    (makeDrv "foo-2.0pre1" "foo")
+    (makeDrv "bar-0.1" "bar")
+    (makeDrv "foo-2.0" "foo")
+    (makeDrv "bar-0.1.1" "bar")
+    (makeDrv "foo-0.1" "foo" // { meta.priority = 10; })
+  ]
diff --git a/tests/user-envs.sh b/tests/user-envs.sh
new file mode 100644
index 000000000000..0d997acd69d9
--- /dev/null
+++ b/tests/user-envs.sh
@@ -0,0 +1,168 @@
+source common.sh
+
+clearStore
+clearProfiles
+
+set -x
+
+# Query installed: should be empty.
+test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 0
+
+export HOME=$TEST_ROOT/home
+mkdir -p $HOME
+nix-env --switch-profile $profiles/test
+
+# Query available: should contain several.
+test "$(nix-env -f ./user-envs.nix -qa '*' | wc -l)" -eq 6
+outPath10=$(nix-env -f ./user-envs.nix -qa --out-path --no-name '*' | grep foo-1.0)
+drvPath10=$(nix-env -f ./user-envs.nix -qa --drv-path --no-name '*' | grep foo-1.0)
+[ -n "$outPath10" -a -n "$drvPath10" ]
+
+# Query descriptions.
+nix-env -f ./user-envs.nix -qa '*' --description | grep -q silly
+rm -f $HOME/.nix-defexpr
+ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr
+nix-env -qa '*' --description | grep -q silly
+
+# Install "foo-1.0".
+nix-env -i foo-1.0
+
+# Query installed: should contain foo-1.0 now (which should be
+# executable).
+test "$(nix-env -q '*' | wc -l)" -eq 1
+nix-env -q '*' | grep -q foo-1.0
+test "$($profiles/test/bin/foo)" = "foo-1.0"
+
+# Test nix-env -qc to compare installed against available packages, and vice versa.
+nix-env -qc '*' | grep -q '< 2.0'
+nix-env -qac '*' | grep -q '> 1.0'
+
+# Test the -b flag to filter out source-only packages.
+[ "$(nix-env -qab | wc -l)" -eq 1 ]
+
+# Test the -s flag to get package status.
+nix-env -qas | grep -q 'IP-  foo-1.0'
+nix-env -qas | grep -q -- '---  bar-0.1'
+
+# Disable foo.
+nix-env --set-flag active false foo
+! [ -e "$profiles/test/bin/foo" ]
+
+# Enable foo.
+nix-env --set-flag active true foo
+[ -e "$profiles/test/bin/foo" ]
+
+# Store the path of foo-1.0.
+outPath10_=$(nix-env -q --out-path --no-name '*' | grep foo-1.0)
+echo "foo-1.0 = $outPath10"
+[ "$outPath10" = "$outPath10_" ]
+
+# Install "foo-2.0pre1": should remove foo-1.0.
+nix-env -i foo-2.0pre1
+
+# Query installed: should contain foo-2.0pre1 now.
+test "$(nix-env -q '*' | wc -l)" -eq 1
+nix-env -q '*' | grep -q foo-2.0pre1
+test "$($profiles/test/bin/foo)" = "foo-2.0pre1"
+
+# Upgrade "foo": should install foo-2.0.
+NIX_PATH=nixpkgs=./user-envs.nix:$NIX_PATH nix-env -f '<nixpkgs>' -u foo
+
+# Query installed: should contain foo-2.0 now.
+test "$(nix-env -q '*' | wc -l)" -eq 1
+nix-env -q '*' | grep -q foo-2.0
+test "$($profiles/test/bin/foo)" = "foo-2.0"
+
+# Store the path of foo-2.0.
+outPath20=$(nix-env -q --out-path --no-name '*' | grep foo-2.0)
+test -n "$outPath20"
+
+# Install bar-0.1, uninstall foo.
+nix-env -i bar-0.1
+nix-env -e foo
+
+# Query installed: should only contain bar-0.1 now.
+if nix-env -q '*' | grep -q foo; then false; fi
+nix-env -q '*' | grep -q bar
+
+# Rollback: should bring "foo" back.
+oldGen="$(nix-store -q --resolve $profiles/test)"
+nix-env --rollback
+[ "$(nix-store -q --resolve $profiles/test)" != "$oldGen" ]
+nix-env -q '*' | grep -q foo-2.0
+nix-env -q '*' | grep -q bar
+
+# Rollback again: should remove "bar".
+nix-env --rollback
+nix-env -q '*' | grep -q foo-2.0
+if nix-env -q '*' | grep -q bar; then false; fi
+
+# Count generations.
+nix-env --list-generations
+test "$(nix-env --list-generations | wc -l)" -eq 7
+
+# Switch to a specified generation.
+nix-env --switch-generation 7
+[ "$(nix-store -q --resolve $profiles/test)" = "$oldGen" ]
+
+# Install foo-1.0, now using its store path.
+nix-env -i "$outPath10"
+nix-env -q '*' | grep -q foo-1.0
+nix-store -qR $profiles/test | grep "$outPath10"
+nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve $profiles/test)"
+[ "$(nix-store -q --deriver "$outPath10")" = $drvPath10 ]
+
+# Uninstall foo-1.0, using a symlink to its store path.
+ln -sfn $outPath10/bin/foo $TEST_ROOT/symlink
+nix-env -e $TEST_ROOT/symlink
+if nix-env -q '*' | grep -q foo; then false; fi
+! nix-store -qR $profiles/test | grep "$outPath10"
+
+# Install foo-1.0, now using a symlink to its store path.
+nix-env -i $TEST_ROOT/symlink
+nix-env -q '*' | grep -q foo
+
+# Delete all old generations.
+nix-env --delete-generations old
+
+# Run the garbage collector.  This should get rid of foo-2.0 but not
+# foo-1.0.
+nix-collect-garbage
+test -e "$outPath10"
+! [ -e "$outPath20" ]
+
+# Uninstall everything
+nix-env -e '*'
+test "$(nix-env -q '*' | wc -l)" -eq 0
+
+# Installing "foo" should only install the newest foo.
+nix-env -i foo
+test "$(nix-env -q '*' | grep foo- | wc -l)" -eq 1
+nix-env -q '*' | grep -q foo-2.0
+
+# On the other hand, installing two specific versions of foo at once
+# should fail due to a collision.
+nix-env -e '*'
+! nix-env -i foo-1.0 foo-2.0
+
+# Installing "*" should install one foo and one bar.
+nix-env -e '*'
+nix-env -i '*'
+test "$(nix-env -q '*' | wc -l)" -eq 2
+nix-env -q '*' | grep -q foo-2.0
+nix-env -q '*' | grep -q bar-0.1.1
+
+# Test priorities: foo-0.1 has a lower priority than foo-1.0, so it
+# should be possible to install both without a collision.  Also test
+# ‘--set-flag priority’ to manually override the declared priorities.
+nix-env -e '*'
+nix-env -i foo-0.1 foo-1.0
+[ "$($profiles/test/bin/foo)" = "foo-1.0" ]
+nix-env --set-flag priority 1 foo-0.1
+[ "$($profiles/test/bin/foo)" = "foo-0.1" ]
+
+# Test nix-env --set.
+nix-env --set $outPath10
+[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ]
+nix-env --set $drvPath10
+[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ]
diff --git a/version b/version
new file mode 100644
index 000000000000..468437494697
--- /dev/null
+++ b/version
@@ -0,0 +1 @@
+1.8
\ No newline at end of file