-rw-r--r--  .gitignore | 6
-rw-r--r--  Makefile | 4
-rw-r--r--  configure.ac | 2
-rw-r--r--  local.mk | 2
-rwxr-xr-x  scripts/build-remote.pl.in | 275
-rw-r--r--  scripts/local.mk | 9
-rwxr-xr-x  scripts/nix-copy-closure.in | 103
-rw-r--r--  src/build-remote/build-remote.cc | 1
-rw-r--r--  src/build-remote/local.mk | 2
-rw-r--r--  src/download-via-ssh/download-via-ssh.cc | 142
-rw-r--r--  src/download-via-ssh/local.mk | 11
-rw-r--r--  src/libexpr/json-to-value.cc | 1
-rw-r--r--  src/libexpr/symbol-table.hh | 2
-rw-r--r--  src/libexpr/value.hh | 1
-rw-r--r--  src/libmain/shared.cc | 2
-rw-r--r--  src/libmain/stack.cc | 2
-rw-r--r--  src/libstore/binary-cache-store.hh | 21
-rw-r--r--  src/libstore/build.cc | 2
-rw-r--r--  src/libstore/download.cc | 28
-rw-r--r--  src/libstore/download.hh | 2
-rw-r--r--  src/libstore/globals.cc | 2
-rw-r--r--  src/libstore/legacy-ssh-store.cc | 247
-rw-r--r--  src/libstore/local-store.cc | 1
-rw-r--r--  src/libstore/optimise-store.cc | 2
-rw-r--r--  src/libstore/remote-store.cc | 1
-rw-r--r--  src/libstore/s3-binary-cache-store.cc | 143
-rw-r--r--  src/libstore/s3.hh | 33
-rw-r--r--  src/libstore/serve-protocol.hh (renamed from src/nix-store/serve-protocol.hh) | 0
-rw-r--r--  src/libstore/ssh-store.cc | 25
-rw-r--r--  src/libstore/store-api.cc | 46
-rw-r--r--  src/libstore/store-api.hh | 20
-rw-r--r--  src/libutil/archive.cc | 2
-rw-r--r--  src/libutil/hash.cc | 2
-rw-r--r--  src/libutil/logging.hh | 1
-rw-r--r--  src/libutil/serialise.hh | 15
-rw-r--r--  src/libutil/types.hh | 1
-rw-r--r--  src/libutil/util.cc | 2
-rw-r--r--  src/nix-copy-closure/local.mk | 7
-rwxr-xr-x  src/nix-copy-closure/nix-copy-closure.cc | 59
-rw-r--r--  src/nix-daemon/nix-daemon.cc | 20
-rw-r--r--  src/nix-store/nix-store.cc | 4
41 files changed, 554 insertions(+), 697 deletions(-)
diff --git a/.gitignore b/.gitignore
index 92f95fe1fcb9..951efb4c908f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,7 +35,6 @@ Makefile.config
 # /scripts/
 /scripts/nix-profile.sh
 /scripts/nix-copy-closure
-/scripts/build-remote.pl
 /scripts/nix-reduce-build
 /scripts/nix-http-export.cgi
 
@@ -72,15 +71,14 @@ Makefile.config
 # /src/nix-channel/
 /src/nix-channel/nix-channel
 
-# /src/download-via-ssh/
-/src/download-via-ssh/download-via-ssh
-
 # /src/buildenv/
 /src/buildenv/buildenv
 
 # /src/nix-build/
 /src/nix-build/nix-build
 
+/src/nix-copy-closure/nix-copy-closure
+
 /src/build-remote/build-remote
 
 # /tests/
diff --git a/Makefile b/Makefile
index 14be271bb107..d26cf8d99d22 100644
--- a/Makefile
+++ b/Makefile
@@ -11,6 +11,7 @@ makefiles = \
   src/nix-env/local.mk \
   src/nix-daemon/local.mk \
   src/nix-collect-garbage/local.mk \
+  src/nix-copy-closure/local.mk \
   src/nix-prefetch-url/local.mk \
   src/buildenv/local.mk \
   src/resolve-system-dependencies/local.mk \
@@ -26,9 +27,8 @@ makefiles = \
   misc/emacs/local.mk \
   doc/manual/local.mk \
   tests/local.mk
-  #src/download-via-ssh/local.mk \
 
-GLOBAL_CXXFLAGS += -std=c++14 -g -Wall
+GLOBAL_CXXFLAGS += -std=c++14 -g -Wall -include config.h
 
 -include Makefile.config
 
diff --git a/configure.ac b/configure.ac
index e6b11be2df19..bfe81840c3b4 100644
--- a/configure.ac
+++ b/configure.ac
@@ -61,7 +61,7 @@ CFLAGS=
 CXXFLAGS=
 AC_PROG_CC
 AC_PROG_CXX
-AX_CXX_COMPILE_STDCXX_11
+AX_CXX_COMPILE_STDCXX_14
 
 
 # Use 64-bit file system calls so that we can support files > 2 GiB.
diff --git a/local.mk b/local.mk
index 2541f3f32290..eebd71961198 100644
--- a/local.mk
+++ b/local.mk
@@ -10,7 +10,7 @@ clean-files += Makefile.config
 GLOBAL_CXXFLAGS += -I . -I src -I src/libutil -I src/libstore -I src/libmain -I src/libexpr \
   -Wno-unneeded-internal-declaration
 
-$(foreach i, config.h $(call rwildcard, src/lib*, *.hh) src/nix-store/serve-protocol.hh, \
+$(foreach i, config.h $(call rwildcard, src/lib*, *.hh), \
   $(eval $(call install-file-in, $(i), $(includedir)/nix, 0644)))
 
 $(foreach i, $(call rwildcard, src/boost, *.hpp), $(eval $(call install-file-in, $(i), $(includedir)/nix/$(patsubst src/%/,%,$(dir $(i))), 0644)))
diff --git a/scripts/build-remote.pl.in b/scripts/build-remote.pl.in
deleted file mode 100755
index b5fc629eb499..000000000000
--- a/scripts/build-remote.pl.in
+++ /dev/null
@@ -1,275 +0,0 @@
-#! @perl@ -w @perlFlags@
-
-use utf8;
-use Fcntl qw(:DEFAULT :flock);
-use English '-no_match_vars';
-use IO::Handle;
-use Nix::Config;
-use Nix::SSH;
-use Nix::CopyClosure;
-use Nix::Store;
-use Encode;
-no warnings('once');
-
-STDERR->autoflush(1);
-binmode STDERR, ":encoding(utf8)";
-
-my $debug = defined $ENV{NIX_DEBUG_HOOK};
-
-
-# General operation:
-#
-# Try to find a free machine of type $neededSystem.  We do this as
-# follows:
-# - We acquire an exclusive lock on $currentLoad/main-lock.
-# - For each machine $machine of type $neededSystem and for each $slot
-#   less than the maximum load for that machine, we try to get an
-#   exclusive lock on $currentLoad/$machine-$slot (without blocking).
-#   If we get such a lock, we send "accept" to the caller.  Otherwise,
-#   we send "postpone" and exit.
-# - We release the exclusive lock on $currentLoad/main-lock.
-# - We perform the build on $neededSystem.
-# - We release the exclusive lock on $currentLoad/$machine-$slot.
-#
-# The nice thing about this scheme is that if we die prematurely, the
-# locks are released automatically.
-
-
-# Make sure that we don't get any SSH passphrase or host key popups -
-# if there is any problem it should fail, not do something
-# interactive.
-$ENV{"DISPLAY"} = "";
-$ENV{"SSH_ASKPASS"} = "";
-
-
-sub sendReply {
-    my $reply = shift;
-    print STDERR "# $reply\n";
-}
-
-sub all { $_ || return 0 for @_; 1 }
-
-
-# Initialisation.
-my $loadIncreased = 0;
-
-my ($localSystem, $maxSilentTime, $buildTimeout) = @ARGV;
-
-my $currentLoad = $ENV{"NIX_CURRENT_LOAD"} // "/run/nix/current-load";
-my $conf = $ENV{"NIX_REMOTE_SYSTEMS"} // "@sysconfdir@/nix/machines";
-
-
-sub openSlotLock {
-    my ($machine, $slot) = @_;
-    my $slotLockFn = "$currentLoad/" . (join '+', @{$machine->{systemTypes}}) . "-" . $machine->{hostName} . "-$slot";
-    my $slotLock = new IO::Handle;
-    sysopen $slotLock, "$slotLockFn", O_RDWR|O_CREAT, 0600 or die;
-    return $slotLock;
-}
-
-
-# Read the list of machines.
-my @machines;
-if (defined $conf && -e $conf) {
-    open CONF, "<$conf" or die;
-    while (<CONF>) {
-        chomp;
-        s/\#.*$//g;
-        next if /^\s*$/;
-        my @tokens = split /\s/, $_;
-        my @supportedFeatures = split(/,/, $tokens[5] || "");
-        my @mandatoryFeatures = split(/,/, $tokens[6] || "");
-        push @machines,
-            { hostName => $tokens[0]
-            , systemTypes => [ split(/,/, $tokens[1]) ]
-            , sshKey => $tokens[2]
-            , maxJobs => int($tokens[3])
-            , speedFactor => 1.0 * (defined $tokens[4] ? int($tokens[4]) : 1)
-            , supportedFeatures => [ @supportedFeatures, @mandatoryFeatures ]
-            , mandatoryFeatures => [ @mandatoryFeatures ]
-            , enabled => 1
-            };
-    }
-    close CONF;
-}
-
-
-
-# Wait for the calling process to ask us whether we can build some derivation.
-my ($drvPath, $hostName, $slotLock);
-my ($from, $to);
-
-REQ: while (1) {
-    $_ = <STDIN> || exit 0;
-    (my $amWilling, my $neededSystem, $drvPath, my $requiredFeatures) = split;
-    my @requiredFeatures = split /,/, $requiredFeatures;
-
-    my $canBuildLocally = $amWilling && ($localSystem eq $neededSystem);
-
-    if (!defined $currentLoad) {
-        sendReply "decline";
-        next;
-    }
-
-    # Acquire the exclusive lock on $currentLoad/main-lock.
-    mkdir $currentLoad, 0777 or die unless -d $currentLoad;
-    my $mainLock = "$currentLoad/main-lock";
-    sysopen MAINLOCK, "$mainLock", O_RDWR|O_CREAT, 0600 or die;
-    flock(MAINLOCK, LOCK_EX) or die;
-
-
-    while (1) {
-        # Find all machine that can execute this build, i.e., that
-        # support builds for the given platform and features, and are
-        # not at their job limit.
-        my $rightType = 0;
-        my @available = ();
-        LOOP: foreach my $cur (@machines) {
-            if ($cur->{enabled}
-                && (grep { $neededSystem eq $_ } @{$cur->{systemTypes}})
-                && all(map { my $f = $_; 0 != grep { $f eq $_ } @{$cur->{supportedFeatures}} } (@requiredFeatures, @mandatoryFeatures))
-                && all(map { my $f = $_; 0 != grep { $f eq $_ } @requiredFeatures } @{$cur->{mandatoryFeatures}})
-                )
-            {
-                $rightType = 1;
-
-                # We have a machine of the right type.  Determine the load on
-                # the machine.
-                my $slot = 0;
-                my $load = 0;
-                my $free;
-                while ($slot < $cur->{maxJobs}) {
-                    my $slotLock = openSlotLock($cur, $slot);
-                    if (flock($slotLock, LOCK_EX | LOCK_NB)) {
-                        $free = $slot unless defined $free;
-                        flock($slotLock, LOCK_UN) or die;
-                    } else {
-                        $load++;
-                    }
-                    close $slotLock;
-                    $slot++;
-                }
-
-                push @available, { machine => $cur, load => $load, free => $free }
-                if $load < $cur->{maxJobs};
-            }
-        }
-
-        if ($debug) {
-            print STDERR "load on " . $_->{machine}->{hostName} . " = " . $_->{load} . "\n"
-                foreach @available;
-        }
-
-
-        # Didn't find any available machine?  Then decline or postpone.
-        if (scalar @available == 0) {
-            # Postpone if we have a machine of the right type, except
-            # if the local system can and wants to do the build.
-            if ($rightType && !$canBuildLocally) {
-                sendReply "postpone";
-            } else {
-                sendReply "decline";
-            }
-            close MAINLOCK;
-            next REQ;
-        }
-
-
-        # Prioritise the available machines as follows:
-        # - First by load divided by speed factor, rounded to the nearest
-        #   integer.  This causes fast machines to be preferred over slow
-        #   machines with similar loads.
-        # - Then by speed factor.
-        # - Finally by load.
-        sub lf { my $x = shift; return int($x->{load} / $x->{machine}->{speedFactor} + 0.4999); }
-        @available = sort
-            { lf($a) <=> lf($b)
-                  || $b->{machine}->{speedFactor} <=> $a->{machine}->{speedFactor}
-                  || $a->{load} <=> $b->{load}
-            } @available;
-
-
-        # Select the best available machine and lock a free slot.
-        my $selected = $available[0];
-        my $machine = $selected->{machine};
-
-        $slotLock = openSlotLock($machine, $selected->{free});
-        flock($slotLock, LOCK_EX | LOCK_NB) or die;
-        utime undef, undef, $slotLock;
-
-        close MAINLOCK;
-
-
-        # Connect to the selected machine.
-        my @sshOpts = ("-i", $machine->{sshKey});
-        $hostName = $machine->{hostName};
-        eval {
-            ($from, $to) = connectToRemoteNix($hostName, \@sshOpts, "2>&4");
-            # FIXME: check if builds are inhibited.
-        };
-        last REQ unless $@;
-        print STDERR "$@";
-        warn "unable to open SSH connection to ‘$hostName’, trying other available machines...\n";
-        $from = undef;
-        $to = undef;
-        $machine->{enabled} = 0;
-    }
-}
-
-
-# Tell Nix we've accepted the build.
-sendReply "accept";
-my @inputs = split /\s/, readline(STDIN);
-my @outputs = split /\s/, readline(STDIN);
-
-
-# Copy the derivation and its dependencies to the build machine.  This
-# is guarded by an exclusive lock per machine to prevent multiple
-# build-remote instances from copying to a machine simultaneously.
-# That's undesirable because we may end up with N instances uploading
-# the same missing path simultaneously, causing the effective network
-# bandwidth and target disk speed to be divided by N.
-my $uploadLock = "$currentLoad/$hostName.upload-lock";
-sysopen UPLOADLOCK, "$uploadLock", O_RDWR|O_CREAT, 0600 or die;
-eval {
-    local $SIG{ALRM} = sub { die "alarm\n" };
-    # Don't wait forever, so that a process that gets stuck while
-    # holding the lock doesn't block everybody else indefinitely.
-    # It's safe to continue after a timeout, just (potentially)
-    # inefficient.
-    alarm 15 * 60;
-    flock(UPLOADLOCK, LOCK_EX);
-    alarm 0;
-};
-if ($@) {
-    die unless $@ eq "alarm\n";
-    print STDERR "somebody is hogging $uploadLock, continuing...\n";
-    unlink $uploadLock;
-}
-Nix::CopyClosure::copyToOpen($from, $to, $hostName, [ $drvPath, @inputs ], 0, 0);
-close UPLOADLOCK;
-
-
-# Perform the build.
-print STDERR "building ‘$drvPath’ on ‘$hostName’\n";
-writeInt(6, $to) or die; # == cmdBuildPaths
-writeStrings([$drvPath], $to);
-writeInt($maxSilentTime, $to);
-writeInt($buildTimeout, $to);
-my $res = readInt($from);
-if ($res != 0) {
-    my $msg = decode("utf-8", readString($from));
-    print STDERR "error: $msg on ‘$hostName’\n";
-    exit $res;
-}
-
-
-# Copy the output from the build machine.
-my @outputs2 = grep { !isValidPath($_) } @outputs;
-if (scalar @outputs2 > 0) {
-    writeInt(5, $to); # == cmdExportPaths
-    writeInt(0, $to); # don't sign
-    writeStrings(\@outputs2, $to);
-    $ENV{'NIX_HELD_LOCKS'} = "@outputs2"; # FIXME: ugly
-    importPaths(fileno($from), 1);
-}
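
The selection policy spelled out in the deleted hook (sort candidates by load divided by speed factor rounded to the nearest integer, then by speed factor, then by raw load) is worth keeping in mind when reading the C++ replacement. For reference, here is that comparator expressed in C++; the Candidate struct is hypothetical and stands in for whatever types the C++ rewrite uses:

    #include <algorithm>
    #include <string>
    #include <vector>

    struct Candidate {
        std::string hostName;
        unsigned int load;   // slots currently in use
        float speedFactor;   // column 5 of the machines file, default 1.0
    };

    // "load divided by speed factor, rounded to the nearest integer",
    // exactly as the Perl lf() helper computed it.
    static int loadFactor(const Candidate & c)
    {
        return (int) (c.load / c.speedFactor + 0.4999);
    }

    static void prioritise(std::vector<Candidate> & available)
    {
        std::sort(available.begin(), available.end(),
            [](const Candidate & a, const Candidate & b) {
                if (loadFactor(a) != loadFactor(b)) return loadFactor(a) < loadFactor(b);
                if (a.speedFactor != b.speedFactor) return a.speedFactor > b.speedFactor;
                return a.load < b.load;
            });
    }
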
diff --git a/scripts/local.mk b/scripts/local.mk
index ee8ae6845dc1..9524baf81b03 100644
--- a/scripts/local.mk
+++ b/scripts/local.mk
@@ -1,10 +1,4 @@
-nix_bin_scripts := \
-  $(d)/nix-copy-closure \
-
-bin-scripts += $(nix_bin_scripts)
-
 nix_noinst_scripts := \
-  $(d)/build-remote.pl \
   $(d)/nix-http-export.cgi \
   $(d)/nix-profile.sh \
   $(d)/nix-reduce-build
@@ -14,6 +8,5 @@ noinst-scripts += $(nix_noinst_scripts)
 profiledir = $(sysconfdir)/profile.d
 
 $(eval $(call install-file-as, $(d)/nix-profile.sh, $(profiledir)/nix.sh, 0644))
-$(eval $(call install-program-in, $(d)/build-remote.pl, $(libexecdir)/nix))
 
-clean-files += $(nix_bin_scripts) $(nix_noinst_scripts)
+clean-files += $(nix_noinst_scripts)
diff --git a/scripts/nix-copy-closure.in b/scripts/nix-copy-closure.in
deleted file mode 100755
index af1d30919263..000000000000
--- a/scripts/nix-copy-closure.in
+++ /dev/null
@@ -1,103 +0,0 @@
-#! @perl@ -w @perlFlags@
-
-use utf8;
-use strict;
-use Nix::SSH;
-use Nix::Config;
-use Nix::Store;
-use Nix::CopyClosure;
-use List::Util qw(sum);
-
-binmode STDERR, ":encoding(utf8)";
-
-if (scalar @ARGV < 1) {
-    print STDERR <<EOF
-Usage: nix-copy-closure [--from | --to] HOSTNAME [--gzip] [--bzip2] [--xz] PATHS...
-EOF
-    ;
-    exit 1;
-}
-
-
-# Get the target host.
-my $sshHost;
-my $toMode = 1;
-my $includeOutputs = 0;
-my $dryRun = 0;
-my $useSubstitutes = 0;
-my $verbosity = 1;
-
-
-# !!! Copied from nix-pack-closure, should put this in a module.
-my @storePaths = ();
-
-while (@ARGV) {
-    my $arg = shift @ARGV;
-
-    if ($arg eq "--help") {
-        exec "man nix-copy-closure" or die;
-    }
-    elsif ($arg eq "--gzip" || $arg eq "--bzip2" || $arg eq "--xz") {
-        warn "$0: ‘$arg’ is not implemented\n" if $arg ne "--gzip";
-        push @globalSshOpts, "-C";
-    }
-    elsif ($arg eq "--from") {
-        $toMode = 0;
-    }
-    elsif ($arg eq "--to") {
-        $toMode = 1;
-    }
-    elsif ($arg eq "--include-outputs") {
-        $includeOutputs = 1;
-    }
-    elsif ($arg eq "--show-progress") {
-        warn "$0: ‘$arg’ is not implemented\n";
-    }
-    elsif ($arg eq "--dry-run") {
-        $dryRun = 1;
-    }
-    elsif ($arg eq "--use-substitutes" || $arg eq "-s") {
-        $useSubstitutes = 1;
-    }
-    elsif ($arg eq "-v") {
-        $verbosity++;
-        setVerbosity($verbosity);
-    }
-    elsif (!defined $sshHost) {
-        $sshHost = $arg;
-    }
-    else {
-        push @storePaths, $arg;
-    }
-}
-
-die "$0: you did not specify a host name\n" unless defined $sshHost;
-
-
-if ($toMode) { # Copy TO the remote machine.
-    Nix::CopyClosure::copyTo(
-        $sshHost, [ @storePaths ],
-        $includeOutputs, $dryRun, $useSubstitutes);
-}
-
-else { # Copy FROM the remote machine.
-
-    my ($from, $to) = connectToRemoteNix($sshHost, []);
-
-    # Query the closure of the given store paths on the remote
-    # machine.  Paths are assumed to be store paths; there is no
-    # resolution (following of symlinks).
-    syswrite($to, pack("L<x4L<x4", 7, $includeOutputs ? 1 : 0)) or die;
-    writeStrings(\@storePaths, $to);
-    my @missing = grep { !isValidPath($_) } readStrings($from);
-
-    # Export the store paths on the remote machine and import them locally.
-    if (scalar @missing > 0) {
-        print STDERR "copying ", scalar @missing, " missing paths from ‘$sshHost’...\n";
-        writeInt(5, $to); # == cmdExportPaths
-        writeInt(0, $to); # obsolete
-        writeStrings(\@missing, $to);
-        importPaths(fileno($from), 1);
-    }
-
-}
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc
index acbd308f84e1..2ce20882da17 100644
--- a/src/build-remote/build-remote.cc
+++ b/src/build-remote/build-remote.cc
@@ -12,7 +12,6 @@
 #include "shared.hh"
 #include "pathlocks.hh"
 #include "globals.hh"
-#include "serve-protocol.hh"
 #include "serialise.hh"
 #include "store-api.hh"
 #include "derivations.hh"
diff --git a/src/build-remote/local.mk b/src/build-remote/local.mk
index 05b8cb451435..62d5a010c247 100644
--- a/src/build-remote/local.mk
+++ b/src/build-remote/local.mk
@@ -8,4 +8,4 @@ build-remote_LIBS = libmain libutil libformat libstore
 
 build-remote_SOURCES := $(d)/build-remote.cc
 
-build-remote_CXXFLAGS = -DSYSCONFDIR="\"$(sysconfdir)\"" -Isrc/nix-store
+build-remote_CXXFLAGS = -DSYSCONFDIR="\"$(sysconfdir)\""
diff --git a/src/download-via-ssh/download-via-ssh.cc b/src/download-via-ssh/download-via-ssh.cc
deleted file mode 100644
index 4a1ba9a11235..000000000000
--- a/src/download-via-ssh/download-via-ssh.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-#include "shared.hh"
-#include "util.hh"
-#include "serialise.hh"
-#include "archive.hh"
-#include "affinity.hh"
-#include "globals.hh"
-#include "serve-protocol.hh"
-#include "worker-protocol.hh"
-#include "store-api.hh"
-
-#include <iostream>
-#include <cstdlib>
-#include <unistd.h>
-
-using namespace nix;
-
-// !!! TODO:
-// * Respect more than the first host
-// * use a database
-// * show progress
-
-
-static std::pair<FdSink, FdSource> connect(const string & conn)
-{
-    Pipe to, from;
-    to.create();
-    from.create();
-    startProcess([&]() {
-        if (dup2(to.readSide, STDIN_FILENO) == -1)
-            throw SysError("dupping stdin");
-        if (dup2(from.writeSide, STDOUT_FILENO) == -1)
-            throw SysError("dupping stdout");
-        restoreSignals();
-        execlp("ssh", "ssh", "-x", "-T", conn.c_str(), "nix-store --serve", NULL);
-        throw SysError("executing ssh");
-    });
-    // If child exits unexpectedly, we'll EPIPE or EOF early.
-    // If we exit unexpectedly, child will EPIPE or EOF early.
-    // So no need to keep track of it.
-
-    return std::pair<FdSink, FdSource>(to.writeSide.borrow(), from.readSide.borrow());
-}
-
-
-static void substitute(std::pair<FdSink, FdSource> & pipes, Path storePath, Path destPath)
-{
-    pipes.first << cmdDumpStorePath << storePath;
-    pipes.first.flush();
-    restorePath(destPath, pipes.second);
-    std::cout << std::endl;
-}
-
-
-static void query(std::pair<FdSink, FdSource> & pipes)
-{
-    for (string line; getline(std::cin, line);) {
-        Strings tokenized = tokenizeString<Strings>(line);
-        string cmd = tokenized.front();
-        tokenized.pop_front();
-        if (cmd == "have") {
-            pipes.first
-                << cmdQueryValidPaths
-                << 0 // don't lock
-                << 0 // don't substitute
-                << tokenized;
-            pipes.first.flush();
-            PathSet paths = readStrings<PathSet>(pipes.second);
-            for (auto & i : paths)
-                std::cout << i << std::endl;
-        } else if (cmd == "info") {
-            pipes.first << cmdQueryPathInfos << tokenized;
-            pipes.first.flush();
-            while (1) {
-                Path path = readString(pipes.second);
-                if (path.empty()) break;
-                assertStorePath(path);
-                std::cout << path << std::endl;
-                string deriver = readString(pipes.second);
-                if (!deriver.empty()) assertStorePath(deriver);
-                std::cout << deriver << std::endl;
-                PathSet references = readStorePaths<PathSet>(pipes.second);
-                std::cout << references.size() << std::endl;
-                for (auto & i : references)
-                    std::cout << i << std::endl;
-                std::cout << readLongLong(pipes.second) << std::endl;
-                std::cout << readLongLong(pipes.second) << std::endl;
-            }
-        } else
-            throw Error(format("unknown substituter query ‘%1%’") % cmd);
-        std::cout << std::endl;
-    }
-}
-
-
-int main(int argc, char * * argv)
-{
-    return handleExceptions(argv[0], [&]() {
-        if (argc < 2)
-            throw UsageError("download-via-ssh requires an argument");
-
-        initNix();
-
-        settings.update();
-
-        if (settings.sshSubstituterHosts.empty())
-            return;
-
-        std::cout << std::endl;
-
-        /* Pass on the location of the daemon client's SSH
-           authentication socket. */
-        string sshAuthSock = settings.get("ssh-auth-sock", string(""));
-        if (sshAuthSock != "") setenv("SSH_AUTH_SOCK", sshAuthSock.c_str(), 1);
-
-        string host = settings.sshSubstituterHosts.front();
-        std::pair<FdSink, FdSource> pipes = connect(host);
-
-        /* Exchange the greeting */
-        pipes.first << SERVE_MAGIC_1;
-        pipes.first.flush();
-        unsigned int magic = readInt(pipes.second);
-        if (magic != SERVE_MAGIC_2)
-            throw Error("protocol mismatch");
-        readInt(pipes.second); // Server version, unused for now
-        pipes.first << SERVE_PROTOCOL_VERSION;
-        pipes.first.flush();
-
-        string arg = argv[1];
-        if (arg == "--query")
-            query(pipes);
-        else if (arg == "--substitute") {
-            if (argc != 4)
-                throw UsageError("download-via-ssh: --substitute takes exactly two arguments");
-            Path storePath = argv[2];
-            Path destPath = argv[3];
-            printError(format("downloading ‘%1%’ via SSH from ‘%2%’...") % storePath % host);
-            substitute(pipes, storePath, destPath);
-        }
-        else
-            throw UsageError(format("download-via-ssh: unknown command ‘%1%’") % arg);
-    });
-}
diff --git a/src/download-via-ssh/local.mk b/src/download-via-ssh/local.mk
deleted file mode 100644
index 80f4c385acb3..000000000000
--- a/src/download-via-ssh/local.mk
+++ /dev/null
@@ -1,11 +0,0 @@
-programs += download-via-ssh
-
-download-via-ssh_DIR := $(d)
-
-download-via-ssh_SOURCES := $(d)/download-via-ssh.cc
-
-download-via-ssh_INSTALL_DIR := $(libexecdir)/nix/substituters
-
-download-via-ssh_CXXFLAGS = -Isrc/nix-store
-
-download-via-ssh_LIBS = libmain libstore libutil libformat
diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc
index f671802bcc24..c189cdef35e7 100644
--- a/src/libexpr/json-to-value.cc
+++ b/src/libexpr/json-to-value.cc
@@ -1,4 +1,3 @@
-#include "config.h"
 #include "json-to-value.hh"
 
 #include <cstring>
diff --git a/src/libexpr/symbol-table.hh b/src/libexpr/symbol-table.hh
index 2fdf820211c8..c2ee49dd32fb 100644
--- a/src/libexpr/symbol-table.hh
+++ b/src/libexpr/symbol-table.hh
@@ -1,7 +1,5 @@
 #pragma once
 
-#include "config.h"
-
 #include <map>
 #include <unordered_set>
 
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
index 271e6a1b24a2..81f918d48de7 100644
--- a/src/libexpr/value.hh
+++ b/src/libexpr/value.hh
@@ -1,6 +1,5 @@
 #pragma once
 
-#include "config.h"
 #include "symbol-table.hh"
 
 #if HAVE_BOEHMGC
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 52cb2312826b..56aa3db00158 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include "common-args.hh"
 #include "globals.hh"
 #include "shared.hh"
diff --git a/src/libmain/stack.cc b/src/libmain/stack.cc
index abf59dc4baa6..57b6a197c0f0 100644
--- a/src/libmain/stack.cc
+++ b/src/libmain/stack.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include "types.hh"
 
 #include <cstring>
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index 31878bbb2476..a70d50d4949c 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -71,9 +71,6 @@ public:
         PathSet & referrers) override
     { notImpl(); }
 
-    PathSet queryValidDerivers(const Path & path) override
-    { return {}; }
-
     PathSet queryDerivationOutputs(const Path & path) override
     { notImpl(); }
 
@@ -83,13 +80,6 @@ public:
     Path queryPathFromHashPart(const string & hashPart) override
     { notImpl(); }
 
-    PathSet querySubstitutablePaths(const PathSet & paths) override
-    { return {}; }
-
-    void querySubstitutablePathInfos(const PathSet & paths,
-        SubstitutablePathInfos & infos) override
-    { }
-
     bool wantMassQuery() override { return wantMassQuery_; }
 
     void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
@@ -121,25 +111,14 @@ public:
     void addIndirectRoot(const Path & path) override
     { notImpl(); }
 
-    void syncWithGC() override
-    { }
-
     Roots findRoots() override
     { notImpl(); }
 
     void collectGarbage(const GCOptions & options, GCResults & results) override
     { notImpl(); }
 
-    void optimiseStore() override
-    { }
-
-    bool verifyStore(bool checkContents, bool repair) override
-    { return true; }
-
     ref<FSAccessor> getFSAccessor() override;
 
-public:
-
     void addSignatures(const Path & storePath, const StringSet & sigs) override
     { notImpl(); }
 
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index 5d6fff4e349f..1aee150fda37 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include "references.hh"
 #include "pathlocks.hh"
 #include "globals.hh"
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
index b2adc154818e..f93fb1e968a9 100644
--- a/src/libstore/download.cc
+++ b/src/libstore/download.cc
@@ -4,6 +4,7 @@
 #include "hash.hh"
 #include "store-api.hh"
 #include "archive.hh"
+#include "s3.hh"
 
 #include <unistd.h>
 #include <fcntl.h>
@@ -488,6 +489,31 @@ struct CurlDownloader : public Downloader
         std::function<void(const DownloadResult &)> success,
         std::function<void(std::exception_ptr exc)> failure) override
     {
+        /* Ugly hack to support s3:// URIs. */
+        if (hasPrefix(request.uri, "s3://")) {
+            // FIXME: do this on a worker thread
+            sync2async<DownloadResult>(success, failure, [&]() {
+#ifdef ENABLE_S3
+                S3Helper s3Helper;
+                auto slash = request.uri.find('/', 5);
+                if (slash == std::string::npos)
+                    throw nix::Error("bad S3 URI ‘%s’", request.uri);
+                std::string bucketName(request.uri, 5, slash - 5);
+                std::string key(request.uri, slash + 1);
+                // FIXME: implement ETag
+                auto s3Res = s3Helper.getObject(bucketName, key);
+                DownloadResult res;
+                if (!s3Res.data)
+                    throw DownloadError(NotFound, fmt("S3 object ‘%s’ does not exist", request.uri));
+                res.data = s3Res.data;
+                return res;
+#else
+                throw nix::Error("cannot download ‘%s’ because Nix is not built with S3 support", request.uri);
+#endif
+            });
+            return;
+        }
+
         auto item = std::make_shared<DownloadItem>(*this, request);
         item->success = success;
         item->failure = failure;
@@ -637,7 +663,7 @@ bool isUri(const string & s)
     size_t pos = s.find("://");
     if (pos == string::npos) return false;
     string scheme(s, 0, pos);
-    return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git";
+    return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3";
 }
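
The s3:// branch added to download.cc above treats everything between the scheme and the first slash as the bucket name and the rest as the object key. A standalone sketch of that split; the helper name is illustrative and not part of the patch:

    #include <stdexcept>
    #include <string>
    #include <utility>

    // parseS3Uri("s3://my-bucket/nar/xyz.nar.xz") -> {"my-bucket", "nar/xyz.nar.xz"}
    std::pair<std::string, std::string> parseS3Uri(const std::string & uri)
    {
        auto slash = uri.find('/', 5);   // skip the 5 characters of "s3://"
        if (slash == std::string::npos)
            throw std::invalid_argument("bad S3 URI '" + uri + "'");
        return { uri.substr(5, slash - 5), uri.substr(slash + 1) };
    }
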
 
 
diff --git a/src/libstore/download.hh b/src/libstore/download.hh
index 82b5d641fde9..bdb5011e7830 100644
--- a/src/libstore/download.hh
+++ b/src/libstore/download.hh
@@ -23,7 +23,7 @@ struct DownloadRequest
 
 struct DownloadResult
 {
-    bool cached;
+    bool cached = false;
     std::string etag;
     std::string effectiveUrl;
     std::shared_ptr<std::string> data;
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 00b468892529..90f83a5bbd95 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include "globals.hh"
 #include "util.hh"
 #include "archive.hh"
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
new file mode 100644
index 000000000000..5d9e5aad6e0a
--- /dev/null
+++ b/src/libstore/legacy-ssh-store.cc
@@ -0,0 +1,247 @@
+#include "archive.hh"
+#include "pool.hh"
+#include "remote-store.hh"
+#include "serve-protocol.hh"
+#include "store-api.hh"
+#include "worker-protocol.hh"
+
+namespace nix {
+
+static std::string uriScheme = "legacy-ssh://";
+
+struct LegacySSHStore : public Store
+{
+    string host;
+
+    struct Connection
+    {
+        Pid sshPid;
+        AutoCloseFD out;
+        AutoCloseFD in;
+        FdSink to;
+        FdSource from;
+    };
+
+    AutoDelete tmpDir;
+
+    Path socketPath;
+
+    Pid sshMaster;
+
+    ref<Pool<Connection>> connections;
+
+    Path key;
+
+    LegacySSHStore(const string & host, const Params & params,
+        size_t maxConnections = std::numeric_limits<size_t>::max())
+        : Store(params)
+        , host(host)
+        , tmpDir(createTempDir("", "nix", true, true, 0700))
+        , socketPath((Path) tmpDir + "/ssh.sock")
+        , connections(make_ref<Pool<Connection>>(
+            maxConnections,
+            [this]() { return openConnection(); },
+            [](const ref<Connection> & r) { return true; }
+            ))
+        , key(get(params, "ssh-key", ""))
+    {
+    }
+
+    ref<Connection> openConnection()
+    {
+        if ((pid_t) sshMaster == -1) {
+            sshMaster = startProcess([&]() {
+                restoreSignals();
+                Strings args{ "ssh", "-M", "-S", socketPath, "-N", "-x", "-a", host };
+                if (!key.empty())
+                    args.insert(args.end(), {"-i", key});
+                execvp("ssh", stringsToCharPtrs(args).data());
+                throw SysError("starting SSH master connection to host ‘%s’", host);
+            });
+        }
+
+        auto conn = make_ref<Connection>();
+        Pipe in, out;
+        in.create();
+        out.create();
+        conn->sshPid = startProcess([&]() {
+            if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
+                throw SysError("duping over STDIN");
+            if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+                throw SysError("duping over STDOUT");
+            execlp("ssh", "ssh", "-S", socketPath.c_str(), host.c_str(), "nix-store", "--serve", "--write", nullptr);
+            throw SysError("executing ‘nix-store --serve’ on remote host ‘%s’", host);
+        });
+        in.readSide = -1;
+        out.writeSide = -1;
+        conn->out = std::move(out.readSide);
+        conn->in = std::move(in.writeSide);
+        conn->to = FdSink(conn->in.get());
+        conn->from = FdSource(conn->out.get());
+
+        int remoteVersion;
+
+        try {
+            conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION;
+            conn->to.flush();
+
+            unsigned int magic = readInt(conn->from);
+            if (magic != SERVE_MAGIC_2)
+                throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%s’", host);
+            remoteVersion = readInt(conn->from);
+            if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
+                throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%s’", host);
+
+        } catch (EndOfFile & e) {
+            throw Error("cannot connect to ‘%1%’", host);
+        }
+
+        return conn;
+    };
+
+    string getUri() override
+    {
+        return uriScheme + host;
+    }
+
+    void queryPathInfoUncached(const Path & path,
+        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+        std::function<void(std::exception_ptr exc)> failure) override
+    {
+        sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() -> std::shared_ptr<ValidPathInfo> {
+            auto conn(connections->get());
+
+            debug("querying remote host ‘%s’ for info on ‘%s’", host, path);
+
+            conn->to << cmdQueryPathInfos << PathSet{path};
+            conn->to.flush();
+
+            auto info = std::make_shared<ValidPathInfo>();
+            conn->from >> info->path;
+            if (info->path.empty()) return nullptr;
+            assert(path == info->path);
+
+            PathSet references;
+            conn->from >> info->deriver;
+            info->references = readStorePaths<PathSet>(*this, conn->from);
+            readLongLong(conn->from); // download size
+            info->narSize = readLongLong(conn->from);
+
+            auto s = readString(conn->from);
+            assert(s == "");
+
+            return info;
+        });
+    }
+
+    void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+        bool repair, bool dontCheckSigs,
+        std::shared_ptr<FSAccessor> accessor) override
+    {
+        debug("adding path ‘%s’ to remote host ‘%s’", info.path, host);
+
+        auto conn(connections->get());
+
+        conn->to
+            << cmdImportPaths
+            << 1;
+        conn->to(*nar);
+        conn->to
+            << exportMagic
+            << info.path
+            << info.references
+            << info.deriver
+            << 0
+            << 0;
+        conn->to.flush();
+
+        if (readInt(conn->from) != 1)
+            throw Error("failed to add path ‘%s’ to remote host ‘%s’", info.path, host);
+
+    }
+
+    void narFromPath(const Path & path, Sink & sink) override
+    {
+        auto conn(connections->get());
+
+        conn->to << cmdDumpStorePath << path;
+        conn->to.flush();
+
+        /* FIXME: inefficient. */
+        ParseSink parseSink; /* null sink; just parse the NAR */
+        SavingSourceAdapter savedNAR(conn->from);
+        parseDump(parseSink, savedNAR);
+        sink(savedNAR.s);
+    }
+
+    /* Unsupported methods. */
+    [[noreturn]] void unsupported()
+    {
+        throw Error("operation not supported on SSH stores");
+    }
+
+    PathSet queryAllValidPaths() override { unsupported(); }
+
+    void queryReferrers(const Path & path, PathSet & referrers) override
+    { unsupported(); }
+
+    PathSet queryDerivationOutputs(const Path & path) override
+    { unsupported(); }
+
+    StringSet queryDerivationOutputNames(const Path & path) override
+    { unsupported(); }
+
+    Path queryPathFromHashPart(const string & hashPart) override
+    { unsupported(); }
+
+    Path addToStore(const string & name, const Path & srcPath,
+        bool recursive, HashType hashAlgo,
+        PathFilter & filter, bool repair) override
+    { unsupported(); }
+
+    Path addTextToStore(const string & name, const string & s,
+        const PathSet & references, bool repair) override
+    { unsupported(); }
+
+    void buildPaths(const PathSet & paths, BuildMode buildMode) override
+    { unsupported(); }
+
+    BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+        BuildMode buildMode) override
+    { unsupported(); }
+
+    void ensurePath(const Path & path) override
+    { unsupported(); }
+
+    void addTempRoot(const Path & path) override
+    { unsupported(); }
+
+    void addIndirectRoot(const Path & path) override
+    { unsupported(); }
+
+    Roots findRoots() override
+    { unsupported(); }
+
+    void collectGarbage(const GCOptions & options, GCResults & results) override
+    { unsupported(); }
+
+    ref<FSAccessor> getFSAccessor()
+    { unsupported(); }
+
+    void addSignatures(const Path & storePath, const StringSet & sigs) override
+    { unsupported(); }
+
+    bool isTrusted() override
+    { return true; }
+
+};
+
+static RegisterStoreImplementation regStore([](
+    const std::string & uri, const Store::Params & params)
+    -> std::shared_ptr<Store>
+{
+    if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
+    return std::make_shared<LegacySSHStore>(std::string(uri, uriScheme.size()), params);
+});
+
+}
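
The RegisterStoreImplementation hook at the end is what makes the new scheme reachable through the generic store factory: openStore() asks each registered lambda in turn, and the one that recognises the prefix constructs the store. A usage sketch of the same call path the new nix-copy-closure below relies on; the host name is illustrative:

    #include "store-api.hh"

    using namespace nix;

    // The registration lambda above strips the "legacy-ssh://" prefix and
    // hands the remainder ("build1") to LegacySSHStore as the host name.
    ref<Store> openLegacyRemote()
    {
        return openStore("legacy-ssh://build1");
    }
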
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 612efde7bb8f..4c161cfb341f 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -1,4 +1,3 @@
-#include "config.h"
 #include "local-store.hh"
 #include "globals.hh"
 #include "archive.hh"
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index b71c7e905ff1..cf234e35d373 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include "util.hh"
 #include "local-store.hh"
 #include "globals.hh"
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 816d95ba6075..42c09ec7e0b6 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -37,6 +37,7 @@ template<class T> T readStorePaths(Store & store, Source & from)
 }
 
 template PathSet readStorePaths(Store & store, Source & from);
+template Paths readStorePaths(Store & store, Source & from);
 
 /* TODO: Separate these store impls into different files, give them better names */
 RemoteStore::RemoteStore(const Params & params, size_t maxConnections)
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index ccb71f1eefe5..ac083410b353 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -1,8 +1,6 @@
-#include "config.h"
-
 #if ENABLE_S3
-#if __linux__
 
+#include "s3.hh"
 #include "s3-binary-cache-store.hh"
 #include "nar-info.hh"
 #include "nar-info-disk-cache.hh"
@@ -20,15 +18,6 @@
 
 namespace nix {
 
-struct istringstream_nocopy : public std::stringstream
-{
-    istringstream_nocopy(const std::string & s)
-    {
-        rdbuf()->pubsetbuf(
-            (char *) s.data(), s.size());
-    }
-};
-
 struct S3Error : public Error
 {
     Aws::S3::S3Errors err;
@@ -62,21 +51,81 @@ static void initAWS()
     });
 }
 
+S3Helper::S3Helper()
+    : config(makeConfig())
+    , client(make_ref<Aws::S3::S3Client>(*config))
+{
+}
+
+ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig()
+{
+    initAWS();
+    auto res = make_ref<Aws::Client::ClientConfiguration>();
+    res->region = Aws::Region::US_EAST_1; // FIXME: make configurable
+    res->requestTimeoutMs = 600 * 1000;
+    return res;
+}
+
+S3Helper::DownloadResult S3Helper::getObject(
+    const std::string & bucketName, const std::string & key)
+{
+    debug("fetching ‘s3://%s/%s’...", bucketName, key);
+
+    auto request =
+        Aws::S3::Model::GetObjectRequest()
+        .WithBucket(bucketName)
+        .WithKey(key);
+
+    request.SetResponseStreamFactory([&]() {
+        return Aws::New<std::stringstream>("STRINGSTREAM");
+    });
+
+    DownloadResult res;
+
+    auto now1 = std::chrono::steady_clock::now();
+
+    try {
+
+        auto result = checkAws(fmt("AWS error fetching ‘%s’", key),
+            client->GetObject(request));
+
+        res.data = std::make_shared<std::string>(
+            dynamic_cast<std::stringstream &>(result.GetBody()).str());
+
+    } catch (S3Error & e) {
+        if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
+    }
+
+    auto now2 = std::chrono::steady_clock::now();
+
+    res.durationMs = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+
+    return res;
+}
+
+#if __linux__
+
+struct istringstream_nocopy : public std::stringstream
+{
+    istringstream_nocopy(const std::string & s)
+    {
+        rdbuf()->pubsetbuf(
+            (char *) s.data(), s.size());
+    }
+};
+
 struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
 {
     std::string bucketName;
 
-    ref<Aws::Client::ClientConfiguration> config;
-    ref<Aws::S3::S3Client> client;
-
     Stats stats;
 
+    S3Helper s3Helper;
+
     S3BinaryCacheStoreImpl(
         const Params & params, const std::string & bucketName)
         : S3BinaryCacheStore(params)
         , bucketName(bucketName)
-        , config(makeConfig())
-        , client(make_ref<Aws::S3::S3Client>(*config))
     {
         diskCache = getNarInfoDiskCache();
     }
@@ -86,15 +135,6 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         return "s3://" + bucketName;
     }
 
-    ref<Aws::Client::ClientConfiguration> makeConfig()
-    {
-        initAWS();
-        auto res = make_ref<Aws::Client::ClientConfiguration>();
-        res->region = Aws::Region::US_EAST_1; // FIXME: make configurable
-        res->requestTimeoutMs = 600 * 1000;
-        return res;
-    }
-
     void init() override
     {
         if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) {
@@ -102,7 +142,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
             /* Create the bucket if it doesn't already exists. */
             // FIXME: HeadBucket would be more appropriate, but doesn't return
             // an easily parsed 404 message.
-            auto res = client->GetBucketLocation(
+            auto res = s3Helper.client->GetBucketLocation(
                 Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName));
 
             if (!res.IsSuccess()) {
@@ -110,7 +150,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
                     throw Error(format("AWS error checking bucket ‘%s’: %s") % bucketName % res.GetError().GetMessage());
 
                 checkAws(format("AWS error creating bucket ‘%s’") % bucketName,
-                    client->CreateBucket(
+                    s3Helper.client->CreateBucket(
                         Aws::S3::Model::CreateBucketRequest()
                         .WithBucket(bucketName)
                         .WithCreateBucketConfiguration(
@@ -148,7 +188,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
     {
         stats.head++;
 
-        auto res = client->HeadObject(
+        auto res = s3Helper.client->HeadObject(
             Aws::S3::Model::HeadObjectRequest()
             .WithBucket(bucketName)
             .WithKey(path));
@@ -181,7 +221,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         auto now1 = std::chrono::steady_clock::now();
 
         auto result = checkAws(format("AWS error uploading ‘%s’") % path,
-            client->PutObject(request));
+            s3Helper.client->PutObject(request));
 
         auto now2 = std::chrono::steady_clock::now();
 
@@ -200,42 +240,18 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
             debug(format("fetching ‘s3://%1%/%2%’...") % bucketName % path);
 
-            auto request =
-                Aws::S3::Model::GetObjectRequest()
-                .WithBucket(bucketName)
-                .WithKey(path);
-
-            request.SetResponseStreamFactory([&]() {
-                return Aws::New<std::stringstream>("STRINGSTREAM");
-            });
-
             stats.get++;
 
-            try {
-
-                auto now1 = std::chrono::steady_clock::now();
-
-                auto result = checkAws(format("AWS error fetching ‘%s’") % path,
-                    client->GetObject(request));
-
-                auto now2 = std::chrono::steady_clock::now();
+            auto res = s3Helper.getObject(bucketName, path);
 
-                auto res = dynamic_cast<std::stringstream &>(result.GetBody()).str();
+            stats.getBytes += res.data ? res.data->size() : 0;
+            stats.getTimeMs += res.durationMs;
 
-                auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+            if (res.data)
+                printTalkative("downloaded ‘s3://%s/%s’ (%d bytes) in %d ms",
+                    bucketName, path, res.data->size(), res.durationMs);
 
-                printMsg(lvlTalkative, format("downloaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
-                    % bucketName % path % res.size() % duration);
-
-                stats.getBytes += res.size();
-                stats.getTimeMs += duration;
-
-                return std::make_shared<std::string>(res);
-
-            } catch (S3Error & e) {
-                if (e.err == Aws::S3::S3Errors::NO_SUCH_KEY) return std::shared_ptr<std::string>();
-                throw;
-            }
+            return res.data;
         });
     }
 
@@ -248,7 +264,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
             debug(format("listing bucket ‘s3://%s’ from key ‘%s’...") % bucketName % marker);
 
             auto res = checkAws(format("AWS error listing bucket ‘%s’") % bucketName,
-                client->ListObjects(
+                s3Helper.client->ListObjects(
                     Aws::S3::Model::ListObjectsRequest()
                     .WithBucket(bucketName)
                     .WithDelimiter("/")
@@ -283,7 +299,8 @@ static RegisterStoreImplementation regStore([](
     return store;
 });
 
+#endif
+
 }
 
 #endif
-#endif
diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh
new file mode 100644
index 000000000000..5d5d3475c449
--- /dev/null
+++ b/src/libstore/s3.hh
@@ -0,0 +1,33 @@
+#pragma once
+
+#if ENABLE_S3
+
+#include "ref.hh"
+
+namespace Aws { namespace Client { class ClientConfiguration; } }
+namespace Aws { namespace S3 { class S3Client; } }
+
+namespace nix {
+
+struct S3Helper
+{
+    ref<Aws::Client::ClientConfiguration> config;
+    ref<Aws::S3::S3Client> client;
+
+    S3Helper();
+
+    ref<Aws::Client::ClientConfiguration> makeConfig();
+
+    struct DownloadResult
+    {
+        std::shared_ptr<std::string> data;
+        unsigned int durationMs;
+    };
+
+    DownloadResult getObject(
+        const std::string & bucketName, const std::string & key);
+};
+
+}
+
+#endif
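
A short usage sketch of the new helper, assuming Nix was configured with S3 support (ENABLE_S3); bucket and key are illustrative, and a missing object comes back as a null data pointer because getObject() swallows NO_SUCH_KEY:

    #include "s3.hh"
    #include "logging.hh"

    void fetchExample()
    {
        nix::S3Helper s3;
        auto res = s3.getObject("example-cache", "nar/0f.nar.xz");
        if (res.data)
            printTalkative("fetched %d bytes in %d ms", res.data->size(), res.durationMs);
    }
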
diff --git a/src/nix-store/serve-protocol.hh b/src/libstore/serve-protocol.hh
index f8cc9a4b6ebe..f8cc9a4b6ebe 100644
--- a/src/nix-store/serve-protocol.hh
+++ b/src/libstore/serve-protocol.hh
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
index f5d0a270438d..6f1862afa899 100644
--- a/src/libstore/ssh-store.cc
+++ b/src/libstore/ssh-store.cc
@@ -7,11 +7,13 @@
 
 namespace nix {
 
+static std::string uriScheme = "ssh://";
+
 class SSHStore : public RemoteStore
 {
 public:
 
-    SSHStore(string uri, const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max());
+    SSHStore(string host, const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max());
 
     std::string getUri() override;
 
@@ -36,18 +38,21 @@ private:
 
     Pid sshMaster;
 
-    string uri;
+    string host;
 
     Path key;
+
+    bool compress;
 };
 
-SSHStore::SSHStore(string uri, const Params & params, size_t maxConnections)
+SSHStore::SSHStore(string host, const Params & params, size_t maxConnections)
     : Store(params)
     , RemoteStore(params, maxConnections)
     , tmpDir(createTempDir("", "nix", true, true, 0700))
     , socketPath((Path) tmpDir + "/ssh.sock")
-    , uri(std::move(uri))
+    , host(std::move(host))
     , key(get(params, "ssh-key", ""))
+    , compress(get(params, "compress", "") == "true")
 {
     /* open a connection and perform the handshake to verify all is well */
     connections->get();
@@ -55,7 +60,7 @@ SSHStore::SSHStore(string uri, const Params & params, size_t maxConnections)
 
 string SSHStore::getUri()
 {
-    return "ssh://" + uri;
+    return uriScheme + host;
 }
 
 class ForwardSource : public Source
@@ -93,9 +98,9 @@ ref<RemoteStore::Connection> SSHStore::openConnection()
         sshMaster = startProcess([&]() {
             restoreSignals();
             if (key.empty())
-                execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), uri.c_str(), NULL);
+                execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), host.c_str(), NULL);
             else
-                execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), "-i", key.c_str(), uri.c_str(), NULL);
+                execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), "-i", key.c_str(), host.c_str(), NULL);
             throw SysError("starting ssh master");
         });
     }
@@ -109,7 +114,7 @@ ref<RemoteStore::Connection> SSHStore::openConnection()
             throw SysError("duping over STDIN");
         if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
             throw SysError("duping over STDOUT");
-        execlp("ssh", "ssh", "-S", socketPath.c_str(), uri.c_str(), "nix-daemon", "--stdio", NULL);
+        execlp("ssh", "ssh", "-S", socketPath.c_str(), host.c_str(), "nix-daemon", "--stdio", NULL);
         throw SysError("executing nix-daemon --stdio over ssh");
     });
     in.readSide = -1;
@@ -126,8 +131,8 @@ static RegisterStoreImplementation regStore([](
     const std::string & uri, const Store::Params & params)
     -> std::shared_ptr<Store>
 {
-    if (std::string(uri, 0, 6) != "ssh://") return 0;
-    return std::make_shared<SSHStore>(uri.substr(6), params);
+    if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
+    return std::make_shared<SSHStore>(std::string(uri, uriScheme.size()), params);
 });
 
 }
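
Both ‘ssh-key’ and the new ‘compress’ flag arrive through the Store::Params map. A small sketch of how those lookups behave, assuming Params is the plain string map that the get() calls above suggest; the values are illustrative:

    #include "store-api.hh"
    #include "util.hh"

    void paramsExample()
    {
        using namespace nix;
        Store::Params params{
            { "ssh-key", "/var/lib/nix/id_buildfarm" },
            { "compress", "true" }
        };
        auto key      = get(params, "ssh-key", "");            // "/var/lib/nix/id_buildfarm"
        auto compress = get(params, "compress", "") == "true"; // true
        (void) key; (void) compress;
    }
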
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index a42d11834053..b5934a0d1232 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -5,6 +5,7 @@
 #include "nar-info-disk-cache.hh"
 #include "thread-pool.hh"
 #include "json.hh"
+#include "derivations.hh"
 
 #include <future>
 
@@ -285,6 +286,19 @@ bool Store::isValidPath(const Path & storePath)
 }
 
 
+/* Default implementation for stores that only implement
+   queryPathInfoUncached(). */
+bool Store::isValidPathUncached(const Path & path)
+{
+    try {
+        queryPathInfo(path);
+        return true;
+    } catch (InvalidPath &) {
+        return false;
+    }
+}
+
+
 ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
 {
     std::promise<ref<ValidPathInfo>> promise;
@@ -516,6 +530,15 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
     StringSink sink;
     srcStore->narFromPath({storePath}, sink);
 
+    if (srcStore->isTrusted())
+        dontCheckSigs = true;
+
+    if (!info->narHash && dontCheckSigs) {
+        auto info2 = make_ref<ValidPathInfo>(*info);
+        info2->narHash = hashString(htSHA256, *sink.s);
+        info = info2;
+    }
+
     dstStore->addToStore(*info, sink.s, repair, dontCheckSigs);
 }
 
@@ -758,8 +781,27 @@ std::list<ref<Store>> getDefaultSubstituters()
 }
 
 
-void copyPaths(ref<Store> from, ref<Store> to, const Paths & storePaths)
-{
+void copyPaths(ref<Store> from, ref<Store> to, const Paths & storePaths, bool substitute)
+{
+    if (substitute) {
+        /* Filter out .drv files (we don't want to build anything). */
+        PathSet paths2;
+        for (auto & path : storePaths)
+            if (!isDerivation(path)) paths2.insert(path);
+        unsigned long long downloadSize, narSize;
+        PathSet willBuild, willSubstitute, unknown;
+        to->queryMissing(PathSet(paths2.begin(), paths2.end()),
+            willBuild, willSubstitute, unknown, downloadSize, narSize);
+        /* FIXME: should use ensurePath(), but it only
+           does one path at a time. */
+        if (!willSubstitute.empty())
+            try {
+                to->buildPaths(willSubstitute);
+            } catch (Error & e) {
+                printMsg(lvlError, format("warning: %1%") % e.msg());
+            }
+    }
+
     std::string copiedLabel = "copied";
 
     logger->setExpected(copiedLabel, storePaths.size());
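
The new ‘substitute’ argument defaults to false, so existing callers of copyPaths() are unaffected; passing true lets the destination store try its own substituters before anything is streamed from the source. A minimal sketch of the new call form:

    #include "store-api.hh"

    // The destination substitutes what it can before paths are copied;
    // this is what nix-copy-closure --use-substitutes (below) relies on.
    void copyWithSubstitutes(nix::ref<nix::Store> from, nix::ref<nix::Store> to,
        const nix::Paths & paths)
    {
        nix::copyPaths(from, to, paths, true /* substitute */);
    }
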
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 3fee999072fa..d03e70849f93 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -320,7 +320,7 @@ public:
 
 protected:
 
-    virtual bool isValidPathUncached(const Path & path) = 0;
+    virtual bool isValidPathUncached(const Path & path);
 
 public:
 
@@ -360,7 +360,7 @@ public:
        output.  (Note that the result of `queryDeriver()' is the
        derivation that was actually used to produce `path', which may
        not exist anymore.) */
-    virtual PathSet queryValidDerivers(const Path & path) = 0;
+    virtual PathSet queryValidDerivers(const Path & path) { return {}; };
 
     /* Query the outputs of the derivation denoted by `path'. */
     virtual PathSet queryDerivationOutputs(const Path & path) = 0;
@@ -373,13 +373,13 @@ public:
     virtual Path queryPathFromHashPart(const string & hashPart) = 0;
 
     /* Query which of the given paths have substitutes. */
-    virtual PathSet querySubstitutablePaths(const PathSet & paths) = 0;
+    virtual PathSet querySubstitutablePaths(const PathSet & paths) { return {}; };
 
     /* Query substitute info (i.e. references, derivers and download
        sizes) of a set of paths.  If a path does not have substitute
        info, it's omitted from the resulting ‘infos’ map. */
     virtual void querySubstitutablePathInfos(const PathSet & paths,
-        SubstitutablePathInfos & infos) = 0;
+        SubstitutablePathInfos & infos) { return; };
 
     virtual bool wantMassQuery() { return false; }
 
@@ -454,7 +454,7 @@ public:
          permanent root and sees our's.
 
        In either case the permanent root is seen by the collector. */
-    virtual void syncWithGC() = 0;
+    virtual void syncWithGC() { };
 
     /* Find the roots of the garbage collector.  Each root is a pair
        (link, storepath) where `link' is the path of the symlink
@@ -485,11 +485,11 @@ public:
 
     /* Optimise the disk space usage of the Nix store by hard-linking files
        with the same contents. */
-    virtual void optimiseStore() = 0;
+    virtual void optimiseStore() { };
 
     /* Check the integrity of the Nix store.  Returns true if errors
        remain. */
-    virtual bool verifyStore(bool checkContents, bool repair) = 0;
+    virtual bool verifyStore(bool checkContents, bool repair) { return false; };
 
     /* Return an object to access files in the Nix store. */
     virtual ref<FSAccessor> getFSAccessor() = 0;
@@ -562,6 +562,10 @@ public:
 
     const Stats & getStats();
 
+    /* Whether paths from this store can be imported even if they
+       lack a signature. */
+    virtual bool isTrusted() { return false; }
+
 protected:
 
     Stats stats;
@@ -639,7 +643,7 @@ void removeTempRoots();
 ref<Store> openStore(const std::string & uri = getEnv("NIX_REMOTE"));
 
 
-void copyPaths(ref<Store> from, ref<Store> to, const Paths & storePaths);
+void copyPaths(ref<Store> from, ref<Store> to, const Paths & storePaths, bool substitute = false);
 
 enum StoreType {
     tDaemon,
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index fbba7f853f95..e0e6f5dfd73c 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include <cerrno>
 #include <algorithm>
 #include <vector>
diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc
index aa50fceb9e3e..f447c80c5d81 100644
--- a/src/libutil/hash.cc
+++ b/src/libutil/hash.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include <iostream>
 #include <cstring>
 
diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh
index 3e6c4b54853c..3f83664794f7 100644
--- a/src/libutil/logging.hh
+++ b/src/libutil/logging.hh
@@ -78,6 +78,7 @@ extern Verbosity verbosity; /* suppress msgs > this */
 
 #define printError(args...) printMsg(lvlError, args)
 #define printInfo(args...) printMsg(lvlInfo, args)
+#define printTalkative(args...) printMsg(lvlTalkative, args)
 #define debug(args...) printMsg(lvlDebug, args)
 #define vomit(args...) printMsg(lvlVomit, args)
 
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index f12f02543bc0..5646d08c1314 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -139,6 +139,21 @@ struct StringSource : Source
 };
 
 
+/* Adapter class of a Source that saves all data read to `s'. */
+struct SavingSourceAdapter : Source
+{
+    Source & orig;
+    string s;
+    SavingSourceAdapter(Source & orig) : orig(orig) { }
+    size_t read(unsigned char * data, size_t len)
+    {
+        size_t n = orig.read(data, len);
+        s.append((const char *) data, n);
+        return n;
+    }
+};
+
+
 void writePadding(size_t len, Sink & sink);
 void writeString(const unsigned char * buf, size_t len, Sink & sink);
 
diff --git a/src/libutil/types.hh b/src/libutil/types.hh
index b9a93d27d2ad..97d79af9b5d6 100644
--- a/src/libutil/types.hh
+++ b/src/libutil/types.hh
@@ -1,6 +1,5 @@
 #pragma once
 
-#include "config.h"
 
 #include "ref.hh"
 
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 6c4c5c969d86..336599368009 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -1,5 +1,3 @@
-#include "config.h"
-
 #include "util.hh"
 #include "affinity.hh"
 #include "sync.hh"
diff --git a/src/nix-copy-closure/local.mk b/src/nix-copy-closure/local.mk
new file mode 100644
index 000000000000..42bb34dd8201
--- /dev/null
+++ b/src/nix-copy-closure/local.mk
@@ -0,0 +1,7 @@
+programs += nix-copy-closure
+
+nix-copy-closure_DIR := $(d)
+
+nix-copy-closure_LIBS = libmain libutil libformat libstore
+
+nix-copy-closure_SOURCES := $(d)/nix-copy-closure.cc
diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc
new file mode 100755
index 000000000000..4340443b5cc2
--- /dev/null
+++ b/src/nix-copy-closure/nix-copy-closure.cc
@@ -0,0 +1,59 @@
+#include "shared.hh"
+#include "store-api.hh"
+
+using namespace nix;
+
+int main(int argc, char ** argv)
+{
+    return handleExceptions(argv[0], [&]() {
+        initNix();
+
+        auto gzip = false;
+        auto toMode = true;
+        auto includeOutputs = false;
+        auto dryRun = false;
+        auto useSubstitutes = false;
+        std::string sshHost;
+        PathSet storePaths;
+
+        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+            if (*arg == "--help")
+                showManPage("nix-copy-closure");
+            else if (*arg == "--version")
+                printVersion("nix-copy-closure");
+            else if (*arg == "--gzip" || *arg == "--bzip2" || *arg == "--xz") {
+                if (*arg != "--gzip")
+                    printMsg(lvlError, format("Warning: ‘%1%’ is not implemented, falling back to gzip") % *arg);
+                gzip = true;
+            } else if (*arg == "--from")
+                toMode = false;
+            else if (*arg == "--to")
+                toMode = true;
+            else if (*arg == "--include-outputs")
+                includeOutputs = true;
+            else if (*arg == "--show-progress")
+                printMsg(lvlError, "Warning: ‘--show-progress’ is not implemented");
+            else if (*arg == "--dry-run")
+                dryRun = true;
+            else if (*arg == "--use-substitutes" || *arg == "-s")
+                useSubstitutes = true;
+            else if (sshHost.empty())
+                sshHost = *arg;
+            else
+                storePaths.insert(*arg);
+            return true;
+        });
+
+        if (sshHost.empty())
+            throw UsageError("no host name specified");
+
+        auto remoteUri = "legacy-ssh://" + sshHost + (gzip ? "?compress=true" : "");
+        auto to = toMode ? openStore(remoteUri) : openStore();
+        auto from = toMode ? openStore() : openStore(remoteUri);
+
+        PathSet closure;
+        from->computeFSClosure(storePaths, closure, false, includeOutputs);
+
+        copyPaths(from, to, Paths(closure.begin(), closure.end()), useSubstitutes);
+    });
+}
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
index 90a7301873c4..3b43ddfa16d5 100644
--- a/src/nix-daemon/nix-daemon.cc
+++ b/src/nix-daemon/nix-daemon.cc
@@ -23,6 +23,7 @@
 #include <pwd.h>
 #include <grp.h>
 #include <fcntl.h>
+#include <limits.h>
 
 #if __APPLE__ || __FreeBSD__
 #include <sys/ucred.h>
@@ -168,21 +169,6 @@ struct RetrieveRegularNARSink : ParseSink
 };
 
 
-/* Adapter class of a Source that saves all data read to `s'. */
-struct SavingSourceAdapter : Source
-{
-    Source & orig;
-    string s;
-    SavingSourceAdapter(Source & orig) : orig(orig) { }
-    size_t read(unsigned char * data, size_t len)
-    {
-        size_t n = orig.read(data, len);
-        s.append((const char *) data, n);
-        return n;
-    }
-};
-
-
 static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVersion,
     Source & from, Sink & to, unsigned int op)
 {
@@ -982,14 +968,14 @@ int main(int argc, char * * argv)
                     if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1)
                         throw SysError("waiting for data from client or server");
                     if (FD_ISSET(s, &fds)) {
-                        auto res = splice(s, nullptr, STDOUT_FILENO, nullptr, SIZE_MAX, SPLICE_F_MOVE);
+                        auto res = splice(s, nullptr, STDOUT_FILENO, nullptr, SSIZE_MAX, SPLICE_F_MOVE);
                         if (res == -1)
                             throw SysError("splicing data from daemon socket to stdout");
                         else if (res == 0)
                             throw EndOfFile("unexpected EOF from daemon socket");
                     }
                     if (FD_ISSET(STDIN_FILENO, &fds)) {
-                        auto res = splice(STDIN_FILENO, nullptr, s, nullptr, SIZE_MAX, SPLICE_F_MOVE);
+                        auto res = splice(STDIN_FILENO, nullptr, s, nullptr, SSIZE_MAX, SPLICE_F_MOVE);
                         if (res == -1)
                             throw SysError("splicing data from stdin to daemon socket");
                         else if (res == 0)
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index c1e6afef0e50..0aabe66c5626 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -922,9 +922,7 @@ static void opServe(Strings opFlags, Strings opArgs)
 
             case cmdExportPaths: {
                 readInt(in); // obsolete
-                Paths sorted = store->topoSortPaths(readStorePaths<PathSet>(*store, in));
-                reverse(sorted.begin(), sorted.end());
-                store->exportPaths(sorted, out);
+                store->exportPaths(readStorePaths<Paths>(*store, in), out);
                 break;
             }