author     Eelco Dolstra <eelco.dolstra@logicblox.com>  2016-09-14 14:00 +0200
committer  Eelco Dolstra <eelco.dolstra@logicblox.com>  2016-09-14 14:36 +0200
commit     90ad02bf626b885a5dd8967894e2eafc953bdf92 (patch)
tree       7af8764fe95e2093f9c99fc5827a2f9ffde8dd5d /src/libstore/http-binary-cache-store.cc
parent     a75d11a7e6984b3df15da9677fbd49ee8de7a9c3 (diff)
Enable HTTP/2 support
The binary cache store can now use HTTP/2 to do lookups. This is much
more efficient than HTTP/1.1 due to multiplexing: we can issue many
requests in parallel over a single TCP connection. Thus it's no longer
necessary to use a bunch of concurrent TCP connections (25 by
default).
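
A rough sketch of what the multiplexing setup looks like against plain
libcurl (illustrative only, not the actual Downloader code; the URLs and
the throwaway main() are made up for the example):

    #include <curl/curl.h>
    #include <string>

    int main()
    {
        curl_global_init(CURL_GLOBAL_ALL);
        CURLM * multi = curl_multi_init();

        /* Let curl multiplex concurrent transfers over a single HTTP/2
           connection instead of opening one TCP connection per request. */
        curl_multi_setopt(multi, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
        curl_multi_setopt(multi, CURLMOPT_MAX_HOST_CONNECTIONS, 1L);

        for (std::string path : {"/a.narinfo", "/b.narinfo"}) {
            CURL * req = curl_easy_init();
            curl_easy_setopt(req, CURLOPT_URL,
                ("https://cache.nixos.org" + path).c_str());
            /* Ask for HTTP/2; curl falls back to HTTP/1.1 if the server
               doesn't support it. Responses go to stdout by default. */
            curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_0);
            curl_multi_add_handle(multi, req);
        }

        /* Drive all transfers from one place; they share the connection. */
        int running = 0;
        do {
            curl_multi_perform(multi, &running);
            curl_multi_wait(multi, nullptr, 0, 100, nullptr);
        } while (running);

        /* Drain completion messages and clean up the easy handles. */
        int msgsLeft = 0;
        while (CURLMsg * m = curl_multi_info_read(multi, &msgsLeft)) {
            if (m->msg == CURLMSG_DONE) {
                curl_multi_remove_handle(multi, m->easy_handle);
                curl_easy_cleanup(m->easy_handle);
            }
        }

        curl_multi_cleanup(multi);
        curl_global_cleanup();
    }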

For example, downloading 802 .narinfo files from
https://cache.nixos.org/, using a single TCP connection, takes 11.8s
with HTTP/1.1, but only 0.61s with HTTP/2.

This did require a fairly substantial rewrite of the Downloader class
to use the curl multi interface, because otherwise curl wouldn't be
able to do multiplexing for us. As a bonus, we get connection reuse
even with HTTP/1.1. All downloads are now handled by a single worker
thread. Clients call Downloader::enqueueDownload() to tell the worker
thread to start the download, getting a std::future to the result.
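
The enqueue-and-future pattern in a minimal, self-contained sketch (the
Worker/Item names and the fake download step are illustrative, not the
real Downloader):

    #include <condition_variable>
    #include <future>
    #include <mutex>
    #include <queue>
    #include <string>
    #include <thread>

    struct Item {
        std::string url;
        std::promise<std::string> promise;
    };

    class Worker
    {
        std::queue<Item> items;
        std::mutex mutex;
        std::condition_variable wakeup;
        bool quit = false;
        std::thread thread{[this]() { run(); }};

        void run()
        {
            for (;;) {
                Item item;
                {
                    std::unique_lock<std::mutex> lock(mutex);
                    wakeup.wait(lock, [&]() { return quit || !items.empty(); });
                    if (quit && items.empty()) return;
                    item = std::move(items.front());
                    items.pop();
                }
                /* The real worker would drive the curl multi loop here;
                   this sketch just fabricates a result. */
                item.promise.set_value("contents of " + item.url);
            }
        }

    public:
        /* Hand a download to the worker thread; the caller gets a future
           that becomes ready when the transfer finishes. */
        std::future<std::string> enqueue(std::string url)
        {
            Item item{std::move(url), {}};
            auto fut = item.promise.get_future();
            {
                std::lock_guard<std::mutex> lock(mutex);
                items.push(std::move(item));
            }
            wakeup.notify_one();
            return fut;
        }

        ~Worker()
        {
            {
                std::lock_guard<std::mutex> lock(mutex);
                quit = true;
            }
            wakeup.notify_one();
            thread.join();
        }
    };

A client then does something like auto fut = worker.enqueue(url) and only
blocks on fut.get() when it actually needs the data, so many downloads can
be in flight on the single worker thread at once.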
Diffstat (limited to 'src/libstore/http-binary-cache-store.cc')
-rw-r--r--  src/libstore/http-binary-cache-store.cc | 26
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index bdcd2fd3998b..91ee6fcb69e2 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -13,17 +13,12 @@ private:
 
     Path cacheUri;
 
-    Pool<Downloader> downloaders;
-
 public:
 
     HttpBinaryCacheStore(
         const Params & params, const Path & _cacheUri)
         : BinaryCacheStore(params)
         , cacheUri(_cacheUri)
-        , downloaders(
-            std::numeric_limits<size_t>::max(),
-            []() { return makeDownloader(); })
     {
         if (cacheUri.back() == '/')
             cacheUri.pop_back();
@@ -54,12 +49,11 @@ protected:
     bool fileExists(const std::string & path) override
     {
         try {
-            auto downloader(downloaders.get());
-            DownloadOptions options;
-            options.showProgress = DownloadOptions::no;
-            options.head = true;
-            options.tries = 5;
-            downloader->download(cacheUri + "/" + path, options);
+            DownloadRequest request(cacheUri + "/" + path);
+            request.showProgress = DownloadRequest::no;
+            request.head = true;
+            request.tries = 5;
+            getDownloader()->download(request);
             return true;
         } catch (DownloadError & e) {
             /* S3 buckets return 403 if a file doesn't exist and the
@@ -77,13 +71,11 @@ protected:
 
     std::shared_ptr<std::string> getFile(const std::string & path) override
     {
-        auto downloader(downloaders.get());
-        DownloadOptions options;
-        options.showProgress = DownloadOptions::no;
-        options.tries = 5;
-        options.baseRetryTimeMs = 1000;
+        DownloadRequest request(cacheUri + "/" + path);
+        request.showProgress = DownloadRequest::no;
+        request.tries = 8;
         try {
-            return downloader->download(cacheUri + "/" + path, options).data;
+            return getDownloader()->download(request).data;
         } catch (DownloadError & e) {
             if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
                 return 0;