From bc7e3a4dd62baa99dbd1985d329a2a806d59a422 Mon Sep 17 00:00:00 2001
From: AmineChikhaoui
Date: Tue, 6 Feb 2018 22:42:02 +0100
Subject: support multi-threaded xz encoder

This might be particularly useful in the case of Hydra, where the overhead
of single-threaded encoding is more noticeable: e.g. most of the time spent
in "Sending inputs"/"Receiving outputs" is due to compression, while the
actual upload to the binary cache seems to be negligible.
---
 src/libutil/compression.cc | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
index 5e2631ba3408..aad6e9b5bc1e 100644
--- a/src/libutil/compression.cc
+++ b/src/libutil/compression.cc
@@ -191,8 +191,13 @@ struct XzSink : CompressionSink
 
     XzSink(Sink & nextSink) : nextSink(nextSink)
     {
-        lzma_ret ret = lzma_easy_encoder(
-            &strm, 6, LZMA_CHECK_CRC64);
+        lzma_mt mt_options = {};
+        mt_options.flags = 0;
+        mt_options.timeout = 300;
+        mt_options.check = LZMA_CHECK_CRC64;
+        mt_options.threads = lzma_cputhreads();
+        lzma_ret ret = lzma_stream_encoder_mt(
+            &strm, &mt_options);
         if (ret != LZMA_OK)
             throw CompressionError("unable to initialise lzma encoder");
         // FIXME: apply the x86 BCJ filter?
--
cgit 1.4.1

From 9d1e22f743ea9ca232d39d498b675d7e5ac1ca87 Mon Sep 17 00:00:00 2001
From: AmineChikhaoui
Date: Wed, 7 Feb 2018 11:18:55 +0100
Subject: set block size to 0 to let the lzma lib choose the right one

Also add some comments about possible improvements w.r.t. memory usage and
threading.
---
 src/libutil/compression.cc | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
index aad6e9b5bc1e..a36c4405f8bb 100644
--- a/src/libutil/compression.cc
+++ b/src/libutil/compression.cc
@@ -193,9 +193,14 @@ struct XzSink : CompressionSink
     {
         lzma_mt mt_options = {};
         mt_options.flags = 0;
-        mt_options.timeout = 300;
+        mt_options.timeout = 300; // Using the same setting as the xz cmd line
         mt_options.check = LZMA_CHECK_CRC64;
         mt_options.threads = lzma_cputhreads();
+        mt_options.block_size = 0;
+        if (mt_options.threads == 0)
+            mt_options.threads = 1;
+        // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the
+        // number of threads.
         lzma_ret ret = lzma_stream_encoder_mt(
             &strm, &mt_options);
         if (ret != LZMA_OK)
--
cgit 1.4.1
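For reference, the encoder setup these two patches arrive at can be written as a
small standalone function. This is only a sketch, not part of the patches: the
preset and filters fields are set explicitly here (as a later patch in this
series also does), and the thread cap based on lzma_stream_encoder_mt_memusage()
merely illustrates the FIXME above — the choice of "half of physical memory" as
the limit is an assumption made for the example.

    // Sketch: initialise a multi-threaded xz encoder roughly as in the
    // patches above, optionally shrinking the thread count until the
    // encoder's estimated memory usage fits under a chosen limit.
    #include <lzma.h>

    static lzma_ret initXzEncoderMt(lzma_stream * strm)
    {
        lzma_mt mt_options = {};
        mt_options.flags = 0;
        mt_options.timeout = 300;                 // same value the xz tool uses
        mt_options.preset = LZMA_PRESET_DEFAULT;
        mt_options.filters = NULL;                // use the preset, no custom filter chain
        mt_options.check = LZMA_CHECK_CRC64;
        mt_options.block_size = 0;                // let liblzma pick the block size
        mt_options.threads = lzma_cputhreads();
        if (mt_options.threads == 0)              // lzma_cputhreads() can fail
            mt_options.threads = 1;

        // Illustration of the FIXME above: reduce the thread count while the
        // estimated memory usage exceeds half of the physical memory.
        uint64_t limit = lzma_physmem() / 2;
        while (mt_options.threads > 1
               && lzma_stream_encoder_mt_memusage(&mt_options) > limit)
            mt_options.threads--;

        return lzma_stream_encoder_mt(strm, &mt_options);
    }

Note that lzma_stream_encoder_mt_memusage() returns UINT64_MAX on error, in
which case the loop above simply falls back to a single thread.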
From 55ecdfe2a83a161c27d6497733cdc60fa112a43d Mon Sep 17 00:00:00 2001
From: AmineChikhaoui
Date: Wed, 7 Feb 2018 17:54:08 +0100
Subject: make multi-threaded compression configurable and use single-threaded by default
---
 src/libstore/binary-cache-store.cc |  2 +-
 src/libstore/globals.hh            |  3 +++
 src/libutil/compression.cc         | 42 +++++++++++++++++++++++---------------
 src/libutil/compression.hh         |  4 ++--
 4 files changed, 31 insertions(+), 20 deletions(-)

diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index ab971dd8b6d9..d34adbd60c71 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -149,7 +149,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::string>
     narInfo->compression = compression;
 
     auto now1 = std::chrono::steady_clock::now();
-    auto narCompressed = compress(compression, *nar);
+    auto narCompressed = compress(compression, *nar, settings.parallelCompression);
     auto now2 = std::chrono::steady_clock::now();
     narInfo->fileHash = hashString(htSHA256, *narCompressed);
     narInfo->fileSize = narCompressed->size();
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 20ac8fe4e9ae..aafec2ea269b 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -174,6 +174,9 @@ public:
         "Whether to compress logs.",
         {"build-compress-log"}};
 
+    Setting<bool> parallelCompression{this, false, "parallel-compression",
+        "Whether to enable parallel compression, only supported with xz currently"};
+
     Setting<unsigned long> maxLogSize{this, 0, "max-build-log-size",
         "Maximum number of bytes a builder can write to stdout/stderr "
         "before being killed (0 means no limit).",
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
index a36c4405f8bb..ed15761b32a2 100644
--- a/src/libutil/compression.cc
+++ b/src/libutil/compression.cc
@@ -151,10 +151,10 @@ static ref<std::string> decompressBrotli(const std::string & in)
 #endif // HAVE_BROTLI
 }
 
-ref<std::string> compress(const std::string & method, const std::string & in)
+ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel)
 {
     StringSink ssink;
-    auto sink = makeCompressionSink(method, ssink);
+    auto sink = makeCompressionSink(method, ssink, parallel);
     (*sink)(in);
     sink->finish();
     return ssink.s;
 }
@@ -189,20 +189,28 @@ struct XzSink : CompressionSink
     lzma_stream strm = LZMA_STREAM_INIT;
     bool finished = false;
 
-    XzSink(Sink & nextSink) : nextSink(nextSink)
+    XzSink(Sink & nextSink, const bool parallel) : nextSink(nextSink)
     {
-        lzma_mt mt_options = {};
-        mt_options.flags = 0;
-        mt_options.timeout = 300; // Using the same setting as the xz cmd line
-        mt_options.check = LZMA_CHECK_CRC64;
-        mt_options.threads = lzma_cputhreads();
-        mt_options.block_size = 0;
-        if (mt_options.threads == 0)
-            mt_options.threads = 1;
-        // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the
-        // number of threads.
-        lzma_ret ret = lzma_stream_encoder_mt(
-            &strm, &mt_options);
+        lzma_ret ret;
+        if (parallel) {
+            lzma_mt mt_options = {};
+            mt_options.flags = 0;
+            mt_options.timeout = 300; // Using the same setting as the xz cmd line
+            mt_options.preset = LZMA_PRESET_DEFAULT;
+            mt_options.filters = NULL;
+            mt_options.check = LZMA_CHECK_CRC64;
+            mt_options.threads = lzma_cputhreads();
+            mt_options.block_size = 0;
+            if (mt_options.threads == 0)
+                mt_options.threads = 1;
+            // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the
+            // number of threads.
+            ret = lzma_stream_encoder_mt(
+                &strm, &mt_options);
+        } else
+            ret = lzma_easy_encoder(
+                &strm, 6, LZMA_CHECK_CRC64);
+
         if (ret != LZMA_OK)
             throw CompressionError("unable to initialise lzma encoder");
         // FIXME: apply the x86 BCJ filter?
@@ -459,12 +467,12 @@ struct BrotliSink : CompressionSink
 };
 #endif // HAVE_BROTLI
 
-ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink)
+ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel)
 {
     if (method == "none")
         return make_ref<NoneSink>(nextSink);
     else if (method == "xz")
-        return make_ref<XzSink>(nextSink);
+        return make_ref<XzSink>(nextSink, parallel);
     else if (method == "bzip2")
         return make_ref<BZip2Sink>(nextSink);
     else if (method == "br")
diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh
index e3e6f5a99303..a0d7530d74fc 100644
--- a/src/libutil/compression.hh
+++ b/src/libutil/compression.hh
@@ -8,7 +8,7 @@
 
 namespace nix {
 
-ref<std::string> compress(const std::string & method, const std::string & in);
+ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel = false);
 
 ref<std::string> decompress(const std::string & method, const std::string & in);
 
@@ -17,7 +17,7 @@ struct CompressionSink : BufferedSink
     virtual void finish() = 0;
 };
 
-ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink);
+ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel = false);
 
 MakeError(UnknownCompressionMethod, Error);
--
cgit 1.4.1
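After this change the compression API takes an optional parallel flag, defaulting
to false so existing callers keep the single-threaded behaviour. A rough,
hypothetical caller is sketched below; only compress() and makeCompressionSink()
with the extra flag come from the patch — the surrounding function and include
paths are assumptions made for the example.

    // Hypothetical caller of the updated libutil API.
    #include "compression.hh"
    #include "serialise.hh"   // StringSink

    using namespace nix;

    void compressNar(const std::string & nar)
    {
        // One-shot helper; omitting the flag keeps the old behaviour.
        ref<std::string> xzSingle = compress("xz", nar);

        // Opt in to the multi-threaded xz encoder.
        ref<std::string> xzParallel = compress("xz", nar, true);

        // Streaming variant: push data through a compression sink that
        // forwards the compressed bytes to another sink.
        StringSink out;
        auto sink = makeCompressionSink("xz", out, true);
        (*sink)(nar);
        sink->finish();
        // *out.s now holds the xz-compressed data.
    }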
From 47ad88099b1cd2b19bdf3eef35c21baf35cc7e82 Mon Sep 17 00:00:00 2001
From: AmineChikhaoui
Date: Wed, 7 Feb 2018 21:06:11 +0100
Subject: move the parallel-compression setting to binary-cache-store

The setting can now be given in the store URL, e.g.
s3://nix-cache?parallel-compression=1, instead of in nix.conf.
---
 src/libstore/binary-cache-store.cc | 2 +-
 src/libstore/binary-cache-store.hh | 2 ++
 src/libstore/globals.hh            | 3 ---
 3 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index d34adbd60c71..d1b278b8efbe 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -149,7 +149,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::string>
     narInfo->compression = compression;
 
     auto now1 = std::chrono::steady_clock::now();
-    auto narCompressed = compress(compression, *nar, settings.parallelCompression);
+    auto narCompressed = compress(compression, *nar, parallelCompression);
     auto now2 = std::chrono::steady_clock::now();
     narInfo->fileHash = hashString(htSHA256, *narCompressed);
     narInfo->fileSize = narCompressed->size();
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index 8492ff600eba..e20b968442b7 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -19,6 +19,8 @@ public:
     const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
     const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"};
     const Setting<Path> localNarCache{this, "", "local-nar-cache", "path to a local cache of NARs"};
+    const Setting<bool> parallelCompression{this, false, "parallel-compression",
+        "enable multi-threading compression, available for xz only currently"};
 
 private:
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index aafec2ea269b..20ac8fe4e9ae 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -174,9 +174,6 @@ public:
         "Whether to compress logs.",
         {"build-compress-log"}};
 
-    Setting<bool> parallelCompression{this, false, "parallel-compression",
-        "Whether to enable parallel compression, only supported with xz currently"};
-
     Setting<unsigned long> maxLogSize{this, 0, "max-build-log-size",
         "Maximum number of bytes a builder can write to stdout/stderr "
         "before being killed (0 means no limit).",
--
cgit 1.4.1
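With the final patch the knob is a per-store option of the binary cache store
rather than a global nix.conf setting, so it is enabled through the store URL.
For illustration only — the command and paths below are assumptions, only the
parallel-compression=1 query parameter comes from the patch — copying paths to
an S3 binary cache with multi-threaded xz compression would look roughly like:

    nix copy --to 's3://nix-cache?parallel-compression=1' /nix/store/<path>

For Hydra, the same URL would be used wherever the binary cache store URI is
configured (e.g. a store_uri of s3://nix-cache?parallel-compression=1 in
hydra.conf, assuming Hydra passes the store URI through unchanged).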