From d127f9bd0e7b9b2e0df2de8a2227f77c0907468d Mon Sep 17 00:00:00 2001
From: Vincent Ambo
Date: Wed, 18 May 2022 17:39:39 +0200
Subject: chore(3p/nix): unvendor tvix 0.1

Nothing is using this now, and we'll likely never pick this up again,
but we learned a lot in the process. Every now and then this breaks in
some bizarre way on channel bumps, and it's just a waste of time to
maintain it.

Change-Id: Idcf2f5acd4ca7070ce18d7149cbfc0d967dc0a44
Reviewed-on: https://cl.tvl.fyi/c/depot/+/5632
Tested-by: BuildkiteCI
Reviewed-by: sterni
Reviewed-by: lukegb
Autosubmit: tazjin
---
 third_party/nix/.clang-format | 11 - third_party/nix/.clang-tidy | 4 - third_party/nix/.dir-locals.el | 1 - third_party/nix/.github/ISSUE_TEMPLATE.md | 27 - third_party/nix/.gitignore | 119 - third_party/nix/.skip-subtree | 1 - third_party/nix/.travis.yml | 2 - third_party/nix/.version | 1 - third_party/nix/CMakeLists.txt | 77 - third_party/nix/COPYING | 504 - third_party/nix/OWNERS | 5 - third_party/nix/README.md | 179 - third_party/nix/clangd.nix | 30 - third_party/nix/config.h.in | 130 - third_party/nix/config/config.sub | 1818 -- third_party/nix/config/install-sh | 527 - third_party/nix/contrib/stack-collapse.py | 38 - third_party/nix/corepkgs/buildenv.nix | 27 - third_party/nix/corepkgs/config.nix.in | 29 - third_party/nix/corepkgs/derivation.nix | 30 - third_party/nix/corepkgs/fetchurl.nix | 46 - .../nix/corepkgs/imported-drv-to-derivation.nix | 24 - third_party/nix/corepkgs/unpack-channel.nix | 39 - third_party/nix/default.nix | 270 - .../doc/manual/advanced-topics/advanced-topics.xml | 14 - .../doc/manual/advanced-topics/cores-vs-jobs.xml | 121 - .../nix/doc/manual/advanced-topics/diff-hook.xml | 205 - .../manual/advanced-topics/distributed-builds.xml | 190 - .../doc/manual/advanced-topics/post-build-hook.xml | 160 - .../nix/doc/manual/command-ref/command-ref.xml | 20 - .../nix/doc/manual/command-ref/conf-file.xml | 1202 -- .../nix/doc/manual/command-ref/env-common.xml | 202 - third_party/nix/doc/manual/command-ref/files.xml | 14 - .../nix/doc/manual/command-ref/main-commands.xml | 17 - .../nix/doc/manual/command-ref/nix-build.xml | 190 - .../nix/doc/manual/command-ref/nix-channel.xml | 178 - .../doc/manual/command-ref/nix-collect-garbage.xml | 63 - .../doc/manual/command-ref/nix-copy-closure.xml | 169 - .../nix/doc/manual/command-ref/nix-daemon.xml | 51 - third_party/nix/doc/manual/command-ref/nix-env.xml | 1505 -- .../nix/doc/manual/command-ref/nix-hash.xml | 176 - .../nix/doc/manual/command-ref/nix-instantiate.xml | 278 - .../doc/manual/command-ref/nix-prefetch-url.xml | 131 - .../nix/doc/manual/command-ref/nix-shell.xml | 397 - .../nix/doc/manual/command-ref/nix-store.xml | 1525 -- .../nix/doc/manual/command-ref/opt-common-syn.xml | 64 - .../nix/doc/manual/command-ref/opt-common.xml | 366 - .../nix/doc/manual/command-ref/opt-inst-syn.xml | 22 - .../nix/doc/manual/command-ref/utilities.xml | 20 - .../doc/manual/expressions/advanced-attributes.xml | 340 - .../doc/manual/expressions/arguments-variables.xml | 121 - .../nix/doc/manual/expressions/build-script.xml | 119 - .../nix/doc/manual/expressions/builder-syntax.xml | 119 - .../nix/doc/manual/expressions/builtins.xml | 1658 -- .../nix/doc/manual/expressions/derivations.xml | 211 - .../doc/manual/expressions/expression-language.xml | 30 - .../doc/manual/expressions/expression-syntax.xml | 148 - .../nix/doc/manual/expressions/generic-builder.xml | 98 - .../doc/manual/expressions/language-constructs.xml | 409 - .../doc/manual/expressions/language-operators.xml | 
222 - .../nix/doc/manual/expressions/language-values.xml | 313 - .../manual/expressions/simple-building-testing.xml | 84 - .../doc/manual/expressions/simple-expression.xml | 47 - .../manual/expressions/writing-nix-expressions.xml | 26 - .../nix/doc/manual/figures/user-environments.png | Bin 85031 -> 0 bytes .../nix/doc/manual/figures/user-environments.sxd | Bin 8412 -> 0 bytes third_party/nix/doc/manual/glossary/glossary.xml | 199 - third_party/nix/doc/manual/hacking.xml | 41 - third_party/nix/doc/manual/images/callouts/1.gif | Bin 889 -> 0 bytes third_party/nix/doc/manual/images/callouts/10.gif | Bin 929 -> 0 bytes third_party/nix/doc/manual/images/callouts/11.gif | Bin 202 -> 0 bytes third_party/nix/doc/manual/images/callouts/12.gif | Bin 210 -> 0 bytes third_party/nix/doc/manual/images/callouts/13.gif | Bin 209 -> 0 bytes third_party/nix/doc/manual/images/callouts/14.gif | Bin 205 -> 0 bytes third_party/nix/doc/manual/images/callouts/15.gif | Bin 210 -> 0 bytes third_party/nix/doc/manual/images/callouts/2.gif | Bin 907 -> 0 bytes third_party/nix/doc/manual/images/callouts/3.gif | Bin 914 -> 0 bytes third_party/nix/doc/manual/images/callouts/4.gif | Bin 907 -> 0 bytes third_party/nix/doc/manual/images/callouts/5.gif | Bin 916 -> 0 bytes third_party/nix/doc/manual/images/callouts/6.gif | Bin 218 -> 0 bytes third_party/nix/doc/manual/images/callouts/7.gif | Bin 907 -> 0 bytes third_party/nix/doc/manual/images/callouts/8.gif | Bin 918 -> 0 bytes third_party/nix/doc/manual/images/callouts/9.gif | Bin 923 -> 0 bytes .../doc/manual/installation/building-source.xml | 49 - .../nix/doc/manual/installation/env-variables.xml | 89 - .../nix/doc/manual/installation/installation.xml | 34 - .../doc/manual/installation/installing-binary.xml | 190 - .../doc/manual/installation/installing-source.xml | 16 - .../nix/doc/manual/installation/multi-user.xml | 107 - .../nix/doc/manual/installation/nix-security.xml | 27 - .../doc/manual/installation/obtaining-source.xml | 30 - .../manual/installation/prerequisites-source.xml | 105 - .../nix/doc/manual/installation/single-user.xml | 21 - .../manual/installation/supported-platforms.xml | 36 - .../nix/doc/manual/installation/upgrading.xml | 22 - .../nix/doc/manual/introduction/about-nix.xml | 268 - .../nix/doc/manual/introduction/introduction.xml | 12 - .../nix/doc/manual/introduction/quick-start.xml | 124 - third_party/nix/doc/manual/manual.xml | 52 - third_party/nix/doc/manual/nix-lang-ref.xml | 182 - .../nix/doc/manual/packages/basic-package-mgmt.xml | 194 - .../manual/packages/binary-cache-substituter.xml | 70 - third_party/nix/doc/manual/packages/channels.xml | 57 - .../nix/doc/manual/packages/copy-closure.xml | 50 - .../nix/doc/manual/packages/garbage-collection.xml | 86 - .../manual/packages/garbage-collector-roots.xml | 29 - .../nix/doc/manual/packages/package-management.xml | 23 - third_party/nix/doc/manual/packages/profiles.xml | 158 - .../nix/doc/manual/packages/s3-substituter.xml | 182 - .../nix/doc/manual/packages/sharing-packages.xml | 20 - .../nix/doc/manual/packages/ssh-substituter.xml | 73 - third_party/nix/doc/manual/quote-literals.xsl | 40 - .../nix/doc/manual/release-notes/release-notes.xml | 51 - .../nix/doc/manual/release-notes/rl-0.10.1.xml | 13 - .../nix/doc/manual/release-notes/rl-0.10.xml | 323 - .../nix/doc/manual/release-notes/rl-0.11.xml | 261 - .../nix/doc/manual/release-notes/rl-0.12.xml | 175 - .../nix/doc/manual/release-notes/rl-0.13.xml | 106 - .../nix/doc/manual/release-notes/rl-0.14.xml | 46 - 
.../nix/doc/manual/release-notes/rl-0.15.xml | 14 - .../nix/doc/manual/release-notes/rl-0.16.xml | 55 - .../nix/doc/manual/release-notes/rl-0.5.xml | 11 - .../nix/doc/manual/release-notes/rl-0.6.xml | 122 - .../nix/doc/manual/release-notes/rl-0.7.xml | 35 - .../nix/doc/manual/release-notes/rl-0.8.1.xml | 21 - .../nix/doc/manual/release-notes/rl-0.8.xml | 246 - .../nix/doc/manual/release-notes/rl-0.9.1.xml | 13 - .../nix/doc/manual/release-notes/rl-0.9.2.xml | 28 - .../nix/doc/manual/release-notes/rl-0.9.xml | 98 - .../nix/doc/manual/release-notes/rl-1.0.xml | 119 - .../nix/doc/manual/release-notes/rl-1.1.xml | 100 - .../nix/doc/manual/release-notes/rl-1.10.xml | 64 - .../nix/doc/manual/release-notes/rl-1.11.10.xml | 31 - .../nix/doc/manual/release-notes/rl-1.11.xml | 141 - .../nix/doc/manual/release-notes/rl-1.2.xml | 157 - .../nix/doc/manual/release-notes/rl-1.3.xml | 19 - .../nix/doc/manual/release-notes/rl-1.4.xml | 39 - .../nix/doc/manual/release-notes/rl-1.5.1.xml | 12 - .../nix/doc/manual/release-notes/rl-1.5.2.xml | 12 - .../nix/doc/manual/release-notes/rl-1.5.xml | 12 - .../nix/doc/manual/release-notes/rl-1.6.1.xml | 69 - .../nix/doc/manual/release-notes/rl-1.6.xml | 127 - .../nix/doc/manual/release-notes/rl-1.7.xml | 263 - .../nix/doc/manual/release-notes/rl-1.8.xml | 123 - .../nix/doc/manual/release-notes/rl-1.9.xml | 216 - .../nix/doc/manual/release-notes/rl-2.0.xml | 1012 - .../nix/doc/manual/release-notes/rl-2.1.xml | 133 - .../nix/doc/manual/release-notes/rl-2.2.xml | 143 - .../nix/doc/manual/release-notes/rl-2.3.xml | 91 - third_party/nix/doc/manual/schemas.xml | 4 - third_party/nix/misc/systemd/nix-daemon.service.in | 12 - third_party/nix/misc/systemd/nix-daemon.socket.in | 11 - third_party/nix/scripts/build.sh | 24 - third_party/nix/scripts/daemon.sh | 24 - third_party/nix/scripts/eval.sh | 23 - .../nix/scripts/install-darwin-multi-user.sh | 144 - third_party/nix/scripts/install-multi-user.sh | 798 - .../nix/scripts/install-nix-from-closure.sh | 180 - .../nix/scripts/install-systemd-multi-user.sh | 188 - third_party/nix/scripts/install.in | 66 - third_party/nix/scripts/nix-http-export.cgi.in | 51 - third_party/nix/scripts/nix-profile-daemon.sh.in | 29 - third_party/nix/scripts/nix-profile.sh.in | 39 - third_party/nix/scripts/nix-reduce-build.in | 171 - third_party/nix/scripts/repl.sh | 23 - third_party/nix/scripts/setup_store.sh | 11 - third_party/nix/src/CMakeLists.txt | 85 - third_party/nix/src/build-remote/build-remote.cc | 274 - third_party/nix/src/cpptoml/LICENSE | 18 - third_party/nix/src/cpptoml/cpptoml.h | 3668 ---- third_party/nix/src/libexpr/CMakeLists.txt | 85 - third_party/nix/src/libexpr/attr-path.cc | 109 - third_party/nix/src/libexpr/attr-path.hh | 13 - third_party/nix/src/libexpr/attr-set.cc | 111 - third_party/nix/src/libexpr/attr-set.hh | 69 - third_party/nix/src/libexpr/common-eval-args.cc | 72 - third_party/nix/src/libexpr/common-eval-args.hh | 26 - third_party/nix/src/libexpr/eval-inline.hh | 90 - third_party/nix/src/libexpr/eval.cc | 1878 -- third_party/nix/src/libexpr/eval.hh | 365 - third_party/nix/src/libexpr/function-trace.cc | 19 - third_party/nix/src/libexpr/function-trace.hh | 14 - third_party/nix/src/libexpr/get-drvs.cc | 446 - third_party/nix/src/libexpr/get-drvs.hh | 83 - third_party/nix/src/libexpr/json-to-value.cc | 152 - third_party/nix/src/libexpr/json-to-value.hh | 13 - third_party/nix/src/libexpr/lexer.l | 193 - third_party/nix/src/libexpr/names.cc | 121 - third_party/nix/src/libexpr/names.hh | 31 - 
third_party/nix/src/libexpr/nix-expr.pc.in | 10 - third_party/nix/src/libexpr/nixexpr.cc | 414 - third_party/nix/src/libexpr/nixexpr.hh | 361 - third_party/nix/src/libexpr/parser.cc | 332 - third_party/nix/src/libexpr/parser.hh | 100 - third_party/nix/src/libexpr/parser.y | 359 - third_party/nix/src/libexpr/primops.cc | 2335 --- third_party/nix/src/libexpr/primops.hh | 17 - third_party/nix/src/libexpr/primops/context.cc | 202 - third_party/nix/src/libexpr/primops/fetchGit.cc | 277 - .../nix/src/libexpr/primops/fetchMercurial.cc | 246 - third_party/nix/src/libexpr/primops/fromTOML.cc | 94 - third_party/nix/src/libexpr/symbol-table.cc | 24 - third_party/nix/src/libexpr/symbol-table.hh | 69 - third_party/nix/src/libexpr/value-to-json.cc | 91 - third_party/nix/src/libexpr/value-to-json.hh | 19 - third_party/nix/src/libexpr/value-to-xml.cc | 184 - third_party/nix/src/libexpr/value-to-xml.hh | 14 - third_party/nix/src/libexpr/value.cc | 121 - third_party/nix/src/libexpr/value.hh | 191 - third_party/nix/src/libmain/CMakeLists.txt | 33 - third_party/nix/src/libmain/common-args.cc | 56 - third_party/nix/src/libmain/common-args.hh | 27 - third_party/nix/src/libmain/nix-main.pc.in | 9 - third_party/nix/src/libmain/shared.cc | 386 - third_party/nix/src/libmain/shared.hh | 134 - third_party/nix/src/libmain/stack.cc | 75 - third_party/nix/src/libstore/CMakeLists.txt | 127 - third_party/nix/src/libstore/binary-cache-store.cc | 396 - third_party/nix/src/libstore/binary-cache-store.hh | 115 - third_party/nix/src/libstore/build.cc | 4820 ----- third_party/nix/src/libstore/builtins.hh | 11 - third_party/nix/src/libstore/builtins/buildenv.cc | 240 - third_party/nix/src/libstore/builtins/fetchurl.cc | 93 - third_party/nix/src/libstore/crypto.cc | 138 - third_party/nix/src/libstore/crypto.hh | 49 - third_party/nix/src/libstore/derivations.cc | 520 - third_party/nix/src/libstore/derivations.hh | 130 - third_party/nix/src/libstore/download.cc | 1024 - third_party/nix/src/libstore/download.hh | 133 - third_party/nix/src/libstore/export-import.cc | 111 - third_party/nix/src/libstore/fs-accessor.hh | 31 - third_party/nix/src/libstore/gc.cc | 997 - third_party/nix/src/libstore/globals.cc | 178 - third_party/nix/src/libstore/globals.hh | 464 - .../nix/src/libstore/http-binary-cache-store.cc | 171 - third_party/nix/src/libstore/legacy-ssh-store.cc | 282 - .../nix/src/libstore/local-binary-cache-store.cc | 93 - third_party/nix/src/libstore/local-fs-store.cc | 123 - third_party/nix/src/libstore/local-store.cc | 1519 -- third_party/nix/src/libstore/local-store.hh | 319 - third_party/nix/src/libstore/machines.cc | 114 - third_party/nix/src/libstore/machines.hh | 36 - third_party/nix/src/libstore/misc.cc | 331 - .../nix/src/libstore/mock-binary-cache-store.cc | 91 - .../nix/src/libstore/mock-binary-cache-store.hh | 59 - third_party/nix/src/libstore/nar-accessor.cc | 268 - third_party/nix/src/libstore/nar-accessor.hh | 29 - .../nix/src/libstore/nar-info-disk-cache.cc | 295 - .../nix/src/libstore/nar-info-disk-cache.hh | 30 - third_party/nix/src/libstore/nar-info.cc | 142 - third_party/nix/src/libstore/nar-info.hh | 23 - third_party/nix/src/libstore/nix-store.pc.in | 9 - third_party/nix/src/libstore/optimise-store.cc | 296 - third_party/nix/src/libstore/parsed-derivations.cc | 128 - third_party/nix/src/libstore/parsed-derivations.hh | 34 - third_party/nix/src/libstore/pathlocks.cc | 172 - third_party/nix/src/libstore/pathlocks.hh | 35 - third_party/nix/src/libstore/profiles.cc | 252 - third_party/nix/src/libstore/profiles.hh 
| 61 - third_party/nix/src/libstore/references.cc | 126 - third_party/nix/src/libstore/references.hh | 11 - third_party/nix/src/libstore/remote-fs-accessor.cc | 133 - third_party/nix/src/libstore/remote-fs-accessor.hh | 38 - third_party/nix/src/libstore/remote-store.cc | 686 - third_party/nix/src/libstore/remote-store.hh | 141 - third_party/nix/src/libstore/rpc-store.cc | 549 - third_party/nix/src/libstore/rpc-store.hh | 129 - .../nix/src/libstore/s3-binary-cache-store.cc | 431 - .../nix/src/libstore/s3-binary-cache-store.hh | 27 - third_party/nix/src/libstore/s3.hh | 42 - third_party/nix/src/libstore/sandbox-defaults.sb | 87 - third_party/nix/src/libstore/sandbox-minimal.sb | 5 - third_party/nix/src/libstore/sandbox-network.sb | 16 - third_party/nix/src/libstore/schema.sql | 42 - third_party/nix/src/libstore/serve-protocol.hh | 24 - third_party/nix/src/libstore/sqlite.cc | 195 - third_party/nix/src/libstore/sqlite.hh | 109 - third_party/nix/src/libstore/ssh-store.cc | 89 - third_party/nix/src/libstore/ssh.cc | 160 - third_party/nix/src/libstore/ssh.hh | 41 - third_party/nix/src/libstore/store-api.cc | 1167 -- third_party/nix/src/libstore/store-api.hh | 816 - third_party/nix/src/libstore/worker-protocol.hh | 68 - third_party/nix/src/libutil/CMakeLists.txt | 68 - third_party/nix/src/libutil/affinity.cc | 60 - third_party/nix/src/libutil/affinity.hh | 9 - third_party/nix/src/libutil/archive.cc | 398 - third_party/nix/src/libutil/archive.hh | 77 - third_party/nix/src/libutil/args.cc | 219 - third_party/nix/src/libutil/args.hh | 221 - third_party/nix/src/libutil/compression.cc | 400 - third_party/nix/src/libutil/compression.hh | 31 - third_party/nix/src/libutil/config.cc | 370 - third_party/nix/src/libutil/config.hh | 228 - third_party/nix/src/libutil/finally.hh | 13 - third_party/nix/src/libutil/hash.cc | 484 - third_party/nix/src/libutil/hash.hh | 147 - .../nix/src/libutil/istringstream_nocopy.hh | 85 - third_party/nix/src/libutil/json.cc | 198 - third_party/nix/src/libutil/json.hh | 144 - third_party/nix/src/libutil/lazy.hh | 45 - third_party/nix/src/libutil/lru-cache.hh | 90 - third_party/nix/src/libutil/monitor-fd.hh | 57 - third_party/nix/src/libutil/pool.hh | 176 - third_party/nix/src/libutil/proto.hh | 174 - third_party/nix/src/libutil/ref.hh | 65 - third_party/nix/src/libutil/serialise.cc | 311 - third_party/nix/src/libutil/serialise.hh | 289 - third_party/nix/src/libutil/status.hh | 17 - third_party/nix/src/libutil/sync.hh | 84 - third_party/nix/src/libutil/thread-pool.cc | 163 - third_party/nix/src/libutil/thread-pool.hh | 140 - third_party/nix/src/libutil/types.hh | 118 - third_party/nix/src/libutil/util.cc | 1426 -- third_party/nix/src/libutil/util.hh | 476 - third_party/nix/src/libutil/visitor.hh | 19 - third_party/nix/src/libutil/xml-writer.cc | 93 - third_party/nix/src/libutil/xml-writer.hh | 52 - third_party/nix/src/nix-build/nix-build.cc | 581 - third_party/nix/src/nix-channel/nix-channel.cc | 275 - .../src/nix-collect-garbage/nix-collect-garbage.cc | 103 - .../nix/src/nix-copy-closure/nix-copy-closure.cc | 73 - third_party/nix/src/nix-daemon/CMakeLists.txt | 29 - .../nix/src/nix-daemon/nix-daemon-legacy.cc | 1185 -- third_party/nix/src/nix-daemon/nix-daemon-proto.cc | 799 - third_party/nix/src/nix-daemon/nix-daemon-proto.hh | 12 - third_party/nix/src/nix-daemon/nix-daemon.cc | 201 - third_party/nix/src/nix-env/nix-env.cc | 1543 -- third_party/nix/src/nix-env/user-env.cc | 169 - third_party/nix/src/nix-env/user-env.hh | 12 - .../nix/src/nix-instantiate/nix-instantiate.cc | 
219 - .../nix/src/nix-prefetch-url/nix-prefetch-url.cc | 253 - third_party/nix/src/nix-store/dotgraph.cc | 141 - third_party/nix/src/nix-store/dotgraph.hh | 11 - third_party/nix/src/nix-store/graphml.cc | 80 - third_party/nix/src/nix-store/graphml.hh | 11 - third_party/nix/src/nix-store/nix-store.cc | 1302 -- third_party/nix/src/nix/add-to-store.cc | 51 - third_party/nix/src/nix/build.cc | 68 - third_party/nix/src/nix/cat.cc | 56 - third_party/nix/src/nix/command.cc | 156 - third_party/nix/src/nix/command.hh | 194 - third_party/nix/src/nix/copy.cc | 86 - third_party/nix/src/nix/doctor.cc | 142 - third_party/nix/src/nix/dump-path.cc | 28 - third_party/nix/src/nix/edit.cc | 75 - third_party/nix/src/nix/eval.cc | 56 - third_party/nix/src/nix/hash.cc | 152 - third_party/nix/src/nix/installables.cc | 349 - third_party/nix/src/nix/legacy.cc | 7 - third_party/nix/src/nix/legacy.hh | 23 - third_party/nix/src/nix/log.cc | 63 - third_party/nix/src/nix/ls.cc | 137 - third_party/nix/src/nix/main.cc | 185 - third_party/nix/src/nix/optimise-store.cc | 27 - third_party/nix/src/nix/path-info.cc | 133 - third_party/nix/src/nix/ping-store.cc | 25 - third_party/nix/src/nix/repl.cc | 819 - third_party/nix/src/nix/run.cc | 283 - third_party/nix/src/nix/search.cc | 276 - third_party/nix/src/nix/show-config.cc | 31 - third_party/nix/src/nix/show-derivation.cc | 113 - third_party/nix/src/nix/sigs.cc | 146 - third_party/nix/src/nix/upgrade-nix.cc | 167 - third_party/nix/src/nix/verify.cc | 171 - third_party/nix/src/nix/why-depends.cc | 269 - third_party/nix/src/nlohmann/json.hpp | 20406 ------------------- third_party/nix/src/proto/CMakeLists.txt | 37 - third_party/nix/src/proto/worker.proto | 374 - third_party/nix/src/tests/CMakeLists.txt | 78 - third_party/nix/src/tests/arbitrary.hh | 176 - third_party/nix/src/tests/attr-set.cc | 71 - third_party/nix/src/tests/derivations_test.cc | 109 - third_party/nix/src/tests/dummy-store.hh | 48 - third_party/nix/src/tests/hash_test.cc | 101 - third_party/nix/src/tests/lang/binary-data | Bin 1024 -> 0 bytes third_party/nix/src/tests/lang/data | 1 - third_party/nix/src/tests/lang/dir1/a.nix | 1 - third_party/nix/src/tests/lang/dir2/a.nix | 1 - third_party/nix/src/tests/lang/dir2/b.nix | 1 - third_party/nix/src/tests/lang/dir3/a.nix | 1 - third_party/nix/src/tests/lang/dir3/b.nix | 1 - third_party/nix/src/tests/lang/dir3/c.nix | 1 - third_party/nix/src/tests/lang/dir4/a.nix | 1 - third_party/nix/src/tests/lang/dir4/c.nix | 1 - third_party/nix/src/tests/lang/disabled/README.txt | 7 - .../nix/src/tests/lang/disabled/eval-okay-path.nix | 7 - .../tests/lang/disabled/eval-okay-search-path.exp | 1 - .../lang/disabled/eval-okay-search-path.flags | 1 - .../tests/lang/disabled/eval-okay-search-path.nix | 11 - .../tests/lang/disabled/eval-okay-tail-call-1.nix | 3 - .../nix/src/tests/lang/disabled/eval-okay-xml.exp | 52 - .../nix/src/tests/lang/disabled/eval-okay-xml.nix | 21 - third_party/nix/src/tests/lang/eval-fail-abort.nix | 1 - .../src/tests/lang/eval-fail-antiquoted-path.nix | 4 - .../nix/src/tests/lang/eval-fail-assert.nix | 5 - .../src/tests/lang/eval-fail-bad-antiquote-1.nix | 1 - .../src/tests/lang/eval-fail-bad-antiquote-2.nix | 1 - .../src/tests/lang/eval-fail-bad-antiquote-3.nix | 1 - .../nix/src/tests/lang/eval-fail-blackhole.nix | 5 - .../nix/src/tests/lang/eval-fail-deepseq.nix | 1 - .../src/tests/lang/eval-fail-hashfile-missing.nix | 5 - .../nix/src/tests/lang/eval-fail-missing-arg.nix | 1 - .../nix/src/tests/lang/eval-fail-remove.nix | 5 - 
.../nix/src/tests/lang/eval-fail-scope-5.nix | 10 - third_party/nix/src/tests/lang/eval-fail-seq.nix | 1 - .../nix/src/tests/lang/eval-fail-substring.nix | 1 - .../nix/src/tests/lang/eval-fail-to-path.nix | 1 - .../src/tests/lang/eval-fail-undeclared-arg.nix | 1 - .../nix/src/tests/lang/eval-okay-any-all.exp | 1 - .../nix/src/tests/lang/eval-okay-any-all.nix | 11 - .../nix/src/tests/lang/eval-okay-arithmetic.exp | 1 - .../nix/src/tests/lang/eval-okay-arithmetic.nix | 59 - .../nix/src/tests/lang/eval-okay-attrnames.exp | 1 - .../nix/src/tests/lang/eval-okay-attrnames.nix | 11 - third_party/nix/src/tests/lang/eval-okay-attrs.exp | 1 - third_party/nix/src/tests/lang/eval-okay-attrs.nix | 5 - .../nix/src/tests/lang/eval-okay-attrs2.exp | 1 - .../nix/src/tests/lang/eval-okay-attrs2.nix | 10 - .../nix/src/tests/lang/eval-okay-attrs3.exp | 1 - .../nix/src/tests/lang/eval-okay-attrs3.nix | 22 - .../nix/src/tests/lang/eval-okay-attrs4.exp | 1 - .../nix/src/tests/lang/eval-okay-attrs4.nix | 7 - .../nix/src/tests/lang/eval-okay-attrs5.exp | 1 - .../nix/src/tests/lang/eval-okay-attrs5.nix | 21 - .../nix/src/tests/lang/eval-okay-autoargs.flags | 1 - .../tests/lang/eval-okay-backslash-newline-1.exp | 1 - .../tests/lang/eval-okay-backslash-newline-1.nix | 2 - .../tests/lang/eval-okay-backslash-newline-2.exp | 1 - .../tests/lang/eval-okay-backslash-newline-2.nix | 2 - .../nix/src/tests/lang/eval-okay-builtins-add.exp | 1 - .../nix/src/tests/lang/eval-okay-builtins-add.nix | 8 - .../nix/src/tests/lang/eval-okay-builtins.exp | 1 - .../nix/src/tests/lang/eval-okay-builtins.nix | 12 - .../src/tests/lang/eval-okay-callable-attrs.exp | 1 - .../src/tests/lang/eval-okay-callable-attrs.nix | 1 - .../nix/src/tests/lang/eval-okay-catattrs.exp | 1 - .../nix/src/tests/lang/eval-okay-catattrs.nix | 1 - .../nix/src/tests/lang/eval-okay-closure.exp | 1 - .../nix/src/tests/lang/eval-okay-closure.nix | 13 - .../nix/src/tests/lang/eval-okay-comments.exp | 1 - .../nix/src/tests/lang/eval-okay-comments.nix | 59 - .../nix/src/tests/lang/eval-okay-concat.exp | 1 - .../nix/src/tests/lang/eval-okay-concat.nix | 1 - .../nix/src/tests/lang/eval-okay-concatmap.exp | 1 - .../nix/src/tests/lang/eval-okay-concatmap.nix | 5 - .../src/tests/lang/eval-okay-concatstringssep.exp | 1 - .../src/tests/lang/eval-okay-concatstringssep.nix | 8 - .../nix/src/tests/lang/eval-okay-curpos.exp | 1 - .../nix/src/tests/lang/eval-okay-curpos.nix | 5 - .../nix/src/tests/lang/eval-okay-deepseq.exp | 1 - .../nix/src/tests/lang/eval-okay-deepseq.nix | 1 - .../tests/lang/eval-okay-delayed-with-inherit.exp | 1 - .../tests/lang/eval-okay-delayed-with-inherit.nix | 24 - .../nix/src/tests/lang/eval-okay-delayed-with.exp | 1 - .../nix/src/tests/lang/eval-okay-delayed-with.nix | 29 - .../src/tests/lang/eval-okay-dynamic-attrs-2.exp | 1 - .../src/tests/lang/eval-okay-dynamic-attrs-2.nix | 1 - .../tests/lang/eval-okay-dynamic-attrs-bare.exp | 1 - .../tests/lang/eval-okay-dynamic-attrs-bare.nix | 17 - .../nix/src/tests/lang/eval-okay-dynamic-attrs.exp | 1 - .../nix/src/tests/lang/eval-okay-dynamic-attrs.nix | 17 - third_party/nix/src/tests/lang/eval-okay-elem.exp | 1 - third_party/nix/src/tests/lang/eval-okay-elem.nix | 6 - .../nix/src/tests/lang/eval-okay-empty-args.exp | 1 - .../nix/src/tests/lang/eval-okay-empty-args.nix | 1 - .../src/tests/lang/eval-okay-eq-derivations.exp | 1 - .../src/tests/lang/eval-okay-eq-derivations.nix | 10 - third_party/nix/src/tests/lang/eval-okay-eq.exp | 1 - third_party/nix/src/tests/lang/eval-okay-eq.nix | 3 - 
.../nix/src/tests/lang/eval-okay-filter.exp | 1 - .../nix/src/tests/lang/eval-okay-filter.nix | 5 - .../nix/src/tests/lang/eval-okay-flatten.exp | 1 - .../nix/src/tests/lang/eval-okay-flatten.nix | 8 - third_party/nix/src/tests/lang/eval-okay-float.exp | 1 - third_party/nix/src/tests/lang/eval-okay-float.nix | 6 - .../nix/src/tests/lang/eval-okay-fromTOML.exp | 1 - .../nix/src/tests/lang/eval-okay-fromTOML.nix | 208 - .../nix/src/tests/lang/eval-okay-fromjson.exp | 1 - .../nix/src/tests/lang/eval-okay-fromjson.nix | 36 - .../nix/src/tests/lang/eval-okay-functionargs.exp | 1 - .../nix/src/tests/lang/eval-okay-functionargs.nix | 89 - .../tests/lang/eval-okay-getattrpos-undefined.exp | 1 - .../tests/lang/eval-okay-getattrpos-undefined.nix | 1 - .../nix/src/tests/lang/eval-okay-getattrpos.exp | 1 - .../nix/src/tests/lang/eval-okay-getattrpos.nix | 6 - .../nix/src/tests/lang/eval-okay-getenv.exp | 1 - .../nix/src/tests/lang/eval-okay-getenv.nix | 1 - third_party/nix/src/tests/lang/eval-okay-hash.exp | 0 .../nix/src/tests/lang/eval-okay-hashfile.exp | 1 - .../nix/src/tests/lang/eval-okay-hashfile.nix | 4 - .../nix/src/tests/lang/eval-okay-hashstring.exp | 1 - .../nix/src/tests/lang/eval-okay-hashstring.nix | 4 - third_party/nix/src/tests/lang/eval-okay-if.exp | 1 - third_party/nix/src/tests/lang/eval-okay-if.nix | 1 - .../nix/src/tests/lang/eval-okay-import.exp | 1 - .../nix/src/tests/lang/eval-okay-import.nix | 11 - .../nix/src/tests/lang/eval-okay-ind-string.exp | 1 - .../nix/src/tests/lang/eval-okay-ind-string.nix | 128 - third_party/nix/src/tests/lang/eval-okay-let.exp | 1 - third_party/nix/src/tests/lang/eval-okay-let.nix | 5 - third_party/nix/src/tests/lang/eval-okay-list.exp | 1 - third_party/nix/src/tests/lang/eval-okay-list.nix | 7 - .../nix/src/tests/lang/eval-okay-listtoattrs.exp | 1 - .../nix/src/tests/lang/eval-okay-listtoattrs.nix | 11 - third_party/nix/src/tests/lang/eval-okay-logic.exp | 1 - third_party/nix/src/tests/lang/eval-okay-logic.nix | 1 - third_party/nix/src/tests/lang/eval-okay-map.exp | 1 - third_party/nix/src/tests/lang/eval-okay-map.nix | 3 - .../nix/src/tests/lang/eval-okay-mapattrs.exp | 1 - .../nix/src/tests/lang/eval-okay-mapattrs.nix | 3 - .../nix/src/tests/lang/eval-okay-nested-with.exp | 1 - .../nix/src/tests/lang/eval-okay-nested-with.nix | 3 - .../nix/src/tests/lang/eval-okay-new-let.exp | 1 - .../nix/src/tests/lang/eval-okay-new-let.nix | 14 - .../tests/lang/eval-okay-null-dynamic-attrs.exp | 1 - .../tests/lang/eval-okay-null-dynamic-attrs.nix | 1 - .../nix/src/tests/lang/eval-okay-partition.exp | 1 - .../nix/src/tests/lang/eval-okay-partition.nix | 5 - .../nix/src/tests/lang/eval-okay-pathexists.exp | 1 - .../nix/src/tests/lang/eval-okay-pathexists.nix | 5 - .../nix/src/tests/lang/eval-okay-patterns.exp | 1 - .../nix/src/tests/lang/eval-okay-patterns.nix | 16 - .../nix/src/tests/lang/eval-okay-readDir.exp | 1 - .../nix/src/tests/lang/eval-okay-readDir.nix | 1 - .../nix/src/tests/lang/eval-okay-readfile.exp | 1 - .../nix/src/tests/lang/eval-okay-readfile.nix | 1 - .../src/tests/lang/eval-okay-redefine-builtin.exp | 1 - .../src/tests/lang/eval-okay-redefine-builtin.nix | 3 - .../nix/src/tests/lang/eval-okay-regex-match.exp | 1 - .../nix/src/tests/lang/eval-okay-regex-match.nix | 29 - .../nix/src/tests/lang/eval-okay-regex-split.exp | 1 - .../nix/src/tests/lang/eval-okay-regex-split.nix | 48 - .../nix/src/tests/lang/eval-okay-remove.exp | 1 - .../nix/src/tests/lang/eval-okay-remove.nix | 5 - .../src/tests/lang/eval-okay-replacestrings.exp | 1 - 
.../src/tests/lang/eval-okay-replacestrings.nix | 11 - .../nix/src/tests/lang/eval-okay-scope-1.exp | 1 - .../nix/src/tests/lang/eval-okay-scope-1.nix | 6 - .../nix/src/tests/lang/eval-okay-scope-2.exp | 1 - .../nix/src/tests/lang/eval-okay-scope-2.nix | 6 - .../nix/src/tests/lang/eval-okay-scope-3.exp | 1 - .../nix/src/tests/lang/eval-okay-scope-3.nix | 6 - .../nix/src/tests/lang/eval-okay-scope-4.exp | 1 - .../nix/src/tests/lang/eval-okay-scope-4.nix | 10 - .../nix/src/tests/lang/eval-okay-scope-6.exp | 1 - .../nix/src/tests/lang/eval-okay-scope-6.nix | 7 - .../nix/src/tests/lang/eval-okay-scope-7.exp | 1 - .../nix/src/tests/lang/eval-okay-scope-7.nix | 6 - third_party/nix/src/tests/lang/eval-okay-seq.exp | 1 - third_party/nix/src/tests/lang/eval-okay-seq.nix | 1 - third_party/nix/src/tests/lang/eval-okay-sort.exp | 1 - third_party/nix/src/tests/lang/eval-okay-sort.nix | 8 - .../nix/src/tests/lang/eval-okay-splitversion.exp | 1 - .../nix/src/tests/lang/eval-okay-splitversion.nix | 1 - .../nix/src/tests/lang/eval-okay-string.exp | 1 - .../nix/src/tests/lang/eval-okay-string.nix | 12 - .../lang/eval-okay-strings-as-attrs-names.exp | 1 - .../lang/eval-okay-strings-as-attrs-names.nix | 20 - .../nix/src/tests/lang/eval-okay-substring.exp | 1 - .../nix/src/tests/lang/eval-okay-substring.nix | 21 - .../tests/lang/eval-okay-tail-call-1.exp-disabled | 1 - .../nix/src/tests/lang/eval-okay-tojson.exp | 1 - .../nix/src/tests/lang/eval-okay-tojson.nix | 13 - .../nix/src/tests/lang/eval-okay-toxml2.exp | 1 - .../nix/src/tests/lang/eval-okay-toxml2.nix | 1 - .../nix/src/tests/lang/eval-okay-tryeval.exp | 1 - .../nix/src/tests/lang/eval-okay-tryeval.nix | 5 - third_party/nix/src/tests/lang/eval-okay-types.exp | 1 - third_party/nix/src/tests/lang/eval-okay-types.nix | 38 - .../nix/src/tests/lang/eval-okay-versions.exp | 1 - .../nix/src/tests/lang/eval-okay-versions.nix | 40 - third_party/nix/src/tests/lang/eval-okay-with.exp | 1 - third_party/nix/src/tests/lang/eval-okay-with.nix | 19 - .../nix/src/tests/lang/evalargs-okay-autoargs.nix | 15 - .../nix/src/tests/lang/evalstore-okay-autoargs.exp | 1 - .../lang/evalstore-okay-context-introspection.exp | 1 - .../lang/evalstore-okay-context-introspection.nix | 24 - .../nix/src/tests/lang/evalstore-okay-context.exp | 1 - .../nix/src/tests/lang/evalstore-okay-context.nix | 6 - .../nix/src/tests/lang/evalstore-okay-toxml.exp | 1 - .../nix/src/tests/lang/evalstore-okay-toxml.nix | 3 - third_party/nix/src/tests/lang/imported.nix | 3 - third_party/nix/src/tests/lang/imported2.nix | 1 - third_party/nix/src/tests/lang/lib.nix | 61 - .../nix/src/tests/lang/parse-fail-dup-attrs-1.nix | 5 - .../nix/src/tests/lang/parse-fail-dup-attrs-2.nix | 13 - .../nix/src/tests/lang/parse-fail-dup-attrs-3.nix | 13 - .../nix/src/tests/lang/parse-fail-dup-attrs-4.nix | 4 - .../nix/src/tests/lang/parse-fail-dup-attrs-7.nix | 9 - .../nix/src/tests/lang/parse-fail-dup-formals.nix | 1 - .../tests/lang/parse-fail-mixed-nested-attrs1.nix | 4 - .../tests/lang/parse-fail-mixed-nested-attrs2.nix | 4 - .../nix/src/tests/lang/parse-fail-path-slash.nix | 6 - .../nix/src/tests/lang/parse-fail-patterns-1.nix | 1 - .../tests/lang/parse-fail-regression-20060610.nix | 11 - third_party/nix/src/tests/lang/parse-fail-uft8.nix | 1 - .../nix/src/tests/lang/parse-fail-undef-var-2.nix | 7 - .../nix/src/tests/lang/parse-fail-undef-var.nix | 1 - third_party/nix/src/tests/lang/parse-okay-1.nix | 1 - third_party/nix/src/tests/lang/parse-okay-crlf.nix | 17 - .../nix/src/tests/lang/parse-okay-dup-attrs-5.nix | 4 - 
.../nix/src/tests/lang/parse-okay-dup-attrs-6.nix | 4 - .../tests/lang/parse-okay-mixed-nested-attrs-1.nix | 4 - .../tests/lang/parse-okay-mixed-nested-attrs-2.nix | 4 - .../tests/lang/parse-okay-mixed-nested-attrs-3.nix | 7 - .../tests/lang/parse-okay-regression-20041027.nix | 11 - .../src/tests/lang/parse-okay-regression-751.nix | 2 - .../nix/src/tests/lang/parse-okay-subversion.nix | 43 - third_party/nix/src/tests/lang/parse-okay-url.nix | 7 - third_party/nix/src/tests/lang/readDir/bar | 0 .../tests/lang/readDir/foo/git-hates-directories | 0 third_party/nix/src/tests/language-tests.cc | 290 - third_party/nix/src/tests/references_test.cc | 74 - third_party/nix/src/tests/status_helpers.h | 83 - third_party/nix/src/tests/store-api-test.cc | 28 - third_party/nix/src/tests/store-util.hh | 76 - third_party/nix/src/tests/store_tests.cc | 122 - third_party/nix/src/tests/value-to-json.cc | 257 - third_party/nix/test-vm.nix | 20 - third_party/nix/tests/add.sh | 28 - third_party/nix/tests/binary-cache.sh | 170 - third_party/nix/tests/brotli.sh | 21 - third_party/nix/tests/build-dry.sh | 52 - third_party/nix/tests/build-hook.nix | 23 - third_party/nix/tests/build-remote.sh | 24 - third_party/nix/tests/case-hack.sh | 19 - third_party/nix/tests/case.nar | Bin 2416 -> 0 bytes third_party/nix/tests/check-refs.nix | 70 - third_party/nix/tests/check-refs.sh | 42 - third_party/nix/tests/check-reqs.nix | 57 - third_party/nix/tests/check-reqs.sh | 16 - third_party/nix/tests/check.nix | 22 - third_party/nix/tests/check.sh | 47 - third_party/nix/tests/common.sh.in | 118 - third_party/nix/tests/config.nix | 20 - third_party/nix/tests/dependencies.builder0.sh | 16 - third_party/nix/tests/dependencies.builder1.sh | 2 - third_party/nix/tests/dependencies.builder2.sh | 2 - third_party/nix/tests/dependencies.nix | 24 - third_party/nix/tests/dependencies.sh | 52 - third_party/nix/tests/dump-db.sh | 20 - third_party/nix/tests/export-graph.nix | 29 - third_party/nix/tests/export-graph.sh | 30 - third_party/nix/tests/export.sh | 36 - third_party/nix/tests/fetchGit.sh | 141 - third_party/nix/tests/fetchMercurial.sh | 93 - third_party/nix/tests/fetchurl.sh | 78 - third_party/nix/tests/filter-source.nix | 12 - third_party/nix/tests/filter-source.sh | 19 - third_party/nix/tests/fixed.builder1.sh | 3 - third_party/nix/tests/fixed.builder2.sh | 6 - third_party/nix/tests/fixed.nix | 50 - third_party/nix/tests/fixed.sh | 56 - third_party/nix/tests/function-trace.sh | 85 - third_party/nix/tests/gc-auto.sh | 70 - third_party/nix/tests/gc-concurrent.builder.sh | 13 - third_party/nix/tests/gc-concurrent.nix | 27 - third_party/nix/tests/gc-concurrent.sh | 58 - third_party/nix/tests/gc-concurrent2.builder.sh | 7 - third_party/nix/tests/gc-runtime.nix | 17 - third_party/nix/tests/gc-runtime.sh | 38 - third_party/nix/tests/gc.sh | 40 - third_party/nix/tests/hash-check.nix | 29 - third_party/nix/tests/hash.sh | 87 - third_party/nix/tests/import-derivation.nix | 26 - third_party/nix/tests/import-derivation.sh | 12 - third_party/nix/tests/init.sh | 34 - third_party/nix/tests/install-darwin.sh | 96 - third_party/nix/tests/lang.sh | 68 - third_party/nix/tests/linux-sandbox.sh | 30 - third_party/nix/tests/logging.sh | 15 - third_party/nix/tests/misc.sh | 19 - third_party/nix/tests/multiple-outputs.nix | 68 - third_party/nix/tests/multiple-outputs.sh | 76 - third_party/nix/tests/nar-access.nix | 23 - third_party/nix/tests/nar-access.sh | 44 - third_party/nix/tests/nix-build.sh | 25 - third_party/nix/tests/nix-channel.sh | 59 - 
third_party/nix/tests/nix-copy-closure.nix | 64 - third_party/nix/tests/nix-copy-ssh.sh | 20 - third_party/nix/tests/nix-profile.sh | 9 - third_party/nix/tests/nix-shell.sh | 57 - third_party/nix/tests/optimise-store.sh | 43 - third_party/nix/tests/parallel.builder.sh | 29 - third_party/nix/tests/parallel.nix | 19 - third_party/nix/tests/parallel.sh | 56 - third_party/nix/tests/pass-as-file.sh | 18 - third_party/nix/tests/placeholders.sh | 20 - third_party/nix/tests/post-hook.sh | 15 - third_party/nix/tests/pure-eval.nix | 3 - third_party/nix/tests/pure-eval.sh | 18 - third_party/nix/tests/push-to-store.sh | 4 - third_party/nix/tests/referrers.sh | 36 - third_party/nix/tests/remote-builds.nix | 108 - third_party/nix/tests/remote-store.sh | 19 - third_party/nix/tests/repair.sh | 77 - third_party/nix/tests/restricted.nix | 1 - third_party/nix/tests/restricted.sh | 51 - third_party/nix/tests/run.nix | 17 - third_party/nix/tests/run.sh | 28 - third_party/nix/tests/search.nix | 25 - third_party/nix/tests/search.sh | 43 - third_party/nix/tests/secure-drv-outputs.nix | 23 - third_party/nix/tests/secure-drv-outputs.sh | 36 - third_party/nix/tests/setuid.nix | 108 - third_party/nix/tests/shell.nix | 56 - third_party/nix/tests/shell.shebang.rb | 7 - third_party/nix/tests/shell.shebang.sh | 4 - third_party/nix/tests/signing.sh | 105 - third_party/nix/tests/simple.builder.sh | 11 - third_party/nix/tests/simple.nix | 8 - third_party/nix/tests/simple.sh | 25 - third_party/nix/tests/structured-attrs.nix | 66 - third_party/nix/tests/structured-attrs.sh | 7 - third_party/nix/tests/tarball.sh | 28 - third_party/nix/tests/timeout.nix | 31 - third_party/nix/tests/timeout.sh | 40 - third_party/nix/tests/user-envs.builder.sh | 5 - third_party/nix/tests/user-envs.nix | 29 - third_party/nix/tests/user-envs.sh | 181 - 721 files changed, 104195 deletions(-) delete mode 100644 third_party/nix/.clang-format delete mode 100644 third_party/nix/.clang-tidy delete mode 100644 third_party/nix/.dir-locals.el delete mode 100644 third_party/nix/.github/ISSUE_TEMPLATE.md delete mode 100644 third_party/nix/.gitignore delete mode 100644 third_party/nix/.skip-subtree delete mode 100644 third_party/nix/.travis.yml delete mode 100644 third_party/nix/.version delete mode 100644 third_party/nix/CMakeLists.txt delete mode 100644 third_party/nix/COPYING delete mode 100644 third_party/nix/OWNERS delete mode 100644 third_party/nix/README.md delete mode 100644 third_party/nix/clangd.nix delete mode 100644 third_party/nix/config.h.in delete mode 100755 third_party/nix/config/config.sub delete mode 100755 third_party/nix/config/install-sh delete mode 100755 third_party/nix/contrib/stack-collapse.py delete mode 100644 third_party/nix/corepkgs/buildenv.nix delete mode 100644 third_party/nix/corepkgs/config.nix.in delete mode 100644 third_party/nix/corepkgs/derivation.nix delete mode 100644 third_party/nix/corepkgs/fetchurl.nix delete mode 100644 third_party/nix/corepkgs/imported-drv-to-derivation.nix delete mode 100644 third_party/nix/corepkgs/unpack-channel.nix delete mode 100644 third_party/nix/default.nix delete mode 100644 third_party/nix/doc/manual/advanced-topics/advanced-topics.xml delete mode 100644 third_party/nix/doc/manual/advanced-topics/cores-vs-jobs.xml delete mode 100644 third_party/nix/doc/manual/advanced-topics/diff-hook.xml delete mode 100644 third_party/nix/doc/manual/advanced-topics/distributed-builds.xml delete mode 100644 third_party/nix/doc/manual/advanced-topics/post-build-hook.xml delete mode 100644 
third_party/nix/doc/manual/command-ref/command-ref.xml delete mode 100644 third_party/nix/doc/manual/command-ref/conf-file.xml delete mode 100644 third_party/nix/doc/manual/command-ref/env-common.xml delete mode 100644 third_party/nix/doc/manual/command-ref/files.xml delete mode 100644 third_party/nix/doc/manual/command-ref/main-commands.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-build.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-channel.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-collect-garbage.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-copy-closure.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-daemon.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-env.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-hash.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-instantiate.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-prefetch-url.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-shell.xml delete mode 100644 third_party/nix/doc/manual/command-ref/nix-store.xml delete mode 100644 third_party/nix/doc/manual/command-ref/opt-common-syn.xml delete mode 100644 third_party/nix/doc/manual/command-ref/opt-common.xml delete mode 100644 third_party/nix/doc/manual/command-ref/opt-inst-syn.xml delete mode 100644 third_party/nix/doc/manual/command-ref/utilities.xml delete mode 100644 third_party/nix/doc/manual/expressions/advanced-attributes.xml delete mode 100644 third_party/nix/doc/manual/expressions/arguments-variables.xml delete mode 100644 third_party/nix/doc/manual/expressions/build-script.xml delete mode 100644 third_party/nix/doc/manual/expressions/builder-syntax.xml delete mode 100644 third_party/nix/doc/manual/expressions/builtins.xml delete mode 100644 third_party/nix/doc/manual/expressions/derivations.xml delete mode 100644 third_party/nix/doc/manual/expressions/expression-language.xml delete mode 100644 third_party/nix/doc/manual/expressions/expression-syntax.xml delete mode 100644 third_party/nix/doc/manual/expressions/generic-builder.xml delete mode 100644 third_party/nix/doc/manual/expressions/language-constructs.xml delete mode 100644 third_party/nix/doc/manual/expressions/language-operators.xml delete mode 100644 third_party/nix/doc/manual/expressions/language-values.xml delete mode 100644 third_party/nix/doc/manual/expressions/simple-building-testing.xml delete mode 100644 third_party/nix/doc/manual/expressions/simple-expression.xml delete mode 100644 third_party/nix/doc/manual/expressions/writing-nix-expressions.xml delete mode 100644 third_party/nix/doc/manual/figures/user-environments.png delete mode 100644 third_party/nix/doc/manual/figures/user-environments.sxd delete mode 100644 third_party/nix/doc/manual/glossary/glossary.xml delete mode 100644 third_party/nix/doc/manual/hacking.xml delete mode 100644 third_party/nix/doc/manual/images/callouts/1.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/10.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/11.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/12.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/13.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/14.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/15.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/2.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/3.gif 
delete mode 100644 third_party/nix/doc/manual/images/callouts/4.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/5.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/6.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/7.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/8.gif delete mode 100644 third_party/nix/doc/manual/images/callouts/9.gif delete mode 100644 third_party/nix/doc/manual/installation/building-source.xml delete mode 100644 third_party/nix/doc/manual/installation/env-variables.xml delete mode 100644 third_party/nix/doc/manual/installation/installation.xml delete mode 100644 third_party/nix/doc/manual/installation/installing-binary.xml delete mode 100644 third_party/nix/doc/manual/installation/installing-source.xml delete mode 100644 third_party/nix/doc/manual/installation/multi-user.xml delete mode 100644 third_party/nix/doc/manual/installation/nix-security.xml delete mode 100644 third_party/nix/doc/manual/installation/obtaining-source.xml delete mode 100644 third_party/nix/doc/manual/installation/prerequisites-source.xml delete mode 100644 third_party/nix/doc/manual/installation/single-user.xml delete mode 100644 third_party/nix/doc/manual/installation/supported-platforms.xml delete mode 100644 third_party/nix/doc/manual/installation/upgrading.xml delete mode 100644 third_party/nix/doc/manual/introduction/about-nix.xml delete mode 100644 third_party/nix/doc/manual/introduction/introduction.xml delete mode 100644 third_party/nix/doc/manual/introduction/quick-start.xml delete mode 100644 third_party/nix/doc/manual/manual.xml delete mode 100644 third_party/nix/doc/manual/nix-lang-ref.xml delete mode 100644 third_party/nix/doc/manual/packages/basic-package-mgmt.xml delete mode 100644 third_party/nix/doc/manual/packages/binary-cache-substituter.xml delete mode 100644 third_party/nix/doc/manual/packages/channels.xml delete mode 100644 third_party/nix/doc/manual/packages/copy-closure.xml delete mode 100644 third_party/nix/doc/manual/packages/garbage-collection.xml delete mode 100644 third_party/nix/doc/manual/packages/garbage-collector-roots.xml delete mode 100644 third_party/nix/doc/manual/packages/package-management.xml delete mode 100644 third_party/nix/doc/manual/packages/profiles.xml delete mode 100644 third_party/nix/doc/manual/packages/s3-substituter.xml delete mode 100644 third_party/nix/doc/manual/packages/sharing-packages.xml delete mode 100644 third_party/nix/doc/manual/packages/ssh-substituter.xml delete mode 100644 third_party/nix/doc/manual/quote-literals.xsl delete mode 100644 third_party/nix/doc/manual/release-notes/release-notes.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.10.1.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.10.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.11.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.12.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.13.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.14.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.15.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.16.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.5.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.6.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.7.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.8.1.xml delete mode 100644 
third_party/nix/doc/manual/release-notes/rl-0.8.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.9.1.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.9.2.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-0.9.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.0.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.1.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.10.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.11.10.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.11.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.2.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.3.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.4.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.5.1.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.5.2.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.5.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.6.1.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.6.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.7.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.8.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-1.9.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-2.0.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-2.1.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-2.2.xml delete mode 100644 third_party/nix/doc/manual/release-notes/rl-2.3.xml delete mode 100644 third_party/nix/doc/manual/schemas.xml delete mode 100644 third_party/nix/misc/systemd/nix-daemon.service.in delete mode 100644 third_party/nix/misc/systemd/nix-daemon.socket.in delete mode 100755 third_party/nix/scripts/build.sh delete mode 100755 third_party/nix/scripts/daemon.sh delete mode 100755 third_party/nix/scripts/eval.sh delete mode 100644 third_party/nix/scripts/install-darwin-multi-user.sh delete mode 100644 third_party/nix/scripts/install-multi-user.sh delete mode 100644 third_party/nix/scripts/install-nix-from-closure.sh delete mode 100755 third_party/nix/scripts/install-systemd-multi-user.sh delete mode 100644 third_party/nix/scripts/install.in delete mode 100755 third_party/nix/scripts/nix-http-export.cgi.in delete mode 100644 third_party/nix/scripts/nix-profile-daemon.sh.in delete mode 100644 third_party/nix/scripts/nix-profile.sh.in delete mode 100755 third_party/nix/scripts/nix-reduce-build.in delete mode 100755 third_party/nix/scripts/repl.sh delete mode 100755 third_party/nix/scripts/setup_store.sh delete mode 100644 third_party/nix/src/CMakeLists.txt delete mode 100644 third_party/nix/src/build-remote/build-remote.cc delete mode 100644 third_party/nix/src/cpptoml/LICENSE delete mode 100644 third_party/nix/src/cpptoml/cpptoml.h delete mode 100644 third_party/nix/src/libexpr/CMakeLists.txt delete mode 100644 third_party/nix/src/libexpr/attr-path.cc delete mode 100644 third_party/nix/src/libexpr/attr-path.hh delete mode 100644 third_party/nix/src/libexpr/attr-set.cc delete mode 100644 third_party/nix/src/libexpr/attr-set.hh delete mode 100644 third_party/nix/src/libexpr/common-eval-args.cc delete mode 100644 third_party/nix/src/libexpr/common-eval-args.hh delete mode 100644 third_party/nix/src/libexpr/eval-inline.hh delete mode 100644 third_party/nix/src/libexpr/eval.cc delete mode 
100644 third_party/nix/src/libexpr/eval.hh delete mode 100644 third_party/nix/src/libexpr/function-trace.cc delete mode 100644 third_party/nix/src/libexpr/function-trace.hh delete mode 100644 third_party/nix/src/libexpr/get-drvs.cc delete mode 100644 third_party/nix/src/libexpr/get-drvs.hh delete mode 100644 third_party/nix/src/libexpr/json-to-value.cc delete mode 100644 third_party/nix/src/libexpr/json-to-value.hh delete mode 100644 third_party/nix/src/libexpr/lexer.l delete mode 100644 third_party/nix/src/libexpr/names.cc delete mode 100644 third_party/nix/src/libexpr/names.hh delete mode 100644 third_party/nix/src/libexpr/nix-expr.pc.in delete mode 100644 third_party/nix/src/libexpr/nixexpr.cc delete mode 100644 third_party/nix/src/libexpr/nixexpr.hh delete mode 100644 third_party/nix/src/libexpr/parser.cc delete mode 100644 third_party/nix/src/libexpr/parser.hh delete mode 100644 third_party/nix/src/libexpr/parser.y delete mode 100644 third_party/nix/src/libexpr/primops.cc delete mode 100644 third_party/nix/src/libexpr/primops.hh delete mode 100644 third_party/nix/src/libexpr/primops/context.cc delete mode 100644 third_party/nix/src/libexpr/primops/fetchGit.cc delete mode 100644 third_party/nix/src/libexpr/primops/fetchMercurial.cc delete mode 100644 third_party/nix/src/libexpr/primops/fromTOML.cc delete mode 100644 third_party/nix/src/libexpr/symbol-table.cc delete mode 100644 third_party/nix/src/libexpr/symbol-table.hh delete mode 100644 third_party/nix/src/libexpr/value-to-json.cc delete mode 100644 third_party/nix/src/libexpr/value-to-json.hh delete mode 100644 third_party/nix/src/libexpr/value-to-xml.cc delete mode 100644 third_party/nix/src/libexpr/value-to-xml.hh delete mode 100644 third_party/nix/src/libexpr/value.cc delete mode 100644 third_party/nix/src/libexpr/value.hh delete mode 100644 third_party/nix/src/libmain/CMakeLists.txt delete mode 100644 third_party/nix/src/libmain/common-args.cc delete mode 100644 third_party/nix/src/libmain/common-args.hh delete mode 100644 third_party/nix/src/libmain/nix-main.pc.in delete mode 100644 third_party/nix/src/libmain/shared.cc delete mode 100644 third_party/nix/src/libmain/shared.hh delete mode 100644 third_party/nix/src/libmain/stack.cc delete mode 100644 third_party/nix/src/libstore/CMakeLists.txt delete mode 100644 third_party/nix/src/libstore/binary-cache-store.cc delete mode 100644 third_party/nix/src/libstore/binary-cache-store.hh delete mode 100644 third_party/nix/src/libstore/build.cc delete mode 100644 third_party/nix/src/libstore/builtins.hh delete mode 100644 third_party/nix/src/libstore/builtins/buildenv.cc delete mode 100644 third_party/nix/src/libstore/builtins/fetchurl.cc delete mode 100644 third_party/nix/src/libstore/crypto.cc delete mode 100644 third_party/nix/src/libstore/crypto.hh delete mode 100644 third_party/nix/src/libstore/derivations.cc delete mode 100644 third_party/nix/src/libstore/derivations.hh delete mode 100644 third_party/nix/src/libstore/download.cc delete mode 100644 third_party/nix/src/libstore/download.hh delete mode 100644 third_party/nix/src/libstore/export-import.cc delete mode 100644 third_party/nix/src/libstore/fs-accessor.hh delete mode 100644 third_party/nix/src/libstore/gc.cc delete mode 100644 third_party/nix/src/libstore/globals.cc delete mode 100644 third_party/nix/src/libstore/globals.hh delete mode 100644 third_party/nix/src/libstore/http-binary-cache-store.cc delete mode 100644 third_party/nix/src/libstore/legacy-ssh-store.cc delete mode 100644 
third_party/nix/src/libstore/local-binary-cache-store.cc delete mode 100644 third_party/nix/src/libstore/local-fs-store.cc delete mode 100644 third_party/nix/src/libstore/local-store.cc delete mode 100644 third_party/nix/src/libstore/local-store.hh delete mode 100644 third_party/nix/src/libstore/machines.cc delete mode 100644 third_party/nix/src/libstore/machines.hh delete mode 100644 third_party/nix/src/libstore/misc.cc delete mode 100644 third_party/nix/src/libstore/mock-binary-cache-store.cc delete mode 100644 third_party/nix/src/libstore/mock-binary-cache-store.hh delete mode 100644 third_party/nix/src/libstore/nar-accessor.cc delete mode 100644 third_party/nix/src/libstore/nar-accessor.hh delete mode 100644 third_party/nix/src/libstore/nar-info-disk-cache.cc delete mode 100644 third_party/nix/src/libstore/nar-info-disk-cache.hh delete mode 100644 third_party/nix/src/libstore/nar-info.cc delete mode 100644 third_party/nix/src/libstore/nar-info.hh delete mode 100644 third_party/nix/src/libstore/nix-store.pc.in delete mode 100644 third_party/nix/src/libstore/optimise-store.cc delete mode 100644 third_party/nix/src/libstore/parsed-derivations.cc delete mode 100644 third_party/nix/src/libstore/parsed-derivations.hh delete mode 100644 third_party/nix/src/libstore/pathlocks.cc delete mode 100644 third_party/nix/src/libstore/pathlocks.hh delete mode 100644 third_party/nix/src/libstore/profiles.cc delete mode 100644 third_party/nix/src/libstore/profiles.hh delete mode 100644 third_party/nix/src/libstore/references.cc delete mode 100644 third_party/nix/src/libstore/references.hh delete mode 100644 third_party/nix/src/libstore/remote-fs-accessor.cc delete mode 100644 third_party/nix/src/libstore/remote-fs-accessor.hh delete mode 100644 third_party/nix/src/libstore/remote-store.cc delete mode 100644 third_party/nix/src/libstore/remote-store.hh delete mode 100644 third_party/nix/src/libstore/rpc-store.cc delete mode 100644 third_party/nix/src/libstore/rpc-store.hh delete mode 100644 third_party/nix/src/libstore/s3-binary-cache-store.cc delete mode 100644 third_party/nix/src/libstore/s3-binary-cache-store.hh delete mode 100644 third_party/nix/src/libstore/s3.hh delete mode 100644 third_party/nix/src/libstore/sandbox-defaults.sb delete mode 100644 third_party/nix/src/libstore/sandbox-minimal.sb delete mode 100644 third_party/nix/src/libstore/sandbox-network.sb delete mode 100644 third_party/nix/src/libstore/schema.sql delete mode 100644 third_party/nix/src/libstore/serve-protocol.hh delete mode 100644 third_party/nix/src/libstore/sqlite.cc delete mode 100644 third_party/nix/src/libstore/sqlite.hh delete mode 100644 third_party/nix/src/libstore/ssh-store.cc delete mode 100644 third_party/nix/src/libstore/ssh.cc delete mode 100644 third_party/nix/src/libstore/ssh.hh delete mode 100644 third_party/nix/src/libstore/store-api.cc delete mode 100644 third_party/nix/src/libstore/store-api.hh delete mode 100644 third_party/nix/src/libstore/worker-protocol.hh delete mode 100644 third_party/nix/src/libutil/CMakeLists.txt delete mode 100644 third_party/nix/src/libutil/affinity.cc delete mode 100644 third_party/nix/src/libutil/affinity.hh delete mode 100644 third_party/nix/src/libutil/archive.cc delete mode 100644 third_party/nix/src/libutil/archive.hh delete mode 100644 third_party/nix/src/libutil/args.cc delete mode 100644 third_party/nix/src/libutil/args.hh delete mode 100644 third_party/nix/src/libutil/compression.cc delete mode 100644 third_party/nix/src/libutil/compression.hh delete mode 100644 
third_party/nix/src/libutil/config.cc delete mode 100644 third_party/nix/src/libutil/config.hh delete mode 100644 third_party/nix/src/libutil/finally.hh delete mode 100644 third_party/nix/src/libutil/hash.cc delete mode 100644 third_party/nix/src/libutil/hash.hh delete mode 100644 third_party/nix/src/libutil/istringstream_nocopy.hh delete mode 100644 third_party/nix/src/libutil/json.cc delete mode 100644 third_party/nix/src/libutil/json.hh delete mode 100644 third_party/nix/src/libutil/lazy.hh delete mode 100644 third_party/nix/src/libutil/lru-cache.hh delete mode 100644 third_party/nix/src/libutil/monitor-fd.hh delete mode 100644 third_party/nix/src/libutil/pool.hh delete mode 100644 third_party/nix/src/libutil/proto.hh delete mode 100644 third_party/nix/src/libutil/ref.hh delete mode 100644 third_party/nix/src/libutil/serialise.cc delete mode 100644 third_party/nix/src/libutil/serialise.hh delete mode 100644 third_party/nix/src/libutil/status.hh delete mode 100644 third_party/nix/src/libutil/sync.hh delete mode 100644 third_party/nix/src/libutil/thread-pool.cc delete mode 100644 third_party/nix/src/libutil/thread-pool.hh delete mode 100644 third_party/nix/src/libutil/types.hh delete mode 100644 third_party/nix/src/libutil/util.cc delete mode 100644 third_party/nix/src/libutil/util.hh delete mode 100644 third_party/nix/src/libutil/visitor.hh delete mode 100644 third_party/nix/src/libutil/xml-writer.cc delete mode 100644 third_party/nix/src/libutil/xml-writer.hh delete mode 100644 third_party/nix/src/nix-build/nix-build.cc delete mode 100644 third_party/nix/src/nix-channel/nix-channel.cc delete mode 100644 third_party/nix/src/nix-collect-garbage/nix-collect-garbage.cc delete mode 100644 third_party/nix/src/nix-copy-closure/nix-copy-closure.cc delete mode 100644 third_party/nix/src/nix-daemon/CMakeLists.txt delete mode 100644 third_party/nix/src/nix-daemon/nix-daemon-legacy.cc delete mode 100644 third_party/nix/src/nix-daemon/nix-daemon-proto.cc delete mode 100644 third_party/nix/src/nix-daemon/nix-daemon-proto.hh delete mode 100644 third_party/nix/src/nix-daemon/nix-daemon.cc delete mode 100644 third_party/nix/src/nix-env/nix-env.cc delete mode 100644 third_party/nix/src/nix-env/user-env.cc delete mode 100644 third_party/nix/src/nix-env/user-env.hh delete mode 100644 third_party/nix/src/nix-instantiate/nix-instantiate.cc delete mode 100644 third_party/nix/src/nix-prefetch-url/nix-prefetch-url.cc delete mode 100644 third_party/nix/src/nix-store/dotgraph.cc delete mode 100644 third_party/nix/src/nix-store/dotgraph.hh delete mode 100644 third_party/nix/src/nix-store/graphml.cc delete mode 100644 third_party/nix/src/nix-store/graphml.hh delete mode 100644 third_party/nix/src/nix-store/nix-store.cc delete mode 100644 third_party/nix/src/nix/add-to-store.cc delete mode 100644 third_party/nix/src/nix/build.cc delete mode 100644 third_party/nix/src/nix/cat.cc delete mode 100644 third_party/nix/src/nix/command.cc delete mode 100644 third_party/nix/src/nix/command.hh delete mode 100644 third_party/nix/src/nix/copy.cc delete mode 100644 third_party/nix/src/nix/doctor.cc delete mode 100644 third_party/nix/src/nix/dump-path.cc delete mode 100644 third_party/nix/src/nix/edit.cc delete mode 100644 third_party/nix/src/nix/eval.cc delete mode 100644 third_party/nix/src/nix/hash.cc delete mode 100644 third_party/nix/src/nix/installables.cc delete mode 100644 third_party/nix/src/nix/legacy.cc delete mode 100644 third_party/nix/src/nix/legacy.hh delete mode 100644 third_party/nix/src/nix/log.cc delete mode 
100644 third_party/nix/src/nix/ls.cc delete mode 100644 third_party/nix/src/nix/main.cc delete mode 100644 third_party/nix/src/nix/optimise-store.cc delete mode 100644 third_party/nix/src/nix/path-info.cc delete mode 100644 third_party/nix/src/nix/ping-store.cc delete mode 100644 third_party/nix/src/nix/repl.cc delete mode 100644 third_party/nix/src/nix/run.cc delete mode 100644 third_party/nix/src/nix/search.cc delete mode 100644 third_party/nix/src/nix/show-config.cc delete mode 100644 third_party/nix/src/nix/show-derivation.cc delete mode 100644 third_party/nix/src/nix/sigs.cc delete mode 100644 third_party/nix/src/nix/upgrade-nix.cc delete mode 100644 third_party/nix/src/nix/verify.cc delete mode 100644 third_party/nix/src/nix/why-depends.cc delete mode 100644 third_party/nix/src/nlohmann/json.hpp delete mode 100644 third_party/nix/src/proto/CMakeLists.txt delete mode 100644 third_party/nix/src/proto/worker.proto delete mode 100644 third_party/nix/src/tests/CMakeLists.txt delete mode 100644 third_party/nix/src/tests/arbitrary.hh delete mode 100644 third_party/nix/src/tests/attr-set.cc delete mode 100644 third_party/nix/src/tests/derivations_test.cc delete mode 100644 third_party/nix/src/tests/dummy-store.hh delete mode 100644 third_party/nix/src/tests/hash_test.cc delete mode 100644 third_party/nix/src/tests/lang/binary-data delete mode 100644 third_party/nix/src/tests/lang/data delete mode 100644 third_party/nix/src/tests/lang/dir1/a.nix delete mode 100644 third_party/nix/src/tests/lang/dir2/a.nix delete mode 100644 third_party/nix/src/tests/lang/dir2/b.nix delete mode 100644 third_party/nix/src/tests/lang/dir3/a.nix delete mode 100644 third_party/nix/src/tests/lang/dir3/b.nix delete mode 100644 third_party/nix/src/tests/lang/dir3/c.nix delete mode 100644 third_party/nix/src/tests/lang/dir4/a.nix delete mode 100644 third_party/nix/src/tests/lang/dir4/c.nix delete mode 100644 third_party/nix/src/tests/lang/disabled/README.txt delete mode 100644 third_party/nix/src/tests/lang/disabled/eval-okay-path.nix delete mode 100644 third_party/nix/src/tests/lang/disabled/eval-okay-search-path.exp delete mode 100644 third_party/nix/src/tests/lang/disabled/eval-okay-search-path.flags delete mode 100644 third_party/nix/src/tests/lang/disabled/eval-okay-search-path.nix delete mode 100644 third_party/nix/src/tests/lang/disabled/eval-okay-tail-call-1.nix delete mode 100644 third_party/nix/src/tests/lang/disabled/eval-okay-xml.exp delete mode 100644 third_party/nix/src/tests/lang/disabled/eval-okay-xml.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-abort.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-antiquoted-path.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-assert.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-bad-antiquote-1.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-bad-antiquote-2.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-bad-antiquote-3.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-blackhole.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-deepseq.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-hashfile-missing.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-missing-arg.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-remove.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-scope-5.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-seq.nix delete mode 100644 
third_party/nix/src/tests/lang/eval-fail-substring.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-to-path.nix delete mode 100644 third_party/nix/src/tests/lang/eval-fail-undeclared-arg.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-any-all.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-any-all.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-arithmetic.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-arithmetic.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrnames.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrnames.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrs.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrs.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrs2.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrs2.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrs3.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrs3.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrs4.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrs4.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrs5.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-attrs5.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-autoargs.flags delete mode 100644 third_party/nix/src/tests/lang/eval-okay-backslash-newline-1.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-backslash-newline-1.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-backslash-newline-2.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-backslash-newline-2.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-builtins-add.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-builtins-add.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-builtins.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-builtins.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-callable-attrs.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-callable-attrs.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-catattrs.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-catattrs.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-closure.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-closure.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-comments.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-comments.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-concat.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-concat.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-concatmap.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-concatmap.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-concatstringssep.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-concatstringssep.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-curpos.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-curpos.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-deepseq.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-deepseq.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-delayed-with-inherit.exp delete mode 100644 
third_party/nix/src/tests/lang/eval-okay-delayed-with-inherit.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-delayed-with.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-delayed-with.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-dynamic-attrs-2.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-dynamic-attrs-2.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-dynamic-attrs-bare.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-dynamic-attrs-bare.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-dynamic-attrs.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-dynamic-attrs.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-elem.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-elem.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-empty-args.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-empty-args.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-eq-derivations.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-eq-derivations.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-eq.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-eq.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-filter.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-filter.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-flatten.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-flatten.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-float.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-float.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-fromTOML.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-fromTOML.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-fromjson.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-fromjson.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-functionargs.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-functionargs.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-getattrpos-undefined.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-getattrpos-undefined.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-getattrpos.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-getattrpos.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-getenv.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-getenv.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-hash.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-hashfile.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-hashfile.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-hashstring.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-hashstring.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-if.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-if.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-import.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-import.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-ind-string.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-ind-string.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-let.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-let.nix delete mode 
100644 third_party/nix/src/tests/lang/eval-okay-list.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-list.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-listtoattrs.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-listtoattrs.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-logic.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-logic.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-map.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-map.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-mapattrs.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-mapattrs.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-nested-with.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-nested-with.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-new-let.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-new-let.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-null-dynamic-attrs.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-null-dynamic-attrs.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-partition.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-partition.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-pathexists.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-pathexists.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-patterns.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-patterns.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-readDir.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-readDir.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-readfile.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-readfile.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-redefine-builtin.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-redefine-builtin.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-regex-match.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-regex-match.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-regex-split.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-regex-split.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-remove.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-remove.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-replacestrings.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-replacestrings.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-1.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-1.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-2.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-2.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-3.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-3.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-4.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-4.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-6.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-6.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-7.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-scope-7.nix delete mode 100644 
third_party/nix/src/tests/lang/eval-okay-seq.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-seq.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-sort.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-sort.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-splitversion.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-splitversion.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-string.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-string.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-strings-as-attrs-names.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-strings-as-attrs-names.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-substring.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-substring.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-tail-call-1.exp-disabled delete mode 100644 third_party/nix/src/tests/lang/eval-okay-tojson.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-tojson.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-toxml2.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-toxml2.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-tryeval.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-tryeval.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-types.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-types.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-versions.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-versions.nix delete mode 100644 third_party/nix/src/tests/lang/eval-okay-with.exp delete mode 100644 third_party/nix/src/tests/lang/eval-okay-with.nix delete mode 100644 third_party/nix/src/tests/lang/evalargs-okay-autoargs.nix delete mode 100644 third_party/nix/src/tests/lang/evalstore-okay-autoargs.exp delete mode 100644 third_party/nix/src/tests/lang/evalstore-okay-context-introspection.exp delete mode 100644 third_party/nix/src/tests/lang/evalstore-okay-context-introspection.nix delete mode 100644 third_party/nix/src/tests/lang/evalstore-okay-context.exp delete mode 100644 third_party/nix/src/tests/lang/evalstore-okay-context.nix delete mode 100644 third_party/nix/src/tests/lang/evalstore-okay-toxml.exp delete mode 100644 third_party/nix/src/tests/lang/evalstore-okay-toxml.nix delete mode 100644 third_party/nix/src/tests/lang/imported.nix delete mode 100644 third_party/nix/src/tests/lang/imported2.nix delete mode 100644 third_party/nix/src/tests/lang/lib.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-dup-attrs-1.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-dup-attrs-2.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-dup-attrs-3.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-dup-attrs-4.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-dup-attrs-7.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-dup-formals.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-mixed-nested-attrs1.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-mixed-nested-attrs2.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-path-slash.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-patterns-1.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-regression-20060610.nix delete mode 100644 
third_party/nix/src/tests/lang/parse-fail-uft8.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-undef-var-2.nix delete mode 100644 third_party/nix/src/tests/lang/parse-fail-undef-var.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-1.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-crlf.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-dup-attrs-5.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-dup-attrs-6.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-mixed-nested-attrs-1.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-mixed-nested-attrs-2.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-mixed-nested-attrs-3.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-regression-20041027.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-regression-751.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-subversion.nix delete mode 100644 third_party/nix/src/tests/lang/parse-okay-url.nix delete mode 100644 third_party/nix/src/tests/lang/readDir/bar delete mode 100644 third_party/nix/src/tests/lang/readDir/foo/git-hates-directories delete mode 100644 third_party/nix/src/tests/language-tests.cc delete mode 100644 third_party/nix/src/tests/references_test.cc delete mode 100644 third_party/nix/src/tests/status_helpers.h delete mode 100644 third_party/nix/src/tests/store-api-test.cc delete mode 100644 third_party/nix/src/tests/store-util.hh delete mode 100644 third_party/nix/src/tests/store_tests.cc delete mode 100644 third_party/nix/src/tests/value-to-json.cc delete mode 100644 third_party/nix/test-vm.nix delete mode 100644 third_party/nix/tests/add.sh delete mode 100644 third_party/nix/tests/binary-cache.sh delete mode 100644 third_party/nix/tests/brotli.sh delete mode 100644 third_party/nix/tests/build-dry.sh delete mode 100644 third_party/nix/tests/build-hook.nix delete mode 100644 third_party/nix/tests/build-remote.sh delete mode 100644 third_party/nix/tests/case-hack.sh delete mode 100644 third_party/nix/tests/case.nar delete mode 100644 third_party/nix/tests/check-refs.nix delete mode 100644 third_party/nix/tests/check-refs.sh delete mode 100644 third_party/nix/tests/check-reqs.nix delete mode 100644 third_party/nix/tests/check-reqs.sh delete mode 100644 third_party/nix/tests/check.nix delete mode 100644 third_party/nix/tests/check.sh delete mode 100644 third_party/nix/tests/common.sh.in delete mode 100644 third_party/nix/tests/config.nix delete mode 100644 third_party/nix/tests/dependencies.builder0.sh delete mode 100644 third_party/nix/tests/dependencies.builder1.sh delete mode 100644 third_party/nix/tests/dependencies.builder2.sh delete mode 100644 third_party/nix/tests/dependencies.nix delete mode 100644 third_party/nix/tests/dependencies.sh delete mode 100644 third_party/nix/tests/dump-db.sh delete mode 100644 third_party/nix/tests/export-graph.nix delete mode 100644 third_party/nix/tests/export-graph.sh delete mode 100644 third_party/nix/tests/export.sh delete mode 100644 third_party/nix/tests/fetchGit.sh delete mode 100644 third_party/nix/tests/fetchMercurial.sh delete mode 100644 third_party/nix/tests/fetchurl.sh delete mode 100644 third_party/nix/tests/filter-source.nix delete mode 100644 third_party/nix/tests/filter-source.sh delete mode 100644 third_party/nix/tests/fixed.builder1.sh delete mode 100644 third_party/nix/tests/fixed.builder2.sh delete mode 100644 third_party/nix/tests/fixed.nix delete mode 100644 
third_party/nix/tests/fixed.sh delete mode 100755 third_party/nix/tests/function-trace.sh delete mode 100644 third_party/nix/tests/gc-auto.sh delete mode 100644 third_party/nix/tests/gc-concurrent.builder.sh delete mode 100644 third_party/nix/tests/gc-concurrent.nix delete mode 100644 third_party/nix/tests/gc-concurrent.sh delete mode 100644 third_party/nix/tests/gc-concurrent2.builder.sh delete mode 100644 third_party/nix/tests/gc-runtime.nix delete mode 100644 third_party/nix/tests/gc-runtime.sh delete mode 100644 third_party/nix/tests/gc.sh delete mode 100644 third_party/nix/tests/hash-check.nix delete mode 100644 third_party/nix/tests/hash.sh delete mode 100644 third_party/nix/tests/import-derivation.nix delete mode 100644 third_party/nix/tests/import-derivation.sh delete mode 100644 third_party/nix/tests/init.sh delete mode 100755 third_party/nix/tests/install-darwin.sh delete mode 100644 third_party/nix/tests/lang.sh delete mode 100644 third_party/nix/tests/linux-sandbox.sh delete mode 100644 third_party/nix/tests/logging.sh delete mode 100644 third_party/nix/tests/misc.sh delete mode 100644 third_party/nix/tests/multiple-outputs.nix delete mode 100644 third_party/nix/tests/multiple-outputs.sh delete mode 100644 third_party/nix/tests/nar-access.nix delete mode 100644 third_party/nix/tests/nar-access.sh delete mode 100644 third_party/nix/tests/nix-build.sh delete mode 100644 third_party/nix/tests/nix-channel.sh delete mode 100644 third_party/nix/tests/nix-copy-closure.nix delete mode 100644 third_party/nix/tests/nix-copy-ssh.sh delete mode 100644 third_party/nix/tests/nix-profile.sh delete mode 100644 third_party/nix/tests/nix-shell.sh delete mode 100644 third_party/nix/tests/optimise-store.sh delete mode 100644 third_party/nix/tests/parallel.builder.sh delete mode 100644 third_party/nix/tests/parallel.nix delete mode 100644 third_party/nix/tests/parallel.sh delete mode 100644 third_party/nix/tests/pass-as-file.sh delete mode 100644 third_party/nix/tests/placeholders.sh delete mode 100644 third_party/nix/tests/post-hook.sh delete mode 100644 third_party/nix/tests/pure-eval.nix delete mode 100644 third_party/nix/tests/pure-eval.sh delete mode 100755 third_party/nix/tests/push-to-store.sh delete mode 100644 third_party/nix/tests/referrers.sh delete mode 100644 third_party/nix/tests/remote-builds.nix delete mode 100644 third_party/nix/tests/remote-store.sh delete mode 100644 third_party/nix/tests/repair.sh delete mode 100644 third_party/nix/tests/restricted.nix delete mode 100644 third_party/nix/tests/restricted.sh delete mode 100644 third_party/nix/tests/run.nix delete mode 100644 third_party/nix/tests/run.sh delete mode 100644 third_party/nix/tests/search.nix delete mode 100644 third_party/nix/tests/search.sh delete mode 100644 third_party/nix/tests/secure-drv-outputs.nix delete mode 100644 third_party/nix/tests/secure-drv-outputs.sh delete mode 100644 third_party/nix/tests/setuid.nix delete mode 100644 third_party/nix/tests/shell.nix delete mode 100644 third_party/nix/tests/shell.shebang.rb delete mode 100755 third_party/nix/tests/shell.shebang.sh delete mode 100644 third_party/nix/tests/signing.sh delete mode 100644 third_party/nix/tests/simple.builder.sh delete mode 100644 third_party/nix/tests/simple.nix delete mode 100644 third_party/nix/tests/simple.sh delete mode 100644 third_party/nix/tests/structured-attrs.nix delete mode 100644 third_party/nix/tests/structured-attrs.sh delete mode 100644 third_party/nix/tests/tarball.sh delete mode 100644 third_party/nix/tests/timeout.nix 
delete mode 100644 third_party/nix/tests/timeout.sh delete mode 100644 third_party/nix/tests/user-envs.builder.sh delete mode 100644 third_party/nix/tests/user-envs.nix delete mode 100644 third_party/nix/tests/user-envs.sh diff --git a/third_party/nix/.clang-format b/third_party/nix/.clang-format deleted file mode 100644 index b8c36e122b..0000000000 --- a/third_party/nix/.clang-format +++ /dev/null @@ -1,11 +0,0 @@ -# Use the Google style in this project. -BasedOnStyle: Google -DerivePointerAlignment: false -PointerAlignment: Left -IncludeCategories: - - Regex: '^<.*\.h>' - Priority: 2 - - Regex: '^<.*' - Priority: 1 - - Regex: '.*' - Priority: 3 diff --git a/third_party/nix/.clang-tidy b/third_party/nix/.clang-tidy deleted file mode 100644 index 5b22be767f..0000000000 --- a/third_party/nix/.clang-tidy +++ /dev/null @@ -1,4 +0,0 @@ ---- -Checks: 'abseil-c*,clang-analyzer-security-*,bugprone-*,google-*,modernize-*,cppcoreguidelines-*,misc-*,-modernize-use-trailing-return-type' -WarningsAsErrors: 'abseil-*,clang-analyzer-security*' -... diff --git a/third_party/nix/.dir-locals.el b/third_party/nix/.dir-locals.el deleted file mode 100644 index 92aa816f10..0000000000 --- a/third_party/nix/.dir-locals.el +++ /dev/null @@ -1 +0,0 @@ -((c++-mode . ((c-file-style . "google")))) diff --git a/third_party/nix/.github/ISSUE_TEMPLATE.md b/third_party/nix/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index 3372b1f03f..0000000000 --- a/third_party/nix/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,27 +0,0 @@ - diff --git a/third_party/nix/.gitignore b/third_party/nix/.gitignore deleted file mode 100644 index 7f2d477131..0000000000 --- a/third_party/nix/.gitignore +++ /dev/null @@ -1,119 +0,0 @@ -Makefile.config -perl/Makefile.config - -# / -/aclocal.m4 -/autom4te.cache -/configure -/nix.spec -/stamp-h1 -/svn-revision -/build-gcc -/libtool - -/corepkgs/config.nix - -# /corepkgs/channels/ -/corepkgs/channels/unpack.sh - -# /corepkgs/nar/ -/corepkgs/nar/nar.sh -/corepkgs/nar/unnar.sh - -# /doc/manual/ -/doc/manual/manual.html -/doc/manual/manual.xmli -/doc/manual/manual.pdf -/doc/manual/manual.is-valid -/doc/manual/*.1 -/doc/manual/*.5 -/doc/manual/*.8 -/doc/manual/version.txt - -# /scripts/ -/scripts/nix-profile.sh -/scripts/nix-copy-closure -/scripts/nix-reduce-build -/scripts/nix-http-export.cgi -/scripts/nix-profile-daemon.sh - -# /src/libexpr/ -/src/libexpr/lexer-tab.cc -/src/libexpr/lexer-tab.hh -/src/libexpr/parser-tab.cc -/src/libexpr/parser-tab.hh -/src/libexpr/parser-tab.output -/src/libexpr/nix.tbl - -# /src/libstore/ -/src/libstore/*.gen.hh - -/src/nix/nix - -# /src/nix-env/ -/src/nix-env/nix-env - -# /src/nix-instantiate/ -/src/nix-instantiate/nix-instantiate - -# /src/nix-store/ -/src/nix-store/nix-store - -/src/nix-prefetch-url/nix-prefetch-url - -# /src/nix-daemon/ -/src/nix-daemon/nix-daemon - -/src/nix-collect-garbage/nix-collect-garbage - -# /src/nix-channel/ -/src/nix-channel/nix-channel - -# /src/nix-build/ -/src/nix-build/nix-build - -/src/nix-copy-closure/nix-copy-closure - -/src/build-remote/build-remote - -# /tests/ -/tests/test-tmp -/tests/common.sh -/tests/dummy -/tests/result* -/tests/restricted-innocent -/tests/shell -/tests/shell.drv - -# /tests/lang/ -/tests/lang/*.out -/tests/lang/*.out.xml -/tests/lang/*.ast - -/perl/lib/Nix/Config.pm -/perl/lib/Nix/Store.cc - -/misc/systemd/nix-daemon.service -/misc/systemd/nix-daemon.socket -/misc/upstart/nix-daemon.conf - -/src/resolve-system-dependencies/resolve-system-dependencies - -inst/ - -*.a -*.o -*.so -*.dylib -*.dll -*.exe 
-*.dep -*~ -*.pc -*.plist - -# GNU Global -GPATH -GRTAGS -GSYMS -GTAGS diff --git a/third_party/nix/.skip-subtree b/third_party/nix/.skip-subtree deleted file mode 100644 index d49b47f75a..0000000000 --- a/third_party/nix/.skip-subtree +++ /dev/null @@ -1 +0,0 @@ -Third-party code with non-depot layout. diff --git a/third_party/nix/.travis.yml b/third_party/nix/.travis.yml deleted file mode 100644 index 99218a963c..0000000000 --- a/third_party/nix/.travis.yml +++ /dev/null @@ -1,2 +0,0 @@ -os: osx -script: ./tests/install-darwin.sh diff --git a/third_party/nix/.version b/third_party/nix/.version deleted file mode 100644 index fd06a9268d..0000000000 --- a/third_party/nix/.version +++ /dev/null @@ -1 +0,0 @@ -2.3.4 \ No newline at end of file diff --git a/third_party/nix/CMakeLists.txt b/third_party/nix/CMakeLists.txt deleted file mode 100644 index 5d89572f16..0000000000 --- a/third_party/nix/CMakeLists.txt +++ /dev/null @@ -1,77 +0,0 @@ -# -*- mode: cmake; -*- -cmake_minimum_required(VERSION 3.16) -project(nix CXX) -set(CMAKE_CXX_STANDARD 17) - -# Export compile_commands.json which can be used by tools such as -# clangd and clang-tidy. -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -# Enable warnings -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror") - -# Provide an output path for pkgconfig. -include(GNUInstallDirs) -set(PKGCONFIG_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR}/pkgconfig) - -# The following lines import CMake-native dependencies which may -# contain useful definitions. Other dependencies are not treated -# specially by CMake and are only linked into the resulting binary. -find_package(BZip2) -find_package(Boost COMPONENTS context) -find_package(CURL) -find_package(LibLZMA) -find_package(Protobuf REQUIRED) -find_package(SQLite3) -find_package(Threads) -find_package(absl REQUIRED) -find_package(gRPC REQUIRED) -find_package(glog REQUIRED) - -find_program(CLANG_TIDY_PATH clang-tidy) -if (CLANG_TIDY_PATH) - # TODO(kanepyork): figure out how to reenable - #message("Found clang-tidy: ${CLANG_TIDY_PATH}") - #set(CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH};--line-filter=[{\"name\":\"src/cpptoml/cpptoml.h\"},{\"name\":\"google/protobuf/metadata_lite.h\"}]") - - # nix's toolchain has a problem with system header includes, so clang-tidy requires a manual -isystem - if (DEFINED ENV{LIBCXX_INCLUDE}) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -isystem $ENV{LIBCXX_INCLUDE}") - endif() -endif() - -if (DEFINED ENV{SANDBOX_SHELL}) - message("Using SANDBOX_SHELL = $ENV{SANDBOX_SHELL}") - set(SANDBOX_SHELL "$ENV{SANDBOX_SHELL}") -else() - find_program(BUSYBOX busybox) - if (BUSYBOX) - set(SANDBOX_SHELL "${BUSYBOX}") - else() - message(FATAL_ERROR "Could not find busybox and SANDBOX_SHELL is not set") - endif() -endif() - -# generate a configuration file (autoheader-style) to configure -# certain symbols that Nix depends on. 
-configure_file(config.h.in nix_config.h @ONLY) -INSTALL(FILES "${PROJECT_BINARY_DIR}/nix_config.h" DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/nix") - -# install corepkgs -configure_file(corepkgs/config.nix.in config.nix @ONLY) -INSTALL(DIRECTORY corepkgs - DESTINATION ${CMAKE_INSTALL_DATADIR}/nix - FILES_MATCHING - PATTERN "*.nix") -INSTALL(FILES "${PROJECT_BINARY_DIR}/config.nix" DESTINATION "${CMAKE_INSTALL_DATADIR}/nix/corepkgs") - -# Conditionally run tests -option(PACKAGE_TESTS "Build the tests" ON) -if (PACKAGE_TESTS) - enable_testing() - find_package(GTest) - find_package(rapidcheck) - include(GoogleTest) -endif() - -add_subdirectory(src) diff --git a/third_party/nix/COPYING b/third_party/nix/COPYING deleted file mode 100644 index 5ab7695ab8..0000000000 --- a/third_party/nix/COPYING +++ /dev/null @@ -1,504 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 - - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. - - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations below. - - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. - - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. - - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. - - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. - - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. 
Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. - - Finally, software patents pose a constant threat to the existence of -any free program. We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. - - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. - - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. - - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. - - For example, on rare occasions, there may be a special need to -encourage the widest possible use of a certain library, so that it becomes -a de-facto standard. To achieve this, non-free programs must be -allowed to use the library. A more frequent case is that a free -library does the same job as widely used non-free libraries. In this -case, there is little to gain by limiting the free library to free -software only, so we use the Lesser General Public License. - - In other cases, permission to use a particular library in non-free -programs enables a greater number of people to use a large body of -free software. For example, permission to use the GNU C Library in -non-free programs enables many more people to use the whole GNU -operating system, as well as its variant, the GNU/Linux operating -system. - - Although the Lesser General Public License is Less protective of the -users' freedom, it does ensure that the user of a program that is -linked with the Library has the freedom and the wherewithal to run -that program using a modified version of the Library. - - The precise terms and conditions for copying, distribution and -modification follow. Pay close attention to the difference between a -"work based on the library" and a "work that uses the library". The -former contains code derived from the library, whereas the latter must -be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. 
This License Agreement applies to any software library or other -program which contains a notice placed by the copyright holder or -other authorized party saying it may be distributed under the terms of -this Lesser General Public License (also called "this License"). -Each licensee is addressed as "you". - - A "library" means a collection of software functions and/or data -prepared so as to be conveniently linked with application programs -(which use some of those functions and data) to form executables. - - The "Library", below, refers to any such software library or work -which has been distributed under these terms. A "work based on the -Library" means either the Library or any derivative work under -copyright law: that is to say, a work containing the Library or a -portion of it, either verbatim or with modifications and/or translated -straightforwardly into another language. (Hereinafter, translation is -included without limitation in the term "modification".) - - "Source code" for a work means the preferred form of the work for -making modifications to it. For a library, complete source code means -all the source code for all modules it contains, plus any associated -interface definition files, plus the scripts used to control compilation -and installation of the library. - - Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running a program using the Library is not restricted, and output from -such a program is covered only if its contents constitute a work based -on the Library (independent of the use of the Library in a tool for -writing it). Whether that is true depends on what the Library does -and what the program that uses the Library does. - - 1. You may copy and distribute verbatim copies of the Library's -complete source code as you receive it, in any medium, provided that -you conspicuously and appropriately publish on each copy an -appropriate copyright notice and disclaimer of warranty; keep intact -all the notices that refer to this License and to the absence of any -warranty; and distribute a copy of this License along with the -Library. - - You may charge a fee for the physical act of transferring a copy, -and you may at your option offer warranty protection in exchange for a -fee. - - 2. You may modify your copy or copies of the Library or any portion -of it, thus forming a work based on the Library, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) The modified work must itself be a software library. - - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. - - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. - - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. - - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. 
Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Library, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Library, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote -it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library -with the Library (or with a work based on the Library) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may opt to apply the terms of the ordinary GNU General Public -License instead of this License to a given copy of the Library. To do -this, you must alter all the notices that refer to this License, so -that they refer to the ordinary GNU General Public License, version 2, -instead of to this License. (If a newer version than version 2 of the -ordinary GNU General Public License has appeared, then you can specify -that version instead if you wish.) Do not make any other change in -these notices. - - Once this change is made in a given copy, it is irreversible for -that copy, so the ordinary GNU General Public License applies to all -subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of -the Library into a program that is not a library. - - 4. You may copy and distribute the Library (or a portion or -derivative of it, under Section 2) in object code or executable form -under the terms of Sections 1 and 2 above provided that you accompany -it with the complete corresponding machine-readable source code, which -must be distributed under the terms of Sections 1 and 2 above on a -medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the -source code from the same place satisfies the requirement to -distribute the source code, even though third parties are not -compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the -Library, but is designed to work with the Library by being compiled or -linked with it, is called a "work that uses the Library". Such a -work, in isolation, is not a derivative work of the Library, and -therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library -creates an executable that is a derivative of the Library (because it -contains portions of the Library), rather than a "work that uses the -library". The executable is therefore covered by this License. -Section 6 states terms for distribution of such executables. 
- - When a "work that uses the Library" uses material from a header file -that is part of the Library, the object code for the work may be a -derivative work of the Library even though the source code is not. -Whether this is true is especially significant if the work can be -linked without the Library, or if the work is itself a library. The -threshold for this to be true is not precisely defined by law. - - If such an object file uses only numerical parameters, data -structure layouts and accessors, and small macros and small inline -functions (ten lines or less in length), then the use of the object -file is unrestricted, regardless of whether it is legally a derivative -work. (Executables containing this object code plus portions of the -Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may -distribute the object code for the work under the terms of Section 6. -Any executables containing that work also fall under Section 6, -whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or -link a "work that uses the Library" with the Library to produce a -work containing portions of the Library, and distribute that work -under terms of your choice, provided that the terms permit -modification of the work for the customer's own use and reverse -engineering for debugging such modifications. - - You must give prominent notice with each copy of the work that the -Library is used in it and that the Library and its use are covered by -this License. You must supply a copy of this License. If the work -during execution displays copyright notices, you must include the -copyright notice for the Library among them, as well as a reference -directing the user to the copy of this License. Also, you must do one -of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. (It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at - least three years, to give the same user the materials - specified in Subsection 6a, above, for a charge no more - than the cost of performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. 
- - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the -Library" must include any data and utility programs needed for -reproducing the executable from it. However, as a special exception, -the materials to be distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies -the executable. - - It may happen that this requirement contradicts the license -restrictions of other proprietary libraries that do not normally -accompany the operating system. Such a contradiction means you cannot -use both them and the Library together in an executable that you -distribute. - - 7. You may place library facilities that are a work based on the -Library side-by-side in a single library together with other library -facilities not covered by this License, and distribute such a combined -library, provided that the separate distribution of the work based on -the Library and of the other library facilities is otherwise -permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. - - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. You may not copy, modify, sublicense, link with, or distribute -the Library except as expressly provided under this License. Any -attempt otherwise to copy, modify, sublicense, link with, or -distribute the Library is void, and will automatically terminate your -rights under this License. However, parties who have received copies, -or rights, from you under this License will not have their licenses -terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Library or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Library (or any work based on the -Library), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - - 10. Each time you redistribute the Library (or any work based on the -Library), the recipient automatically receives a license from the -original licensor to copy, distribute, link with or modify the Library -subject to these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties with -this License. - - 11. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Library at all. For example, if a patent -license would not permit royalty-free redistribution of the Library by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply, -and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 12. If the distribution and/or use of the Library is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Library under this License may add -an explicit geographical distribution limitation excluding those countries, -so that distribution is permitted only in or among countries not thus -excluded. In such case, this License incorporates the limitation as if -written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new -versions of the Lesser General Public License from time to time. -Such new versions will be similar in spirit to the present version, -but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library -specifies a version number of this License which applies to it and -"any later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Library does not specify a -license version number, you may choose any version ever published by -the Free Software Foundation. - - 14. If you wish to incorporate parts of the Library into other free -programs whose distribution conditions are incompatible with these, -write to the author to ask for permission. For software which is -copyrighted by the Free Software Foundation, write to the Free -Software Foundation; we sometimes make exceptions for this. Our -decision will be guided by the two goals of preserving the free status -of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE -LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU -FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR -CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE -LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF -SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Libraries - - If you develop a new library, and you want it to be of the greatest -possible use to the public, we recommend making it free software that -everyone can redistribute and change. You can do so by permitting -redistribution under these terms (or, alternatively, under the terms of the -ordinary General Public License). - - To apply these terms, attach the following notices to the library. It is -safest to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -Also add information on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the library, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - library `Frob' (a library for tweaking knobs) written by James Random Hacker. - - , 1 April 1990 - Ty Coon, President of Vice - -That's all there is to it! 
- - diff --git a/third_party/nix/OWNERS b/third_party/nix/OWNERS deleted file mode 100644 index 886f766d0c..0000000000 --- a/third_party/nix/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -inherited: true -owners: - - grfn - - tazjin - - kanepyork diff --git a/third_party/nix/README.md b/third_party/nix/README.md deleted file mode 100644 index 4bb5896831..0000000000 --- a/third_party/nix/README.md +++ /dev/null @@ -1,179 +0,0 @@ -Tvix, also known as TVL's fork of Nix -------------------------------------- - -Nix is a new take on package management that is fairly unique. Because -of its purity aspects, a lot of issues found in traditional package -managers don't appear with Nix. - -To find out more about the tool, usage and installation instructions, -please read the manual, which is available on the Nix website at -<https://nixos.org/nix/manual>. - -This repository is [TVL's](https://tvl.fyi) fork of Nix, which we lovingly -refer to as Tvix. - -## Fork background - -Nix is a fantastic project with over a decade of demonstrated -real-world usage, but also with quite a few problems. - -First of all, the project consists of two main components: The Nix -package collection ("[nixpkgs][]") and the package manager itself. - -The package collection is an enormous effort with hundreds of -thousands of commits, encoding expert knowledge about lots of -different software and ways of building and managing it. It is a very -valuable piece of software. - -The package manager however is an old C++ project with severe code -quality issues, little to no documentation, no consistent style and no -unit test coverage. - -Its codebase is larger than it needs to be (often due to custom -reimplementations of basic functionality) and is mostly ad-hoc -structured, making it difficult to correctly implement large-scale -improvements. - -In addition, the upstream Nix project is diverging from the opinions -of some community members via the introduction of concepts such as Nix -flakes. - -To counteract these things we have decided to fork Nix. - -## Fork goals - -The things listed here are explicitly in-scope for work on the fork. -This list is not exhaustive, and it is very likely that many other -smaller things will be discovered along the way. - -### nixpkgs compatibility - -This fork will maintain compatibility with nixpkgs as much as -possible. If at any point we do need to diverge, we will do it in a -way that is backwards compatible. - -### Code quality improvements - -Code quality encompasses several different issues. - -One goal is to slowly bring the codebase in line with the [Google C++ -style guide][google-style]. Apart from the trivial reformatting (which -is already done), this means slowly chipping away at incorrectly -structured type hierarchies, usage of exceptions, usage of raw -pointers, global mutability and so on. - -Another goal is to reduce the amount of code in Nix by removing custom -reimplementations of basic functionality (such as string splitting or -reading files). - -For functionality that is not part of the C++17 standard library, -[Abseil][] will be the primary external library used. - -### Explicit RPC mechanisms - -Nix currently uses homegrown mechanisms of interacting with other Nix -binaries, for example for remote builds or interaction between the CLI -and the Nix daemon. - -This will be replaced with [gRPC][]. - -### New sandboxing mechanism - -Nix implements its own sandboxing mechanism.
This was probably the -correct decision at the time, but is not necessary anymore because -Linux containers have become massively popular and lots of new tooling -is now available. - -The goal is to replace the custom sandboxing implementation with -pluggable [OCI runtimes][oci], which will make it possible to use -arbitrary container runtimes such as [gVisor][] or [systemd-nspawn][]. - -### Pluggable Nix store backends - -The current Nix store implementation will be removed from Nix's core -and instead be refactored into a gRPC API that can be implemented by -different backends. - -### Builds as graph reductions - -A Nix derivation that should be instantiated describes a build graph. -This graph will become a first-class citizen, making it possible to -distribute different parts of the computation to different nodes. - -Implementing this properly will also allow us to improve the -implementation of import-from-derivation by explicitly moving through -different graph reduction stages. - -## Fork non-goals - -To set expectations, there are some explicit non-goals, too. - -* Merging these changes back into upstream is not a goal, and maybe - not even feasible. The core work has not even started yet and just - basic cleanup has already created a diff of over 40 000 lines. - - This would likely also turn into a political effort, which we have - no interest in. - -* Improved performance is not an (initial) goal. Nix performance is - very unevenly distributed across the codebase (some things have seen - a lot of ad-hoc optimisation, others are written like inefficient - toy implementations) and we simply don't know what effect the - cleanup will have. - - Once the codebase is in a better state we will be able to start - optimising it again while retaining readability, but this is not a - goal until a later point in time. - -* Compatibility with new upstream features is not a goal. Specifically - we do not want Nix flakes, but other changes upstream makes will be - considered for inclusion. - -* Support for non-Linux systems is not a goal. Currently Nix supports Mac OS and - potentially other systems, but this support will be dropped. - - Once we have OCI-compatible sandboxes and a store protocol it will - be possible to reintroduce these with less friction. - -## Building - -To build the project, set up an out-of-tree cmake directory and run cmake in -nix-shell. - -``` -mkdir ~/build/tvix -cd ~/build/tvix - -nix-shell $DEPOT_PATH -A third_party.nix.build-shell - -# Disable clang-tidy for quicker builds -cmake $DEPOT_PATH/third_party/nix/ -DCLANG_TIDY_PATH="" -make -j16 -l12 - -# Run tests -make test -``` - -## Contributing to the fork - -The TVL depot's default [contribution guidelines][contributing] apply. - -In addition, please make sure that submitted code builds and is -formatted with `clang-format`, using the configuration found in this -folder. - -## License - -Nix is released under the LGPL v2.1. - -This product includes software developed by the OpenSSL Project for -use in the [OpenSSL Toolkit](http://www.OpenSSL.org/).
- -[nixpkgs]: https://github.com/NixOS/nixpkgs -[google-style]: https://google.github.io/styleguide/cppguide.html -[Abseil]: https://abseil.io/ -[gRPC]: https://grpc.io/ -[oci]: https://www.opencontainers.org/ -[gVisor]: https://gvisor.dev/ -[systemd-nspawn]: https://www.freedesktop.org/software/systemd/man/systemd-nspawn.html -[contributing]: https://cs.tvl.fyi/depot/-/blob/docs/CONTRIBUTING.md diff --git a/third_party/nix/clangd.nix b/third_party/nix/clangd.nix deleted file mode 100644 index 7a29819b10..0000000000 --- a/third_party/nix/clangd.nix +++ /dev/null @@ -1,30 +0,0 @@ -# Create a clangd wrapper script that can be used with this project. -# The default Nix wrapper only works with C projects, not C++ -# projects. -# -# The CPATH construction logic is lifted from the original wrapper -# script. - -pkgs: - -pkgs.writeShellScriptBin "nix-clangd" '' - buildcpath() { - local path - while (( $# )); do - case $1 in - -isystem) - shift - path=$path''${path:+':'}$1 - esac - shift - done - echo $path - } - - export CPATH=''${CPATH}''${CPATH:+':'}:$(buildcpath ''${NIX_CFLAGS_COMPILE}) - export CPATH=${pkgs.glibc.dev}/include''${CPATH:+':'}''${CPATH} - export CPLUS_INCLUDE_PATH=${pkgs.llvmPackages_11.libcxx}/include/c++/v1:''${CPATH} - - # TODO(tazjin): Configurable commands directory? - exec -a clangd ${pkgs.llvmPackages_11.clang-unwrapped}/bin/clangd -cross-file-rename $@ -'' diff --git a/third_party/nix/config.h.in b/third_party/nix/config.h.in deleted file mode 100644 index 986969705b..0000000000 --- a/third_party/nix/config.h.in +++ /dev/null @@ -1,130 +0,0 @@ -// This file configures various build-time settings in Nix. In -// previous iterations it was mostly responsible for configuring -// OS-dependent settings, which are still preserved below but should -// be removed. - -#ifndef NIX_CONFIG_H -#define NIX_CONFIG_H - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "2.3.4" - -/* Platform identifier (`cpu-os`) */ -// TODO(tazjin): generate -#define SYSTEM "x86_64-linux" - -// TODO(tazjin): some of these values are nonsensical for Nix -#define NIX_PREFIX "@CMAKE_INSTALL_PREFIX@" -#define NIX_STORE_DIR "/nix/store" -#define NIX_DATA_DIR "@CMAKE_INSTALL_FULL_DATADIR@" -#define NIX_LOG_DIR "/nix/var/log/nix" -#define NIX_STATE_DIR "/nix/var/nix" -#define NIX_CONF_DIR "/etc/nix" -#define NIX_LIBEXEC_DIR "@CMAKE_INSTALL_FULL_LIBEXECDIR@" -#define NIX_BIN_DIR "@CMAKE_INSTALL_FULL_BINDIR@" -#define NIX_MAN_DIR "@CMAKE_INSTALL_FULL_MANDIR@" -#define SANDBOX_SHELL "@SANDBOX_SHELL@" - -// Defines used only in tests (e.g. to access data) -#define NIX_SRC_DIR "@CMAKE_SOURCE_DIR@" - -// These are hardcoded either because support for non-Linux is being -// dropped, or because a decision was made to make inclusion of these -// libraries mandatory. - -#define HAVE_STRUCT_DIRENT_D_TYPE 1 - -#define HAVE_LUTIMES 1 - -// Whether link() works on symlinks -#define CAN_LINK_SYMLINK 1 - -/* Whether to use the Boehm garbage collector. */ -#define HAVE_BOEHMGC 1 - -/* Define if the Boost library is available. */ -#define HAVE_BOOST 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_BZLIB_H 1 - -/* Define if the compiler supports basic C++17 syntax */ -#define HAVE_CXX17 1 - -/* Define to 1 if you have the header file, and it defines `DIR` */ -#define HAVE_DIRENT_H 1 - -/* Define to 1 if you have the header file, and it defines `DIR` */ -#define HAVE_DIR_H 1 - -/* Define to 1 if you have the header file. 
*/ -#define HAVE_EDITLINE_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* Define to 1 if you have the `lchown` function. */ -#define HAVE_LCHOWN 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_LOCALE 1 - -/* Define to 1 if you have the `lutimes` function. */ -#define HAVE_LUTIMES 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define to 1 if you have the `pipe2` function. */ -#define HAVE_PIPE2 1 - -/* Define to 1 if you have the `posix_fallocate` function. */ -#define HAVE_POSIX_FALLOCATE 1 - -/* Define to 1 if you have the `pubsetbuf` function. */ -#define HAVE_PUBSETBUF 1 - -/* Whether seccomp is available and should be used for sandboxing. */ -#define HAVE_SECCOMP 1 - -/* Define to 1 if you have the `setresuid` function. */ -#define HAVE_SETRESUID 1 - -/* Define to 1 if you have the `setreuid` function. */ -#define HAVE_SETREUID 1 - -/* Whether to use libsodium for cryptography. */ -#define HAVE_SODIUM 1 - -/* Define to 1 if you have the `statvfs` function. */ -#define HAVE_STATVFS 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the `strsignal` function. */ -#define HAVE_STRSIGNAL 1 - -/* Define to 1 if you have the `sysconf` function. */ -#define HAVE_SYSCONF 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - - -#endif diff --git a/third_party/nix/config/config.sub b/third_party/nix/config/config.sub deleted file mode 100755 index c19e671805..0000000000 --- a/third_party/nix/config/config.sub +++ /dev/null @@ -1,1818 +0,0 @@ -#! /bin/sh -# Configuration validation subroutine script. -# Copyright 1992-2018 Free Software Foundation, Inc. - -timestamp='2018-08-13' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see . -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that -# program. This Exception is an additional permission under section 7 -# of the GNU General Public License, version 3 ("GPLv3"). - - -# Please send patches to . -# -# Configuration subroutine to validate and canonicalize a configuration type. -# Supply the specified configuration type as an argument. -# If it is invalid, we print an error message on stderr and exit with code 1. -# Otherwise, we print the canonical config type on stdout and succeed. 
- -# You can get the latest version of this script from: -# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub - -# This file is supposed to be the same for all GNU packages -# and recognize all the CPU types, system types and aliases -# that are meaningful with *any* GNU software. -# Each package is responsible for reporting which valid configurations -# it does not support. The user should be able to distinguish -# a failure to support a valid configuration from a meaningless -# configuration. - -# The goal of this file is to map all the various variations of a given -# machine specification into a single specification in the form: -# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM -# or in some cases, the newer four-part form: -# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM -# It is wrong to echo any other type of specification. - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS - -Canonicalize a configuration name. - -Options: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to ." - -version="\ -GNU config.sub ($timestamp) - -Copyright 1992-2018 Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" - exit 1 ;; - - *local*) - # First pass through any local machine types. 
- echo "$1" - exit ;; - - * ) - break ;; - esac -done - -case $# in - 0) echo "$me: missing argument$help" >&2 - exit 1;; - 1) ;; - *) echo "$me: too many arguments$help" >&2 - exit 1;; -esac - -# Split fields of configuration type -IFS="-" read -r field1 field2 field3 field4 <&2 - exit 1 - ;; - *-*-*-*) - basic_machine=$field1-$field2 - os=$field3-$field4 - ;; - *-*-*) - # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two - # parts - maybe_os=$field2-$field3 - case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc \ - | linux-newlib* | linux-musl* | linux-uclibc* | uclinux-uclibc* \ - | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ - | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ - | storm-chaos* | os2-emx* | rtmk-nova*) - basic_machine=$field1 - os=$maybe_os - ;; - android-linux) - basic_machine=$field1-unknown - os=linux-android - ;; - *) - basic_machine=$field1-$field2 - os=$field3 - ;; - esac - ;; - *-*) - # Second component is usually, but not always the OS - case $field2 in - # Prevent following clause from handling this valid os - sun*os*) - basic_machine=$field1 - os=$field2 - ;; - # Manufacturers - dec* | mips* | sequent* | encore* | pc532* | sgi* | sony* \ - | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ - | unicom* | ibm* | next | hp | isi* | apollo | altos* \ - | convergent* | ncr* | news | 32* | 3600* | 3100* | hitachi* \ - | c[123]* | convex* | sun | crds | omron* | dg | ultra | tti* \ - | harris | dolphin | highlevel | gould | cbm | ns | masscomp \ - | apple | axis | knuth | cray | microblaze* \ - | sim | cisco | oki | wec | wrs | winbond) - basic_machine=$field1-$field2 - os= - ;; - *) - basic_machine=$field1 - os=$field2 - ;; - esac - ;; - *) - # Convert single-component short-hands not valid as part of - # multi-component configurations. 
- case $field1 in - 386bsd) - basic_machine=i386-pc - os=bsd - ;; - a29khif) - basic_machine=a29k-amd - os=udi - ;; - adobe68k) - basic_machine=m68010-adobe - os=scout - ;; - alliant) - basic_machine=fx80-alliant - os= - ;; - altos | altos3068) - basic_machine=m68k-altos - os= - ;; - am29k) - basic_machine=a29k-none - os=bsd - ;; - amdahl) - basic_machine=580-amdahl - os=sysv - ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=bsd - ;; - aros) - basic_machine=i386-pc - os=aros - ;; - aux) - basic_machine=m68k-apple - os=aux - ;; - balance) - basic_machine=ns32k-sequent - os=dynix - ;; - blackfin) - basic_machine=bfin-unknown - os=linux - ;; - cegcc) - basic_machine=arm-unknown - os=cegcc - ;; - convex-c1) - basic_machine=c1-convex - os=bsd - ;; - convex-c2) - basic_machine=c2-convex - os=bsd - ;; - convex-c32) - basic_machine=c32-convex - os=bsd - ;; - convex-c34) - basic_machine=c34-convex - os=bsd - ;; - convex-c38) - basic_machine=c38-convex - os=bsd - ;; - cray) - basic_machine=j90-cray - os=unicos - ;; - crds | unos) - basic_machine=m68k-crds - os= - ;; - delta88) - basic_machine=m88k-motorola - os=sysv3 - ;; - dicos) - basic_machine=i686-pc - os=dicos - ;; - djgpp) - basic_machine=i586-pc - os=msdosdjgpp - ;; - ebmon29k) - basic_machine=a29k-amd - os=ebmon - ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=ose - ;; - gmicro) - basic_machine=tron-gmicro - os=sysv - ;; - go32) - basic_machine=i386-pc - os=go32 - ;; - h8300hms) - basic_machine=h8300-hitachi - os=hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=hms - ;; - harris) - basic_machine=m88k-harris - os=sysv3 - ;; - hp300bsd) - basic_machine=m68k-hp - os=bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=hpux - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=proelf - ;; - i386mach) - basic_machine=i386-mach - os=mach - ;; - vsta) - basic_machine=i386-pc - os=vsta - ;; - isi68 | isi) - basic_machine=m68k-isi - os=sysv - ;; - m68knommu) - basic_machine=m68k-unknown - os=linux - ;; - magnum | m3230) - basic_machine=mips-mips - os=sysv - ;; - merlin) - basic_machine=ns32k-utek - os=sysv - ;; - mingw64) - basic_machine=x86_64-pc - os=mingw64 - ;; - mingw32) - basic_machine=i686-pc - os=mingw32 - ;; - mingw32ce) - basic_machine=arm-unknown - os=mingw32ce - ;; - monitor) - basic_machine=m68k-rom68k - os=coff - ;; - morphos) - basic_machine=powerpc-unknown - os=morphos - ;; - moxiebox) - basic_machine=moxie-unknown - os=moxiebox - ;; - msdos) - basic_machine=i386-pc - os=msdos - ;; - msys) - basic_machine=i686-pc - os=msys - ;; - mvs) - basic_machine=i370-ibm - os=mvs - ;; - nacl) - basic_machine=le32-unknown - os=nacl - ;; - ncr3000) - basic_machine=i486-ncr - os=sysv4 - ;; - netbsd386) - basic_machine=i386-pc - os=netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=newsos - ;; - news1000) - basic_machine=m68030-sony - os=newsos - ;; - necv70) - basic_machine=v70-nec - os=sysv - ;; - nh3000) - basic_machine=m68k-harris - os=cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=cxux - ;; - nindy960) - basic_machine=i960-intel - os=nindy - ;; - mon960) - basic_machine=i960-intel - os=mon960 - ;; - nonstopux) - 
basic_machine=mips-compaq - os=nonstopux - ;; - os400) - basic_machine=powerpc-ibm - os=os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=ose - ;; - os68k) - basic_machine=m68k-none - os=os68k - ;; - paragon) - basic_machine=i860-intel - os=osf - ;; - parisc) - basic_machine=hppa-unknown - os=linux - ;; - pw32) - basic_machine=i586-unknown - os=pw32 - ;; - rdos | rdos64) - basic_machine=x86_64-pc - os=rdos - ;; - rdos32) - basic_machine=i386-pc - os=rdos - ;; - rom68k) - basic_machine=m68k-rom68k - os=coff - ;; - sa29200) - basic_machine=a29k-amd - os=udi - ;; - sei) - basic_machine=mips-sei - os=seiux - ;; - sps7) - basic_machine=m68k-bull - os=sysv2 - ;; - st2000) - basic_machine=m68k-tandem - os= - ;; - stratus) - basic_machine=i860-stratus - os=sysv4 - ;; - sun2) - basic_machine=m68000-sun - os= - ;; - sun2os3) - basic_machine=m68000-sun - os=sunos3 - ;; - sun2os4) - basic_machine=m68000-sun - os=sunos4 - ;; - sun3) - basic_machine=m68k-sun - os= - ;; - sun3os3) - basic_machine=m68k-sun - os=sunos3 - ;; - sun3os4) - basic_machine=m68k-sun - os=sunos4 - ;; - sun4) - basic_machine=sparc-sun - os= - ;; - sun4os3) - basic_machine=sparc-sun - os=sunos3 - ;; - sun4os4) - basic_machine=sparc-sun - os=sunos4 - ;; - sun4sol2) - basic_machine=sparc-sun - os=solaris2 - ;; - sun386 | sun386i | roadrunner) - basic_machine=i386-sun - os= - ;; - sv1) - basic_machine=sv1-cray - os=unicos - ;; - symmetry) - basic_machine=i386-sequent - os=dynix - ;; - t3e) - basic_machine=alphaev5-cray - os=unicos - ;; - t90) - basic_machine=t90-cray - os=unicos - ;; - toad1) - basic_machine=pdp10-xkl - os=tops20 - ;; - tpf) - basic_machine=s390x-ibm - os=tpf - ;; - udi29k) - basic_machine=a29k-amd - os=udi - ;; - ultra3) - basic_machine=a29k-nyu - os=sym1 - ;; - v810 | necv810) - basic_machine=v810-nec - os=none - ;; - vaxv) - basic_machine=vax-dec - os=sysv - ;; - vms) - basic_machine=vax-dec - os=vms - ;; - vxworks960) - basic_machine=i960-wrs - os=vxworks - ;; - vxworks68) - basic_machine=m68k-wrs - os=vxworks - ;; - vxworks29k) - basic_machine=a29k-wrs - os=vxworks - ;; - xbox) - basic_machine=i686-pc - os=mingw32 - ;; - ymp) - basic_machine=ymp-cray - os=unicos - ;; - *) - basic_machine=$1 - os= - ;; - esac - ;; -esac - -# Decode aliases for certain CPU-COMPANY combinations. -case $basic_machine in - # Here we handle the default manufacturer of certain CPU types. It is in - # some cases the only manufacturer, in others, it is the most popular. - craynv) - basic_machine=craynv-cray - os=${os:-unicosmp} - ;; - fx80) - basic_machine=fx80-alliant - ;; - w89k) - basic_machine=hppa1.1-winbond - ;; - op50n) - basic_machine=hppa1.1-oki - ;; - op60c) - basic_machine=hppa1.1-oki - ;; - romp) - basic_machine=romp-ibm - ;; - mmix) - basic_machine=mmix-knuth - ;; - rs6000) - basic_machine=rs6000-ibm - ;; - vax) - basic_machine=vax-dec - ;; - pdp11) - basic_machine=pdp11-dec - ;; - we32k) - basic_machine=we32k-att - ;; - cydra) - basic_machine=cydra-cydrome - ;; - i370-ibm* | ibm*) - basic_machine=i370-ibm - ;; - orion) - basic_machine=orion-highlevel - ;; - orion105) - basic_machine=clipper-highlevel - ;; - mac | mpw | mac-mpw) - basic_machine=m68k-apple - ;; - pmac | pmac-mpw) - basic_machine=powerpc-apple - ;; - xps | xps100) - basic_machine=xps100-honeywell - ;; - - # Recognize the basic CPU types without company name. - # Some are omitted here because they have special meanings below. 
- 1750a | 580 \ - | a29k \ - | aarch64 | aarch64_be \ - | abacus \ - | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ - | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ - | am33_2.0 \ - | arc | arceb \ - | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv6m | armv[78][arm] \ - | avr | avr32 \ - | asmjs \ - | ba \ - | be32 | be64 \ - | bfin \ - | c4x | c8051 | clipper | csky \ - | d10v | d30v | dlx | dsp16xx \ - | e2k | epiphany \ - | fido | fr30 | frv | ft32 \ - | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ - | hexagon \ - | i370 | i860 | i960 | ia16 | ia64 \ - | ip2k | iq2000 \ - | k1om \ - | le32 | le64 \ - | lm32 \ - | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip \ - | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64el \ - | mips64octeon | mips64octeonel \ - | mips64orion | mips64orionel \ - | mips64r5900 | mips64r5900el \ - | mips64vr | mips64vrel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el \ - | mips64vr5000 | mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa32r6 | mipsisa32r6el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64r6 | mipsisa64r6el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipsr5900 | mipsr5900el \ - | mipstx39 | mipstx39el \ - | mn10200 | mn10300 \ - | moxie \ - | mt \ - | msp430 \ - | nds32 | nds32le | nds32be \ - | nfp \ - | nios | nios2 | nios2eb | nios2el \ - | ns16k | ns32k \ - | open8 | or1k | or1knd | or32 \ - | pdp10 | pj | pjl \ - | powerpc | powerpc64 | powerpc64le | powerpcle \ - | pru \ - | pyramid \ - | riscv | riscv32 | riscv64 \ - | rl78 | rx \ - | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh[23]ele \ - | sh64 | sh64le \ - | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ - | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ - | spu \ - | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ - | ubicom32 \ - | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | visium \ - | wasm32 \ - | x86 | xc16x | xstormy16 | xgate | xtensa \ - | z8k | z80) - basic_machine=$basic_machine-unknown - ;; - c54x) - basic_machine=tic54x-unknown - ;; - c55x) - basic_machine=tic55x-unknown - ;; - c6x) - basic_machine=tic6x-unknown - ;; - leon|leon[3-9]) - basic_machine=sparc-$basic_machine - ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65) - ;; - m9s12z | m68hcs12z | hcs12z | s12z) - basic_machine=s12z-unknown - ;; - m9s12z-* | m68hcs12z-* | hcs12z-* | s12z-*) - basic_machine=s12z-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - ms1) - basic_machine=mt-unknown - ;; - strongarm | thumb | xscale) - basic_machine=arm-unknown - ;; - xscaleeb) - basic_machine=armeb-unknown - ;; - - xscaleel) - basic_machine=armel-unknown - ;; - - # We use `pc' rather than `unknown' - # because (1) that's what they normally are, and - # (2) the word "unknown" tends to confuse beginning users. - i*86 | x86_64) - basic_machine=$basic_machine-pc - ;; - # Recognize the basic CPU types with company name. 
- 1750a-* | 580-* \ - | a29k-* \ - | aarch64-* | aarch64_be-* \ - | abacus-* \ - | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ - | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* \ - | am33_2.0-* \ - | arc-* | arceb-* \ - | arm-* | arm[lb]e-* | arme[lb]-* | armv*-* \ - | avr-* | avr32-* \ - | asmjs-* \ - | ba-* \ - | be32-* | be64-* \ - | bfin-* | bs2000-* \ - | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | c8051-* | clipper-* | craynv-* | csky-* | cydra-* \ - | d10v-* | d30v-* | dlx-* | dsp16xx-* \ - | e2k-* | elxsi-* | epiphany-* \ - | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | ft32-* | fx80-* \ - | h8300-* | h8500-* \ - | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ - | hexagon-* \ - | i370-* | i*86-* | i860-* | i960-* | ia16-* | ia64-* \ - | ip2k-* | iq2000-* \ - | k1om-* \ - | le32-* | le64-* \ - | lm32-* \ - | m32c-* | m32r-* | m32rle-* \ - | m5200-* | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* | v70-* | w65-* \ - | m6811-* | m68hc11-* | m6812-* | m68hc12-* | m68hcs12x-* | nvptx-* | picochip-* \ - | m88110-* | m88k-* | maxq-* | mb-* | mcore-* | mep-* | metag-* \ - | microblaze-* | microblazeel-* \ - | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ - | mips16-* \ - | mips64-* | mips64el-* \ - | mips64octeon-* | mips64octeonel-* \ - | mips64orion-* | mips64orionel-* \ - | mips64r5900-* | mips64r5900el-* \ - | mips64vr-* | mips64vrel-* \ - | mips64vr4100-* | mips64vr4100el-* \ - | mips64vr4300-* | mips64vr4300el-* \ - | mips64vr5000-* | mips64vr5000el-* \ - | mips64vr5900-* | mips64vr5900el-* \ - | mipsisa32-* | mipsisa32el-* \ - | mipsisa32r2-* | mipsisa32r2el-* \ - | mipsisa32r6-* | mipsisa32r6el-* \ - | mipsisa64-* | mipsisa64el-* \ - | mipsisa64r2-* | mipsisa64r2el-* \ - | mipsisa64r6-* | mipsisa64r6el-* \ - | mipsisa64sb1-* | mipsisa64sb1el-* \ - | mipsisa64sr71k-* | mipsisa64sr71kel-* \ - | mipsr5900-* | mipsr5900el-* \ - | mipstx39-* | mipstx39el-* \ - | mmix-* \ - | mn10200-* | mn10300-* \ - | moxie-* \ - | mt-* \ - | msp430-* \ - | nds32-* | nds32le-* | nds32be-* \ - | nfp-* \ - | nios-* | nios2-* | nios2eb-* | nios2el-* \ - | none-* | np1-* | ns16k-* | ns32k-* \ - | open8-* \ - | or1k*-* \ - | or32-* \ - | orion-* \ - | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ - | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ - | pru-* \ - | pyramid-* \ - | riscv-* | riscv32-* | riscv64-* \ - | rl78-* | romp-* | rs6000-* | rx-* \ - | score-* \ - | sh-* | sh[1234]-* | sh[24]a-* | sh[24]ae[lb]-* | sh[23]e-* | she[lb]-* | sh[lb]e-* \ - | sh[1234]e[lb]-* | sh[12345][lb]e-* | sh[23]ele-* | sh64-* | sh64le-* \ - | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ - | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ - | spu-* \ - | tahoe-* \ - | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ - | tron-* \ - | ubicom32-* \ - | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ - | vax-* \ - | visium-* \ - | wasm32-* \ - | we32k-* \ - | x86-* | x86_64-* | xc16x-* | xgate-* | xps100-* \ - | xstormy16-* | xtensa*-* \ - | ymp-* \ - | z8k-* | z80-*) - ;; - # Recognize the basic CPU types without company name, with glob match. - xtensa*) - basic_machine=$basic_machine-unknown - ;; - # Recognize the various machine names and aliases which stand - # for a CPU type and a company and sometimes even an OS. 
- 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) - basic_machine=m68000-att - ;; - 3b*) - basic_machine=we32k-att - ;; - amd64) - basic_machine=x86_64-pc - ;; - amd64-*) - basic_machine=x86_64-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - amiga | amiga-*) - basic_machine=m68k-unknown - ;; - blackfin-*) - basic_machine=bfin-`echo "$basic_machine" | sed 's/^[^-]*-//'` - os=linux - ;; - bluegene*) - basic_machine=powerpc-ibm - os=cnk - ;; - c54x-*) - basic_machine=tic54x-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - c55x-*) - basic_machine=tic55x-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - c6x-*) - basic_machine=tic6x-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - c90) - basic_machine=c90-cray - os=${os:-unicos} - ;; - cr16 | cr16-*) - basic_machine=cr16-unknown - os=${os:-elf} - ;; - crisv32 | crisv32-* | etraxfs*) - basic_machine=crisv32-axis - ;; - cris | cris-* | etrax*) - basic_machine=cris-axis - ;; - crx) - basic_machine=crx-unknown - os=${os:-elf} - ;; - da30 | da30-*) - basic_machine=m68k-da30 - ;; - decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) - basic_machine=mips-dec - ;; - decsystem10* | dec10*) - basic_machine=pdp10-dec - os=tops10 - ;; - decsystem20* | dec20*) - basic_machine=pdp10-dec - os=tops20 - ;; - delta | 3300 | motorola-3300 | motorola-delta \ - | 3300-motorola | delta-motorola) - basic_machine=m68k-motorola - ;; - dpx20 | dpx20-*) - basic_machine=rs6000-bull - os=${os:-bosx} - ;; - dpx2*) - basic_machine=m68k-bull - os=sysv3 - ;; - e500v[12]) - basic_machine=powerpc-unknown - os=$os"spe" - ;; - e500v[12]-*) - basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` - os=$os"spe" - ;; - encore | umax | mmax) - basic_machine=ns32k-encore - ;; - elxsi) - basic_machine=elxsi-elxsi - os=${os:-bsd} - ;; - fx2800) - basic_machine=i860-alliant - ;; - genix) - basic_machine=ns32k-ns - ;; - h3050r* | hiux*) - basic_machine=hppa1.1-hitachi - os=hiuxwe2 - ;; - hp300-*) - basic_machine=m68k-hp - ;; - hp3k9[0-9][0-9] | hp9[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k2[0-9][0-9] | hp9k31[0-9]) - basic_machine=m68000-hp - ;; - hp9k3[2-9][0-9]) - basic_machine=m68k-hp - ;; - hp9k6[0-9][0-9] | hp6[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k7[0-79][0-9] | hp7[0-79][0-9]) - basic_machine=hppa1.1-hp - ;; - hp9k78[0-9] | hp78[0-9]) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][13679] | hp8[0-9][13679]) - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][0-9] | hp8[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - i*86v32) - basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` - os=sysv32 - ;; - i*86v4*) - basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` - os=sysv4 - ;; - i*86v) - basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` - os=sysv - ;; - i*86sol2) - basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` - os=solaris2 - ;; - j90 | j90-cray) - basic_machine=j90-cray - os=${os:-unicos} - ;; - iris | iris4d) - basic_machine=mips-sgi - case $os in - irix*) - ;; - *) - os=irix4 - ;; - esac - ;; - leon-*|leon[3-9]-*) - basic_machine=sparc-`echo "$basic_machine" | sed 's/-.*//'` - ;; - m68knommu-*) - basic_machine=m68k-`echo "$basic_machine" | sed 's/^[^-]*-//'` - os=linux - ;; - microblaze*) - basic_machine=microblaze-xilinx - ;; - miniframe) - basic_machine=m68000-convergent - ;; - *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) - 
basic_machine=m68k-atari - os=mint - ;; - mips3*-*) - basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'` - ;; - mips3*) - basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'`-unknown - ;; - ms1-*) - basic_machine=`echo "$basic_machine" | sed -e 's/ms1-/mt-/'` - ;; - news-3600 | risc-news) - basic_machine=mips-sony - os=newsos - ;; - next | m*-next) - basic_machine=m68k-next - case $os in - nextstep* ) - ;; - ns2*) - os=nextstep2 - ;; - *) - os=nextstep3 - ;; - esac - ;; - np1) - basic_machine=np1-gould - ;; - neo-tandem) - basic_machine=neo-tandem - ;; - nse-tandem) - basic_machine=nse-tandem - ;; - nsr-tandem) - basic_machine=nsr-tandem - ;; - nsv-tandem) - basic_machine=nsv-tandem - ;; - nsx-tandem) - basic_machine=nsx-tandem - ;; - op50n-* | op60c-*) - basic_machine=hppa1.1-oki - os=proelf - ;; - openrisc | openrisc-*) - basic_machine=or32-unknown - ;; - pa-hitachi) - basic_machine=hppa1.1-hitachi - os=hiuxwe2 - ;; - parisc-*) - basic_machine=hppa-`echo "$basic_machine" | sed 's/^[^-]*-//'` - os=linux - ;; - pbd) - basic_machine=sparc-tti - ;; - pbb) - basic_machine=m68k-tti - ;; - pc532 | pc532-*) - basic_machine=ns32k-pc532 - ;; - pc98) - basic_machine=i386-pc - ;; - pc98-*) - basic_machine=i386-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - pentium | p5 | k5 | k6 | nexgen | viac3) - basic_machine=i586-pc - ;; - pentiumpro | p6 | 6x86 | athlon | athlon_*) - basic_machine=i686-pc - ;; - pentiumii | pentium2 | pentiumiii | pentium3) - basic_machine=i686-pc - ;; - pentium4) - basic_machine=i786-pc - ;; - pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - pentium4-*) - basic_machine=i786-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - pn) - basic_machine=pn-gould - ;; - power) basic_machine=power-ibm - ;; - ppc | ppcbe) basic_machine=powerpc-unknown - ;; - ppc-* | ppcbe-*) - basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - ppcle | powerpclittle) - basic_machine=powerpcle-unknown - ;; - ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - ppc64) basic_machine=powerpc64-unknown - ;; - ppc64-*) basic_machine=powerpc64-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - ppc64le | powerpc64little) - basic_machine=powerpc64le-unknown - ;; - ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - ps2) - basic_machine=i386-ibm - ;; - rm[46]00) - basic_machine=mips-siemens - ;; - rtpc | rtpc-*) - basic_machine=romp-ibm - ;; - s390 | s390-*) - basic_machine=s390-ibm - ;; - s390x | s390x-*) - basic_machine=s390x-ibm - ;; - sb1) - basic_machine=mipsisa64sb1-unknown - ;; - sb1el) - basic_machine=mipsisa64sb1el-unknown - ;; - sde) - basic_machine=mipsisa32-sde - os=${os:-elf} - ;; - sequent) - basic_machine=i386-sequent - ;; - sh5el) - basic_machine=sh5le-unknown - ;; - sh5el-*) - basic_machine=sh5le-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - simso-wrs) - basic_machine=sparclite-wrs - os=vxworks - ;; - spur) - basic_machine=spur-unknown - ;; - strongarm-* | thumb-*) - basic_machine=arm-`echo "$basic_machine" | sed 's/^[^-]*-//'` - ;; - tile*-*) - ;; - tile*) - basic_machine=$basic_machine-unknown - os=${os:-linux-gnu} - ;; - 
tx39) - basic_machine=mipstx39-unknown - ;; - tx39el) - basic_machine=mipstx39el-unknown - ;; - tower | tower-32) - basic_machine=m68k-ncr - ;; - vpp*|vx|vx-*) - basic_machine=f301-fujitsu - ;; - w65*) - basic_machine=w65-wdc - os=none - ;; - w89k-*) - basic_machine=hppa1.1-winbond - os=proelf - ;; - x64) - basic_machine=x86_64-pc - ;; - xscale-* | xscalee[bl]-*) - basic_machine=`echo "$basic_machine" | sed 's/^xscale/arm/'` - ;; - none) - basic_machine=none-none - ;; - - *) - echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2 - exit 1 - ;; -esac - -# Here we canonicalize certain aliases for manufacturers. -case $basic_machine in - *-digital*) - basic_machine=`echo "$basic_machine" | sed 's/digital.*/dec/'` - ;; - *-commodore*) - basic_machine=`echo "$basic_machine" | sed 's/commodore.*/cbm/'` - ;; - *) - ;; -esac - -# Decode manufacturer-specific aliases for certain operating systems. - -if [ x$os != x ] -then -case $os in - # First match some system type aliases that might get confused - # with valid system types. - # solaris* is a basic system type, with this one exception. - auroraux) - os=auroraux - ;; - bluegene*) - os=cnk - ;; - solaris1 | solaris1.*) - os=`echo $os | sed -e 's|solaris1|sunos4|'` - ;; - solaris) - os=solaris2 - ;; - unixware*) - os=sysv4.2uw - ;; - gnu/linux*) - os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` - ;; - # es1800 is here to avoid being matched by es* (a different OS) - es1800*) - os=ose - ;; - # Some version numbers need modification - chorusos*) - os=chorusos - ;; - isc) - os=isc2.2 - ;; - sco6) - os=sco5v6 - ;; - sco5) - os=sco3.2v5 - ;; - sco4) - os=sco3.2v4 - ;; - sco3.2.[4-9]*) - os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` - ;; - sco3.2v[4-9]* | sco5v6*) - # Don't forget version if it is 3.2v4 or newer. - ;; - scout) - # Don't match below - ;; - sco*) - os=sco3.2v2 - ;; - psos*) - os=psos - ;; - # Now accept the basic system types. - # The portable systems comes first. - # Each alternative MUST end in a * to match a version number. - # sysv* is not here because it comes later, after sysvr4. 
- gnu* | bsd* | mach* | minix* | genix* | ultrix* | irix* \ - | *vms* | esix* | aix* | cnk* | sunos | sunos[34]*\ - | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \ - | sym* | kopensolaris* | plan9* \ - | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \ - | aos* | aros* | cloudabi* | sortix* \ - | nindy* | vxsim* | vxworks* | ebmon* | hms* | mvs* \ - | clix* | riscos* | uniplus* | iris* | isc* | rtu* | xenix* \ - | knetbsd* | mirbsd* | netbsd* \ - | bitrig* | openbsd* | solidbsd* | libertybsd* \ - | ekkobsd* | kfreebsd* | freebsd* | riscix* | lynxos* \ - | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \ - | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \ - | udi* | eabi* | lites* | ieee* | go32* | aux* | hcos* \ - | chorusrdb* | cegcc* | glidix* \ - | cygwin* | msys* | pe* | moss* | proelf* | rtems* \ - | midipix* | mingw32* | mingw64* | linux-gnu* | linux-android* \ - | linux-newlib* | linux-musl* | linux-uclibc* \ - | uxpv* | beos* | mpeix* | udk* | moxiebox* \ - | interix* | uwin* | mks* | rhapsody* | darwin* \ - | openstep* | oskit* | conix* | pw32* | nonstopux* \ - | storm-chaos* | tops10* | tenex* | tops20* | its* \ - | os2* | vos* | palmos* | uclinux* | nucleus* \ - | morphos* | superux* | rtmk* | windiss* \ - | powermax* | dnix* | nx6 | nx7 | sei* | dragonfly* \ - | skyos* | haiku* | rdos* | toppers* | drops* | es* \ - | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \ - | midnightbsd*) - # Remember, each alternative MUST END IN *, to match a version number. - ;; - qnx*) - case $basic_machine in - x86-* | i*86-*) - ;; - *) - os=nto-$os - ;; - esac - ;; - hiux*) - os=hiuxwe2 - ;; - nto-qnx*) - ;; - nto*) - os=`echo $os | sed -e 's|nto|nto-qnx|'` - ;; - sim | xray | os68k* | v88r* \ - | windows* | osx | abug | netware* | os9* \ - | macos* | mpw* | magic* | mmixware* | mon960* | lnews*) - ;; - linux-dietlibc) - os=linux-dietlibc - ;; - linux*) - os=`echo $os | sed -e 's|linux|linux-gnu|'` - ;; - lynx*178) - os=lynxos178 - ;; - lynx*5) - os=lynxos5 - ;; - lynx*) - os=lynxos - ;; - mac*) - os=`echo "$os" | sed -e 's|mac|macos|'` - ;; - opened*) - os=openedition - ;; - os400*) - os=os400 - ;; - sunos5*) - os=`echo "$os" | sed -e 's|sunos5|solaris2|'` - ;; - sunos6*) - os=`echo "$os" | sed -e 's|sunos6|solaris3|'` - ;; - wince*) - os=wince - ;; - utek*) - os=bsd - ;; - dynix*) - os=bsd - ;; - acis*) - os=aos - ;; - atheos*) - os=atheos - ;; - syllable*) - os=syllable - ;; - 386bsd) - os=bsd - ;; - ctix* | uts*) - os=sysv - ;; - nova*) - os=rtmk-nova - ;; - ns2) - os=nextstep2 - ;; - nsk*) - os=nsk - ;; - # Preserve the version number of sinix5. - sinix5.*) - os=`echo $os | sed -e 's|sinix|sysv|'` - ;; - sinix*) - os=sysv4 - ;; - tpf*) - os=tpf - ;; - triton*) - os=sysv3 - ;; - oss*) - os=sysv3 - ;; - svr4*) - os=sysv4 - ;; - svr3) - os=sysv3 - ;; - sysvr4) - os=sysv4 - ;; - # This must come after sysvr4. - sysv*) - ;; - ose*) - os=ose - ;; - *mint | mint[0-9]* | *MiNT | MiNT[0-9]*) - os=mint - ;; - zvmoe) - os=zvmoe - ;; - dicos*) - os=dicos - ;; - pikeos*) - # Until real need of OS specific support for - # particular features comes up, bare metal - # configurations are quite functional. - case $basic_machine in - arm*) - os=eabi - ;; - *) - os=elf - ;; - esac - ;; - nacl*) - ;; - ios) - ;; - none) - ;; - *-eabi) - ;; - *) - echo Invalid configuration \`"$1"\': system \`"$os"\' not recognized 1>&2 - exit 1 - ;; -esac -else - -# Here we handle the default operating systems that come with various machines. 
-# The value should be what the vendor currently ships out the door with their -# machine or put another way, the most popular os provided with the machine. - -# Note that if you're going to try to match "-MANUFACTURER" here (say, -# "-sun"), then you have to tell the case statement up towards the top -# that MANUFACTURER isn't an operating system. Otherwise, code above -# will signal an error saying that MANUFACTURER isn't an operating -# system, and we'll never get to this point. - -case $basic_machine in - score-*) - os=elf - ;; - spu-*) - os=elf - ;; - *-acorn) - os=riscix1.2 - ;; - arm*-rebel) - os=linux - ;; - arm*-semi) - os=aout - ;; - c4x-* | tic4x-*) - os=coff - ;; - c8051-*) - os=elf - ;; - clipper-intergraph) - os=clix - ;; - hexagon-*) - os=elf - ;; - tic54x-*) - os=coff - ;; - tic55x-*) - os=coff - ;; - tic6x-*) - os=coff - ;; - # This must come before the *-dec entry. - pdp10-*) - os=tops20 - ;; - pdp11-*) - os=none - ;; - *-dec | vax-*) - os=ultrix4.2 - ;; - m68*-apollo) - os=domain - ;; - i386-sun) - os=sunos4.0.2 - ;; - m68000-sun) - os=sunos3 - ;; - m68*-cisco) - os=aout - ;; - mep-*) - os=elf - ;; - mips*-cisco) - os=elf - ;; - mips*-*) - os=elf - ;; - or32-*) - os=coff - ;; - *-tti) # must be before sparc entry or we get the wrong os. - os=sysv3 - ;; - sparc-* | *-sun) - os=sunos4.1.1 - ;; - pru-*) - os=elf - ;; - *-be) - os=beos - ;; - *-ibm) - os=aix - ;; - *-knuth) - os=mmixware - ;; - *-wec) - os=proelf - ;; - *-winbond) - os=proelf - ;; - *-oki) - os=proelf - ;; - *-hp) - os=hpux - ;; - *-hitachi) - os=hiux - ;; - i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=sysv - ;; - *-cbm) - os=amigaos - ;; - *-dg) - os=dgux - ;; - *-dolphin) - os=sysv3 - ;; - m68k-ccur) - os=rtu - ;; - m88k-omron*) - os=luna - ;; - *-next) - os=nextstep - ;; - *-sequent) - os=ptx - ;; - *-crds) - os=unos - ;; - *-ns) - os=genix - ;; - i370-*) - os=mvs - ;; - *-gould) - os=sysv - ;; - *-highlevel) - os=bsd - ;; - *-encore) - os=bsd - ;; - *-sgi) - os=irix - ;; - *-siemens) - os=sysv4 - ;; - *-masscomp) - os=rtu - ;; - f30[01]-fujitsu | f700-fujitsu) - os=uxpv - ;; - *-rom68k) - os=coff - ;; - *-*bug) - os=coff - ;; - *-apple) - os=macos - ;; - *-atari*) - os=mint - ;; - *-wrs) - os=vxworks - ;; - *) - os=none - ;; -esac -fi - -# Here we handle the case where we know the os, and the CPU type, but not the -# manufacturer. We pick the logical manufacturer. 
-vendor=unknown -case $basic_machine in - *-unknown) - case $os in - riscix*) - vendor=acorn - ;; - sunos*) - vendor=sun - ;; - cnk*|-aix*) - vendor=ibm - ;; - beos*) - vendor=be - ;; - hpux*) - vendor=hp - ;; - mpeix*) - vendor=hp - ;; - hiux*) - vendor=hitachi - ;; - unos*) - vendor=crds - ;; - dgux*) - vendor=dg - ;; - luna*) - vendor=omron - ;; - genix*) - vendor=ns - ;; - clix*) - vendor=intergraph - ;; - mvs* | opened*) - vendor=ibm - ;; - os400*) - vendor=ibm - ;; - ptx*) - vendor=sequent - ;; - tpf*) - vendor=ibm - ;; - vxsim* | vxworks* | windiss*) - vendor=wrs - ;; - aux*) - vendor=apple - ;; - hms*) - vendor=hitachi - ;; - mpw* | macos*) - vendor=apple - ;; - *mint | mint[0-9]* | *MiNT | MiNT[0-9]*) - vendor=atari - ;; - vos*) - vendor=stratus - ;; - esac - basic_machine=`echo "$basic_machine" | sed "s/unknown/$vendor/"` - ;; -esac - -echo "$basic_machine-$os" -exit - -# Local variables: -# eval: (add-hook 'before-save-hook 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff --git a/third_party/nix/config/install-sh b/third_party/nix/config/install-sh deleted file mode 100755 index 377bb8687f..0000000000 --- a/third_party/nix/config/install-sh +++ /dev/null @@ -1,527 +0,0 @@ -#!/bin/sh -# install - install a program, script, or datafile - -scriptversion=2011-11-20.07; # UTC - -# This originates from X11R5 (mit/util/scripts/install.sh), which was -# later released in X11R6 (xc/config/util/install.sh) with the -# following copyright and license. -# -# Copyright (C) 1994 X Consortium -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN -# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- -# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -# Except as contained in this notice, the name of the X Consortium shall not -# be used in advertising or otherwise to promote the sale, use or other deal- -# ings in this Software without prior written authorization from the X Consor- -# tium. -# -# -# FSF changes to this file are in the public domain. -# -# Calling this script install-sh is preferred over install.sh, to prevent -# 'make' implicit rules from creating a file called install from it -# when there is no Makefile. -# -# This script is compatible with the BSD install script, but was written -# from scratch. - -nl=' -' -IFS=" "" $nl" - -# set DOITPROG to echo to test this script - -# Don't use :- since 4.3BSD and earlier shells don't like it. -doit=${DOITPROG-} -if test -z "$doit"; then - doit_exec=exec -else - doit_exec=$doit -fi - -# Put in absolute file names if you don't have them in your path; -# or use environment vars. 
- -chgrpprog=${CHGRPPROG-chgrp} -chmodprog=${CHMODPROG-chmod} -chownprog=${CHOWNPROG-chown} -cmpprog=${CMPPROG-cmp} -cpprog=${CPPROG-cp} -mkdirprog=${MKDIRPROG-mkdir} -mvprog=${MVPROG-mv} -rmprog=${RMPROG-rm} -stripprog=${STRIPPROG-strip} - -posix_glob='?' -initialize_posix_glob=' - test "$posix_glob" != "?" || { - if (set -f) 2>/dev/null; then - posix_glob= - else - posix_glob=: - fi - } -' - -posix_mkdir= - -# Desired mode of installed file. -mode=0755 - -chgrpcmd= -chmodcmd=$chmodprog -chowncmd= -mvcmd=$mvprog -rmcmd="$rmprog -f" -stripcmd= - -src= -dst= -dir_arg= -dst_arg= - -copy_on_change=false -no_target_directory= - -usage="\ -Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE - or: $0 [OPTION]... SRCFILES... DIRECTORY - or: $0 [OPTION]... -t DIRECTORY SRCFILES... - or: $0 [OPTION]... -d DIRECTORIES... - -In the 1st form, copy SRCFILE to DSTFILE. -In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. -In the 4th, create DIRECTORIES. - -Options: - --help display this help and exit. - --version display version info and exit. - - -c (ignored) - -C install only if different (preserve the last data modification time) - -d create directories instead of installing files. - -g GROUP $chgrpprog installed files to GROUP. - -m MODE $chmodprog installed files to MODE. - -o USER $chownprog installed files to USER. - -s $stripprog installed files. - -t DIRECTORY install into DIRECTORY. - -T report an error if DSTFILE is a directory. - -Environment variables override the default commands: - CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG - RMPROG STRIPPROG -" - -while test $# -ne 0; do - case $1 in - -c) ;; - - -C) copy_on_change=true;; - - -d) dir_arg=true;; - - -g) chgrpcmd="$chgrpprog $2" - shift;; - - --help) echo "$usage"; exit $?;; - - -m) mode=$2 - case $mode in - *' '* | *' '* | *' -'* | *'*'* | *'?'* | *'['*) - echo "$0: invalid mode: $mode" >&2 - exit 1;; - esac - shift;; - - -o) chowncmd="$chownprog $2" - shift;; - - -s) stripcmd=$stripprog;; - - -t) dst_arg=$2 - # Protect names problematic for 'test' and other utilities. - case $dst_arg in - -* | [=\(\)!]) dst_arg=./$dst_arg;; - esac - shift;; - - -T) no_target_directory=true;; - - --version) echo "$0 $scriptversion"; exit $?;; - - --) shift - break;; - - -*) echo "$0: invalid option: $1" >&2 - exit 1;; - - *) break;; - esac - shift -done - -if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then - # When -d is used, all remaining arguments are directories to create. - # When -t is used, the destination is already specified. - # Otherwise, the last argument is the destination. Remove it from $@. - for arg - do - if test -n "$dst_arg"; then - # $@ is not empty: it contains at least $arg. - set fnord "$@" "$dst_arg" - shift # fnord - fi - shift # arg - dst_arg=$arg - # Protect names problematic for 'test' and other utilities. - case $dst_arg in - -* | [=\(\)!]) dst_arg=./$dst_arg;; - esac - done -fi - -if test $# -eq 0; then - if test -z "$dir_arg"; then - echo "$0: no input file specified." >&2 - exit 1 - fi - # It's OK to call 'install-sh -d' without argument. - # This can happen when creating conditional directories. - exit 0 -fi - -if test -z "$dir_arg"; then - do_exit='(exit $ret); exit $ret' - trap "ret=129; $do_exit" 1 - trap "ret=130; $do_exit" 2 - trap "ret=141; $do_exit" 13 - trap "ret=143; $do_exit" 15 - - # Set umask so as not to create temps with too-generous modes. - # However, 'strip' requires both read and write access to temps. - case $mode in - # Optimize common cases. 
- *644) cp_umask=133;; - *755) cp_umask=22;; - - *[0-7]) - if test -z "$stripcmd"; then - u_plus_rw= - else - u_plus_rw='% 200' - fi - cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; - *) - if test -z "$stripcmd"; then - u_plus_rw= - else - u_plus_rw=,u+rw - fi - cp_umask=$mode$u_plus_rw;; - esac -fi - -for src -do - # Protect names problematic for 'test' and other utilities. - case $src in - -* | [=\(\)!]) src=./$src;; - esac - - if test -n "$dir_arg"; then - dst=$src - dstdir=$dst - test -d "$dstdir" - dstdir_status=$? - else - - # Waiting for this to be detected by the "$cpprog $src $dsttmp" command - # might cause directories to be created, which would be especially bad - # if $src (and thus $dsttmp) contains '*'. - if test ! -f "$src" && test ! -d "$src"; then - echo "$0: $src does not exist." >&2 - exit 1 - fi - - if test -z "$dst_arg"; then - echo "$0: no destination specified." >&2 - exit 1 - fi - dst=$dst_arg - - # If destination is a directory, append the input filename; won't work - # if double slashes aren't ignored. - if test -d "$dst"; then - if test -n "$no_target_directory"; then - echo "$0: $dst_arg: Is a directory" >&2 - exit 1 - fi - dstdir=$dst - dst=$dstdir/`basename "$src"` - dstdir_status=0 - else - # Prefer dirname, but fall back on a substitute if dirname fails. - dstdir=` - (dirname "$dst") 2>/dev/null || - expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$dst" : 'X\(//\)[^/]' \| \ - X"$dst" : 'X\(//\)$' \| \ - X"$dst" : 'X\(/\)' \| . 2>/dev/null || - echo X"$dst" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q' - ` - - test -d "$dstdir" - dstdir_status=$? - fi - fi - - obsolete_mkdir_used=false - - if test $dstdir_status != 0; then - case $posix_mkdir in - '') - # Create intermediate dirs using mode 755 as modified by the umask. - # This is like FreeBSD 'install' as of 1997-10-28. - umask=`umask` - case $stripcmd.$umask in - # Optimize common cases. - *[2367][2367]) mkdir_umask=$umask;; - .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; - - *[0-7]) - mkdir_umask=`expr $umask + 22 \ - - $umask % 100 % 40 + $umask % 20 \ - - $umask % 10 % 4 + $umask % 2 - `;; - *) mkdir_umask=$umask,go-w;; - esac - - # With -d, create the new directory with the user-specified mode. - # Otherwise, rely on $mkdir_umask. - if test -n "$dir_arg"; then - mkdir_mode=-m$mode - else - mkdir_mode= - fi - - posix_mkdir=false - case $umask in - *[123567][0-7][0-7]) - # POSIX mkdir -p sets u+wx bits regardless of umask, which - # is incompatible with FreeBSD 'install' when (umask & 300) != 0. - ;; - *) - tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ - trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 - - if (umask $mkdir_umask && - exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 - then - if test -z "$dir_arg" || { - # Check for POSIX incompatibilities with -m. - # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or - # other-writable bit of parent directory when it shouldn't. - # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. 
- ls_ld_tmpdir=`ls -ld "$tmpdir"` - case $ls_ld_tmpdir in - d????-?r-*) different_mode=700;; - d????-?--*) different_mode=755;; - *) false;; - esac && - $mkdirprog -m$different_mode -p -- "$tmpdir" && { - ls_ld_tmpdir_1=`ls -ld "$tmpdir"` - test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" - } - } - then posix_mkdir=: - fi - rmdir "$tmpdir/d" "$tmpdir" - else - # Remove any dirs left behind by ancient mkdir implementations. - rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null - fi - trap '' 0;; - esac;; - esac - - if - $posix_mkdir && ( - umask $mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" - ) - then : - else - - # The umask is ridiculous, or mkdir does not conform to POSIX, - # or it failed possibly due to a race condition. Create the - # directory the slow way, step by step, checking for races as we go. - - case $dstdir in - /*) prefix='/';; - [-=\(\)!]*) prefix='./';; - *) prefix='';; - esac - - eval "$initialize_posix_glob" - - oIFS=$IFS - IFS=/ - $posix_glob set -f - set fnord $dstdir - shift - $posix_glob set +f - IFS=$oIFS - - prefixes= - - for d - do - test X"$d" = X && continue - - prefix=$prefix$d - if test -d "$prefix"; then - prefixes= - else - if $posix_mkdir; then - (umask=$mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break - # Don't fail if two instances are running concurrently. - test -d "$prefix" || exit 1 - else - case $prefix in - *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; - *) qprefix=$prefix;; - esac - prefixes="$prefixes '$qprefix'" - fi - fi - prefix=$prefix/ - done - - if test -n "$prefixes"; then - # Don't fail if two instances are running concurrently. - (umask $mkdir_umask && - eval "\$doit_exec \$mkdirprog $prefixes") || - test -d "$dstdir" || exit 1 - obsolete_mkdir_used=true - fi - fi - fi - - if test -n "$dir_arg"; then - { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && - { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && - { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || - test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 - else - - # Make a couple of temp file names in the proper directory. - dsttmp=$dstdir/_inst.$$_ - rmtmp=$dstdir/_rm.$$_ - - # Trap to clean up those temp files at exit. - trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 - - # Copy the file name to the temp name. - (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && - - # and set any options; do chmod last to preserve setuid bits. - # - # If any of these fail, we abort the whole thing. If we want to - # ignore errors from any of these, just make sure not to ignore - # errors from the above "$doit $cpprog $src $dsttmp" command. - # - { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && - { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && - { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && - { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && - - # If -C, don't bother to copy if it wouldn't change the file. - if $copy_on_change && - old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && - new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && - - eval "$initialize_posix_glob" && - $posix_glob set -f && - set X $old && old=:$2:$4:$5:$6 && - set X $new && new=:$2:$4:$5:$6 && - $posix_glob set +f && - - test "$old" = "$new" && - $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 - then - rm -f "$dsttmp" - else - # Rename the file to the real destination. 
- $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || - - # The rename failed, perhaps because mv can't rename something else - # to itself, or perhaps because mv is so ancient that it does not - # support -f. - { - # Now remove or move aside any old file at destination location. - # We try this two ways since rm can't unlink itself on some - # systems and the destination file might be busy for other - # reasons. In this case, the final cleanup might fail but the new - # file should still install successfully. - { - test ! -f "$dst" || - $doit $rmcmd -f "$dst" 2>/dev/null || - { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && - { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } - } || - { echo "$0: cannot unlink or rename $dst" >&2 - (exit 1); exit 1 - } - } && - - # Now rename the file to the real destination. - $doit $mvcmd "$dsttmp" "$dst" - } - fi || exit 1 - - trap '' 0 - fi -done - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "scriptversion=" -# time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" -# time-stamp-end: "; # UTC" -# End: diff --git a/third_party/nix/contrib/stack-collapse.py b/third_party/nix/contrib/stack-collapse.py deleted file mode 100755 index f5602c95c4..0000000000 --- a/third_party/nix/contrib/stack-collapse.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env nix-shell -#!nix-shell -i python3 -p python3 --pure - -# To be used with `--trace-function-calls` and `flamegraph.pl`. -# -# For example: -# -# nix-instantiate --trace-function-calls '' -A hello 2> nix-function-calls.trace -# ./contrib/stack-collapse.py nix-function-calls.trace > nix-function-calls.folded -# nix-shell -p flamegraph --run "flamegraph.pl nix-function-calls.folded > nix-function-calls.svg" - -import sys -from pprint import pprint -import fileinput - -stack = [] -timestack = [] - -for line in fileinput.input(): - components = line.strip().split(" ", 2) - if components[0] != "function-trace": - continue - - direction = components[1] - components = components[2].rsplit(" ", 2) - - loc = components[0] - _at = components[1] - time = int(components[2]) - - if direction == "entered": - stack.append(loc) - timestack.append(time) - elif direction == "exited": - dur = time - timestack.pop() - vst = ";".join(stack) - print(f"{vst} {dur}") - stack.pop() diff --git a/third_party/nix/corepkgs/buildenv.nix b/third_party/nix/corepkgs/buildenv.nix deleted file mode 100644 index 4da0db2ae2..0000000000 --- a/third_party/nix/corepkgs/buildenv.nix +++ /dev/null @@ -1,27 +0,0 @@ -{ derivations, manifest }: - -derivation { - name = "user-environment"; - system = "builtin"; - builder = "builtin:buildenv"; - - inherit manifest; - - # !!! grmbl, need structured data for passing this in a clean way. - derivations = - map - (d: - [ - (d.meta.active or "true") - (d.meta.priority or 5) - (builtins.length d.outputs) - ] ++ map (output: builtins.getAttr output d) d.outputs) - derivations; - - # Building user environments remotely just causes huge amounts of - # network traffic, so don't do that. - preferLocalBuild = true; - - # Also don't bother substituting. 
- allowSubstitutes = false; -} diff --git a/third_party/nix/corepkgs/config.nix.in b/third_party/nix/corepkgs/config.nix.in deleted file mode 100644 index 0e4a2f0c90..0000000000 --- a/third_party/nix/corepkgs/config.nix.in +++ /dev/null @@ -1,29 +0,0 @@ -let - fromEnv = var: def: - let val = builtins.getEnv var; in - if val != "" then val else def; -in rec { - shell = "@bash@"; - coreutils = "@coreutils@"; - bzip2 = "@bzip2@"; - gzip = "@gzip@"; - xz = "@xz@"; - tar = "@tar@"; - tarFlags = "@tarFlags@"; - tr = "@tr@"; - nixBinDir = fromEnv "NIX_BIN_DIR" "@CMAKE_INSTALL_FULL_BINDIR@"; - nixPrefix = "@CMAKE_INSTALL_PREFIX@"; - nixLibexecDir = fromEnv "NIX_LIBEXEC_DIR" "@CMAKE_INSTALL_FULL_LIBEXECDIR@"; - nixLocalstateDir = "/nix/var"; - nixSysconfDir = "/etc"; - nixStoreDir = fromEnv "NIX_STORE_DIR" "/nix/store"; - - # If Nix is installed in the Nix store, then automatically add it as - # a dependency to the core packages. This ensures that they work - # properly in a chroot. - chrootDeps = - if dirOf nixPrefix == builtins.storeDir then - [ (builtins.storePath nixPrefix) ] - else - [ ]; -} diff --git a/third_party/nix/corepkgs/derivation.nix b/third_party/nix/corepkgs/derivation.nix deleted file mode 100644 index 1f95cf88ec..0000000000 --- a/third_party/nix/corepkgs/derivation.nix +++ /dev/null @@ -1,30 +0,0 @@ -/* This is the implementation of the ‘derivation’ builtin function. - It's actually a wrapper around the ‘derivationStrict’ primop. */ - -drvAttrs @ { outputs ? [ "out" ], ... }: - -let - - strict = derivationStrict drvAttrs; - - commonAttrs = drvAttrs // (builtins.listToAttrs outputsList) // - { - all = map (x: x.value) outputsList; - inherit drvAttrs; - }; - - outputToAttrListElement = outputName: - { - name = outputName; - value = commonAttrs // { - outPath = builtins.getAttr outputName strict; - drvPath = strict.drvPath; - type = "derivation"; - inherit outputName; - }; - }; - - outputsList = map outputToAttrListElement outputs; - -in -(builtins.head outputsList).value diff --git a/third_party/nix/corepkgs/fetchurl.nix b/third_party/nix/corepkgs/fetchurl.nix deleted file mode 100644 index 9933b7cc12..0000000000 --- a/third_party/nix/corepkgs/fetchurl.nix +++ /dev/null @@ -1,46 +0,0 @@ -{ system ? "" # obsolete -, url -, hash ? "" # an SRI ash - - # Legacy hash specification -, md5 ? "" -, sha1 ? "" -, sha256 ? "" -, sha512 ? "" -, outputHash ? if hash != "" then hash else if sha512 != "" then sha512 else if sha1 != "" then sha1 else if md5 != "" then md5 else sha256 -, outputHashAlgo ? if hash != "" then "" else if sha512 != "" then "sha512" else if sha1 != "" then "sha1" else if md5 != "" then "md5" else "sha256" - -, executable ? false -, unpack ? false -, name ? baseNameOf (toString url) -}: - -derivation { - builder = "builtin:fetchurl"; - - # New-style output content requirements. - inherit outputHashAlgo outputHash; - outputHashMode = if unpack || executable then "recursive" else "flat"; - - inherit name url executable unpack; - - system = "builtin"; - - # No need to double the amount of network traffic - preferLocalBuild = true; - - impureEnvVars = [ - # We borrow these environment variables from the caller to allow - # easy proxy configuration. This is impure, but a fixed-output - # derivation like fetchurl is allowed to do so since its result is - # by definition pure. - "http_proxy" - "https_proxy" - "ftp_proxy" - "all_proxy" - "no_proxy" - ]; - - # To make "nix-prefetch-url" work. 
- urls = [ url ]; -} diff --git a/third_party/nix/corepkgs/imported-drv-to-derivation.nix b/third_party/nix/corepkgs/imported-drv-to-derivation.nix deleted file mode 100644 index 639f068332..0000000000 --- a/third_party/nix/corepkgs/imported-drv-to-derivation.nix +++ /dev/null @@ -1,24 +0,0 @@ -attrs @ { drvPath, outputs, name, ... }: - -let - - commonAttrs = (builtins.listToAttrs outputsList) // - { - all = map (x: x.value) outputsList; - inherit drvPath name; - type = "derivation"; - }; - - outputToAttrListElement = outputName: - { - name = outputName; - value = commonAttrs // { - outPath = builtins.getAttr outputName attrs; - inherit outputName; - }; - }; - - outputsList = map outputToAttrListElement outputs; - -in -(builtins.head outputsList).value diff --git a/third_party/nix/corepkgs/unpack-channel.nix b/third_party/nix/corepkgs/unpack-channel.nix deleted file mode 100644 index d39a206378..0000000000 --- a/third_party/nix/corepkgs/unpack-channel.nix +++ /dev/null @@ -1,39 +0,0 @@ -with import ; - -let - - builder = builtins.toFile "unpack-channel.sh" - '' - mkdir $out - cd $out - xzpat="\.xz\$" - gzpat="\.gz\$" - if [[ "$src" =~ $xzpat ]]; then - ${xz} -d < $src | ${tar} xf - ${tarFlags} - elif [[ "$src" =~ $gzpat ]]; then - ${gzip} -d < $src | ${tar} xf - ${tarFlags} - else - ${bzip2} -d < $src | ${tar} xf - ${tarFlags} - fi - if [ * != $channelName ]; then - mv * $out/$channelName - fi - ''; - -in - -{ name, channelName, src }: - -derivation { - system = builtins.currentSystem; - builder = shell; - args = [ "-e" builder ]; - inherit name channelName src; - - PATH = "${nixBinDir}:${coreutils}"; - - # No point in doing this remotely. - preferLocalBuild = true; - - inherit chrootDeps; -} diff --git a/third_party/nix/default.nix b/third_party/nix/default.nix deleted file mode 100644 index ad50ac6b7f..0000000000 --- a/third_party/nix/default.nix +++ /dev/null @@ -1,270 +0,0 @@ -args@{ depot ? (import ../.. { }) -, pkgs ? depot.third_party.nixpkgs -, lib -, buildType ? "release" -, ... -}: - -let - # Override some external dependencies for C++17 & clang compat. - abseil-cpp = pkgs.abseil-cpp.override { - stdenv = pkgs.fullLlvm11Stdenv; - cxxStandard = "17"; - }; - - protobuf = pkgs.callPackage (pkgs.path + "/pkgs/development/libraries/protobuf/generic-v3.nix") { - version = "3.12.2"; - sha256 = "1lp368aa206vpic9fmax4k6llnmf28plfvkkm4vqhgphmjqykvl2"; - stdenv = pkgs.fullLlvm11Stdenv; - buildPackages = { - inherit (pkgs.buildPackages) which; - stdenv = pkgs.buildPackages.fullLlvm11Stdenv; - }; - }; - - re2 = pkgs.re2.override { - stdenv = pkgs.fullLlvm11Stdenv; - }; - - grpc = (pkgs.grpc.override { - inherit abseil-cpp protobuf re2; - stdenv = pkgs.fullLlvm11Stdenv; - }).overrideAttrs (orig: rec { - cmakeFlags = orig.cmakeFlags ++ [ - "-DCMAKE_CXX_STANDARD_REQUIRED=ON" - "-DCMAKE_CXX_STANDARD=17" - ]; - }); - - aws-s3-cpp = pkgs.aws-sdk-cpp.override { - apis = [ "s3" "transfer" ]; - customMemoryManagement = false; - }; - - src = - let - srcDir = ./.; - # create relative paths for all the sources we are filtering - asRelative = path: - let - srcS = toString srcDir; - pathS = toString path; - in - if ! lib.hasPrefix srcS pathS then - throw "Path is outside of the working directory." - else - lib.removePrefix srcS pathS; - - in - builtins.filterSource - (path: type: - # Strip out .nix files that are in the root of the repository. Changing - # the expression of tvix shouldn't cause a rebuild of tvix unless really - # required. 
- !(dirOf (asRelative path) == "/" && lib.hasSuffix ".nix" path) && - - # remove the proto files from the repo as those are compiled separately - !(lib.hasPrefix "src/proto" (asRelative path)) && - - # ignore result symlinks - !(type == "symlink" && lib.hasPrefix "result" (baseNameOf path)) - ) - srcDir; - - # Proto generation in CMake is theoretically possible, but that is - # very theoretical - this does it in Nix instead. - protoSrcs = pkgs.runCommand "nix-proto-srcs" { } '' - export PROTO_SRCS=${./src/proto} - mkdir -p $out/libproto - ${protobuf}/bin/protoc -I=$PROTO_SRCS \ - --cpp_out=$out/libproto \ - --plugin=protoc-gen-grpc=${grpc}/bin/grpc_cpp_plugin \ - --grpc_out=$out/libproto \ - $PROTO_SRCS/*.proto - ''; - - # Derivation for busybox that just has the `busybox` binary in bin/, not all - # the symlinks, so cmake can find it - busybox = pkgs.runCommand "busybox" { } '' - mkdir -p $out/bin - cp ${pkgs.busybox}/bin/busybox $out/bin - ''; - -in -lib.fix (self: pkgs.fullLlvm11Stdenv.mkDerivation { - pname = "tvix"; - version = "2.3.4"; - inherit src; - - nativeBuildInputs = with pkgs; [ - bison - clang-tools_11 - cmake - libxml2 - libxslt - pkgconfig - (import ./clangd.nix pkgs) - ]; - - # TODO(tazjin): Some of these might only be required for native inputs - buildInputs = (with pkgs; [ - aws-s3-cpp - brotli - bzip2 - c-ares - curl - editline - flex - glog - libseccomp - libsodium - openssl - sqlite - systemd.dev - xz - - # dependencies with custom overrides - abseil-cpp - grpc - protobuf - ]); - - doCheck = false; - doInstallCheck = true; - - # Preserve debug symbols, for core dumps + other live debugging - dontStrip = true; - - installCheckInputs = with depot.third_party; [ - gtest - pkgs.fd - rapidcheck - ]; - - propagatedBuildInputs = with pkgs; [ - boost - ]; - - configurePhase = '' - mkdir build - cd build - cmake .. \ - -DCMAKE_INSTALL_PREFIX=$out \ - -DCMAKE_BUILD_TYPE=RelWithDebInfo \ - -DCMAKE_FIND_USE_SYSTEM_PACKAGE_REGISTRY=OFF \ - -DCMAKE_FIND_USE_PACKAGE_REGISTRY=OFF \ - -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON - ''; - - installCheckPhase = '' - export NIX_DATA_DIR=$out/share - export NIX_TEST_VAR=foo # this is required by a language test - make test - ''; - - preBuild = '' - if [ -n "$NIX_BUILD_CORES" ]; then - makeFlags+="-j$NIX_BUILD_CORES " - makeFlags+="-l$NIX_BUILD_CORES " - fi - ''; - - # Forward the location of the generated Protobuf / gRPC files so - # that they can be included by CMake. - NIX_PROTO_SRCS = protoSrcs; - - # Work around broken system header include flags in the cxx toolchain. - LIBCXX_INCLUDE = "${pkgs.llvmPackages_11.libcxx}/include/c++/v1"; - - SANDBOX_SHELL = "${pkgs.busybox}/bin/busybox"; - - # Install the various symlinks to the Nix binary which users expect - # to exist. 
- postInstall = '' - ln -s $out/bin/nix $out/bin/nix-build - ln -s $out/bin/nix $out/bin/nix-channel - ln -s $out/bin/nix $out/bin/nix-collect-garbage - ln -s $out/bin/nix $out/bin/nix-copy-closure - ln -s $out/bin/nix $out/bin/nix-env - ln -s $out/bin/nix $out/bin/nix-hash - ln -s $out/bin/nix $out/bin/nix-instantiate - ln -s $out/bin/nix $out/bin/nix-prefetch-url - ln -s $out/bin/nix $out/bin/nix-shell - ln -s $out/bin/nix $out/bin/nix-store - - mkdir -p $out/libexec/nix - ln -s $out/bin/nix $out/libexec/nix/build-remote - - # configuration variables for templated files - export storedir=/nix/store - export localstatedir=/nix/var - export bindir=$out/bin - - mkdir -p $out/lib/systemd/system - substituteAll \ - ${src}/misc/systemd/nix-daemon.service.in \ - $out/lib/systemd/system/nix-daemon.service - substituteAll \ - ${src}/misc/systemd/nix-daemon.socket.in \ - $out/lib/systemd/system/nix-daemon.socket - - mkdir -p $out/etc/profile.d - substituteAll \ - ${src}/scripts/nix-profile.sh.in $out/etc/profile.d/nix.sh - substituteAll \ - ${src}/scripts/nix-profile-daemon.sh.in $out/etc/profile.d/nix-daemon.sh - ''; - - # TODO(tazjin): integration test setup? - # TODO(tazjin): docs generation? - - passthru = { - build-shell = self.overrideAttrs (up: rec { - run_clang_tidy = pkgs.writeShellScriptBin "run-clang-tidy" '' - test -f compile_commands.json || (echo "run from build output directory"; exit 1) || exit 1 - ${pkgs.jq}/bin/jq < compile_commands.json -r 'map(.file)|.[]' | grep -v '/generated/' | ${pkgs.parallel}/bin/parallel ${pkgs.clang-tools}/bin/clang-tidy -p compile_commands.json $@ - ''; - - installCheckInputs = up.installCheckInputs ++ [ run_clang_tidy ]; - - shellHook = '' - export NIX_DATA_DIR="${toString depot.path}/third_party" - export NIX_TEST_VAR=foo - ''; - }); - - # Ensure formatting is coherent, - # but do this in parallel to the main build because: - # - (in favor of building this after tvix) - # tests run so that developers get all the useful feedback - # - (in favor of building this before tvix) - # if the formatting is broken, and this build was submitted to CI - # it would be a good idea to get this feedback rather sooner than later - # - we don't want builds to differ between local and CI runs - checkfmt = pkgs.fullLlvm11Stdenv.mkDerivation { - name = "tvix-checkfmt"; - inherit src; - nativeBuildInputs = with pkgs; [ clang-tools_11 fd ]; - SANDBOX_SHELL = "${pkgs.busybox}/bin/busybox"; - - buildPhase = '' - set -e - runHook preBuild - fd . $src -e hh -e cc | xargs clang-format --dry-run --Werror - runHook postBuild - ''; - - installPhase = '' - runHook preInstall - touch $out - runHook postInstall - ''; - }; - - test-vm = import ./test-vm.nix args; - }; - - meta.ci.targets = [ - "checkfmt" - ]; -}) diff --git a/third_party/nix/doc/manual/advanced-topics/advanced-topics.xml b/third_party/nix/doc/manual/advanced-topics/advanced-topics.xml deleted file mode 100644 index 871b7eb1d3..0000000000 --- a/third_party/nix/doc/manual/advanced-topics/advanced-topics.xml +++ /dev/null @@ -1,14 +0,0 @@ - - -Advanced Topics - - - - - - - diff --git a/third_party/nix/doc/manual/advanced-topics/cores-vs-jobs.xml b/third_party/nix/doc/manual/advanced-topics/cores-vs-jobs.xml deleted file mode 100644 index eba645faf8..0000000000 --- a/third_party/nix/doc/manual/advanced-topics/cores-vs-jobs.xml +++ /dev/null @@ -1,121 +0,0 @@ - - -Tuning Cores and Jobs - -Nix has two relevant settings with regards to how your CPU cores -will be utilized: and -. 
This chapter will talk about what -they are, how they interact, and their configuration trade-offs. - - - - - - Dictates how many separate derivations will be built at the same - time. If you set this to zero, the local machine will do no - builds. Nix will still substitute from binary caches, and build - remotely if remote builders are configured. - - - - - - Suggests how many cores each derivation should use. Similar to - make -j. - - - - -The setting determines the value of -NIX_BUILD_CORES. NIX_BUILD_CORES is equal -to , unless -equals 0, in which case NIX_BUILD_CORES -will be the total number of cores in the system. - -The total number of consumed cores is a simple multiplication, - * NIX_BUILD_CORES. - -The balance on how to set these two independent variables depends -upon each builder's workload and hardware. Here are a few example -scenarios on a machine with 24 cores: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Balancing 24 Build Cores
NIX_BUILD_CORESMaximum ProcessesResult
1242424 - One derivation will be built at a time, each one can use 24 - cores. Undersold if a job can’t use 24 cores. -
46624 - Four derivations will be built at once, each given access to - six cores. -
126672 - 12 derivations will be built at once, each given access to six - cores. This configuration is over-sold. If all 12 derivations - being built simultaneously try to use all six cores, the - machine's performance will be degraded due to extensive context - switching between the 12 builds. -
241124 - 24 derivations can build at the same time, each using a single - core. Never oversold, but derivations which require many cores - will be very slow to compile. -
24024576 - 24 derivations can build at the same time, each using all the - available cores of the machine. Very likely to be oversold, - and very likely to suffer context switches. -
- -It is up to the derivations' build script to respect -host's requested cores-per-build by following the value of the -NIX_BUILD_CORES environment variable. - -
diff --git a/third_party/nix/doc/manual/advanced-topics/diff-hook.xml b/third_party/nix/doc/manual/advanced-topics/diff-hook.xml deleted file mode 100644 index fb4bf819f9..0000000000 --- a/third_party/nix/doc/manual/advanced-topics/diff-hook.xml +++ /dev/null @@ -1,205 +0,0 @@ - - -Verifying Build Reproducibility with <option linkend="conf-diff-hook">diff-hook</option> - -Check build reproducibility by running builds multiple times -and comparing their results. - -Specify a program with Nix's to -compare build results when two builds produce different results. Note: -this hook is only executed if the results are not the same, this hook -is not used for determining if the results are the same. - -For purposes of demonstration, we'll use the following Nix file, -deterministic.nix for testing: - - -let - inherit (import <nixpkgs> {}) runCommand; -in { - stable = runCommand "stable" {} '' - touch $out - ''; - - unstable = runCommand "unstable" {} '' - echo $RANDOM > $out - ''; -} - - -Additionally, nix.conf contains: - - -diff-hook = /etc/nix/my-diff-hook -run-diff-hook = true - - -where /etc/nix/my-diff-hook is an executable -file containing: - - -#!/bin/sh -exec >&2 -echo "For derivation $3:" -/run/current-system/sw/bin/diff -r "$1" "$2" - - - - -The diff hook is executed by the same user and group who ran the -build. However, the diff hook does not have write access to the store -path just built. - -
- - Spot-Checking Build Determinism - - - - Verify a path which already exists in the Nix store by passing - to the build command. - - - If the build passes and is deterministic, Nix will exit with a - status code of 0: - - -$ nix-build ./deterministic.nix -A stable -these derivations will be built: - /nix/store/z98fasz2jqy9gs0xbvdj939p27jwda38-stable.drv -building '/nix/store/z98fasz2jqy9gs0xbvdj939p27jwda38-stable.drv'... -/nix/store/yyxlzw3vqaas7wfp04g0b1xg51f2czgq-stable - -$ nix-build ./deterministic.nix -A stable --check -checking outputs of '/nix/store/z98fasz2jqy9gs0xbvdj939p27jwda38-stable.drv'... -/nix/store/yyxlzw3vqaas7wfp04g0b1xg51f2czgq-stable - - - If the build is not deterministic, Nix will exit with a status - code of 1: - - -$ nix-build ./deterministic.nix -A unstable -these derivations will be built: - /nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv -building '/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv'... -/nix/store/krpqk0l9ib0ibi1d2w52z293zw455cap-unstable - -$ nix-build ./deterministic.nix -A unstable --check -checking outputs of '/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv'... -error: derivation '/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv' may not be deterministic: output '/nix/store/krpqk0l9ib0ibi1d2w52z293zw455cap-unstable' differs - - -In the Nix daemon's log, we will now see: - -For derivation /nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv: -1c1 -< 8108 ---- -> 30204 - - - - Using with - will cause Nix to keep the second build's output in a special, - .check path: - - -$ nix-build ./deterministic.nix -A unstable --check --keep-failed -checking outputs of '/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv'... -note: keeping build directory '/tmp/nix-build-unstable.drv-0' -error: derivation '/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv' may not be deterministic: output '/nix/store/krpqk0l9ib0ibi1d2w52z293zw455cap-unstable' differs from '/nix/store/krpqk0l9ib0ibi1d2w52z293zw455cap-unstable.check' - - - In particular, notice the - /nix/store/krpqk0l9ib0ibi1d2w52z293zw455cap-unstable.check - output. Nix has copied the build results to that directory where you - can examine it. - - - <literal>.check</literal> paths are not registered store paths - - Check paths are not protected against garbage collection, - and this path will be deleted on the next garbage collection. - - The path is guaranteed to be alive for the duration of - 's execution, but may be deleted - any time after. - - If the comparison is performed as part of automated tooling, - please use the diff-hook or author your tooling to handle the case - where the build was not deterministic and also a check path does - not exist. - - - - is only usable if the derivation has - been built on the system already. If the derivation has not been - built Nix will fail with the error: - -error: some outputs of '/nix/store/hzi1h60z2qf0nb85iwnpvrai3j2w7rr6-unstable.drv' are not valid, so checking is not possible - - - Run the build without , and then try with - again. - -
- -
- - Automatic and Optionally Enforced Determinism Verification - - - - Automatically verify every build at build time by executing the - build multiple times. - - - - Setting and - in your - nix.conf permits the automated verification - of every build Nix performs. - - - - The following configuration will run each build three times, and - will require the build to be deterministic: - - -enforce-determinism = true -repeat = 2 - - - - - Setting to false as in - the following configuration will run the build multiple times, - execute the build hook, but will allow the build to succeed even - if it does not build reproducibly: - - -enforce-determinism = false -repeat = 1 - - - - - An example output of this configuration: - -$ nix-build ./test.nix -A unstable -these derivations will be built: - /nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv -building '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' (round 1/2)... -building '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' (round 2/2)... -output '/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable' of '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' differs from '/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable.check' from previous round -/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable - - -
-
diff --git a/third_party/nix/doc/manual/advanced-topics/distributed-builds.xml b/third_party/nix/doc/manual/advanced-topics/distributed-builds.xml deleted file mode 100644 index 9ac4a92cd5..0000000000 --- a/third_party/nix/doc/manual/advanced-topics/distributed-builds.xml +++ /dev/null @@ -1,190 +0,0 @@ - - -Remote Builds - -Nix supports remote builds, where a local Nix installation can -forward Nix builds to other machines. This allows multiple builds to -be performed in parallel and allows Nix to perform multi-platform -builds in a semi-transparent way. For instance, if you perform a -build for a x86_64-darwin on an -i686-linux machine, Nix can automatically forward -the build to a x86_64-darwin machine, if -available. - -To forward a build to a remote machine, it’s required that the -remote machine is accessible via SSH and that it has Nix -installed. You can test whether connecting to the remote Nix instance -works, e.g. - - -$ nix ping-store --store ssh://mac - - -will try to connect to the machine named mac. It is -possible to specify an SSH identity file as part of the remote store -URI, e.g. - - -$ nix ping-store --store ssh://mac?ssh-key=/home/alice/my-key - - -Since builds should be non-interactive, the key should not have a -passphrase. Alternatively, you can load identities ahead of time into -ssh-agent or gpg-agent. - -If you get the error - - -bash: nix-store: command not found -error: cannot connect to 'mac' - - -then you need to ensure that the PATH of -non-interactive login shells contains Nix. - -If you are building via the Nix daemon, it is the Nix -daemon user account (that is, root) that should -have SSH access to the remote machine. If you can’t or don’t want to -configure root to be able to access to remote -machine, you can use a private Nix store instead by passing -e.g. --store ~/my-nix. - -The list of remote machines can be specified on the command line -or in the Nix configuration file. The former is convenient for -testing. For example, the following command allows you to build a -derivation for x86_64-darwin on a Linux machine: - - -$ uname -Linux - -$ nix build \ - '(with import <nixpkgs> { system = "x86_64-darwin"; }; runCommand "foo" {} "uname > $out")' \ - --builders 'ssh://mac x86_64-darwin' -[1/0/1 built, 0.0 MiB DL] building foo on ssh://mac - -$ cat ./result -Darwin - - -It is possible to specify multiple builders separated by a semicolon -or a newline, e.g. - - - --builders 'ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd' - - - -Each machine specification consists of the following elements, -separated by spaces. Only the first element is required. -To leave a field at its default, set it to -. - - - - The URI of the remote store in the format - ssh://[username@]hostname, - e.g. ssh://nix@mac or - ssh://mac. For backward compatibility, - ssh:// may be omitted. The hostname may be an - alias defined in your - ~/.ssh/config. - - A comma-separated list of Nix platform type - identifiers, such as x86_64-darwin. It is - possible for a machine to support multiple platform types, e.g., - i686-linux,x86_64-linux. If omitted, this - defaults to the local platform type. - - The SSH identity file to be used to log in to the - remote machine. If omitted, SSH will use its regular - identities. - - The maximum number of builds that Nix will execute - in parallel on the machine. Typically this should be equal to the - number of CPU cores. For instance, the machine - itchy in the example will execute up to 8 builds - in parallel. 
- - The “speed factor”, indicating the relative speed of - the machine. If there are multiple machines of the right type, Nix - will prefer the fastest, taking load into account. - - A comma-separated list of supported - features. If a derivation has the - requiredSystemFeatures attribute, then Nix will - only perform the derivation on a machine that has the specified - features. For instance, the attribute - - -requiredSystemFeatures = [ "kvm" ]; - - - will cause the build to be performed on a machine that has the - kvm feature. - - A comma-separated list of mandatory - features. A machine will only be used to build a - derivation if all of the machine’s mandatory features appear in the - derivation’s requiredSystemFeatures - attribute.. - - - -For example, the machine specification - - -nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm -nix@itchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 2 -nix@poochie.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 1 2 kvm benchmark - - -specifies several machines that can perform -i686-linux builds. However, -poochie will only do builds that have the attribute - - -requiredSystemFeatures = [ "benchmark" ]; - - -or - - -requiredSystemFeatures = [ "benchmark" "kvm" ]; - - -itchy cannot do builds that require -kvm, but scratchy does support -such builds. For regular builds, itchy will be -preferred over scratchy because it has a higher -speed factor. - -Remote builders can also be configured in -nix.conf, e.g. - - -builders = ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd - - -Finally, remote builders can be configured in a separate configuration -file included in via the syntax -@file. For example, - - -builders = @/etc/nix/machines - - -causes the list of machines in /etc/nix/machines -to be included. (This is the default.) - -If you want the builders to use caches, you likely want to set -the option builders-use-substitutes -in your local nix.conf. - -To build only on remote builders and disable building on the local machine, -you can use the option . - - diff --git a/third_party/nix/doc/manual/advanced-topics/post-build-hook.xml b/third_party/nix/doc/manual/advanced-topics/post-build-hook.xml deleted file mode 100644 index 3dc43ee795..0000000000 --- a/third_party/nix/doc/manual/advanced-topics/post-build-hook.xml +++ /dev/null @@ -1,160 +0,0 @@ - - -Using the <xref linkend="conf-post-build-hook" /> -Uploading to an S3-compatible binary cache after each build - - -
- Implementation Caveats - Here we use the post-build hook to upload to a binary cache. - This is a simple and working example, but it is not suitable for all - use cases. - - The post build hook program runs after each executed build, - and blocks the build loop. The build loop exits if the hook program - fails. - - Concretely, this implementation will make Nix slow or unusable - when the internet is slow or unreliable. - - A more advanced implementation might pass the store paths to a - user-supplied daemon or queue for processing the store paths outside - of the build loop. -
- -
- Prerequisites - - - This tutorial assumes you have configured an S3-compatible binary cache - according to the instructions at - , and - that the root user's default AWS profile can - upload to the bucket. - -
- -
- Set up a Signing Key - Use nix-store --generate-binary-cache-key to - create our public and private signing keys. We will sign paths - with the private key, and distribute the public key for verifying - the authenticity of the paths. - - -# nix-store --generate-binary-cache-key example-nix-cache-1 /etc/nix/key.private /etc/nix/key.public -# cat /etc/nix/key.public -example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM= - - -Then, add the public key and the cache URL to your -nix.conf's -and like: - - -substituters = https://cache.nixos.org/ s3://example-nix-cache -trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM= - - -we will restart the Nix daemon a later step. -
- -
- Implementing the build hook - Write the following script to - /etc/nix/upload-to-cache.sh: - - - -#!/bin/sh - -set -eu -set -f # disable globbing -export IFS=' ' - -echo "Signing paths" $OUT_PATHS -nix sign-paths --key-file /etc/nix/key.private $OUT_PATHS -echo "Uploading paths" $OUT_PATHS -exec nix copy --to 's3://example-nix-cache' $OUT_PATHS - - - - Should <literal>$OUT_PATHS</literal> be quoted? - - The $OUT_PATHS variable is a space-separated - list of Nix store paths. In this case, we expect and want the - shell to perform word splitting to make each output path its - own argument to nix sign-paths. Nix guarantees - the paths will not contain any spaces, however a store path - might contain glob characters. The set -f - disables globbing in the shell. - - - - Then make sure the hook program is executable by the root user: - -# chmod +x /etc/nix/upload-to-cache.sh - -
- -
- Updating Nix Configuration - - Edit /etc/nix/nix.conf to run our hook, - by adding the following configuration snippet at the end: - - -post-build-hook = /etc/nix/upload-to-cache.sh - - -Then, restart the nix-daemon. -
- -
- Testing - - Build any derivation, for example: - - -$ nix-build -E '(import <nixpkgs> {}).writeText "example" (builtins.toString builtins.currentTime)' -these derivations will be built: - /nix/store/s4pnfbkalzy5qz57qs6yybna8wylkig6-example.drv -building '/nix/store/s4pnfbkalzy5qz57qs6yybna8wylkig6-example.drv'... -running post-build-hook '/home/grahamc/projects/github.com/NixOS/nix/post-hook.sh'... -post-build-hook: Signing paths /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example -post-build-hook: Uploading paths /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example -/nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example - - - Then delete the path from the store, and try substituting it from the binary cache: - -$ rm ./result -$ nix-store --delete /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example - - -Now, copy the path back from the cache: - -$ nix store --realize /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example -copying path '/nix/store/m8bmqwrch6l3h8s0k3d673xpmipcdpsa-example from 's3://example-nix-cache'... -warning: you did not specify '--add-root'; the result might be removed by the garbage collector -/nix/store/m8bmqwrch6l3h8s0k3d673xpmipcdpsa-example - -
-
- Conclusion - - We now have a Nix installation configured to automatically sign and - upload every local build to a remote binary cache. - - - - Before deploying this to production, be sure to consider the - implementation caveats in . - -
-
diff --git a/third_party/nix/doc/manual/command-ref/command-ref.xml b/third_party/nix/doc/manual/command-ref/command-ref.xml deleted file mode 100644 index cfad9b7d79..0000000000 --- a/third_party/nix/doc/manual/command-ref/command-ref.xml +++ /dev/null @@ -1,20 +0,0 @@ - - -Command Reference - - -This section lists commands and options that you can use when you -work with Nix. - - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/conf-file.xml b/third_party/nix/doc/manual/command-ref/conf-file.xml deleted file mode 100644 index 4a5400b193..0000000000 --- a/third_party/nix/doc/manual/command-ref/conf-file.xml +++ /dev/null @@ -1,1202 +0,0 @@ - - - - - nix.conf - 5 - Nix - - - - - nix.conf - Nix configuration file - - -Description - -Nix reads settings from two configuration files: - - - - - The system-wide configuration file - sysconfdir/nix/nix.conf - (i.e. /etc/nix/nix.conf on most systems), or - $NIX_CONF_DIR/nix.conf if - NIX_CONF_DIR is set. - - - - The user configuration file - $XDG_CONFIG_HOME/nix/nix.conf, or - ~/.config/nix/nix.conf if - XDG_CONFIG_HOME is not set. - - - - -The configuration files consist of -name = -value pairs, one per line. Other -files can be included with a line like include -path, where -path is interpreted relative to the current -conf file and a missing file is an error unless -!include is used instead. -Comments start with a # character. Here is an -example configuration file: - - -keep-outputs = true # Nice for developers -keep-derivations = true # Idem - - -You can override settings on the command line using the - flag, e.g. --option keep-outputs -false. - -The following settings are currently available: - - - - - allowed-uris - - - - A list of URI prefixes to which access is allowed in - restricted evaluation mode. For example, when set to - https://github.com/NixOS, builtin functions - such as fetchGit are allowed to access - https://github.com/NixOS/patchelf.git. - - - - - - - allow-import-from-derivation - - By default, Nix allows you to import from a derivation, - allowing building at evaluation time. With this option set to false, Nix will throw an error - when evaluating an expression that uses this feature, allowing users to ensure their evaluation - will not require any builds to take place. - - - - - allow-new-privileges - - (Linux-specific.) By default, builders on Linux - cannot acquire new privileges by calling setuid/setgid programs or - programs that have file capabilities. For example, programs such - as sudo or ping will - fail. (Note that in sandbox builds, no such programs are available - unless you bind-mount them into the sandbox via the - option.) You can allow the - use of such programs by enabling this option. This is impure and - usually undesirable, but may be useful in certain scenarios - (e.g. to spin up containers or set up userspace network interfaces - in tests). - - - - - allowed-users - - - - A list of names of users (separated by whitespace) that - are allowed to connect to the Nix daemon. As with the - option, you can specify groups by - prefixing them with @. Also, you can allow - all users by specifying *. The default is - *. - - Note that trusted users are always allowed to connect. - - - - - - - auto-optimise-store - - If set to true, Nix - automatically detects files in the store that have identical - contents, and replaces them with hard links to a single copy. - This saves disk space. If set to false (the - default), you can still run nix-store - --optimise to get rid of duplicate - files. 
- - - - - builders - - A list of machines on which to perform builds. See for details. - - - - - builders-use-substitutes - - If set to true, Nix will instruct - remote build machines to use their own binary substitutes if available. In - practical terms, this means that remote hosts will fetch as many build - dependencies as possible from their own substitutes (e.g, from - cache.nixos.org), instead of waiting for this host to - upload them all. This can drastically reduce build times if the network - connection between this computer and the remote build host is slow. Defaults - to false. - - - - build-users-group - - This options specifies the Unix group containing - the Nix build user accounts. In multi-user Nix installations, - builds should not be performed by the Nix account since that would - allow users to arbitrarily modify the Nix store and database by - supplying specially crafted builders; and they cannot be performed - by the calling user since that would allow him/her to influence - the build result. - - Therefore, if this option is non-empty and specifies a valid - group, builds will be performed under the user accounts that are a - member of the group specified here (as listed in - /etc/group). Those user accounts should not - be used for any other purpose! - - Nix will never run two builds under the same user account at - the same time. This is to prevent an obvious security hole: a - malicious user writing a Nix expression that modifies the build - result of a legitimate Nix expression being built by another user. - Therefore it is good to have as many Nix build user accounts as - you can spare. (Remember: uids are cheap.) - - The build users should have permission to create files in - the Nix store, but not delete them. Therefore, - /nix/store should be owned by the Nix - account, its group should be the group specified here, and its - mode should be 1775. - - If the build users group is empty, builds will be performed - under the uid of the Nix process (that is, the uid of the caller - if NIX_REMOTE is empty, the uid under which the Nix - daemon runs if NIX_REMOTE is - daemon). Obviously, this should not be used in - multi-user settings with untrusted users. - - - - - - - compress-build-log - - If set to true (the default), - build logs written to /nix/var/log/nix/drvs - will be compressed on the fly using bzip2. Otherwise, they will - not be compressed. - - - - connect-timeout - - - - The timeout (in seconds) for establishing connections in - the binary cache substituter. It corresponds to - curl’s - option. - - - - - - - cores - - Sets the value of the - NIX_BUILD_CORES environment variable in the - invocation of builders. Builders can use this variable at their - discretion to control the maximum amount of parallelism. For - instance, in Nixpkgs, if the derivation attribute - enableParallelBuilding is set to - true, the builder passes the - flag to GNU Make. - It can be overridden using the command line switch and - defaults to 1. The value 0 - means that the builder should use all available CPU cores in the - system. - - See also . - - - diff-hook - - - Absolute path to an executable capable of diffing build results. - The hook executes if is - true, and the output of a build is known to not be the same. - This program is not executed to determine if two results are the - same. - - - - The diff hook is executed by the same user and group who ran the - build. However, the diff hook does not have write access to the - store path just built. 
- - - The diff hook program receives three parameters: - - - - - A path to the previous build's results - - - - - - A path to the current build's results - - - - - - The path to the build's derivation - - - - - - The path to the build's scratch directory. This directory - will exist only if the build was run with - . - - - - - - The stderr and stdout output from the diff hook will not be - displayed to the user. Instead, it will print to the nix-daemon's - log. - - - When using the Nix daemon, diff-hook must - be set in the nix.conf configuration file, and - cannot be passed at the command line. - - - - - - enforce-determinism - - See . - - - - extra-sandbox-paths - - A list of additional paths appended to - . Useful if you want to extend - its default value. - - - - - extra-platforms - - Platforms other than the native one which - this machine is capable of building for. This can be useful for - supporting additional architectures on compatible machines: - i686-linux can be built on x86_64-linux machines (and the default - for this setting reflects this); armv7 is backwards-compatible with - armv6 and armv5tel; some aarch64 machines can also natively run - 32-bit ARM code; and qemu-user may be used to support non-native - platforms (though this may be slow and buggy). Most values for this - are not enabled by default because build systems will often - misdetect the target platform and generate incompatible code, so you - may wish to cross-check the results of using this option against - proper natively-built versions of your - derivations. - - - - - extra-substituters - - Additional binary caches appended to those - specified in . When used by - unprivileged users, untrusted substituters (i.e. those not listed - in ) are silently - ignored. - - - - fallback - - If set to true, Nix will fall - back to building from source if a binary substitute fails. This - is equivalent to the flag. The - default is false. - - - - fsync-metadata - - If set to true, changes to the - Nix store metadata (in /nix/var/nix/db) are - synchronously flushed to disk. This improves robustness in case - of system crashes, but reduces performance. The default is - true. - - - - hashed-mirrors - - A list of web servers used by - builtins.fetchurl to obtain files by - hash. The default is - http://tarballs.nixos.org/. Given a hash type - ht and a base-16 hash - h, Nix will try to download the file - from - hashed-mirror/ht/h. - This allows files to be downloaded even if they have disappeared - from their original URI. For example, given the default mirror - http://tarballs.nixos.org/, when building the derivation - - -builtins.fetchurl { - url = https://example.org/foo-1.2.3.tar.xz; - sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"; -} - - - Nix will attempt to download this file from - http://tarballs.nixos.org/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae - first. If it is not available there, if will try the original URI. - - - - - http-connections - - The maximum number of parallel TCP connections - used to fetch files from binary caches and by other downloads. It - defaults to 25. 0 means no limit. - - - - - keep-build-log - - If set to true (the default), - Nix will write the build log of a derivation (i.e. the standard - output and error of its builder) to the directory - /nix/var/log/nix/drvs. The build log can be - retrieved using the command nix-store -l - path. 
- - - - - keep-derivations - - If true (default), the garbage - collector will keep the derivations from which non-garbage store - paths were built. If false, they will be - deleted unless explicitly registered as a root (or reachable from - other roots). - - Keeping derivation around is useful for querying and - traceability (e.g., it allows you to ask with what dependencies or - options a store path was built), so by default this option is on. - Turn it off to save a bit of disk space (or a lot if - keep-outputs is also turned on). - - - keep-env-derivations - - If false (default), derivations - are not stored in Nix user environments. That is, the derivations of - any build-time-only dependencies may be garbage-collected. - - If true, when you add a Nix derivation to - a user environment, the path of the derivation is stored in the - user environment. Thus, the derivation will not be - garbage-collected until the user environment generation is deleted - (nix-env --delete-generations). To prevent - build-time-only dependencies from being collected, you should also - turn on keep-outputs. - - The difference between this option and - keep-derivations is that this one is - “sticky”: it applies to any user environment created while this - option was enabled, while keep-derivations - only applies at the moment the garbage collector is - run. - - - - keep-outputs - - If true, the garbage collector - will keep the outputs of non-garbage derivations. If - false (default), outputs will be deleted unless - they are GC roots themselves (or reachable from other roots). - - In general, outputs must be registered as roots separately. - However, even if the output of a derivation is registered as a - root, the collector will still delete store paths that are used - only at build time (e.g., the C compiler, or source tarballs - downloaded from the network). To prevent it from doing so, set - this option to true. - - - max-build-log-size - - - - This option defines the maximum number of bytes that a - builder can write to its stdout/stderr. If the builder exceeds - this limit, it’s killed. A value of 0 (the - default) means that there is no limit. - - - - - - max-free - - When a garbage collection is triggered by the - min-free option, it stops as soon as - max-free bytes are available. The default is - infinity (i.e. delete all garbage). - - - - max-jobs - - This option defines the maximum number of jobs - that Nix will try to build in parallel. The default is - 1. The special value auto - causes Nix to use the number of CPUs in your system. 0 - is useful when using remote builders to prevent any local builds (except for - preferLocalBuild derivation attribute which executes locally - regardless). It can be - overridden using the () - command line switch. - - See also . - - - - max-silent-time - - - - This option defines the maximum number of seconds that a - builder can go without producing any data on standard output or - standard error. This is useful (for instance in an automated - build system) to catch builds that are stuck in an infinite - loop, or to catch remote builds that are hanging due to network - problems. It can be overridden using the command - line switch. - - The value 0 means that there is no - timeout. This is also the default. - - - - - - min-free - - - When free disk space in /nix/store - drops below min-free during a build, Nix - performs a garbage-collection until max-free - bytes are available or there is no more garbage. A value of - 0 (the default) disables this feature. 
- - - - - narinfo-cache-negative-ttl - - - - The TTL in seconds for negative lookups. If a store path is - queried from a substituter but was not found, there will be a - negative lookup cached in the local disk cache database for the - specified duration. - - - - - - narinfo-cache-positive-ttl - - - - The TTL in seconds for positive lookups. If a store path is - queried from a substituter, the result of the query will be cached - in the local disk cache database including some of the NAR - metadata. The default TTL is a month, setting a shorter TTL for - positive lookups can be useful for binary caches that have - frequent garbage collection, in which case having a more frequent - cache invalidation would prevent trying to pull the path again and - failing with a hash mismatch if the build isn't reproducible. - - - - - - - netrc-file - - If set to an absolute path to a netrc - file, Nix will use the HTTP authentication credentials in this file when - trying to download from a remote host through HTTP or HTTPS. Defaults to - $NIX_CONF_DIR/netrc. - - The netrc file consists of a list of - accounts in the following format: - - -machine my-machine -login my-username -password my-password - - - For the exact syntax, see the - curl documentation. - - This must be an absolute path, and ~ - is not resolved. For example, ~/.netrc won't - resolve to your home directory's .netrc. - - - - - - pre-build-hook - - - - - If set, the path to a program that can set extra - derivation-specific settings for this system. This is used for settings - that can't be captured by the derivation model itself and are too variable - between different versions of the same system to be hard-coded into nix. - - - The hook is passed the derivation path and, if sandboxes are enabled, - the sandbox directory. It can then modify the sandbox and send a series of - commands to modify various settings to stdout. The currently recognized - commands are: - - - - extra-sandbox-paths - - - - Pass a list of files and directories to be included in the - sandbox for this build. One entry per line, terminated by an empty - line. Entries have the same format as - sandbox-paths. - - - - - - - - - - - post-build-hook - - Optional. The path to a program to execute after each build. - - This option is only settable in the global - nix.conf, or on the command line by trusted - users. - - When using the nix-daemon, the daemon executes the hook as - root. If the nix-daemon is not involved, the - hook runs as the user executing the nix-build. - - - The hook executes after an evaluation-time build. - The hook does not execute on substituted paths. - The hook's output always goes to the user's terminal. - If the hook fails, the build succeeds but no further builds execute. - The hook executes synchronously, and blocks other builds from progressing while it runs. - - - The program executes with no arguments. The program's environment - contains the following environment variables: - - - - DRV_PATH - - The derivation for the built paths. - Example: - /nix/store/5nihn1a7pa8b25l9zafqaqibznlvvp3f-bash-4.4-p23.drv - - - - - - OUT_PATHS - - Output paths of the built derivation, separated by a space character. - Example: - /nix/store/zf5lbh336mnzf1nlswdn11g4n2m8zh3g-bash-4.4-p23-dev - /nix/store/rjxwxwv1fpn9wa2x5ssk5phzwlcv4mna-bash-4.4-p23-doc - /nix/store/6bqvbzjkcp9695dq0dpl5y43nvy37pq1-bash-4.4-p23-info - /nix/store/r7fng3kk3vlpdlh2idnrbn37vh4imlj2-bash-4.4-p23-man - /nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23. 
- - - - - - See for an example - implementation. - - - - - repeat - - How many times to repeat builds to check whether - they are deterministic. The default value is 0. If the value is - non-zero, every build is repeated the specified number of - times. If the contents of any of the runs differs from the - previous ones and is - true, the build is rejected and the resulting store paths are not - registered as “valid” in Nix’s database. - - - require-sigs - - If set to true (the default), - any non-content-addressed path added or copied to the Nix store - (e.g. when substituting from a binary cache) must have a valid - signature, that is, be signed using one of the keys listed in - or - . Set to false - to disable signature checking. - - - - - restrict-eval - - - - If set to true, the Nix evaluator will - not allow access to any files outside of the Nix search path (as - set via the NIX_PATH environment variable or the - option), or to URIs outside of - . The default is - false. - - - - - - run-diff-hook - - - If true, enable the execution of . - - - - When using the Nix daemon, run-diff-hook must - be set in the nix.conf configuration file, - and cannot be passed at the command line. - - - - - sandbox - - If set to true, builds will be - performed in a sandboxed environment, i.e., - they’re isolated from the normal file system hierarchy and will - only see their dependencies in the Nix store, the temporary build - directory, private versions of /proc, - /dev, /dev/shm and - /dev/pts (on Linux), and the paths configured with the - sandbox-paths - option. This is useful to prevent undeclared dependencies - on files in directories such as /usr/bin. In - addition, on Linux, builds run in private PID, mount, network, IPC - and UTS namespaces to isolate them from other processes in the - system (except that fixed-output derivations do not run in private - network namespace to ensure they can access the network). - - Currently, sandboxing only work on Linux and macOS. The use - of a sandbox requires that Nix is run as root (so you should use - the “build users” - feature to perform the actual builds under different users - than root). - - If this option is set to relaxed, then - fixed-output derivations and derivations that have the - __noChroot attribute set to - true do not run in sandboxes. - - The default is true on Linux and - false on all other platforms. - - - - - - sandbox-dev-shm-size - - This option determines the maximum size of the - tmpfs filesystem mounted on - /dev/shm in Linux sandboxes. For the format, - see the description of the option of - tmpfs in - mount8. The - default is 50%. - - - - - - sandbox-paths - - A list of paths bind-mounted into Nix sandbox - environments. You can use the syntax - target=source - to mount a path in a different location in the sandbox; for - instance, /bin=/nix-bin will mount the path - /nix-bin as /bin inside the - sandbox. If source is followed by - ?, then it is not an error if - source does not exist; for example, - /dev/nvidiactl? specifies that - /dev/nvidiactl will only be mounted in the - sandbox if it exists in the host filesystem. - - Depending on how Nix was built, the default value for this option - may be empty or provide /bin/sh as a - bind-mount of bash. - - - - - secret-key-files - - A whitespace-separated list of files containing - secret (private) keys. These are used to sign locally-built - paths. They can be generated using nix-store - --generate-binary-cache-key. 
The corresponding public - key can be distributed to other users, who can add it to - in their - nix.conf. - - - - - show-trace - - Causes Nix to print out a stack trace in case of Nix - expression evaluation errors. - - - - - substitute - - If set to true (default), Nix - will use binary substitutes if available. This option can be - disabled to force building from source. - - - - stalled-download-timeout - - The timeout (in seconds) for receiving data from servers - during download. Nix cancels idle downloads after this timeout's - duration. - - - - substituters - - A list of URLs of substituters, separated by - whitespace. The default is - https://cache.nixos.org. - - - - system - - This option specifies the canonical Nix system - name of the current installation, such as - i686-linux or - x86_64-darwin. Nix can only build derivations - whose system attribute equals the value - specified here. In general, it never makes sense to modify this - value from its default, since you can use it to ‘lie’ about the - platform you are building on (e.g., perform a Mac OS build on a - Linux machine; the result would obviously be wrong). It only - makes sense if the Nix binaries can run on multiple platforms, - e.g., ‘universal binaries’ that run on x86_64-linux and - i686-linux. - - It defaults to the canonical Nix system name detected by - configure at build time. - - - - - system-features - - A set of system “features” supported by this - machine, e.g. kvm. Derivations can express a - dependency on such features through the derivation attribute - requiredSystemFeatures. For example, the - attribute - - -requiredSystemFeatures = [ "kvm" ]; - - - ensures that the derivation can only be built on a machine with - the kvm feature. - - This setting by default includes kvm if - /dev/kvm is accessible, and the - pseudo-features nixos-test, - benchmark and big-parallel - that are used in Nixpkgs to route builds to specific - machines. - - - - - - tarball-ttl - - - Default: 3600 seconds. - - The number of seconds a downloaded tarball is considered - fresh. If the cached tarball is stale, Nix will check whether - it is still up to date using the ETag header. Nix will download - a new version if the ETag header is unsupported, or the - cached ETag doesn't match. - - - Setting the TTL to 0 forces Nix to always - check if the tarball is up to date. - - Nix caches tarballs in - $XDG_CACHE_HOME/nix/tarballs. - - Files fetched via NIX_PATH, - fetchGit, fetchMercurial, - fetchTarball, and fetchurl - respect this TTL. - - - - - timeout - - - - This option defines the maximum number of seconds that a - builder can run. This is useful (for instance in an automated - build system) to catch builds that are stuck in an infinite loop - but keep writing to their standard output or standard error. It - can be overridden using the command line - switch. - - The value 0 means that there is no - timeout. This is also the default. - - - - - - trace-function-calls - - - - Default: false. - - If set to true, the Nix evaluator will - trace every function call. Nix will print a log message at the - "vomit" level for every function entrance and function exit. - - -function-trace entered undefined position at 1565795816999559622 -function-trace exited undefined position at 1565795816999581277 -function-trace entered /nix/store/.../example.nix:226:41 at 1565795253249935150 -function-trace exited /nix/store/.../example.nix:226:41 at 1565795253249941684 - - - The undefined position means the function - call is a builtin. 
- - Use the contrib/stack-collapse.py script - distributed with the Nix source code to convert the trace logs - in to a format suitable for flamegraph.pl. - - - - - - trusted-public-keys - - A whitespace-separated list of public keys. When - paths are copied from another Nix store (such as a binary cache), - they must be signed with one of these keys. For example: - cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= - hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=. - - - - trusted-substituters - - A list of URLs of substituters, separated by - whitespace. These are not used by default, but can be enabled by - users of the Nix daemon by specifying --option - substituters urls on the - command line. Unprivileged users are only allowed to pass a - subset of the URLs listed in substituters and - trusted-substituters. - - - - trusted-users - - - - A list of names of users (separated by whitespace) that - have additional rights when connecting to the Nix daemon, such - as the ability to specify additional binary caches, or to import - unsigned NARs. You can also specify groups by prefixing them - with @; for instance, - @wheel means all users in the - wheel group. The default is - root. - - Adding a user to - is essentially equivalent to giving that user root access to the - system. For example, the user can set - and thereby obtain read access to - directories that are otherwise inacessible to - them. - - - - - - - - - - Deprecated Settings - - - - - - - binary-caches - - Deprecated: - binary-caches is now an alias to - . - - - - binary-cache-public-keys - - Deprecated: - binary-cache-public-keys is now an alias to - . - - - - build-compress-log - - Deprecated: - build-compress-log is now an alias to - . - - - - build-cores - - Deprecated: - build-cores is now an alias to - . - - - - build-extra-chroot-dirs - - Deprecated: - build-extra-chroot-dirs is now an alias to - . - - - - build-extra-sandbox-paths - - Deprecated: - build-extra-sandbox-paths is now an alias to - . - - - - build-fallback - - Deprecated: - build-fallback is now an alias to - . - - - - build-max-jobs - - Deprecated: - build-max-jobs is now an alias to - . - - - - build-max-log-size - - Deprecated: - build-max-log-size is now an alias to - . - - - - build-max-silent-time - - Deprecated: - build-max-silent-time is now an alias to - . - - - - build-repeat - - Deprecated: - build-repeat is now an alias to - . - - - - build-timeout - - Deprecated: - build-timeout is now an alias to - . - - - - build-use-chroot - - Deprecated: - build-use-chroot is now an alias to - . - - - - build-use-sandbox - - Deprecated: - build-use-sandbox is now an alias to - . - - - - build-use-substitutes - - Deprecated: - build-use-substitutes is now an alias to - . - - - - gc-keep-derivations - - Deprecated: - gc-keep-derivations is now an alias to - . - - - - gc-keep-outputs - - Deprecated: - gc-keep-outputs is now an alias to - . - - - - env-keep-derivations - - Deprecated: - env-keep-derivations is now an alias to - . - - - - extra-binary-caches - - Deprecated: - extra-binary-caches is now an alias to - . - - - - trusted-binary-caches - - Deprecated: - trusted-binary-caches is now an alias to - . 
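Taken together, a minimal nix.conf exercising several of the settings documented above might look like this (a sketch; the values are illustrative, with the public key copied from the trusted-public-keys example):

sandbox = true
substituters = https://cache.nixos.org
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
trusted-users = root @wheel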
- - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/env-common.xml b/third_party/nix/doc/manual/command-ref/env-common.xml deleted file mode 100644 index 696d68c345..0000000000 --- a/third_party/nix/doc/manual/command-ref/env-common.xml +++ /dev/null @@ -1,202 +0,0 @@ - - -Common Environment Variables - - -Most Nix commands interpret the following environment variables: - - - -IN_NIX_SHELL - - Indicator that tells if the current environment was set up by - nix-shell. Since Nix 2.0 the values are - "pure" and "impure" - - - -NIX_PATH - - - - A colon-separated list of directories used to look up Nix - expressions enclosed in angle brackets (i.e., - <path>). For - instance, the value - - -/home/eelco/Dev:/etc/nixos - - will cause Nix to look for paths relative to - /home/eelco/Dev and - /etc/nixos, in that order. It is also - possible to match paths against a prefix. For example, the value - - -nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos - - will cause Nix to search for - <nixpkgs/path> in - /home/eelco/Dev/nixpkgs-branch/path - and - /etc/nixos/nixpkgs/path. - - If a path in the Nix search path starts with - http:// or https://, it is - interpreted as the URL of a tarball that will be downloaded and - unpacked to a temporary location. The tarball must consist of a - single top-level directory. For example, setting - NIX_PATH to - - -nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-15.09.tar.gz - - tells Nix to download the latest revision in the Nixpkgs/NixOS - 15.09 channel. - - A following shorthand can be used to refer to the official channels: - - nixpkgs=channel:nixos-15.09 - - - The search path can be extended using the option, which takes precedence over - NIX_PATH. - - - - -NIX_IGNORE_SYMLINK_STORE - - - - Normally, the Nix store directory (typically - /nix/store) is not allowed to contain any - symlink components. This is to prevent “impure” builds. Builders - sometimes “canonicalise” paths by resolving all symlink components. - Thus, builds on different machines (with - /nix/store resolving to different locations) - could yield different results. This is generally not a problem, - except when builds are deployed to machines where - /nix/store resolves differently. If you are - sure that you’re not going to do that, you can set - NIX_IGNORE_SYMLINK_STORE to 1. - - Note that if you’re symlinking the Nix store so that you can - put it on another file system than the root file system, on Linux - you’re better off using bind mount points, e.g., - - -$ mkdir /nix -$ mount -o bind /mnt/otherdisk/nix /nix - - Consult the mount - 8 manual page for details. - - - - - - -NIX_STORE_DIR - - Overrides the location of the Nix store (default - prefix/store). - - - - -NIX_DATA_DIR - - Overrides the location of the Nix static data - directory (default - prefix/share). - - - - -NIX_LOG_DIR - - Overrides the location of the Nix log directory - (default prefix/var/log/nix). - - - - -NIX_STATE_DIR - - Overrides the location of the Nix state directory - (default prefix/var/nix). - - - - -NIX_CONF_DIR - - Overrides the location of the Nix configuration - directory (default - prefix/etc/nix). - - - - -TMPDIR - - Use the specified directory to store temporary - files. In particular, this includes temporary build directories; - these can take up substantial amounts of disk space. The default is - /tmp. - - - - -NIX_REMOTE - - This variable should be set to - daemon if you want to use the Nix daemon to - execute Nix operations. 
This is necessary in multi-user Nix installations. - If the Nix daemon's Unix socket is at some non-standard path, - this variable should be set to unix://path/to/socket. - Otherwise, it should be left unset. - - - - -NIX_SHOW_STATS - - If set to 1, Nix will print some - evaluation statistics, such as the number of values - allocated. - - - - -NIX_COUNT_CALLS - - If set to 1, Nix will print how - often functions were called during Nix expression evaluation. This - is useful for profiling your Nix expressions. - - - - -GC_INITIAL_HEAP_SIZE - - If Nix has been configured to use the Boehm garbage - collector, this variable sets the initial size of the heap in bytes. - It defaults to 384 MiB. Setting it to a low value reduces memory - consumption, but will increase runtime due to the overhead of - garbage collection. - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/files.xml b/third_party/nix/doc/manual/command-ref/files.xml deleted file mode 100644 index 7bbc96e899..0000000000 --- a/third_party/nix/doc/manual/command-ref/files.xml +++ /dev/null @@ -1,14 +0,0 @@ - - -Files - -This section lists configuration files that you can use when you -work with Nix. - - - - \ No newline at end of file diff --git a/third_party/nix/doc/manual/command-ref/main-commands.xml b/third_party/nix/doc/manual/command-ref/main-commands.xml deleted file mode 100644 index 0f4169243c..0000000000 --- a/third_party/nix/doc/manual/command-ref/main-commands.xml +++ /dev/null @@ -1,17 +0,0 @@ - - -Main Commands - -This section lists commands and options that you can use when you -work with Nix. - - - - - - - \ No newline at end of file diff --git a/third_party/nix/doc/manual/command-ref/nix-build.xml b/third_party/nix/doc/manual/command-ref/nix-build.xml deleted file mode 100644 index c1b783c87d..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-build.xml +++ /dev/null @@ -1,190 +0,0 @@ - - - - nix-build - 1 - Nix - - - - - nix-build - build a Nix expression - - - - - nix-build - - name value - name value - - - - - - attrPath - - - - - - - - - outlink - - paths - - - -Description - -The nix-build command builds the derivations -described by the Nix expressions in paths. -If the build succeeds, it places a symlink to the result in the -current directory. The symlink is called result. -If there are multiple Nix expressions, or the Nix expressions evaluate -to multiple derivations, multiple sequentially numbered symlinks are -created (result, result-2, -and so on). - -If no paths are specified, then -nix-build will use default.nix -in the current directory, if it exists. - -If an element of paths starts with -http:// or https://, it is -interpreted as the URL of a tarball that will be downloaded and -unpacked to a temporary location. The tarball must include a single -top-level directory containing at least a file named -default.nix. - -nix-build is essentially a wrapper around -nix-instantiate -(to translate a high-level Nix expression to a low-level store -derivation) and nix-store ---realise (to build the store derivation). - -The result of the build is automatically registered as -a root of the Nix garbage collector. This root disappears -automatically when the result symlink is deleted -or renamed. So don’t rename the symlink. - - - - -Options - -All options not listed here are passed to nix-store ---realise, except for and - / which are passed to -nix-instantiate. See -also . - - - - - - Do not create a symlink to the output path. 
Note - that as a result the output does not become a root of the garbage - collector, and so might be deleted by nix-store - --gc. - - - - - Show what store paths would be built or downloaded - - - / - outlink - - Change the name of the symlink to the output path - created from result to - outlink. - - - - - -The following common options are supported: - - - - - - - - -Examples - - -$ nix-build '<nixpkgs>' -A firefox -store derivation is /nix/store/qybprl8sz2lc...-firefox-1.5.0.7.drv -/nix/store/d18hyl92g30l...-firefox-1.5.0.7 - -$ ls -l result -lrwxrwxrwx ... result -> /nix/store/d18hyl92g30l...-firefox-1.5.0.7 - -$ ls ./result/bin/ -firefox firefox-config - -If a derivation has multiple outputs, -nix-build will build the default (first) output. -You can also build all outputs: - -$ nix-build '<nixpkgs>' -A openssl.all - -This will create a symlink for each output named -result-outputname. -The suffix is omitted if the output name is out. -So if openssl has outputs out, -bin and man, -nix-build will create symlinks -result, result-bin and -result-man. It’s also possible to build a specific -output: - -$ nix-build '<nixpkgs>' -A openssl.man - -This will create a symlink result-man. - -Build a Nix expression given on the command line: - - -$ nix-build -E 'with import <nixpkgs> { }; runCommand "foo" { } "echo bar > $out"' -$ cat ./result -bar - - - - -Build the GNU Hello package from the latest revision of the -master branch of Nixpkgs: - - -$ nix-build https://github.com/NixOS/nixpkgs/archive/master.tar.gz -A hello - - - - - - - -Environment variables - - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/nix-channel.xml b/third_party/nix/doc/manual/command-ref/nix-channel.xml deleted file mode 100644 index 5a2866e6bc..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-channel.xml +++ /dev/null @@ -1,178 +0,0 @@ - - - - nix-channel - 1 - Nix - - - - - nix-channel - manage Nix channels - - - - - nix-channel - - url name - name - - names - generation - - - - -Description - -A Nix channel is a mechanism that allows you to automatically -stay up-to-date with a set of pre-built Nix expressions. A Nix -channel is just a URL that points to a place containing a set of Nix -expressions. See also . - -This command has the following operations: - - - - url [name] - - Adds a channel named - name with URL - url to the list of subscribed channels. - If name is omitted, it defaults to the - last component of url, with the - suffixes -stable or - -unstable removed. - - - - name - - Removes the channel named - name from the list of subscribed - channels. - - - - - - Prints the names and URLs of all subscribed - channels on standard output. - - - - [names…] - - Downloads the Nix expressions of all subscribed - channels (or only those included in - names if specified) and makes them the - default for nix-env operations (by symlinking - them from the directory - ~/.nix-defexpr). - - - - [generation] - - Reverts the previous call to nix-channel - --update. Optionally, you can specify a specific channel - generation number to restore. - - - - - - - -Note that does not automatically perform -an update. - -The list of subscribed channels is stored in -~/.nix-channels. 
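As an illustration of that file's layout (an assumption for illustration, not a normative example from the manual: each line holds a channel URL followed by the channel name):

https://nixos.org/channels/nixpkgs-unstable nixpkgs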
- - - -Examples - -To subscribe to the Nixpkgs channel and install the GNU Hello package: - - -$ nix-channel --add https://nixos.org/channels/nixpkgs-unstable -$ nix-channel --update -$ nix-env -iA nixpkgs.hello - -You can revert channel updates using : - - -$ nix-instantiate --eval -E '(import <nixpkgs> {}).lib.nixpkgsVersion' -"14.04.527.0e935f1" - -$ nix-channel --rollback -switching from generation 483 to 482 - -$ nix-instantiate --eval -E '(import <nixpkgs> {}).lib.nixpkgsVersion' -"14.04.526.dbadfad" - - - - -Files - - - - /nix/var/nix/profiles/per-user/username/channels - - nix-channel uses a - nix-env profile to keep track of previous - versions of the subscribed channels. Every time you run - nix-channel --update, a new channel generation - (that is, a symlink to the channel Nix expressions in the Nix store) - is created. This enables nix-channel --rollback - to revert to previous versions. - - - - ~/.nix-defexpr/channels - - This is a symlink to - /nix/var/nix/profiles/per-user/username/channels. It - ensures that nix-env can find your channels. In - a multi-user installation, you may also have - ~/.nix-defexpr/channels_root, which links to - the channels of the root user. - - - - - - - -Channel format - -A channel URL should point to a directory containing the -following files: - - - - nixexprs.tar.xz - - A tarball containing Nix expressions and files - referenced by them (such as build scripts and patches). At the - top level, the tarball should contain a single directory. That - directory must contain a file default.nix - that serves as the channel’s “entry point”. - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/nix-collect-garbage.xml b/third_party/nix/doc/manual/command-ref/nix-collect-garbage.xml deleted file mode 100644 index 43e0687969..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-collect-garbage.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - - nix-collect-garbage - 1 - Nix - - - - - nix-collect-garbage - delete unreachable store paths - - - - - nix-collect-garbage - - - period - bytes - - - - -Description - -The command nix-collect-garbage is mostly an -alias of nix-store ---gc, that is, it deletes all unreachable paths in -the Nix store to clean up your system. However, it provides two -additional options: (), -which deletes all old generations of all profiles in -/nix/var/nix/profiles by invoking -nix-env --delete-generations old on all profiles -(of course, this makes rollbacks to previous configurations -impossible); and - period, -where period is a value such as 30d, which deletes -all generations older than the specified number of days in all profiles -in /nix/var/nix/profiles (except for the generations -that were active at that point in time). - - - - -Example - -To delete from the Nix store everything that is not used by the -current generations of each profile, do - - -$ nix-collect-garbage -d - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/nix-copy-closure.xml b/third_party/nix/doc/manual/command-ref/nix-copy-closure.xml deleted file mode 100644 index e6dcf180ad..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-copy-closure.xml +++ /dev/null @@ -1,169 +0,0 @@ - - - - nix-copy-closure - 1 - Nix - - - - - nix-copy-closure - copy a closure to or from a remote machine via SSH - - - - - nix-copy-closure - - - - - - - - - - - - - - user@machine - - paths - - - - -Description - -nix-copy-closure gives you an easy and -efficient way to exchange software between machines. 
Given one or -more Nix store paths on the local -machine, nix-copy-closure computes the closure of -those paths (i.e. all their dependencies in the Nix store), and copies -all paths in the closure to the remote machine via the -ssh (Secure Shell) command. With the -, the direction is reversed: -the closure of paths on a remote machine is -copied to the Nix store on the local machine. - -This command is efficient because it only sends the store paths -that are missing on the target machine. - -Since nix-copy-closure calls -ssh, you may be asked to type in the appropriate -password or passphrase. In fact, you may be asked -twice because nix-copy-closure -currently connects twice to the remote machine, first to get the set -of paths missing on the target machine, and second to send the dump of -those paths. If this bothers you, use -ssh-agent. - - -Options - - - - - - Copy the closure of - paths from the local Nix store to the - Nix store on machine. This is the - default. - - - - - - Copy the closure of - paths from the Nix store on - machine to the local Nix - store. - - - - - - Enable compression of the SSH - connection. - - - - - - Also copy the outputs of store derivations - included in the closure. - - - - / - - Attempt to download missing paths on the target - machine using Nix’s substitute mechanism. Any paths that cannot - be substituted on the target are still copied normally from the - source. This is useful, for instance, if the connection between - the source and target machine is slow, but the connection between - the target machine and nixos.org (the default - binary cache server) is fast. - - - - - - Show verbose output. - - - - - - - - -Environment variables - - - - NIX_SSHOPTS - - Additional options to be passed to - ssh on the command line. - - - - - - - - -Examples - -Copy Firefox with all its dependencies to a remote machine: - - -$ nix-copy-closure --to alice@itchy.labs $(type -tP firefox) - - - -Copy Subversion from a remote machine and then install it into a -user environment: - - -$ nix-copy-closure --from alice@itchy.labs \ - /nix/store/0dj0503hjxy5mbwlafv1rsbdiyx1gkdy-subversion-1.4.4 -$ nix-env -i /nix/store/0dj0503hjxy5mbwlafv1rsbdiyx1gkdy-subversion-1.4.4 - - - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/nix-daemon.xml b/third_party/nix/doc/manual/command-ref/nix-daemon.xml deleted file mode 100644 index 9159d15d1c..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-daemon.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - nix-daemon - 8 - Nix - - - - - nix-daemon - Nix multi-user support daemon - - - - - nix-daemon - - - - -Description - -The Nix daemon is necessary in multi-user Nix installations. It -performs build actions and other operations on the Nix store on behalf -of unprivileged users. - - - - -Options - - - - - - Causes the nix daemon to forward stdin and stdout to and - from the actual daemon socket. 
This is used when communicating with a remote - store over SSH - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/nix-env.xml b/third_party/nix/doc/manual/command-ref/nix-env.xml deleted file mode 100644 index d257a5e49c..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-env.xml +++ /dev/null @@ -1,1505 +0,0 @@ - - - - nix-env - 1 - Nix - - - - - nix-env - manipulate or query Nix user environments - - - - - nix-env - - name value - name value - - - - - - path - - - - - - - path - - - - system - - - operation - options - arguments - - - - -Description - -The command nix-env is used to manipulate Nix -user environments. User environments are sets of software packages -available to a user at some point in time. In other words, they are a -synthesised view of the programs available in the Nix store. There -may be many user environments: different users can have different -environments, and individual users can switch between different -environments. - -nix-env takes exactly one -operation flag which indicates the subcommand to -be performed. These are documented below. - - - - - - - -Selectors - -Several commands, such as nix-env -q and -nix-env -i, take a list of arguments that specify -the packages on which to operate. These are extended regular -expressions that must match the entire name of the package. (For -details on regular expressions, see -regex7.) -The match is case-sensitive. The regular expression can optionally be -followed by a dash and a version number; if omitted, any version of -the package will match. Here are some examples: - - - - - firefox - Matches the package name - firefox and any version. - - - - firefox-32.0 - Matches the package name - firefox and version - 32.0. - - - - gtk\\+ - Matches the package name - gtk+. The + character must - be escaped using a backslash to prevent it from being interpreted - as a quantifier, and the backslash must be escaped in turn with - another backslash to ensure that the shell passes it - on. - - - - .\* - Matches any package name. This is the default for - most commands. - - - - '.*zip.*' - Matches any package name containing the string - zip. Note the dots: '*zip*' - does not work, because in a regular expression, the character - * is interpreted as a - quantifier. - - - - '.*(firefox|chromium).*' - Matches any package name containing the strings - firefox or - chromium. - - - - - - - - - - - - -Common options - -This section lists the options that are common to all -operations. These options are allowed for every subcommand, though -they may not always have an effect. See -also . - - - - / path - - Specifies the Nix expression (designated below as - the active Nix expression) used by the - , , and - operations to obtain - derivations. The default is - ~/.nix-defexpr. - - If the argument starts with http:// or - https://, it is interpreted as the URL of a - tarball that will be downloaded and unpacked to a temporary - location. The tarball must include a single top-level directory - containing at least a file named default.nix. - - - - - - / path - - Specifies the profile to be used by those - operations that operate on a profile (designated below as the - active profile). A profile is a sequence of - user environments called generations, one of - which is the current - generation. - - - - - - For the , - , , - , - and - operations, this flag will cause - nix-env to print what - would be done if this flag had not been - specified, without actually doing it. 
- - also prints out which paths will - be substituted (i.e., - downloaded) and which paths will be built from source (because no - substitute is available). - - - - system - - By default, operations such as show derivations matching any platform. This - option allows you to use derivations for the specified platform - system. - - - - - - - - - - - - - - - -Files - - - - ~/.nix-defexpr - - The source for the default Nix - expressions used by the , - , and operations to obtain derivations. The - option may be used to override this - default. - - If ~/.nix-defexpr is a file, - it is loaded as a Nix expression. If the expression - is a set, it is used as the default Nix expression. - If the expression is a function, an empty set is passed - as argument and the return value is used as - the default Nix expression. - - If ~/.nix-defexpr is a directory - containing a default.nix file, that file - is loaded as in the above paragraph. - - If ~/.nix-defexpr is a directory without - a default.nix file, then its contents - (both files and subdirectories) are loaded as Nix expressions. - The expressions are combined into a single set, each expression - under an attribute with the same name as the original file - or subdirectory. - - - For example, if ~/.nix-defexpr contains - two files, foo.nix and bar.nix, - then the default Nix expression will essentially be - - -{ - foo = import ~/.nix-defexpr/foo.nix; - bar = import ~/.nix-defexpr/bar.nix; -} - - - - The file manifest.nix is always ignored. - Subdirectories without a default.nix file - are traversed recursively in search of more Nix expressions, - but the names of these intermediate directories are not - added to the attribute paths of the default Nix expression. - - The command nix-channel places symlinks - to the downloaded Nix expressions from each subscribed channel in - this directory. - - - - - ~/.nix-profile - - A symbolic link to the user's current profile. By - default, this symlink points to - prefix/var/nix/profiles/default. - The PATH environment variable should include - ~/.nix-profile/bin for the user environment - to be visible to the user. - - - - - - - - - - - -Operation <option>--install</option> - -Synopsis - - - nix-env - - - - - - - - - - - - - - args - - - - - -Description - -The install operation creates a new user environment, based on -the current generation of the active profile, to which a set of store -paths described by args is added. The -arguments args map to store paths in a -number of possible ways: - - - - By default, args is a set - of derivation names denoting derivations in the active Nix - expression. These are realised, and the resulting output paths are - installed. Currently installed derivations with a name equal to the - name of a derivation being added are removed unless the option - is - specified. - - If there are multiple derivations matching a name in - args that have the same name (e.g., - gcc-3.3.6 and gcc-4.1.1), then - the derivation with the highest priority is - used. A derivation can define a priority by declaring the - meta.priority attribute. This attribute should - be a number, with a higher value denoting a lower priority. The - default priority is 0. - - If there are multiple matching derivations with the same - priority, then the derivation with the highest version will be - installed. - - You can force the installation of multiple derivations with - the same name by being specific about the versions. 
For instance, - nix-env -i gcc-3.3.6 gcc-4.1.1 will install both - version of GCC (and will probably cause a user environment - conflict!). - - If - () is specified, the arguments are - attribute paths that select attributes from the - top-level Nix expression. This is faster than using derivation - names and unambiguous. To find out the attribute paths of available - packages, use nix-env -qaP. - - If - path is given, - args is a set of names denoting installed - store paths in the profile path. This is - an easy way to copy user environment elements from one profile to - another. - - If is given, - args are Nix functions that are called with the - active Nix expression as their single argument. The derivations - returned by those function calls are installed. This allows - derivations to be specified in an unambiguous way, which is necessary - if there are multiple derivations with the same - name. - - If args are store - derivations, then these are realised, and the resulting - output paths are installed. - - If args are store paths - that are not store derivations, then these are realised and - installed. - - By default all outputs are installed for each derivation. - That can be reduced by setting meta.outputsToInstall. - - - - - - - - - -Flags - - - - / - - Use only derivations for which a substitute is - registered, i.e., there is a pre-built binary available that can - be downloaded in lieu of building the derivation. Thus, no - packages will be built from source. - - - - - - - Do not remove derivations with a name matching one - of the derivations being installed. Usually, trying to have two - versions of the same package installed in the same generation of a - profile will lead to an error in building the generation, due to - file name clashes between the two versions. However, this is not - the case for all packages. - - - - - - - Remove all previously installed packages first. - This is equivalent to running nix-env -e '.*' - first, except that everything happens in a single - transaction. - - - - - - - - -Examples - -To install a specific version of gcc from the -active Nix expression: - - -$ nix-env --install gcc-3.3.2 -installing `gcc-3.3.2' -uninstalling `gcc-3.1' - -Note the previously installed version is removed, since - was not specified. - -To install an arbitrary version: - - -$ nix-env --install gcc -installing `gcc-3.3.2' - - - -To install using a specific attribute: - - -$ nix-env -i -A gcc40mips -$ nix-env -i -A xorg.xorgserver - - - -To install all derivations in the Nix expression foo.nix: - - -$ nix-env -f ~/foo.nix -i '.*' - - - -To copy the store path with symbolic name gcc -from another profile: - - -$ nix-env -i --from-profile /nix/var/nix/profiles/foo gcc - - - -To install a specific store derivation (typically created by -nix-instantiate): - - -$ nix-env -i /nix/store/fibjb1bfbpm5mrsxc4mh2d8n37sxh91i-gcc-3.4.3.drv - - - -To install a specific output path: - - -$ nix-env -i /nix/store/y3cgx0xj1p4iv9x0pnnmdhr8iyg741vk-gcc-3.4.3 - - - -To install from a Nix expression specified on the command-line: - - -$ nix-env -f ./foo.nix -i -E \ - 'f: (f {system = "i686-linux";}).subversionWithJava' - -I.e., this evaluates to (f: (f {system = -"i686-linux";}).subversionWithJava) (import ./foo.nix), thus -selecting the subversionWithJava attribute from the -set returned by calling the function defined in -./foo.nix. 
- -A dry-run tells you which paths will be downloaded or built from -source: - - -$ nix-env -f '<nixpkgs>' -iA hello --dry-run -(dry run; not doing anything) -installing ‘hello-2.10’ -these paths will be fetched (0.04 MiB download, 0.19 MiB unpacked): - /nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10 - ... - - - -To install Firefox from the latest revision in the Nixpkgs/NixOS -14.12 channel: - - -$ nix-env -f https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz -iA firefox - - -(The GitHub repository nixpkgs-channels is updated -automatically from the main nixpkgs repository -after certain tests have succeeded and binaries have been built and -uploaded to the binary cache at cache.nixos.org.) - - - - - - - - - -Operation <option>--upgrade</option> - -Synopsis - - - nix-env - - - - - - - - - - - - args - - - - -Description - -The upgrade operation creates a new user environment, based on -the current generation of the active profile, in which all store paths -are replaced for which there are newer versions in the set of paths -described by args. Paths for which there -are no newer versions are left untouched; this is not an error. It is -also not an error if an element of args -matches no installed derivations. - -For a description of how args is -mapped to a set of store paths, see . If -args describes multiple store paths with -the same symbolic name, only the one with the highest version is -installed. - - - -Flags - - - - - - Only upgrade a derivation to newer versions. This - is the default. - - - - - - In addition to upgrading to newer versions, also - “upgrade” to derivations that have the same version. Version are - not a unique identification of a derivation, so there may be many - derivations that have the same version. This flag may be useful - to force “synchronisation” between the installed and available - derivations. - - - - - - Only “upgrade” to derivations - that have the same version. This may not seem very useful, but it - actually is, e.g., when there is a new release of Nixpkgs and you - want to replace installed applications with the same versions - built against newer dependencies (to reduce the number of - dependencies floating around on your system). - - - - - - In addition to upgrading to newer versions, also - “upgrade” to derivations that have the same or a lower version. - I.e., derivations may actually be downgraded depending on what is - available in the active Nix expression. - - - - - -For the other flags, see . - - - -Examples - - -$ nix-env --upgrade gcc -upgrading `gcc-3.3.1' to `gcc-3.4' - -$ nix-env -u gcc-3.3.2 --always (switch to a specific version) -upgrading `gcc-3.4' to `gcc-3.3.2' - -$ nix-env --upgrade pan -(no upgrades available, so nothing happens) - -$ nix-env -u (try to upgrade everything) -upgrading `hello-2.1.2' to `hello-2.1.3' -upgrading `mozilla-1.2' to `mozilla-1.4' - - - -Versions - -The upgrade operation determines whether a derivation -y is an upgrade of a derivation -x by looking at their respective -name attributes. The names (e.g., -gcc-3.3.1 are split into two parts: the package -name (gcc), and the version -(3.3.1). The version part starts after the first -dash not followed by a letter. x is considered an -upgrade of y if their package names match, and the -version of y is higher that that of -x. - -The versions are compared by splitting them into contiguous -components of numbers and letters. E.g., 3.3.1pre5 -is split into [3, 3, 1, "pre", 5]. 
These lists are -then compared lexicographically (from left to right). Corresponding -components a and b are compared -as follows. If they are both numbers, integer comparison is used. If -a is an empty string and b is a -number, a is considered less than -b. The special string component -pre (for pre-release) is -considered to be less than other components. String components are -considered less than number components. Otherwise, they are compared -lexicographically (i.e., using case-sensitive string comparison). - -This is illustrated by the following examples: - - -1.0 < 2.3 -2.1 < 2.3 -2.3 = 2.3 -2.5 > 2.3 -3.1 > 2.3 -2.3.1 > 2.3 -2.3.1 > 2.3a -2.3pre1 < 2.3 -2.3pre3 < 2.3pre12 -2.3a < 2.3c -2.3pre1 < 2.3c -2.3pre1 < 2.3q - - - - - - - - - - - -Operation <option>--uninstall</option> - -Synopsis - - - nix-env - - - - - drvnames - - - -Description - -The uninstall operation creates a new user environment, based on -the current generation of the active profile, from which the store -paths designated by the symbolic names -names are removed. - - - -Examples - - -$ nix-env --uninstall gcc -$ nix-env -e '.*' (remove everything) - - - - - - - - - -Operation <option>--set</option> - -Synopsis - - - nix-env - - drvname - - - -Description - -The operation modifies the current generation of a -profile so that it contains exactly the specified derivation, and nothing else. - - - - -Examples - - -The following updates a profile such that its current generation will contain -just Firefox: - - -$ nix-env -p /nix/var/nix/profiles/browser --set firefox - - - - - - - - - - - -Operation <option>--set-flag</option> - -Synopsis - - - nix-env - - name - value - drvnames - - - -Description - -The operation allows meta attributes -of installed packages to be modified. There are several attributes -that can be usefully modified, because they affect the behaviour of -nix-env or the user environment build -script: - - - - priority can be changed to - resolve filename clashes. The user environment build script uses - the meta.priority attribute of derivations to - resolve filename collisions between packages. Lower priority values - denote a higher priority. For instance, the GCC wrapper package and - the Binutils package in Nixpkgs both have a file - bin/ld, so previously if you tried to install - both you would get a collision. Now, on the other hand, the GCC - wrapper declares a higher priority than Binutils, so the former’s - bin/ld is symlinked in the user - environment. - - keep can be set to - true to prevent the package from being upgraded - or replaced. This is useful if you want to hang on to an older - version of a package. - - active can be set to - false to “disable” the package. That is, no - symlinks will be generated to the files of the package, but it - remains part of the profile (so it won’t be garbage-collected). It - can be set back to true to re-enable the - package. - - - - - - - -Examples - -To prevent the currently installed Firefox from being upgraded: - - -$ nix-env --set-flag keep true firefox - -After this, nix-env -u will ignore Firefox. - -To disable the currently installed Firefox, then install a new -Firefox while the old remains part of the profile: - - -$ nix-env -q -firefox-2.0.0.9 (the current one) - -$ nix-env --preserve-installed -i firefox-2.0.0.11 -installing `firefox-2.0.0.11' -building path(s) `/nix/store/myy0y59q3ig70dgq37jqwg1j0rsapzsl-user-environment' -collision between `/nix/store/...-firefox-2.0.0.11/bin/firefox' - and `/nix/store/...-firefox-2.0.0.9/bin/firefox'. 
-(i.e., can’t have two active at the same time) - -$ nix-env --set-flag active false firefox -setting flag on `firefox-2.0.0.9' - -$ nix-env --preserve-installed -i firefox-2.0.0.11 -installing `firefox-2.0.0.11' - -$ nix-env -q -firefox-2.0.0.11 (the enabled one) -firefox-2.0.0.9 (the disabled one) - - - -To make files from binutils take precedence -over files from gcc: - - -$ nix-env --set-flag priority 5 binutils -$ nix-env --set-flag priority 10 gcc - - - - - - - - - - - -Operation <option>--query</option> - -Synopsis - - - nix-env - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - attribute-path - - - - - names - - - - - -Description - -The query operation displays information about either the store -paths that are installed in the current generation of the active -profile (), or the derivations that are -available for installation in the active Nix expression -(). It only prints information about -derivations whose symbolic name matches one of -names. - -The derivations are sorted by their name -attributes. - - - - -Source selection - -The following flags specify the set of things on which the query -operates. - - - - - - The query operates on the store paths that are - installed in the current generation of the active profile. This - is the default. - - - - - - - The query operates on the derivations that are - available in the active Nix expression. - - - - - - - - -Queries - -The following flags specify what information to display about -the selected derivations. Multiple flags may be specified, in which -case the information is shown in the order given here. Note that the -name of the derivation is shown unless is -specified. - - - - - - - - Print the result in an XML representation suitable - for automatic processing by other tools. The root element is - called items, which contains a - item element for each available or installed - derivation. The fields discussed below are all stored in - attributes of the item - elements. - - - - - - Print the result in a JSON representation suitable - for automatic processing by other tools. - - - - / - - Show only derivations for which a substitute is - registered, i.e., there is a pre-built binary available that can - be downloaded in lieu of building the derivation. Thus, this - shows all packages that probably can be installed - quickly. - - - - - - - Print the status of the - derivation. The status consists of three characters. The first - is I or -, indicating - whether the derivation is currently installed in the current - generation of the active profile. This is by definition the case - for , but not for - . The second is P - or -, indicating whether the derivation is - present on the system. This indicates whether installation of an - available derivation will require the derivation to be built. The - third is S or -, indicating - whether a substitute is available for the - derivation. - - - - - - - Print the attribute path of - the derivation, which can be used to unambiguously select it using - the option - available in commands that install derivations like - nix-env --install. - - - - - - Suppress printing of the name - attribute of each derivation. - - - - / - - - Compare installed versions to available versions, - or vice versa (if is given). This is - useful for quickly seeing whether upgrades for installed - packages are available in a Nix expression. 
A column is added - with the following meaning: - - - - < version - - A newer version of the package is available - or installed. - - - - = version - - At most the same version of the package is - available or installed. - - - - > version - - Only older versions of the package are - available or installed. - - - - - ? - - No version of the package is available or - installed. - - - - - - - - - - - - Print the system attribute of - the derivation. - - - - - - Print the path of the store - derivation. - - - - - - Print the output path of the - derivation. - - - - - - Print a short (one-line) description of the - derivation, if available. The description is taken from the - meta.description attribute of the - derivation. - - - - - - Print all of the meta-attributes of the - derivation. This option is only available with - or . - - - - - - - - -Examples - -To show installed packages: - - -$ nix-env -q -bison-1.875c -docbook-xml-4.2 -firefox-1.0.4 -MPlayer-1.0pre7 -ORBit2-2.8.3 - - - - - -To show available packages: - - -$ nix-env -qa -firefox-1.0.7 -GConf-2.4.0.1 -MPlayer-1.0pre7 -ORBit2-2.8.3 - - - - - -To show the status of available packages: - - -$ nix-env -qas --P- firefox-1.0.7 (not installed but present) ---S GConf-2.4.0.1 (not present, but there is a substitute for fast installation) ---S MPlayer-1.0pre3 (i.e., this is not the installed MPlayer, even though the version is the same!) -IP- ORBit2-2.8.3 (installed and by definition present) - - - - - -To show available packages in the Nix expression foo.nix: - - -$ nix-env -f ./foo.nix -qa -foo-1.2.3 - - - - -To compare installed versions to what’s available: - - -$ nix-env -qc -... -acrobat-reader-7.0 - ? (package is not available at all) -autoconf-2.59 = 2.59 (same version) -firefox-1.0.4 < 1.0.7 (a more recent version is available) -... - - - - -To show all packages with “zip” in the name: - - -$ nix-env -qa '.*zip.*' -bzip2-1.0.6 -gzip-1.6 -zip-3.0 - - - - - -To show all packages with “firefox” or -“chromium” in the name: - - -$ nix-env -qa '.*(firefox|chromium).*' -chromium-37.0.2062.94 -chromium-beta-38.0.2125.24 -firefox-32.0.3 -firefox-with-plugins-13.0.1 - - - - - -To show all packages in the latest revision of the Nixpkgs -repository: - - -$ nix-env -f https://github.com/NixOS/nixpkgs/archive/master.tar.gz -qa - - - - - - - - - - - - -Operation <option>--switch-profile</option> - -Synopsis - - - nix-env - - - - - path - - - - - -Description - -This operation makes path the current -profile for the user. That is, the symlink -~/.nix-profile is made to point to -path. - - - -Examples - - -$ nix-env -S ~/my-profile - - - - - - - - - -Operation <option>--list-generations</option> - -Synopsis - - - nix-env - - - - - - -Description - -This operation print a list of all the currently existing -generations for the active profile. These may be switched to using -the operation. It also prints -the creation date of the generation, and indicates the current -generation. - - - - -Examples - - -$ nix-env --list-generations - 95 2004-02-06 11:48:24 - 96 2004-02-06 11:49:01 - 97 2004-02-06 16:22:45 - 98 2004-02-06 16:24:33 (current) - - - - - - - - - -Operation <option>--delete-generations</option> - -Synopsis - - - nix-env - - generations - - - - - -Description - -This operation deletes the specified generations of the current -profile. 
The generations can be a list of generation numbers, the -special value old to delete all non-current -generations, a value such as 30d to delete all -generations older than the specified number of days (except for the -generation that was active at that point in time), or a value such as -+5 to keep the last 5 generations -ignoring any newer than current, e.g., if 30 is the current -generation +5 will delete generation 25 -and all older generations. -Periodically deleting old generations is important to make garbage collection -effective. - - - -Examples - - -$ nix-env --delete-generations 3 4 8 - -$ nix-env --delete-generations +5 - -$ nix-env --delete-generations 30d - -$ nix-env -p other_profile --delete-generations old - - - - - - - - - -Operation <option>--switch-generation</option> - -Synopsis - - - nix-env - - - - - generation - - - - - -Description - -This operation makes generation number -generation the current generation of the -active profile. That is, if the -profile is the path to -the active profile, then the symlink -profile is made to -point to -profile-generation-link, -which is in turn a symlink to the actual user environment in the Nix -store. - -Switching will fail if the specified generation does not exist. - - - - -Examples - - -$ nix-env -G 42 -switching from generation 50 to 42 - - - - - - - - - -Operation <option>--rollback</option> - -Synopsis - - - nix-env - - - - - -Description - -This operation switches to the “previous” generation of the -active profile, that is, the highest numbered generation lower than -the current generation, if it exists. It is just a convenience -wrapper around and -. - - - - -Examples - - -$ nix-env --rollback -switching from generation 92 to 91 - -$ nix-env --rollback -error: no generation older than the current (91) exists - - - - - - -Environment variables - - - - NIX_PROFILE - - Location of the Nix profile. Defaults to the - target of the symlink ~/.nix-profile, if it - exists, or /nix/var/nix/profiles/default - otherwise. - - - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/nix-hash.xml b/third_party/nix/doc/manual/command-ref/nix-hash.xml deleted file mode 100644 index 80263e18e3..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-hash.xml +++ /dev/null @@ -1,176 +0,0 @@ - - - - nix-hash - 1 - Nix - - - - - nix-hash - compute the cryptographic hash of a path - - - - - nix-hash - - - - hashAlgo - path - - - nix-hash - - hash - - - nix-hash - - hash - - - - -Description - -The command nix-hash computes the -cryptographic hash of the contents of each -path and prints it on standard output. By -default, it computes an MD5 hash, but other hash algorithms are -available as well. The hash is printed in hexadecimal. To generate -the same hash as nix-prefetch-url you have to -specify multiple arguments, see below for an example. - -The hash is computed over a serialisation -of each path: a dump of the file system tree rooted at the path. This -allows directories and symlinks to be hashed as well as regular files. -The dump is in the NAR format produced by nix-store -. Thus, nix-hash -path yields the same -cryptographic hash as nix-store --dump -path | md5sum. - - - - -Options - - - - - - Print the cryptographic hash of the contents of - each regular file path. That is, do - not compute the hash over the dump of - path. The result is identical to that - produced by the GNU commands md5sum and - sha1sum. - - - - - - Print the hash in a base-32 representation rather - than hexadecimal. 
This base-32 representation is more compact and - can be used in Nix expressions (such as in calls to - fetchurl). - - - - - - Truncate hashes longer than 160 bits (such as - SHA-256) to 160 bits. - - - - hashAlgo - - Use the specified cryptographic hash algorithm, - which can be one of md5, - sha1, and - sha256. - - - - - - Don’t hash anything, but convert the base-32 hash - representation hash to - hexadecimal. - - - - - - Don’t hash anything, but convert the hexadecimal - hash representation hash to - base-32. - - - - - - - - -Examples - -Computing the same hash as nix-prefetch-url: - -$ nix-prefetch-url file://<(echo test) -1lkgqb6fclns49861dwk9rzb6xnfkxbpws74mxnx01z9qyv1pjpj -$ nix-hash --type sha256 --flat --base32 <(echo test) -1lkgqb6fclns49861dwk9rzb6xnfkxbpws74mxnx01z9qyv1pjpj - - - -Computing hashes: - - -$ mkdir test -$ echo "hello" > test/world - -$ nix-hash test/ (MD5 hash; default) -8179d3caeff1869b5ba1744e5a245c04 - -$ nix-store --dump test/ | md5sum (for comparison) -8179d3caeff1869b5ba1744e5a245c04 - - -$ nix-hash --type sha1 test/ -e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6 - -$ nix-hash --type sha1 --base32 test/ -nvd61k9nalji1zl9rrdfmsmvyyjqpzg4 - -$ nix-hash --type sha256 --flat test/ -error: reading file `test/': Is a directory - -$ nix-hash --type sha256 --flat test/world -5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03 - - - -Converting between hexadecimal and base-32: - - -$ nix-hash --type sha1 --to-base32 e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6 -nvd61k9nalji1zl9rrdfmsmvyyjqpzg4 - -$ nix-hash --type sha1 --to-base16 nvd61k9nalji1zl9rrdfmsmvyyjqpzg4 -e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6 - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/nix-instantiate.xml b/third_party/nix/doc/manual/command-ref/nix-instantiate.xml deleted file mode 100644 index 3fd2ef2a95..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-instantiate.xml +++ /dev/null @@ -1,278 +0,0 @@ - - - - nix-instantiate - 1 - Nix - - - - - nix-instantiate - instantiate store derivations from Nix expressions - - - - - nix-instantiate - - - - - - - - - - - name value - - - - - - attrPath - - path - - - - - - - files - - - nix-instantiate - - files - - - - -Description - -The command nix-instantiate generates store derivations from (high-level) -Nix expressions. It evaluates the Nix expressions in each of -files (which defaults to -./default.nix). Each top-level expression -should evaluate to a derivation, a list of derivations, or a set of -derivations. The paths of the resulting store derivations are printed -on standard output. - -If files is the character --, then a Nix expression will be read from standard -input. - -See also for a list of common options. - - - - -Options - - - - - path - - - See the corresponding - options in nix-store. - - - - - - Just parse the input files, and print their - abstract syntax trees on standard output in ATerm - format. - - - - - - Just parse and evaluate the input files, and print - the resulting values on standard output. No instantiation of - store derivations takes place. - - - - - - Look up the given files in Nix’s search path (as - specified by the NIX_PATH - environment variable). If found, print the corresponding absolute - paths on standard output. For instance, if - NIX_PATH is - nixpkgs=/home/alice/nixpkgs, then - nix-instantiate --find-file nixpkgs/default.nix - will print - /home/alice/nixpkgs/default.nix. - - - - - - When used with , - recursively evaluate list elements and attributes. 
Normally, such - sub-expressions are left unevaluated (since the Nix expression - language is lazy). - - This option can cause non-termination, because lazy - data structures can be infinitely large. - - - - - - - While instantiating the expression, the evaluator will - print the full path to any files it reads with the prefix - trace-file-access: to the standard error. - - - - - - When used with , print the resulting - value as an JSON representation of the abstract syntax tree rather - than as an ATerm. - - - - - - When used with , print the resulting - value as an XML representation of the abstract syntax tree rather than as - an ATerm. The schema is the same as that used by the toXML built-in. - - - - - - - When used with , perform - evaluation in read/write mode so nix language features that - require it will still work (at the cost of needing to do - instantiation of every evaluated derivation). If this option is - not enabled, there may be uninstantiated store paths in the final - output. - - - - - - - - - - - - - - -Examples - -Instantiating store derivations from a Nix expression, and -building them using nix-store: - - -$ nix-instantiate test.nix (instantiate) -/nix/store/cigxbmvy6dzix98dxxh9b6shg7ar5bvs-perl-BerkeleyDB-0.26.drv - -$ nix-store -r $(nix-instantiate test.nix) (build) -... -/nix/store/qhqk4n8ci095g3sdp93x7rgwyh9rdvgk-perl-BerkeleyDB-0.26 (output path) - -$ ls -l /nix/store/qhqk4n8ci095g3sdp93x7rgwyh9rdvgk-perl-BerkeleyDB-0.26 -dr-xr-xr-x 2 eelco users 4096 1970-01-01 01:00 lib -... - - - -You can also give a Nix expression on the command line: - - -$ nix-instantiate -E 'with import <nixpkgs> { }; hello' -/nix/store/j8s4zyv75a724q38cb0r87rlczaiag4y-hello-2.8.drv - - -This is equivalent to: - - -$ nix-instantiate '<nixpkgs>' -A hello - - - - -Parsing and evaluating Nix expressions: - - -$ nix-instantiate --parse -E '1 + 2' -1 + 2 - -$ nix-instantiate --eval -E '1 + 2' -3 - -$ nix-instantiate --eval --xml -E '1 + 2' - - - -]]> - - - -The difference between non-strict and strict evaluation: - - -$ nix-instantiate --eval --xml -E 'rec { x = "foo"; y = x; }' -... - - - - - ]]> -... - -Note that y is left unevaluated (the XML -representation doesn’t attempt to show non-normal forms). - - -$ nix-instantiate --eval --xml --strict -E 'rec { x = "foo"; y = x; }' -... - - - - - ]]> -... - - - - - -Conformance - The option is a nonstandard - extension added by Tvix in 2020. - - -Environment variables - - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/nix-prefetch-url.xml b/third_party/nix/doc/manual/command-ref/nix-prefetch-url.xml deleted file mode 100644 index 621ded72ec..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-prefetch-url.xml +++ /dev/null @@ -1,131 +0,0 @@ - - - - nix-prefetch-url - 1 - Nix - - - - - nix-prefetch-url - copy a file from a URL into the store and print its hash - - - - - nix-prefetch-url - - hashAlgo - - - name - url - hash - - - -Description - -The command nix-prefetch-url downloads the -file referenced by the URL url, prints its -cryptographic hash, and copies it into the Nix store. The file name -in the store is -hash-baseName, -where baseName is everything following the -final slash in url. - -This command is just a convenience for Nix expression writers. -Often a Nix expression fetches some source distribution from the -network using the fetchurl expression contained in -Nixpkgs. However, fetchurl requires a -cryptographic hash. 
If you don't know the hash, you would have to -download the file first, and then fetchurl would -download it again when you build your Nix expression. Since -fetchurl uses the same name for the downloaded file -as nix-prefetch-url, the redundant download can be -avoided. - -If hash is specified, then a download -is not performed if the Nix store already contains a file with the -same hash and base name. Otherwise, the file is downloaded, and an -error is signaled if the actual hash of the file does not match the -specified hash. - -This command prints the hash on standard output. Additionally, -if the option is used, the path of the -downloaded file in the Nix store is also printed. - - - - -Options - - - - hashAlgo - - Use the specified cryptographic hash algorithm, - which can be one of md5, - sha1, and - sha256. - - - - - - Print the store path of the downloaded file on - standard output. - - - - - - Unpack the archive (which must be a tarball or zip - file) and add the result to the Nix store. The resulting hash can - be used with functions such as Nixpkgs’s - fetchzip or - fetchFromGitHub. - - - - name - - Override the name of the file in the Nix store. By - default, this is - hash-basename, - where basename is the last component of - url. Overriding the name is necessary - when basename contains characters that - are not allowed in Nix store paths. - - - - - - - - -Examples - - -$ nix-prefetch-url ftp://ftp.gnu.org/pub/gnu/hello/hello-2.10.tar.gz -0ssi1wpaf7plaswqqjwigppsg5fyh99vdlb9kzl7c9lng89ndq1i - -$ nix-prefetch-url --print-path mirror://gnu/hello/hello-2.10.tar.gz -0ssi1wpaf7plaswqqjwigppsg5fyh99vdlb9kzl7c9lng89ndq1i -/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz - -$ nix-prefetch-url --unpack --print-path https://github.com/NixOS/patchelf/archive/0.8.tar.gz -079agjlv0hrv7fxnx9ngipx14gyncbkllxrp9cccnh3a50fxcmy7 -/nix/store/19zrmhm3m40xxaw81c8cqm6aljgrnwj2-0.8.tar.gz - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/nix-shell.xml b/third_party/nix/doc/manual/command-ref/nix-shell.xml deleted file mode 100644 index bb4a4e4201..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-shell.xml +++ /dev/null @@ -1,397 +0,0 @@ - - - - nix-shell - 1 - Nix - - - - - nix-shell - start an interactive shell based on a Nix expression - - - - - nix-shell - name value - name value - - - - - - attrPath - - cmd - cmd - regexp - - name - - - - - - - packages - - path - - - - -Description - -The command nix-shell will build the -dependencies of the specified derivation, but not the derivation -itself. It will then start an interactive shell in which all -environment variables defined by the derivation -path have been set to their corresponding -values, and the script $stdenv/setup has been -sourced. This is useful for reproducing the environment of a -derivation for development. - -If path is not given, -nix-shell defaults to -shell.nix if it exists, and -default.nix otherwise. - -If path starts with -http:// or https://, it is -interpreted as the URL of a tarball that will be downloaded and -unpacked to a temporary location. The tarball must include a single -top-level directory containing at least a file named -default.nix. - -If the derivation defines the variable -shellHook, it will be evaluated after -$stdenv/setup has been sourced. Since this hook is -not executed by regular Nix builds, it allows you to perform -initialisation specific to nix-shell. 
For example, -the derivation attribute - - -shellHook = - '' - echo "Hello shell" - ''; - - -will cause nix-shell to print Hello shell. - - - - -Options - -All options not listed here are passed to nix-store ---realise, except for and - / which are passed to -nix-instantiate. See -also . - - - - cmd - - In the environment of the derivation, run the - shell command cmd. This command is - executed in an interactive shell. (Use to - use a non-interactive shell instead.) However, a call to - exit is implicitly added to the command, so the - shell will exit after running the command. To prevent this, add - return at the end; e.g. --command - "echo Hello; return" will print Hello - and then drop you into the interactive shell. This can be useful - for doing any additional initialisation. - - - - cmd - - Like , but executes the - command in a non-interactive shell. This means (among other - things) that if you hit Ctrl-C while the command is running, the - shell exits. - - - - regexp - - Do not build any dependencies whose store path - matches the regular expression regexp. - This option may be specified multiple times. - - - - - - If this flag is specified, the environment is - almost entirely cleared before the interactive shell is started, - so you get an environment that more closely corresponds to the - “real” Nix build. A few variables, in particular - HOME, USER and - DISPLAY, are retained. Note that - ~/.bashrc and (depending on your Bash - installation) /etc/bashrc are still sourced, - so any variables set there will affect the interactive - shell. - - - - / packages - - Set up an environment in which the specified - packages are present. The command line arguments are interpreted - as attribute names inside the Nix Packages collection. Thus, - nix-shell -p libjpeg openjdk will start a shell - in which the packages denoted by the attribute names - libjpeg and openjdk are - present. - - - - interpreter - - The chained script interpreter to be invoked by - nix-shell. Only applicable in - #!-scripts (described below). - - - - name - - When a shell is started, - keep the listed environment variables. - - - - - -The following common options are supported: - - - - - - - - -Environment variables - - - - NIX_BUILD_SHELL - - Shell used to start the interactive environment. - Defaults to the bash found in PATH. - - - - - - - - -Examples - -To build the dependencies of the package Pan, and start an -interactive shell in which to build it: - - -$ nix-shell '<nixpkgs>' -A pan -[nix-shell]$ unpackPhase -[nix-shell]$ cd pan-* -[nix-shell]$ configurePhase -[nix-shell]$ buildPhase -[nix-shell]$ ./pan/gui/pan - - -To clear the environment first, and do some additional automatic -initialisation of the interactive shell: - - -$ nix-shell '<nixpkgs>' -A pan --pure \ - --command 'export NIX_DEBUG=1; export NIX_CORES=8; return' - - -Nix expressions can also be given on the command line. For instance, -the following starts a shell containing the packages -sqlite and libX11: - - -$ nix-shell -E 'with import <nixpkgs> { }; runCommand "dummy" { buildInputs = [ sqlite xorg.libX11 ]; } ""' - - -A shorter way to do the same is: - - -$ nix-shell -p sqlite xorg.libX11 -[nix-shell]$ echo $NIX_LDFLAGS -… -L/nix/store/j1zg5v…-sqlite-3.8.0.2/lib -L/nix/store/0gmcz9…-libX11-1.6.1/lib … - - -The -p flag looks up Nixpkgs in the Nix search -path. You can override it by passing or setting -NIX_PATH. 
For example, the following gives you a shell -containing the Pan package from a specific revision of Nixpkgs: - - -$ nix-shell -p pan -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/8a3eea054838b55aca962c3fbde9c83c102b8bf2.tar.gz - -[nix-shell:~]$ pan --version -Pan 0.139 - - - - - - - -Use as a <literal>#!</literal>-interpreter - -You can use nix-shell as a script interpreter -to allow scripts written in arbitrary languages to obtain their own -dependencies via Nix. This is done by starting the script with the -following lines: - - -#! /usr/bin/env nix-shell -#! nix-shell -i real-interpreter -p packages - - -where real-interpreter is the “real” script -interpreter that will be invoked by nix-shell after -it has obtained the dependencies and initialised the environment, and -packages are the attribute names of the -dependencies in Nixpkgs. - -The lines starting with #! nix-shell specify -nix-shell options (see above). Note that you cannot -write #! /usr/bin/env nix-shell -i ... because -many operating systems only allow one argument in -#! lines. - -For example, here is a Python script that depends on Python and -the prettytable package: - - -#! /usr/bin/env nix-shell -#! nix-shell -i python -p python pythonPackages.prettytable - -import prettytable - -# Print a simple table. -t = prettytable.PrettyTable(["N", "N^2"]) -for n in range(1, 10): t.add_row([n, n * n]) -print t - - - - -Similarly, the following is a Perl script that specifies that it -requires Perl and the HTML::TokeParser::Simple and -LWP packages: - - -#! /usr/bin/env nix-shell -#! nix-shell -i perl -p perl perlPackages.HTMLTokeParserSimple perlPackages.LWP - -use HTML::TokeParser::Simple; - -# Fetch nixos.org and print all hrefs. -my $p = HTML::TokeParser::Simple->new(url => 'http://nixos.org/'); - -while (my $token = $p->get_tag("a")) { - my $href = $token->get_attr("href"); - print "$href\n" if $href; -} - - - - -Sometimes you need to pass a simple Nix expression to customize -a package like Terraform: - - - -You must use double quotes (") when -passing a simple Nix expression in a nix-shell shebang. - - -Finally, using the merging of multiple nix-shell shebangs the -following Haskell script uses a specific branch of Nixpkgs/NixOS (the -18.03 stable branch): - - - -If you want to be even more precise, you can specify a specific -revision of Nixpkgs: - - -#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/0672315759b3e15e2121365f067c1c8c56bb4722.tar.gz - - - - -The examples above all used to get -dependencies from Nixpkgs. You can also use a Nix expression to build -your own dependencies. For example, the Python example could have been -written as: - - -#! /usr/bin/env nix-shell -#! nix-shell deps.nix -i python - - -where the file deps.nix in the same directory -as the #!-script contains: - - -with import <nixpkgs> {}; - -runCommand "dummy" { buildInputs = [ python pythonPackages.prettytable ]; } "" - - - - - - - -Environment variables - - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/nix-store.xml b/third_party/nix/doc/manual/command-ref/nix-store.xml deleted file mode 100644 index 113a3c2e41..0000000000 --- a/third_party/nix/doc/manual/command-ref/nix-store.xml +++ /dev/null @@ -1,1525 +0,0 @@ - - - - nix-store - 1 - Nix - - - - - nix-store - manipulate or query the Nix store - - - - - nix-store - - path - - operation - options - arguments - - - - -Description - -The command nix-store performs primitive -operations on the Nix store. 
You generally do not need to run this -command manually. - -nix-store takes exactly one -operation flag which indicates the subcommand to -be performed. These are documented below. - - - - - - - -Common options - -This section lists the options that are common to all -operations. These options are allowed for every subcommand, though -they may not always have an effect. See -also for a list of common -options. - - - - path - - Causes the result of a realisation - ( and ) - to be registered as a root of the garbage collector (see ). The root is stored in - path, which must be inside a directory - that is scanned for roots by the garbage collector (i.e., - typically in a subdirectory of - /nix/var/nix/gcroots/) - unless the flag - is used. - - If there are multiple results, then multiple symlinks will - be created by sequentially numbering symlinks beyond the first one - (e.g., foo, foo-2, - foo-3, and so on). - - - - - - - - In conjunction with , this option - allows roots to be stored outside of the GC - roots directory. This is useful for commands such as - nix-build that place a symlink to the build - result in the current directory; such a build result should not be - garbage-collected unless the symlink is removed. - - The flag causes a uniquely named - symlink to path to be stored in - /nix/var/nix/gcroots/auto/. For instance, - - -$ nix-store --add-root /home/eelco/bla/result --indirect -r ... - -$ ls -l /nix/var/nix/gcroots/auto -lrwxrwxrwx 1 ... 2005-03-13 21:10 dn54lcypm8f8... -> /home/eelco/bla/result - -$ ls -l /home/eelco/bla/result -lrwxrwxrwx 1 ... 2005-03-13 21:10 /home/eelco/bla/result -> /nix/store/1r11343n6qd4...-f-spot-0.0.10 - - Thus, when /home/eelco/bla/result is removed, - the GC root in the auto directory becomes a - dangling symlink and will be ignored by the collector. - - Note that it is not possible to move or rename - indirect GC roots, since the symlink in the - auto directory will still point to the old - location. - - - - - - - - - - - - - - - - - -Operation <option>--realise</option> - -Synopsis - - - nix-store - - - - - paths - - - - - -Description - -The operation essentially “builds” -the specified store paths. Realisation is a somewhat overloaded term: - - - - If the store path is a - derivation, realisation ensures that the output - paths of the derivation are valid (i.e., the output path and its - closure exist in the file system). This can be done in several - ways. First, it is possible that the outputs are already valid, in - which case we are done immediately. Otherwise, there may be substitutes that produce the - outputs (e.g., by downloading them). Finally, the outputs can be - produced by performing the build action described by the - derivation. - - If the store path is not a derivation, realisation - ensures that the specified path is valid (i.e., it and its closure - exist in the file system). If the path is already valid, we are - done immediately. Otherwise, the path and any missing paths in its - closure may be produced through substitutes. If there are no - (successful) subsitutes, realisation fails. - - - - - -The output path of each derivation is printed on standard -output. (For non-derivations argument, the argument itself is -printed.) - -The following flags are available: - - - - - - Print on standard error a description of what - packages would be built or downloaded, without actually performing - the operation. - - - - - - If a non-derivation path does not have a - substitute, then silently ignore it. 
- - - - - - This option allows you to check whether a - derivation is deterministic. It rebuilds the specified derivation - and checks whether the result is bitwise-identical with the - existing outputs, printing an error if that’s not the case. The - outputs of the specified derivation must already exist. When used - with , if an output path is not identical to - the corresponding output from the previous build, the new output - path is left in - /nix/store/name.check. - - See also the configuration - option, which repeats a derivation a number of times and prevents - its outputs from being registered as “valid” in the Nix store - unless they are identical. - - - - - -Special exit codes: - - - - 100 - Generic build failure, the builder process - returned with a non-zero exit code. - - - 101 - Build timeout, the build was aborted because it - did not complete within the specified timeout. - - - - 102 - Hash mismatch, the build output was rejected - because it does not match the specified outputHash. - - - - 104 - Not deterministic, the build succeeded in check - mode but the resulting output is not binary reproducable. - - - - - -With the flag it's possible for -multiple failures to occur, in this case the 1xx status codes are or combined -using binary or. -1100100 - ^^^^ - |||`- timeout - ||`-- output hash mismatch - |`--- build failure - `---- not deterministic - - - - - -Examples - -This operation is typically used to build store derivations -produced by nix-instantiate: - - -$ nix-store -r $(nix-instantiate ./test.nix) -/nix/store/31axcgrlbfsxzmfff1gyj1bf62hvkby2-aterm-2.3.1 - -This is essentially what nix-build does. - -To test whether a previously-built derivation is deterministic: - - -$ nix-build '<nixpkgs>' -A hello --check -K - - - - - - - - - - - - - -Operation <option>--serve</option> - -Synopsis - - - nix-store - - - - - - -Description - -The operation provides access to -the Nix store over stdin and stdout, and is intended to be used -as a means of providing Nix store access to a restricted ssh user. - - -The following flags are available: - - - - - - Allow the connected client to request the realization - of derivations. In effect, this can be used to make the host act - as a remote builder. - - - - - - - - -Examples - -To turn a host into a build server, the -authorized_keys file can be used to provide build -access to a given SSH public key: - - -$ cat <<EOF >>/root/.ssh/authorized_keys -command="nice -n20 nix-store --serve --write" ssh-rsa AAAAB3NzaC1yc2EAAAA... -EOF - - - - - - - - - - - - - -Operation <option>--gc</option> - -Synopsis - - - nix-store - - - - - - - - bytes - - - - -Description - -Without additional flags, the operation -performs a garbage collection on the Nix store. That is, all paths in -the Nix store not reachable via file system references from a set of -“roots”, are deleted. - -The following suboperations may be specified: - - - - - - This operation prints on standard output the set - of roots used by the garbage collector. What constitutes a root - is described in . - - - - - - This operation prints on standard output the set - of “live” store paths, which are all the store paths reachable - from the roots. Live paths should never be deleted, since that - would break consistency — it would become possible that - applications are installed that reference things that are no - longer present in the store. 
- - - - - - This operation prints out on standard output the - set of “dead” store paths, which is just the opposite of the set - of live paths: any path in the store that is not live (with - respect to the roots) is dead. - - - - - - This operation performs an actual garbage - collection. All dead paths are removed from the - store. This is the default. - - - - - -By default, all unreachable paths are deleted. The following -options control what gets deleted and in what order: - - - - bytes - - Keep deleting paths until at least - bytes bytes have been deleted, then - stop. The argument bytes can be - followed by the multiplicative suffix K, - M, G or - T, denoting KiB, MiB, GiB or TiB - units. - - - - - - - -The behaviour of the collector is also influenced by the keep-outputs -and keep-derivations -variables in the Nix configuration file. - -With , the collector prints the total -number of freed bytes when it finishes (or when it is interrupted). -With , it prints the number of bytes that -would be freed. - - - - -Examples - -To delete all unreachable paths, just do: - - -$ nix-store --gc -deleting `/nix/store/kq82idx6g0nyzsp2s14gfsc38npai7lf-cairo-1.0.4.tar.gz.drv' -... -8825586 bytes freed (8.42 MiB) - - - -To delete at least 100 MiBs of unreachable paths: - - -$ nix-store --gc --max-freed $((100 * 1024 * 1024)) - - - - - - - - - - - - -Operation <option>--delete</option> - -Synopsis - - - nix-store - - - paths - - - - -Description - -The operation deletes the store paths -paths from the Nix store, but only if it is -safe to do so; that is, when the path is not reachable from a root of -the garbage collector. This means that you can only delete paths that -would also be deleted by nix-store --gc. Thus, ---delete is a more targeted version of ---gc. - -With the option , reachability -from the roots is ignored. However, the path still won’t be deleted -if there are other paths in the store that refer to it (i.e., depend -on it). - - - -Example - - -$ nix-store --delete /nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4 -0 bytes freed (0.00 MiB) -error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4' since it is still alive - - - - - - - - - -Operation <option>--query</option> - -Synopsis - - - nix-store - - - - - - - - - - - - - - - - name - name - - - - - - - - - paths - - - - - -Description - -The operation displays various bits of -information about the store paths . The queries are described below. At -most one query can be specified. The default query is -. - -The paths paths may also be symlinks -from outside of the Nix store, to the Nix store. In that case, the -query is applied to the target of the symlink. - - - - - -Common query options - - - - - - - For each argument to the query that is a store - derivation, apply the query to the output path of the derivation - instead. - - - - - - - Realise each argument to the query first (see - nix-store - --realise). - - - - - - - - -Queries - - - - - - Prints out the output paths of the store - derivations paths. These are the paths - that will be produced when the derivation is - built. - - - - - - - Prints out the closure of the store path - paths. - - This query has one option: - - - - - - Also include the output path of store - derivations, and their closures. - - - - - - This query can be used to implement various kinds of - deployment. A source deployment is obtained - by distributing the closure of a store derivation. A - binary deployment is obtained by distributing - the closure of an output path. 
A cache - deployment (combined source/binary deployment, - including binaries of build-time-only dependencies) is obtained by - distributing the closure of a store derivation and specifying the - option . - - - - - - - - Prints the set of references of the store paths - paths, that is, their immediate - dependencies. (For all dependencies, use - .) - - - - - - Prints the set of referrers of - the store paths paths, that is, the - store paths currently existing in the Nix store that refer to one - of paths. Note that contrary to the - references, the set of referrers is not constant; it can change as - store paths are added or removed. - - - - - - Prints the closure of the set of store paths - paths under the referrers relation; that - is, all store paths that directly or indirectly refer to one of - paths. These are all the path currently - in the Nix store that are dependent on - paths. - - - - - - - Prints the deriver of the store paths - paths. If the path has no deriver - (e.g., if it is a source file), or if the deriver is not known - (e.g., in the case of a binary-only deployment), the string - unknown-deriver is printed. - - - - - - Prints the references graph of the store paths - paths in the format of the - dot tool of AT&T's Graphviz package. - This can be used to visualise dependency graphs. To obtain a - build-time dependency graph, apply this to a store derivation. To - obtain a runtime dependency graph, apply it to an output - path. - - - - - - Prints the references graph of the store paths - paths as a nested ASCII tree. - References are ordered by descending closure size; this tends to - flatten the tree, making it more readable. The query only - recurses into a store path when it is first encountered; this - prevents a blowup of the tree representation of the - graph. - - - - - - Prints the references graph of the store paths - paths in the GraphML file format. - This can be used to visualise dependency graphs. To obtain a - build-time dependency graph, apply this to a store derivation. To - obtain a runtime dependency graph, apply it to an output - path. - - - - name - name - - Prints the value of the attribute - name (i.e., environment variable) of - the store derivations paths. It is an - error for a derivation to not have the specified - attribute. - - - - - - Prints the SHA-256 hash of the contents of the - store paths paths (that is, the hash of - the output of nix-store --dump on the given - paths). Since the hash is stored in the Nix database, this is a - fast operation. - - - - - - Prints the size in bytes of the contents of the - store paths paths — to be precise, the - size of the output of nix-store --dump on the - given paths. Note that the actual disk space required by the - store paths may be higher, especially on filesystems with large - cluster sizes. - - - - - - Prints the garbage collector roots that point, - directly or indirectly, at the store paths - paths. - - - - - - - - -Examples - -Print the closure (runtime dependencies) of the -svn program in the current user environment: - - -$ nix-store -qR $(which svn) -/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4 -/nix/store/9lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4 -... - - - -Print the build-time dependencies of svn: - - -$ nix-store -qR $(nix-store -qd $(which svn)) -/nix/store/02iizgn86m42q905rddvg4ja975bk2i4-grep-2.5.1.tar.bz2.drv -/nix/store/07a2bzxmzwz5hp58nf03pahrv2ygwgs3-gcc-wrapper.sh -/nix/store/0ma7c9wsbaxahwwl04gbw3fcd806ski4-glibc-2.3.4.drv -... lots of other paths ... 
- -The difference with the previous example is that we ask the closure of -the derivation (), not the closure of the output -path that contains svn. - -Show the build-time dependencies as a tree: - - -$ nix-store -q --tree $(nix-store -qd $(which svn)) -/nix/store/7i5082kfb6yjbqdbiwdhhza0am2xvh6c-subversion-1.1.4.drv -+---/nix/store/d8afh10z72n8l1cr5w42366abiblgn54-builder.sh -+---/nix/store/fmzxmpjx2lh849ph0l36snfj9zdibw67-bash-3.0.drv -| +---/nix/store/570hmhmx3v57605cqg9yfvvyh0nnb8k8-bash -| +---/nix/store/p3srsbd8dx44v2pg6nbnszab5mcwx03v-builder.sh -... - - - -Show all paths that depend on the same OpenSSL library as -svn: - - -$ nix-store -q --referrers $(nix-store -q --binding openssl $(nix-store -qd $(which svn))) -/nix/store/23ny9l9wixx21632y2wi4p585qhva1q8-sylpheed-1.0.0 -/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4 -/nix/store/dpmvp969yhdqs7lm2r1a3gng7pyq6vy4-subversion-1.1.3 -/nix/store/l51240xqsgg8a7yrbqdx1rfzyv6l26fx-lynx-2.8.5 - - - -Show all paths that directly or indirectly depend on the Glibc -(C library) used by svn: - - -$ nix-store -q --referrers-closure $(ldd $(which svn) | grep /libc.so | awk '{print $3}') -/nix/store/034a6h4vpz9kds5r6kzb9lhh81mscw43-libgnomeprintui-2.8.2 -/nix/store/15l3yi0d45prm7a82pcrknxdh6nzmxza-gawk-3.1.4 -... - -Note that ldd is a command that prints out the -dynamic libraries used by an ELF executable. - -Make a picture of the runtime dependency graph of the current -user environment: - - -$ nix-store -q --graph ~/.nix-profile | dot -Tps > graph.ps -$ gv graph.ps - - - -Show every garbage collector root that points to a store path -that depends on svn: - - -$ nix-store -q --roots $(which svn) -/nix/var/nix/profiles/default-81-link -/nix/var/nix/profiles/default-82-link -/nix/var/nix/profiles/per-user/eelco/profile-97-link - - - - - - - - - - - - - - - - - - - -Operation <option>--add</option> - -Synopsis - - - nix-store - - paths - - - - -Description - -The operation adds the specified paths to -the Nix store. It prints the resulting paths in the Nix store on -standard output. - - - -Example - - -$ nix-store --add ./foo.c -/nix/store/m7lrha58ph6rcnv109yzx1nk1cj7k7zf-foo.c - - - - - - - -Operation <option>--add-fixed</option> - -Synopsis - - - nix-store - - - algorithm - paths - - - - -Description - -The operation adds the specified paths to -the Nix store. Unlike paths are registered using the -specified hashing algorithm, resulting in the same output path as a fixed output -derivation. This can be used for sources that are not available from a public -url or broke since the download expression was written. - - -This operation has the following options: - - - - - - - Use recursive instead of flat hashing mode, used when adding directories - to the store. - - - - - - - - - - -Example - - -$ nix-store --add-fixed sha256 ./hello-2.10.tar.gz -/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz - - - - - - - - - -Operation <option>--verify</option> - - - Synopsis - - nix-store - - - - - - -Description - -The operation verifies the internal -consistency of the Nix database, and the consistency between the Nix -database and the Nix store. Any inconsistencies encountered are -automatically repaired. Inconsistencies are generally the result of -the Nix store or database being modified by non-Nix tools, or of bugs -in Nix itself. 
- -This operation has the following options: - - - - - - Checks that the contents of every valid store path - has not been altered by computing a SHA-256 hash of the contents - and comparing it with the hash stored in the Nix database at build - time. Paths that have been modified are printed out. For large - stores, is obviously quite - slow. - - - - - - If any valid path is missing from the store, or - (if is given) the contents of a - valid path has been modified, then try to repair the path by - redownloading it. See nix-store --repair-path - for details. - - - - - - - - - - - - - - - -Operation <option>--verify-path</option> - - - Synopsis - - nix-store - - paths - - - -Description - -The operation compares the -contents of the given store paths to their cryptographic hashes stored -in Nix’s database. For every changed path, it prints a warning -message. The exit status is 0 if no path has changed, and 1 -otherwise. - - - -Example - -To verify the integrity of the svn command and all its dependencies: - - -$ nix-store --verify-path $(nix-store -qR $(which svn)) - - - - - - - - - - - -Operation <option>--repair-path</option> - - - Synopsis - - nix-store - - paths - - - -Description - -The operation attempts to -“repair” the specified paths by redownloading them using the available -substituters. If no substitutes are available, then repair is not -possible. - -During repair, there is a very small time window during -which the old path (if it exists) is moved out of the way and replaced -with the new path. If repair is interrupted in between, then the -system may be left in a broken state (e.g., if the path contains a -critical system component like the GNU C Library). - - - -Example - - -$ nix-store --verify-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13 -path `/nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13' was modified! - expected hash `2db57715ae90b7e31ff1f2ecb8c12ec1cc43da920efcbe3b22763f36a1861588', - got `481c5aa5483ebc97c20457bb8bca24deea56550d3985cda0027f67fe54b808e4' - -$ nix-store --repair-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13 -fetching path `/nix/store/d7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13'... -… - - - - - - - - - -Operation <option>--dump</option> - - - Synopsis - - nix-store - - path - - - -Description - -The operation produces a NAR (Nix -ARchive) file containing the contents of the file system tree rooted -at path. The archive is written to -standard output. - -A NAR archive is like a TAR or Zip archive, but it contains only -the information that Nix considers important. For instance, -timestamps are elided because all files in the Nix store have their -timestamp set to 0 anyway. Likewise, all permissions are left out -except for the execute bit, because all files in the Nix store have -644 or 755 permission. - -Also, a NAR archive is canonical, meaning -that “equal” paths always produce the same NAR archive. For instance, -directory entries are always sorted so that the actual on-disk order -doesn’t influence the result. This means that the cryptographic hash -of a NAR dump of a path is usable as a fingerprint of the contents of -the path. Indeed, the hashes of store paths stored in Nix’s database -(see nix-store -q ---hash) are SHA-256 hashes of the NAR dump of each -store path. - -NAR archives support filenames of unlimited length and 64-bit -file sizes. They can contain regular files, directories, and symbolic -links, but not other types of files (such as device nodes). 
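
The claim above, that the hash recorded in Nix's database is the SHA-256 hash of the NAR dump, can be checked by hand. The following is only a sketch; the hello store path stands in for any valid store path on your system, and no real output is shown:

$ nix-store --dump /nix/store/…-hello-2.10 > hello.nar
$ nix-hash --type sha256 --base32 --flat hello.nar (hash of the NAR file itself)
$ nix-store -q --hash /nix/store/…-hello-2.10 (prints sha256: followed by the same hash)
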
- -A Nix archive can be unpacked using nix-store ---restore. - - - - - - - - - -Operation <option>--restore</option> - - - Synopsis - - nix-store - - path - - - -Description - -The operation unpacks a NAR archive -to path, which must not already exist. The -archive is read from standard input. - - - - - - - - - -Operation <option>--export</option> - - - Synopsis - - nix-store - - paths - - - -Description - -The operation writes a serialisation -of the specified store paths to standard output in a format that can -be imported into another Nix store with nix-store --import. This -is like nix-store ---dump, except that the NAR archive produced by that command -doesn’t contain the necessary meta-information to allow it to be -imported into another Nix store (namely, the set of references of the -path). - -This command does not produce a closure of -the specified paths, so if a store path references other store paths -that are missing in the target Nix store, the import will fail. To -copy a whole closure, do something like: - - -$ nix-store --export $(nix-store -qR paths) > out - -To import the whole closure again, run: - - -$ nix-store --import < out - - - - - - - - - - - -Operation <option>--import</option> - - - Synopsis - - nix-store - - - - -Description - -The operation reads a serialisation of -a set of store paths produced by nix-store --export from -standard input and adds those store paths to the Nix store. Paths -that already exist in the Nix store are ignored. If a path refers to -another path that doesn’t exist in the Nix store, the import -fails. - - - - - - - - - -Operation <option>--optimise</option> - - - Synopsis - - nix-store - - - - -Description - -The operation reduces Nix store disk -space usage by finding identical files in the store and hard-linking -them to each other. It typically reduces the size of the store by -something like 25-35%. Only regular files and symlinks are -hard-linked in this manner. Files are considered identical when they -have the same NAR archive serialisation: that is, regular files must -have the same contents and permission (executable or non-executable), -and symlinks must have the same contents. - -After completion, or when the command is interrupted, a report -on the achieved savings is printed on standard error. - -Use or to get some -progress indication. - - - -Example - - -$ nix-store --optimise -hashing files in `/nix/store/qhqx7l2f1kmwihc9bnxs7rc159hsxnf3-gcc-4.1.1' -... -541838819 bytes (516.74 MiB) freed by hard-linking 54143 files; -there are 114486 files with equal contents out of 215894 files in total - - - - - - - - - - -Operation <option>--read-log</option> - - - Synopsis - - nix-store - - - - - paths - - - -Description - -The operation prints the build log -of the specified store paths on standard output. The build log is -whatever the builder of a derivation wrote to standard output and -standard error. If a store path is not a derivation, the deriver of -the store path is used. - -Build logs are kept in -/nix/var/log/nix/drvs. However, there is no -guarantee that a build log is available for any particular store path. -For instance, if the path was downloaded as a pre-built binary through -a substitute, then the log is unavailable. - - - -Example - - -$ nix-store -l $(which ktorrent) -building /nix/store/dhc73pvzpnzxhdgpimsd9sw39di66ph1-ktorrent-2.2.1 -unpacking sources -unpacking source archive /nix/store/p8n1jpqs27mgkjw07pb5269717nzf5f8-ktorrent-2.2.1.tar.gz -ktorrent-2.2.1/ -ktorrent-2.2.1/NEWS -... 
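
Because the log of a non-derivation path is looked up via its deriver, the following two invocations print the same log, assuming the deriver of the ktorrent output path is known to the local store (this is a sketch, not output from a real store):

$ nix-store -l $(which ktorrent) (log of the output path, via its deriver)
$ nix-store -l $(nix-store -qd $(which ktorrent)) (log of the store derivation itself)
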
- - - - - - - - - - -Operation <option>--dump-db</option> - - - Synopsis - - nix-store - - paths - - - -Description - -The operation writes a dump of the -Nix database to standard output. It can be loaded into an empty Nix -store using . This is useful for making -backups and when migrating to different database schemas. - -By default, will dump the entire Nix -database. When one or more store paths is passed, only the subset of -the Nix database for those store paths is dumped. As with -, the user is responsible for passing all the -store paths for a closure. See for an -example. - - - - - - - - -Operation <option>--load-db</option> - - - Synopsis - - nix-store - - - - -Description - -The operation reads a dump of the Nix -database created by from standard input and -loads it into the Nix database. - - - - - - - - -Operation <option>--print-env</option> - - - Synopsis - - nix-store - - drvpath - - - -Description - -The operation prints out the -environment of a derivation in a format that can be evaluated by a -shell. The command line arguments of the builder are placed in the -variable _args. - - - -Example - - -$ nix-store --print-env $(nix-instantiate '<nixpkgs>' -A firefox) - -export src; src='/nix/store/plpj7qrwcz94z2psh6fchsi7s8yihc7k-firefox-12.0.source.tar.bz2' -export stdenv; stdenv='/nix/store/7c8asx3yfrg5dg1gzhzyq2236zfgibnn-stdenv' -export system; system='x86_64-linux' -export _args; _args='-e /nix/store/9krlzvny65gdc8s7kpb6lkx8cd02c25c-default-builder.sh' - - - - - - - - - -Operation <option>--generate-binary-cache-key</option> - - - Synopsis - - nix-store - - - - - - - - - -Description - -This command generates an Ed25519 key pair that can -be used to create a signed binary cache. It takes three mandatory -parameters: - - - - A key name, such as - cache.example.org-1, that is used to look up keys - on the client when it verifies signatures. It can be anything, but - it’s suggested to use the host name of your cache - (e.g. cache.example.org) with a suffix denoting - the number of the key (to be incremented every time you need to - revoke a key). - - The file name where the secret key is to be - stored. - - The file name where the public key is to be - stored. - - - - - - - - - - - - -Environment variables - - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/opt-common-syn.xml b/third_party/nix/doc/manual/command-ref/opt-common-syn.xml deleted file mode 100644 index b610b54b96..0000000000 --- a/third_party/nix/doc/manual/command-ref/opt-common-syn.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - number - - - - number - - - - number - - - - number - - - - - - - - - - - - - - - - - - path - - - - name - value - - - - diff --git a/third_party/nix/doc/manual/command-ref/opt-common.xml b/third_party/nix/doc/manual/command-ref/opt-common.xml deleted file mode 100644 index b8a2f260e8..0000000000 --- a/third_party/nix/doc/manual/command-ref/opt-common.xml +++ /dev/null @@ -1,366 +0,0 @@ - - -Common Options - - -Most Nix commands accept the following command-line options: - - - - - - Prints out a summary of the command syntax and - exits. - - - - - - - Prints out the Nix version number on standard output - and exits. - - - - / - - - - Increases the level of verbosity of diagnostic messages - printed on standard error. For each Nix operation, the information - printed on standard output is well-defined; any diagnostic - information is printed on standard error, never on standard - output. - - This option may be specified repeatedly. 
Currently, the - following verbosity levels exist: - - - - 0 - “Errors only”: only print messages - explaining why the Nix invocation failed. - - - 1 - “Informational”: print - useful messages about what Nix is doing. - This is the default. - - - 2 - “Talkative”: print more informational - messages. - - - 3 - “Chatty”: print even more - informational messages. - - - 4 - “Debug”: print debug - information. - - - 5 - “Vomit”: print vast amounts of debug - information. - - - - - - - - - - - - - - Decreases the level of verbosity of diagnostic messages - printed on standard error. This is the inverse option to - / . - - - This option may be specified repeatedly. See the previous - verbosity levels list. - - - - - - - / - - By default, output written by builders to standard - output and standard error is echoed to the Nix command's standard - error. This option suppresses this behaviour. Note that the - builder's standard output and error are always written to a log file - in - prefix/nix/var/log/nix. - - - - - / -number - - - - Sets the maximum number of build jobs that Nix will - perform in parallel to the specified number. Specify - auto to use the number of CPUs in the system. - The default is specified by the max-jobs - configuration setting, which itself defaults to - 1. A higher value is useful on SMP systems or to - exploit I/O latency. - - Setting it to 0 disallows building on the local - machine, which is useful when you want builds to happen only on remote - builders. - - - - - - - - - Sets the value of the NIX_BUILD_CORES - environment variable in the invocation of builders. Builders can - use this variable at their discretion to control the maximum amount - of parallelism. For instance, in Nixpkgs, if the derivation - attribute enableParallelBuilding is set to - true, the builder passes the - flag to GNU Make. - It defaults to the value of the cores - configuration setting, if set, or 1 otherwise. - The value 0 means that the builder should use all - available CPU cores in the system. - - - - - - - Sets the maximum number of seconds that a builder - can go without producing any data on standard output or standard - error. The default is specified by the max-silent-time - configuration setting. 0 means no - time-out. - - - - - - Sets the maximum number of seconds that a builder - can run. The default is specified by the timeout - configuration setting. 0 means no - timeout. - - - - / - - Keep going in case of failed builds, to the - greatest extent possible. That is, if building an input of some - derivation fails, Nix will still build the other inputs, but not the - derivation itself. Without this option, Nix stops if any build - fails (except for builds of substitutes), possibly killing builds in - progress (in case of parallel or distributed builds). - - - - - / - - Specifies that in case of a build failure, the - temporary directory (usually in /tmp) in which - the build takes place should not be deleted. The path of the build - directory is printed as an informational message. - - - - - - - - - - Whenever Nix attempts to build a derivation for which - substitutes are known for each output path, but realising the output - paths through the substitutes fails, fall back on building the - derivation. - - The most common scenario in which this is useful is when we - have registered substitutes in order to perform binary distribution - from, say, a network repository. If the repository is down, the - realisation of the derivation will fail. 
When this option is - specified, Nix will build the derivation instead. Thus, - installation from binaries falls back on installation from source. - This option is not the default since it is generally not desirable - for a transient failure in obtaining the substitutes to lead to a - full build from source (with the related consumption of - resources). - - - - - - - - - - Disables the build hook mechanism. This allows to ignore remote - builders if they are setup on the machine. - - It's useful in cases where the bandwidth between the client and the - remote builder is too low. In that case it can take more time to upload the - sources to the remote builder and fetch back the result than to do the - computation locally. - - - - - - - - - - When this option is used, no attempt is made to open - the Nix database. Most Nix operations do need database access, so - those operations will fail. - - - - - name value - - This option is accepted by - nix-env, nix-instantiate and - nix-build. When evaluating Nix expressions, the - expression evaluator will automatically try to call functions that - it encounters. It can automatically call functions for which every - argument has a default value - (e.g., { argName ? - defaultValue }: - ...). With - , you can also call functions that have - arguments without a default value (or override a default value). - That is, if the evaluator encounters a function with an argument - named name, it will call it with value - value. - - For instance, the top-level default.nix in - Nixpkgs is actually a function: - - -{ # The system (e.g., `i686-linux') for which to build the packages. - system ? builtins.currentSystem - ... -}: ... - - So if you call this Nix expression (e.g., when you do - nix-env -i pkgname), - the function will be called automatically using the value builtins.currentSystem - for the system argument. You can override this - using , e.g., nix-env -i - pkgname --arg system - \"i686-freebsd\". (Note that since the argument is a Nix - string literal, you have to escape the quotes.) - - - - - name value - - This option is like , only the - value is not a Nix expression but a string. So instead of - --arg system \"i686-linux\" (the outer quotes are - to keep the shell happy) you can say --argstr system - i686-linux. - - - - - / -attrPath - - Select an attribute from the top-level Nix - expression being evaluated. (nix-env, - nix-instantiate, nix-build and - nix-shell only.) The attribute - path attrPath is a sequence of - attribute names separated by dots. For instance, given a top-level - Nix expression e, the attribute path - xorg.xorgserver would cause the expression - e.xorg.xorgserver to - be used. See nix-env - --install for some concrete examples. - - In addition to attribute names, you can also specify array - indices. For instance, the attribute path - foo.3.bar selects the bar - attribute of the fourth element of the array in the - foo attribute of the top-level - expression. - - - - - / - - Interpret the command line arguments as a list of - Nix expressions to be parsed and evaluated, rather than as a list - of file names of Nix expressions. - (nix-instantiate, nix-build - and nix-shell only.) - - - - - path - - Add a path to the Nix expression search path. This - option may be given multiple times. See the NIX_PATH environment variable for - information on the semantics of the Nix search path. Paths added - through take precedence over - NIX_PATH. - - - - - name value - - Set the Nix configuration option - name to value. 
- This overrides settings in the Nix configuration file (see - nix.conf5). - - - - - - - Fix corrupted or missing store paths by - redownloading or rebuilding them. Note that this is slow because it - requires computing a cryptographic hash of the contents of every - path in the closure of the build. Also note the warning under - nix-store --repair-path. - - - - - - - - diff --git a/third_party/nix/doc/manual/command-ref/opt-inst-syn.xml b/third_party/nix/doc/manual/command-ref/opt-inst-syn.xml deleted file mode 100644 index e8c3f1ec6f..0000000000 --- a/third_party/nix/doc/manual/command-ref/opt-inst-syn.xml +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - - - - - - - - - - - - - - - path - - diff --git a/third_party/nix/doc/manual/command-ref/utilities.xml b/third_party/nix/doc/manual/command-ref/utilities.xml deleted file mode 100644 index 893f5b5b52..0000000000 --- a/third_party/nix/doc/manual/command-ref/utilities.xml +++ /dev/null @@ -1,20 +0,0 @@ - - -Utilities - -This section lists utilities that you can use when you -work with Nix. - - - - - - - - - - diff --git a/third_party/nix/doc/manual/expressions/advanced-attributes.xml b/third_party/nix/doc/manual/expressions/advanced-attributes.xml deleted file mode 100644 index 07b0d97d3f..0000000000 --- a/third_party/nix/doc/manual/expressions/advanced-attributes.xml +++ /dev/null @@ -1,340 +0,0 @@ -
- -Advanced Attributes - -Derivations can declare some infrequently used optional -attributes. - - - - allowedReferences - - The optional attribute - allowedReferences specifies a list of legal - references (dependencies) of the output of the builder. For - example, - - -allowedReferences = []; - - - enforces that the output of a derivation cannot have any runtime - dependencies on its inputs. To allow an output to have a runtime - dependency on itself, use "out" as a list item. - This is used in NixOS to check that generated files such as - initial ramdisks for booting Linux don’t have accidental - dependencies on other paths in the Nix store. - - - - - allowedRequisites - - This attribute is similar to - allowedReferences, but it specifies the legal - requisites of the whole closure, so all the dependencies - recursively. For example, - - -allowedRequisites = [ foobar ]; - - - enforces that the output of a derivation cannot have any other - runtime dependency than foobar, and in addition - it enforces that foobar itself doesn't - introduce any other dependency itself. - - - - disallowedReferences - - The optional attribute - disallowedReferences specifies a list of illegal - references (dependencies) of the output of the builder. For - example, - - -disallowedReferences = [ foo ]; - - - enforces that the output of a derivation cannot have a direct runtime - dependencies on the derivation foo. - - - - - disallowedRequisites - - This attribute is similar to - disallowedReferences, but it specifies illegal - requisites for the whole closure, so all the dependencies - recursively. For example, - - -disallowedRequisites = [ foobar ]; - - - enforces that the output of a derivation cannot have any - runtime dependency on foobar or any other derivation - depending recursively on foobar. - - - - - exportReferencesGraph - - This attribute allows builders access to the - references graph of their inputs. The attribute is a list of - inputs in the Nix store whose references graph the builder needs - to know. The value of this attribute should be a list of pairs - [ name1 - path1 name2 - path2 ... - ]. The references graph of each - pathN will be stored in a text file - nameN in the temporary build directory. - The text files have the format used by nix-store - --register-validity (with the deriver fields left - empty). For example, when the following derivation is built: - - -derivation { - ... - exportReferencesGraph = [ "libfoo-graph" libfoo ]; -}; - - - the references graph of libfoo is placed in the - file libfoo-graph in the temporary build - directory. - - exportReferencesGraph is useful for - builders that want to do something with the closure of a store - path. Examples include the builders in NixOS that generate the - initial ramdisk for booting Linux (a cpio - archive containing the closure of the boot script) and the - ISO-9660 image for the installation CD (which is populated with a - Nix store containing the closure of a bootable NixOS - configuration). - - - - - impureEnvVars - - This attribute allows you to specify a list of - environment variables that should be passed from the environment - of the calling user to the builder. Usually, the environment is - cleared completely when the builder is executed, but with this - attribute you can allow specific environment variables to be - passed unmodified. For example, fetchurl in - Nixpkgs has the line - - -impureEnvVars = [ "http_proxy" "https_proxy" ... 
]; - - - to make it use the proxy server configuration specified by the - user in the environment variables http_proxy and - friends. - - This attribute is only allowed in fixed-output derivations, where - impurities such as these are okay since (the hash of) the output - is known in advance. It is ignored for all other - derivations. - - impureEnvVars implementation takes - environment variables from the current builder process. When a daemon is - building its environmental variables are used. Without the daemon, the - environmental variables come from the environment of the - nix-build. - - - - - - outputHash - outputHashAlgo - outputHashMode - - These attributes declare that the derivation is a - so-called fixed-output derivation, which - means that a cryptographic hash of the output is already known in - advance. When the build of a fixed-output derivation finishes, - Nix computes the cryptographic hash of the output and compares it - to the hash declared with these attributes. If there is a - mismatch, the build fails. - - The rationale for fixed-output derivations is derivations - such as those produced by the fetchurl - function. This function downloads a file from a given URL. To - ensure that the downloaded file has not been modified, the caller - must also specify a cryptographic hash of the file. For example, - - -fetchurl { - url = http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz; - sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; -} - - - It sometimes happens that the URL of the file changes, e.g., - because servers are reorganised or no longer available. We then - must update the call to fetchurl, e.g., - - -fetchurl { - url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz; - sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; -} - - - If a fetchurl derivation was treated like a - normal derivation, the output paths of the derivation and - all derivations depending on it would change. - For instance, if we were to change the URL of the Glibc source - distribution in Nixpkgs (a package on which almost all other - packages depend) massive rebuilds would be needed. This is - unfortunate for a change which we know cannot have a real effect - as it propagates upwards through the dependency graph. - - For fixed-output derivations, on the other hand, the name of - the output path only depends on the outputHash* - and name attributes, while all other attributes - are ignored for the purpose of computing the output path. (The - name attribute is included because it is part - of the path.) - - As an example, here is the (simplified) Nix expression for - fetchurl: - - -{ stdenv, curl }: # The curl program is used for downloading. - -{ url, sha256 }: - -stdenv.mkDerivation { - name = baseNameOf (toString url); - builder = ./builder.sh; - buildInputs = [ curl ]; - - # This is a fixed-output derivation; the output must be a regular - # file with SHA256 hash sha256. - outputHashMode = "flat"; - outputHashAlgo = "sha256"; - outputHash = sha256; - - inherit url; -} - - - - - The outputHashAlgo attribute specifies - the hash algorithm used to compute the hash. It can currently be - "sha1", "sha256" or - "sha512". - - The outputHashMode attribute determines - how the hash is computed. It must be one of the following two - values: - - - - "flat" - - The output must be a non-executable regular - file. If it isn’t, the build fails. 
The hash is simply - computed over the contents of that file (so it’s equal to what - Unix commands like sha256sum or - sha1sum produce). - - This is the default. - - - - "recursive" - - The hash is computed over the NAR archive dump - of the output (i.e., the result of nix-store - --dump). In this case, the output can be - anything, including a directory tree. - - - - - - - - The outputHash attribute, finally, must - be a string containing the hash in either hexadecimal or base-32 - notation. (See the nix-hash command - for information about converting to and from base-32 - notation.) - - - - - passAsFile - - A list of names of attributes that should be - passed via files rather than environment variables. For example, - if you have - - -passAsFile = ["big"]; -big = "a very long string"; - - - then when the builder runs, the environment variable - bigPath will contain the absolute path to a - temporary file containing a very long - string. That is, for any attribute - x listed in - passAsFile, Nix will pass an environment - variable xPath holding - the path of the file containing the value of attribute - x. This is useful when you need to pass - large strings to a builder, since most operating systems impose a - limit on the size of the environment (typically, a few hundred - kilobyte). - - - - - preferLocalBuild - - If this attribute is set to - true and distributed building is - enabled, then, if possible, the derivaton will be built - locally instead of forwarded to a remote machine. This is - appropriate for trivial builders where the cost of doing a - download or remote build would exceed the cost of building - locally. - - - - - allowSubstitutes - - If this attribute is set to - false, then Nix will always build this - derivation; it will not try to substitute its outputs. This is - useful for very trivial derivations (such as - writeText in Nixpkgs) that are cheaper to - build than to substitute from a binary cache. - - - - - - -
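
To give a feel for how these attributes combine, here is a minimal sketch of a derivation that uses several of them at once. It is illustrative only: the name, the trivial buildCommand and the attribute values are made up, and the expression assumes a Nixpkgs stdenv as in the earlier fetchurl example:

with import <nixpkgs> { };

stdenv.mkDerivation {
  name = "advanced-attributes-example";

  # Trivial to build, so skip substitution and remote builders.
  preferLocalBuild = true;
  allowSubstitutes = false;

  # Pass a large attribute via a file; the builder reads it from $bigPath.
  passAsFile = [ "big" ];
  big = "a very long string";

  # The output may refer to itself, but not to any of its build inputs.
  allowedReferences = [ "out" ];

  buildCommand = ''
    mkdir -p $out
    cp "$bigPath" $out/big.txt
  '';
}

Building this with nix-build yields an output whose only permitted runtime reference is itself, with the contents of the big attribute delivered to the builder through a file rather than the environment.
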
diff --git a/third_party/nix/doc/manual/expressions/arguments-variables.xml b/third_party/nix/doc/manual/expressions/arguments-variables.xml deleted file mode 100644 index bf60cb7eef..0000000000 --- a/third_party/nix/doc/manual/expressions/arguments-variables.xml +++ /dev/null @@ -1,121 +0,0 @@ -
- -Arguments and Variables - - - -Composing GNU Hello -(<filename>all-packages.nix</filename>) - -... - -rec { - - hello = import ../applications/misc/hello/ex-1 { - inherit fetchurl stdenv perl; - }; - - perl = import ../development/interpreters/perl { - inherit fetchurl stdenv; - }; - - fetchurl = import ../build-support/fetchurl { - inherit stdenv; ... - }; - - stdenv = ...; - -} - - - -The Nix expression in is a -function; it is missing some arguments that have to be filled in -somewhere. In the Nix Packages collection this is done in the file -pkgs/top-level/all-packages.nix, where all -Nix expressions for packages are imported and called with the -appropriate arguments. shows -some fragments of -all-packages.nix. - - - - - - This file defines a set of attributes, all of which are - concrete derivations (i.e., not functions). In fact, we define a - mutually recursive set of attributes. That - is, the attributes can refer to each other. This is precisely - what we want since we want to plug the - various packages into each other. - - - - - - Here we import the Nix expression for - GNU Hello. The import operation just loads and returns the - specified Nix expression. In fact, we could just have put the - contents of in - all-packages.nix at this point. That - would be completely equivalent, but it would make the file rather - bulky. - - Note that we refer to - ../applications/misc/hello/ex-1, not - ../applications/misc/hello/ex-1/default.nix. - When you try to import a directory, Nix automatically appends - /default.nix to the file name. - - - - - - This is where the actual composition takes place. Here we - call the function imported from - ../applications/misc/hello/ex-1 with a set - containing the things that the function expects, namely - fetchurl, stdenv, and - perl. We use inherit again to use the - attributes defined in the surrounding scope (we could also have - written fetchurl = fetchurl;, etc.). - - The result of this function call is an actual derivation - that can be built by Nix (since when we fill in the arguments of - the function, what we get is its body, which is the call to - stdenv.mkDerivation in ). - - Nixpkgs has a convenience function - callPackage that imports and calls a - function, filling in any missing arguments by passing the - corresponding attribute from the Nixpkgs set, like this: - - -hello = callPackage ../applications/misc/hello/ex-1 { }; - - - If necessary, you can set or override arguments: - - -hello = callPackage ../applications/misc/hello/ex-1 { stdenv = myStdenv; }; - - - - - - - - - Likewise, we have to instantiate Perl, - fetchurl, and the standard environment. - - - - - -
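
The callPackage convenience function mentioned above can be approximated in a few lines. This is a simplified sketch, not the actual Nixpkgs implementation (which additionally makes the result overridable); it only illustrates the argument auto-filling idea, with the package set passed in explicitly:

let
  callPackageWith = pkgs: path: overrides:
    let
      f = import path;
      # The formal arguments declared by f, looked up by name in the
      # package set...
      autoArgs = builtins.intersectAttrs (builtins.functionArgs f) pkgs;
    in
      # ...with explicitly passed arguments taking precedence.
      f (autoArgs // overrides);
in
  callPackageWith

Given the all-packages.nix attribute set above as pkgs, writing hello = callPackageWith pkgs ../applications/misc/hello/ex-1 { }; then behaves like the explicit import with inherit fetchurl stdenv perl;.
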
\ No newline at end of file diff --git a/third_party/nix/doc/manual/expressions/build-script.xml b/third_party/nix/doc/manual/expressions/build-script.xml deleted file mode 100644 index 7bad8f808d..0000000000 --- a/third_party/nix/doc/manual/expressions/build-script.xml +++ /dev/null @@ -1,119 +0,0 @@ -
- -Build Script - -Build script for GNU Hello -(<filename>builder.sh</filename>) - -source $stdenv/setup - -PATH=$perl/bin:$PATH - -tar xvfz $src -cd hello-* -./configure --prefix=$out -make -make install - - - shows the builder referenced -from Hello's Nix expression (stored in -pkgs/applications/misc/hello/ex-1/builder.sh). -The builder can actually be made a lot shorter by using the -generic builder functions provided by -stdenv, but here we write out the build steps to -elucidate what a builder does. It performs the following -steps: - - - - - - When Nix runs a builder, it initially completely clears the - environment (except for the attributes declared in the - derivation). For instance, the PATH variable is - emptyActually, it's initialised to - /path-not-set to prevent Bash from setting it - to a default value.. This is done to prevent - undeclared inputs from being used in the build process. If for - example the PATH contained - /usr/bin, then you might accidentally use - /usr/bin/gcc. - - So the first step is to set up the environment. This is - done by calling the setup script of the - standard environment. The environment variable - stdenv points to the location of the standard - environment being used. (It wasn't specified explicitly as an - attribute in , but - mkDerivation adds it automatically.) - - - - - - Since Hello needs Perl, we have to make sure that Perl is in - the PATH. The perl environment - variable points to the location of the Perl package (since it - was passed in as an attribute to the derivation), so - $perl/bin is the - directory containing the Perl interpreter. - - - - - - Now we have to unpack the sources. The - src attribute was bound to the result of - fetching the Hello source tarball from the network, so the - src environment variable points to the location in - the Nix store to which the tarball was downloaded. After - unpacking, we cd to the resulting source - directory. - - The whole build is performed in a temporary directory - created in /tmp, by the way. This directory is - removed after the builder finishes, so there is no need to clean - up the sources afterwards. Also, the temporary directory is - always newly created, so you don't have to worry about files from - previous builds interfering with the current build. - - - - - - GNU Hello is a typical Autoconf-based package, so we first - have to run its configure script. In Nix - every package is stored in a separate location in the Nix store, - for instance - /nix/store/9a54ba97fb71b65fda531012d0443ce2-hello-2.1.1. - Nix computes this path by cryptographically hashing all attributes - of the derivation. The path is passed to the builder through the - out environment variable. So here we give - configure the parameter - --prefix=$out to cause Hello to be installed in - the expected location. - - - - - - Finally we build Hello (make) and install - it into the location specified by out - (make install). - - - - - -If you are wondering about the absence of error checking on the -result of various commands called in the builder: this is because the -shell script is evaluated with Bash's option, -which causes the script to be aborted if any command fails without an -error check. - -
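
For comparison, the "a lot shorter" builder alluded to above could rely on the generic builder functions of stdenv. The sketch below assumes that Perl is passed in buildInputs (rather than as a bare perl attribute), so that the setup script puts it on the PATH, and that src points at the Hello tarball as before:

source $stdenv/setup    # set up PATH from buildInputs and define the phase functions
genericBuild            # runs the unpack, configure, build and install phases
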
\ No newline at end of file
diff --git a/third_party/nix/doc/manual/expressions/builder-syntax.xml b/third_party/nix/doc/manual/expressions/builder-syntax.xml
deleted file mode 100644
index e51bade44e..0000000000
--- a/third_party/nix/doc/manual/expressions/builder-syntax.xml
+++ /dev/null
@@ -1,119 +0,0 @@
-
- -Builder Syntax - -Build script for GNU Hello -(<filename>builder.sh</filename>) - -source $stdenv/setup - -PATH=$perl/bin:$PATH - -tar xvfz $src -cd hello-* -./configure --prefix=$out -make -make install - - - shows the builder referenced -from Hello's Nix expression (stored in -pkgs/applications/misc/hello/ex-1/builder.sh). -The builder can actually be made a lot shorter by using the -generic builder functions provided by -stdenv, but here we write out the build steps to -elucidate what a builder does. It performs the following -steps: - - - - - - When Nix runs a builder, it initially completely clears the - environment (except for the attributes declared in the - derivation). For instance, the PATH variable is - emptyActually, it's initialised to - /path-not-set to prevent Bash from setting it - to a default value.. This is done to prevent - undeclared inputs from being used in the build process. If for - example the PATH contained - /usr/bin, then you might accidentally use - /usr/bin/gcc. - - So the first step is to set up the environment. This is - done by calling the setup script of the - standard environment. The environment variable - stdenv points to the location of the standard - environment being used. (It wasn't specified explicitly as an - attribute in , but - mkDerivation adds it automatically.) - - - - - - Since Hello needs Perl, we have to make sure that Perl is in - the PATH. The perl environment - variable points to the location of the Perl package (since it - was passed in as an attribute to the derivation), so - $perl/bin is the - directory containing the Perl interpreter. - - - - - - Now we have to unpack the sources. The - src attribute was bound to the result of - fetching the Hello source tarball from the network, so the - src environment variable points to the location in - the Nix store to which the tarball was downloaded. After - unpacking, we cd to the resulting source - directory. - - The whole build is performed in a temporary directory - created in /tmp, by the way. This directory is - removed after the builder finishes, so there is no need to clean - up the sources afterwards. Also, the temporary directory is - always newly created, so you don't have to worry about files from - previous builds interfering with the current build. - - - - - - GNU Hello is a typical Autoconf-based package, so we first - have to run its configure script. In Nix - every package is stored in a separate location in the Nix store, - for instance - /nix/store/9a54ba97fb71b65fda531012d0443ce2-hello-2.1.1. - Nix computes this path by cryptographically hashing all attributes - of the derivation. The path is passed to the builder through the - out environment variable. So here we give - configure the parameter - --prefix=$out to cause Hello to be installed in - the expected location. - - - - - - Finally we build Hello (make) and install - it into the location specified by out - (make install). - - - - - -If you are wondering about the absence of error checking on the -result of various commands called in the builder: this is because the -shell script is evaluated with Bash's option, -which causes the script to be aborted if any command fails without an -error check. - -
\ No newline at end of file
diff --git a/third_party/nix/doc/manual/expressions/builtins.xml b/third_party/nix/doc/manual/expressions/builtins.xml
deleted file mode 100644
index 394e1fc32c..0000000000
--- a/third_party/nix/doc/manual/expressions/builtins.xml
+++ /dev/null
@@ -1,1658 +0,0 @@
-
- -Built-in Functions - -This section lists the functions and constants built into the -Nix expression evaluator. (The built-in function -derivation is discussed above.) Some built-ins, -such as derivation, are always in scope of every -Nix expression; you can just access them right away. But to prevent -polluting the namespace too much, most built-ins are not in scope. -Instead, you can access them through the builtins -built-in value, which is a set that contains all built-in functions -and values. For instance, derivation is also -available as builtins.derivation. - - - - - - - abort s - builtins.abort s - - Abort Nix expression evaluation, print error - message s. - - - - - - builtins.add - e1 e2 - - - Return the sum of the numbers - e1 and - e2. - - - - - - builtins.all - pred list - - Return true if the function - pred returns true - for all elements of list, - and false otherwise. - - - - - - builtins.any - pred list - - Return true if the function - pred returns true - for at least one element of list, - and false otherwise. - - - - - - builtins.attrNames - set - - Return the names of the attributes in the set - set in an alphabetically sorted list. For instance, - builtins.attrNames { y = 1; x = "foo"; } - evaluates to [ "x" "y" ]. - - - - - - builtins.attrValues - set - - Return the values of the attributes in the set - set in the order corresponding to the - sorted attribute names. - - - - - - baseNameOf s - - Return the base name of the - string s, that is, everything following - the final slash in the string. This is similar to the GNU - basename command. - - - - - - builtins.bitAnd - e1 e2 - - Return the bitwise AND of the integers - e1 and - e2. - - - - - - builtins.bitOr - e1 e2 - - Return the bitwise OR of the integers - e1 and - e2. - - - - - - builtins.bitXor - e1 e2 - - Return the bitwise XOR of the integers - e1 and - e2. - - - - - - builtins - - The set builtins contains all - the built-in functions and values. You can use - builtins to test for the availability of - features in the Nix installation, e.g., - - -if builtins ? getEnv then builtins.getEnv "PATH" else "" - - This allows a Nix expression to fall back gracefully on older Nix - installations that don’t have the desired built-in - function. - - - - - - builtins.compareVersions - s1 s2 - - Compare two strings representing versions and - return -1 if version - s1 is older than version - s2, 0 if they are - the same, and 1 if - s1 is newer than - s2. The version comparison algorithm - is the same as the one used by nix-env - -u. - - - - - - builtins.concatLists - lists - - Concatenate a list of lists into a single - list. - - - - - builtins.concatStringsSep - separator list - - Concatenate a list of strings with a separator - between each element, e.g. concatStringsSep "/" - ["usr" "local" "bin"] == "usr/local/bin" - - - - - builtins.currentSystem - - The built-in value currentSystem - evaluates to the Nix platform identifier for the Nix installation - on which the expression is being evaluated, such as - "i686-linux" or - "x86_64-darwin". - - - - - - - - - - - - builtins.deepSeq - e1 e2 - - This is like seq - e1 - e2, except that - e1 is evaluated - deeply: if it’s a list or set, its elements - or attributes are also evaluated recursively. - - - - - - derivation - attrs - builtins.derivation - attrs - - derivation is described in - . - - - - - - dirOf s - builtins.dirOf s - - Return the directory part of the string - s, that is, everything before the final - slash in the string. 
This is similar to the GNU - dirname command. - - - - - - builtins.div - e1 e2 - - Return the quotient of the numbers - e1 and - e2. - - - - - builtins.elem - x xs - - Return true if a value equal to - x occurs in the list - xs, and false - otherwise. - - - - - - builtins.elemAt - xs n - - Return element n from - the list xs. Elements are counted - starting from 0. A fatal error occurs if the index is out of - bounds. - - - - - - builtins.fetchurl - url - - Download the specified URL and return the path of - the downloaded file. This function is not available if restricted evaluation mode is - enabled. - - - - - - fetchTarball - url - builtins.fetchTarball - url - - Download the specified URL, unpack it and return - the path of the unpacked tree. The file must be a tape archive - (.tar) compressed with - gzip, bzip2 or - xz. The top-level path component of the files - in the tarball is removed, so it is best if the tarball contains a - single directory at top level. The typical use of the function is - to obtain external Nix expression dependencies, such as a - particular version of Nixpkgs, e.g. - - -with import (fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz) {}; - -stdenv.mkDerivation { … } - - - - The fetched tarball is cached for a certain amount of time - (1 hour by default) in ~/.cache/nix/tarballs/. - You can change the cache timeout either on the command line with - or - in the Nix configuration file with this option: - number of seconds to cache. - - - Note that when obtaining the hash with nix-prefetch-url - the option --unpack is required. - - - This function can also verify the contents against a hash. - In that case, the function takes a set instead of a URL. The set - requires the attribute url and the attribute - sha256, e.g. - - -with import (fetchTarball { - url = https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz; - sha256 = "1jppksrfvbk5ypiqdz4cddxdl8z6zyzdb2srq8fcffr327ld5jj2"; -}) {}; - -stdenv.mkDerivation { … } - - - - - This function is not available if restricted evaluation mode is - enabled. - - - - - - builtins.fetchGit - args - - - - - Fetch a path from git. args can be - a URL, in which case the HEAD of the repo at that URL is - fetched. Otherwise, it can be an attribute with the following - attributes (all except url optional): - - - - - url - - - The URL of the repo. - - - - - name - - - The name of the directory the repo should be exported to - in the store. Defaults to the basename of the URL. - - - - - rev - - - The git revision to fetch. Defaults to the tip of - ref. - - - - - ref - - - The git ref to look for the requested revision under. - This is often a branch or tag name. Defaults to - HEAD. - - - - By default, the ref value is prefixed - with refs/heads/. As of Nix 2.3.0 - Nix will not prefix refs/heads/ if - ref starts with refs/. - - - - - - - Fetching a private repository over SSH - builtins.fetchGit { - url = "git@github.com:my-secret/repository.git"; - ref = "master"; - rev = "adab8b916a45068c044658c4158d81878f9ed1c3"; -} - - - - Fetching an arbitrary ref - builtins.fetchGit { - url = "https://github.com/NixOS/nix.git"; - ref = "refs/heads/0.5-release"; -} - - - - Fetching a repository's specific commit on an arbitrary branch - - If the revision you're looking for is in the default branch - of the git repository you don't strictly need to specify - the branch name in the ref attribute. 
- - - However, if the revision you're looking for is in a future - branch for the non-default branch you will need to specify - the the ref attribute as well. - - builtins.fetchGit { - url = "https://github.com/nixos/nix.git"; - rev = "841fcbd04755c7a2865c51c1e2d3b045976b7452"; - ref = "1.11-maintenance"; -} - - - It is nice to always specify the branch which a revision - belongs to. Without the branch being specified, the - fetcher might fail if the default branch changes. - Additionally, it can be confusing to try a commit from a - non-default branch and see the fetch fail. If the branch - is specified the fault is much more obvious. - - - - - - Fetching a repository's specific commit on the default branch - - If the revision you're looking for is in the default branch - of the git repository you may omit the - ref attribute. - - builtins.fetchGit { - url = "https://github.com/nixos/nix.git"; - rev = "841fcbd04755c7a2865c51c1e2d3b045976b7452"; -} - - - - Fetching a tag - builtins.fetchGit { - url = "https://github.com/nixos/nix.git"; - ref = "refs/tags/1.9"; -} - - - - Fetching the latest version of a remote branch - - builtins.fetchGit can behave impurely - fetch the latest version of a remote branch. - - Nix will refetch the branch in accordance to - . - This behavior is disabled in - Pure evaluation mode. - builtins.fetchGit { - url = "ssh://git@github.com/nixos/nix.git"; - ref = "master"; -} - - - - - - builtins.filter - f xs - - Return a list consisting of the elements of - xs for which the function - f returns - true. - - - - - - builtins.filterSource - e1 e2 - - - - This function allows you to copy sources into the Nix - store while filtering certain files. For instance, suppose that - you want to use the directory source-dir as - an input to a Nix expression, e.g. - - -stdenv.mkDerivation { - ... - src = ./source-dir; -} - - - However, if source-dir is a Subversion - working copy, then all those annoying .svn - subdirectories will also be copied to the store. Worse, the - contents of those directories may change a lot, causing lots of - spurious rebuilds. With filterSource you - can filter out the .svn directories: - - - src = builtins.filterSource - (path: type: type != "directory" || baseNameOf path != ".svn") - ./source-dir; - - - - - Thus, the first argument e1 - must be a predicate function that is called for each regular - file, directory or symlink in the source tree - e2. If the function returns - true, the file is copied to the Nix store, - otherwise it is omitted. The function is called with two - arguments. The first is the full path of the file. The second - is a string that identifies the type of the file, which is - either "regular", - "directory", "symlink" or - "unknown" (for other kinds of files such as - device nodes or fifos — but note that those cannot be copied to - the Nix store, so if the predicate returns - true for them, the copy will fail). If you - exclude a directory, the entire corresponding subtree of - e2 will be excluded. - - - - - - - - builtins.foldl’ - op nul list - - Reduce a list by applying a binary operator, from - left to right, e.g. foldl’ op nul [x0 x1 x2 ...] = op (op - (op nul x0) x1) x2) .... The operator is applied - strictly, i.e., its arguments are evaluated first. For example, - foldl’ (x: y: x + y) 0 [1 2 3] evaluates to - 6. - - - - - - builtins.functionArgs - f - - - Return a set containing the names of the formal arguments expected - by the function f. 
- The value of each attribute is a Boolean denoting whether the corresponding - argument has a default value. For instance, - functionArgs ({ x, y ? 123}: ...) = { x = false; y = true; }. - - - "Formal argument" here refers to the attributes pattern-matched by - the function. Plain lambdas are not included, e.g. - functionArgs (x: ...) = { }. - - - - - - builtins.fromJSON e - - Convert a JSON string to a Nix - value. For example, - - -builtins.fromJSON ''{"x": [1, 2, 3], "y": null}'' - - - returns the value { x = [ 1 2 3 ]; y = null; - }. - - - - - - builtins.genList - generator length - - Generate list of size - length, with each element - i equal to the value returned by - generator i. For - example, - - -builtins.genList (x: x * x) 5 - - - returns the list [ 0 1 4 9 16 ]. - - - - - - builtins.getAttr - s set - - getAttr returns the attribute - named s from - set. Evaluation aborts if the - attribute doesn’t exist. This is a dynamic version of the - . operator, since s - is an expression rather than an identifier. - - - - - - builtins.getEnv - s - - getEnv returns the value of - the environment variable s, or an empty - string if the variable doesn’t exist. This function should be - used with care, as it can introduce all sorts of nasty environment - dependencies in your Nix expression. - - getEnv is used in Nix Packages to - locate the file ~/.nixpkgs/config.nix, which - contains user-local settings for Nix Packages. (That is, it does - a getEnv "HOME" to locate the user’s home - directory.) - - - - - - builtins.hasAttr - s set - - hasAttr returns - true if set has an - attribute named s, and - false otherwise. This is a dynamic version of - the ? operator, since - s is an expression rather than an - identifier. - - - - - - builtins.hashString - type s - - Return a base-16 representation of the - cryptographic hash of string s. The - hash algorithm specified by type must - be one of "md5", "sha1", - "sha256" or "sha512". - - - - - - builtins.hashFile - type p - - Return a base-16 representation of the - cryptographic hash of the file at path p. The - hash algorithm specified by type must - be one of "md5", "sha1", - "sha256" or "sha512". - - - - - - builtins.head - list - - Return the first element of a list; abort - evaluation if the argument isn’t a list or is an empty list. You - can test whether a list is empty by comparing it with - []. - - - - - - import - path - builtins.import - path - - Load, parse and return the Nix expression in the - file path. If path - is a directory, the file default.nix - in that directory is loaded. Evaluation aborts if the - file doesn’t exist or contains an incorrect Nix expression. - import implements Nix’s module system: you - can put any Nix expression (such as a set or a function) in a - separate file, and use it from Nix expressions in other - files. - - Unlike some languages, import is a regular - function in Nix. Paths using the angle bracket syntax (e.g., - import <foo>) are normal path - values (see ). - - A Nix expression loaded by import must - not contain any free variables (identifiers - that are not defined in the Nix expression itself and are not - built-in). Therefore, it cannot refer to variables that are in - scope at the call site. For instance, if you have a calling - expression - - -rec { - x = 123; - y = import ./foo.nix; -} - - then the following foo.nix will give an - error: - - -x + 456 - - since x is not in scope in - foo.nix. 
If you want x - to be available in foo.nix, you should pass - it as a function argument: - - -rec { - x = 123; - y = import ./foo.nix x; -} - - and - - -x: x + 456 - - (The function argument doesn’t have to be called - x in foo.nix; any name - would work.) - - - - - - builtins.intersectAttrs - e1 e2 - - Return a set consisting of the attributes in the - set e2 that also exist in the set - e1. - - - - - - builtins.isAttrs - e - - Return true if - e evaluates to a set, and - false otherwise. - - - - - - builtins.isList - e - - Return true if - e evaluates to a list, and - false otherwise. - - - - - builtins.isFunction - e - - Return true if - e evaluates to a function, and - false otherwise. - - - - - - builtins.isString - e - - Return true if - e evaluates to a string, and - false otherwise. - - - - - - builtins.isInt - e - - Return true if - e evaluates to an int, and - false otherwise. - - - - - - builtins.isFloat - e - - Return true if - e evaluates to a float, and - false otherwise. - - - - - - builtins.isBool - e - - Return true if - e evaluates to a bool, and - false otherwise. - - - - builtins.isPath - e - - Return true if - e evaluates to a path, and - false otherwise. - - - - - isNull - e - builtins.isNull - e - - Return true if - e evaluates to null, - and false otherwise. - - This function is deprecated; - just write e == null instead. - - - - - - - - builtins.length - e - - Return the length of the list - e. - - - - - - builtins.lessThan - e1 e2 - - Return true if the number - e1 is less than the number - e2, and false - otherwise. Evaluation aborts if either - e1 or e2 - does not evaluate to a number. - - - - - - builtins.listToAttrs - e - - Construct a set from a list specifying the names - and values of each attribute. Each element of the list should be - a set consisting of a string-valued attribute - name specifying the name of the attribute, and - an attribute value specifying its value. - Example: - - -builtins.listToAttrs - [ { name = "foo"; value = 123; } - { name = "bar"; value = 456; } - ] - - - evaluates to - - -{ foo = 123; bar = 456; } - - - - - - - - map - f list - builtins.map - f list - - Apply the function f to - each element in the list list. For - example, - - -map (x: "foo" + x) [ "bar" "bla" "abc" ] - - evaluates to [ "foobar" "foobla" "fooabc" - ]. - - - - - - builtins.match - regex str - - Returns a list if the extended - POSIX regular expression regex - matches str precisely, otherwise returns - null. Each item in the list is a regex group. - - -builtins.match "ab" "abc" - - -Evaluates to null. - - -builtins.match "abc" "abc" - - -Evaluates to [ ]. - - -builtins.match "a(b)(c)" "abc" - - -Evaluates to [ "b" "c" ]. - - -builtins.match "[[:space:]]+([[:upper:]]+)[[:space:]]+" " FOO " - - -Evaluates to [ "foo" ]. - - - - - - builtins.mul - e1 e2 - - Return the product of the numbers - e1 and - e2. - - - - - - builtins.parseDrvName - s - - Split the string s into - a package name and version. The package name is everything up to - but not including the first dash followed by a digit, and the - version is everything following that dash. The result is returned - in a set { name, version }. Thus, - builtins.parseDrvName "nix-0.12pre12876" - returns { name = "nix"; version = "0.12pre12876"; - }. - - - - - - builtins.path - args - - - - - An enrichment of the built-in path type, based on the attributes - present in args. All are optional - except path: - - - - - path - - The underlying path. - - - - name - - - The name of the path when added to the store. 
This can - used to reference paths that have nix-illegal characters - in their names, like @. - - - - - filter - - - A function of the type expected by - builtins.filterSource, - with the same semantics. - - - - - recursive - - - When false, when - path is added to the store it is with a - flat hash, rather than a hash of the NAR serialization of - the file. Thus, path must refer to a - regular file, not a directory. This allows similar - behavior to fetchurl. Defaults to - true. - - - - - sha256 - - - When provided, this is the expected hash of the file at - the path. Evaluation will fail if the hash is incorrect, - and providing a hash allows - builtins.path to be used even when the - pure-eval nix config option is on. - - - - - - - - - builtins.pathExists - path - - Return true if the path - path exists at evaluation time, and - false otherwise. - - - - - builtins.placeholder - output - - Return a placeholder string for the specified - output that will be substituted by the - corresponding output path at build time. Typical outputs would be - "out", "bin" or - "dev". - - - - builtins.readDir - path - - Return the contents of the directory - path as a set mapping directory entries - to the corresponding file type. For instance, if directory - A contains a regular file - B and another directory - C, then builtins.readDir - ./A will return the set - - -{ B = "regular"; C = "directory"; } - - The possible values for the file type are - "regular", "directory", - "symlink" and - "unknown". - - - - - - builtins.readFile - path - - Return the contents of the file - path as a string. - - - - - - removeAttrs - set list - builtins.removeAttrs - set list - - Remove the attributes listed in - list from - set. The attributes don’t have to - exist in set. For instance, - - -removeAttrs { x = 1; y = 2; z = 3; } [ "a" "x" "z" ] - - evaluates to { y = 2; }. - - - - - - builtins.replaceStrings - from to s - - Given string s, replace - every occurrence of the strings in from - with the corresponding string in - to. For example, - - -builtins.replaceStrings ["oo" "a"] ["a" "i"] "foobar" - - - evaluates to "fabir". - - - - - - builtins.seq - e1 e2 - - Evaluate e1, then - evaluate and return e2. This ensures - that a computation is strict in the value of - e1. - - - - - - builtins.sort - comparator list - - Return list in sorted - order. It repeatedly calls the function - comparator with two elements. The - comparator should return true if the first - element is less than the second, and false - otherwise. For example, - - -builtins.sort builtins.lessThan [ 483 249 526 147 42 77 ] - - - produces the list [ 42 77 147 249 483 526 - ]. - - This is a stable sort: it preserves the relative order of - elements deemed equal by the comparator. - - - - - - builtins.split - regex str - - Returns a list composed of non matched strings interleaved - with the lists of the extended - POSIX regular expression regex matches - of str. Each item in the lists of matched - sequences is a regex group. - - -builtins.split "(a)b" "abc" - - -Evaluates to [ "" [ "a" ] "c" ]. - - -builtins.split "([ac])" "abc" - - -Evaluates to [ "" [ "a" ] "b" [ "c" ] "" ]. - - -builtins.split "(a)|(c)" "abc" - - -Evaluates to [ "" [ "a" null ] "b" [ null "c" ] "" ]. - - -builtins.split "([[:upper:]]+)" " FOO " - - -Evaluates to [ " " [ "FOO" ] " " ]. - - - - - - - builtins.splitVersion - s - - Split a string representing a version into its - components, by the same version splitting logic underlying the - version comparison in - nix-env -u. 
- - - - - - builtins.stringLength - e - - Return the length of the string - e. If e is - not a string, evaluation is aborted. - - - - - - builtins.sub - e1 e2 - - Return the difference between the numbers - e1 and - e2. - - - - - - builtins.substring - start len - s - - Return the substring of - s from character position - start (zero-based) up to but not - including start + len. If - start is greater than the length of the - string, an empty string is returned, and if start + - len lies beyond the end of the string, only the - substring up to the end of the string is returned. - start must be - non-negative. For example, - - -builtins.substring 0 3 "nixos" - - - evaluates to "nix". - - - - - - - builtins.tail - list - - Return the second to last elements of a list; - abort evaluation if the argument isn’t a list or is an empty - list. - - - - - - throw - s - builtins.throw - s - - Throw an error message - s. This usually aborts Nix expression - evaluation, but in nix-env -qa and other - commands that try to evaluate a set of derivations to get - information about those derivations, a derivation that throws an - error is silently skipped (which is not the case for - abort). - - - - - - builtins.toFile - name - s - - Store the string s in a - file in the Nix store and return its path. The file has suffix - name. This file can be used as an - input to derivations. One application is to write builders - “inline”. For instance, the following Nix expression combines - and into one file: - - -{ stdenv, fetchurl, perl }: - -stdenv.mkDerivation { - name = "hello-2.1.1"; - - builder = builtins.toFile "builder.sh" " - source $stdenv/setup - - PATH=$perl/bin:$PATH - - tar xvfz $src - cd hello-* - ./configure --prefix=$out - make - make install - "; - - src = fetchurl { - url = http://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz; - sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; - }; - inherit perl; -} - - - - It is even possible for one file to refer to another, e.g., - - - builder = let - configFile = builtins.toFile "foo.conf" " - # This is some dummy configuration file. - ... - "; - in builtins.toFile "builder.sh" " - source $stdenv/setup - ... - cp ${configFile} $out/etc/foo.conf - "; - - Note that ${configFile} is an antiquotation - (see ), so the result of the - expression configFile (i.e., a path like - /nix/store/m7p7jfny445k...-foo.conf) will be - spliced into the resulting string. - - It is however not allowed to have files - mutually referring to each other, like so: - - -let - foo = builtins.toFile "foo" "...${bar}..."; - bar = builtins.toFile "bar" "...${foo}..."; -in foo - - This is not allowed because it would cause a cyclic dependency in - the computation of the cryptographic hashes for - foo and bar. - It is also not possible to reference the result of a derivation. - If you are using Nixpkgs, the writeTextFile function is able to - do that. - - - - - - builtins.toJSON e - - Return a string containing a JSON representation - of e. Strings, integers, floats, booleans, - nulls and lists are mapped to their JSON equivalents. Sets - (except derivations) are represented as objects. Derivations are - translated to a JSON string containing the derivation’s output - path. Paths are copied to the store and represented as a JSON - string of the resulting store path. - - - - - - builtins.toPath s - - DEPRECATED. Use /. + "/path" - to convert a string into an absolute path. For relative paths, - use ./. + "/path". 
- - - - - - - toString e - builtins.toString e - - Convert the expression - e to a string. - e can be: - - A string (in which case the string is returned unmodified). - A path (e.g., toString /foo/bar yields "/foo/bar". - A set containing { __toString = self: ...; }. - An integer. - A list, in which case the string representations of its elements are joined with spaces. - A Boolean (false yields "", true yields "1"). - null, which yields the empty string. - - - - - - - - builtins.toXML e - - Return a string containing an XML representation - of e. The main application for - toXML is to communicate information with the - builder in a more structured format than plain environment - variables. - - - - shows an example where this is - the case. The builder is supposed to generate the configuration - file for a Jetty - servlet container. A servlet container contains a number - of servlets (*.war files) each exported under - a specific URI prefix. So the servlet configuration is a list of - sets containing the path and - war of the servlet (). This kind of information is - difficult to communicate with the normal method of passing - information through an environment variable, which just - concatenates everything together into a string (which might just - work in this case, but wouldn’t work if fields are optional or - contain lists themselves). Instead the Nix expression is - converted to an XML representation with - toXML, which is unambiguous and can easily be - processed with the appropriate tools. For instance, in the - example an XSLT stylesheet () is applied to it () to - generate the XML configuration file for the Jetty server. The XML - representation produced from by toXML is shown in . - - Note that uses the toFile built-in to write the - builder and the stylesheet “inline” in the Nix expression. The - path of the stylesheet is spliced into the builder at - xsltproc ${stylesheet} - .... - - Passing information to a builder - using <function>toXML</function> - - $out/server-conf.xml]]> - - - - - - - - - - - - - "; - - servlets = builtins.toXML []]> - - - - XML representation produced by - <function>toXML</function> - - - - - - - - - - - - - - - - - - - - - -]]> - - - - - - - - - - builtins.trace - e1 e2 - - Evaluate e1 and print its - abstract syntax representation on standard error. Then return - e2. This function is useful for - debugging. - - - - - builtins.tryEval - e - - Try to shallowly evaluate e. - Return a set containing the attributes success - (true if e evaluated - successfully, false if an error was thrown) and - value, equalling e - if successful and false otherwise. Note that this - doesn't evaluate e deeply, so - let e = { x = throw ""; }; in (builtins.tryEval e).success - will be true. Using builtins.deepSeq - one can get the expected result: let e = { x = throw ""; - }; in (builtins.tryEval (builtins.deepSeq e e)).success will be - false. - - - - - - - builtins.typeOf - e - - Return a string representing the type of the value - e, namely "int", - "bool", "string", - "path", "null", - "set", "list", - "lambda" or - "float". - - - - - - - -
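Since the entries above are easiest to absorb next to concrete evaluations, here is a small self-contained sketch that exercises a handful of the list, string and attribute builtins from this section; it can be evaluated with nix-instantiate --eval --strict, and the expected values are noted in comments.

# Each attribute's expected value is noted alongside it.
{
  names    = builtins.attrNames { y = 1; x = "foo"; };              # [ "x" "y" ]
  summed   = builtins.foldl' (a: b: a + b) 0 [ 1 2 3 4 ];           # 10
  squares  = builtins.genList (n: n * n) 4;                         # [ 0 1 4 9 ]
  grouped  = builtins.match "([a-z]+)-([0-9.]+)" "hello-2.1.1";     # [ "hello" "2.1.1" ]
  joined   = builtins.concatStringsSep "/" [ "usr" "local" "bin" ]; # "usr/local/bin"
  asAttrs  = builtins.listToAttrs [
    { name = "foo"; value = 123; }
    { name = "bar"; value = 456; }
  ];                                                                # { bar = 456; foo = 123; }
  version  = builtins.parseDrvName "nix-0.12pre12876";              # { name = "nix"; version = "0.12pre12876"; }
  replaced = builtins.replaceStrings [ "oo" "a" ] [ "a" "i" ] "foobar"; # "fabir"
}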
diff --git a/third_party/nix/doc/manual/expressions/derivations.xml b/third_party/nix/doc/manual/expressions/derivations.xml
deleted file mode 100644
index 6f6297565c..0000000000
--- a/third_party/nix/doc/manual/expressions/derivations.xml
+++ /dev/null
@@ -1,211 +0,0 @@
-
- -Derivations - -The most important built-in function is -derivation, which is used to describe a single -derivation (a build action). It takes as input a set, the attributes -of which specify the inputs of the build. - - - - There must be an attribute named - system whose value must be a string specifying a - Nix platform identifier, such as "i686-linux" or - "x86_64-darwin"To figure out - your platform identifier, look at the line Checking for the - canonical Nix system name in the output of Nix's - configure script. The build - can only be performed on a machine and operating system matching the - platform identifier. (Nix can automatically forward builds for - other platforms by forwarding them to other machines; see .) - - There must be an attribute named - name whose value must be a string. This is used - as a symbolic name for the package by nix-env, - and it is appended to the output paths of the - derivation. - - There must be an attribute named - builder that identifies the program that is - executed to perform the build. It can be either a derivation or a - source (a local file reference, e.g., - ./builder.sh). - - Every attribute is passed as an environment variable - to the builder. Attribute values are translated to environment - variables as follows: - - - - Strings and numbers are just passed - verbatim. - - A path (e.g., - ../foo/sources.tar) causes the referenced - file to be copied to the store; its location in the store is put - in the environment variable. The idea is that all sources - should reside in the Nix store, since all inputs to a derivation - should reside in the Nix store. - - A derivation causes that - derivation to be built prior to the present derivation; its - default output path is put in the environment - variable. - - Lists of the previous types are also allowed. - They are simply concatenated, separated by - spaces. - - true is passed as the string - 1, false and - null are passed as an empty string. - - - - - - The optional attribute args - specifies command-line arguments to be passed to the builder. It - should be a list. - - The optional attribute outputs - specifies a list of symbolic outputs of the derivation. By default, - a derivation produces a single output path, denoted as - out. However, derivations can produce multiple - output paths. This is useful because it allows outputs to be - downloaded or garbage-collected separately. For instance, imagine a - library package that provides a dynamic library, header files, and - documentation. A program that links against the library doesn’t - need the header files and documentation at runtime, and it doesn’t - need the documentation at build time. Thus, the library package - could specify: - -outputs = [ "lib" "headers" "doc" ]; - - This will cause Nix to pass environment variables - lib, headers and - doc to the builder containing the intended store - paths of each output. The builder would typically do something like - -./configure --libdir=$lib/lib --includedir=$headers/include --docdir=$doc/share/doc - - for an Autoconf-style package. You can refer to each output of a - derivation by selecting it as an attribute, e.g. - -buildInputs = [ pkg.lib pkg.headers ]; - - The first element of outputs determines the - default output. Thus, you could also write - -buildInputs = [ pkg pkg.headers ]; - - since pkg is equivalent to - pkg.lib. 
- - - -The function mkDerivation in the Nixpkgs -standard environment is a wrapper around -derivation that adds a default value for -system and always uses Bash as the builder, to -which the supplied builder is passed as a command-line argument. See -the Nixpkgs manual for details. - -The builder is executed as follows: - - - - A temporary directory is created under the directory - specified by TMPDIR (default - /tmp) where the build will take place. The - current directory is changed to this directory. - - The environment is cleared and set to the derivation - attributes, as specified above. - - In addition, the following variables are set: - - - - NIX_BUILD_TOP contains the path of - the temporary directory for this build. - - Also, TMPDIR, - TEMPDIR, TMP, TEMP - are set to point to the temporary directory. This is to prevent - the builder from accidentally writing temporary files anywhere - else. Doing so might cause interference by other - processes. - - PATH is set to - /path-not-set to prevent shells from - initialising it to their built-in default value. - - HOME is set to - /homeless-shelter to prevent programs from - using /etc/passwd or the like to find the - user's home directory, which could cause impurity. Usually, when - HOME is set, it is used as the location of the home - directory, even if it points to a non-existent - path. - - NIX_STORE is set to the path of the - top-level Nix store directory (typically, - /nix/store). - - For each output declared in - outputs, the corresponding environment variable - is set to point to the intended path in the Nix store for that - output. Each output path is a concatenation of the cryptographic - hash of all build inputs, the name attribute - and the output name. (The output name is omitted if it’s - out.) - - - - - - If an output path already exists, it is removed. - Also, locks are acquired to prevent multiple Nix instances from - performing the same build at the same time. - - A log of the combined standard output and error is - written to /nix/var/log/nix. - - The builder is executed with the arguments specified - by the attribute args. If it exits with exit - code 0, it is considered to have succeeded. - - The temporary directory is removed (unless the - option was specified). - - If the build was successful, Nix scans each output - path for references to input paths by looking for the hash parts of - the input paths. Since these are potential runtime dependencies, - Nix registers them as dependencies of the output - paths. - - After the build, Nix sets the last-modified - timestamp on all files in the build result to 1 (00:00:01 1/1/1970 - UTC), sets the group to the default group, and sets the mode of the - file to 0444 or 0555 (i.e., read-only, with execute permission - enabled if the file was originally executable). Note that possible - setuid and setgid bits are - cleared. Setuid and setgid programs are not currently supported by - Nix. This is because the Nix archives used in deployment have no - concept of ownership information, and because it makes the build - result dependent on the user performing the build. - - - - - - - -
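For contrast with mkDerivation, the sketch below calls the raw derivation primitive directly with the attributes discussed in this section; the /bin/sh builder and the flat (file) outputs are simplifying assumptions made so the example needs no external tools.

# Raw use of `derivation`; /bin/sh and currentSystem are illustrative choices.
derivation {
  name    = "split-output-demo";
  system  = builtins.currentSystem;
  outputs = [ "out" "doc" ];                       # the builder sees $out and $doc
  builder = "/bin/sh";
  args    = [ "-c" "echo program > $out; echo manual > $doc" ];
}

Each declared output becomes an environment variable holding its intended store path, as described above; the first output, out, is the default one selected when the derivation is used as a dependency.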
diff --git a/third_party/nix/doc/manual/expressions/expression-language.xml b/third_party/nix/doc/manual/expressions/expression-language.xml deleted file mode 100644 index 240ef80f14..0000000000 --- a/third_party/nix/doc/manual/expressions/expression-language.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -Nix Expression Language - -The Nix expression language is a pure, lazy, functional -language. Purity means that operations in the language don't have -side-effects (for instance, there is no variable assignment). -Laziness means that arguments to functions are evaluated only when -they are needed. Functional means that functions are -normal values that can be passed around and manipulated -in interesting ways. The language is not a full-featured, general -purpose language. Its main job is to describe packages, -compositions of packages, and the variability within -packages. - -This section presents the various features of the -language. - - - - - - - - - diff --git a/third_party/nix/doc/manual/expressions/expression-syntax.xml b/third_party/nix/doc/manual/expressions/expression-syntax.xml deleted file mode 100644 index 42b9dca362..0000000000 --- a/third_party/nix/doc/manual/expressions/expression-syntax.xml +++ /dev/null @@ -1,148 +0,0 @@ -
- -Expression Syntax - -Nix expression for GNU Hello -(<filename>default.nix</filename>) - -{ stdenv, fetchurl, perl }: - -stdenv.mkDerivation { - name = "hello-2.1.1"; - builder = ./builder.sh; - src = fetchurl { - url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz; - sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; - }; - inherit perl; -} - - - shows a Nix expression for GNU -Hello. It's actually already in the Nix Packages collection in -pkgs/applications/misc/hello/ex-1/default.nix. -It is customary to place each package in a separate directory and call -the single Nix expression in that directory -default.nix. The file has the following elements -(referenced from the figure by number): - - - - - - This states that the expression is a - function that expects to be called with three - arguments: stdenv, fetchurl, - and perl. They are needed to build Hello, but - we don't know how to build them here; that's why they are function - arguments. stdenv is a package that is used - by almost all Nix Packages packages; it provides a - standard environment consisting of the things you - would expect in a basic Unix environment: a C/C++ compiler (GCC, - to be precise), the Bash shell, fundamental Unix tools such as - cp, grep, - tar, etc. fetchurl is a - function that downloads files. perl is the - Perl interpreter. - - Nix functions generally have the form { x, y, ..., - z }: e where x, y, - etc. are the names of the expected arguments, and where - e is the body of the function. So - here, the entire remainder of the file is the body of the - function; when given the required arguments, the body should - describe how to build an instance of the Hello package. - - - - - - So we have to build a package. Building something from - other stuff is called a derivation in Nix (as - opposed to sources, which are built by humans instead of - computers). We perform a derivation by calling - stdenv.mkDerivation. - mkDerivation is a function provided by - stdenv that builds a package from a set of - attributes. A set is just a list of - key/value pairs where each key is a string and each value is an - arbitrary Nix expression. They take the general form { - name1 = - expr1; ... - nameN = - exprN; }. - - - - - - The attribute name specifies the symbolic - name and version of the package. Nix doesn't really care about - these things, but they are used by for instance nix-env - -q to show a human-readable name for - packages. This attribute is required by - mkDerivation. - - - - - - The attribute builder specifies the - builder. This attribute can sometimes be omitted, in which case - mkDerivation will fill in a default builder - (which does a configure; make; make install, in - essence). Hello is sufficiently simple that the default builder - would suffice, but in this case, we will show an actual builder - for educational purposes. The value - ./builder.sh refers to the shell script shown - in , discussed below. - - - - - - The builder has to know what the sources of the package - are. Here, the attribute src is bound to the - result of a call to the fetchurl function. - Given a URL and a SHA-256 hash of the expected contents of the file - at that URL, this function builds a derivation that downloads the - file and checks its hash. So the sources are a dependency that - like all other dependencies is built before Hello itself is - built. - - Instead of src any other name could have - been used, and in fact there can be any number of sources (bound - to different attributes). 
However, src is - customary, and it's also expected by the default builder (which we - don't use in this example). - - - - - - Since the derivation requires Perl, we have to pass the - value of the perl function argument to the - builder. All attributes in the set are actually passed as - environment variables to the builder, so declaring an attribute - - -perl = perl; - - will do the trick: it binds an attribute perl - to the function argument which also happens to be called - perl. However, it looks a bit silly, so there - is a shorter syntax. The inherit keyword - causes the specified attributes to be bound to whatever variables - with the same name happen to be in scope. - - - - - - - -
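The inherit shorthand at the end of this example is easiest to see in isolation; the following toy comparison is a self-contained sketch in which a string stands in for the real perl derivation.

let perl = "<perl>"; in
rec {
  explicit  = { perl = perl; };        # the verbose binding spelled out
  shorthand = { inherit perl; };       # the same thing written with inherit
  same      = explicit == shorthand;   # true
}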
diff --git a/third_party/nix/doc/manual/expressions/generic-builder.xml b/third_party/nix/doc/manual/expressions/generic-builder.xml
deleted file mode 100644
index db7ff405d8..0000000000
--- a/third_party/nix/doc/manual/expressions/generic-builder.xml
+++ /dev/null
@@ -1,98 +0,0 @@
-
- -Generic Builder Syntax - -Recall from that the builder -looked something like this: - - -PATH=$perl/bin:$PATH -tar xvfz $src -cd hello-* -./configure --prefix=$out -make -make install - -The builders for almost all Unix packages look like this — set up some -environment variables, unpack the sources, configure, build, and -install. For this reason the standard environment provides some Bash -functions that automate the build process. A builder using the -generic build facilities in shown in . - -Build script using the generic -build functions - -buildInputs="$perl" - -source $stdenv/setup - -genericBuild - - - - - - - The buildInputs variable tells - setup to use the indicated packages as - inputs. This means that if a package provides a - bin subdirectory, it's added to - PATH; if it has a include - subdirectory, it's added to GCC's header search path; and so - on.How does it work? setup - tries to source the file - pkg/nix-support/setup-hook - of all dependencies. These “setup hooks” can then set up whatever - environment variables they want; for instance, the setup hook for - Perl sets the PERL5LIB environment variable to - contain the lib/site_perl directories of all - inputs. - - - - - - - The function genericBuild is defined in - the file $stdenv/setup. - - - - - - The final step calls the shell function - genericBuild, which performs the steps that - were done explicitly in . The - generic builder is smart enough to figure out whether to unpack - the sources using gzip, - bzip2, etc. It can be customised in many ways; - see the Nixpkgs manual for details. - - - - - -Discerning readers will note that the -buildInputs could just as well have been set in the Nix -expression, like this: - - - buildInputs = [ perl ]; - -The perl attribute can then be removed, and the -builder becomes even shorter: - - -source $stdenv/setup -genericBuild - -In fact, mkDerivation provides a default builder -that looks exactly like that, so it is actually possible to omit the -builder for Hello entirely. - -
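Spelling that last remark out, a Hello expression that relies entirely on the default builder could look roughly like the sketch below; it assumes stdenv, fetchurl and perl come from an imported <nixpkgs>, and it reuses the URL and hash from the earlier Hello example.

# Hello with the generic builder and no builder attribute at all.
with import <nixpkgs> { };

stdenv.mkDerivation {
  name = "hello-2.1.1";
  src = fetchurl {
    url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz";
    sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
  };
  buildInputs = [ perl ];   # the setup hooks put $perl/bin on PATH
}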
diff --git a/third_party/nix/doc/manual/expressions/language-constructs.xml b/third_party/nix/doc/manual/expressions/language-constructs.xml
deleted file mode 100644
index 0d0cbbe155..0000000000
--- a/third_party/nix/doc/manual/expressions/language-constructs.xml
+++ /dev/null
@@ -1,409 +0,0 @@
-
- -Language Constructs - -Recursive sets - -Recursive sets are just normal sets, but the attributes can -refer to each other. For example, - - -rec { - x = y; - y = 123; -}.x - - -evaluates to 123. Note that without -rec the binding x = y; would -refer to the variable y in the surrounding scope, -if one exists, and would be invalid if no such variable exists. That -is, in a normal (non-recursive) set, attributes are not added to the -lexical scope; in a recursive set, they are. - -Recursive sets of course introduce the danger of infinite -recursion. For example, - - -rec { - x = y; - y = x; -}.x - -does not terminateActually, Nix detects infinite -recursion in this case and aborts (infinite recursion -encountered).. - - - - -Let-expressions - -A let-expression allows you to define local variables for an -expression. For instance, - - -let - x = "foo"; - y = "bar"; -in x + y - -evaluates to "foobar". - - - - - - -Inheriting attributes - -When defining a set or in a let-expression it is often convenient to copy variables -from the surrounding lexical scope (e.g., when you want to propagate -attributes). This can be shortened using the -inherit keyword. For instance, - - -let x = 123; in -{ inherit x; - y = 456; -} - -is equivalent to - - -let x = 123; in -{ x = x; - y = 456; -} - -and both evaluate to { x = 123; y = 456; }. (Note that -this works because x is added to the lexical scope -by the let construct.) It is also possible to -inherit attributes from another set. For instance, in this fragment -from all-packages.nix, - - - graphviz = (import ../tools/graphics/graphviz) { - inherit fetchurl stdenv libpng libjpeg expat x11 yacc; - inherit (xlibs) libXaw; - }; - - xlibs = { - libX11 = ...; - libXaw = ...; - ... - } - - libpng = ...; - libjpg = ...; - ... - -the set used in the function call to the function defined in -../tools/graphics/graphviz inherits a number of -variables from the surrounding scope (fetchurl -... yacc), but also inherits -libXaw (the X Athena Widgets) from the -xlibs (X11 client-side libraries) set. - - -Summarizing the fragment - - -... -inherit x y z; -inherit (src-set) a b c; -... - -is equivalent to - - -... -x = x; y = y; z = z; -a = src-set.a; b = src-set.b; c = src-set.c; -... - -when used while defining local variables in a let-expression or -while defining a set. - - - - -Functions - -Functions have the following form: - - -pattern: body - -The pattern specifies what the argument of the function must look -like, and binds variables in the body to (parts of) the -argument. There are three kinds of patterns: - - - - - If a pattern is a single identifier, then the - function matches any argument. Example: - - -let negate = x: !x; - concat = x: y: x + y; -in if negate true then concat "foo" "bar" else "" - - Note that concat is a function that takes one - argument and returns a function that takes another argument. This - allows partial parameterisation (i.e., only filling some of the - arguments of a function); e.g., - - -map (concat "foo") [ "bar" "bla" "abc" ] - - evaluates to [ "foobar" "foobla" - "fooabc" ]. - - - A set pattern of the form - { name1, name2, …, nameN } matches a set - containing the listed attributes, and binds the values of those - attributes to variables in the function body. For example, the - function - - -{ x, y, z }: z + y + x - - can only be called with a set containing exactly the attributes - x, y and - z. No other attributes are allowed. 
If you want - to allow additional arguments, you can use an ellipsis - (...): - - -{ x, y, z, ... }: z + y + x - - This works on any set that contains at least the three named - attributes. - - It is possible to provide default values - for attributes, in which case they are allowed to be missing. A - default value is specified by writing - name ? - e, where - e is an arbitrary expression. For example, - - -{ x, y ? "foo", z ? "bar" }: z + y + x - - specifies a function that only requires an attribute named - x, but optionally accepts y - and z. - - - An @-pattern provides a means of referring - to the whole value being matched: - - args@{ x, y, z, ... }: z + y + x + args.a - -but can also be written as: - - { x, y, z, ... } @ args: z + y + x + args.a - - Here args is bound to the entire argument, which - is further matched against the pattern { x, y, z, - ... }. @-pattern makes mainly sense with an - ellipsis(...) as you can access attribute names as - a, using args.a, which was given as an - additional attribute to the function. - - - - - The args@ expression is bound to the argument passed to the function which - means that attributes with defaults that aren't explicitly specified in the function call - won't cause an evaluation error, but won't exist in args. - - - For instance - -let - function = args@{ a ? 23, ... }: args; -in - function {} - - will evaluate to an empty attribute set. - - - - - -Note that functions do not have names. If you want to give them -a name, you can bind them to an attribute, e.g., - - -let concat = { x, y }: x + y; -in concat { x = "foo"; y = "bar"; } - - - - - - -Conditionals - -Conditionals look like this: - - -if e1 then e2 else e3 - -where e1 is an expression that should -evaluate to a Boolean value (true or -false). - - - - -Assertions - -Assertions are generally used to check that certain requirements -on or between features and dependencies hold. They look like this: - - -assert e1; e2 - -where e1 is an expression that should -evaluate to a Boolean value. If it evaluates to -true, e2 is returned; -otherwise expression evaluation is aborted and a backtrace is printed. - -Nix expression for Subversion - -{ localServer ? false -, httpServer ? false -, sslSupport ? false -, pythonBindings ? false -, javaSwigBindings ? false -, javahlBindings ? false -, stdenv, fetchurl -, openssl ? null, httpd ? null, db4 ? null, expat, swig ? null, j2sdk ? null -}: - -assert localServer -> db4 != null; -assert httpServer -> httpd != null && httpd.expat == expat; -assert sslSupport -> openssl != null && (httpServer -> httpd.openssl == openssl); -assert pythonBindings -> swig != null && swig.pythonSupport; -assert javaSwigBindings -> swig != null && swig.javaSupport; -assert javahlBindings -> j2sdk != null; - -stdenv.mkDerivation { - name = "subversion-1.1.1"; - ... - openssl = if sslSupport then openssl else null; - ... -} - - - show how assertions are -used in the Nix expression for Subversion. - - - - - This assertion states that if Subversion is to have support - for local repositories, then Berkeley DB is needed. So if the - Subversion function is called with the - localServer argument set to - true but the db4 argument - set to null, then the evaluation fails. - - - - This is a more subtle condition: if Subversion is built with - Apache (httpServer) support, then the Expat - library (an XML library) used by Subversion should be same as the - one used by Apache. 
This is because in this configuration - Subversion code ends up being linked with Apache code, and if the - Expat libraries do not match, a build- or runtime link error or - incompatibility might occur. - - - - This assertion says that in order for Subversion to have SSL - support (so that it can access https URLs), an - OpenSSL library must be passed. Additionally, it says that - if Apache support is enabled, then Apache's - OpenSSL should match Subversion's. (Note that if Apache support - is not enabled, we don't care about Apache's OpenSSL.) - - - - The conditional here is not really related to assertions, - but is worth pointing out: it ensures that if SSL support is - disabled, then the Subversion derivation is not dependent on - OpenSSL, even if a non-null value was passed. - This prevents an unnecessary rebuild of Subversion if OpenSSL - changes. - - - - - - - - -With-expressions - -A with-expression, - - -with e1; e2 - -introduces the set e1 into the lexical -scope of the expression e2. For instance, - - -let as = { x = "foo"; y = "bar"; }; -in with as; x + y - -evaluates to "foobar" since the -with adds the x and -y attributes of as to the -lexical scope in the expression x + y. The most -common use of with is in conjunction with the -import function. E.g., - - -with (import ./definitions.nix); ... - -makes all attributes defined in the file -definitions.nix available as if they were defined -locally in a let-expression. - -The bindings introduced by with do not shadow bindings -introduced by other means, e.g. - - -let a = 3; in with { a = 1; }; let a = 4; in with { a = 2; }; ... - -establishes the same scope as - - -let a = 1; in let a = 2; in let a = 3; in let a = 4; in ... - - - - - - -Comments - -Comments can be single-line, started with a # -character, or inline/multi-line, enclosed within /* -... */. - - - - -
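To tie the constructs of this section together, here is a small self-contained expression (with made-up names) that combines a set pattern with a default, an ellipsis, an @-binding, an assertion, a with-expression and a recursive set; it can be evaluated directly with nix-instantiate --eval.

let
  describe = args@{ name, version ? "0.0", ... }:          # pattern, default, ellipsis, @-binding
    assert builtins.isString name;                         # assertion, as described above
    rec {
      tag     = "${name}-${version}";                      # "demo-1.2"
      license = if args ? license then args.license else "unknown";
      summary = with { sep = " / "; }; tag + sep + license;  # with-expression
    };
in (describe { name = "demo"; version = "1.2"; license = "mit"; }).summary
# evaluates to "demo-1.2 / mit"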
diff --git a/third_party/nix/doc/manual/expressions/language-operators.xml b/third_party/nix/doc/manual/expressions/language-operators.xml
deleted file mode 100644
index 4f11bf5293..0000000000
--- a/third_party/nix/doc/manual/expressions/language-operators.xml
+++ /dev/null
@@ -1,222 +0,0 @@
-
- -Operators - - lists the operators in the -Nix expression language, in order of precedence (from strongest to -weakest binding). - - - Operators - - - - Name - Syntax - Associativity - Description - Precedence - - - - - Select - e . - attrpath - [ or def ] - - none - Select attribute denoted by the attribute path - attrpath from set - e. (An attribute path is a - dot-separated list of attribute names.) If the attribute - doesn’t exist, return def if - provided, otherwise abort evaluation. - 1 - - - Application - e1 e2 - left - Call function e1 with - argument e2. - 2 - - - Arithmetic Negation - - e - none - Arithmetic negation. - 3 - - - Has Attribute - e ? - attrpath - none - Test whether set e contains - the attribute denoted by attrpath; - return true or - false. - 4 - - - List Concatenation - e1 ++ e2 - right - List concatenation. - 5 - - - Multiplication - - e1 * e2, - - left - Arithmetic multiplication. - 6 - - - Division - - e1 / e2 - - left - Arithmetic division. - 6 - - - Addition - - e1 + e2 - - left - Arithmetic addition. - 7 - - - Subtraction - - e1 - e2 - - left - Arithmetic subtraction. - 7 - - - String Concatenation - - string1 + string2 - - left - String concatenation. - 7 - - - Not - ! e - none - Boolean negation. - 8 - - - Update - e1 // - e2 - right - Return a set consisting of the attributes in - e1 and - e2 (with the latter taking - precedence over the former in case of equally named - attributes). - 9 - - - Less Than - - e1 < e2, - - none - Arithmetic comparison. - 10 - - - Less Than or Equal To - - e1 <= e2 - - none - Arithmetic comparison. - 10 - - - Greater Than - - e1 > e2 - - none - Arithmetic comparison. - 10 - - - Greater Than or Equal To - - e1 >= e2 - - none - Arithmetic comparison. - 10 - - - Equality - - e1 == e2 - - none - Equality. - 11 - - - Inequality - - e1 != e2 - - none - Inequality. - 11 - - - Logical AND - e1 && - e2 - left - Logical AND. - 12 - - - Logical OR - e1 || - e2 - left - Logical OR. - 13 - - - Logical Implication - e1 -> - e2 - none - Logical implication (equivalent to - !e1 || - e2). - 14 - - - -
- -
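As a brief illustration of the table above (attribute names invented for the example), the update operator // merges two sets with the right operand taking precedence, and ? binds more tightly than &&, so the attribute test needs no parentheses:

let
  defaults = { port = 80;   tls = false; };
  override = { port = 8443; tls = true;  };
  cfg = defaults // override;   # { port = 8443; tls = true; }
in cfg ? tls && cfg.tls         # parsed as (cfg ? tls) && cfg.tls; evaluates to true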
diff --git a/third_party/nix/doc/manual/expressions/language-values.xml b/third_party/nix/doc/manual/expressions/language-values.xml deleted file mode 100644 index bb2090c881..0000000000 --- a/third_party/nix/doc/manual/expressions/language-values.xml +++ /dev/null @@ -1,313 +0,0 @@ -
- -Values - - -Simple Values - -Nix has the following basic data types: - - - - - - Strings can be written in three - ways. - - The most common way is to enclose the string between double - quotes, e.g., "foo bar". Strings can span - multiple lines. The special characters " and - \ and the character sequence - ${ must be escaped by prefixing them with a - backslash (\). Newlines, carriage returns and - tabs can be written as \n, - \r and \t, - respectively. - - You can include the result of an expression into a string by - enclosing it in - ${...}, a feature - known as antiquotation. The enclosed - expression must evaluate to something that can be coerced into a - string (meaning that it must be a string, a path, or a - derivation). For instance, rather than writing - - -"--with-freetype2-library=" + freetype + "/lib" - - (where freetype is a derivation), you can - instead write the more natural - - -"--with-freetype2-library=${freetype}/lib" - - The latter is automatically translated to the former. A more - complicated example (from the Nix expression for Qt): - - -configureFlags = " - -system-zlib -system-libpng -system-libjpeg - ${if openglSupport then "-dlopen-opengl - -L${mesa}/lib -I${mesa}/include - -L${libXmu}/lib -I${libXmu}/include" else ""} - ${if threadSupport then "-thread" else "-no-thread"} -"; - - Note that Nix expressions and strings can be arbitrarily nested; - in this case the outer string contains various antiquotations that - themselves contain strings (e.g., "-thread"), - some of which in turn contain expressions (e.g., - ${mesa}). - - The second way to write string literals is as an - indented string, which is enclosed between - pairs of double single-quotes, like so: - - -'' - This is the first line. - This is the second line. - This is the third line. -'' - - This kind of string literal intelligently strips indentation from - the start of each line. To be precise, it strips from each line a - number of spaces equal to the minimal indentation of the string as - a whole (disregarding the indentation of empty lines). For - instance, the first and second line are indented two space, while - the third line is indented four spaces. Thus, two spaces are - stripped from each line, so the resulting string is - - -"This is the first line.\nThis is the second line.\n This is the third line.\n" - - - - Note that the whitespace and newline following the opening - '' is ignored if there is no non-whitespace - text on the initial line. - - Antiquotation - (${expr}) is - supported in indented strings. - - Since ${ and '' have - special meaning in indented strings, you need a way to quote them. - $ can be escaped by prefixing it with - '' (that is, two single quotes), i.e., - ''$. '' can be escaped by - prefixing it with ', i.e., - '''. $ removes any special meaning - from the following $. Linefeed, carriage-return and tab - characters can be written as ''\n, - ''\r, ''\t, and ''\ - escapes any other character. - - - - Indented strings are primarily useful in that they allow - multi-line string literals to follow the indentation of the - enclosing Nix expression, and that less escaping is typically - necessary for strings representing languages such as shell scripts - and configuration files because '' is much less - common than ". Example: - - -stdenv.mkDerivation { - ... - postInstall = - '' - mkdir $out/bin $out/etc - cp foo $out/bin - echo "Hello World" > $out/etc/foo.conf - ${if enableBar then "cp bar $out/bin" else ""} - ''; - ... 
-} - - - - - Finally, as a convenience, URIs as - defined in appendix B of RFC 2396 - can be written as is, without quotes. For - instance, the string - "http://example.org/foo.tar.bz2" - can also be written as - http://example.org/foo.tar.bz2. - - - - Numbers, which can be integers (like - 123) or floating point (like - 123.43 or .27e13). - - Numbers are type-compatible: pure integer operations will always - return integers, whereas any operation involving at least one floating point - number will have a floating point number as a result. - - Paths, e.g., - /bin/sh or ./builder.sh. - A path must contain at least one slash to be recognised as such; for - instance, builder.sh is not a - pathIt's parsed as an expression that selects the - attribute sh from the variable - builder.. If the file name is - relative, i.e., if it does not begin with a slash, it is made - absolute at parse time relative to the directory of the Nix - expression that contained it. For instance, if a Nix expression in - /foo/bar/bla.nix refers to - ../xyzzy/fnord.nix, the absolute path is - /foo/xyzzy/fnord.nix. - - If the first component of a path is a ~, - it is interpreted as if the rest of the path were relative to the - user's home directory. e.g. ~/foo would be - equivalent to /home/edolstra/foo for a user - whose home directory is /home/edolstra. - - - Paths can also be specified between angle brackets, e.g. - <nixpkgs>. This means that the directories - listed in the environment variable - NIX_PATH will be searched - for the given file or directory name. - - - - - Booleans with values - true and - false. - - The null value, denoted as - null. - - - - - - - - -Lists - -Lists are formed by enclosing a whitespace-separated list of -values between square brackets. For example, - - -[ 123 ./foo.nix "abc" (f { x = y; }) ] - -defines a list of four elements, the last being the result of a call -to the function f. Note that function calls have -to be enclosed in parentheses. If they had been omitted, e.g., - - -[ 123 ./foo.nix "abc" f { x = y; } ] - -the result would be a list of five elements, the fourth one being a -function and the fifth being a set. - -Note that lists are only lazy in values, and they are strict in length. - - - - - -Sets - -Sets are really the core of the language, since ultimately the -Nix language is all about creating derivations, which are really just -sets of attributes to be passed to build scripts. - -Sets are just a list of name/value pairs (called -attributes) enclosed in curly brackets, where -each value is an arbitrary expression terminated by a semicolon. For -example: - - -{ x = 123; - text = "Hello"; - y = f { bla = 456; }; -} - -This defines a set with attributes named x, -text, y. The order of the -attributes is irrelevant. An attribute name may only occur -once. - -Attributes can be selected from a set using the -. operator. For instance, - - -{ a = "Foo"; b = "Bar"; }.a - -evaluates to "Foo". It is possible to provide a -default value in an attribute selection using the -or keyword. For example, - - -{ a = "Foo"; b = "Bar"; }.c or "Xyzzy" - -will evaluate to "Xyzzy" because there is no -c attribute in the set. - -You can use arbitrary double-quoted strings as attribute -names: - - -{ "foo ${bar}" = 123; "nix-1.0" = 456; }."foo ${bar}" - - -This will evaluate to 123 (Assuming -bar is antiquotable). 
In the case where an -attribute name is just a single antiquotation, the quotes can be -dropped: - - -{ foo = 123; }.${bar} or 456 - -This will evaluate to 123 if -bar evaluates to "foo" when -coerced to a string and 456 otherwise (again -assuming bar is antiquotable). - -In the special case where an attribute name inside of a set declaration -evaluates to null (which is normally an error, as -null is not antiquotable), that attribute is simply not -added to the set: - - -{ ${if foo then "bar" else null} = true; } - -This will evaluate to {} if foo -evaluates to false. - -A set that has a __functor attribute whose value -is callable (i.e. is itself a function or a set with a -__functor attribute whose value is callable) can be -applied as if it were a function, with the set itself passed in first -, e.g., - - -let add = { __functor = self: x: x + self.x; }; - inc = add // { x = 1; }; -in inc 1 - - -evaluates to 2. This can be used to attach metadata to a -function without the caller needing to treat it specially, or to implement -a form of object-oriented programming, for example. - - - - - - -
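Tying the string and set rules together, a small sketch with invented attribute names: antiquotation only accepts values that coerce to strings (strings, paths, derivations), so a number must go through toString first, and the or keyword supplies a fallback for a missing attribute.

let
  cfg = { host = "example.org"; port = 8080; };
in {
  # "${cfg.port}" by itself would be an error: integers are not coerced
  url    = "http://${cfg.host}:${toString cfg.port}/";
  # `or` provides a default because cfg has no `scheme` attribute
  scheme = cfg.scheme or "http";
}

This evaluates to { scheme = "http"; url = "http://example.org:8080/"; }.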
diff --git a/third_party/nix/doc/manual/expressions/simple-building-testing.xml b/third_party/nix/doc/manual/expressions/simple-building-testing.xml deleted file mode 100644 index 7326a3e76a..0000000000 --- a/third_party/nix/doc/manual/expressions/simple-building-testing.xml +++ /dev/null @@ -1,84 +0,0 @@ -
- -Building and Testing - -You can now try to build Hello. Of course, you could do -nix-env -i hello, but you may not want to install a -possibly broken package just yet. The best way to test the package is by -using the command nix-build, -which builds a Nix expression and creates a symlink named -result in the current directory: - - -$ nix-build -A hello -building path `/nix/store/632d2b22514d...-hello-2.1.1' -hello-2.1.1/ -hello-2.1.1/intl/ -hello-2.1.1/intl/ChangeLog -... - -$ ls -l result -lrwxrwxrwx ... 2006-09-29 10:43 result -> /nix/store/632d2b22514d...-hello-2.1.1 - -$ ./result/bin/hello -Hello, world! - -The option selects -the hello attribute. This is faster than using the -symbolic package name specified by the name -attribute (which also happens to be hello) and is -unambiguous (there can be multiple packages with the symbolic name -hello, but there can be only one attribute in a set -named hello). - -nix-build registers the -./result symlink as a garbage collection root, so -unless and until you delete the ./result symlink, -the output of the build will be safely kept on your system. You can -use nix-build’s switch to give the symlink another -name. - -Nix has transactional semantics. Once a build finishes -successfully, Nix makes a note of this in its database: it registers -that the path denoted by out is now -valid. If you try to build the derivation again, Nix -will see that the path is already valid and finish immediately. If a -build fails, either because it returns a non-zero exit code, because -Nix or the builder are killed, or because the machine crashes, then -the output paths will not be registered as valid. If you try to build -the derivation again, Nix will remove the output paths if they exist -(e.g., because the builder died half-way through make -install) and try again. Note that there is no -negative caching: Nix doesn't remember that a build -failed, and so a failed build can always be repeated. This is because -Nix cannot distinguish between permanent failures (e.g., a compiler -error due to a syntax error in the source) and transient failures -(e.g., a disk full condition). - -Nix also performs locking. If you run multiple Nix builds -simultaneously, and they try to build the same derivation, the first -Nix instance that gets there will perform the build, while the others -block (or perform other derivations if available) until the build -finishes: - - -$ nix-build -A hello -waiting for lock on `/nix/store/0h5b7hp8d4hqfrw8igvx97x1xawrjnac-hello-2.1.1x' - -So it is always safe to run multiple instances of Nix in parallel -(which isn’t the case with, say, make). - -If you have a system with multiple CPUs, you may want to have -Nix build different derivations in parallel (insofar as possible). -Just pass the option , where -N is the maximum number of jobs to be run -in parallel, or set. Typically this should be the number of -CPUs. - -
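The attribute given to -A simply names a value in the set that the top-level Nix expression evaluates to. As a rough sketch only (the file layout and attribute names here are assumptions, not the actual Nixpkgs structure), a minimal default.nix exposing such an attribute could look like this:

# default.nix (hypothetical): `nix-build -A hello` selects the `hello` attribute
{ pkgs ? import <nixpkgs> {} }:

{
  hello = pkgs.hello;
}

Running nix-build -A hello against a file like this builds the selected derivation and leaves the usual result symlink in the current directory.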
diff --git a/third_party/nix/doc/manual/expressions/simple-expression.xml b/third_party/nix/doc/manual/expressions/simple-expression.xml deleted file mode 100644 index 29fd872eea..0000000000 --- a/third_party/nix/doc/manual/expressions/simple-expression.xml +++ /dev/null @@ -1,47 +0,0 @@ - - -A Simple Nix Expression - -This section shows how to add and test the GNU Hello -package to the Nix Packages collection. Hello is a program -that prints out the text Hello, world!. - -To add a package to the Nix Packages collection, you generally -need to do three things: - - - - Write a Nix expression for the package. This is a - file that describes all the inputs involved in building the package, - such as dependencies, sources, and so on. - - Write a builder. This is a - shell scriptIn fact, it can be written in any - language, but typically it's a bash shell - script. that actually builds the package from - the inputs. - - Add the package to the file - pkgs/top-level/all-packages.nix. The Nix - expression written in the first step is a - function; it requires other packages in order - to build it. In this step you put it all together, i.e., you call - the function with the right arguments to build the actual - package. - - - - - - - - - - - - diff --git a/third_party/nix/doc/manual/expressions/writing-nix-expressions.xml b/third_party/nix/doc/manual/expressions/writing-nix-expressions.xml deleted file mode 100644 index 6646dddf08..0000000000 --- a/third_party/nix/doc/manual/expressions/writing-nix-expressions.xml +++ /dev/null @@ -1,26 +0,0 @@ - - -Writing Nix Expressions - - -This chapter shows you how to write Nix expressions, which -instruct Nix how to build packages. It starts with a -simple example (a Nix expression for GNU Hello), and then moves -on to a more in-depth look at the Nix expression language. - -This chapter is mostly about the Nix expression language. -For more extensive information on adding packages to the Nix Packages -collection (such as functions in the standard environment and coding -conventions), please consult its -manual. - - - - - - diff --git a/third_party/nix/doc/manual/figures/user-environments.png b/third_party/nix/doc/manual/figures/user-environments.png deleted file mode 100644 index 1f781cf23c..0000000000 Binary files a/third_party/nix/doc/manual/figures/user-environments.png and /dev/null differ diff --git a/third_party/nix/doc/manual/figures/user-environments.sxd b/third_party/nix/doc/manual/figures/user-environments.sxd deleted file mode 100644 index bc661b6406..0000000000 Binary files a/third_party/nix/doc/manual/figures/user-environments.sxd and /dev/null differ diff --git a/third_party/nix/doc/manual/glossary/glossary.xml b/third_party/nix/doc/manual/glossary/glossary.xml deleted file mode 100644 index e3162ed8d4..0000000000 --- a/third_party/nix/doc/manual/glossary/glossary.xml +++ /dev/null @@ -1,199 +0,0 @@ - - -Glossary - - - - - -derivation - - A description of a build action. The result of a - derivation is a store object. Derivations are typically specified - in Nix expressions using the derivation - primitive. These are translated into low-level - store derivations (implicitly by - nix-env and nix-build, or - explicitly by nix-instantiate). - - - - -store - - The location in the file system where store objects - live. Typically /nix/store. - - - - -store path - - The location in the file system of a store object, - i.e., an immediate child of the Nix store - directory. 
- - - - -store object - - A file that is an immediate child of the Nix store - directory. These can be regular files, but also entire directory - trees. Store objects can be sources (objects copied from outside of - the store), derivation outputs (objects produced by running a build - action), or derivations (files describing a build - action). - - - - -substitute - - A substitute is a command invocation stored in the - Nix database that describes how to build a store object, bypassing - the normal build mechanism (i.e., derivations). Typically, the - substitute builds the store object by downloading a pre-built - version of the store object from some server. - - - - -purity - - The assumption that equal Nix derivations when run - always produce the same output. This cannot be guaranteed in - general (e.g., a builder can rely on external inputs such as the - network or the system time) but the Nix model assumes - it. - - - - -Nix expression - - A high-level description of software packages and - compositions thereof. Deploying software using Nix entails writing - Nix expressions for your packages. Nix expressions are translated - to derivations that are stored in the Nix store. These derivations - can then be built. - - - - -reference - - - A store path P is said to have a - reference to a store path Q if the store object - at P contains the path Q - somewhere. The references of a store path are - the set of store paths to which it has a reference. - - A derivation can reference other derivations and sources - (but not output paths), whereas an output path only references other - output paths. - - - - - -reachable - - A store path Q is reachable from - another store path P if Q is in the - closure of the - references relation. - - - -closure - - The closure of a store path is the set of store - paths that are directly or indirectly “reachable” from that store - path; that is, it’s the closure of the path under the references relation. For a package, the - closure of its derivation is equivalent to the build-time - dependencies, while the closure of its output path is equivalent to its - runtime dependencies. For correct deployment it is necessary to deploy whole - closures, since otherwise at runtime files could be missing. The command - nix-store -qR prints out closures of store paths. - - As an example, if the store object at path P contains - a reference to path Q, then Q is - in the closure of P. Further, if Q - references R then R is also in - the closure of P. - - - - - -output path - - A store path produced by a derivation. - - - - -deriver - - The deriver of an output path is the store - derivation that built it. - - - - -validity - - A store path is considered - valid if it exists in the file system, is - listed in the Nix database as being valid, and if all paths in its - closure are also valid. - - - - -user environment - - An automatically generated store object that - consists of a set of symlinks to “active” applications, i.e., other - store paths. These are generated automatically by nix-env. See . - - - - - - -profile - - A symlink to the current user environment of a user, e.g., - /nix/var/nix/profiles/default. - - - - -NAR - - A Nix - ARchive. This is a serialisation of a path in - the Nix store. It can contain regular files, directories and - symbolic links. NARs are generated and unpacked using - nix-store --dump and nix-store - --restore. 
- - - - - - - - - diff --git a/third_party/nix/doc/manual/hacking.xml b/third_party/nix/doc/manual/hacking.xml deleted file mode 100644 index b671811d3a..0000000000 --- a/third_party/nix/doc/manual/hacking.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -Hacking - -This section provides some notes on how to hack on Nix. To get -the latest version of Nix from GitHub: - -$ git clone git://github.com/NixOS/nix.git -$ cd nix - - - -To build it and its dependencies: - -$ nix-build release.nix -A build.x86_64-linux - - - -To build all dependencies and start a shell in which all -environment variables are set up so that those dependencies can be -found: - -$ nix-shell - -To build Nix itself in this shell: - -[nix-shell]$ ./bootstrap.sh -[nix-shell]$ configurePhase -[nix-shell]$ make - -To install it in $(pwd)/inst and test it: - -[nix-shell]$ make install -[nix-shell]$ make installcheck - - - - - diff --git a/third_party/nix/doc/manual/images/callouts/1.gif b/third_party/nix/doc/manual/images/callouts/1.gif deleted file mode 100644 index 9e7a87f754..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/1.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/10.gif b/third_party/nix/doc/manual/images/callouts/10.gif deleted file mode 100644 index e80f7f8e63..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/10.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/11.gif b/third_party/nix/doc/manual/images/callouts/11.gif deleted file mode 100644 index 67f91a239d..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/11.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/12.gif b/third_party/nix/doc/manual/images/callouts/12.gif deleted file mode 100644 index 54c4b42f19..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/12.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/13.gif b/third_party/nix/doc/manual/images/callouts/13.gif deleted file mode 100644 index dd5d7d9b64..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/13.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/14.gif b/third_party/nix/doc/manual/images/callouts/14.gif deleted file mode 100644 index 3d7a952a31..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/14.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/15.gif b/third_party/nix/doc/manual/images/callouts/15.gif deleted file mode 100644 index 1c9183d5bb..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/15.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/2.gif b/third_party/nix/doc/manual/images/callouts/2.gif deleted file mode 100644 index 94d42a30f9..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/2.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/3.gif b/third_party/nix/doc/manual/images/callouts/3.gif deleted file mode 100644 index dd3541a1bc..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/3.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/4.gif b/third_party/nix/doc/manual/images/callouts/4.gif deleted file mode 100644 index 4bcbf7e31a..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/4.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/5.gif b/third_party/nix/doc/manual/images/callouts/5.gif deleted 
file mode 100644 index 1c62b4f920..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/5.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/6.gif b/third_party/nix/doc/manual/images/callouts/6.gif deleted file mode 100644 index 23bc5555d2..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/6.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/7.gif b/third_party/nix/doc/manual/images/callouts/7.gif deleted file mode 100644 index e55ce89585..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/7.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/8.gif b/third_party/nix/doc/manual/images/callouts/8.gif deleted file mode 100644 index 49375e09f4..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/8.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/images/callouts/9.gif b/third_party/nix/doc/manual/images/callouts/9.gif deleted file mode 100644 index da12a4fe28..0000000000 Binary files a/third_party/nix/doc/manual/images/callouts/9.gif and /dev/null differ diff --git a/third_party/nix/doc/manual/installation/building-source.xml b/third_party/nix/doc/manual/installation/building-source.xml deleted file mode 100644 index 772cda9cc3..0000000000 --- a/third_party/nix/doc/manual/installation/building-source.xml +++ /dev/null @@ -1,49 +0,0 @@ -
- -Building Nix from Source - -After unpacking or checking out the Nix sources, issue the -following commands: - - -$ ./configure options... -$ make -$ make install - -Nix requires GNU Make so you may need to invoke -gmake instead. - -When building from the Git repository, these should be preceded -by the command: - - -$ ./bootstrap.sh - - - -The installation path can be specified by passing the - to -configure. The default installation directory is -/usr/local. You can change this to any location -you like. You must have write permission to the -prefix path. - -Nix keeps its store (the place where -packages are stored) in /nix/store by default. -This can be changed using -. - -It is best not to change the Nix -store from its default, since doing so makes it impossible to use -pre-built binaries from the standard Nixpkgs channels — that is, all -packages will need to be built from source. - -Nix keeps state (such as its database and log files) in -/nix/var by default. This can be changed using -. - -
diff --git a/third_party/nix/doc/manual/installation/env-variables.xml b/third_party/nix/doc/manual/installation/env-variables.xml deleted file mode 100644 index e2b8fc867c..0000000000 --- a/third_party/nix/doc/manual/installation/env-variables.xml +++ /dev/null @@ -1,89 +0,0 @@ - - -Environment Variables - -To use Nix, some environment variables should be set. In -particular, PATH should contain the directories -prefix/bin and -~/.nix-profile/bin. The first directory contains -the Nix tools themselves, while ~/.nix-profile is -a symbolic link to the current user environment -(an automatically generated package consisting of symlinks to -installed packages). The simplest way to set the required environment -variables is to include the file -prefix/etc/profile.d/nix.sh -in your ~/.profile (or similar), like this: - - -source prefix/etc/profile.d/nix.sh - -
- -<envar>NIX_SSL_CERT_FILE</envar> - -If you need to specify a custom certificate bundle to account -for an HTTPS-intercepting man-in-the-middle proxy, you must specify -the path to the certificate bundle in the environment variable -NIX_SSL_CERT_FILE. - - -If you don't specify a NIX_SSL_CERT_FILE -manually, Nix will install and use its own certificate -bundle. - - - Set the environment variable and install Nix - -$ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt -$ sh <(curl https://nixos.org/nix/install) - - - In the shell profile and rc files (for example, - /etc/bashrc, /etc/zshrc), - add the following line: - -export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt - - - - -You must not add the export and then run the installer, as -the Nix installer will detect the presence of Nix configuration and -abort. -
-<envar>NIX_SSL_CERT_FILE</envar> with macOS and the Nix daemon - -On macOS you must specify the environment variable for the Nix -daemon service, then restart it: - - -$ sudo launchctl setenv NIX_SSL_CERT_FILE /etc/ssl/my-certificate-bundle.crt -$ sudo launchctl kickstart -k system/org.nixos.nix-daemon - -
- -
- -Proxy Environment Variables - -The Nix installer has special handling for these proxy-related -environment variables: -http_proxy, https_proxy, -ftp_proxy, no_proxy, -HTTP_PROXY, HTTPS_PROXY, -FTP_PROXY, NO_PROXY. - -If any of these variables are set when running the Nix installer, -then the installer will create an override file at -/etc/systemd/system/nix-daemon.service.d/override.conf -so nix-daemon will use them. - -
- -
-
diff --git a/third_party/nix/doc/manual/installation/installation.xml b/third_party/nix/doc/manual/installation/installation.xml deleted file mode 100644 index 8789593528..0000000000 --- a/third_party/nix/doc/manual/installation/installation.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -Installation - - -This section describes how to install and configure Nix for first-time use. - - - - - - - - - - - diff --git a/third_party/nix/doc/manual/installation/installing-binary.xml b/third_party/nix/doc/manual/installation/installing-binary.xml deleted file mode 100644 index 394d8053b9..0000000000 --- a/third_party/nix/doc/manual/installation/installing-binary.xml +++ /dev/null @@ -1,190 +0,0 @@ - - -Installing a Binary Distribution - -If you are using Linux or macOS, the easiest way to install Nix -is to run the following command: - - - $ sh <(curl https://nixos.org/nix/install) - - -As of Nix 2.1.0, the Nix installer will always default to creating a -single-user installation, however opting in to the multi-user -installation is highly recommended. - - -
- Single User Installation - - - To explicitly select a single-user installation on your system: - - - sh <(curl https://nixos.org/nix/install) --no-daemon - - - - -This will perform a single-user installation of Nix, meaning that -/nix is owned by the invoking user. You should -run this under your usual user account, not as -root. The script will invoke sudo to create -/nix if it doesn’t already exist. If you don’t -have sudo, you should manually create -/nix first as root, e.g.: - - -$ mkdir /nix -$ chown alice /nix - - -The install script will modify the first writable file from amongst -.bash_profile, .bash_login -and .profile to source -~/.nix-profile/etc/profile.d/nix.sh. You can set -the NIX_INSTALLER_NO_MODIFY_PROFILE environment -variable before executing the install script to disable this -behaviour. - - - -You can uninstall Nix simply by running: - - -$ rm -rf /nix - - - -
- -
- Multi User Installation - - The multi-user Nix installation creates system users, and a system - service for the Nix daemon. - - - - Supported Systems - - - Linux running systemd, with SELinux disabled - - macOS - - - - You can instruct the installer to perform a multi-user - installation on your system: - - - sh <(curl https://nixos.org/nix/install) --daemon - - - - - The multi-user installation of Nix will create build users between - the user IDs 30001 and 30032, and a group with the group ID 30000. - - You should run this under your usual user account, - not as root. The script will invoke - sudo as needed. - - - - If you need Nix to use a different group ID or user ID set, you - will have to download the tarball manually and edit the install - script. - - - - The installer will modify /etc/bashrc, and - /etc/zshrc if they exist. The installer will - first back up these files with a - .backup-before-nix extension. The installer - will also create /etc/profile.d/nix.sh. - - - You can uninstall Nix with the following commands: - - -sudo rm -rf /etc/profile/nix.sh /etc/nix /nix ~root/.nix-profile ~root/.nix-defexpr ~root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels - -# If you are on Linux with systemd, you will need to run: -sudo systemctl stop nix-daemon.socket -sudo systemctl stop nix-daemon.service -sudo systemctl disable nix-daemon.socket -sudo systemctl disable nix-daemon.service -sudo systemctl daemon-reload - -# If you are on macOS, you will need to run: -sudo launchctl unload /Library/LaunchDaemons/org.nixos.nix-daemon.plist -sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist - - - There may also be references to Nix in - /etc/profile, - /etc/bashrc, and - /etc/zshrc which you may remove. - - -
- -
- Installing a pinned Nix version from a URL - - - NixOS.org hosts version-specific installation URLs for all Nix - versions since 1.11.16, at - https://nixos.org/releases/nix/nix-VERSION/install. - - - - These install scripts can be used the same as the main - NixOS.org installation script: - - - sh <(curl https://nixos.org/nix/install) - - - - - In the same directory of the install script are sha256 sums, and - gpg signature files. - -
- -
- Installing from a binary tarball - - - You can also download a binary tarball that contains Nix and all - its dependencies. (This is what the install script at - https://nixos.org/nix/install does automatically.) You - should unpack it somewhere (e.g. in /tmp), - and then run the script named install inside - the binary tarball: - - - -alice$ cd /tmp -alice$ tar xfj nix-1.8-x86_64-darwin.tar.bz2 -alice$ cd nix-1.8-x86_64-darwin -alice$ ./install - - - - - If you need to edit the multi-user installation script to use - different group ID or a different user ID range, modify the - variables set in the file named - install-multi-user. - -
-
diff --git a/third_party/nix/doc/manual/installation/installing-source.xml b/third_party/nix/doc/manual/installation/installing-source.xml deleted file mode 100644 index c261a109d6..0000000000 --- a/third_party/nix/doc/manual/installation/installing-source.xml +++ /dev/null @@ -1,16 +0,0 @@ - - -Installing Nix from Source - -If no binary package is available, you can download and compile -a source distribution. - - - - - - diff --git a/third_party/nix/doc/manual/installation/multi-user.xml b/third_party/nix/doc/manual/installation/multi-user.xml deleted file mode 100644 index 69ae1ef270..0000000000 --- a/third_party/nix/doc/manual/installation/multi-user.xml +++ /dev/null @@ -1,107 +0,0 @@ -
- -Multi-User Mode - -To allow a Nix store to be shared safely among multiple users, -it is important that users are not able to run builders that modify -the Nix store or database in arbitrary ways, or that interfere with -builds started by other users. If they could do so, they could -install a Trojan horse in some package and compromise the accounts of -other users. - -To prevent this, the Nix store and database are owned by some -privileged user (usually root) and builders are -executed under special user accounts (usually named -nixbld1, nixbld2, etc.). When a -unprivileged user runs a Nix command, actions that operate on the Nix -store (such as builds) are forwarded to a Nix -daemon running under the owner of the Nix store/database -that performs the operation. - -Multi-user mode has one important limitation: only -root and a set of trusted -users specified in nix.conf can specify arbitrary -binary caches. So while unprivileged users may install packages from -arbitrary Nix expressions, they may not get pre-built -binaries. - - - - -Setting up the build users - -The build users are the special UIDs under -which builds are performed. They should all be members of the -build users group nixbld. -This group should have no other members. The build users should not -be members of any other group. On Linux, you can create the group and -users as follows: - - -$ groupadd -r nixbld -$ for n in $(seq 1 10); do useradd -c "Nix build user $n" \ - -d /var/empty -g nixbld -G nixbld -M -N -r -s "$(which nologin)" \ - nixbld$n; done - - -This creates 10 build users. There can never be more concurrent builds -than the number of build users, so you may want to increase this if -you expect to do many builds at the same time. - - - - - - -Running the daemon - -The Nix daemon should be -started as follows (as root): - - -$ nix-daemon - -You’ll want to put that line somewhere in your system’s boot -scripts. - -To let unprivileged users use the daemon, they should set the -NIX_REMOTE environment -variable to daemon. So you should put a -line like - - -export NIX_REMOTE=daemon - -into the users’ login scripts. - - - - - - -Restricting access - -To limit which users can perform Nix operations, you can use the -permissions on the directory -/nix/var/nix/daemon-socket. For instance, if you -want to restrict the use of Nix to the members of a group called -nix-users, do - - -$ chgrp nix-users /nix/var/nix/daemon-socket -$ chmod ug=rwx,o= /nix/var/nix/daemon-socket - - -This way, users who are not in the nix-users group -cannot connect to the Unix domain socket -/nix/var/nix/daemon-socket/socket, so they cannot -perform Nix operations. - - - - -
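The trusted users mentioned above are declared in nix.conf. A minimal sketch (the user name alice is invented) that keeps builds in the nixbld group while allowing root and alice to configure arbitrary binary caches might look like:

# /etc/nix/nix.conf (sketch, not a complete configuration)
build-users-group = nixbld
trusted-users = root alice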
diff --git a/third_party/nix/doc/manual/installation/nix-security.xml b/third_party/nix/doc/manual/installation/nix-security.xml deleted file mode 100644 index d888ff14d4..0000000000 --- a/third_party/nix/doc/manual/installation/nix-security.xml +++ /dev/null @@ -1,27 +0,0 @@ - - -Security - -Nix has two basic security models. First, it can be used in -“single-user mode”, which is similar to what most other package -management tools do: there is a single user (typically root) who performs all package -management operations. All other users can then use the installed -packages, but they cannot perform package management operations -themselves. - -Alternatively, you can configure Nix in “multi-user mode”. In -this model, all users can perform package management operations — for -instance, every user can install software without requiring root -privileges. Nix ensures that this is secure. For instance, it’s not -possible for one user to overwrite a package used by another user with -a Trojan horse. - - - - - \ No newline at end of file diff --git a/third_party/nix/doc/manual/installation/obtaining-source.xml b/third_party/nix/doc/manual/installation/obtaining-source.xml deleted file mode 100644 index 968822cc06..0000000000 --- a/third_party/nix/doc/manual/installation/obtaining-source.xml +++ /dev/null @@ -1,30 +0,0 @@ -
- -Obtaining a Source Distribution - -The source tarball of the most recent stable release can be -downloaded from the Nix homepage. -You can also grab the most -recent development release. - -Alternatively, the most recent sources of Nix can be obtained -from its Git -repository. For example, the following command will check out -the latest revision into a directory called -nix: - - -$ git clone https://github.com/NixOS/nix - -Likewise, specific releases can be obtained from the tags of the -repository. - -
\ No newline at end of file diff --git a/third_party/nix/doc/manual/installation/prerequisites-source.xml b/third_party/nix/doc/manual/installation/prerequisites-source.xml deleted file mode 100644 index e7bdcf966c..0000000000 --- a/third_party/nix/doc/manual/installation/prerequisites-source.xml +++ /dev/null @@ -1,105 +0,0 @@ -
- -Prerequisites - - - - GNU Make. - - Bash Shell. The ./configure script - relies on bashisms, so Bash is required. - - A version of GCC or Clang that supports C++17. - - pkg-config to locate - dependencies. If your distribution does not provide it, you can get - it from . - - The OpenSSL library to calculate cryptographic hashes. - If your distribution does not provide it, you can get it from . - - The libbrotlienc and - libbrotlidec libraries to provide implementation - of the Brotli compression algorithm. They are available for download - from the official repository . - - The bzip2 compressor program and the - libbz2 library. Thus you must have bzip2 - installed, including development headers and libraries. If your - distribution does not provide these, you can obtain bzip2 from . - - liblzma, which is provided by - XZ Utils. If your distribution does not provide this, you can - get it from . - - cURL and its library. If your distribution does not - provide it, you can get it from . - - The SQLite embedded database library, version 3.6.19 - or higher. If your distribution does not provide it, please install - it from . - - The Boehm - garbage collector to reduce the evaluator’s memory - consumption (optional). To enable it, install - pkgconfig and the Boehm garbage collector, and - pass the flag to - configure. - - The boost library of version - 1.66.0 or higher. It can be obtained from the official web site - . - - The editline library of version - 1.14.0 or higher. It can be obtained from the its repository - . - - The xmllint and - xsltproc programs to build this manual and the - man-pages. These are part of the libxml2 and - libxslt packages, respectively. You also need - the DocBook - XSL stylesheets and optionally the DocBook 5.0 RELAX NG - schemas. Note that these are only required if you modify the - manual sources or when you are building from the Git - repository. - - Recent versions of Bison and Flex to build the - parser. (This is because Nix needs GLR support in Bison and - reentrancy support in Flex.) For Bison, you need version 2.6, which - can be obtained from the GNU FTP - server. For Flex, you need version 2.5.35, which is - available on SourceForge. - Slightly older versions may also work, but ancient versions like the - ubiquitous 2.5.4a won't. Note that these are only required if you - modify the parser or when you are building from the Git - repository. - - The libseccomp is used to provide - syscall filtering on Linux. This is an optional dependency and can - be disabled passing a - option to the configure script (Not recommended - unless your system doesn't support - libseccomp). To get the library, visit . - - - -
diff --git a/third_party/nix/doc/manual/installation/single-user.xml b/third_party/nix/doc/manual/installation/single-user.xml deleted file mode 100644 index 09cdaa5d48..0000000000 --- a/third_party/nix/doc/manual/installation/single-user.xml +++ /dev/null @@ -1,21 +0,0 @@ -
- -Single-User Mode - -In single-user mode, all Nix operations that access the database -in prefix/var/nix/db -or modify the Nix store in -prefix/store must be -performed under the user ID that owns those directories. This is -typically root. (If you -install from RPM packages, that’s in fact the default ownership.) -However, on single-user machines, it is often convenient to -chown those directories to your normal user account -so that you don’t have to su to root all the time. - -
\ No newline at end of file diff --git a/third_party/nix/doc/manual/installation/supported-platforms.xml b/third_party/nix/doc/manual/installation/supported-platforms.xml deleted file mode 100644 index 3e74be49d1..0000000000 --- a/third_party/nix/doc/manual/installation/supported-platforms.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -Supported Platforms - -Nix is currently supported on the following platforms: - - - - Linux (i686, x86_64, aarch64). - - macOS (x86_64). - - - - - - - - - - diff --git a/third_party/nix/doc/manual/installation/upgrading.xml b/third_party/nix/doc/manual/installation/upgrading.xml deleted file mode 100644 index 30670d7fec..0000000000 --- a/third_party/nix/doc/manual/installation/upgrading.xml +++ /dev/null @@ -1,22 +0,0 @@ - - - Upgrading Nix - - - Multi-user Nix users on macOS can upgrade Nix by running: - sudo -i sh -c 'nix-channel --update && - nix-env -iA nixpkgs.nix && - launchctl remove org.nixos.nix-daemon && - launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist' - - - - - Single-user installations of Nix should run this: - nix-channel --update; nix-env -iA nixpkgs.nix - - diff --git a/third_party/nix/doc/manual/introduction/about-nix.xml b/third_party/nix/doc/manual/introduction/about-nix.xml deleted file mode 100644 index c21ed34ddc..0000000000 --- a/third_party/nix/doc/manual/introduction/about-nix.xml +++ /dev/null @@ -1,268 +0,0 @@ - - -About Nix - -Nix is a purely functional package manager. -This means that it treats packages like values in purely functional -programming languages such as Haskell — they are built by functions -that don’t have side-effects, and they never change after they have -been built. Nix stores packages in the Nix -store, usually the directory -/nix/store, where each package has its own unique -subdirectory such as - - -/nix/store/b6gvzjyb2pg0kjfwrjmg1vfhh54ad73z-firefox-33.1/ - - -where b6gvzjyb2pg0… is a unique identifier for the -package that captures all its dependencies (it’s a cryptographic hash -of the package’s build dependency graph). This enables many powerful -features. - - -Multiple versions - -You can have multiple versions or variants of a package -installed at the same time. This is especially important when -different applications have dependencies on different versions of the -same package — it prevents the “DLL hell”. Because of the hashing -scheme, different versions of a package end up in different paths in -the Nix store, so they don’t interfere with each other. - -An important consequence is that operations like upgrading or -uninstalling an application cannot break other applications, since -these operations never “destructively” update or delete files that are -used by other packages. - - - - -Complete dependencies - -Nix helps you make sure that package dependency specifications -are complete. In general, when you’re making a package for a package -management system like RPM, you have to specify for each package what -its dependencies are, but there are no guarantees that this -specification is complete. If you forget a dependency, then the -package will build and work correctly on your -machine if you have the dependency installed, but not on the end -user's machine if it's not there. - -Since Nix on the other hand doesn’t install packages in “global” -locations like /usr/bin but in package-specific -directories, the risk of incomplete dependencies is greatly reduced. 
-This is because tools such as compilers don’t search in per-packages -directories such as -/nix/store/5lbfaxb722zp…-openssl-0.9.8d/include, -so if a package builds correctly on your system, this is because you -specified the dependency explicitly. This takes care of the build-time -dependencies. - -Once a package is built, runtime dependencies are found by -scanning binaries for the hash parts of Nix store paths (such as -r8vvq9kq…). This sounds risky, but it works -extremely well. - - - - -Multi-user support - -Nix has multi-user support. This means that non-privileged -users can securely install software. Each user can have a different -profile, a set of packages in the Nix store that -appear in the user’s PATH. If a user installs a -package that another user has already installed previously, the -package won’t be built or downloaded a second time. At the same time, -it is not possible for one user to inject a Trojan horse into a -package that might be used by another user. - - - - -Atomic upgrades and rollbacks - -Since package management operations never overwrite packages in -the Nix store but just add new versions in different paths, they are -atomic. So during a package upgrade, there is no -time window in which the package has some files from the old version -and some files from the new version — which would be bad because a -program might well crash if it’s started during that period. - -And since packages aren’t overwritten, the old versions are still -there after an upgrade. This means that you can roll -back to the old version: - - -$ nix-env --upgrade some-packages -$ nix-env --rollback - - - - - -Garbage collection - -When you uninstall a package like this… - - -$ nix-env --uninstall firefox - - -the package isn’t deleted from the system right away (after all, you -might want to do a rollback, or it might be in the profiles of other -users). Instead, unused packages can be deleted safely by running the -garbage collector: - - -$ nix-collect-garbage - - -This deletes all packages that aren’t in use by any user profile or by -a currently running program. - - - - -Functional package language - -Packages are built from Nix expressions, -which is a simple functional language. A Nix expression describes -everything that goes into a package build action (a “derivation”): -other packages, sources, the build script, environment variables for -the build script, etc. Nix tries very hard to ensure that Nix -expressions are deterministic: building a Nix -expression twice should yield the same result. - -Because it’s a functional language, it’s easy to support -building variants of a package: turn the Nix expression into a -function and call it any number of times with the appropriate -arguments. Due to the hashing scheme, variants don’t conflict with -each other in the Nix store. - - - - -Transparent source/binary deployment - -Nix expressions generally describe how to build a package from -source, so an installation action like - - -$ nix-env --install firefox - - -could cause quite a bit of build activity, as not -only Firefox but also all its dependencies (all the way up to the C -library and the compiler) would have to built, at least if they are -not already in the Nix store. This is a source deployment -model. For most users, building from source is not very -pleasant as it takes far too long. However, Nix can automatically -skip building from source and instead use a binary -cache, a web server that provides pre-built binaries. 
For -instance, when asked to build -/nix/store/b6gvzjyb2pg0…-firefox-33.1 from source, -Nix would first check if the file -https://cache.nixos.org/b6gvzjyb2pg0….narinfo exists, and -if so, fetch the pre-built binary referenced from there; otherwise, it -would fall back to building from source. - - - - - - - -Nix Packages collection - -We provide a large set of Nix expressions containing hundreds of -existing Unix packages, the Nix Packages -collection (Nixpkgs). - - - - -Managing build environments - -Nix is extremely useful for developers as it makes it easy to -automatically set up the build environment for a package. Given a -Nix expression that describes the dependencies of your package, the -command nix-shell will build or download those -dependencies if they’re not already in your Nix store, and then start -a Bash shell in which all necessary environment variables (such as -compiler search paths) are set. - -For example, the following command gets all dependencies of the -Pan newsreader, as described by its -Nix expression: - - -$ nix-shell '<nixpkgs>' -A pan - - -You’re then dropped into a shell where you can edit, build and test -the package: - - -[nix-shell]$ tar xf $src -[nix-shell]$ cd pan-* -[nix-shell]$ ./configure -[nix-shell]$ make -[nix-shell]$ ./pan/gui/pan - - - - - - - -Portability - -Nix runs on Linux and macOS. - - - - -NixOS - -NixOS is a Linux distribution based on Nix. It uses Nix not -just for package management but also to manage the system -configuration (e.g., to build configuration files in -/etc). This means, among other things, that it -is easy to roll back the entire configuration of the system to an -earlier state. Also, users can install software without root -privileges. For more information and downloads, see the NixOS homepage. - - - - -License - -Nix is released under the terms of the GNU -LGPLv2.1 or (at your option) any later version. - - - - - diff --git a/third_party/nix/doc/manual/introduction/introduction.xml b/third_party/nix/doc/manual/introduction/introduction.xml deleted file mode 100644 index 12b2cc7610..0000000000 --- a/third_party/nix/doc/manual/introduction/introduction.xml +++ /dev/null @@ -1,12 +0,0 @@ - - -Introduction - - - - - diff --git a/third_party/nix/doc/manual/introduction/quick-start.xml b/third_party/nix/doc/manual/introduction/quick-start.xml deleted file mode 100644 index 1ce6c8d50a..0000000000 --- a/third_party/nix/doc/manual/introduction/quick-start.xml +++ /dev/null @@ -1,124 +0,0 @@ - - -Quick Start - -This chapter is for impatient people who don't like reading -documentation. For more in-depth information you are kindly referred -to subsequent chapters. - - - -Install single-user Nix by running the following: - - -$ bash <(curl https://nixos.org/nix/install) - - -This will install Nix in /nix. The install script -will create /nix using sudo, -so make sure you have sufficient rights. (For other installation -methods, see .) - -See what installable packages are currently available -in the channel: - - -$ nix-env -qa -docbook-xml-4.3 -docbook-xml-4.5 -firefox-33.0.2 -hello-2.9 -libxslt-1.1.28 -... - - - -Install some packages from the channel: - - -$ nix-env -i hello - -This should download pre-built packages; it should not build them -locally (if it does, something went wrong). - -Test that they work: - - -$ which hello -/home/eelco/.nix-profile/bin/hello -$ hello -Hello, world! 
- - - - -Uninstall a package: - - -$ nix-env -e hello - - - -You can also test a package without installing it: - - -$ nix-shell -p hello - - -This builds or downloads GNU Hello and its dependencies, then drops -you into a Bash shell where the hello command is -present, all without affecting your normal environment: - - -[nix-shell:~]$ hello -Hello, world! - -[nix-shell:~]$ exit - -$ hello -hello: command not found - - - - -To keep up-to-date with the channel, do: - - -$ nix-channel --update nixpkgs -$ nix-env -u '*' - -The latter command will upgrade each installed package for which there -is a “newer” version (as determined by comparing the version -numbers). - -If you're unhappy with the result of a -nix-env action (e.g., an upgraded package turned -out not to work properly), you can go back: - - -$ nix-env --rollback - - - -You should periodically run the Nix garbage collector -to get rid of unused packages, since uninstalls or upgrades don't -actually delete them: - - -$ nix-collect-garbage -d - - - - - - - - diff --git a/third_party/nix/doc/manual/manual.xml b/third_party/nix/doc/manual/manual.xml deleted file mode 100644 index 87d9de28ab..0000000000 --- a/third_party/nix/doc/manual/manual.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - Nix Package Manager Guide - Version - - - - Eelco - Dolstra - - Author - - - - 2004-2018 - Eelco Dolstra - - - - - - - - - - - - - - - - - - - - diff --git a/third_party/nix/doc/manual/nix-lang-ref.xml b/third_party/nix/doc/manual/nix-lang-ref.xml deleted file mode 100644 index 86273ac3d0..0000000000 --- a/third_party/nix/doc/manual/nix-lang-ref.xml +++ /dev/null @@ -1,182 +0,0 @@ - - Nix Language Reference - - - Grammar - - - Expressions - - - Expr - - - - - - - ExprFunction - - '{' '}' ':' - | - - - - - - ExprAssert - - 'assert' ';' - | - - - - - - ExprIf - - 'if' 'then' - 'else' - | - - - - - - ExprOp - - '!' - | - '==' - | - '!=' - | - '&&' - | - '||' - | - '->' - | - '//' - | - '~' - | - '?' - | - - - - - - ExprApp - - '.' - | - - - - - - ExprSelect - - - | - - - - - - ExprSimple - - | - | - | - | - - | - 'true' | 'false' | 'null' - | - '(' ')' - | - '{' * '}' - | - 'let' '{' * '}' - | - 'rec' '{' * '}' - | - '[' * ']' - - - - - Bind - - '=' ';' - | - 'inherit' ('(' ')')? * ';' - - - - - Formals - - ',' - | - - - - - Formal - - - | - '?' - - - - - - - Terminals - - - Id - [a-zA-Z\_][a-zA-Z0-9\_\']* - - - - Int - [0-9]+ - - - - Str - \"[^\n\"]*\" - - - - Path - [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+ - - - - Uri - [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+ - - - - Whitespace - - [ \t\n]+ - | - \#[^\n]* - | - \/\*(.|\n)*\*\/ - - - - - - - - diff --git a/third_party/nix/doc/manual/packages/basic-package-mgmt.xml b/third_party/nix/doc/manual/packages/basic-package-mgmt.xml deleted file mode 100644 index 0f21297f31..0000000000 --- a/third_party/nix/doc/manual/packages/basic-package-mgmt.xml +++ /dev/null @@ -1,194 +0,0 @@ - - -Basic Package Management - -The main command for package management is nix-env. You can use -it to install, upgrade, and erase packages, and to query what -packages are installed or are available for installation. - -In Nix, different users can have different “views” -on the set of installed applications. That is, there might be lots of -applications present on the system (possibly in many different -versions), but users can have a specific selection of those active — -where “active” just means that it appears in a directory -in the user’s PATH. 
Such a view on the set of -installed applications is called a user -environment, which is just a directory tree consisting of -symlinks to the files of the active applications. - -Components are installed from a set of Nix -expressions that tell Nix how to build those packages, -including, if necessary, their dependencies. There is a collection of -Nix expressions called the Nixpkgs package collection that contains -packages ranging from basic development stuff such as GCC and Glibc, -to end-user applications like Mozilla Firefox. (Nix is however not -tied to the Nixpkgs package collection; you could write your own Nix -expressions based on Nixpkgs, or completely new ones.) - -You can manually download the latest version of Nixpkgs from -. However, -it’s much more convenient to use the Nixpkgs -channel, since it makes it easy to stay up to -date with new versions of Nixpkgs. (Channels are described in more -detail in .) Nixpkgs is automatically -added to your list of “subscribed” channels when you install -Nix. If this is not the case for some reason, you can add it as -follows: - - -$ nix-channel --add https://nixos.org/channels/nixpkgs-unstable -$ nix-channel --update - - - - -On NixOS, you’re automatically subscribed to a NixOS -channel corresponding to your NixOS major release -(e.g. http://nixos.org/channels/nixos-14.12). A NixOS -channel is identical to the Nixpkgs channel, except that it contains -only Linux binaries and is updated only if a set of regression tests -succeed. - -You can view the set of available packages in Nixpkgs: - - -$ nix-env -qa -aterm-2.2 -bash-3.0 -binutils-2.15 -bison-1.875d -blackdown-1.4.2 -bzip2-1.0.2 -… - -The flag specifies a query operation, and - means that you want to show the “available” (i.e., -installable) packages, as opposed to the installed packages. If you -downloaded Nixpkgs yourself, or if you checked it out from GitHub, -then you need to pass the path to your Nixpkgs tree using the - flag: - - -$ nix-env -qaf /path/to/nixpkgs - - -where /path/to/nixpkgs is where you’ve -unpacked or checked out Nixpkgs. - -You can select specific packages by name: - - -$ nix-env -qa firefox -firefox-34.0.5 -firefox-with-plugins-34.0.5 - - -and using regular expressions: - - -$ nix-env -qa 'firefox.*' - - - - -It is also possible to see the status of -available packages, i.e., whether they are installed into the user -environment and/or present in the system: - - -$ nix-env -qas -… --PS bash-3.0 ---S binutils-2.15 -IPS bison-1.875d -… - -The first character (I) indicates whether the -package is installed in your current user environment. The second -(P) indicates whether it is present on your system -(in which case installing it into your user environment would be a -very quick operation). The last one (S) indicates -whether there is a so-called substitute for the -package, which is Nix’s mechanism for doing binary deployment. It -just means that Nix knows that it can fetch a pre-built package from -somewhere (typically a network server) instead of building it -locally. - -You can install a package using nix-env -i. -For instance, - - -$ nix-env -i subversion - -will install the package called subversion (which -is, of course, the Subversion version -management system). - -When you ask Nix to install a package, it will first try -to get it in pre-compiled form from a binary -cache. By default, Nix will use the binary cache -https://cache.nixos.org; it contains binaries for most -packages in Nixpkgs. 
Only if no binary is available in the binary -cache, Nix will build the package from source. So if nix-env --i subversion results in Nix building stuff from source, -then either the package is not built for your platform by the Nixpkgs -build servers, or your version of Nixpkgs is too old or too new. For -instance, if you have a very recent checkout of Nixpkgs, then the -Nixpkgs build servers may not have had a chance to build everything -and upload the resulting binaries to -https://cache.nixos.org. The Nixpkgs channel is only -updated after all binaries have been uploaded to the cache, so if you -stick to the Nixpkgs channel (rather than using a Git checkout of the -Nixpkgs tree), you will get binaries for most packages. - -Naturally, packages can also be uninstalled: - - -$ nix-env -e subversion - - - -Upgrading to a new version is just as easy. If you have a new -release of Nix Packages, you can do: - - -$ nix-env -u subversion - -This will only upgrade Subversion if there is a -“newer” version in the new set of Nix expressions, as -defined by some pretty arbitrary rules regarding ordering of version -numbers (which generally do what you’d expect of them). To just -unconditionally replace Subversion with whatever version is in the Nix -expressions, use -i instead of --u; -i will remove -whatever version is already installed. - -You can also upgrade all packages for which there are newer -versions: - - -$ nix-env -u - - - -Sometimes it’s useful to be able to ask what -nix-env would do, without actually doing it. For -instance, to find out what packages would be upgraded by -nix-env -u, you can do - - -$ nix-env -u --dry-run -(dry run; not doing anything) -upgrading `libxslt-1.1.0' to `libxslt-1.1.10' -upgrading `graphviz-1.10' to `graphviz-1.12' -upgrading `coreutils-5.0' to `coreutils-5.2.1' - - - - diff --git a/third_party/nix/doc/manual/packages/binary-cache-substituter.xml b/third_party/nix/doc/manual/packages/binary-cache-substituter.xml deleted file mode 100644 index c6ceb9c806..0000000000 --- a/third_party/nix/doc/manual/packages/binary-cache-substituter.xml +++ /dev/null @@ -1,70 +0,0 @@ -
- -Serving a Nix store via HTTP - -You can easily share the Nix store of a machine via HTTP. This -allows other machines to fetch store paths from that machine to speed -up installations. It uses the same binary cache -mechanism that Nix usually uses to fetch pre-built binaries from -https://cache.nixos.org. - -The daemon that handles binary cache requests via HTTP, -nix-serve, is not part of the Nix distribution, but -you can install it from Nixpkgs: - - -$ nix-env -i nix-serve - - -You can then start the server, listening for HTTP connections on -whatever port you like: - - -$ nix-serve -p 8080 - - -To check whether it works, try the following on the client: - - -$ curl http://avalon:8080/nix-cache-info - - -which should print something like: - - -StoreDir: /nix/store -WantMassQuery: 1 -Priority: 30 - - - - -On the client side, you can tell Nix to use your binary cache -using , e.g.: - - -$ nix-env -i firefox --option extra-binary-caches http://avalon:8080/ - - -The option tells Nix to use this -binary cache in addition to your default caches, such as -https://cache.nixos.org. Thus, for any path in the closure -of Firefox, Nix will first check if the path is available on the -server avalon or another binary caches. If not, it -will fall back to building from source. - -You can also tell Nix to always use your binary cache by adding -a line to the nix.conf -configuration file like this: - - -binary-caches = http://avalon:8080/ https://cache.nixos.org/ - - - - -
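A rough way to check that a specific path is being served, not just the cache as a whole: Nix binary caches expose per-path metadata under the hash part of the store path, so you can request that file directly. This is only a sketch, reusing the firefox path and the avalon host from the examples above:

$ storePath=$(nix-store -qR $(type -p firefox) | head -n 1)
$ hashPart=$(basename "$storePath" | cut -c 1-32)
$ curl "http://avalon:8080/$hashPart.narinfo"

If the path is served, this prints its metadata (store path, NAR URL, size, and so on).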
diff --git a/third_party/nix/doc/manual/packages/channels.xml b/third_party/nix/doc/manual/packages/channels.xml deleted file mode 100644 index 15c119fcb1..0000000000 --- a/third_party/nix/doc/manual/packages/channels.xml +++ /dev/null @@ -1,57 +0,0 @@ - - -Channels - -If you want to stay up to date with a set of packages, it’s not -very convenient to manually download the latest set of Nix expressions -for those packages and upgrade using nix-env. -Fortunately, there’s a better way: Nix -channels. - -A Nix channel is just a URL that points to a place that contains -a set of Nix expressions and a manifest. Using the command nix-channel you -can automatically stay up to date with whatever is available at that -URL. - -You can “subscribe” to a channel using -nix-channel --add, e.g., - - -$ nix-channel --add https://nixos.org/channels/nixpkgs-unstable - -subscribes you to a channel that always contains that latest version -of the Nix Packages collection. (Subscribing really just means that -the URL is added to the file ~/.nix-channels, -where it is read by subsequent calls to nix-channel ---update.) You can “unsubscribe” using nix-channel ---remove: - - -$ nix-channel --remove nixpkgs - - - -To obtain the latest Nix expressions available in a channel, do - - -$ nix-channel --update - -This downloads and unpacks the Nix expressions in every channel -(downloaded from url/nixexprs.tar.bz2). -It also makes the union of each channel’s Nix expressions available by -default to nix-env operations (via the symlink -~/.nix-defexpr/channels). Consequently, you can -then say - - -$ nix-env -u - -to upgrade all packages in your profile to the latest versions -available in the subscribed channels. - - diff --git a/third_party/nix/doc/manual/packages/copy-closure.xml b/third_party/nix/doc/manual/packages/copy-closure.xml deleted file mode 100644 index 012030e3eb..0000000000 --- a/third_party/nix/doc/manual/packages/copy-closure.xml +++ /dev/null @@ -1,50 +0,0 @@ -
- -Copying Closures Via SSH - -The command nix-copy-closure copies a Nix -store path along with all its dependencies to or from another machine -via the SSH protocol. It doesn’t copy store paths that are already -present on the target machine. For example, the following command -copies Firefox with all its dependencies: - - -$ nix-copy-closure --to alice@itchy.example.org $(type -p firefox) - -See for details. - -With nix-store ---export and nix-store --import you can -write the closure of a store path (that is, the path and all its -dependencies) to a file, and then unpack that file into another Nix -store. For example, - - -$ nix-store --export $(nix-store -qR $(type -p firefox)) > firefox.closure - -writes the closure of Firefox to a file. You can then copy this file -to another machine and install the closure: - - -$ nix-store --import < firefox.closure - -Any store paths in the closure that are already present in the target -store are ignored. It is also possible to pipe the export into -another command, e.g. to copy and install a closure directly to/on -another machine: - - -$ nix-store --export $(nix-store -qR $(type -p firefox)) | bzip2 | \ - ssh alice@itchy.example.org "bunzip2 | nix-store --import" - -However, nix-copy-closure is generally more -efficient because it only copies paths that are not already present in -the target Nix store. - -
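The command also works in the other direction: instead of pushing a closure to a remote machine, you can pull one from it. A sketch, with an illustrative store path:

$ nix-copy-closure --from alice@itchy.example.org /nix/store/m85bxg…-firefox-34.0.5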
diff --git a/third_party/nix/doc/manual/packages/garbage-collection.xml b/third_party/nix/doc/manual/packages/garbage-collection.xml deleted file mode 100644 index b506f22b03..0000000000 --- a/third_party/nix/doc/manual/packages/garbage-collection.xml +++ /dev/null @@ -1,86 +0,0 @@ - - -Garbage Collection - -nix-env operations such as upgrades -() and uninstall () never -actually delete packages from the system. All they do (as shown -above) is to create a new user environment that no longer contains -symlinks to the “deleted” packages. - -Of course, since disk space is not infinite, unused packages -should be removed at some point. You can do this by running the Nix -garbage collector. It will remove from the Nix store any package -not used (directly or indirectly) by any generation of any -profile. - -Note however that as long as old generations reference a -package, it will not be deleted. After all, we wouldn’t be able to -do a rollback otherwise. So in order for garbage collection to be -effective, you should also delete (some) old generations. Of course, -this should only be done if you are certain that you will not need to -roll back. - -To delete all old (non-current) generations of your current -profile: - - -$ nix-env --delete-generations old - -Instead of old you can also specify a list of -generations, e.g., - - -$ nix-env --delete-generations 10 11 14 - -To delete all generations older than a specified number of days -(except the current generation), use the d -suffix. For example, - - -$ nix-env --delete-generations 14d - -deletes all generations older than two weeks. - -After removing appropriate old generations you can run the -garbage collector as follows: - - -$ nix-store --gc - -The behaviour of the gargage collector is affected by the -keep-derivations (default: true) and keep-outputs -(default: false) options in the Nix configuration file. The defaults will ensure -that all derivations that are build-time dependencies of garbage collector roots -will be kept and that all output paths that are runtime dependencies -will be kept as well. All other derivations or paths will be collected. -(This is usually what you want, but while you are developing -it may make sense to keep outputs to ensure that rebuild times are quick.) - -If you are feeling uncertain, you can also first view what files would -be deleted: - - -$ nix-store --gc --print-dead - -Likewise, the option will show the paths -that won’t be deleted. - -There is also a convenient little utility -nix-collect-garbage, which when invoked with the - () switch deletes all -old generations of all profiles in -/nix/var/nix/profiles. So - - -$ nix-collect-garbage -d - -is a quick and easy way to clean up your system. - - - - diff --git a/third_party/nix/doc/manual/packages/garbage-collector-roots.xml b/third_party/nix/doc/manual/packages/garbage-collector-roots.xml deleted file mode 100644 index 8338e53920..0000000000 --- a/third_party/nix/doc/manual/packages/garbage-collector-roots.xml +++ /dev/null @@ -1,29 +0,0 @@ -
- -Garbage Collector Roots - -The roots of the garbage collector are all store paths to which -there are symlinks in the directory -prefix/nix/var/nix/gcroots. -For instance, the following command makes the path -/nix/store/d718ef...-foo a root of the collector: - - -$ ln -s /nix/store/d718ef...-foo /nix/var/nix/gcroots/bar - -That is, after this command, the garbage collector will not remove -/nix/store/d718ef...-foo or any of its -dependencies. - -Subdirectories of -prefix/nix/var/nix/gcroots -are also searched for symlinks. Symlinks to non-store paths are -followed and searched for roots, but symlinks to non-store paths -inside the paths reached in that way are not -followed to prevent infinite recursion. - -
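If you prefer not to create the symlink by hand, nix-store can register a root for you while realising a path. A hedged sketch, assuming the --add-root and --indirect flags of nix-store and an arbitrary ./foo-root link name:

$ nix-store -r /nix/store/d718ef...-foo --add-root ./foo-root --indirect

Deleting ./foo-root later makes the path eligible for garbage collection again.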
\ No newline at end of file diff --git a/third_party/nix/doc/manual/packages/package-management.xml b/third_party/nix/doc/manual/packages/package-management.xml deleted file mode 100644 index 61e55faeb3..0000000000 --- a/third_party/nix/doc/manual/packages/package-management.xml +++ /dev/null @@ -1,23 +0,0 @@ - - -Package Management - - -This chapter discusses how to do package management with Nix, -i.e., how to obtain, install, upgrade, and erase packages. This is -the “user’s” perspective of the Nix system — people -who want to create packages should consult -. - - - - - - - - - diff --git a/third_party/nix/doc/manual/packages/profiles.xml b/third_party/nix/doc/manual/packages/profiles.xml deleted file mode 100644 index 4d10319abe..0000000000 --- a/third_party/nix/doc/manual/packages/profiles.xml +++ /dev/null @@ -1,158 +0,0 @@ - - -Profiles - -Profiles and user environments are Nix’s mechanism for -allowing different users to have different -configurations, and for doing atomic upgrades and rollbacks. To -understand how they work, it’s useful to know a bit about how Nix -works. In Nix, packages are stored in unique locations in the -Nix store (typically, -/nix/store). For instance, a particular version -of the Subversion package might be stored in a directory -/nix/store/dpmvp969yhdqs7lm2r1a3gng7pyq6vy4-subversion-1.1.3/, -while another version might be stored in -/nix/store/5mq2jcn36ldlmh93yj1n8s9c95pj7c5s-subversion-1.1.2. -The long strings prefixed to the directory names are cryptographic -hashes (160-bit truncations of SHA-256 hashes encoded in -a base-32 notation, to be precise) of -all inputs involved in building the package — -sources, dependencies, compiler flags, and so on. So if two -packages differ in any way, they end up in different locations in -the file system, so they don’t interfere with each other. The figure below shows a part of a typical Nix -store. - -

[Figure: User environments]
- -Of course, you wouldn’t want to type - - -$ /nix/store/dpmvp969yhdq...-subversion-1.1.3/bin/svn - -every time you want to run Subversion. Of course we could set up the -PATH environment variable to include the -bin directory of every package we want to use, -but this is not very convenient since changing PATH -doesn’t take effect for already existing processes. The solution Nix -uses is to create directory trees of symlinks to -activated packages. These are called -user environments and they are packages -themselves (though automatically generated by -nix-env), so they too reside in the Nix store. For -instance, in the user -environment /nix/store/0c1p5z4kda11...-user-env -contains a symlink to just Subversion 1.1.2 (arrows in the figure -indicate symlinks). This would be what we would obtain if we had done - - -$ nix-env -i subversion - -on a set of Nix expressions that contained Subversion 1.1.2. - -This doesn’t in itself solve the problem, of course; you -wouldn’t want to type -/nix/store/0c1p5z4kda11...-user-env/bin/svn -either. That’s why there are symlinks outside of the store that point -to the user environments in the store; for instance, the symlinks -default-42-link and -default-43-link in the example. These are called -generations since every time you perform a -nix-env operation, a new user environment is -generated based on the current one. For instance, generation 43 was -created from generation 42 when we did - - -$ nix-env -i subversion firefox - -on a set of Nix expressions that contained Firefox and a new version -of Subversion. - -Generations are grouped together into -profiles so that different users don’t interfere -with each other if they don’t want to. For example: - - -$ ls -l /nix/var/nix/profiles/ -... -lrwxrwxrwx 1 eelco ... default-42-link -> /nix/store/0c1p5z4kda11...-user-env -lrwxrwxrwx 1 eelco ... default-43-link -> /nix/store/3aw2pdyx2jfc...-user-env -lrwxrwxrwx 1 eelco ... default -> default-43-link - -This shows a profile called default. The file -default itself is actually a symlink that points -to the current generation. When we do a nix-env -operation, a new user environment and generation link are created -based on the current one, and finally the default -symlink is made to point at the new generation. This last step is -atomic on Unix, which explains how we can do atomic upgrades. (Note -that the building/installing of new packages doesn’t interfere in -any way with old packages, since they are stored in different -locations in the Nix store.) - -If you find that you want to undo a nix-env -operation, you can just do - - -$ nix-env --rollback - -which will just make the current generation link point at the previous -link. E.g., default would be made to point at -default-42-link. You can also switch to a -specific generation: - - -$ nix-env --switch-generation 43 - -which in this example would roll forward to generation 43 again. You -can also see all available generations: - - -$ nix-env --list-generations - -You generally wouldn’t have -/nix/var/nix/profiles/some-profile/bin -in your PATH. Rather, there is a symlink -~/.nix-profile that points to your current -profile. This means that you should put -~/.nix-profile/bin in your PATH -(and indeed, that’s what the initialisation script -/nix/etc/profile.d/nix.sh does). This makes it -easier to switch to a different profile. 
You can do that using the -command nix-env --switch-profile: - - -$ nix-env --switch-profile /nix/var/nix/profiles/my-profile - -$ nix-env --switch-profile /nix/var/nix/profiles/default - -These commands switch to the my-profile and -default profile, respectively. If the profile doesn’t exist, it will -be created automatically. You should be careful about storing a -profile in another location than the profiles -directory, since otherwise it might not be used as a root of the -garbage collector (see ). - -All nix-env operations work on the profile -pointed to by ~/.nix-profile, but you can override -this using the option (abbreviation -): - - -$ nix-env -p /nix/var/nix/profiles/other-profile -i subversion - -This will not change the -~/.nix-profile symlink. - -
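To make the generation mechanism described above a bit more concrete, listing the generations of the default profile might produce output along these lines (the numbers and dates are made up for illustration):

$ nix-env --list-generations
  42   2005-04-10 14:31:01
  43   2005-04-11 09:12:45   (current)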
diff --git a/third_party/nix/doc/manual/packages/s3-substituter.xml b/third_party/nix/doc/manual/packages/s3-substituter.xml deleted file mode 100644 index 868b5a66dc..0000000000 --- a/third_party/nix/doc/manual/packages/s3-substituter.xml +++ /dev/null @@ -1,182 +0,0 @@ - -
- -Serving a Nix store via AWS S3 or S3-compatible Service - -Nix has built-in support for storing and fetching store paths -from Amazon S3 and S3 compatible services. This uses the same -binary cache mechanism that Nix usually uses to -fetch prebuilt binaries from cache.nixos.org. - -The following options can be specified as URL parameters to -the S3 URL: - - - profile - - - The name of the AWS configuration profile to use. By default - Nix will use the default profile. - - - - - region - - - The region of the S3 bucket. us–east-1 by - default. - - - - If your bucket is not in us–east-1, you - should always explicitly specify the region parameter. - - - - - endpoint - - - The URL to your S3-compatible service, for when not using - Amazon S3. Do not specify this value if you're using Amazon - S3. - - This endpoint must support HTTPS and will use - path-based addressing instead of virtual host based - addressing. - - - - scheme - - - The scheme used for S3 requests, https - (default) or http. This option allows you to - disable HTTPS for binary caches which don't support it. - - HTTPS should be used if the cache might contain - sensitive information. - - - - -In this example we will use the bucket named -example-nix-cache. - -
Anonymous Reads to your S3-compatible binary cache - - If your binary cache is publicly accessible and does not - require authentication, the simplest way to use Nix with - your S3-compatible binary cache is to use the HTTP URL for that - cache. - - For AWS S3 the binary cache URL for the example bucket will be - exactly https://example-nix-cache.s3.amazonaws.com or - s3://example-nix-cache. For S3-compatible binary caches, - consult that cache's documentation. - - Your bucket will need the following bucket policy: - -
- -
Authenticated Reads to your S3 binary cache - - Nix supports authenticated reads from Amazon S3 and - S3-compatible binary caches. For AWS S3 the binary cache URL for the - example bucket will be exactly s3://example-nix-cache. - - Nix will use the default - credential provider chain for authenticating requests to - Amazon S3. - - Your bucket will need a bucket policy allowing the desired - users to perform the s3:GetObject and - s3:GetBucketLocation actions on all objects in the - bucket. The anonymous policy shown above can be updated to - have a restricted Principal to support - this. 
- - -
Authenticated Writes to your S3-compatible binary cache - - Nix fully supports writing to Amazon S3 and - S3-compatible buckets. The binary cache URL for our example bucket will - be s3://example-nix-cache. - - Nix will use the default - credential provider chain for authenticating requests to - Amazon S3. - - Your account will need the following IAM policy to - upload to the cache: - - - - - Uploading with a specific credential profile for Amazon S3 - nix copy --to 's3://example-nix-cache?profile=cache-upload&region=eu-west-2' nixpkgs.hello - - - Uploading to an S3-Compatible Binary Cache - nix copy --to 's3://example-nix-cache?profile=cache-upload&scheme=https&endpoint=minio.example.com' nixpkgs.hello -
-
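Conversely, to install from such a cache rather than upload to it, the cache can be passed as an extra binary cache just like the HTTP caches earlier in this chapter. A sketch for the anonymous-read case of the example bucket:

$ nix-env -i firefox --option extra-binary-caches https://example-nix-cache.s3.amazonaws.com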
diff --git a/third_party/nix/doc/manual/packages/sharing-packages.xml b/third_party/nix/doc/manual/packages/sharing-packages.xml deleted file mode 100644 index bb6c52b8f8..0000000000 --- a/third_party/nix/doc/manual/packages/sharing-packages.xml +++ /dev/null @@ -1,20 +0,0 @@ - - -Sharing Packages Between Machines - -Sometimes you want to copy a package from one machine to -another. Or, you want to install some packages and you know that -another machine already has some or all of those packages or their -dependencies. In that case there are mechanisms to quickly copy -packages between machines. - - - - - - - diff --git a/third_party/nix/doc/manual/packages/ssh-substituter.xml b/third_party/nix/doc/manual/packages/ssh-substituter.xml deleted file mode 100644 index 8db3f96625..0000000000 --- a/third_party/nix/doc/manual/packages/ssh-substituter.xml +++ /dev/null @@ -1,73 +0,0 @@ -
- -Serving a Nix store via SSH - -You can tell Nix to automatically fetch needed binaries from a -remote Nix store via SSH. For example, the following installs Firefox, -automatically fetching any store paths in Firefox’s closure if they -are available on the server avalon: - - -$ nix-env -i firefox --substituters ssh://alice@avalon - - -This works similar to the binary cache substituter that Nix usually -uses, only using SSH instead of HTTP: if a store path -P is needed, Nix will first check if it’s available -in the Nix store on avalon. If not, it will fall -back to using the binary cache substituter, and then to building from -source. - -The SSH substituter currently does not allow you to enter -an SSH passphrase interactively. Therefore, you should use -ssh-add to load the decrypted private key into -ssh-agent. - -You can also copy the closure of some store path, without -installing it into your profile, e.g. - - -$ nix-store -r /nix/store/m85bxg…-firefox-34.0.5 --substituters ssh://alice@avalon - - -This is essentially equivalent to doing - - -$ nix-copy-closure --from alice@avalon /nix/store/m85bxg…-firefox-34.0.5 - - - - -You can use SSH’s forced command feature to -set up a restricted user account for SSH substituter access, allowing -read-only access to the local Nix store, but nothing more. For -example, add the following lines to sshd_config -to restrict the user nix-ssh: - - -Match User nix-ssh - AllowAgentForwarding no - AllowTcpForwarding no - PermitTTY no - PermitTunnel no - X11Forwarding no - ForceCommand nix-store --serve -Match All - - -On NixOS, you can accomplish the same by adding the following to your -configuration.nix: - - -nix.sshServe.enable = true; -nix.sshServe.keys = [ "ssh-dss AAAAB3NzaC1k... bob@example.org" ]; - - -where the latter line lists the public keys of users that are allowed -to connect. - -
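Because the SSH substituter cannot prompt for a passphrase, a typical client-side session first loads the key into ssh-agent, as suggested above. For example (the key path is simply whatever your key happens to be):

$ eval "$(ssh-agent)"
$ ssh-add ~/.ssh/id_rsa
$ nix-env -i firefox --substituters ssh://alice@avalon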
diff --git a/third_party/nix/doc/manual/quote-literals.xsl b/third_party/nix/doc/manual/quote-literals.xsl deleted file mode 100644 index 5002643dbd..0000000000 --- a/third_party/nix/doc/manual/quote-literals.xsl +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - `' - - - - - - - - - -
- - - -
-
- - - - - - - - - - - - - -
diff --git a/third_party/nix/doc/manual/release-notes/release-notes.xml b/third_party/nix/doc/manual/release-notes/release-notes.xml deleted file mode 100644 index 2655d68e35..0000000000 --- a/third_party/nix/doc/manual/release-notes/release-notes.xml +++ /dev/null @@ -1,51 +0,0 @@ - - -Nix Release Notes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/third_party/nix/doc/manual/release-notes/rl-0.10.1.xml b/third_party/nix/doc/manual/release-notes/rl-0.10.1.xml deleted file mode 100644 index 95829323d4..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.10.1.xml +++ /dev/null @@ -1,13 +0,0 @@ -
- -Release 0.10.1 (2006-10-11) - -This release fixes two somewhat obscure bugs that occur when -evaluating Nix expressions that are stored inside the Nix store -(NIX-67). These do not affect most users. - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.10.xml b/third_party/nix/doc/manual/release-notes/rl-0.10.xml deleted file mode 100644 index 9afec4de94..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.10.xml +++ /dev/null @@ -1,323 +0,0 @@ -
- -Release 0.10 (2006-10-06) - -This version of Nix uses Berkeley DB 4.4 instead of 4.3. -The database is upgraded automatically, but you should be careful not -to use old versions of Nix that still use Berkeley DB 4.3. In -particular, if you use a Nix installed through Nix, you should run - - -$ nix-store --clear-substitutes - -first. - -Also, the database schema has changed slighted to fix a -performance issue (see below). When you run any Nix 0.10 command for -the first time, the database will be upgraded automatically. This is -irreversible. - - - - - - - - nix-env usability improvements: - - - - An option - (or ) has been added to nix-env - --query to allow you to compare installed versions of - packages to available versions, or vice versa. An easy way to - see if you are up to date with what’s in your subscribed - channels is nix-env -qc \*. - - nix-env --query now takes as - arguments a list of package names about which to show - information, just like , etc.: for - example, nix-env -q gcc. Note that to show - all derivations, you need to specify - \*. - - nix-env -i - pkgname will now install - the highest available version of - pkgname, rather than installing all - available versions (which would probably give collisions) - (NIX-31). - - nix-env (-i|-u) --dry-run now - shows exactly which missing paths will be built or - substituted. - - nix-env -qa --description - shows human-readable descriptions of packages, provided that - they have a meta.description attribute (which - most packages in Nixpkgs don’t have yet). - - - - - - - New language features: - - - - Reference scanning (which happens after each - build) is much faster and takes a constant amount of - memory. - - String interpolation. Expressions like - - -"--with-freetype2-library=" + freetype + "/lib" - - can now be written as - - -"--with-freetype2-library=${freetype}/lib" - - You can write arbitrary expressions within - ${...}, not just - identifiers. - - Multi-line string literals. - - String concatenations can now involve - derivations, as in the example "--with-freetype2-library=" - + freetype + "/lib". This was not previously possible - because we need to register that a derivation that uses such a - string is dependent on freetype. The - evaluator now properly propagates this information. - Consequently, the subpath operator (~) has - been deprecated. - - Default values of function arguments can now - refer to other function arguments; that is, all arguments are in - scope in the default values - (NIX-45). - - - - Lots of new built-in primitives, such as - functions for list manipulation and integer arithmetic. See the - manual for a complete list. All primops are now available in - the set builtins, allowing one to test for - the availability of primop in a backwards-compatible - way. - - Real let-expressions: let x = ...; - ... z = ...; in .... - - - - - - - New commands nix-pack-closure and - nix-unpack-closure than can be used to easily - transfer a store path with all its dependencies to another machine. - Very convenient whenever you have some package on your machine and - you want to copy it somewhere else. - - - XML support: - - - - nix-env -q --xml prints the - installed or available packages in an XML representation for - easy processing by other tools. - - nix-instantiate --eval-only - --xml prints an XML representation of the resulting - term. (The new flag forces ‘deep’ - evaluation of the result, i.e., list elements and attributes are - evaluated recursively.) 
- - In Nix expressions, the primop - builtins.toXML converts a term to an XML - representation. This is primarily useful for passing structured - information to builders. - - - - - - - You can now unambiguously specify which derivation to - build or install in nix-env, - nix-instantiate and nix-build - using the / flags, which - takes an attribute name as argument. (Unlike symbolic package names - such as subversion-1.4.0, attribute names in an - attribute set are unique.) For instance, a quick way to perform a - test build of a package in Nixpkgs is nix-build - pkgs/top-level/all-packages.nix -A - foo. nix-env -q - --attr shows the attribute names corresponding to each - derivation. - - - If the top-level Nix expression used by - nix-env, nix-instantiate or - nix-build evaluates to a function whose arguments - all have default values, the function will be called automatically. - Also, the new command-line switch can be used to specify - function arguments on the command line. - - - nix-install-package --url - URL allows a package to be - installed directly from the given URL. - - - Nix now works behind an HTTP proxy server; just set - the standard environment variables http_proxy, - https_proxy, ftp_proxy or - all_proxy appropriately. Functions such as - fetchurl in Nixpkgs also respect these - variables. - - - nix-build -o - symlink allows the symlink to - the build result to be named something other than - result. - - - - - - Platform support: - - - - Support for 64-bit platforms, provided a suitably - patched ATerm library is used. Also, files larger than 2 - GiB are now supported. - - Added support for Cygwin (Windows, - i686-cygwin), Mac OS X on Intel - (i686-darwin) and Linux on PowerPC - (powerpc-linux). - - Users of SMP and multicore machines will - appreciate that the number of builds to be performed in parallel - can now be specified in the configuration file in the - build-max-jobs setting. - - - - - - - Garbage collector improvements: - - - - Open files (such as running programs) are now - used as roots of the garbage collector. This prevents programs - that have been uninstalled from being garbage collected while - they are still running. The script that detects these - additional runtime roots - (find-runtime-roots.pl) is inherently - system-specific, but it should work on Linux and on all - platforms that have the lsof - utility. - - nix-store --gc - (a.k.a. nix-collect-garbage) prints out the - number of bytes freed on standard output. nix-store - --gc --print-dead shows how many bytes would be freed - by an actual garbage collection. - - nix-collect-garbage -d - removes all old generations of all profiles - before calling the actual garbage collector (nix-store - --gc). This is an easy way to get rid of all old - packages in the Nix store. - - nix-store now has an - operation to delete specific paths - from the Nix store. It won’t delete reachable (non-garbage) - paths unless is - specified. - - - - - - - Berkeley DB 4.4’s process registry feature is used - to recover from crashed Nix processes. - - - - A performance issue has been fixed with the - referer table, which stores the inverse of the - references table (i.e., it tells you what store - paths refer to a given path). Maintaining this table could take a - quadratic amount of time, as well as a quadratic amount of Berkeley - DB log file space (in particular when running the garbage collector) - (NIX-23). - - Nix now catches the TERM and - HUP signals in addition to the - INT signal. 
So you can now do a killall - nix-store without triggering a database - recovery. - - bsdiff updated to version - 4.3. - - Substantial performance improvements in expression - evaluation and nix-env -qa, all thanks to Valgrind. Memory use has - been reduced by a factor 8 or so. Big speedup by memoisation of - path hashing. - - Lots of bug fixes, notably: - - - - Make sure that the garbage collector can run - successfully when the disk is full - (NIX-18). - - nix-env now locks the profile - to prevent races between concurrent nix-env - operations on the same profile - (NIX-7). - - Removed misleading messages from - nix-env -i (e.g., installing - `foo' followed by uninstalling - `foo') (NIX-17). - - - - - - Nix source distributions are a lot smaller now since - we no longer include a full copy of the Berkeley DB source - distribution (but only the bits we need). - - Header files are now installed so that external - programs can use the Nix libraries. - - - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.11.xml b/third_party/nix/doc/manual/release-notes/rl-0.11.xml deleted file mode 100644 index 7ad0ab5b71..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.11.xml +++ /dev/null @@ -1,261 +0,0 @@ -
- -Release 0.11 (2007-12-31) - -Nix 0.11 has many improvements over the previous stable release. -The most important improvement is secure multi-user support. It also -features many usability enhancements and language extensions, many of -them prompted by NixOS, the purely functional Linux distribution based -on Nix. Here is an (incomplete) list: - - - - - - Secure multi-user support. A single Nix store can - now be shared between multiple (possible untrusted) users. This is - an important feature for NixOS, where it allows non-root users to - install software. The old setuid method for sharing a store between - multiple users has been removed. Details for setting up a - multi-user store can be found in the manual. - - - The new command nix-copy-closure - gives you an easy and efficient way to exchange software between - machines. It copies the missing parts of the closure of a set of - store path to or from a remote machine via - ssh. - - - A new kind of string literal: strings between double - single-quotes ('') have indentation - “intelligently” removed. This allows large strings (such as shell - scripts or configuration file fragments in NixOS) to cleanly follow - the indentation of the surrounding expression. It also requires - much less escaping, since '' is less common in - most languages than ". - - - nix-env - modifies the current generation of a profile so that it contains - exactly the specified derivation, and nothing else. For example, - nix-env -p /nix/var/nix/profiles/browser --set - firefox lets the profile named - browser contain just Firefox. - - - nix-env now maintains - meta-information about installed packages in profiles. The - meta-information is the contents of the meta - attribute of derivations, such as description or - homepage. The command nix-env -q --xml - --meta shows all meta-information. - - - nix-env now uses the - meta.priority attribute of derivations to resolve - filename collisions between packages. Lower priority values denote - a higher priority. For instance, the GCC wrapper package and the - Binutils package in Nixpkgs both have a file - bin/ld, so previously if you tried to install - both you would get a collision. Now, on the other hand, the GCC - wrapper declares a higher priority than Binutils, so the former’s - bin/ld is symlinked in the user - environment. - - - nix-env -i / -u: instead of - breaking package ties by version, break them by priority and version - number. That is, if there are multiple packages with the same name, - then pick the package with the highest priority, and only use the - version if there are multiple packages with the same - priority. - - This makes it possible to mark specific versions/variant in - Nixpkgs more or less desirable than others. A typical example would - be a beta version of some package (e.g., - gcc-4.2.0rc1) which should not be installed even - though it is the highest version, except when it is explicitly - selected (e.g., nix-env -i - gcc-4.2.0rc1). - - - nix-env --set-flag allows meta - attributes of installed packages to be modified. There are several - attributes that can be usefully modified, because they affect the - behaviour of nix-env or the user environment - build script: - - - - meta.priority can be changed - to resolve filename clashes (see above). - - meta.keep can be set to - true to prevent the package from being - upgraded or replaced. Useful if you want to hang on to an older - version of a package. - - meta.active can be set to - false to “disable” the package. 
That is, no - symlinks will be generated to the files of the package, but it - remains part of the profile (so it won’t be garbage-collected). - Set it back to true to re-enable the - package. - - - - - - - nix-env -q now has a flag - () that causes - nix-env to show only those derivations whose - output is already in the Nix store or that can be substituted (i.e., - downloaded from somewhere). In other words, it shows the packages - that can be installed “quickly”, i.e., don’t need to be built from - source. The flag is also available in - nix-env -i and nix-env -u to - filter out derivations for which no pre-built binary is - available. - - - The new option (in - nix-env, nix-instantiate and - nix-build) is like , except - that the value is a string. For example, --argstr system - i686-linux is equivalent to --arg system - \"i686-linux\" (note that - prevents annoying quoting around shell arguments). - - - nix-store has a new operation - () - paths that shows the build log of the given - paths. - - - - - - Nix now uses Berkeley DB 4.5. The database is - upgraded automatically, but you should be careful not to use old - versions of Nix that still use Berkeley DB 4.4. - - - - - - The option - (corresponding to the configuration setting - build-max-silent-time) allows you to set a - timeout on builds — if a build produces no output on - stdout or stderr for the given - number of seconds, it is terminated. This is useful for recovering - automatically from builds that are stuck in an infinite - loop. - - - nix-channel: each subscribed - channel is its own attribute in the top-level expression generated - for the channel. This allows disambiguation (e.g. nix-env - -i -A nixpkgs_unstable.firefox). - - - The substitutes table has been removed from the - database. This makes operations such as nix-pull - and nix-channel --update much, much - faster. - - - nix-pull now supports - bzip2-compressed manifests. This speeds up - channels. - - - nix-prefetch-url now has a - limited form of caching. This is used by - nix-channel to prevent unnecessary downloads when - the channel hasn’t changed. - - - nix-prefetch-url now by default - computes the SHA-256 hash of the file instead of the MD5 hash. In - calls to fetchurl you should pass the - sha256 attribute instead of - md5. You can pass either a hexadecimal or a - base-32 encoding of the hash. - - - Nix can now perform builds in an automatically - generated “chroot”. This prevents a builder from accessing stuff - outside of the Nix store, and thus helps ensure purity. This is an - experimental feature. - - - The new command nix-store - --optimise reduces Nix store disk space usage by finding - identical files in the store and hard-linking them to each other. - It typically reduces the size of the store by something like - 25-35%. - - - ~/.nix-defexpr can now be a - directory, in which case the Nix expressions in that directory are - combined into an attribute set, with the file names used as the - names of the attributes. The command nix-env - --import (which set the - ~/.nix-defexpr symlink) is - removed. - - - Derivations can specify the new special attribute - allowedReferences to enforce that the references - in the output of a derivation are a subset of a declared set of - paths. For example, if allowedReferences is an - empty list, then the output must not have any references. This is - used in NixOS to check that generated files such as initial ramdisks - for booting Linux don’t have any dependencies. 
- - - The new attribute - exportReferencesGraph allows builders access to - the references graph of their inputs. This is used in NixOS for - tasks such as generating ISO-9660 images that contain a Nix store - populated with the closure of certain paths. - - - Fixed-output derivations (like - fetchurl) can define the attribute - impureEnvVars to allow external environment - variables to be passed to builders. This is used in Nixpkgs to - support proxy configuration, among other things. - - - Several new built-in functions: - builtins.attrNames, - builtins.filterSource, - builtins.isAttrs, - builtins.isFunction, - builtins.listToAttrs, - builtins.stringLength, - builtins.sub, - builtins.substring, - throw, - builtins.trace, - builtins.readFile. - - - - -
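As a small illustration of two of the new built-in functions listed above (these examples are not taken from the release itself):

builtins.attrNames { foo = 1; bar = 2; }     evaluates to [ "bar" "foo" ]
builtins.stringLength "hello"                evaluates to 5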
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.12.xml b/third_party/nix/doc/manual/release-notes/rl-0.12.xml deleted file mode 100644 index fdba8c4d57..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.12.xml +++ /dev/null @@ -1,175 +0,0 @@ -
- -Release 0.12 (2008-11-20) - - - - - Nix no longer uses Berkeley DB to store Nix store metadata. - The principal advantages of the new storage scheme are: it works - properly over decent implementations of NFS (allowing Nix stores - to be shared between multiple machines); no recovery is needed - when a Nix process crashes; no write access is needed for - read-only operations; no more running out of Berkeley DB locks on - certain operations. - - You still need to compile Nix with Berkeley DB support if - you want Nix to automatically convert your old Nix store to the - new schema. If you don’t need this, you can build Nix with the - configure option - . - - After the automatic conversion to the new schema, you can - delete the old Berkeley DB files: - - -$ cd /nix/var/nix/db -$ rm __db* log.* derivers references referrers reserved validpaths DB_CONFIG - - The new metadata is stored in the directories - /nix/var/nix/db/info and - /nix/var/nix/db/referrer. Though the - metadata is stored in human-readable plain-text files, they are - not intended to be human-editable, as Nix is rather strict about - the format. - - The new storage schema may or may not require less disk - space than the Berkeley DB environment, mostly depending on the - cluster size of your file system. With 1 KiB clusters (which - seems to be the ext3 default nowadays) it - usually takes up much less space. - - - There is a new substituter that copies paths - directly from other (remote) Nix stores mounted somewhere in the - filesystem. For instance, you can speed up an installation by - mounting some remote Nix store that already has the packages in - question via NFS or sshfs. The environment - variable NIX_OTHER_STORES specifies the locations of - the remote Nix directories, - e.g. /mnt/remote-fs/nix. - - New nix-store operations - and to dump - and reload the Nix database. - - The garbage collector has a number of new options to - allow only some of the garbage to be deleted. The option - tells the - collector to stop after at least N bytes - have been deleted. The option tells it to stop after the - link count on /nix/store has dropped below - N. This is useful for very large Nix - stores on filesystems with a 32000 subdirectories limit (like - ext3). The option - causes store paths to be deleted in order of ascending last access - time. This allows non-recently used stuff to be deleted. The - option - specifies an upper limit to the last accessed time of paths that may - be deleted. For instance, - - - $ nix-store --gc -v --max-atime $(date +%s -d "2 months ago") - - deletes everything that hasn’t been accessed in two months. - - nix-env now uses optimistic - profile locking when performing an operation like installing or - upgrading, instead of setting an exclusive lock on the profile. - This allows multiple nix-env -i / -u / -e - operations on the same profile in parallel. If a - nix-env operation sees at the end that the profile - was changed in the meantime by another process, it will just - restart. This is generally cheap because the build results are - still in the Nix store. - - The option is now - supported by nix-store -r and - nix-build. - - The information previously shown by - (i.e., which derivations will be built - and which paths will be substituted) is now always shown by - nix-env, nix-store -r and - nix-build. The total download size of - substitutable paths is now also shown. 
For instance, a build will - show something like - - -the following derivations will be built: - /nix/store/129sbxnk5n466zg6r1qmq1xjv9zymyy7-activate-configuration.sh.drv - /nix/store/7mzy971rdm8l566ch8hgxaf89x7lr7ik-upstart-jobs.drv - ... -the following paths will be downloaded/copied (30.02 MiB): - /nix/store/4m8pvgy2dcjgppf5b4cj5l6wyshjhalj-samba-3.2.4 - /nix/store/7h1kwcj29ip8vk26rhmx6bfjraxp0g4l-libunwind-0.98.6 - ... - - - - Language features: - - - - @-patterns as in Haskell. For instance, in a - function definition - - f = args @ {x, y, z}: ...; - - args refers to the argument as a whole, which - is further pattern-matched against the attribute set pattern - {x, y, z}. - - ...” (ellipsis) patterns. - An attribute set pattern can now say ... at - the end of the attribute name list to specify that the function - takes at least the listed attributes, while - ignoring additional attributes. For instance, - - {stdenv, fetchurl, fuse, ...}: ... - - defines a function that accepts any attribute set that includes - at least the three listed attributes. - - New primops: - builtins.parseDrvName (split a package name - string like "nix-0.12pre12876" into its name - and version components, e.g. "nix" and - "0.12pre12876"), - builtins.compareVersions (compare two version - strings using the same algorithm that nix-env - uses), builtins.length (efficiently compute - the length of a list), builtins.mul (integer - multiplication), builtins.div (integer - division). - - - - - - - - nix-prefetch-url now supports - mirror:// URLs, provided that the environment - variable NIXPKGS_ALL points at a Nixpkgs - tree. - - Removed the commands - nix-pack-closure and - nix-unpack-closure. You can do almost the same - thing but much more efficiently by doing nix-store --export - $(nix-store -qR paths) > closure and - nix-store --import < - closure. - - Lots of bug fixes, including a big performance bug in - the handling of with-expressions. - - - -
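To illustrate the new version-handling primops mentioned above (illustrative evaluations, not from the release notes themselves):

builtins.parseDrvName "nix-0.12pre12876"
    evaluates to { name = "nix"; version = "0.12pre12876"; }
builtins.compareVersions "1.10" "1.9"
    evaluates to 1, since 1.10 is considered newer than 1.9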
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.13.xml b/third_party/nix/doc/manual/release-notes/rl-0.13.xml deleted file mode 100644 index cce2e4a26b..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.13.xml +++ /dev/null @@ -1,106 +0,0 @@ -
- -Release 0.13 (2009-11-05) - -This is primarily a bug fix release. It has some new -features: - - - - - Syntactic sugar for writing nested attribute sets. Instead of - - -{ - foo = { - bar = 123; - xyzzy = true; - }; - a = { b = { c = "d"; }; }; -} - - - you can write - - -{ - foo.bar = 123; - foo.xyzzy = true; - a.b.c = "d"; -} - - - This is useful, for instance, in NixOS configuration files. - - - - - Support for Nix channels generated by Hydra, the Nix-based - continuous build system. (Hydra generates NAR archives on the - fly, so the size and hash of these archives isn’t known in - advance.) - - - - Support i686-linux builds directly on - x86_64-linux Nix installations. This is - implemented using the personality() syscall, - which causes uname to return - i686 in child processes. - - - - Various improvements to the chroot - support. Building in a chroot works quite well - now. - - - - Nix no longer blocks if it tries to build a path and another - process is already building the same path. Instead it tries to - build another buildable path first. This improves - parallelism. - - - - Support for large (> 4 GiB) files in NAR archives. - - - - Various (performance) improvements to the remote build - mechanism. - - - - New primops: builtins.addErrorContext (to - add a string to stack traces — useful for debugging), - builtins.isBool, - builtins.isString, - builtins.isInt, - builtins.intersectAttrs. - - - - OpenSolaris support (Sander van der Burg). - - - - Stack traces are no longer displayed unless the - option is used. - - - - The scoping rules for inherit - (e) ... in recursive - attribute sets have changed. The expression - e can now refer to the attributes - defined in the containing set. - - - - -
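A small sketch of the new scoping rule for inherit (e) described in the last item above:

rec {
  pkgs = { foo = "foo-1.0"; bar = "bar-2.0"; };
  inherit (pkgs) foo bar;    # pkgs here refers to the attribute defined one line up
}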
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.14.xml b/third_party/nix/doc/manual/release-notes/rl-0.14.xml deleted file mode 100644 index e5fe9da78e..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.14.xml +++ /dev/null @@ -1,46 +0,0 @@ -
- -Release 0.14 (2010-02-04) - -This release has the following improvements: - - - - - The garbage collector now starts deleting garbage much - faster than before. It no longer determines liveness of all paths - in the store, but does so on demand. - - - - Added a new operation, nix-store --query - --roots, that shows the garbage collector roots that - directly or indirectly point to the given store paths. - - - - Removed support for converting Berkeley DB-based Nix - databases to the new schema. - - - - Removed the and - garbage collector options. They were - not very useful in practice. - - - - On Windows, Nix now requires Cygwin 1.7.x. - - - - A few bug fixes. - - - - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.15.xml b/third_party/nix/doc/manual/release-notes/rl-0.15.xml deleted file mode 100644 index 9f58a8efc5..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.15.xml +++ /dev/null @@ -1,14 +0,0 @@ -
- -Release 0.15 (2010-03-17) - -This is a bug-fix release. Among other things, it fixes -building on Mac OS X (Snow Leopard), and improves the contents of -/etc/passwd and /etc/group -in chroot builds. - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.16.xml b/third_party/nix/doc/manual/release-notes/rl-0.16.xml deleted file mode 100644 index af1edc0ebb..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.16.xml +++ /dev/null @@ -1,55 +0,0 @@ -
- -Release 0.16 (2010-08-17) - -This release has the following improvements: - - - - - The Nix expression evaluator is now much faster in most - cases: typically, 3 - to 8 times compared to the old implementation. It also - uses less memory. It no longer depends on the ATerm - library. - - - - - Support for configurable parallelism inside builders. Build - scripts have always had the ability to perform multiple build - actions in parallel (for instance, by running make -j - 2), but this was not desirable because the number of - actions to be performed in parallel was not configurable. Nix - now has an option as well as a configuration - setting build-cores = - N that causes the - environment variable NIX_BUILD_CORES to be set to - N when the builder is invoked. The - builder can use this at its discretion to perform a parallel - build, e.g., by calling make -j - N. In Nixpkgs, this can be - enabled on a per-package basis by setting the derivation - attribute enableParallelBuilding to - true. - - - - - nix-store -q now supports XML output - through the flag. - - - - Several bug fixes. - - - - -
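A sketch of how a build script might honour the NIX_BUILD_CORES variable described above (the exact invocation is of course up to the package); in Nixpkgs this is what setting enableParallelBuilding = true effectively enables:

make -j $NIX_BUILD_CORES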
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.5.xml b/third_party/nix/doc/manual/release-notes/rl-0.5.xml deleted file mode 100644 index e9f8bf2701..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.5.xml +++ /dev/null @@ -1,11 +0,0 @@ -
- -Release 0.5 and earlier - -Please refer to the Subversion commit log messages. - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.6.xml b/third_party/nix/doc/manual/release-notes/rl-0.6.xml deleted file mode 100644 index 6dc6521d3c..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.6.xml +++ /dev/null @@ -1,122 +0,0 @@ -
- -Release 0.6 (2004-11-14) - - - - - Rewrite of the normalisation engine. - - - - Multiple builds can now be performed in parallel - (option ). - - Distributed builds. Nix can now call a shell - script to forward builds to Nix installations on remote - machines, which may or may not be of the same platform - type. - - Option allows - recovery from broken substitutes. - - Option causes - building of other (unaffected) derivations to continue if one - failed. - - - - - - - - Improvements to the garbage collector (i.e., it - should actually work now). - - Setuid Nix installations allow a Nix store to be - shared among multiple users. - - Substitute registration is much faster - now. - - A utility nix-build to build a - Nix expression and create a symlink to the result int the current - directory; useful for testing Nix derivations. - - Manual updates. - - - - nix-env changes: - - - - Derivations for other platforms are filtered out - (which can be overridden using - ). - - by default now - uninstall previous derivations with the same - name. - - allows upgrading to a - specific version. - - New operation - to remove profile - generations (necessary for effective garbage - collection). - - Nicer output (sorted, - columnised). - - - - - - - - More sensible verbosity levels all around (builder - output is now shown always, unless is - given). - - - - Nix expression language changes: - - - - New language construct: with - E1; - E2 brings all attributes - defined in the attribute set E1 in - scope in E2. - - Added a map - function. - - Various new operators (e.g., string - concatenation). - - - - - - - - Expression evaluation is much - faster. - - An Emacs mode for editing Nix expressions (with - syntax highlighting and indentation) has been - added. - - Many bug fixes. - - - -
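A tiny illustration of the with construct introduced in this release (not taken from the release itself):

with { x = 1; y = 2; }; x + y        # evaluates to 3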
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.7.xml b/third_party/nix/doc/manual/release-notes/rl-0.7.xml deleted file mode 100644 index 6f95db4367..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.7.xml +++ /dev/null @@ -1,35 +0,0 @@ -
- -Release 0.7 (2005-01-12) - - - - Binary patching. When upgrading components using - pre-built binaries (through nix-pull / nix-channel), Nix can - automatically download and apply binary patches to already installed - components instead of full downloads. Patching is “smart”: if there - is a sequence of patches to an installed - component, Nix will use it. Patches are currently generated - automatically between Nixpkgs (pre-)releases. - - Simplifications to the substitute - mechanism. - - Nix-pull now stores downloaded manifests in - /nix/var/nix/manifests. - - Metadata on files in the Nix store is canonicalised - after builds: the last-modified timestamp is set to 0 (00:00:00 - 1/1/1970), the mode is set to 0444 or 0555 (readable and possibly - executable by all; setuid/setgid bits are dropped), and the group is - set to the default. This ensures that the result of a build and an - installation through a substitute is the same; and that timestamp - dependencies are revealed. - - - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.8.1.xml b/third_party/nix/doc/manual/release-notes/rl-0.8.1.xml deleted file mode 100644 index f7ffca0f8d..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.8.1.xml +++ /dev/null @@ -1,21 +0,0 @@ -
- -Release 0.8.1 (2005-04-13) - -This is a bug fix release. - - - - Patch downloading was broken. - - The garbage collector would not delete paths that - had references from invalid (but substitutable) - paths. - - - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.8.xml b/third_party/nix/doc/manual/release-notes/rl-0.8.xml deleted file mode 100644 index 784b26c6b7..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.8.xml +++ /dev/null @@ -1,246 +0,0 @@ -
- -Release 0.8 (2005-04-11) - -NOTE: the hashing scheme in Nix 0.8 changed (as detailed below). -As a result, nix-pull manifests and channels built -for Nix 0.7 and below will now work anymore. However, the Nix -expression language has not changed, so you can still build from -source. Also, existing user environments continue to work. Nix 0.8 -will automatically upgrade the database schema of previous -installations when it is first run. - -If you get the error message - - -you have an old-style manifest `/nix/var/nix/manifests/[...]'; please -delete it - -you should delete previously downloaded manifests: - - -$ rm /nix/var/nix/manifests/* - -If nix-channel gives the error message - - -manifest `http://catamaran.labs.cs.uu.nl/dist/nix/channels/[channel]/MANIFEST' -is too old (i.e., for Nix <= 0.7) - -then you should unsubscribe from the offending channel -(nix-channel --remove -URL; leave out -/MANIFEST), and subscribe to the same URL, with -channels replaced by channels-v3 -(e.g., ). - -Nix 0.8 has the following improvements: - - - - The cryptographic hashes used in store paths are now - 160 bits long, but encoded in base-32 so that they are still only 32 - characters long (e.g., - /nix/store/csw87wag8bqlqk7ipllbwypb14xainap-atk-1.9.0). - (This is actually a 160 bit truncation of a SHA-256 - hash.) - - Big cleanups and simplifications of the basic store - semantics. The notion of “closure store expressions” is gone (and - so is the notion of “successors”); the file system references of a - store path are now just stored in the database. - - For instance, given any store path, you can query its closure: - - -$ nix-store -qR $(which firefox) -... lots of paths ... - - Also, Nix now remembers for each store path the derivation that - built it (the “deriver”): - - -$ nix-store -qR $(which firefox) -/nix/store/4b0jx7vq80l9aqcnkszxhymsf1ffa5jd-firefox-1.0.1.drv - - So to see the build-time dependencies, you can do - - -$ nix-store -qR $(nix-store -qd $(which firefox)) - - or, in a nicer format: - - -$ nix-store -q --tree $(nix-store -qd $(which firefox)) - - - - File system references are also stored in reverse. For - instance, you can query all paths that directly or indirectly use a - certain Glibc: - - -$ nix-store -q --referrers-closure \ - /nix/store/8lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4 - - - - - - The concept of fixed-output derivations has been - formalised. Previously, functions such as - fetchurl in Nixpkgs used a hack (namely, - explicitly specifying a store path hash) to prevent changes to, say, - the URL of the file from propagating upwards through the dependency - graph, causing rebuilds of everything. This can now be done cleanly - by specifying the outputHash and - outputHashAlgo attributes. Nix itself checks - that the content of the output has the specified hash. (This is - important for maintaining certain invariants necessary for future - work on secure shared stores.) - - One-click installation :-) It is now possible to - install any top-level component in Nixpkgs directly, through the web - — see, e.g., . - All you have to do is associate - /nix/bin/nix-install-package with the MIME type - application/nix-package (or the extension - .nixpkg), and clicking on a package link will - cause it to be installed, with all appropriate dependencies. If you - just want to install some specific application, this is easier than - subscribing to a channel. - - nix-store -r - PATHS now builds all the - derivations PATHS in parallel. 
Previously it did them sequentially - (though exploiting possible parallelism between subderivations). - This is nice for build farms. - - nix-channel has new operations - and - . - - New ways of installing components into user - environments: - - - - Copy from another user environment: - - -$ nix-env -i --from-profile .../other-profile firefox - - - - Install a store derivation directly (bypassing the - Nix expression language entirely): - - -$ nix-env -i /nix/store/z58v41v21xd3...-aterm-2.3.1.drv - - (This is used to implement nix-install-package, - which is therefore immune to evolution in the Nix expression - language.) - - Install an already built store path directly: - - -$ nix-env -i /nix/store/hsyj5pbn0d9i...-aterm-2.3.1 - - - - Install the result of a Nix expression specified - as a command-line argument: - - -$ nix-env -f .../i686-linux.nix -i -E 'x: x.firefoxWrapper' - - The difference with the normal installation mode is that - does not use the name - attributes of derivations. Therefore, this can be used to - disambiguate multiple derivations with the same - name. - - - - A hash of the contents of a store path is now stored - in the database after a successful build. This allows you to check - whether store paths have been tampered with: nix-store - --verify --check-contents. - - - - Implemented a concurrent garbage collector. It is now - always safe to run the garbage collector, even if other Nix - operations are happening simultaneously. - - However, there can still be GC races if you use - nix-instantiate and nix-store - --realise directly to build things. To prevent races, - use the flag of those commands. - - - - The garbage collector now finally deletes paths in - the right order (i.e., topologically sorted under the “references” - relation), thus making it safe to interrupt the collector without - risking a store that violates the closure - invariant. - - Likewise, the substitute mechanism now downloads - files in the right order, thus preserving the closure invariant at - all times. - - The result of nix-build is now - registered as a root of the garbage collector. If the - ./result link is deleted, the GC root - disappears automatically. - - - - The behaviour of the garbage collector can be changed - globally by setting options in - /nix/etc/nix/nix.conf. - - - - gc-keep-derivations specifies - whether deriver links should be followed when searching for live - paths. - - gc-keep-outputs specifies - whether outputs of derivations should be followed when searching - for live paths. - - env-keep-derivations - specifies whether user environments should store the paths of - derivations when they are added (thus keeping the derivations - alive). - - - - - - New nix-env query flags - and - . - - fetchurl allows SHA-1 and SHA-256 - in addition to MD5. Just specify the attribute - sha1 or sha256 instead of - md5. - - Manual updates. - - - - - -
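To make the fixed-output mechanism described in the 0.8 notes above concrete, here is a minimal self-contained sketch (an illustration, not part of the deleted release notes) that should work with a present-day Nix; the builder just writes a known string, so the declared outputHash can be computed in the expression itself:

derivation {
  name = "fixed-output-example";
  system = builtins.currentSystem;
  builder = "/bin/sh";                                  # assumes a POSIX shell at /bin/sh
  args = [ "-c" "printf hello > $out" ];
  outputHashAlgo = "sha256";
  outputHash = builtins.hashString "sha256" "hello";    # Nix verifies the output against this hash
}

If the builder produced anything other than the string "hello", Nix would reject the output with a hash mismatch, which is exactly the invariant the notes describe.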
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.9.1.xml b/third_party/nix/doc/manual/release-notes/rl-0.9.1.xml deleted file mode 100644 index 85d11f4168..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.9.1.xml +++ /dev/null @@ -1,13 +0,0 @@ -
- -Release 0.9.1 (2005-09-20) - -This bug fix release addresses a problem with the ATerm library -when the flag in -configure was not used. - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.9.2.xml b/third_party/nix/doc/manual/release-notes/rl-0.9.2.xml deleted file mode 100644 index cb705e98ac..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.9.2.xml +++ /dev/null @@ -1,28 +0,0 @@ -
- -Release 0.9.2 (2005-09-21) - -This bug fix release fixes two problems on Mac OS X: - - - - If Nix was linked against statically linked versions - of the ATerm or Berkeley DB library, there would be dynamic link - errors at runtime. - - nix-pull and - nix-push intermittently failed due to race - conditions involving pipes and child processes with error messages - such as open2: open(GLOB(0x180b2e4), >&=9) failed: Bad - file descriptor at /nix/bin/nix-pull line 77 (issue - NIX-14). - - - - - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-0.9.xml b/third_party/nix/doc/manual/release-notes/rl-0.9.xml deleted file mode 100644 index fd1e633f78..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-0.9.xml +++ /dev/null @@ -1,98 +0,0 @@ -
- -Release 0.9 (2005-09-16) - -NOTE: this version of Nix uses Berkeley DB 4.3 instead of 4.2. -The database is upgraded automatically, but you should be careful not -to use old versions of Nix that still use Berkeley DB 4.2. In -particular, if you use a Nix installed through Nix, you should run - - -$ nix-store --clear-substitutes - -first. - - - - - Unpacking of patch sequences is much faster now - since we no longer do redundant unpacking and repacking of - intermediate paths. - - Nix now uses Berkeley DB 4.3. - - The derivation primitive is - lazier. Attributes of dependent derivations can mutually refer to - each other (as long as there are no data dependencies on the - outPath and drvPath attributes - computed by derivation). - - For example, the expression derivation - attrs now evaluates to (essentially) - - -attrs // { - type = "derivation"; - outPath = derivation! attrs; - drvPath = derivation! attrs; -} - - where derivation! is a primop that does the - actual derivation instantiation (i.e., it does what - derivation used to do). The advantage is that - it allows commands such as nix-env -qa and - nix-env -i to be much faster since they no longer - need to instantiate all derivations, just the - name attribute. - - Also, it allows derivations to cyclically reference each - other, for example, - - -webServer = derivation { - ... - hostName = "svn.cs.uu.nl"; - services = [svnService]; -}; - -svnService = derivation { - ... - hostName = webServer.hostName; -}; - - Previously, this would yield a black hole (infinite recursion). - - - - nix-build now defaults to using - ./default.nix if no Nix expression is - specified. - - nix-instantiate, when applied to - a Nix expression that evaluates to a function, will call the - function automatically if all its arguments have - defaults. - - Nix now uses libtool to build dynamic libraries. - This reduces the size of executables. - - A new list concatenation operator - ++. For example, [1 2 3] ++ [4 5 - 6] evaluates to [1 2 3 4 5 - 6]. - - Some currently undocumented primops to support - low-level build management using Nix (i.e., using Nix as a Make - replacement). See the commit messages for r3578 - and r3580. - - Various bug fixes and performance - improvements. - - - -
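As a small illustration of the auto-call behaviour mentioned above (not taken from the original notes), a default.nix in which every argument has a default, such as the sketch below, can be instantiated directly, because nix-instantiate can call the function with the defaults filled in. The builder path and attribute names are placeholders.

{ name ? "auto-call-example", greeting ? "hello" }:

derivation {
  inherit name greeting;
  system = builtins.currentSystem;
  builder = "/bin/sh";                                    # placeholder builder
  args = [ "-c" "printf '%s' \"$greeting\" > \"$out\"" ]; # greeting reaches the builder as an environment variable
}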
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.0.xml b/third_party/nix/doc/manual/release-notes/rl-1.0.xml deleted file mode 100644 index ff11168d09..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.0.xml +++ /dev/null @@ -1,119 +0,0 @@ -
- -Release 1.0 (2012-05-11) - -There have been numerous improvements and bug fixes since the -previous release. Here are the most significant: - - - - - Nix can now optionally use the Boehm garbage collector. - This significantly reduces the Nix evaluator’s memory footprint, - especially when evaluating large NixOS system configurations. It - can be enabled using the configure - option. - - - - Nix now uses SQLite for its database. This is faster and - more flexible than the old ad hoc format. - SQLite is also used to cache the manifests in - /nix/var/nix/manifests, resulting in a - significant speedup. - - - - Nix now has an search path for expressions. The search path - is set using the environment variable NIX_PATH and - the command line option. In Nix expressions, - paths between angle brackets are used to specify files that must - be looked up in the search path. For instance, the expression - <nixpkgs/default.nix> looks for a file - nixpkgs/default.nix relative to every element - in the search path. - - - - The new command nix-build --run-env - builds all dependencies of a derivation, then starts a shell in an - environment containing all variables from the derivation. This is - useful for reproducing the environment of a derivation for - development. - - - - The new command nix-store --verify-path - verifies that the contents of a store path have not - changed. - - - - The new command nix-store --print-env - prints out the environment of a derivation in a format that can be - evaluated by a shell. - - - - Attribute names can now be arbitrary strings. For instance, - you can write { "foo-1.2" = …; "bla bla" = …; }."bla - bla". - - - - Attribute selection can now provide a default value using - the or operator. For instance, the expression - x.y.z or e evaluates to the attribute - x.y.z if it exists, and e - otherwise. - - - - The right-hand side of the ? operator can - now be an attribute path, e.g., attrs ? - a.b.c. - - - - On Linux, Nix will now make files in the Nix store immutable - on filesystems that support it. This prevents accidental - modification of files in the store by the root user. - - - - Nix has preliminary support for derivations with multiple - outputs. This is useful because it allows parts of a package to - be deployed and garbage-collected separately. For instance, - development parts of a package such as header files or static - libraries would typically not be part of the closure of an - application, resulting in reduced disk usage and installation - time. - - - - The Nix store garbage collector is faster and holds the - global lock for a shorter amount of time. - - - - The option (corresponding to the - configuration setting build-timeout) allows you - to set an absolute timeout on builds — if a build runs for more than - the given number of seconds, it is terminated. This is useful for - recovering automatically from builds that are stuck in an infinite - loop but keep producing output, and for which - --max-silent-time is ineffective. - - - - Nix development has moved to GitHub (). - - - - -
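The search path and the new or operator from the 1.0 notes above can be combined as in the following sketch (illustrative only; it assumes NIX_PATH contains an entry such as nixpkgs=/path/to/nixpkgs and that the checkout provides a hello attribute):

let
  pkgs = import <nixpkgs> { };                              # <nixpkgs> is resolved via NIX_PATH
  hello = pkgs.hello or (throw "this Nixpkgs has no hello attribute");
in {
  found = pkgs ? hello;                                     # true if the attribute exists
  inherit (hello) name;
}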
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.1.xml b/third_party/nix/doc/manual/release-notes/rl-1.1.xml deleted file mode 100644 index 2f26e7a242..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.1.xml +++ /dev/null @@ -1,100 +0,0 @@ -
- -Release 1.1 (2012-07-18) - -This release has the following improvements: - - - - - On Linux, when doing a chroot build, Nix now uses various - namespace features provided by the Linux kernel to improve - build isolation. Namely: - - The private network namespace ensures that - builders cannot talk to the outside world (or vice versa): each - build only sees a private loopback interface. This also means - that two concurrent builds can listen on the same port (e.g. as - part of a test) without conflicting with each - other. - The PID namespace causes each build to start as - PID 1. Processes outside of the chroot are not visible to those - on the inside. On the other hand, processes inside the chroot - are visible from the outside (though with - different PIDs). - The IPC namespace prevents the builder from - communicating with outside processes using SysV IPC mechanisms - (shared memory, message queues, semaphores). It also ensures - that all IPC objects are destroyed when the builder - exits. - The UTS namespace ensures that builders see a - hostname of localhost rather than the actual - hostname. - The private mount namespace was already used by - Nix to ensure that the bind-mounts used to set up the chroot are - cleaned up automatically. - - - - - - Build logs are now compressed using - bzip2. The command nix-store - -l decompresses them on the fly. This can be disabled - by setting the option build-compress-log to - false. - - - - The creation of build logs in - /nix/var/log/nix/drvs can be disabled by - setting the new option build-keep-log to - false. This is useful, for instance, for Hydra - build machines. - - - - Nix now reserves some space in - /nix/var/nix/db/reserved to ensure that the - garbage collector can run successfully if the disk is full. This - is necessary because SQLite transactions fail if the disk is - full. - - - - Added a basic fetchurl function. This - is not intended to replace the fetchurl in - Nixpkgs, but is useful for bootstrapping; e.g., it will allow us - to get rid of the bootstrap binaries in the Nixpkgs source tree - and download them instead. You can use it by doing - import <nix/fetchurl.nix> { url = - url; sha256 = - "hash"; }. (Shea Levy) - - - - Improved RPM spec file. (Michel Alexandre Salim) - - - - Support for on-demand socket-based activation in the Nix - daemon with systemd. - - - - Added a manpage for - nix.conf5. - - - - When using the Nix daemon, the flag in - nix-env -qa is now much faster. - - - - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.10.xml b/third_party/nix/doc/manual/release-notes/rl-1.10.xml deleted file mode 100644 index 689a954663..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.10.xml +++ /dev/null @@ -1,64 +0,0 @@ -
- -Release 1.10 (2015-09-03) - -This is primarily a bug fix release. It also has a number of new -features: - - - - - A number of builtin functions have been added to reduce - Nixpkgs/NixOS evaluation time and memory consumption: - all, - any, - concatStringsSep, - foldl’, - genList, - replaceStrings, - sort. - - - - - The garbage collector is more robust when the disk is full. - - - - Nix supports a new API for building derivations that doesn’t - require a .drv file to be present on disk; it - only requires an in-memory representation of the derivation. This - is used by the Hydra continuous build system to make remote builds - more efficient. - - - - The function <nix/fetchurl.nix> now - uses a builtin builder (i.e. it doesn’t - require starting an external process; the download is performed by - Nix itself). This ensures that derivation paths don’t change when - Nix is upgraded, and obviates the need for ugly hacks to support - chroot execution. - - - - now prints some configuration - information, in particular what compile-time optional features are - enabled, and the paths of various directories. - - - - Build users have their supplementary groups set correctly. - - - - -This release has contributions from Eelco Dolstra, Guillaume -Maudoux, Iwan Aucamp, Jaka Hudoklin, Kirill Elagin, Ludovic Courtès, -Manolis Ragkousis, Nicolas B. Pierron and Shea Levy. - -
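For reference, the builtins listed in the 1.10 entry above can be exercised with a few one-liners (an illustration, not part of the notes); the expected values are given in the comments:

with builtins; {
  a = all (x: x > 0) [ 1 2 3 ];                            # true
  b = any isString [ 1 "two" ];                            # true
  c = concatStringsSep ", " [ "a" "b" "c" ];               # "a, b, c"
  d = foldl' (acc: x: acc + x) 0 (genList (i: i + 1) 4);   # 10
  e = replaceStrings [ "oo" ] [ "00" ] "foobar";           # "f00bar"
  f = sort lessThan [ 3 1 2 ];                             # [ 1 2 3 ]
}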
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.11.10.xml b/third_party/nix/doc/manual/release-notes/rl-1.11.10.xml deleted file mode 100644 index 415388b3e2..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.11.10.xml +++ /dev/null @@ -1,31 +0,0 @@ -
- -Release 1.11.10 (2017-06-12) - -This release fixes a security bug in Nix’s “build user” build -isolation mechanism. Previously, Nix builders had the ability to -create setuid binaries owned by a nixbld -user. Such a binary could then be used by an attacker to assume a -nixbld identity and interfere with subsequent -builds running under the same UID. - -To prevent this issue, Nix now disallows builders from creating -setuid and setgid binaries. On Linux, this is done using a seccomp BPF -filter. Note that this imposes a small performance penalty (e.g. 1% -when building GNU Hello). Using seccomp, we now also prevent the -creation of extended attributes and POSIX ACLs since these cannot be -represented in the NAR format and (in the case of POSIX ACLs) allow -bypassing regular Nix store permissions. On macOS, the restriction is -implemented using the existing sandbox mechanism, which now uses a -minimal “allow all except the creation of setuid/setgid binaries” -profile when regular sandboxing is disabled. On other platforms, the -“build user” mechanism is now disabled. - -Thanks go to Linus Heckemann for discovering and reporting this -bug. - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.11.xml b/third_party/nix/doc/manual/release-notes/rl-1.11.xml deleted file mode 100644 index fe422dd1f8..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.11.xml +++ /dev/null @@ -1,141 +0,0 @@ -
- -Release 1.11 (2016-01-19) - -This is primarily a bug fix release. It also has a number of new -features: - - - - - nix-prefetch-url can now download URLs - specified in a Nix expression. For example, - - -$ nix-prefetch-url -A hello.src - - - will prefetch the file specified by the - fetchurl call in the attribute - hello.src from the Nix expression in the - current directory, and print the cryptographic hash of the - resulting file on stdout. This differs from nix-build -A - hello.src in that it doesn't verify the hash, and is - thus useful when you’re updating a Nix expression. - - You can also prefetch the result of functions that unpack a - tarball, such as fetchFromGitHub. For example: - - -$ nix-prefetch-url --unpack https://github.com/NixOS/patchelf/archive/0.8.tar.gz - - - or from a Nix expression: - - -$ nix-prefetch-url -A nix-repl.src - - - - - - - - The builtin function - <nix/fetchurl.nix> now supports - downloading and unpacking NARs. This removes the need to have - multiple downloads in the Nixpkgs stdenv bootstrap process (like a - separate busybox binary for Linux, or curl/mkdir/sh/bzip2 for - Darwin). Now all those files can be combined into a single NAR, - optionally compressed using xz. - - - - Nix now supports SHA-512 hashes for verifying fixed-output - derivations, and in builtins.hashString. - - - - - The new flag will cause every build to - be executed N+1 times. If the build - output differs between any round, the build is rejected, and the - output paths are not registered as valid. This is primarily - useful to verify build determinism. (We already had a - option to repeat a previously succeeded - build. However, with , non-deterministic - builds are registered in the DB. Preventing that is useful for - Hydra to ensure that non-deterministic builds don't end up - getting published to the binary cache.) - - - - - - The options and , if they - detect a difference between two runs of the same derivation and - is given, will make the output of the other - run available under - store-path-check. This - makes it easier to investigate the non-determinism using tools - like diffoscope, e.g., - - -$ nix-build pkgs/stdenv/linux -A stage1.pkgs.zlib --check -K -error: derivation ‘/nix/store/l54i8wlw2265…-zlib-1.2.8.drv’ may not -be deterministic: output ‘/nix/store/11a27shh6n2i…-zlib-1.2.8’ -differs from ‘/nix/store/11a27shh6n2i…-zlib-1.2.8-check’ - -$ diffoscope /nix/store/11a27shh6n2i…-zlib-1.2.8 /nix/store/11a27shh6n2i…-zlib-1.2.8-check -… -├── lib/libz.a -│ ├── metadata -│ │ @@ -1,15 +1,15 @@ -│ │ -rw-r--r-- 30001/30000 3096 Jan 12 15:20 2016 adler32.o -… -│ │ +rw-r--r-- 30001/30000 3096 Jan 12 15:28 2016 adler32.o -… - - - - - - Improved FreeBSD support. - - - - nix-env -qa --xml --meta now prints - license information. - - - - The maximum number of parallel TCP connections that the - binary cache substituter will use has been decreased from 150 to - 25. This should prevent upsetting some broken NAT routers, and - also improves performance. - - - - All "chroot"-containing strings got renamed to "sandbox". - In particular, some Nix options got renamed, but the old names - are still accepted as lower-priority aliases. 
- - - - - -This release has contributions from Anders Claesson, Anthony -Cowley, Bjørn Forsman, Brian McKenna, Danny Wilson, davidak, Eelco Dolstra, -Fabian Schmitthenner, FrankHB, Ilya Novoselov, janus, Jim Garrison, John -Ericson, Jude Taylor, Ludovic Courtès, Manuel Jacob, Mathnerd314, -Pascal Wittmann, Peter Simons, Philip Potter, Preston Bennes, Rommel -M. Martinez, Sander van der Burg, Shea Levy, Tim Cuthbertson, Tuomas -Tynkkynen, Utku Demir and Vladimír Čunát. - -
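As a quick illustration of the SHA-512 support mentioned in the 1.11 notes above (again, not from the original text), builtins.hashString accepts the new algorithm alongside the existing ones:

{
  sha256 = builtins.hashString "sha256" "nix";   # 64-character hex digest
  sha512 = builtins.hashString "sha512" "nix";   # 128-character hex digest, accepted since Nix 1.11
}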
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.2.xml b/third_party/nix/doc/manual/release-notes/rl-1.2.xml deleted file mode 100644 index 748fd9e670..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.2.xml +++ /dev/null @@ -1,157 +0,0 @@ -
- -Release 1.2 (2012-12-06) - -This release has the following improvements and changes: - - - - - Nix has a new binary substituter mechanism: the - binary cache. A binary cache contains - pre-built binaries of Nix packages. Whenever Nix wants to build a - missing Nix store path, it will check a set of binary caches to - see if any of them has a pre-built binary of that path. The - configuration setting contains a - list of URLs of binary caches. For instance, doing - -$ nix-env -i thunderbird --option binary-caches http://cache.nixos.org - - will install Thunderbird and its dependencies, using the available - pre-built binaries in http://cache.nixos.org. - The main advantage over the old “manifest”-based method of getting - pre-built binaries is that you don’t have to worry about your - manifest being in sync with the Nix expressions you’re installing - from; i.e., you don’t need to run nix-pull to - update your manifest. It’s also more scalable because you don’t - need to redownload a giant manifest file every time. - - - A Nix channel can provide a binary cache URL that will be - used automatically if you subscribe to that channel. If you use - the Nixpkgs or NixOS channels - (http://nixos.org/channels) you automatically get the - cache http://cache.nixos.org. - - Binary caches are created using nix-push. - For details on the operation and format of binary caches, see the - nix-push manpage. More details are provided in - this - nix-dev posting. - - - - Multiple output support should now be usable. A derivation - can declare that it wants to produce multiple store paths by - saying something like - -outputs = [ "lib" "headers" "doc" ]; - - This will cause Nix to pass the intended store path of each output - to the builder through the environment variables - lib, headers and - doc. Other packages can refer to a specific - output by referring to - pkg.output, - e.g. - -buildInputs = [ pkg.lib pkg.headers ]; - - If you install a package with multiple outputs using - nix-env, each output path will be symlinked - into the user environment. - - - - Dashes are now valid as part of identifiers and attribute - names. - - - - The new operation nix-store --repair-path - allows corrupted or missing store paths to be repaired by - redownloading them. nix-store --verify --check-contents - --repair will scan and repair all paths in the Nix - store. Similarly, nix-env, - nix-build, nix-instantiate - and nix-store --realise have a - flag to detect and fix bad paths by - rebuilding or redownloading them. - - - - Nix no longer sets the immutable bit on files in the Nix - store. Instead, the recommended way to guard the Nix store - against accidental modification on Linux is to make it a read-only - bind mount, like this: - - -$ mount --bind /nix/store /nix/store -$ mount -o remount,ro,bind /nix/store - - - Nix will automatically make /nix/store - writable as needed (using a private mount namespace) to allow - modifications. - - - - Store optimisation (replacing identical files in the store - with hard links) can now be done automatically every time a path - is added to the store. This is enabled by setting the - configuration option auto-optimise-store to - true (disabled by default). - - - - Nix now supports xz compression for NARs - in addition to bzip2. It compresses about 30% - better on typical archives and decompresses about twice as - fast. 
- - - - Basic Nix expression evaluation profiling: setting the - environment variable NIX_COUNT_CALLS to - 1 will cause Nix to print how many times each - primop or function was executed. - - - - New primops: concatLists, - elem, elemAt and - filter. - - - - The command nix-copy-closure has a new - flag () to - download missing paths on the target machine using the substitute - mechanism. - - - - The command nix-worker has been renamed - to nix-daemon. Support for running the Nix - worker in “slave” mode has been removed. - - - - The flag of every Nix command now - invokes man. - - - - Chroot builds are now supported on systemd machines. - - - - -This release has contributions from Eelco Dolstra, Florian -Friesdorf, Mats Erik Andersson and Shea Levy. - -
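The new primops listed in the 1.2 notes above behave as follows (illustration only, expected values in the comments):

with builtins; {
  a = concatLists [ [ 1 2 ] [ ] [ 3 ] ];   # [ 1 2 3 ]
  b = elem "x" [ "x" "y" ];                # true
  c = elemAt [ "a" "b" "c" ] 1;            # "b" (zero-based indexing)
  d = filter (n: n != 2) [ 1 2 3 2 ];      # [ 1 3 ]
}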
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.3.xml b/third_party/nix/doc/manual/release-notes/rl-1.3.xml deleted file mode 100644 index e2009ee3ba..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.3.xml +++ /dev/null @@ -1,19 +0,0 @@ -
- -Release 1.3 (2013-01-04) - -This is primarily a bug fix release. When this version is first -run on Linux, it removes any immutable bits from the Nix store and -increases the schema version of the Nix store. (The previous release -removed support for setting the immutable bit; this release clears any -remaining immutable bits to make certain operations more -efficient.) - -This release has contributions from Eelco Dolstra and Stuart -Pernsteiner. - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.4.xml b/third_party/nix/doc/manual/release-notes/rl-1.4.xml deleted file mode 100644 index aefb22f2b9..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.4.xml +++ /dev/null @@ -1,39 +0,0 @@ -
- -Release 1.4 (2013-02-26) - -This release fixes a security bug in multi-user operation. It -was possible for derivations to cause the mode of files outside of the -Nix store to be changed to 444 (read-only but world-readable) by -creating hard links to those files (details). - -There are also the following improvements: - - - - New built-in function: - builtins.hashString. - - Build logs are now stored in - /nix/var/log/nix/drvs/XX/, - where XX is the first two characters of - the derivation. This is useful on machines that keep a lot of build - logs (such as Hydra servers). - - The function corepkgs/fetchurl - can now make the downloaded file executable. This will allow - getting rid of all bootstrap binaries in the Nixpkgs source - tree. - - Language change: The expression "${./path} - ..." now evaluates to a string instead of a - path. - - - -
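To illustrate the 1.4 language change above (a sketch, not from the original notes): interpolating a path now yields a string containing that file's store path. The example assumes a file named builder.sh exists next to the expression.

let s = "${./builder.sh} --flag";   # ./builder.sh is copied to the store and its store path is spliced in
in builtins.isString s              # true since Nix 1.4; previously the expression yielded a path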
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.5.1.xml b/third_party/nix/doc/manual/release-notes/rl-1.5.1.xml deleted file mode 100644 index 035c8dbcbb..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.5.1.xml +++ /dev/null @@ -1,12 +0,0 @@ -
- -Release 1.5.1 (2013-02-28) - -The bug fix to the bug fix had a bug itself, of course. But -this time it will work for sure! - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.5.2.xml b/third_party/nix/doc/manual/release-notes/rl-1.5.2.xml deleted file mode 100644 index 7e81dd2432..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.5.2.xml +++ /dev/null @@ -1,12 +0,0 @@ -
- -Release 1.5.2 (2013-05-13) - -This is primarily a bug fix release. It has contributions from -Eelco Dolstra, Lluís Batlle i Rossell and Shea Levy. - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.5.xml b/third_party/nix/doc/manual/release-notes/rl-1.5.xml deleted file mode 100644 index 8e279d7693..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.5.xml +++ /dev/null @@ -1,12 +0,0 @@ -
- -Release 1.5 (2013-02-27) - -This is a brown paper bag release to fix a regression introduced -by the hard link security fix in 1.4. - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.6.1.xml b/third_party/nix/doc/manual/release-notes/rl-1.6.1.xml deleted file mode 100644 index 9ecc527347..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.6.1.xml +++ /dev/null @@ -1,69 +0,0 @@ -
- -Release 1.6.1 (2013-10-28) - -This is primarily a bug fix release. Changes of interest -are: - - - - - Nix 1.6 accidentally changed the semantics of antiquoted - paths in strings, such as "${/foo}/bar". This - release reverts to the Nix 1.5.3 behaviour. - - - - Previously, Nix optimised expressions such as - "${expr}" to - expr. Thus it neither checked whether - expr could be coerced to a string, nor - applied such coercions. This meant that - "${123}" evaluated to 123, - and "${./foo}" evaluated to - ./foo (even though - "${./foo} " evaluates to - "/nix/store/hash-foo "). - Nix now checks the type of antiquoted expressions and - applies coercions. - - - - Nix now shows the exact position of undefined variables. In - particular, undefined variable errors in a with - previously didn't show any position - information, so this makes it a lot easier to fix such - errors. - - - - Undefined variables are now treated consistently. - Previously, the tryEval function would catch - undefined variables inside a with but not - outside. Now tryEval never catches undefined - variables. - - - - Bash completion in nix-shell now works - correctly. - - - - Stack traces are less verbose: they no longer show calls to - builtin functions and only show a single line for each derivation - on the call stack. - - - - New built-in function: builtins.typeOf, - which returns the type of its argument as a string. - - - - -
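A few examples of the new builtins.typeOf (illustrative, not part of the notes); it returns the name of the argument's type as a string:

with builtins; [
  (typeOf 1)          # "int"
  (typeOf "x")        # "string"
  (typeOf [ ])        # "list"
  (typeOf { })        # "set"
  (typeOf (x: x))     # "lambda"
]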
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.6.xml b/third_party/nix/doc/manual/release-notes/rl-1.6.xml deleted file mode 100644 index 5805634209..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.6.xml +++ /dev/null @@ -1,127 +0,0 @@ -
- -Release 1.6 (2013-09-10) - -In addition to the usual bug fixes, this release has several new -features: - - - - - The command nix-build --run-env has been - renamed to nix-shell. - - - - nix-shell now sources - $stdenv/setup inside the - interactive shell, rather than in a parent shell. This ensures - that shell functions defined by stdenv can be - used in the interactive shell. - - - - nix-shell has a new flag - to clear the environment, so you get an - environment that more closely corresponds to the “real” Nix build. - - - - - nix-shell now sets the shell prompt - (PS1) to ensure that Nix shells are distinguishable - from your regular shells. - - - - nix-env no longer requires a - * argument to match all packages, so - nix-env -qa is equivalent to nix-env - -qa '*'. - - - - nix-env -i has a new flag - () to remove all - previous packages from the profile. This makes it easier to do - declarative package management similar to NixOS’s - . For instance, if you - have a specification my-packages.nix like this: - - -with import <nixpkgs> {}; -[ thunderbird - geeqie - ... -] - - - then after any change to this file, you can run: - - -$ nix-env -f my-packages.nix -ir - - - to update your profile to match the specification. - - - - The ‘with’ language construct is now more - lazy. It only evaluates its argument if a variable might actually - refer to an attribute in the argument. For instance, this now - works: - - -let - pkgs = with pkgs; { foo = "old"; bar = foo; } // overrides; - overrides = { foo = "new"; }; -in pkgs.bar - - - This evaluates to "new", while previously it - gave an “infinite recursion” error. - - - - Nix now has proper integer arithmetic operators. For - instance, you can write x + y instead of - builtins.add x y, or x < - y instead of builtins.lessThan x y. - The comparison operators also work on strings. - - - - On 64-bit systems, Nix integers are now 64 bits rather than - 32 bits. - - - - When using the Nix daemon, the nix-daemon - worker process now runs on the same CPU as the client, on systems - that support setting CPU affinity. This gives a significant speedup - on some systems. - - - - If a stack overflow occurs in the Nix evaluator, you now get - a proper error message (rather than “Segmentation fault”) on some - systems. - - - - In addition to directories, you can now bind-mount regular - files in chroots through the (now misnamed) option - . - - - - -This release has contributions from Domen Kožar, Eelco Dolstra, -Florian Friesdorf, Gergely Risko, Ivan Kozik, Ludovic Courtès and Shea -Levy. - -
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.7.xml b/third_party/nix/doc/manual/release-notes/rl-1.7.xml deleted file mode 100644 index 44ecaa78da..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.7.xml +++ /dev/null @@ -1,263 +0,0 @@ -
- -Release 1.7 (2014-04-11) - -In addition to the usual bug fixes, this release has the -following new features: - - - - - Antiquotation is now allowed inside of quoted attribute - names (e.g. set."${foo}"). In the case where - the attribute name is just a single antiquotation, the quotes can - be dropped (e.g. the above example can be written - set.${foo}). If an attribute name inside of a - set declaration evaluates to null (e.g. - { ${null} = false; }), then that attribute is - not added to the set. - - - - Experimental support for cryptographically signed binary - caches. See the - commit for details. - - - - An experimental new substituter, - download-via-ssh, that fetches binaries from - remote machines via SSH. Specifying the flags --option - use-ssh-substituter true --option ssh-substituter-hosts - user@hostname will cause Nix - to download binaries from the specified machine, if it has - them. - - - - nix-store -r and - nix-build have a new flag, - , that builds a previously built - derivation again, and prints an error message if the output is not - exactly the same. This helps to verify whether a derivation is - truly deterministic. For example: - - -$ nix-build '<nixpkgs>' -A patchelf - -$ nix-build '<nixpkgs>' -A patchelf --check - -error: derivation `/nix/store/1ipvxs…-patchelf-0.6' may not be deterministic: - hash mismatch in output `/nix/store/4pc1dm…-patchelf-0.6.drv' - - - - - - - - The nix-instantiate flags - and - have been renamed to and - , respectively. - - - - nix-instantiate, - nix-build and nix-shell now - have a flag (or ) that - allows you to specify the expression to be evaluated as a command - line argument. For instance, nix-instantiate --eval -E - '1 + 2' will print 3. - - - - nix-shell improvements: - - - - - It has a new flag, (or - ), that sets up a build environment - containing the specified packages from Nixpkgs. For example, - the command - - -$ nix-shell -p sqlite xorg.libX11 hello - - - will start a shell in which the given packages are - present. - - - - It now uses shell.nix as the - default expression, falling back to - default.nix if the former doesn’t - exist. This makes it convenient to have a - shell.nix in your project to set up a - nice development environment. - - - - It evaluates the derivation attribute - shellHook, if set. Since - stdenv does not normally execute this hook, - it allows you to do nix-shell-specific - setup. - - - - It preserves the user’s timezone setting. - - - - - - - - In chroots, Nix now sets up a /dev - containing only a minimal set of devices (such as - /dev/null). Note that it only does this if - you don’t have /dev - listed in your setting; - otherwise, it will bind-mount the /dev from - outside the chroot. - - Similarly, if you don’t have /dev/pts listed - in , Nix will mount a private - devpts filesystem on the chroot’s - /dev/pts. - - - - - New built-in function: builtins.toJSON, - which returns a JSON representation of a value. - - - - nix-env -q has a new flag - to print a JSON representation of the - installed or available packages. - - - - nix-env now supports meta attributes with - more complex values, such as attribute sets. - - - - The flag now allows attribute names with - dots in them, e.g. - - -$ nix-instantiate --eval '<nixos>' -A 'config.systemd.units."nscd.service".text' - - - - - - - The option to - nix-store --gc now accepts a unit - specifier. For example, nix-store --gc --max-freed - 1G will free up to 1 gigabyte of disk space. 
- - - - nix-collect-garbage has a new flag - - Nd, which deletes - all user environment generations older than - N days. Likewise, nix-env - --delete-generations accepts a - Nd age limit. - - - - Nix now heuristically detects whether a build failure was - due to a disk-full condition. In that case, the build is not - flagged as “permanently failed”. This is mostly useful for Hydra, - which needs to distinguish between permanent and transient build - failures. - - - - There is a new symbol __curPos that - expands to an attribute set containing its file name and line and - column numbers, e.g. { file = "foo.nix"; line = 10; - column = 5; }. There also is a new builtin function, - unsafeGetAttrPos, that returns the position of - an attribute. This is used by Nixpkgs to provide location - information in error messages, e.g. - - -$ nix-build '<nixpkgs>' -A libreoffice --argstr system x86_64-darwin -error: the package ‘libreoffice-4.0.5.2’ in ‘.../applications/office/libreoffice/default.nix:263’ - is not supported on ‘x86_64-darwin’ - - - - - - - The garbage collector is now more concurrent with other Nix - processes because it releases certain locks earlier. - - - - The binary tarball installer has been improved. You can now - install Nix by running: - - -$ bash <(curl https://nixos.org/nix/install) - - - - - - - More evaluation errors include position information. For - instance, selecting a missing attribute will print something like - - -error: attribute `nixUnstabl' missing, at /etc/nixos/configurations/misc/eelco/mandark.nix:216:15 - - - - - - - The command nix-setuid-helper is - gone. - - - - Nix no longer uses Automake, but instead has a - non-recursive, GNU Make-based build system. - - - - All installed libraries now have the prefix - libnix. In particular, this gets rid of - libutil, which could clash with libraries with - the same name from other packages. - - - - Nix now requires a compiler that supports C++11. - - - - -This release has contributions from Danny Wilson, Domen Kožar, -Eelco Dolstra, Ian-Woo Kim, Ludovic Courtès, Maxim Ivanov, Petr -Rockai, Ricardo M. Correia and Shea Levy. - -
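The new builtins.toJSON from the 1.7 notes above serialises ordinary Nix values into a JSON string; a small illustration (not from the original text), with attributes emitted in alphabetical order:

builtins.toJSON { name = "example"; enable = true; ports = [ 80 443 ]; }
# evaluates to the string {"enable":true,"name":"example","ports":[80,443]}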
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.8.xml b/third_party/nix/doc/manual/release-notes/rl-1.8.xml deleted file mode 100644 index c854c5c5f8..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.8.xml +++ /dev/null @@ -1,123 +0,0 @@ -
- -Release 1.8 (2014-12-14) - - - - Breaking change: to address a race condition, the - remote build hook mechanism now uses nix-store - --serve on the remote machine. This requires build slaves - to be updated to Nix 1.8. - - Nix now uses HTTPS instead of HTTP to access the - default binary cache, - cache.nixos.org. - - nix-env selectors are now regular - expressions. For instance, you can do - - -$ nix-env -qa '.*zip.*' - - - to query all packages with a name containing - zip. - - nix-store --read-log can now - fetch remote build logs. If a build log is not available locally, - then ‘nix-store -l’ will now try to download it from the servers - listed in the ‘log-servers’ option in nix.conf. For instance, if you - have the configuration option - - -log-servers = http://hydra.nixos.org/log - - -then it will try to get logs from -http://hydra.nixos.org/log/base name of the -store path. This allows you to do things like: - - -$ nix-store -l $(which xterm) - - - and get a log even if xterm wasn't built - locally. - - New builtin functions: - attrValues, deepSeq, - fromJSON, readDir, - seq. - - nix-instantiate --eval now has a - flag to print the resulting value in JSON - format. - - nix-copy-closure now uses - nix-store --serve on the remote side to send or - receive closures. This fixes a race condition between - nix-copy-closure and the garbage - collector. - - Derivations can specify the new special attribute - allowedRequisites, which has a similar meaning to - allowedReferences. But instead of only enforcing - to explicitly specify the immediate references, it requires the - derivation to specify all the dependencies recursively (hence the - name, requisites) that are used by the resulting - output. - - On Mac OS X, Nix now handles case collisions when - importing closures from case-sensitive file systems. This is mostly - useful for running NixOps on Mac OS X. - - The Nix daemon has new configuration options - (specifying the users and groups that - are allowed to connect to the daemon) and - (specifying the users and groups that - can perform privileged operations like specifying untrusted binary - caches). - - The configuration option - now defaults to the number of available - CPU cores. - - Build users are now used by default when Nix is - invoked as root. This prevents builds from accidentally running as - root. - - Nix now includes systemd units and Upstart - jobs. - - Speed improvements to nix-store - --optimise. - - Language change: the == operator - now ignores string contexts (the “dependencies” of a - string). - - Nix now filters out Nix-specific ANSI escape - sequences on standard error. They are supposed to be invisible, but - some terminals show them anyway. - - Various commands now automatically pipe their output - into the pager as specified by the PAGER environment - variable. - - Several improvements to reduce memory consumption in - the evaluator. - - - -This release has contributions from Adam Szkoda, Aristid -Breitkreuz, Bob van der Linden, Charles Strahan, darealshinji, Eelco -Dolstra, Gergely Risko, Joel Taylor, Ludovic Courtès, Marko Durkovic, -Mikey Ariel, Paul Colomiets, Ricardo M. Correia, Ricky Elrod, Robert -Helgesson, Rob Vermaas, Russell O'Connor, Shea Levy, Shell Turner, -Sönke Hahn, Steve Purcell, Vladimír Čunát and Wout Mertens. - -
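Two of the builtins added in 1.8 compose naturally (a sketch, not part of the notes); attrValues returns the values sorted by attribute name:

with builtins;
attrValues (fromJSON ''{ "b": 2, "a": 1 }'')   # [ 1 2 ]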
diff --git a/third_party/nix/doc/manual/release-notes/rl-1.9.xml b/third_party/nix/doc/manual/release-notes/rl-1.9.xml deleted file mode 100644 index c8406bd207..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-1.9.xml +++ /dev/null @@ -1,216 +0,0 @@ -
- -Release 1.9 (2015-06-12) - -In addition to the usual bug fixes, this release has the -following new features: - - - - - Signed binary cache support. You can enable signature - checking by adding the following to nix.conf: - - -signed-binary-caches = * -binary-cache-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= - - - This will prevent Nix from downloading any binary from the cache - that is not signed by one of the keys listed in - . - - Signature checking is only supported if you built Nix with - the libsodium package. - - Note that while Nix has had experimental support for signed - binary caches since version 1.7, this release changes the - signature format in a backwards-incompatible way. - - - - - - Automatic downloading of Nix expression tarballs. In various - places, you can now specify the URL of a tarball containing Nix - expressions (such as Nixpkgs), which will be downloaded and - unpacked automatically. For example: - - - - In nix-env: - - -$ nix-env -f https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz -iA firefox - - - This installs Firefox from the latest tested and built revision - of the NixOS 14.12 channel. - - In nix-build and - nix-shell: - - -$ nix-build https://github.com/NixOS/nixpkgs/archive/master.tar.gz -A hello - - - This builds GNU Hello from the latest revision of the Nixpkgs - master branch. - - In the Nix search path (as specified via - NIX_PATH or ). For example, to - start a shell containing the Pan package from a specific version - of Nixpkgs: - - -$ nix-shell -p pan -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/8a3eea054838b55aca962c3fbde9c83c102b8bf2.tar.gz - - - - - In nixos-rebuild (on NixOS): - - -$ nixos-rebuild test -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-unstable.tar.gz - - - - - In Nix expressions, via the new builtin function fetchTarball: - - -with import (fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz) {}; … - - - (This is not allowed in restricted mode.) - - - - - - - - nix-shell improvements: - - - - nix-shell now has a flag - to execute a command in the - nix-shell environment, - e.g. nix-shell --run make. This is like - the existing flag, except that it - uses a non-interactive shell (ensuring that hitting Ctrl-C won’t - drop you into the child shell). - - nix-shell can now be used as - a #!-interpreter. This allows you to write - scripts that dynamically fetch their own dependencies. For - example, here is a Haskell script that, when invoked, first - downloads GHC and the Haskell packages on which it depends: - - -#! /usr/bin/env nix-shell -#! nix-shell -i runghc -p haskellPackages.ghc haskellPackages.HTTP - -import Network.HTTP - -main = do - resp <- Network.HTTP.simpleHTTP (getRequest "http://nixos.org/") - body <- getResponseBody resp - print (take 100 body) - - - Of course, the dependencies are cached in the Nix store, so the - second invocation of this script will be much - faster. - - - - - - - - Chroot improvements: - - - - Chroot builds are now supported on Mac OS X - (using its sandbox mechanism). - - If chroots are enabled, they are now used for - all derivations, including fixed-output derivations (such as - fetchurl). The latter do have network - access, but can no longer access the host filesystem. If you - need the old behaviour, you can set the option - to - relaxed. - - On Linux, if chroots are enabled, builds are - performed in a private PID namespace once again. 
(This - functionality was lost in Nix 1.8.) - - Store paths listed in - are now automatically - expanded to their closure. For instance, if you want - /nix/store/…-bash/bin/sh mounted in your - chroot as /bin/sh, you only need to say - build-chroot-dirs = - /bin/sh=/nix/store/…-bash/bin/sh; it is no longer - necessary to specify the dependencies of Bash. - - - - - - The new derivation attribute - passAsFile allows you to specify that the - contents of derivation attributes should be passed via files rather - than environment variables. This is useful if you need to pass very - long strings that exceed the size limit of the environment. The - Nixpkgs function writeTextFile uses - this. - - You can now use ~ in Nix file - names to refer to your home directory, e.g. import - ~/.nixpkgs/config.nix. - - Nix has a new option - that allows limiting what paths the Nix evaluator has access to. By - passing --option restrict-eval true to Nix, the - evaluator will throw an exception if an attempt is made to access - any file outside of the Nix search path. This is primarily intended - for Hydra to ensure that a Hydra jobset only refers to its declared - inputs (and is therefore reproducible). - - nix-env now only creates a new - “generation” symlink in /nix/var/nix/profiles - if something actually changed. - - The environment variable NIX_PAGER - can now be set to override PAGER. You can set it to - cat to disable paging for Nix commands - only. - - Failing <...> - lookups now show position information. - - Improved Boehm GC use: we disabled scanning for - interior pointers, which should reduce the “Repeated - allocation of very large block” warnings and associated - retention of memory. - - - -This release has contributions from aszlig, Benjamin Staffin, -Charles Strahan, Christian Theune, Daniel Hahler, Danylo Hlynskyi -Daniel Peebles, Dan Peebles, Domen Kožar, Eelco Dolstra, Harald van -Dijk, Hoang Xuan Phu, Jaka Hudoklin, Jeff Ramnani, j-keck, Linquize, -Luca Bruno, Michael Merickel, Oliver Dunkl, Rob Vermaas, Rok Garbas, -Shea Levy, Tobias Geerinckx-Rice and William A. Kennington III. - -
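A minimal sketch of the passAsFile attribute described in the 1.9 notes above (not from the original text; it assumes a POSIX shell at /bin/sh): the value of bigText is written to a temporary file and its location is handed to the builder as bigTextPath instead of as an environment variable:

derivation {
  name = "pass-as-file-demo";
  system = builtins.currentSystem;
  builder = "/bin/sh";
  args = [ "-c" "cp \"$bigTextPath\" \"$out\"" ];   # the builder reads the file, not an environment variable
  passAsFile = [ "bigText" ];
  bigText = "imagine a very long string that would overflow the environment here";
}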
- diff --git a/third_party/nix/doc/manual/release-notes/rl-2.0.xml b/third_party/nix/doc/manual/release-notes/rl-2.0.xml deleted file mode 100644 index fc9a77b08b..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-2.0.xml +++ /dev/null @@ -1,1012 +0,0 @@ -
- -Release 2.0 (2018-02-22) - -The following incompatible changes have been made: - - - - - The manifest-based substituter mechanism - (download-using-manifests) has been removed. It - has been superseded by the binary cache substituter mechanism - since several years. As a result, the following programs have been - removed: - - - nix-pull - nix-generate-patches - bsdiff - bspatch - - - - - - The “copy from other stores” substituter mechanism - (copy-from-other-stores and the - NIX_OTHER_STORES environment variable) has been - removed. It was primarily used by the NixOS installer to copy - available paths from the installation medium. The replacement is - to use a chroot store as a substituter - (e.g. --substituters /mnt), or to build into a - chroot store (e.g. --store /mnt --substituters /). - - - - The command nix-push has been removed as - part of the effort to eliminate Nix's dependency on Perl. You can - use nix copy instead, e.g. nix copy - --to file:///tmp/my-binary-cache paths… - - - - The “nested” log output feature () has been removed. As a result, - nix-log2xml was also removed. - - - - OpenSSL-based signing has been removed. This - feature was never well-supported. A better alternative is provided - by the and - options. - - - - Failed build caching has been removed. This - feature was introduced to support the Hydra continuous build - system, but Hydra no longer uses it. - - - - nix-mode.el has been removed from - Nix. It is now a separate - repository and can be installed through the MELPA package - repository. - - - - -This release has the following new features: - - - - - It introduces a new command named nix, - which is intended to eventually replace all - nix-* commands with a more consistent and - better designed user interface. It currently provides replacements - for some (but not all) of the functionality provided by - nix-store, nix-build, - nix-shell -p, nix-env -qa, - nix-instantiate --eval, - nix-push and - nix-copy-closure. It has the following major - features: - - - - - Unlike the legacy commands, it has a consistent way to - refer to packages and package-like arguments (like store - paths). For example, the following commands all copy the GNU - Hello package to a remote machine: - - nix copy --to ssh://machine nixpkgs.hello - nix copy --to ssh://machine /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 - nix copy --to ssh://machine '(with import <nixpkgs> {}; hello)' - - By contrast, nix-copy-closure only accepted - store paths as arguments. - - - - It is self-documenting: shows - all available command-line arguments. If - is given after a subcommand, it shows - examples for that subcommand. nix - --help-config shows all configuration - options. - - - - It is much less verbose. By default, it displays a - single-line progress indicator that shows how many packages - are left to be built or downloaded, and (if there are running - builds) the most recent line of builder output. If a build - fails, it shows the last few lines of builder output. The full - build log can be retrieved using nix - log. - - - - It provides - all nix.conf configuration options as - command line flags. For example, instead of --option - http-connections 100 you can write - --http-connections 100. Boolean options can - be written as - --foo or - --no-foo - (e.g. ). - - - - Many subcommands have a flag to - write results to stdout in JSON format. - - - - - Please note that the nix command - is a work in progress and the interface is subject to - change. 
- - It provides the following high-level (“porcelain”) - subcommands: - - - - - nix build is a replacement for - nix-build. - - - - nix run executes a command in an - environment in which the specified packages are available. It - is (roughly) a replacement for nix-shell - -p. Unlike that command, it does not execute the - command in a shell, and has a flag (-c) - that specifies the unquoted command line to be - executed. - - It is particularly useful in conjunction with chroot - stores, allowing Linux users who do not have permission to - install Nix in /nix/store to still use - binary substitutes that assume - /nix/store. For example, - - nix run --store ~/my-nix nixpkgs.hello -c hello --greeting 'Hi everybody!' - - downloads (or if not substitutes are available, builds) the - GNU Hello package into - ~/my-nix/nix/store, then runs - hello in a mount namespace where - ~/my-nix/nix/store is mounted onto - /nix/store. - - - - nix search replaces nix-env - -qa. It searches the available packages for - occurrences of a search string in the attribute name, package - name or description. Unlike nix-env -qa, it - has a cache to speed up subsequent searches. - - - - nix copy copies paths between - arbitrary Nix stores, generalising - nix-copy-closure and - nix-push. - - - - nix repl replaces the external - program nix-repl. It provides an - interactive environment for evaluating and building Nix - expressions. Note that it uses linenoise-ng - instead of GNU Readline. - - - - nix upgrade-nix upgrades Nix to the - latest stable version. This requires that Nix is installed in - a profile. (Thus it won’t work on NixOS, or if it’s installed - outside of the Nix store.) - - - - nix verify checks whether store paths - are unmodified and/or “trusted” (see below). It replaces - nix-store --verify and nix-store - --verify-path. - - - - nix log shows the build log of a - package or path. If the build log is not available locally, it - will try to obtain it from the configured substituters (such - as cache.nixos.org, which now provides build - logs). - - - - nix edit opens the source code of a - package in your editor. - - - - nix eval replaces - nix-instantiate --eval. - - - - nix - why-depends shows why one store path has another in - its closure. This is primarily useful to finding the causes of - closure bloat. For example, - - nix why-depends nixpkgs.vlc nixpkgs.libdrm.dev - - shows a chain of files and fragments of file contents that - cause the VLC package to have the “dev” output of - libdrm in its closure — an undesirable - situation. - - - - nix path-info shows information about - store paths, replacing nix-store -q. A - useful feature is the option - (). For example, the following command show - the closure sizes of every path in the current NixOS system - closure, sorted by size: - - nix path-info -rS /run/current-system | sort -nk2 - - - - - - nix optimise-store replaces - nix-store --optimise. The main difference - is that it has a progress indicator. - - - - - A number of low-level (“plumbing”) commands are also - available: - - - - - nix ls-store and nix - ls-nar list the contents of a store path or NAR - file. The former is primarily useful in conjunction with - remote stores, e.g. - - nix ls-store --store https://cache.nixos.org/ -lR /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 - - lists the contents of path in a binary cache. - - - - nix cat-store and nix - cat-nar allow extracting a file from a store path or - NAR file. 
- - - - nix dump-path writes the contents of - a store path to stdout in NAR format. This replaces - nix-store --dump. - - - - nix - show-derivation displays a store derivation in JSON - format. This is an alternative to - pp-aterm. - - - - nix - add-to-store replaces nix-store - --add. - - - - nix sign-paths signs store - paths. - - - - nix copy-sigs copies signatures from - one store to another. - - - - nix show-config shows all - configuration options and their current values. - - - - - - - - The store abstraction that Nix has had for a long time to - support store access via the Nix daemon has been extended - significantly. In particular, substituters (which used to be - external programs such as - download-from-binary-cache) are now subclasses - of the abstract Store class. This allows - many Nix commands to operate on such store types. For example, - nix path-info shows information about paths in - your local Nix store, while nix path-info --store - https://cache.nixos.org/ shows information about paths - in the specified binary cache. Similarly, - nix-copy-closure, nix-push - and substitution are all instances of the general notion of - copying paths between different kinds of Nix stores. - - Stores are specified using an URI-like syntax, - e.g. https://cache.nixos.org/ or - ssh://machine. The following store types are supported: - - - - - - LocalStore (stori URI - local or an absolute path) and the misnamed - RemoteStore (daemon) - provide access to a local Nix store, the latter via the Nix - daemon. You can use auto or the empty - string to auto-select a local or daemon store depending on - whether you have write permission to the Nix store. It is no - longer necessary to set the NIX_REMOTE - environment variable to use the Nix daemon. - - As noted above, LocalStore now - supports chroot builds, allowing the “physical” location of - the Nix store - (e.g. /home/alice/nix/store) to differ - from its “logical” location (typically - /nix/store). This allows non-root users - to use Nix while still getting the benefits from prebuilt - binaries from cache.nixos.org. - - - - - - BinaryCacheStore is the abstract - superclass of all binary cache stores. It supports writing - build logs and NAR content listings in JSON format. - - - - - - HttpBinaryCacheStore - (http://, https://) - supports binary caches via HTTP or HTTPS. If the server - supports PUT requests, it supports - uploading store paths via commands such as nix - copy. - - - - - - LocalBinaryCacheStore - (file://) supports binary caches in the - local filesystem. - - - - - - S3BinaryCacheStore - (s3://) supports binary caches stored in - Amazon S3, if enabled at compile time. - - - - - - LegacySSHStore (ssh://) - is used to implement remote builds and - nix-copy-closure. - - - - - - SSHStore - (ssh-ng://) supports arbitrary Nix - operations on a remote machine via the same protocol used by - nix-daemon. - - - - - - - - - - - - Security has been improved in various ways: - - - - - Nix now stores signatures for local store - paths. When paths are copied between stores (e.g., copied from - a binary cache to a local store), signatures are - propagated. - - Locally-built paths are signed automatically using the - secret keys specified by the - store option. Secret/public key pairs can be generated using - nix-store - --generate-binary-cache-key. - - In addition, locally-built store paths are marked as - “ultimately trusted”, but this bit is not propagated when - paths are copied between stores. 
- - - - Content-addressable store paths no longer require - signatures — they can be imported into a store by unprivileged - users even if they lack signatures. - - - - The command nix verify checks whether - the specified paths are trusted, i.e., have a certain number - of trusted signatures, are ultimately trusted, or are - content-addressed. - - - - Substitutions from binary caches now - require signatures by default. This was already the case on - NixOS. - - - - In Linux sandbox builds, we now - use /build instead of - /tmp as the temporary build - directory. This fixes potential security problems when a build - accidentally stores its TMPDIR in some - security-sensitive place, such as an RPATH. - - - - - - - - - - Pure evaluation mode. This is a variant - of the existing restricted evaluation mode. In pure mode, the Nix - evaluator forbids access to anything that could cause different - evaluations of the same command line arguments to produce a - different result. This includes builtin functions such as - builtins.getEnv, but more importantly, - all filesystem or network access unless a - content hash or commit hash is specified. For example, calls to - builtins.fetchGit are only allowed if a - rev attribute is specified. - - The goal of this feature is to enable true reproducibility - and traceability of builds (including NixOS system configurations) - at the evaluation level. For example, in the future, - nixos-rebuild might build configurations from a - Nix expression in a Git repository in pure mode. That expression - might fetch other repositories such as Nixpkgs via - builtins.fetchGit. The commit hash of the - top-level repository then uniquely identifies a running system, - and, in conjunction with that repository, allows it to be - reproduced or modified. - - - - - There are several new features to support binary - reproducibility (i.e. to help ensure that multiple builds of the - same derivation produce exactly the same output). When - is set to - false, it’s no - longer a fatal error if build rounds produce different - output. Also, a hook named is provided - to allow you to run tools such as diffoscope - when build rounds produce different output. - - - - Configuring remote builds is a lot easier now. Provided you - are not using the Nix daemon, you can now just specify a remote - build machine on the command line, e.g. --option builders - 'ssh://my-mac x86_64-darwin'. The environment variable - NIX_BUILD_HOOK has been removed and is no longer - needed. The environment variable NIX_REMOTE_SYSTEMS - is still supported for compatibility, but it is also possible to - specify builders in nix.conf by setting the - option builders = - @path. - - - - If a fixed-output derivation produces a result with an - incorrect hash, the output path is moved to the location - corresponding to the actual hash and registered as valid. Thus, a - subsequent build of the fixed-output derivation with the correct - hash is unnecessary. - - - - nix-shell now - sets the IN_NIX_SHELL environment variable - during evaluation and in the shell itself. This can be used to - perform different actions depending on whether you’re in a Nix - shell or in a regular build. Nixpkgs provides - lib.inNixShell to check this variable during - evaluation. - - - - NIX_PATH is now lazy, so URIs in the path are - only downloaded if they are needed for evaluation. - - - - You can now use - channel:channel-name as a - short-hand for - https://nixos.org/channels/channel-name/nixexprs.tar.xz. 
For - example, nix-build channel:nixos-15.09 -A hello - will build the GNU Hello package from the - nixos-15.09 channel. In the future, this may - use Git to fetch updates more efficiently. - - - - When is given, the last - 10 lines of the build log will be shown if a build - fails. - - - - Networking has been improved: - - - - - HTTP/2 is now supported. This makes binary cache lookups - much - more efficient. - - - - We now retry downloads on many HTTP errors, making - binary caches substituters more resilient to temporary - failures. - - - - HTTP credentials can now be configured via the standard - netrc mechanism. - - - - If S3 support is enabled at compile time, - s3:// URIs are supported - in all places where Nix allows URIs. - - - - Brotli compression is now supported. In particular, - cache.nixos.org build logs are now compressed using - Brotli. - - - - - - - - - - nix-env now - ignores packages with bad derivation names (in particular those - starting with a digit or containing a dot). - - - - Many configuration options have been renamed, either because - they were unnecessarily verbose - (e.g. is now just - ) or to reflect generalised behaviour - (e.g. is now - because it allows arbitrary store - URIs). The old names are still supported for compatibility. - - - - The option can now - be set to auto to use the number of CPUs in the - system. - - - - Hashes can now - be specified in base-64 format, in addition to base-16 and the - non-standard base-32. - - - - nix-shell now uses - bashInteractive from Nixpkgs, rather than the - bash command that happens to be in the caller’s - PATH. This is especially important on macOS where - the bash provided by the system is seriously - outdated and cannot execute stdenv’s setup - script. - - - - Nix can now automatically trigger a garbage collection if - free disk space drops below a certain level during a build. This - is configured using the and - options. - - - - nix-store -q --roots and - nix-store --gc --print-roots now show temporary - and in-memory roots. - - - - - Nix can now be extended with plugins. See the documentation of - the option for more details. - - - - - -The Nix language has the following new features: - - - - - It supports floating point numbers. They are based on the - C++ float type and are supported by the - existing numerical operators. Export and import to and from JSON - and XML works, too. - - - - Derivation attributes can now reference the outputs of the - derivation using the placeholder builtin - function. For example, the attribute - - -configureFlags = "--prefix=${placeholder "out"} --includedir=${placeholder "dev"}"; - - - will cause the configureFlags environment variable - to contain the actual store paths corresponding to the - out and dev outputs. - - - - - - -The following builtin functions are new or extended: - - - - - builtins.fetchGit - allows Git repositories to be fetched at evaluation time. Thus it - differs from the fetchgit function in - Nixpkgs, which fetches at build time and cannot be used to fetch - Nix expressions during evaluation. A typical use case is to import - external NixOS modules from your configuration, e.g. - - imports = [ (builtins.fetchGit https://github.com/edolstra/dwarffs + "/module.nix") ]; - - - - - - Similarly, builtins.fetchMercurial - allows you to fetch Mercurial repositories. - - - - builtins.path generalises - builtins.filterSource and path literals - (e.g. ./foo). It allows specifying a store path - name that differs from the source path name - (e.g. 
builtins.path { path = ./foo; name = "bar"; - }) and also supports filtering out unwanted - files. - - - - builtins.fetchurl and - builtins.fetchTarball now support - sha256 and name - attributes. - - - - builtins.split - splits a string using a POSIX extended regular expression as the - separator. - - - - builtins.partition - partitions the elements of a list into two lists, depending on a - Boolean predicate. - - - - <nix/fetchurl.nix> now uses the - content-addressable tarball cache at - http://tarballs.nixos.org/, just like - fetchurl in - Nixpkgs. (f2682e6e18a76ecbfb8a12c17e3a0ca15c084197) - - - - In restricted and pure evaluation mode, builtin functions - that download from the network (such as - fetchGit) are permitted to fetch underneath a - list of URI prefixes specified in the option - . - - - - - - -The Nix build environment has the following changes: - - - - - Values such as Booleans, integers, (nested) lists and - attribute sets can now - be passed to builders in a non-lossy way. If the special attribute - __structuredAttrs is set to - true, the other derivation attributes are - serialised in JSON format and made available to the builder via - the file .attrs.json in the builder’s temporary - directory. This obviates the need for - passAsFile since JSON files have no size - restrictions, unlike process environments. - - As - a convenience to Bash builders, Nix writes a script named - .attrs.sh to the builder’s directory that - initialises shell variables corresponding to all attributes that - are representable in Bash. This includes non-nested (associative) - arrays. For example, the attribute hardening.format = - true ends up as the Bash associative array element - ${hardening[format]}. - - - - Builders can now - communicate what build phase they are in by writing messages to - the file descriptor specified in NIX_LOG_FD. The - current phase is shown by the nix progress - indicator. - - - - - In Linux sandbox builds, we now - provide a default /bin/sh (namely - ash from BusyBox). - - - - In structured attribute mode, - exportReferencesGraph exports - extended information about closures in JSON format. In particular, - it includes the sizes and hashes of paths. This is primarily - useful for NixOS image builders. - - - - Builds are now - killed as soon as Nix receives EOF on the builder’s stdout or - stderr. This fixes a bug that allowed builds to hang Nix - indefinitely, regardless of - timeouts. - - - - The configuration - option can now specify optional paths by appending a - ?, e.g. /dev/nvidiactl? will - bind-mount /dev/nvidiactl only if it - exists. - - - - On Linux, builds are now executed in a user - namespace with UID 1000 and GID 100. - - - - - - -A number of significant internal changes were made: - - - - - Nix no longer depends on Perl and all Perl components have - been rewritten in C++ or removed. The Perl bindings that used to - be part of Nix have been moved to a separate package, - nix-perl. - - - - All Store classes are now - thread-safe. RemoteStore supports multiple - concurrent connections to the daemon. This is primarily useful in - multi-threaded programs such as - hydra-queue-runner. 
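The structured-attributes mechanism described above serialises the derivation attributes as JSON, so the shape of .attrs.json can be previewed at the Nix level with builtins.toJSON. A minimal sketch; the attribute names buildFlags and enableTests are hypothetical (only hardening.format is taken from the notes), and the real file additionally contains the standard attributes such as name, system and builder:

    builtins.toJSON {
      hardening.format = true;       # reaches Bash builders as ${hardening[format]} via .attrs.sh
      buildFlags = [ "-j4" "V=1" ];  # nested lists survive, unlike plain environment variables
      enableTests = false;
    }
    # evaluates to the string:
    # {"buildFlags":["-j4","V=1"],"enableTests":false,"hardening":{"format":true}}

Evaluating the expression with nix-instantiate --eval --strict shows the same nesting the builder would read back from .attrs.json.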
- - - - - - -This release has contributions from - -Adrien Devresse, -Alexander Ried, -Alex Cruice, -Alexey Shmalko, -AmineChikhaoui, -Andy Wingo, -Aneesh Agrawal, -Anthony Cowley, -Armijn Hemel, -aszlig, -Ben Gamari, -Benjamin Hipple, -Benjamin Staffin, -Benno Fünfstück, -Bjørn Forsman, -Brian McKenna, -Charles Strahan, -Chase Adams, -Chris Martin, -Christian Theune, -Chris Warburton, -Daiderd Jordan, -Dan Connolly, -Daniel Peebles, -Dan Peebles, -davidak, -David McFarland, -Dmitry Kalinkin, -Domen Kožar, -Eelco Dolstra, -Emery Hemingway, -Eric Litak, -Eric Wolf, -Fabian Schmitthenner, -Frederik Rietdijk, -Gabriel Gonzalez, -Giorgio Gallo, -Graham Christensen, -Guillaume Maudoux, -Harmen, -Iavael, -James Broadhead, -James Earl Douglas, -Janus Troelsen, -Jeremy Shaw, -Joachim Schiele, -Joe Hermaszewski, -Joel Moberg, -Johannes 'fish' Ziemke, -Jörg Thalheim, -Jude Taylor, -kballou, -Keshav Kini, -Kjetil Orbekk, -Langston Barrett, -Linus Heckemann, -Ludovic Courtès, -Manav Rathi, -Marc Scholten, -Markus Hauck, -Matt Audesse, -Matthew Bauer, -Matthias Beyer, -Matthieu Coudron, -N1X, -Nathan Zadoks, -Neil Mayhew, -Nicolas B. Pierron, -Niklas Hambüchen, -Nikolay Amiantov, -Ole Jørgen Brønner, -Orivej Desh, -Peter Simons, -Peter Stuart, -Pyry Jahkola, -regnat, -Renzo Carbonara, -Rhys, -Robert Vollmert, -Scott Olson, -Scott R. Parish, -Sergei Trofimovich, -Shea Levy, -Sheena Artrip, -Spencer Baugh, -Stefan Junker, -Susan Potter, -Thomas Tuegel, -Timothy Allen, -Tristan Hume, -Tuomas Tynkkynen, -tv, -Tyson Whitehead, -Vladimír Čunát, -Will Dietz, -wmertens, -Wout Mertens, -zimbatm and -Zoran Plesivčak. - - -
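Pure evaluation mode, described earlier in these notes, only admits builtins.fetchGit calls that pin an exact commit. A minimal sketch, reusing the dwarffs URL quoted above; the rev value is a stand-in, not a real commit hash:

    let
      dwarffs = builtins.fetchGit {
        url = "https://github.com/edolstra/dwarffs";
        # Mandatory in pure evaluation mode; replace the stand-in with a real commit.
        rev = "0000000000000000000000000000000000000000";
      };
    in dwarffs.outPath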
diff --git a/third_party/nix/doc/manual/release-notes/rl-2.1.xml b/third_party/nix/doc/manual/release-notes/rl-2.1.xml deleted file mode 100644 index 16c243fc19..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-2.1.xml +++ /dev/null @@ -1,133 +0,0 @@ -
- -Release 2.1 (2018-09-02) - -This is primarily a bug fix release. It also reduces memory -consumption in certain situations. In addition, it has the following -new features: - - - - - The Nix installer will no longer default to the Multi-User - installation for macOS. You can still instruct the installer to - run in multi-user mode. - - - - - The Nix installer now supports performing a Multi-User - installation for Linux computers which are running systemd. You - can select a Multi-User installation by passing the - flag to the installer: sh <(curl - https://nixos.org/nix/install) --daemon. - - - The multi-user installer cannot handle systems with SELinux. - If your system has SELinux enabled, you can force the installer to run - in single-user mode. - - - - New builtin functions: - builtins.bitAnd, - builtins.bitOr, - builtins.bitXor, - builtins.fromTOML, - builtins.concatMap, - builtins.mapAttrs. - - - - - The S3 binary cache store now supports uploading NARs larger - than 5 GiB. - - - - The S3 binary cache store now supports uploading to - S3-compatible services with the endpoint - option. - - - - The flag is no longer required - to recover from disappeared NARs in binary caches. - - - - nix-daemon now respects - . - - - - nix run now respects - nix-support/propagated-user-env-packages. - - - - -This release has contributions from - -Adrien Devresse, -Aleksandr Pashkov, -Alexandre Esteves, -Amine Chikhaoui, -Andrew Dunham, -Asad Saeeduddin, -aszlig, -Ben Challenor, -Ben Gamari, -Benjamin Hipple, -Bogdan Seniuc, -Corey O'Connor, -Daiderd Jordan, -Daniel Peebles, -Daniel Poelzleithner, -Danylo Hlynskyi, -Dmitry Kalinkin, -Domen Kožar, -Doug Beardsley, -Eelco Dolstra, -Erik Arvstedt, -Félix Baylac-Jacqué, -Gleb Peregud, -Graham Christensen, -Guillaume Maudoux, -Ivan Kozik, -John Arnold, -Justin Humm, -Linus Heckemann, -Lorenzo Manacorda, -Matthew Justin Bauer, -Matthew O'Gorman, -Maximilian Bosch, -Michael Bishop, -Michael Fiano, -Michael Mercier, -Michael Raskin, -Michael Weiss, -Nicolas Dudebout, -Peter Simons, -Ryan Trinkle, -Samuel Dionne-Riel, -Sean Seefried, -Shea Levy, -Symphorien Gibol, -Tim Engler, -Tim Sears, -Tuomas Tynkkynen, -volth, -Will Dietz, -Yorick van Pelt and -zimbatm. - - -
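The new builtins listed above are all pure functions, so a small attribute set is enough to exercise them. A minimal sketch; the input values are arbitrary:

    {
      masked   = builtins.bitAnd 12 10;                    # 8
      combined = builtins.bitOr 12 10;                     # 14
      toggled  = builtins.bitXor 12 10;                    # 6
      doubled  = builtins.concatMap (x: [ x x ]) [ 1 2 ];  # [ 1 1 2 2 ]
      labelled = builtins.mapAttrs (name: value: "${name}=${toString value}") { a = 1; b = 2; };
                                                           # { a = "a=1"; b = "b=2"; }
      config   = builtins.fromTOML ''
        title = "demo"
        [server]
        port = 8080
      '';                                                  # { server = { port = 8080; }; title = "demo"; }
    }

Running nix-instantiate --eval --strict on this expression prints the results shown in the comments.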
diff --git a/third_party/nix/doc/manual/release-notes/rl-2.2.xml b/third_party/nix/doc/manual/release-notes/rl-2.2.xml deleted file mode 100644 index d29eb87e82..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-2.2.xml +++ /dev/null @@ -1,143 +0,0 @@ -
- -Release 2.2 (2019-01-11) - -This is primarily a bug fix release. It also has the following -changes: - - - - - In derivations that use structured attributes (i.e. that - specify set the __structuredAttrs attribute to - true to cause all attributes to be passed to - the builder in JSON format), you can now specify closure checks - per output, e.g.: - - -outputChecks."out" = { - # The closure of 'out' must not be larger than 256 MiB. - maxClosureSize = 256 * 1024 * 1024; - - # It must not refer to C compiler or to the 'dev' output. - disallowedRequisites = [ stdenv.cc "dev" ]; -}; - -outputChecks."dev" = { - # The 'dev' output must not be larger than 128 KiB. - maxSize = 128 * 1024; -}; - - - - - - - - The derivation attribute - requiredSystemFeatures is now enforced for - local builds, and not just to route builds to remote builders. - The supported features of a machine can be specified through the - configuration setting system-features. - - By default, system-features includes - kvm if /dev/kvm - exists. For compatibility, it also includes the pseudo-features - nixos-test, benchmark and - big-parallel which are used by Nixpkgs to route - builds to particular Hydra build machines. - - - - - Sandbox builds are now enabled by default on Linux. - - - - The new command nix doctor shows - potential issues with your Nix installation. - - - - The fetchGit builtin function now uses a - caching scheme that puts different remote repositories in distinct - local repositories, rather than a single shared repository. This - may require more disk space but is faster. - - - - The dirOf builtin function now works on - relative paths. - - - - Nix now supports SRI hashes, - allowing the hash algorithm and hash to be specified in a single - string. For example, you can write: - - -import <nix/fetchurl.nix> { - url = https://nixos.org/releases/nix/nix-2.1.3/nix-2.1.3.tar.xz; - hash = "sha256-XSLa0FjVyADWWhFfkZ2iKTjFDda6mMXjoYMXLRSYQKQ="; -}; - - - instead of - - -import <nix/fetchurl.nix> { - url = https://nixos.org/releases/nix/nix-2.1.3/nix-2.1.3.tar.xz; - sha256 = "5d22dad058d5c800d65a115f919da22938c50dd6ba98c5e3a183172d149840a4"; -}; - - - - - In fixed-output derivations, the - outputHashAlgo attribute is no longer mandatory - if outputHash specifies the hash. - - nix hash-file and nix - hash-path now print hashes in SRI format by - default. They also use SHA-256 by default instead of SHA-512 - because that's what we use most of the time in Nixpkgs. - - - - Integers are now 64 bits on all platforms. - - - - The evaluator now prints profiling statistics (enabled via - the NIX_SHOW_STATS and - NIX_COUNT_CALLS environment variables) in JSON - format. - - - - The option in nix-store - --query has been removed. Instead, there now is an - option to output the dependency graph - in GraphML format. - - - - All nix-* commands are now symlinks to - nix. This saves a bit of disk space. - - - - nix repl now uses - libeditline or - libreadline. - - - - -
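Since requiredSystemFeatures is now enforced for local builds as described above, a derivation can insist on a feature such as kvm. A minimal sketch; the name, builder and build script are hypothetical:

    derivation {
      name = "needs-kvm";
      system = "x86_64-linux";
      builder = "/bin/sh";
      args = [ "-c" "echo built-with-kvm > $out" ];
      # Only built locally if nix.conf advertises the feature, e.g.: system-features = kvm
      requiredSystemFeatures = [ "kvm" ];
    }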
- diff --git a/third_party/nix/doc/manual/release-notes/rl-2.3.xml b/third_party/nix/doc/manual/release-notes/rl-2.3.xml deleted file mode 100644 index 0ad7d641f8..0000000000 --- a/third_party/nix/doc/manual/release-notes/rl-2.3.xml +++ /dev/null @@ -1,91 +0,0 @@ -
- -Release 2.3 (2019-09-04) - -This is primarily a bug fix release. However, it makes some -incompatible changes: - - - - - Nix now uses BSD file locks instead of POSIX file - locks. Because of this, you should not use Nix 2.3 and previous - releases at the same time on a Nix store. - - - - -It also has the following changes: - - - - - builtins.fetchGit's ref - argument now allows specifying an absolute remote ref. - Nix will automatically prefix ref with - refs/heads only if ref doesn't - already begin with refs/. - - - - - The installer now enables sandboxing by default on Linux when the - system has the necessary kernel support. - - - - - The max-jobs setting now defaults to 1. - - - - New builtin functions: - builtins.isPath, - builtins.hashFile. - - - - - The nix command has a new - () flag to - print build log output to stderr, rather than showing the last log - line in the progress bar. To distinguish between concurrent - builds, log lines are prefixed by the name of the package. - - - - - Builds are now executed in a pseudo-terminal, and the - TERM environment variable is set to - xterm-256color. This allows many programs - (e.g. gcc, clang, - cmake) to print colorized log output. - - - - Add convenience flag. This flag - disables substituters; sets the tarball-ttl - setting to infinity (ensuring that any previously downloaded files - are considered current); and disables retrying downloads and sets - the connection timeout to the minimum. This flag is enabled - automatically if there are no configured non-loopback network - interfaces. - - - - Add a post-build-hook setting to run a - program after a build has succeeded. - - - - Add a trace-function-calls setting to log - the duration of Nix function calls to stderr. - - - - -
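The two new builtins above can be probed from a scratch expression. A minimal sketch; the ./rl-2.3.xml path is only an assumption about a file sitting next to the expression:

    {
      # builtins.isPath distinguishes path values from strings.
      aPath   = builtins.isPath ./rl-2.3.xml;   # true
      aString = builtins.isPath "/nix/store";   # false

      # builtins.hashFile hashes a file's contents with the named algorithm.
      digest  = builtins.hashFile "sha256" ./rl-2.3.xml;
    }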
diff --git a/third_party/nix/doc/manual/schemas.xml b/third_party/nix/doc/manual/schemas.xml deleted file mode 100644 index 691a517b9c..0000000000 --- a/third_party/nix/doc/manual/schemas.xml +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/third_party/nix/misc/systemd/nix-daemon.service.in b/third_party/nix/misc/systemd/nix-daemon.service.in deleted file mode 100644 index c3d2a4a39e..0000000000 --- a/third_party/nix/misc/systemd/nix-daemon.service.in +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Nix Daemon -RequiresMountsFor=@storedir@ -RequiresMountsFor=@localstatedir@ -ConditionPathIsReadWrite=@localstatedir@/nix/daemon-socket - -[Service] -ExecStart=@@bindir@/nix-daemon nix-daemon -KillMode=process - -[Install] -WantedBy=multi-user.target diff --git a/third_party/nix/misc/systemd/nix-daemon.socket.in b/third_party/nix/misc/systemd/nix-daemon.socket.in deleted file mode 100644 index 9ed39ffe6e..0000000000 --- a/third_party/nix/misc/systemd/nix-daemon.socket.in +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=Nix Daemon Socket -Before=multi-user.target -RequiresMountsFor=@storedir@ -ConditionPathIsReadWrite=@localstatedir@/nix/daemon-socket - -[Socket] -ListenStream=@localstatedir@/nix/daemon-socket/socket - -[Install] -WantedBy=sockets.target diff --git a/third_party/nix/scripts/build.sh b/third_party/nix/scripts/build.sh deleted file mode 100755 index 759c9e9f2c..0000000000 --- a/third_party/nix/scripts/build.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -# Run `nix build` using a local store, for use during development. Intended to -# be run from the cmake build directory - -set -eo pipefail - -if [ $1 = "--debug" ]; then - run=(gdb --args) - shift 1 -elif [ "$1" = "--rr" ]; then - run=(rr record) - shift 1 -else - run=() -fi - -make -j 10 -NIX_STORE_DIR=$(pwd)/nix/store \ - NIX_LOG_DIR=$(pwd)/nix/var/log/nix \ - NIX_STATE_DIR=$(pwd)/nix/var/nix \ - XDG_CACHE_HOME=$(pwd)/cache \ - NIX_REMOTE=daemon \ - ${run[*]} ./src/nix build "$@" diff --git a/third_party/nix/scripts/daemon.sh b/third_party/nix/scripts/daemon.sh deleted file mode 100755 index 3daa0f1390..0000000000 --- a/third_party/nix/scripts/daemon.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -# Run a nix daemon using a local store, for use during development. Intended to -# be run from the cmake build directory - -if [ $1 = "--debug" ]; then - run=(gdb --args) - shift 1 -elif [ "$1" = "--rr" ]; then - run=(rr record) - shift 1 -else - run=() -fi - -make -j 10 -NIX_STORE_DIR=$(pwd)/nix/store \ - NIX_LOG_DIR=$(pwd)/nix/var/log/nix \ - NIX_STATE_DIR=$(pwd)/nix/var/nix \ - XDG_CACHE_HOME=$(pwd)/cache \ - NIX_LIBEXEC_DIR=$(pwd) \ - GRPC_TRACE=all \ - ${gdb[*]} ./src/nix-daemon/nix-daemon diff --git a/third_party/nix/scripts/eval.sh b/third_party/nix/scripts/eval.sh deleted file mode 100755 index f71d9f7931..0000000000 --- a/third_party/nix/scripts/eval.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -# Run `nix eval` using a local store, for use during development. 
Intended to -# be run from the cmake build directory - -if [ "$#" -gt 0 ] && [ "$1" = "--debug" ]; then - gdb=(gdb --args) - shift 1 -elif [ "$1" = "--rr" ]; then - gdb=(rr record) - shift 1 -else - gdb=() -fi - -make -j 10 -NIX_STORE_DIR=$(pwd)/nix/store \ - NIX_LOG_DIR=$(pwd)/nix/var/log/nix \ - NIX_STATE_DIR=$(pwd)/nix/var/nix \ - XDG_CACHE_HOME=$(pwd)/cache \ - NIX_REMOTE=daemon \ - ${gdb[*]} ./src/nix eval "$@" diff --git a/third_party/nix/scripts/install-darwin-multi-user.sh b/third_party/nix/scripts/install-darwin-multi-user.sh deleted file mode 100644 index 49076bd5c0..0000000000 --- a/third_party/nix/scripts/install-darwin-multi-user.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env bash - -set -eu -set -o pipefail - -readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist - -dsclattr() { - /usr/bin/dscl . -read "$1" \ - | awk "/$2/ { print \$2 }" -} - -poly_validate_assumptions() { - if [ "$(uname -s)" != "Darwin" ]; then - failure "This script is for use with macOS!" - fi -} - -poly_service_installed_check() { - [ -e "$PLIST_DEST" ] -} - -poly_service_uninstall_directions() { - cat < /dev/null 2>&1 -} - -poly_group_id_get() { - dsclattr "/Groups/$1" "PrimaryGroupID" -} - -poly_create_build_group() { - _sudo "Create the Nix build group, $NIX_BUILD_GROUP_NAME" \ - /usr/sbin/dseditgroup -o create \ - -r "Nix build group for nix-daemon" \ - -i "$NIX_BUILD_GROUP_ID" \ - "$NIX_BUILD_GROUP_NAME" >&2 -} - -poly_user_exists() { - /usr/bin/dscl . -read "/Users/$1" > /dev/null 2>&1 -} - -poly_user_id_get() { - dsclattr "/Users/$1" "UniqueID" -} - -poly_user_hidden_get() { - dsclattr "/Users/$1" "IsHidden" -} - -poly_user_hidden_set() { - _sudo "in order to make $1 a hidden user" \ - /usr/bin/dscl . -create "/Users/$1" "IsHidden" "1" -} - -poly_user_home_get() { - dsclattr "/Users/$1" "NFSHomeDirectory" -} - -poly_user_home_set() { - _sudo "in order to give $1 a safe home directory" \ - /usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2" -} - -poly_user_note_get() { - dsclattr "/Users/$1" "RealName" -} - -poly_user_note_set() { - _sudo "in order to give $username a useful note" \ - /usr/bin/dscl . -create "/Users/$1" "RealName" "$2" -} - -poly_user_shell_get() { - dsclattr "/Users/$1" "UserShell" -} - -poly_user_shell_set() { - _sudo "in order to give $1 a safe home directory" \ - /usr/bin/dscl . -create "/Users/$1" "UserShell" "$2" -} - -poly_user_in_group_check() { - username=$1 - group=$2 - dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1 -} - -poly_user_in_group_set() { - username=$1 - group=$2 - - _sudo "Add $username to the $group group"\ - /usr/sbin/dseditgroup -o edit -t user \ - -a "$username" "$group" -} - -poly_user_primary_group_get() { - dsclattr "/Users/$1" "PrimaryGroupID" -} - -poly_user_primary_group_set() { - _sudo "to let the nix daemon use this user for builds (this might seem redundant, but there are two concepts of group membership)" \ - /usr/bin/dscl . -create "/Users/$1" "PrimaryGroupID" "$2" -} - -poly_create_build_user() { - username=$1 - uid=$2 - builder_num=$3 - - _sudo "Creating the Nix build user (#$builder_num), $username" \ - /usr/bin/dscl . 
create "/Users/$username" \ - UniqueID "${uid}" -} diff --git a/third_party/nix/scripts/install-multi-user.sh b/third_party/nix/scripts/install-multi-user.sh deleted file mode 100644 index 5233762fa6..0000000000 --- a/third_party/nix/scripts/install-multi-user.sh +++ /dev/null @@ -1,798 +0,0 @@ -#!/usr/bin/env bash - -set -eu -set -o pipefail - -# Sourced from: -# - https://github.com/LnL7/nix-darwin/blob/8c29d0985d74b4a990238497c47a2542a5616b3c/bootstrap.sh -# - https://gist.github.com/expipiplus1/e571ce88c608a1e83547c918591b149f/ac504c6c1b96e65505fbda437a28ce563408ecb0 -# - https://github.com/NixOS/nixos-org-configurations/blob/a122f418797713d519aadf02e677fce0dc1cb446/delft/scripts/nix-mac-installer.sh -# - https://github.com/matthewbauer/macNixOS/blob/f6045394f9153edea417be90c216788e754feaba/install-macNixOS.sh -# - https://gist.github.com/LnL7/9717bd6cdcb30b086fd7f2093e5f8494/86b26f852ce563e973acd30f796a9a416248c34a -# -# however tracking which bits came from which would be impossible. - -readonly ESC='\033[0m' -readonly BOLD='\033[1m' -readonly BLUE='\033[34m' -readonly BLUE_UL='\033[4;34m' -readonly GREEN='\033[32m' -readonly GREEN_UL='\033[4;32m' -readonly RED='\033[31m' - -readonly NIX_USER_COUNT="32" -readonly NIX_BUILD_GROUP_ID="30000" -readonly NIX_BUILD_GROUP_NAME="nixbld" -readonly NIX_FIRST_BUILD_UID="30001" -# Please don't change this. We don't support it, because the -# default shell profile that comes with Nix doesn't support it. -readonly NIX_ROOT="/nix" - -readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc") -readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix" -readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" - -readonly NIX_INSTALLED_NIX="@nix@" -readonly NIX_INSTALLED_CACERT="@cacert@" -readonly EXTRACTED_NIX_PATH="$(dirname "$0")" - -readonly ROOT_HOME=$(echo ~root) - -if [ -t 0 ]; then - readonly IS_HEADLESS='no' -else - readonly IS_HEADLESS='yes' -fi - -headless() { - if [ "$IS_HEADLESS" = "yes" ]; then - return 0 - else - return 1 - fi -} - -contactme() { - echo "We'd love to help if you need it." - echo "" - echo "If you can, open an issue at https://github.com/nixos/nix/issues" - echo "" - echo "Or feel free to contact the team," - echo " - on IRC #nixos on irc.freenode.net" - echo " - on twitter @nixos_org" -} - -uninstall_directions() { - subheader "Uninstalling nix:" - local step=0 - - if poly_service_installed_check; then - step=$((step + 1)) - poly_service_uninstall_directions "$step" - fi - - for profile_target in "${PROFILE_TARGETS[@]}"; do - if [ -e "$profile_target" ] && [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then - step=$((step + 1)) - cat < $1" -} - -bold() { - echo "$BOLD$*$ESC" -} - -ok() { - _textout "$GREEN" "$@" -} - -warning() { - warningheader "warning!" - cat - echo "" -} - -failure() { - header "oh no!" - _textout "$RED" "$@" - echo "" - _textout "$RED" "$(contactme)" - trap finish_cleanup EXIT - exit 1 -} - -ui_confirm() { - _textout "$GREEN$GREEN_UL" "$1" - - if headless; then - echo "No TTY, assuming you would say yes :)" - return 0 - fi - - local prompt="[y/n] " - echo -n "$prompt" - while read -r y; do - if [ "$y" = "y" ]; then - echo "" - return 0 - elif [ "$y" = "n" ]; then - echo "" - return 1 - else - _textout "$RED" "Sorry, I didn't understand. 
I can only understand answers of y or n" - echo -n "$prompt" - fi - done - echo "" - return 1 -} - -__sudo() { - local expl="$1" - local cmd="$2" - shift - header "sudo execution" - - echo "I am executing:" - echo "" - printf " $ sudo %s\\n" "$cmd" - echo "" - echo "$expl" - echo "" - - return 0 -} - -_sudo() { - local expl="$1" - shift - if ! headless; then - __sudo "$expl" "$*" - fi - sudo "$@" -} - - -readonly SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX) -function finish_cleanup { - rm -rf "$SCRATCH" -} - -function finish_fail { - finish_cleanup - - failure < /dev/null >&2; then - failure < "$SCRATCH/.nix-channels" - _sudo "to set up the default system channel (part 1)" \ - install -m 0664 "$SCRATCH/.nix-channels" "$ROOT_HOME/.nix-channels" -} - -welcome_to_nix() { - ok "Welcome to the Multi-User Nix Installation" - - cat < "$SCRATCH/nix.conf" -build-users-group = $NIX_BUILD_GROUP_NAME -EOF - _sudo "to place the default nix daemon configuration (part 2)" \ - install -m 0664 "$SCRATCH/nix.conf" /etc/nix/nix.conf -} - -main() { - if [ "$(uname -s)" = "Darwin" ]; then - # shellcheck source=./install-darwin-multi-user.sh - . "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh" - elif [ "$(uname -s)" = "Linux" ]; then - if [ -e /run/systemd/system ]; then - # shellcheck source=./install-systemd-multi-user.sh - . "$EXTRACTED_NIX_PATH/install-systemd-multi-user.sh" - else - failure "Sorry, the multi-user installation requires systemd on Linux (detected using /run/systemd/system)" - fi - else - failure "Sorry, I don't know what to do on $(uname)" - fi - - welcome_to_nix - chat_about_sudo - - if [ "${ALLOW_PREEXISTING_INSTALLATION:-}" = "" ]; then - validate_starting_assumptions - fi - - setup_report - - if ! ui_confirm "Ready to continue?"; then - ok "Alright, no changes have been made :)" - contactme - trap finish_cleanup EXIT - exit 1 - fi - - create_build_group - create_build_users - create_directories - place_channel_configuration - install_from_extracted_nix - - configure_shell_profile - - set +eu - . /etc/profile - set -eu - - setup_default_profile - place_nix_configuration - poly_configure_nix_daemon_service - - trap finish_success EXIT -} - - -main diff --git a/third_party/nix/scripts/install-nix-from-closure.sh b/third_party/nix/scripts/install-nix-from-closure.sh deleted file mode 100644 index 3f15818547..0000000000 --- a/third_party/nix/scripts/install-nix-from-closure.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/bin/sh - -set -e - -dest="/nix" -self="$(dirname "$0")" -nix="@nix@" -cacert="@cacert@" - - -if ! [ -e "$self/.reginfo" ]; then - echo "$0: incomplete installer (.reginfo is missing)" >&2 -fi - -if [ -z "$USER" ] && ! USER=$(id -u -n); then - echo "$0: \$USER is not set" >&2 - exit 1 -fi - -if [ -z "$HOME" ]; then - echo "$0: \$HOME is not set" >&2 - exit 1 -fi - -# macOS support for 10.12.6 or higher -if [ "$(uname -s)" = "Darwin" ]; then - macos_major=$(sw_vers -productVersion | cut -d '.' -f 2) - macos_minor=$(sw_vers -productVersion | cut -d '.' -f 3) - if [ "$macos_major" -lt 12 ] || { [ "$macos_major" -eq 12 ] && [ "$macos_minor" -lt 6 ]; }; then - echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher" - exit 1 - fi -fi - -# Determine if we could use the multi-user installer or not -if [ "$(uname -s)" = "Darwin" ]; then - echo "Note: a multi-user installation is possible. 
See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2 -elif [ "$(uname -s)" = "Linux" ] && [ -e /run/systemd/system ]; then - echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2 -fi - -INSTALL_MODE=no-daemon -# Trivially handle the --daemon / --no-daemon options -if [ "x${1:-}" = "x--no-daemon" ]; then - INSTALL_MODE=no-daemon -elif [ "x${1:-}" = "x--daemon" ]; then - INSTALL_MODE=daemon -elif [ "x${1:-}" != "x" ]; then - ( - echo "Nix Installer [--daemon|--no-daemon]" - - echo "Choose installation method." - echo "" - echo " --daemon: Installs and configures a background daemon that manages the store," - echo " providing multi-user support and better isolation for local builds." - echo " Both for security and reproducibility, this method is recommended if" - echo " supported on your platform." - echo " See https://nixos.org/nix/manual/#sect-multi-user-installation" - echo "" - echo " --no-daemon: Simple, single-user installation that does not require root and is" - echo " trivial to uninstall." - echo " (default)" - echo "" - ) >&2 - exit -fi - -if [ "$INSTALL_MODE" = "daemon" ]; then - printf '\e[1;31mSwitching to the Daemon-based Installer\e[0m\n' - exec "$self/install-multi-user" - exit 0 -fi - -if [ "$(id -u)" -eq 0 ]; then - printf '\e[1;31mwarning: installing Nix as root is not supported by this script!\e[0m\n' -fi - -echo "performing a single-user installation of Nix..." >&2 - -if ! [ -e $dest ]; then - cmd="mkdir -m 0755 $dest && chown $USER $dest" - echo "directory $dest does not exist; creating it by running '$cmd' using sudo" >&2 - if ! sudo sh -c "$cmd"; then - echo "$0: please manually run '$cmd' as root to create $dest" >&2 - exit 1 - fi -fi - -if ! [ -w $dest ]; then - echo "$0: directory $dest exists, but is not writable by you. This could indicate that another user has already performed a single-user installation of Nix on this system. If you wish to enable multi-user support see http://nixos.org/nix/manual/#ssec-multi-user. If you wish to continue with a single-user install for $USER please run 'chown -R $USER $dest' as root." >&2 - exit 1 -fi - -mkdir -p $dest/store - -printf "copying Nix to %s..." "${dest}/store" >&2 - -for i in $(cd "$self/store" >/dev/null && echo ./*); do - printf "." >&2 - i_tmp="$dest/store/$i.$$" - if [ -e "$i_tmp" ]; then - rm -rf "$i_tmp" - fi - if ! [ -e "$dest/store/$i" ]; then - cp -Rp "$self/store/$i" "$i_tmp" - chmod -R a-w "$i_tmp" - chmod +w "$i_tmp" - mv "$i_tmp" "$dest/store/$i" - chmod -w "$dest/store/$i" - fi -done -echo "" >&2 - -if ! "$nix/bin/nix-store" --load-db < "$self/.reginfo"; then - echo "$0: unable to register valid paths" >&2 - exit 1 -fi - -. "$nix/etc/profile.d/nix.sh" - -if ! "$nix/bin/nix-env" -i "$nix"; then - echo "$0: unable to install Nix into your default profile" >&2 - exit 1 -fi - -# Install an SSL certificate bundle. -if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then - $nix/bin/nix-env -i "$cacert" - export NIX_SSL_CERT_FILE="$HOME/.nix-profile/etc/ssl/certs/ca-bundle.crt" -fi - -# Subscribe the user to the Nixpkgs channel and fetch it. -if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then - $nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable -fi -if [ -z "$_NIX_INSTALLER_TEST" ]; then - if ! $nix/bin/nix-channel --update nixpkgs; then - echo "Fetching the nixpkgs channel failed. (Are you offline?)" - echo "To try again later, run \"nix-channel --update nixpkgs\"." 
- fi -fi - -added= -p=$HOME/.nix-profile/etc/profile.d/nix.sh -if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then - # Make the shell source nix.sh during login. - for i in .bash_profile .bash_login .profile; do - fn="$HOME/$i" - if [ -w "$fn" ]; then - if ! grep -q "$p" "$fn"; then - echo "modifying $fn..." >&2 - echo "if [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn" - fi - added=1 - break - fi - done -fi - -if [ -z "$added" ]; then - cat >&2 <&2 < /dev/null 2>&1 -} - -poly_group_id_get() { - getent group "$1" | cut -d: -f3 -} - -poly_create_build_group() { - _sudo "Create the Nix build group, $NIX_BUILD_GROUP_NAME" \ - groupadd -g "$NIX_BUILD_GROUP_ID" --system \ - "$NIX_BUILD_GROUP_NAME" >&2 -} - -poly_user_exists() { - getent passwd "$1" > /dev/null 2>&1 -} - -poly_user_id_get() { - getent passwd "$1" | cut -d: -f3 -} - -poly_user_hidden_get() { - echo "1" -} - -poly_user_hidden_set() { - true -} - -poly_user_home_get() { - getent passwd "$1" | cut -d: -f6 -} - -poly_user_home_set() { - _sudo "in order to give $1 a safe home directory" \ - usermod --home "$2" "$1" -} - -poly_user_note_get() { - getent passwd "$1" | cut -d: -f5 -} - -poly_user_note_set() { - _sudo "in order to give $1 a useful comment" \ - usermod --comment "$2" "$1" -} - -poly_user_shell_get() { - getent passwd "$1" | cut -d: -f7 -} - -poly_user_shell_set() { - _sudo "in order to prevent $1 from logging in" \ - usermod --shell "$2" "$1" -} - -poly_user_in_group_check() { - groups "$1" | grep -q "$2" > /dev/null 2>&1 -} - -poly_user_in_group_set() { - _sudo "Add $1 to the $2 group"\ - usermod --append --groups "$2" "$1" -} - -poly_user_primary_group_get() { - getent passwd "$1" | cut -d: -f4 -} - -poly_user_primary_group_set() { - _sudo "to let the nix daemon use this user for builds (this might seem redundant, but there are two concepts of group membership)" \ - usermod --gid "$2" "$1" - -} - -poly_create_build_user() { - username=$1 - uid=$2 - builder_num=$3 - - _sudo "Creating the Nix build user, $username" \ - useradd \ - --home-dir /var/empty \ - --comment "Nix build user $builder_num" \ - --gid "$NIX_BUILD_GROUP_ID" \ - --groups "$NIX_BUILD_GROUP_NAME" \ - --no-user-group \ - --system \ - --shell /sbin/nologin \ - --uid "$uid" \ - --password "!" \ - "$username" -} diff --git a/third_party/nix/scripts/install.in b/third_party/nix/scripts/install.in deleted file mode 100644 index 902758b138..0000000000 --- a/third_party/nix/scripts/install.in +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/sh - -# This script installs the Nix package manager on your system by -# downloading a binary distribution and running its installer script -# (which in turn creates and populates /nix). 
- -{ # Prevent execution if this script was only partially downloaded -oops() { - echo "$0:" "$@" >&2 - exit 1 -} - -tmpDir="$(mktemp -d -t nix-binary-tarball-unpack.XXXXXXXXXX || \ - oops "Can't create temporary directory for downloading the Nix binary tarball")" -cleanup() { - rm -rf "$tmpDir" -} -trap cleanup EXIT INT QUIT TERM - -require_util() { - command -v "$1" > /dev/null 2>&1 || - oops "you do not have '$1' installed, which I need to $2" -} - -case "$(uname -s).$(uname -m)" in - Linux.x86_64) system=x86_64-linux; hash=@binaryTarball_x86_64-linux@;; - Linux.i?86) system=i686-linux; hash=@binaryTarball_i686-linux@;; - Linux.aarch64) system=aarch64-linux; hash=@binaryTarball_aarch64-linux@;; - Darwin.x86_64) system=x86_64-darwin; hash=@binaryTarball_x86_64-darwin@;; - *) oops "sorry, there is no binary distribution of Nix for your platform";; -esac - -url="https://nixos.org/releases/nix/nix-@nixVersion@/nix-@nixVersion@-$system.tar.xz" - -tarball="$tmpDir/$(basename "$tmpDir/nix-@nixVersion@-$system.tar.xz")" - -require_util curl "download the binary tarball" -require_util tar "unpack the binary tarball" - -echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..." -curl -L "$url" -o "$tarball" || oops "failed to download '$url'" - -if command -v sha256sum > /dev/null 2>&1; then - hash2="$(sha256sum -b "$tarball" | cut -c1-64)" -elif command -v shasum > /dev/null 2>&1; then - hash2="$(shasum -a 256 -b "$tarball" | cut -c1-64)" -elif command -v openssl > /dev/null 2>&1; then - hash2="$(openssl dgst -r -sha256 "$tarball" | cut -c1-64)" -else - oops "cannot verify the SHA-256 hash of '$url'; you need one of 'shasum', 'sha256sum', or 'openssl'" -fi - -if [ "$hash" != "$hash2" ]; then - oops "SHA-256 hash mismatch in '$url'; expected $hash, got $hash2" -fi - -unpack=$tmpDir/unpack -mkdir -p "$unpack" -tar -xf "$tarball" -C "$unpack" || oops "failed to unpack '$url'" - -script=$(echo "$unpack"/*/install) - -[ -e "$script" ] || oops "installation script is missing from the binary tarball!" -"$script" "$@" - -} # End of wrapping diff --git a/third_party/nix/scripts/nix-http-export.cgi.in b/third_party/nix/scripts/nix-http-export.cgi.in deleted file mode 100755 index 19a505af1c..0000000000 --- a/third_party/nix/scripts/nix-http-export.cgi.in +++ /dev/null @@ -1,51 +0,0 @@ -#! /bin/sh - -export HOME=/tmp -export NIX_REMOTE=daemon - -TMP_DIR="${TMP_DIR:-/tmp/nix-export}" - -@coreutils@/mkdir -p "$TMP_DIR" || true -@coreutils@/chmod a+r "$TMP_DIR" - -needed_path="?$QUERY_STRING" -needed_path="${needed_path#*[?&]needed_path=}" -needed_path="${needed_path%%&*}" -#needed_path="$(echo $needed_path | ./unhttp)" -needed_path="${needed_path//%2B/+}" -needed_path="${needed_path//%3D/=}" - -echo needed_path: "$needed_path" >&2 - -NIX_STORE="${NIX_STORE_DIR:-/nix/store}" - -echo NIX_STORE: "${NIX_STORE}" >&2 - -full_path="${NIX_STORE}"/"$needed_path" - -if [ "$needed_path" != "${needed_path%.drv}" ]; then - echo "Status: 403 You should create the derivation file yourself" - echo "Content-Type: text/plain" - echo - echo "Refusing to disclose derivation contents" - exit -fi - -if @bindir@/nix-store --check-validity "$full_path"; then - if ! 
[ -e nix-export/"$needed_path".nar.gz ]; then - @bindir@/nix-store --export "$full_path" | @gzip@ > "$TMP_DIR"/"$needed_path".nar.gz - @coreutils@/ln -fs "$TMP_DIR"/"$needed_path".nar.gz nix-export/"$needed_path".nar.gz - fi; - echo "Status: 301 Moved" - echo "Location: nix-export/"$needed_path".nar.gz" - echo -else - echo "Status: 404 No such path found" - echo "Content-Type: text/plain" - echo - echo "Path not found:" - echo "$needed_path" - echo "checked:" - echo "$full_path" -fi - diff --git a/third_party/nix/scripts/nix-profile-daemon.sh.in b/third_party/nix/scripts/nix-profile-daemon.sh.in deleted file mode 100644 index 47655080a6..0000000000 --- a/third_party/nix/scripts/nix-profile-daemon.sh.in +++ /dev/null @@ -1,29 +0,0 @@ -# Only execute this file once per shell. -if [ -n "${__ETC_PROFILE_NIX_SOURCED:-}" ]; then return; fi -__ETC_PROFILE_NIX_SOURCED=1 - -export NIX_USER_PROFILE_DIR="@localstatedir@/nix/profiles/per-user/$USER" -export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile" - -# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. -if [ ! -z "${NIX_SSL_CERT_FILE:-}" ]; then - : # Allow users to override the NIX_SSL_CERT_FILE -elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch - export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt -elif [ -e /etc/ssl/ca-bundle.pem ]; then # openSUSE Tumbleweed - export NIX_SSL_CERT_FILE=/etc/ssl/ca-bundle.pem -elif [ -e /etc/ssl/certs/ca-bundle.crt ]; then # Old NixOS - export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt -elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS - export NIX_SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt -else - # Fall back to what is in the nix profiles, favouring whatever is defined last. - for i in $NIX_PROFILES; do - if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then - export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt - fi - done -fi - -export NIX_PATH="nixpkgs=@localstatedir@/nix/profiles/per-user/root/channels/nixpkgs:@localstatedir@/nix/profiles/per-user/root/channels" -export PATH="$HOME/.nix-profile/bin:@localstatedir@/nix/profiles/default/bin:$PATH" diff --git a/third_party/nix/scripts/nix-profile.sh.in b/third_party/nix/scripts/nix-profile.sh.in deleted file mode 100644 index e15f7cd46b..0000000000 --- a/third_party/nix/scripts/nix-profile.sh.in +++ /dev/null @@ -1,39 +0,0 @@ -if [ -n "$HOME" ] && [ -n "$USER" ]; then - - # Set up the per-user profile. - # This part should be kept in sync with nixpkgs:nixos/modules/programs/shell.nix - - NIX_LINK=$HOME/.nix-profile - - NIX_USER_PROFILE_DIR=@localstatedir@/nix/profiles/per-user/$USER - - # Append ~/.nix-defexpr/channels to $NIX_PATH so that - # paths work when the user has fetched the Nixpkgs channel. - export NIX_PATH=${NIX_PATH:+$NIX_PATH:}$HOME/.nix-defexpr/channels - - # Set up environment. - # This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix - export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile" - - # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. 
- if [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch - export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt - elif [ -e /etc/ssl/ca-bundle.pem ]; then # openSUSE Tumbleweed - export NIX_SSL_CERT_FILE=/etc/ssl/ca-bundle.pem - elif [ -e /etc/ssl/certs/ca-bundle.crt ]; then # Old NixOS - export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt - elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS - export NIX_SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt - elif [ -e "$NIX_LINK/etc/ssl/certs/ca-bundle.crt" ]; then # fall back to cacert in Nix profile - export NIX_SSL_CERT_FILE="$NIX_LINK/etc/ssl/certs/ca-bundle.crt" - elif [ -e "$NIX_LINK/etc/ca-bundle.crt" ]; then # old cacert in Nix profile - export NIX_SSL_CERT_FILE="$NIX_LINK/etc/ca-bundle.crt" - fi - - if [ -n "${MANPATH-}" ]; then - export MANPATH="$NIX_LINK/share/man:$MANPATH" - fi - - export PATH="$NIX_LINK/bin:$PATH" - unset NIX_LINK NIX_USER_PROFILE_DIR -fi diff --git a/third_party/nix/scripts/nix-reduce-build.in b/third_party/nix/scripts/nix-reduce-build.in deleted file mode 100755 index 50beb9d10b..0000000000 --- a/third_party/nix/scripts/nix-reduce-build.in +++ /dev/null @@ -1,171 +0,0 @@ -#! @bash@ - -WORKING_DIRECTORY=$(mktemp -d "${TMPDIR:-/tmp}"/nix-reduce-build-XXXXXX); -cd "$WORKING_DIRECTORY"; - -if test -z "$1" || test "a--help" = "a$1" ; then - echo 'nix-reduce-build (paths or Nix expressions) -- (package sources)' >&2 - echo As in: >&2 - echo nix-reduce-build /etc/nixos/nixos -- ssh://user@somewhere.nowhere.example.org >&2 - echo nix-reduce-build /etc/nixos/nixos -- \\ - echo " " \''http://somewhere.nowhere.example.org/nix/nix-http-export.cgi?needed_path='\' >&2 - echo " store path name will be added into the end of the URL" >&2 - echo nix-reduce-build /etc/nixos/nixos -- file://home/user/nar/ >&2 - echo " that should be a directory where gzipped 'nix-store --export' ">&2 - echo " files are located (they should have .nar.gz extension)" >&2 - echo " Or all together: " >&2 - echo -e nix-reduce-build /expr.nix /e2.nix -- \\\\\\\n\ - " ssh://a@b.example.com http://n.example.com/get-nar?q= file://nar/" >&2 - echo " Also supports best-effort local builds of failing expression set:" >&2 - echo "nix-reduce-build /e.nix -- nix-daemon:// nix-self://" >&2 - echo " nix-daemon:// builds using daemon" - echo " nix-self:// builds directly using nix-store from current installation" >&2 - echo " nix-daemon-fixed:// and nix-self-fixed:// do the same, but only for" >&2; - echo "derivations with specified output hash (sha256, sha1 or md5)." >&2 - echo " nix-daemon-substitute:// and nix-self-substitute:// try to substitute" >&2; - echo "maximum amount of paths" >&2; - echo " nix-daemon-build:// and nix-self-build:// try to build (not substitute)" >&2; - echo "maximum amount of paths" >&2; - echo " If no package sources are specified, required paths are listed." >&2; - exit; -fi; - -while ! test "$1" = "--" || test "$1" = "" ; do - echo "$1" >> initial; >&2 - shift; -done -shift; -echo Will work on $(cat initial | wc -l) targets. >&2 - -while read ; do - case "$REPLY" in - ${NIX_STORE_DIR:-/nix/store}/*) - echo "$REPLY" >> paths; >&2 - ;; - *) - ( - IFS=: ; - nix-instantiate $REPLY >> paths; - ); - ;; - esac; -done < initial; -echo Proceeding $(cat paths | wc -l) paths. >&2 - -while read; do - case "$REPLY" in - *.drv) - echo "$REPLY" >> derivers; >&2 - ;; - *) - nix-store --query --deriver "$REPLY" >>derivers; - ;; - esac; -done < paths; -echo Found $(cat derivers | wc -l) derivers. 
>&2 - -cat derivers | xargs nix-store --query -R > derivers-closure; -echo Proceeding at most $(cat derivers-closure | wc -l) derivers. >&2 - -cat derivers-closure | egrep '[.]drv$' | xargs nix-store --query --outputs > wanted-paths; -cat derivers-closure | egrep -v '[.]drv$' >> wanted-paths; -echo Prepared $(cat wanted-paths | wc -l) paths to get. >&2 - -cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths; -echo We need $(cat needed-paths | wc -l) paths. >&2 - -egrep '[.]drv$' derivers-closure > critical-derivers; - -if test -z "$1" ; then - cat needed-paths; -fi; - -refresh_critical_derivers() { - echo "Finding needed derivers..." >&2; - cat critical-derivers | while read; do - if ! (nix-store --query --outputs "$REPLY" | xargs nix-store --check-validity &> /dev/null;); then - echo "$REPLY"; - fi; - done > new-critical-derivers; - mv new-critical-derivers critical-derivers; - echo The needed paths are realized by $(cat critical-derivers | wc -l) derivers. >&2 -} - -build_here() { - cat critical-derivers | while read; do - echo "Realising $REPLY using nix-daemon" >&2 - @bindir@/nix-store -r "${REPLY}" - done; -} - -try_to_substitute(){ - cat needed-paths | while read ; do - echo "Building $REPLY using nix-daemon" >&2 - @bindir@/nix-store -r "${NIX_STORE_DIR:-/nix/store}/${REPLY##*/}" - done; -} - -for i in "$@"; do - sshHost="${i#ssh://}"; - httpHost="${i#http://}"; - httpsHost="${i#https://}"; - filePath="${i#file:/}"; - if [ "$i" != "$sshHost" ]; then - cat needed-paths | while read; do - echo "Getting $REPLY and its closure over ssh" >&2 - nix-copy-closure --from "$sshHost" --gzip "$REPLY" &2 - curl ${BAD_CERTIFICATE:+-k} -L "$i${REPLY##*/}" | gunzip | nix-store --import; - done; - elif [ "$i" != "$filePath" ] ; then - cat needed-paths | while read; do - echo "Installing $REPLY from file" >&2 - gunzip < "$filePath/${REPLY##*/}".nar.gz | nix-store --import; - done; - elif [ "$i" = "nix-daemon://" ] ; then - NIX_REMOTE=daemon try_to_substitute; - refresh_critical_derivers; - NIX_REMOTE=daemon build_here; - elif [ "$i" = "nix-self://" ] ; then - NIX_REMOTE= try_to_substitute; - refresh_critical_derivers; - NIX_REMOTE= build_here; - elif [ "$i" = "nix-daemon-fixed://" ] ; then - refresh_critical_derivers; - - cat critical-derivers | while read; do - if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then - echo "Realising $REPLY using nix-daemon" >&2 - NIX_REMOTE=daemon @bindir@/nix-store -r "${REPLY}" - fi; - done; - elif [ "$i" = "nix-self-fixed://" ] ; then - refresh_critical_derivers; - - cat critical-derivers | while read; do - if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then - echo "Realising $REPLY using direct Nix build" >&2 - NIX_REMOTE= @bindir@/nix-store -r "${REPLY}" - fi; - done; - elif [ "$i" = "nix-daemon-substitute://" ] ; then - NIX_REMOTE=daemon try_to_substitute; - elif [ "$i" = "nix-self-substitute://" ] ; then - NIX_REMOTE= try_to_substitute; - elif [ "$i" = "nix-daemon-build://" ] ; then - refresh_critical_derivers; - NIX_REMOTE=daemon build_here; - elif [ "$i" = "nix-self-build://" ] ; then - refresh_critical_derivers; - NIX_REMOTE= build_here; - fi; - mv needed-paths wanted-paths; - cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths; - echo We still need $(cat needed-paths | wc -l) paths. 
>&2 -done; - -cd / -rm -r "$WORKING_DIRECTORY" diff --git a/third_party/nix/scripts/repl.sh b/third_party/nix/scripts/repl.sh deleted file mode 100755 index d068e80790..0000000000 --- a/third_party/nix/scripts/repl.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -# Run `nix repl` using a local store, for use during development. Intended to -# be run from the cmake build directory - -if [ "$#" -gt 0 ] && [ "$1" = "--debug" ]; then - gdb=(gdb --args) - shift 1 -elif [ "$1" = "--rr" ]; then - gdb=(rr record) - shift 1 -else - gdb=() -fi - -make -j 10 -NIX_STORE_DIR=$(pwd)/nix/store \ - NIX_LOG_DIR=$(pwd)/nix/var/log/nix \ - NIX_STATE_DIR=$(pwd)/nix/var/nix \ - XDG_CACHE_HOME=$(pwd)/cache \ - NIX_REMOTE=daemon \ - ${gdb[*]} ./src/nix repl "$@" diff --git a/third_party/nix/scripts/setup_store.sh b/third_party/nix/scripts/setup_store.sh deleted file mode 100755 index ee96c8d3b8..0000000000 --- a/third_party/nix/scripts/setup_store.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Setup a store for local development rooted at the current directory, and -# compatible with the scripts in this directory (repl.sh, build.sh, eval.sh, -# daemon.sh, etc). Intended to be run from the cmake build directory - -mkdir -p nix/store nix/var/nix nix/var/log/nix -ln -s $(pwd)/src/nix ./nix/build-remote -mkdir -p $(dirname "$(pwd)${SANDBOX_SHELL}") -cp "${SANDBOX_SHELL}" "$(pwd)${SANDBOX_SHELL}" diff --git a/third_party/nix/src/CMakeLists.txt b/third_party/nix/src/CMakeLists.txt deleted file mode 100644 index 486c69fa2a..0000000000 --- a/third_party/nix/src/CMakeLists.txt +++ /dev/null @@ -1,85 +0,0 @@ -# -*- mode: cmake; -*- - -# The 'nix' binary is composed of various sources below this -# directory. In the previous build system, they were all built from -# this location and this setup mimics that (with the exception of the -# various Nix libraries). 
- -add_subdirectory(proto) -add_subdirectory(libutil) -add_subdirectory(libstore) -add_subdirectory(libmain) -add_subdirectory(libexpr) -add_subdirectory(nix-daemon) - -if (PACKAGE_TESTS) - add_subdirectory(tests) -endif() - -add_executable(nix) -set_property(TARGET nix PROPERTY CXX_STANDARD 17) -include_directories(${PROJECT_BINARY_DIR}) -target_include_directories(nix PUBLIC "${nix_SOURCE_DIR}/src") - -target_sources(nix - PRIVATE - nix/command.hh - nix/legacy.hh - nix-env/user-env.hh - nix-store/dotgraph.hh - nix-store/graphml.hh - - nix/add-to-store.cc - nix/build.cc - nix/cat.cc - nix/command.cc - nix/copy.cc - nix/doctor.cc - nix/dump-path.cc - nix/edit.cc - nix/eval.cc - nix/hash.cc - nix/installables.cc - nix/legacy.cc - nix/log.cc - nix/ls.cc - nix/main.cc - nix/optimise-store.cc - nix/path-info.cc - nix/ping-store.cc - nix/repl.cc - nix/run.cc - nix/search.cc - nix/show-config.cc - nix/show-derivation.cc - nix/sigs.cc - nix/upgrade-nix.cc - nix/verify.cc - nix/why-depends.cc - - build-remote/build-remote.cc - nix-build/nix-build.cc - nix-channel/nix-channel.cc - nix-collect-garbage/nix-collect-garbage.cc - nix-copy-closure/nix-copy-closure.cc - nix-env/nix-env.cc - nix-env/user-env.cc - nix-instantiate/nix-instantiate.cc - nix-prefetch-url/nix-prefetch-url.cc - nix-store/dotgraph.cc - nix-store/graphml.cc - nix-store/nix-store.cc -) - -target_link_libraries(nix - nixexpr - nixmain - nixstore - nixutil - - absl::strings - editline - glog -) - -INSTALL(TARGETS nix DESTINATION bin) diff --git a/third_party/nix/src/build-remote/build-remote.cc b/third_party/nix/src/build-remote/build-remote.cc deleted file mode 100644 index 43564a5eb7..0000000000 --- a/third_party/nix/src/build-remote/build-remote.cc +++ /dev/null @@ -1,274 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "libmain/shared.hh" -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/local-store.hh" -#include "libstore/machines.hh" -#include "libstore/pathlocks.hh" -#include "libstore/store-api.hh" -#include "libutil/serialise.hh" -#include "nix/legacy.hh" - -using namespace nix; -using std::cin; - -static void handleAlarm(int sig) {} - -std::string escapeUri(std::string uri) { - std::replace(uri.begin(), uri.end(), '/', '_'); - return uri; -} - -static std::string currentLoad; - -static AutoCloseFD openSlotLock(const Machine& m, unsigned long long slot) { - return openLockFile(fmt("%s/%s-%d", currentLoad, escapeUri(m.storeUri), slot), - true); -} - -static bool allSupportedLocally(const std::set& requiredFeatures) { - for (auto& feature : requiredFeatures) { - if (settings.systemFeatures.get().count(feature) == 0u) { - return false; - } - } - return true; -} - -static int _main(int argc, char* argv[]) { - { - /* Ensure we don't get any SSH passphrase or host key popups. */ - unsetenv("DISPLAY"); - unsetenv("SSH_ASKPASS"); - - FdSource source(STDIN_FILENO); - - /* Read the parent's settings. */ - while (readInt(source) != 0u) { - auto name = readString(source); - auto value = readString(source); - settings.set(name, value); - } - - settings.maxBuildJobs.set("1"); // hack to make tests with local?root= work - - auto store = openStore().cast(); - - /* It would be more appropriate to use $XDG_RUNTIME_DIR, since - that gets cleared on reboot, but it wouldn't work on macOS. 
*/ - currentLoad = store->stateDir + "/current-load"; - - std::shared_ptr sshStore; - AutoCloseFD bestSlotLock; - - auto machines = getMachines(); - DLOG(INFO) << "got " << machines.size() << " remote builders"; - - if (machines.empty()) { - std::cerr << "# decline-permanently\n"; - return 0; - } - - std::string drvPath; - std::string storeUri; - - while (true) { - try { - auto s = readString(source); - if (s != "try") { - return 0; - } - } catch (EndOfFile&) { - return 0; - } - - auto amWilling = readInt(source); - auto neededSystem = readString(source); - source >> drvPath; - auto requiredFeatures = readStrings>(source); - - auto canBuildLocally = - (amWilling != 0u) && - (neededSystem == settings.thisSystem || - settings.extraPlatforms.get().count(neededSystem) > 0) && - allSupportedLocally(requiredFeatures); - - /* Error ignored here, will be caught later */ - mkdir(currentLoad.c_str(), 0777); - - while (true) { - bestSlotLock = AutoCloseFD(-1); - AutoCloseFD lock(openLockFile(currentLoad + "/main-lock", true)); - lockFile(lock.get(), ltWrite, true); - - bool rightType = false; - - Machine* bestMachine = nullptr; - unsigned long long bestLoad = 0; - for (auto& m : machines) { - DLOG(INFO) << "considering building on remote machine '" << m.storeUri - << "'"; - - if (m.enabled && - std::find(m.systemTypes.begin(), m.systemTypes.end(), - neededSystem) != m.systemTypes.end() && - m.allSupported(requiredFeatures) && - m.mandatoryMet(requiredFeatures)) { - rightType = true; - AutoCloseFD free; - unsigned long long load = 0; - for (unsigned long long slot = 0; slot < m.maxJobs; ++slot) { - auto slotLock = openSlotLock(m, slot); - if (lockFile(slotLock.get(), ltWrite, false)) { - if (!free) { - free = std::move(slotLock); - } - } else { - ++load; - } - } - if (!free) { - continue; - } - bool best = false; - if (!bestMachine || !bestSlotLock) { - best = true; - } else if (load / m.speedFactor < - bestLoad / bestMachine->speedFactor) { - best = true; - } else if (load / m.speedFactor == - bestLoad / bestMachine->speedFactor) { - if (m.speedFactor > bestMachine->speedFactor) { - best = true; - } else if (m.speedFactor == bestMachine->speedFactor) { - if (load < bestLoad) { - best = true; - } - } - } - if (best) { - bestLoad = load; - bestSlotLock = std::move(free); - bestMachine = &m; - } - } - } - - if (!bestSlotLock || !bestMachine) { - if (rightType && !canBuildLocally) { - std::cerr << "# postpone\n"; - } else { - std::cerr << "# decline\n"; - } - break; - } - - futimens(bestSlotLock.get(), nullptr); - - lock = AutoCloseFD(-1); - - try { - DLOG(INFO) << "connecting to '" << bestMachine->storeUri << "'"; - - Store::Params storeParams; - if (absl::StartsWith(bestMachine->storeUri, "ssh://")) { - storeParams["max-connections"] = "1"; - storeParams["log-fd"] = "4"; - if (!bestMachine->sshKey.empty()) { - storeParams["ssh-key"] = bestMachine->sshKey; - } - } - - sshStore = openStore(bestMachine->storeUri, storeParams); - sshStore->connect(); - storeUri = bestMachine->storeUri; - - } catch (std::exception& e) { - auto msg = absl::StripTrailingAsciiWhitespace(drainFD(5, false)); - LOG(ERROR) << "cannot build on '" << bestMachine->storeUri - << "': " << e.what() - << (msg.empty() ? 
"" : absl::StrCat(": ", msg)); - bestMachine->enabled = false; - continue; - } - - goto connected; - } - } - - connected: - close(5); - - std::cerr << "# accept\n" << storeUri << "\n"; - - auto inputs = readStrings(source); - auto outputs = readStrings(source); - - AutoCloseFD uploadLock = openLockFile( - currentLoad + "/" + escapeUri(storeUri) + ".upload-lock", true); - - { - DLOG(INFO) << "waiting for the upload lock to '" << storeUri << "'"; - - auto old = signal(SIGALRM, handleAlarm); - alarm(15 * 60); - if (!lockFile(uploadLock.get(), ltWrite, true)) { - LOG(ERROR) << "somebody is hogging the upload lock, continuing..."; - } - alarm(0); - signal(SIGALRM, old); - } - - auto substitute = - settings.buildersUseSubstitutes ? Substitute : NoSubstitute; - - { - DLOG(INFO) << "copying dependencies to '" << storeUri << "'"; - copyPaths(store, ref(sshStore), inputs, NoRepair, NoCheckSigs, - substitute); - } - - uploadLock = AutoCloseFD(-1); - - BasicDerivation drv( - readDerivation(store->realStoreDir + "/" + baseNameOf(drvPath))); - drv.inputSrcs = inputs; - - auto result = sshStore->buildDerivation(std::cerr, drvPath, drv); - - if (!result.success()) { - throw Error("build of '%s' on '%s' failed: %s", drvPath, storeUri, - result.errorMsg); - } - - PathSet missing; - for (auto& path : outputs) { - if (!store->isValidPath(path)) { - missing.insert(path); - } - } - - if (!missing.empty()) { - DLOG(INFO) << "copying outputs from '" << storeUri << "'"; - store->locksHeld.insert(missing.begin(), missing.end()); /* FIXME: ugly */ - copyPaths(ref(sshStore), store, missing, NoRepair, NoCheckSigs, - NoSubstitute); - } - - return 0; - } -} - -static RegisterLegacyCommand s1("build-remote", _main); diff --git a/third_party/nix/src/cpptoml/LICENSE b/third_party/nix/src/cpptoml/LICENSE deleted file mode 100644 index 8802c4fa5a..0000000000 --- a/third_party/nix/src/cpptoml/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2014 Chase Geigle - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/third_party/nix/src/cpptoml/cpptoml.h b/third_party/nix/src/cpptoml/cpptoml.h deleted file mode 100644 index 150b53ff86..0000000000 --- a/third_party/nix/src/cpptoml/cpptoml.h +++ /dev/null @@ -1,3668 +0,0 @@ -/** - * @file cpptoml.h - * @author Chase Geigle - * @date May 2013 - */ - -#ifndef CPPTOML_H -#define CPPTOML_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if __cplusplus > 201103L -#define CPPTOML_DEPRECATED(reason) [[deprecated(reason)]] -#elif defined(__clang__) -#define CPPTOML_DEPRECATED(reason) __attribute__((deprecated(reason))) -#elif defined(__GNUG__) -#define CPPTOML_DEPRECATED(reason) __attribute__((deprecated)) -#elif defined(_MSC_VER) -#if _MSC_VER < 1910 -#define CPPTOML_DEPRECATED(reason) __declspec(deprecated) -#else -#define CPPTOML_DEPRECATED(reason) [[deprecated(reason)]] -#endif -#endif - -namespace cpptoml -{ -class writer; // forward declaration -class base; // forward declaration -#if defined(CPPTOML_USE_MAP) -// a std::map will ensure that entries a sorted, albeit at a slight -// performance penalty relative to the (default) unordered_map -using string_to_base_map = std::map>; -#else -// by default an unordered_map is used for best performance as the -// toml specification does not require entries to be sorted -using string_to_base_map - = std::unordered_map>; -#endif - -// if defined, `base` will retain type information in form of an enum class -// such that static_cast can be used instead of dynamic_cast -// #define CPPTOML_NO_RTTI - -template -class option -{ - public: - option() : empty_{true} - { - // nothing - } - - option(T value) : empty_{false}, value_(std::move(value)) - { - // nothing - } - - explicit operator bool() const - { - return !empty_; - } - - const T& operator*() const - { - return value_; - } - - const T* operator->() const - { - return &value_; - } - - template - T value_or(U&& alternative) const - { - if (!empty_) - return value_; - return static_cast(std::forward(alternative)); - } - - private: - bool empty_; - T value_; -}; - -struct local_date -{ - int year = 0; - int month = 0; - int day = 0; -}; - -struct local_time -{ - int hour = 0; - int minute = 0; - int second = 0; - int microsecond = 0; -}; - -struct zone_offset -{ - int hour_offset = 0; - int minute_offset = 0; -}; - -struct local_datetime : local_date, local_time -{ -}; - -struct offset_datetime : local_datetime, zone_offset -{ - static inline struct offset_datetime from_zoned(const struct tm& t) - { - offset_datetime dt; - dt.year = t.tm_year + 1900; - dt.month = t.tm_mon + 1; - dt.day = t.tm_mday; - dt.hour = t.tm_hour; - dt.minute = t.tm_min; - dt.second = t.tm_sec; - - char buf[16]; - strftime(buf, 16, "%z", &t); - - int offset = std::stoi(buf); - dt.hour_offset = offset / 100; - dt.minute_offset = offset % 100; - return dt; - } - - CPPTOML_DEPRECATED("from_local has been renamed to from_zoned") - static inline struct offset_datetime from_local(const struct tm& t) - { - return from_zoned(t); - } - - static inline struct offset_datetime from_utc(const struct tm& t) - { - offset_datetime dt; - dt.year = t.tm_year + 1900; - dt.month = t.tm_mon + 1; - dt.day = t.tm_mday; - dt.hour = t.tm_hour; - dt.minute = t.tm_min; - dt.second = t.tm_sec; - return dt; - } -}; - -CPPTOML_DEPRECATED("datetime has been renamed to offset_datetime") -typedef offset_datetime datetime; - -class fill_guard -{ - public: - fill_guard(std::ostream& os) : os_(os), fill_{os.fill()} - { - 
// nothing - } - - ~fill_guard() - { - os_.fill(fill_); - } - - private: - std::ostream& os_; - std::ostream::char_type fill_; -}; - -inline std::ostream& operator<<(std::ostream& os, const local_date& dt) -{ - fill_guard g{os}; - os.fill('0'); - - using std::setw; - os << setw(4) << dt.year << "-" << setw(2) << dt.month << "-" << setw(2) - << dt.day; - - return os; -} - -inline std::ostream& operator<<(std::ostream& os, const local_time& ltime) -{ - fill_guard g{os}; - os.fill('0'); - - using std::setw; - os << setw(2) << ltime.hour << ":" << setw(2) << ltime.minute << ":" - << setw(2) << ltime.second; - - if (ltime.microsecond > 0) - { - os << "."; - int power = 100000; - for (int curr_us = ltime.microsecond; curr_us; power /= 10) - { - auto num = curr_us / power; - os << num; - curr_us -= num * power; - } - } - - return os; -} - -inline std::ostream& operator<<(std::ostream& os, const zone_offset& zo) -{ - fill_guard g{os}; - os.fill('0'); - - using std::setw; - - if (zo.hour_offset != 0 || zo.minute_offset != 0) - { - if (zo.hour_offset > 0) - { - os << "+"; - } - else - { - os << "-"; - } - os << setw(2) << std::abs(zo.hour_offset) << ":" << setw(2) - << std::abs(zo.minute_offset); - } - else - { - os << "Z"; - } - - return os; -} - -inline std::ostream& operator<<(std::ostream& os, const local_datetime& dt) -{ - return os << static_cast(dt) << "T" - << static_cast(dt); -} - -inline std::ostream& operator<<(std::ostream& os, const offset_datetime& dt) -{ - return os << static_cast(dt) - << static_cast(dt); -} - -template -struct is_one_of; - -template -struct is_one_of : std::is_same -{ -}; - -template -struct is_one_of -{ - const static bool value - = std::is_same::value || is_one_of::value; -}; - -template -class value; - -template -struct valid_value - : is_one_of -{ -}; - -template -struct value_traits; - -template -struct valid_value_or_string_convertible -{ - - const static bool value = valid_value::type>::value - || std::is_convertible::value; -}; - -template -struct value_traits::value>::type> -{ - using value_type = typename std::conditional< - valid_value::type>::value, - typename std::decay::type, std::string>::type; - - using type = value; - - static value_type construct(T&& val) - { - return value_type(val); - } -}; - -template -struct value_traits< - T, - typename std::enable_if< - !valid_value_or_string_convertible::value - && std::is_floating_point::type>::value>::type> -{ - using value_type = typename std::decay::type; - - using type = value; - - static value_type construct(T&& val) - { - return value_type(val); - } -}; - -template -struct value_traits< - T, typename std::enable_if< - !valid_value_or_string_convertible::value - && !std::is_floating_point::type>::value - && std::is_signed::type>::value>::type> -{ - using value_type = int64_t; - - using type = value; - - static value_type construct(T&& val) - { - if (val < (std::numeric_limits::min)()) - throw std::underflow_error{"constructed value cannot be " - "represented by a 64-bit signed " - "integer"}; - - if (val > (std::numeric_limits::max)()) - throw std::overflow_error{"constructed value cannot be represented " - "by a 64-bit signed integer"}; - - return static_cast(val); - } -}; - -template -struct value_traits< - T, typename std::enable_if< - !valid_value_or_string_convertible::value - && std::is_unsigned::type>::value>::type> -{ - using value_type = int64_t; - - using type = value; - - static value_type construct(T&& val) - { - if (val > static_cast((std::numeric_limits::max)())) - throw 
std::overflow_error{"constructed value cannot be represented " - "by a 64-bit signed integer"}; - - return static_cast(val); - } -}; - -class array; -class table; -class table_array; - -template -struct array_of_trait -{ - using return_type = option>; -}; - -template <> -struct array_of_trait -{ - using return_type = option>>; -}; - -template -inline std::shared_ptr::type> make_value(T&& val); -inline std::shared_ptr make_array(); - -namespace detail -{ -template -inline std::shared_ptr make_element(); -} - -inline std::shared_ptr make_table(); -inline std::shared_ptr make_table_array(bool is_inline = false); - -#if defined(CPPTOML_NO_RTTI) -/// Base type used to store underlying data type explicitly if RTTI is disabled -enum class base_type -{ - NONE, - STRING, - LOCAL_TIME, - LOCAL_DATE, - LOCAL_DATETIME, - OFFSET_DATETIME, - INT, - FLOAT, - BOOL, - TABLE, - ARRAY, - TABLE_ARRAY -}; - -/// Type traits class to convert C++ types to enum member -template -struct base_type_traits; - -template <> -struct base_type_traits -{ - static const base_type type = base_type::STRING; -}; - -template <> -struct base_type_traits -{ - static const base_type type = base_type::LOCAL_TIME; -}; - -template <> -struct base_type_traits -{ - static const base_type type = base_type::LOCAL_DATE; -}; - -template <> -struct base_type_traits -{ - static const base_type type = base_type::LOCAL_DATETIME; -}; - -template <> -struct base_type_traits -{ - static const base_type type = base_type::OFFSET_DATETIME; -}; - -template <> -struct base_type_traits -{ - static const base_type type = base_type::INT; -}; - -template <> -struct base_type_traits -{ - static const base_type type = base_type::FLOAT; -}; - -template <> -struct base_type_traits -{ - static const base_type type = base_type::BOOL; -}; - -template <> -struct base_type_traits
-{ - static const base_type type = base_type::TABLE; -}; - -template <> -struct base_type_traits -{ - static const base_type type = base_type::ARRAY; -}; - -template <> -struct base_type_traits -{ - static const base_type type = base_type::TABLE_ARRAY; -}; -#endif - -/** - * A generic base TOML value used for type erasure. - */ -class base : public std::enable_shared_from_this -{ - public: - virtual ~base() = default; - - virtual std::shared_ptr clone() const = 0; - - /** - * Determines if the given TOML element is a value. - */ - virtual bool is_value() const - { - return false; - } - - /** - * Determines if the given TOML element is a table. - */ - virtual bool is_table() const - { - return false; - } - - /** - * Converts the TOML element into a table. - */ - std::shared_ptr
as_table() - { - if (is_table()) - return std::static_pointer_cast
(shared_from_this()); - return nullptr; - } - /** - * Determines if the TOML element is an array of "leaf" elements. - */ - virtual bool is_array() const - { - return false; - } - - /** - * Converts the TOML element to an array. - */ - std::shared_ptr as_array() - { - if (is_array()) - return std::static_pointer_cast(shared_from_this()); - return nullptr; - } - - /** - * Determines if the given TOML element is an array of tables. - */ - virtual bool is_table_array() const - { - return false; - } - - /** - * Converts the TOML element into a table array. - */ - std::shared_ptr as_table_array() - { - if (is_table_array()) - return std::static_pointer_cast(shared_from_this()); - return nullptr; - } - - /** - * Attempts to coerce the TOML element into a concrete TOML value - * of type T. - */ - template - std::shared_ptr> as(); - - template - std::shared_ptr> as() const; - - template - void accept(Visitor&& visitor, Args&&... args) const; - -#if defined(CPPTOML_NO_RTTI) - base_type type() const - { - return type_; - } - - protected: - base(const base_type t) : type_(t) - { - // nothing - } - - private: - const base_type type_ = base_type::NONE; - -#else - protected: - base() - { - // nothing - } -#endif -}; - -/** - * A concrete TOML value representing the "leaves" of the "tree". - */ -template -class value : public base -{ - struct make_shared_enabler - { - // nothing; this is a private key accessible only to friends - }; - - template - friend std::shared_ptr::type> - cpptoml::make_value(U&& val); - - public: - static_assert(valid_value::value, "invalid value type"); - - std::shared_ptr clone() const override; - - value(const make_shared_enabler&, const T& val) : value(val) - { - // nothing; note that users cannot actually invoke this function - // because they lack access to the make_shared_enabler. - } - - bool is_value() const override - { - return true; - } - - /** - * Gets the data associated with this value. - */ - T& get() - { - return data_; - } - - /** - * Gets the data associated with this value. Const version. - */ - const T& get() const - { - return data_; - } - - private: - T data_; - - /** - * Constructs a value from the given data. 
- */ -#if defined(CPPTOML_NO_RTTI) - value(const T& val) : base(base_type_traits::type), data_(val) - { - } -#else - value(const T& val) : data_(val) - { - } -#endif - - value(const value& val) = delete; - value& operator=(const value& val) = delete; -}; - -template -std::shared_ptr::type> make_value(T&& val) -{ - using value_type = typename value_traits::type; - using enabler = typename value_type::make_shared_enabler; - return std::make_shared( - enabler{}, value_traits::construct(std::forward(val))); -} - -template -inline std::shared_ptr> base::as() -{ -#if defined(CPPTOML_NO_RTTI) - if (type() == base_type_traits::type) - return std::static_pointer_cast>(shared_from_this()); - else - return nullptr; -#else - return std::dynamic_pointer_cast>(shared_from_this()); -#endif -} - -// special case value to allow getting an integer parameter as a -// double value -template <> -inline std::shared_ptr> base::as() -{ -#if defined(CPPTOML_NO_RTTI) - if (type() == base_type::FLOAT) - return std::static_pointer_cast>(shared_from_this()); - - if (type() == base_type::INT) - { - auto v = std::static_pointer_cast>(shared_from_this()); - return make_value(static_cast(v->get())); - } -#else - if (auto v = std::dynamic_pointer_cast>(shared_from_this())) - return v; - - if (auto v = std::dynamic_pointer_cast>(shared_from_this())) - return make_value(static_cast(v->get())); -#endif - - return nullptr; -} - -template -inline std::shared_ptr> base::as() const -{ -#if defined(CPPTOML_NO_RTTI) - if (type() == base_type_traits::type) - return std::static_pointer_cast>(shared_from_this()); - else - return nullptr; -#else - return std::dynamic_pointer_cast>(shared_from_this()); -#endif -} - -// special case value to allow getting an integer parameter as a -// double value -template <> -inline std::shared_ptr> base::as() const -{ -#if defined(CPPTOML_NO_RTTI) - if (type() == base_type::FLOAT) - return std::static_pointer_cast>( - shared_from_this()); - - if (type() == base_type::INT) - { - auto v = as(); - // the below has to be a non-const value due to a bug in - // libc++: https://llvm.org/bugs/show_bug.cgi?id=18843 - return make_value(static_cast(v->get())); - } -#else - if (auto v - = std::dynamic_pointer_cast>(shared_from_this())) - return v; - - if (auto v = as()) - { - // the below has to be a non-const value due to a bug in - // libc++: https://llvm.org/bugs/show_bug.cgi?id=18843 - return make_value(static_cast(v->get())); - } -#endif - - return nullptr; -} - -/** - * Exception class for array insertion errors. - */ -class array_exception : public std::runtime_error -{ - public: - array_exception(const std::string& err) : std::runtime_error{err} - { - } -}; - -class array : public base -{ - public: - friend std::shared_ptr make_array(); - - std::shared_ptr clone() const override; - - virtual bool is_array() const override - { - return true; - } - - using size_type = std::size_t; - - /** - * arrays can be iterated over - */ - using iterator = std::vector>::iterator; - - /** - * arrays can be iterated over. Const version. - */ - using const_iterator = std::vector>::const_iterator; - - iterator begin() - { - return values_.begin(); - } - - const_iterator begin() const - { - return values_.begin(); - } - - iterator end() - { - return values_.end(); - } - - const_iterator end() const - { - return values_.end(); - } - - /** - * Obtains the array (vector) of base values. - */ - std::vector>& get() - { - return values_; - } - - /** - * Obtains the array (vector) of base values. Const version. 
- */ - const std::vector>& get() const - { - return values_; - } - - std::shared_ptr at(size_t idx) const - { - return values_.at(idx); - } - - /** - * Obtains an array of values. Note that elements may be - * nullptr if they cannot be converted to a value. - */ - template - std::vector>> array_of() const - { - std::vector>> result(values_.size()); - - std::transform(values_.begin(), values_.end(), result.begin(), - [&](std::shared_ptr v) { return v->as(); }); - - return result; - } - - /** - * Obtains a option>. The option will be empty if the array - * contains values that are not of type T. - */ - template - inline typename array_of_trait::return_type get_array_of() const - { - std::vector result; - result.reserve(values_.size()); - - for (const auto& val : values_) - { - if (auto v = val->as()) - result.push_back(v->get()); - else - return {}; - } - - return {std::move(result)}; - } - - /** - * Obtains an array of arrays. Note that elements may be nullptr - * if they cannot be converted to a array. - */ - std::vector> nested_array() const - { - std::vector> result(values_.size()); - - std::transform(values_.begin(), values_.end(), result.begin(), - [&](std::shared_ptr v) -> std::shared_ptr { - if (v->is_array()) - return std::static_pointer_cast(v); - return std::shared_ptr{}; - }); - - return result; - } - - /** - * Add a value to the end of the array - */ - template - void push_back(const std::shared_ptr>& val) - { - if (values_.empty() || values_[0]->as()) - { - values_.push_back(val); - } - else - { - throw array_exception{"Arrays must be homogenous."}; - } - } - - /** - * Add an array to the end of the array - */ - void push_back(const std::shared_ptr& val) - { - if (values_.empty() || values_[0]->is_array()) - { - values_.push_back(val); - } - else - { - throw array_exception{"Arrays must be homogenous."}; - } - } - - /** - * Convenience function for adding a simple element to the end - * of the array. - */ - template - void push_back(T&& val, typename value_traits::type* = 0) - { - push_back(make_value(std::forward(val))); - } - - /** - * Insert a value into the array - */ - template - iterator insert(iterator position, const std::shared_ptr>& value) - { - if (values_.empty() || values_[0]->as()) - { - return values_.insert(position, value); - } - else - { - throw array_exception{"Arrays must be homogenous."}; - } - } - - /** - * Insert an array into the array - */ - iterator insert(iterator position, const std::shared_ptr& value) - { - if (values_.empty() || values_[0]->is_array()) - { - return values_.insert(position, value); - } - else - { - throw array_exception{"Arrays must be homogenous."}; - } - } - - /** - * Convenience function for inserting a simple element in the array - */ - template - iterator insert(iterator position, T&& val, - typename value_traits::type* = 0) - { - return insert(position, make_value(std::forward(val))); - } - - /** - * Erase an element from the array - */ - iterator erase(iterator position) - { - return values_.erase(position); - } - - /** - * Clear the array - */ - void clear() - { - values_.clear(); - } - - /** - * Reserve space for n values. 
- */ - void reserve(size_type n) - { - values_.reserve(n); - } - - private: -#if defined(CPPTOML_NO_RTTI) - array() : base(base_type::ARRAY) - { - // empty - } -#else - array() = default; -#endif - - template - array(InputIterator begin, InputIterator end) : values_{begin, end} - { - // nothing - } - - array(const array& obj) = delete; - array& operator=(const array& obj) = delete; - - std::vector> values_; -}; - -inline std::shared_ptr make_array() -{ - struct make_shared_enabler : public array - { - make_shared_enabler() - { - // nothing - } - }; - - return std::make_shared(); -} - -namespace detail -{ -template <> -inline std::shared_ptr make_element() -{ - return make_array(); -} -} // namespace detail - -/** - * Obtains a option>. The option will be empty if the array - * contains values that are not of type T. - */ -template <> -inline typename array_of_trait::return_type -array::get_array_of() const -{ - std::vector> result; - result.reserve(values_.size()); - - for (const auto& val : values_) - { - if (auto v = val->as_array()) - result.push_back(v); - else - return {}; - } - - return {std::move(result)}; -} - -class table; - -class table_array : public base -{ - friend class table; - friend std::shared_ptr make_table_array(bool); - - public: - std::shared_ptr clone() const override; - - using size_type = std::size_t; - - /** - * arrays can be iterated over - */ - using iterator = std::vector>::iterator; - - /** - * arrays can be iterated over. Const version. - */ - using const_iterator = std::vector>::const_iterator; - - iterator begin() - { - return array_.begin(); - } - - const_iterator begin() const - { - return array_.begin(); - } - - iterator end() - { - return array_.end(); - } - - const_iterator end() const - { - return array_.end(); - } - - virtual bool is_table_array() const override - { - return true; - } - - std::vector>& get() - { - return array_; - } - - const std::vector>& get() const - { - return array_; - } - - /** - * Add a table to the end of the array - */ - void push_back(const std::shared_ptr
& val) - { - array_.push_back(val); - } - - /** - * Insert a table into the array - */ - iterator insert(iterator position, const std::shared_ptr
& value) - { - return array_.insert(position, value); - } - - /** - * Erase an element from the array - */ - iterator erase(iterator position) - { - return array_.erase(position); - } - - /** - * Clear the array - */ - void clear() - { - array_.clear(); - } - - /** - * Reserve space for n tables. - */ - void reserve(size_type n) - { - array_.reserve(n); - } - - /** - * Whether or not the table array is declared inline. This mostly - * matters for parsing, where statically defined arrays cannot be - * appended to using the array-of-table syntax. - */ - bool is_inline() const - { - return is_inline_; - } - - private: -#if defined(CPPTOML_NO_RTTI) - table_array(bool is_inline = false) - : base(base_type::TABLE_ARRAY), is_inline_(is_inline) - { - // nothing - } -#else - table_array(bool is_inline = false) : is_inline_(is_inline) - { - // nothing - } -#endif - - table_array(const table_array& obj) = delete; - table_array& operator=(const table_array& rhs) = delete; - - std::vector> array_; - const bool is_inline_ = false; -}; - -inline std::shared_ptr make_table_array(bool is_inline) -{ - struct make_shared_enabler : public table_array - { - make_shared_enabler(bool mse_is_inline) : table_array(mse_is_inline) - { - // nothing - } - }; - - return std::make_shared(is_inline); -} - -namespace detail -{ -template <> -inline std::shared_ptr make_element() -{ - return make_table_array(true); -} -} // namespace detail - -// The below are overloads for fetching specific value types out of a value -// where special casting behavior (like bounds checking) is desired - -template -typename std::enable_if::value - && std::is_signed::value, - option>::type -get_impl(const std::shared_ptr& elem) -{ - if (auto v = elem->as()) - { - if (v->get() < (std::numeric_limits::min)()) - throw std::underflow_error{ - "T cannot represent the value requested in get"}; - - if (v->get() > (std::numeric_limits::max)()) - throw std::overflow_error{ - "T cannot represent the value requested in get"}; - - return {static_cast(v->get())}; - } - else - { - return {}; - } -} - -template -typename std::enable_if::value - && std::is_unsigned::value, - option>::type -get_impl(const std::shared_ptr& elem) -{ - if (auto v = elem->as()) - { - if (v->get() < 0) - throw std::underflow_error{"T cannot store negative value in get"}; - - if (static_cast(v->get()) > (std::numeric_limits::max)()) - throw std::overflow_error{ - "T cannot represent the value requested in get"}; - - return {static_cast(v->get())}; - } - else - { - return {}; - } -} - -template -typename std::enable_if::value - || std::is_same::value, - option>::type -get_impl(const std::shared_ptr& elem) -{ - if (auto v = elem->as()) - { - return {v->get()}; - } - else - { - return {}; - } -} - -/** - * Represents a TOML keytable. - */ -class table : public base -{ - public: - friend class table_array; - friend std::shared_ptr
make_table(); - - std::shared_ptr clone() const override; - - /** - * tables can be iterated over. - */ - using iterator = string_to_base_map::iterator; - - /** - * tables can be iterated over. Const version. - */ - using const_iterator = string_to_base_map::const_iterator; - - iterator begin() - { - return map_.begin(); - } - - const_iterator begin() const - { - return map_.begin(); - } - - iterator end() - { - return map_.end(); - } - - const_iterator end() const - { - return map_.end(); - } - - bool is_table() const override - { - return true; - } - - bool empty() const - { - return map_.empty(); - } - - /** - * Determines if this key table contains the given key. - */ - bool contains(const std::string& key) const - { - return map_.find(key) != map_.end(); - } - - /** - * Determines if this key table contains the given key. Will - * resolve "qualified keys". Qualified keys are the full access - * path separated with dots like "grandparent.parent.child". - */ - bool contains_qualified(const std::string& key) const - { - return resolve_qualified(key); - } - - /** - * Obtains the base for a given key. - * @throw std::out_of_range if the key does not exist - */ - std::shared_ptr get(const std::string& key) const - { - return map_.at(key); - } - - /** - * Obtains the base for a given key. Will resolve "qualified - * keys". Qualified keys are the full access path separated with - * dots like "grandparent.parent.child". - * - * @throw std::out_of_range if the key does not exist - */ - std::shared_ptr get_qualified(const std::string& key) const - { - std::shared_ptr p; - resolve_qualified(key, &p); - return p; - } - - /** - * Obtains a table for a given key, if possible. - */ - std::shared_ptr
get_table(const std::string& key) const - { - if (contains(key) && get(key)->is_table()) - return std::static_pointer_cast
(get(key)); - return nullptr; - } - - /** - * Obtains a table for a given key, if possible. Will resolve - * "qualified keys". - */ - std::shared_ptr
get_table_qualified(const std::string& key) const - { - if (contains_qualified(key) && get_qualified(key)->is_table()) - return std::static_pointer_cast
(get_qualified(key)); - return nullptr; - } - - /** - * Obtains an array for a given key. - */ - std::shared_ptr get_array(const std::string& key) const - { - if (!contains(key)) - return nullptr; - return get(key)->as_array(); - } - - /** - * Obtains an array for a given key. Will resolve "qualified keys". - */ - std::shared_ptr get_array_qualified(const std::string& key) const - { - if (!contains_qualified(key)) - return nullptr; - return get_qualified(key)->as_array(); - } - - /** - * Obtains a table_array for a given key, if possible. - */ - std::shared_ptr get_table_array(const std::string& key) const - { - if (!contains(key)) - return nullptr; - return get(key)->as_table_array(); - } - - /** - * Obtains a table_array for a given key, if possible. Will resolve - * "qualified keys". - */ - std::shared_ptr - get_table_array_qualified(const std::string& key) const - { - if (!contains_qualified(key)) - return nullptr; - return get_qualified(key)->as_table_array(); - } - - /** - * Helper function that attempts to get a value corresponding - * to the template parameter from a given key. - */ - template - option get_as(const std::string& key) const - { - try - { - return get_impl(get(key)); - } - catch (const std::out_of_range&) - { - return {}; - } - } - - /** - * Helper function that attempts to get a value corresponding - * to the template parameter from a given key. Will resolve "qualified - * keys". - */ - template - option get_qualified_as(const std::string& key) const - { - try - { - return get_impl(get_qualified(key)); - } - catch (const std::out_of_range&) - { - return {}; - } - } - - /** - * Helper function that attempts to get an array of values of a given - * type corresponding to the template parameter for a given key. - * - * If the key doesn't exist, doesn't exist as an array type, or one or - * more keys inside the array type are not of type T, an empty option - * is returned. Otherwise, an option containing a vector of the values - * is returned. - */ - template - inline typename array_of_trait::return_type - get_array_of(const std::string& key) const - { - if (auto v = get_array(key)) - { - std::vector result; - result.reserve(v->get().size()); - - for (const auto& b : v->get()) - { - if (auto val = b->as()) - result.push_back(val->get()); - else - return {}; - } - return {std::move(result)}; - } - - return {}; - } - - /** - * Helper function that attempts to get an array of values of a given - * type corresponding to the template parameter for a given key. Will - * resolve "qualified keys". - * - * If the key doesn't exist, doesn't exist as an array type, or one or - * more keys inside the array type are not of type T, an empty option - * is returned. Otherwise, an option containing a vector of the values - * is returned. - */ - template - inline typename array_of_trait::return_type - get_qualified_array_of(const std::string& key) const - { - if (auto v = get_array_qualified(key)) - { - std::vector result; - result.reserve(v->get().size()); - - for (const auto& b : v->get()) - { - if (auto val = b->as()) - result.push_back(val->get()); - else - return {}; - } - return {std::move(result)}; - } - - return {}; - } - - /** - * Adds an element to the keytable. - */ - void insert(const std::string& key, const std::shared_ptr& value) - { - map_[key] = value; - } - - /** - * Convenience shorthand for adding a simple element to the - * keytable. 
- */ - template - void insert(const std::string& key, T&& val, - typename value_traits::type* = 0) - { - insert(key, make_value(std::forward(val))); - } - - /** - * Removes an element from the table. - */ - void erase(const std::string& key) - { - map_.erase(key); - } - - private: -#if defined(CPPTOML_NO_RTTI) - table() : base(base_type::TABLE) - { - // nothing - } -#else - table() - { - // nothing - } -#endif - - table(const table& obj) = delete; - table& operator=(const table& rhs) = delete; - - std::vector split(const std::string& value, - char separator) const - { - std::vector result; - std::string::size_type p = 0; - std::string::size_type q; - while ((q = value.find(separator, p)) != std::string::npos) - { - result.emplace_back(value, p, q - p); - p = q + 1; - } - result.emplace_back(value, p); - return result; - } - - // If output parameter p is specified, fill it with the pointer to the - // specified entry and throw std::out_of_range if it couldn't be found. - // - // Otherwise, just return true if the entry could be found or false - // otherwise and do not throw. - bool resolve_qualified(const std::string& key, - std::shared_ptr* p = nullptr) const - { - auto parts = split(key, '.'); - auto last_key = parts.back(); - parts.pop_back(); - - auto cur_table = this; - for (const auto& part : parts) - { - cur_table = cur_table->get_table(part).get(); - if (!cur_table) - { - if (!p) - return false; - - throw std::out_of_range{key + " is not a valid key"}; - } - } - - if (!p) - return cur_table->map_.count(last_key) != 0; - - *p = cur_table->map_.at(last_key); - return true; - } - - string_to_base_map map_; -}; - -/** - * Helper function that attempts to get an array of arrays for a given - * key. - * - * If the key doesn't exist, doesn't exist as an array type, or one or - * more keys inside the array type are not of type T, an empty option - * is returned. Otherwise, an option containing a vector of the values - * is returned. - */ -template <> -inline typename array_of_trait::return_type -table::get_array_of(const std::string& key) const -{ - if (auto v = get_array(key)) - { - std::vector> result; - result.reserve(v->get().size()); - - for (const auto& b : v->get()) - { - if (auto val = b->as_array()) - result.push_back(val); - else - return {}; - } - - return {std::move(result)}; - } - - return {}; -} - -/** - * Helper function that attempts to get an array of arrays for a given - * key. Will resolve "qualified keys". - * - * If the key doesn't exist, doesn't exist as an array type, or one or - * more keys inside the array type are not of type T, an empty option - * is returned. Otherwise, an option containing a vector of the values - * is returned. - */ -template <> -inline typename array_of_trait::return_type -table::get_qualified_array_of(const std::string& key) const -{ - if (auto v = get_array_qualified(key)) - { - std::vector> result; - result.reserve(v->get().size()); - - for (const auto& b : v->get()) - { - if (auto val = b->as_array()) - result.push_back(val); - else - return {}; - } - - return {std::move(result)}; - } - - return {}; -} - -std::shared_ptr
make_table() -{ - struct make_shared_enabler : public table - { - make_shared_enabler() - { - // nothing - } - }; - - return std::make_shared(); -} - -namespace detail -{ -template <> -inline std::shared_ptr
make_element
() -{ - return make_table(); -} -} // namespace detail - -template -std::shared_ptr value::clone() const -{ - return make_value(data_); -} - -inline std::shared_ptr array::clone() const -{ - auto result = make_array(); - result->reserve(values_.size()); - for (const auto& ptr : values_) - result->values_.push_back(ptr->clone()); - return result; -} - -inline std::shared_ptr table_array::clone() const -{ - auto result = make_table_array(is_inline()); - result->reserve(array_.size()); - for (const auto& ptr : array_) - result->array_.push_back(ptr->clone()->as_table()); - return result; -} - -inline std::shared_ptr table::clone() const -{ - auto result = make_table(); - for (const auto& pr : map_) - result->insert(pr.first, pr.second->clone()); - return result; -} - -/** - * Exception class for all TOML parsing errors. - */ -class parse_exception : public std::runtime_error -{ - public: - parse_exception(const std::string& err) : std::runtime_error{err} - { - } - - parse_exception(const std::string& err, std::size_t line_number) - : std::runtime_error{err + " at line " + std::to_string(line_number)} - { - } -}; - -inline bool is_number(char c) -{ - return c >= '0' && c <= '9'; -} - -inline bool is_hex(char c) -{ - return is_number(c) || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'); -} - -/** - * Helper object for consuming expected characters. - */ -template -class consumer -{ - public: - consumer(std::string::iterator& it, const std::string::iterator& end, - OnError&& on_error) - : it_(it), end_(end), on_error_(std::forward(on_error)) - { - // nothing - } - - void operator()(char c) - { - if (it_ == end_ || *it_ != c) - on_error_(); - ++it_; - } - - template - void operator()(const char (&str)[N]) - { - std::for_each(std::begin(str), std::end(str) - 1, - [&](char c) { (*this)(c); }); - } - - void eat_or(char a, char b) - { - if (it_ == end_ || (*it_ != a && *it_ != b)) - on_error_(); - ++it_; - } - - int eat_digits(int len) - { - int val = 0; - for (int i = 0; i < len; ++i) - { - if (!is_number(*it_) || it_ == end_) - on_error_(); - val = 10 * val + (*it_++ - '0'); - } - return val; - } - - void error() - { - on_error_(); - } - - private: - std::string::iterator& it_; - const std::string::iterator& end_; - OnError on_error_; -}; - -template -consumer make_consumer(std::string::iterator& it, - const std::string::iterator& end, - OnError&& on_error) -{ - return consumer(it, end, std::forward(on_error)); -} - -// replacement for std::getline to handle incorrectly line-ended files -// https://stackoverflow.com/questions/6089231/getting-std-ifstream-to-handle-lf-cr-and-crlf -namespace detail -{ -inline std::istream& getline(std::istream& input, std::string& line) -{ - line.clear(); - - std::istream::sentry sentry{input, true}; - auto sb = input.rdbuf(); - - while (true) - { - auto c = sb->sbumpc(); - if (c == '\r') - { - if (sb->sgetc() == '\n') - c = sb->sbumpc(); - } - - if (c == '\n') - return input; - - if (c == std::istream::traits_type::eof()) - { - if (line.empty()) - input.setstate(std::ios::eofbit); - return input; - } - - line.push_back(static_cast(c)); - } -} -} // namespace detail - -/** - * The parser class. - */ -class parser -{ - public: - /** - * Parsers are constructed from streams. - */ - parser(std::istream& stream) : input_(stream) - { - // nothing - } - - parser& operator=(const parser& parser) = delete; - - /** - * Parses the stream this parser was created on until EOF. - * @throw parse_exception if there are errors in parsing - */ - std::shared_ptr
parse() - { - std::shared_ptr
root = make_table(); - - table* curr_table = root.get(); - - while (detail::getline(input_, line_)) - { - line_number_++; - auto it = line_.begin(); - auto end = line_.end(); - consume_whitespace(it, end); - if (it == end || *it == '#') - continue; - if (*it == '[') - { - curr_table = root.get(); - parse_table(it, end, curr_table); - } - else - { - parse_key_value(it, end, curr_table); - consume_whitespace(it, end); - eol_or_comment(it, end); - } - } - return root; - } - - private: -#if defined _MSC_VER - __declspec(noreturn) -#elif defined __GNUC__ - __attribute__((noreturn)) -#endif - void throw_parse_exception(const std::string& err) - { - throw parse_exception{err, line_number_}; - } - - void parse_table(std::string::iterator& it, - const std::string::iterator& end, table*& curr_table) - { - // remove the beginning keytable marker - ++it; - if (it == end) - throw_parse_exception("Unexpected end of table"); - if (*it == '[') - parse_table_array(it, end, curr_table); - else - parse_single_table(it, end, curr_table); - } - - void parse_single_table(std::string::iterator& it, - const std::string::iterator& end, - table*& curr_table) - { - if (it == end || *it == ']') - throw_parse_exception("Table name cannot be empty"); - - std::string full_table_name; - bool inserted = false; - - auto key_end = [](char c) { return c == ']'; }; - - auto key_part_handler = [&](const std::string& part) { - if (part.empty()) - throw_parse_exception("Empty component of table name"); - - if (!full_table_name.empty()) - full_table_name += '.'; - full_table_name += part; - - if (curr_table->contains(part)) - { -#if !defined(__PGI) - auto b = curr_table->get(part); -#else - // Workaround for PGI compiler - std::shared_ptr b = curr_table->get(part); -#endif - if (b->is_table()) - curr_table = static_cast(b.get()); - else if (b->is_table_array()) - curr_table = std::static_pointer_cast(b) - ->get() - .back() - .get(); - else - throw_parse_exception("Key " + full_table_name - + "already exists as a value"); - } - else - { - inserted = true; - curr_table->insert(part, make_table()); - curr_table = static_cast(curr_table->get(part).get()); - } - }; - - key_part_handler(parse_key(it, end, key_end, key_part_handler)); - - if (it == end) - throw_parse_exception( - "Unterminated table declaration; did you forget a ']'?"); - - if (*it != ']') - { - std::string errmsg{"Unexpected character in table definition: "}; - errmsg += '"'; - errmsg += *it; - errmsg += '"'; - throw_parse_exception(errmsg); - } - - // table already existed - if (!inserted) - { - auto is_value - = [](const std::pair&>& p) { - return p.second->is_value(); - }; - - // if there are any values, we can't add values to this table - // since it has already been defined. 
If there aren't any - // values, then it was implicitly created by something like - // [a.b] - if (curr_table->empty() - || std::any_of(curr_table->begin(), curr_table->end(), - is_value)) - { - throw_parse_exception("Redefinition of table " - + full_table_name); - } - } - - ++it; - consume_whitespace(it, end); - eol_or_comment(it, end); - } - - void parse_table_array(std::string::iterator& it, - const std::string::iterator& end, table*& curr_table) - { - ++it; - if (it == end || *it == ']') - throw_parse_exception("Table array name cannot be empty"); - - auto key_end = [](char c) { return c == ']'; }; - - std::string full_ta_name; - auto key_part_handler = [&](const std::string& part) { - if (part.empty()) - throw_parse_exception("Empty component of table array name"); - - if (!full_ta_name.empty()) - full_ta_name += '.'; - full_ta_name += part; - - if (curr_table->contains(part)) - { -#if !defined(__PGI) - auto b = curr_table->get(part); -#else - // Workaround for PGI compiler - std::shared_ptr b = curr_table->get(part); -#endif - - // if this is the end of the table array name, add an - // element to the table array that we just looked up, - // provided it was not declared inline - if (it != end && *it == ']') - { - if (!b->is_table_array()) - { - throw_parse_exception("Key " + full_ta_name - + " is not a table array"); - } - - auto v = b->as_table_array(); - - if (v->is_inline()) - { - throw_parse_exception("Static array " + full_ta_name - + " cannot be appended to"); - } - - v->get().push_back(make_table()); - curr_table = v->get().back().get(); - } - // otherwise, just keep traversing down the key name - else - { - if (b->is_table()) - curr_table = static_cast(b.get()); - else if (b->is_table_array()) - curr_table = std::static_pointer_cast(b) - ->get() - .back() - .get(); - else - throw_parse_exception("Key " + full_ta_name - + " already exists as a value"); - } - } - else - { - // if this is the end of the table array name, add a new - // table array and a new table inside that array for us to - // add keys to next - if (it != end && *it == ']') - { - curr_table->insert(part, make_table_array()); - auto arr = std::static_pointer_cast( - curr_table->get(part)); - arr->get().push_back(make_table()); - curr_table = arr->get().back().get(); - } - // otherwise, create the implicitly defined table and move - // down to it - else - { - curr_table->insert(part, make_table()); - curr_table - = static_cast(curr_table->get(part).get()); - } - } - }; - - key_part_handler(parse_key(it, end, key_end, key_part_handler)); - - // consume the last "]]" - auto eat = make_consumer(it, end, [this]() { - throw_parse_exception("Unterminated table array name"); - }); - eat(']'); - eat(']'); - - consume_whitespace(it, end); - eol_or_comment(it, end); - } - - void parse_key_value(std::string::iterator& it, std::string::iterator& end, - table* curr_table) - { - auto key_end = [](char c) { return c == '='; }; - - auto key_part_handler = [&](const std::string& part) { - // two cases: this key part exists already, in which case it must - // be a table, or it doesn't exist in which case we must create - // an implicitly defined table - if (curr_table->contains(part)) - { - auto val = curr_table->get(part); - if (val->is_table()) - { - curr_table = static_cast(val.get()); - } - else - { - throw_parse_exception("Key " + part - + " already exists as a value"); - } - } - else - { - auto newtable = make_table(); - curr_table->insert(part, newtable); - curr_table = newtable.get(); - } - }; - - auto key = parse_key(it, 
end, key_end, key_part_handler); - - if (curr_table->contains(key)) - throw_parse_exception("Key " + key + " already present"); - if (it == end || *it != '=') - throw_parse_exception("Value must follow after a '='"); - ++it; - consume_whitespace(it, end); - curr_table->insert(key, parse_value(it, end)); - consume_whitespace(it, end); - } - - template - std::string - parse_key(std::string::iterator& it, const std::string::iterator& end, - KeyEndFinder&& key_end, KeyPartHandler&& key_part_handler) - { - // parse the key as a series of one or more simple-keys joined with '.' - while (it != end && !key_end(*it)) - { - auto part = parse_simple_key(it, end); - consume_whitespace(it, end); - - if (it == end || key_end(*it)) - { - return part; - } - - if (*it != '.') - { - std::string errmsg{"Unexpected character in key: "}; - errmsg += '"'; - errmsg += *it; - errmsg += '"'; - throw_parse_exception(errmsg); - } - - key_part_handler(part); - - // consume the dot - ++it; - } - - throw_parse_exception("Unexpected end of key"); - } - - std::string parse_simple_key(std::string::iterator& it, - const std::string::iterator& end) - { - consume_whitespace(it, end); - - if (it == end) - throw_parse_exception("Unexpected end of key (blank key?)"); - - if (*it == '"' || *it == '\'') - { - return string_literal(it, end, *it); - } - else - { - auto bke = std::find_if(it, end, [](char c) { - return c == '.' || c == '=' || c == ']'; - }); - return parse_bare_key(it, bke); - } - } - - std::string parse_bare_key(std::string::iterator& it, - const std::string::iterator& end) - { - if (it == end) - { - throw_parse_exception("Bare key missing name"); - } - - auto key_end = end; - --key_end; - consume_backwards_whitespace(key_end, it); - ++key_end; - std::string key{it, key_end}; - - if (std::find(it, key_end, '#') != key_end) - { - throw_parse_exception("Bare key " + key + " cannot contain #"); - } - - if (std::find_if(it, key_end, - [](char c) { return c == ' ' || c == '\t'; }) - != key_end) - { - throw_parse_exception("Bare key " + key - + " cannot contain whitespace"); - } - - if (std::find_if(it, key_end, - [](char c) { return c == '[' || c == ']'; }) - != key_end) - { - throw_parse_exception("Bare key " + key - + " cannot contain '[' or ']'"); - } - - it = end; - return key; - } - - enum class parse_type - { - STRING = 1, - LOCAL_TIME, - LOCAL_DATE, - LOCAL_DATETIME, - OFFSET_DATETIME, - INT, - FLOAT, - BOOL, - ARRAY, - INLINE_TABLE - }; - - std::shared_ptr parse_value(std::string::iterator& it, - std::string::iterator& end) - { - parse_type type = determine_value_type(it, end); - switch (type) - { - case parse_type::STRING: - return parse_string(it, end); - case parse_type::LOCAL_TIME: - return parse_time(it, end); - case parse_type::LOCAL_DATE: - case parse_type::LOCAL_DATETIME: - case parse_type::OFFSET_DATETIME: - return parse_date(it, end); - case parse_type::INT: - case parse_type::FLOAT: - return parse_number(it, end); - case parse_type::BOOL: - return parse_bool(it, end); - case parse_type::ARRAY: - return parse_array(it, end); - case parse_type::INLINE_TABLE: - return parse_inline_table(it, end); - default: - throw_parse_exception("Failed to parse value"); - } - } - - parse_type determine_value_type(const std::string::iterator& it, - const std::string::iterator& end) - { - if (it == end) - { - throw_parse_exception("Failed to parse value type"); - } - if (*it == '"' || *it == '\'') - { - return parse_type::STRING; - } - else if (is_time(it, end)) - { - return parse_type::LOCAL_TIME; - } - else if (auto 
dtype = date_type(it, end)) - { - return *dtype; - } - else if (is_number(*it) || *it == '-' || *it == '+' - || (*it == 'i' && it + 1 != end && it[1] == 'n' - && it + 2 != end && it[2] == 'f') - || (*it == 'n' && it + 1 != end && it[1] == 'a' - && it + 2 != end && it[2] == 'n')) - { - return determine_number_type(it, end); - } - else if (*it == 't' || *it == 'f') - { - return parse_type::BOOL; - } - else if (*it == '[') - { - return parse_type::ARRAY; - } - else if (*it == '{') - { - return parse_type::INLINE_TABLE; - } - throw_parse_exception("Failed to parse value type"); - } - - parse_type determine_number_type(const std::string::iterator& it, - const std::string::iterator& end) - { - // determine if we are an integer or a float - auto check_it = it; - if (*check_it == '-' || *check_it == '+') - ++check_it; - - if (check_it == end) - throw_parse_exception("Malformed number"); - - if (*check_it == 'i' || *check_it == 'n') - return parse_type::FLOAT; - - while (check_it != end && is_number(*check_it)) - ++check_it; - if (check_it != end && *check_it == '.') - { - ++check_it; - while (check_it != end && is_number(*check_it)) - ++check_it; - return parse_type::FLOAT; - } - else - { - return parse_type::INT; - } - } - - std::shared_ptr> parse_string(std::string::iterator& it, - std::string::iterator& end) - { - auto delim = *it; - assert(delim == '"' || delim == '\''); - - // end is non-const here because we have to be able to potentially - // parse multiple lines in a string, not just one - auto check_it = it; - ++check_it; - if (check_it != end && *check_it == delim) - { - ++check_it; - if (check_it != end && *check_it == delim) - { - it = ++check_it; - return parse_multiline_string(it, end, delim); - } - } - return make_value(string_literal(it, end, delim)); - } - - std::shared_ptr> - parse_multiline_string(std::string::iterator& it, - std::string::iterator& end, char delim) - { - std::stringstream ss; - - auto is_ws = [](char c) { return c == ' ' || c == '\t'; }; - - bool consuming = false; - std::shared_ptr> ret; - - auto handle_line = [&](std::string::iterator& local_it, - std::string::iterator& local_end) { - if (consuming) - { - local_it = std::find_if_not(local_it, local_end, is_ws); - - // whole line is whitespace - if (local_it == local_end) - return; - } - - consuming = false; - - while (local_it != local_end) - { - // handle escaped characters - if (delim == '"' && *local_it == '\\') - { - auto check = local_it; - // check if this is an actual escape sequence or a - // whitespace escaping backslash - ++check; - consume_whitespace(check, local_end); - if (check == local_end) - { - consuming = true; - break; - } - - ss << parse_escape_code(local_it, local_end); - continue; - } - - // if we can end the string - if (std::distance(local_it, local_end) >= 3) - { - auto check = local_it; - // check for """ - if (*check++ == delim && *check++ == delim - && *check++ == delim) - { - local_it = check; - ret = make_value(ss.str()); - break; - } - } - - ss << *local_it++; - } - }; - - // handle the remainder of the current line - handle_line(it, end); - if (ret) - return ret; - - // start eating lines - while (detail::getline(input_, line_)) - { - ++line_number_; - - it = line_.begin(); - end = line_.end(); - - handle_line(it, end); - - if (ret) - return ret; - - if (!consuming) - ss << std::endl; - } - - throw_parse_exception("Unterminated multi-line basic string"); - } - - std::string string_literal(std::string::iterator& it, - const std::string::iterator& end, char delim) - { - ++it; - 
std::string val; - while (it != end) - { - // handle escaped characters - if (delim == '"' && *it == '\\') - { - val += parse_escape_code(it, end); - } - else if (*it == delim) - { - ++it; - consume_whitespace(it, end); - return val; - } - else - { - val += *it++; - } - } - throw_parse_exception("Unterminated string literal"); - } - - std::string parse_escape_code(std::string::iterator& it, - const std::string::iterator& end) - { - ++it; - if (it == end) - throw_parse_exception("Invalid escape sequence"); - char value; - if (*it == 'b') - { - value = '\b'; - } - else if (*it == 't') - { - value = '\t'; - } - else if (*it == 'n') - { - value = '\n'; - } - else if (*it == 'f') - { - value = '\f'; - } - else if (*it == 'r') - { - value = '\r'; - } - else if (*it == '"') - { - value = '"'; - } - else if (*it == '\\') - { - value = '\\'; - } - else if (*it == 'u' || *it == 'U') - { - return parse_unicode(it, end); - } - else - { - throw_parse_exception("Invalid escape sequence"); - } - ++it; - return std::string(1, value); - } - - std::string parse_unicode(std::string::iterator& it, - const std::string::iterator& end) - { - bool large = *it++ == 'U'; - auto codepoint = parse_hex(it, end, large ? 0x10000000 : 0x1000); - - if ((codepoint > 0xd7ff && codepoint < 0xe000) || codepoint > 0x10ffff) - { - throw_parse_exception( - "Unicode escape sequence is not a Unicode scalar value"); - } - - std::string result; - // See Table 3-6 of the Unicode standard - if (codepoint <= 0x7f) - { - // 1-byte codepoints: 00000000 0xxxxxxx - // repr: 0xxxxxxx - result += static_cast(codepoint & 0x7f); - } - else if (codepoint <= 0x7ff) - { - // 2-byte codepoints: 00000yyy yyxxxxxx - // repr: 110yyyyy 10xxxxxx - // - // 0x1f = 00011111 - // 0xc0 = 11000000 - // - result += static_cast(0xc0 | ((codepoint >> 6) & 0x1f)); - // - // 0x80 = 10000000 - // 0x3f = 00111111 - // - result += static_cast(0x80 | (codepoint & 0x3f)); - } - else if (codepoint <= 0xffff) - { - // 3-byte codepoints: zzzzyyyy yyxxxxxx - // repr: 1110zzzz 10yyyyyy 10xxxxxx - // - // 0xe0 = 11100000 - // 0x0f = 00001111 - // - result += static_cast(0xe0 | ((codepoint >> 12) & 0x0f)); - result += static_cast(0x80 | ((codepoint >> 6) & 0x1f)); - result += static_cast(0x80 | (codepoint & 0x3f)); - } - else - { - // 4-byte codepoints: 000uuuuu zzzzyyyy yyxxxxxx - // repr: 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx - // - // 0xf0 = 11110000 - // 0x07 = 00000111 - // - result += static_cast(0xf0 | ((codepoint >> 18) & 0x07)); - result += static_cast(0x80 | ((codepoint >> 12) & 0x3f)); - result += static_cast(0x80 | ((codepoint >> 6) & 0x3f)); - result += static_cast(0x80 | (codepoint & 0x3f)); - } - return result; - } - - uint32_t parse_hex(std::string::iterator& it, - const std::string::iterator& end, uint32_t place) - { - uint32_t value = 0; - while (place > 0) - { - if (it == end) - throw_parse_exception("Unexpected end of unicode sequence"); - - if (!is_hex(*it)) - throw_parse_exception("Invalid unicode escape sequence"); - - value += place * hex_to_digit(*it++); - place /= 16; - } - return value; - } - - uint32_t hex_to_digit(char c) - { - if (is_number(c)) - return static_cast(c - '0'); - return 10 - + static_cast(c - - ((c >= 'a' && c <= 'f') ? 
'a' : 'A')); - } - - std::shared_ptr parse_number(std::string::iterator& it, - const std::string::iterator& end) - { - auto check_it = it; - auto check_end = find_end_of_number(it, end); - - auto eat_sign = [&]() { - if (check_it != end && (*check_it == '-' || *check_it == '+')) - ++check_it; - }; - - auto check_no_leading_zero = [&]() { - if (check_it != end && *check_it == '0' && check_it + 1 != check_end - && check_it[1] != '.') - { - throw_parse_exception("Numbers may not have leading zeros"); - } - }; - - auto eat_digits = [&](bool (*check_char)(char)) { - auto beg = check_it; - while (check_it != end && check_char(*check_it)) - { - ++check_it; - if (check_it != end && *check_it == '_') - { - ++check_it; - if (check_it == end || !check_char(*check_it)) - throw_parse_exception("Malformed number"); - } - } - - if (check_it == beg) - throw_parse_exception("Malformed number"); - }; - - auto eat_hex = [&]() { eat_digits(&is_hex); }; - - auto eat_numbers = [&]() { eat_digits(&is_number); }; - - if (check_it != end && *check_it == '0' && check_it + 1 != check_end - && (check_it[1] == 'x' || check_it[1] == 'o' || check_it[1] == 'b')) - { - ++check_it; - char base = *check_it; - ++check_it; - if (base == 'x') - { - eat_hex(); - return parse_int(it, check_it, 16); - } - else if (base == 'o') - { - auto start = check_it; - eat_numbers(); - auto val = parse_int(start, check_it, 8, "0"); - it = start; - return val; - } - else // if (base == 'b') - { - auto start = check_it; - eat_numbers(); - auto val = parse_int(start, check_it, 2); - it = start; - return val; - } - } - - eat_sign(); - check_no_leading_zero(); - - if (check_it != end && check_it + 1 != end && check_it + 2 != end) - { - if (check_it[0] == 'i' && check_it[1] == 'n' && check_it[2] == 'f') - { - auto val = std::numeric_limits::infinity(); - if (*it == '-') - val = -val; - it = check_it + 3; - return make_value(val); - } - else if (check_it[0] == 'n' && check_it[1] == 'a' - && check_it[2] == 'n') - { - auto val = std::numeric_limits::quiet_NaN(); - if (*it == '-') - val = -val; - it = check_it + 3; - return make_value(val); - } - } - - eat_numbers(); - - if (check_it != end - && (*check_it == '.' 
|| *check_it == 'e' || *check_it == 'E')) - { - bool is_exp = *check_it == 'e' || *check_it == 'E'; - - ++check_it; - if (check_it == end) - throw_parse_exception("Floats must have trailing digits"); - - auto eat_exp = [&]() { - eat_sign(); - check_no_leading_zero(); - eat_numbers(); - }; - - if (is_exp) - eat_exp(); - else - eat_numbers(); - - if (!is_exp && check_it != end - && (*check_it == 'e' || *check_it == 'E')) - { - ++check_it; - eat_exp(); - } - - return parse_float(it, check_it); - } - else - { - return parse_int(it, check_it); - } - } - - std::shared_ptr> parse_int(std::string::iterator& it, - const std::string::iterator& end, - int base = 10, - const char* prefix = "") - { - std::string v{it, end}; - v = prefix + v; - v.erase(std::remove(v.begin(), v.end(), '_'), v.end()); - it = end; - try - { - return make_value(std::stoll(v, nullptr, base)); - } - catch (const std::invalid_argument& ex) - { - throw_parse_exception("Malformed number (invalid argument: " - + std::string{ex.what()} + ")"); - } - catch (const std::out_of_range& ex) - { - throw_parse_exception("Malformed number (out of range: " - + std::string{ex.what()} + ")"); - } - } - - std::shared_ptr> parse_float(std::string::iterator& it, - const std::string::iterator& end) - { - std::string v{it, end}; - v.erase(std::remove(v.begin(), v.end(), '_'), v.end()); - it = end; - char decimal_point = std::localeconv()->decimal_point[0]; - std::replace(v.begin(), v.end(), '.', decimal_point); - try - { - return make_value(std::stod(v)); - } - catch (const std::invalid_argument& ex) - { - throw_parse_exception("Malformed number (invalid argument: " - + std::string{ex.what()} + ")"); - } - catch (const std::out_of_range& ex) - { - throw_parse_exception("Malformed number (out of range: " - + std::string{ex.what()} + ")"); - } - } - - std::shared_ptr> parse_bool(std::string::iterator& it, - const std::string::iterator& end) - { - auto eat = make_consumer(it, end, [this]() { - throw_parse_exception("Attempted to parse invalid boolean value"); - }); - - if (*it == 't') - { - eat("true"); - return make_value(true); - } - else if (*it == 'f') - { - eat("false"); - return make_value(false); - } - - eat.error(); - return nullptr; - } - - std::string::iterator find_end_of_number(std::string::iterator it, - std::string::iterator end) - { - auto ret = std::find_if(it, end, [](char c) { - return !is_number(c) && c != '_' && c != '.' 
&& c != 'e' && c != 'E' - && c != '-' && c != '+' && c != 'x' && c != 'o' && c != 'b'; - }); - if (ret != end && ret + 1 != end && ret + 2 != end) - { - if ((ret[0] == 'i' && ret[1] == 'n' && ret[2] == 'f') - || (ret[0] == 'n' && ret[1] == 'a' && ret[2] == 'n')) - { - ret = ret + 3; - } - } - return ret; - } - - std::string::iterator find_end_of_date(std::string::iterator it, - std::string::iterator end) - { - auto end_of_date = std::find_if(it, end, [](char c) { - return !is_number(c) && c != '-'; - }); - if (end_of_date != end && *end_of_date == ' ' && end_of_date + 1 != end - && is_number(end_of_date[1])) - end_of_date++; - return std::find_if(end_of_date, end, [](char c) { - return !is_number(c) && c != 'T' && c != 'Z' && c != ':' - && c != '-' && c != '+' && c != '.'; - }); - } - - std::string::iterator find_end_of_time(std::string::iterator it, - std::string::iterator end) - { - return std::find_if(it, end, [](char c) { - return !is_number(c) && c != ':' && c != '.'; - }); - } - - local_time read_time(std::string::iterator& it, - const std::string::iterator& end) - { - auto time_end = find_end_of_time(it, end); - - auto eat = make_consumer( - it, time_end, [&]() { throw_parse_exception("Malformed time"); }); - - local_time ltime; - - ltime.hour = eat.eat_digits(2); - eat(':'); - ltime.minute = eat.eat_digits(2); - eat(':'); - ltime.second = eat.eat_digits(2); - - int power = 100000; - if (it != time_end && *it == '.') - { - ++it; - while (it != time_end && is_number(*it)) - { - ltime.microsecond += power * (*it++ - '0'); - power /= 10; - } - } - - if (it != time_end) - throw_parse_exception("Malformed time"); - - return ltime; - } - - std::shared_ptr> - parse_time(std::string::iterator& it, const std::string::iterator& end) - { - return make_value(read_time(it, end)); - } - - std::shared_ptr parse_date(std::string::iterator& it, - const std::string::iterator& end) - { - auto date_end = find_end_of_date(it, end); - - auto eat = make_consumer( - it, date_end, [&]() { throw_parse_exception("Malformed date"); }); - - local_date ldate; - ldate.year = eat.eat_digits(4); - eat('-'); - ldate.month = eat.eat_digits(2); - eat('-'); - ldate.day = eat.eat_digits(2); - - if (it == date_end) - return make_value(ldate); - - eat.eat_or('T', ' '); - - local_datetime ldt; - static_cast(ldt) = ldate; - static_cast(ldt) = read_time(it, date_end); - - if (it == date_end) - return make_value(ldt); - - offset_datetime dt; - static_cast(dt) = ldt; - - int hoff = 0; - int moff = 0; - if (*it == '+' || *it == '-') - { - auto plus = *it == '+'; - ++it; - - hoff = eat.eat_digits(2); - dt.hour_offset = (plus) ? hoff : -hoff; - eat(':'); - moff = eat.eat_digits(2); - dt.minute_offset = (plus) ? moff : -moff; - } - else if (*it == 'Z') - { - ++it; - } - - if (it != date_end) - throw_parse_exception("Malformed date"); - - return make_value(dt); - } - - std::shared_ptr parse_array(std::string::iterator& it, - std::string::iterator& end) - { - // this gets ugly because of the "homogeneity" restriction: - // arrays can either be of only one type, or contain arrays - // (each of those arrays could be of different types, though) - // - // because of the latter portion, we don't really have a choice - // but to represent them as arrays of base values... - ++it; - - // ugh---have to read the first value to determine array type... 
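(Editorial illustration, not part of the removed file: the "homogeneity" restriction that parse_array() works around above is easiest to see from the consumer side. A minimal sketch, assuming the accessors declared earlier in this vendored header behave as in upstream cpptoml, i.e. cpptoml::parser is constructible from a std::istream and table::get_array_of<T> returns an option-like wrapper; TOML integers are exposed as int64_t.)

#include <cstdint>
#include <iostream>
#include <memory>
#include <sstream>
#include "cpptoml.h"  // the header being removed above

int main() {
    // "ports" is a homogeneous array; "nested" is an array of arrays, the
    // one case where element types may differ between the sub-arrays.
    std::istringstream input(
        "ports = [8080, 8081]\n"
        "nested = [[1, 2], [\"a\"]]\n");

    cpptoml::parser p{input};
    std::shared_ptr<cpptoml::table> tbl = p.parse();

    // get_array_of<T> yields the values only if every element is a T.
    if (auto ports = tbl->get_array_of<int64_t>("ports")) {
        for (auto port : *ports) {
            std::cout << port << "\n";
        }
    }
    return 0;
}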
- skip_whitespace_and_comments(it, end); - - // edge case---empty array - if (*it == ']') - { - ++it; - return make_array(); - } - - auto val_end = std::find_if( - it, end, [](char c) { return c == ',' || c == ']' || c == '#'; }); - parse_type type = determine_value_type(it, val_end); - switch (type) - { - case parse_type::STRING: - return parse_value_array(it, end); - case parse_type::LOCAL_TIME: - return parse_value_array(it, end); - case parse_type::LOCAL_DATE: - return parse_value_array(it, end); - case parse_type::LOCAL_DATETIME: - return parse_value_array(it, end); - case parse_type::OFFSET_DATETIME: - return parse_value_array(it, end); - case parse_type::INT: - return parse_value_array(it, end); - case parse_type::FLOAT: - return parse_value_array(it, end); - case parse_type::BOOL: - return parse_value_array(it, end); - case parse_type::ARRAY: - return parse_object_array(&parser::parse_array, '[', it, - end); - case parse_type::INLINE_TABLE: - return parse_object_array( - &parser::parse_inline_table, '{', it, end); - default: - throw_parse_exception("Unable to parse array"); - } - } - - template - std::shared_ptr parse_value_array(std::string::iterator& it, - std::string::iterator& end) - { - auto arr = make_array(); - while (it != end && *it != ']') - { - auto val = parse_value(it, end); - if (auto v = val->as()) - arr->get().push_back(val); - else - throw_parse_exception("Arrays must be homogeneous"); - skip_whitespace_and_comments(it, end); - if (*it != ',') - break; - ++it; - skip_whitespace_and_comments(it, end); - } - if (it != end) - ++it; - return arr; - } - - template - std::shared_ptr parse_object_array(Function&& fun, char delim, - std::string::iterator& it, - std::string::iterator& end) - { - auto arr = detail::make_element(); - - while (it != end && *it != ']') - { - if (*it != delim) - throw_parse_exception("Unexpected character in array"); - - arr->get().push_back(((*this).*fun)(it, end)); - skip_whitespace_and_comments(it, end); - - if (it == end || *it != ',') - break; - - ++it; - skip_whitespace_and_comments(it, end); - } - - if (it == end || *it != ']') - throw_parse_exception("Unterminated array"); - - ++it; - return arr; - } - - std::shared_ptr
parse_inline_table(std::string::iterator& it, - std::string::iterator& end) - { - auto tbl = make_table(); - do - { - ++it; - if (it == end) - throw_parse_exception("Unterminated inline table"); - - consume_whitespace(it, end); - if (it != end && *it != '}') - { - parse_key_value(it, end, tbl.get()); - consume_whitespace(it, end); - } - } while (*it == ','); - - if (it == end || *it != '}') - throw_parse_exception("Unterminated inline table"); - - ++it; - consume_whitespace(it, end); - - return tbl; - } - - void skip_whitespace_and_comments(std::string::iterator& start, - std::string::iterator& end) - { - consume_whitespace(start, end); - while (start == end || *start == '#') - { - if (!detail::getline(input_, line_)) - throw_parse_exception("Unclosed array"); - line_number_++; - start = line_.begin(); - end = line_.end(); - consume_whitespace(start, end); - } - } - - void consume_whitespace(std::string::iterator& it, - const std::string::iterator& end) - { - while (it != end && (*it == ' ' || *it == '\t')) - ++it; - } - - void consume_backwards_whitespace(std::string::iterator& back, - const std::string::iterator& front) - { - while (back != front && (*back == ' ' || *back == '\t')) - --back; - } - - void eol_or_comment(const std::string::iterator& it, - const std::string::iterator& end) - { - if (it != end && *it != '#') - throw_parse_exception("Unidentified trailing character '" - + std::string{*it} - + "'---did you forget a '#'?"); - } - - bool is_time(const std::string::iterator& it, - const std::string::iterator& end) - { - auto time_end = find_end_of_time(it, end); - auto len = std::distance(it, time_end); - - if (len < 8) - return false; - - if (it[2] != ':' || it[5] != ':') - return false; - - if (len > 8) - return it[8] == '.' && len > 9; - - return true; - } - - option date_type(const std::string::iterator& it, - const std::string::iterator& end) - { - auto date_end = find_end_of_date(it, end); - auto len = std::distance(it, date_end); - - if (len < 10) - return {}; - - if (it[4] != '-' || it[7] != '-') - return {}; - - if (len >= 19 && (it[10] == 'T' || it[10] == ' ') - && is_time(it + 11, date_end)) - { - // datetime type - auto time_end = find_end_of_time(it + 11, date_end); - if (time_end == date_end) - return {parse_type::LOCAL_DATETIME}; - else - return {parse_type::OFFSET_DATETIME}; - } - else if (len == 10) - { - // just a regular date - return {parse_type::LOCAL_DATE}; - } - - return {}; - } - - std::istream& input_; - std::string line_; - std::size_t line_number_ = 0; -}; - -/** - * Utility function to parse a file as a TOML file. Returns the root table. - * Throws a parse_exception if the file cannot be opened. - */ -inline std::shared_ptr
parse_file(const std::string& filename) -{ -#if defined(BOOST_NOWIDE_FSTREAM_INCLUDED_HPP) - boost::nowide::ifstream file{filename.c_str()}; -#elif defined(NOWIDE_FSTREAM_INCLUDED_HPP) - nowide::ifstream file{filename.c_str()}; -#else - std::ifstream file{filename}; -#endif - if (!file.is_open()) - throw parse_exception{filename + " could not be opened for parsing"}; - parser p{file}; - return p.parse(); -} - -template -struct value_accept; - -template <> -struct value_accept<> -{ - template - static void accept(const base&, Visitor&&, Args&&...) - { - // nothing - } -}; - -template -struct value_accept -{ - template - static void accept(const base& b, Visitor&& visitor, Args&&... args) - { - if (auto v = b.as()) - { - visitor.visit(*v, std::forward(args)...); - } - else - { - value_accept::accept(b, std::forward(visitor), - std::forward(args)...); - } - } -}; - -/** - * base implementation of accept() that calls visitor.visit() on the concrete - * class. - */ -template -void base::accept(Visitor&& visitor, Args&&... args) const -{ - if (is_value()) - { - using value_acceptor - = value_accept; - value_acceptor::accept(*this, std::forward(visitor), - std::forward(args)...); - } - else if (is_table()) - { - visitor.visit(static_cast(*this), - std::forward(args)...); - } - else if (is_array()) - { - visitor.visit(static_cast(*this), - std::forward(args)...); - } - else if (is_table_array()) - { - visitor.visit(static_cast(*this), - std::forward(args)...); - } -} - -/** - * Writer that can be passed to accept() functions of cpptoml objects and - * will output valid TOML to a stream. - */ -class toml_writer -{ - public: - /** - * Construct a toml_writer that will write to the given stream - */ - toml_writer(std::ostream& s, const std::string& indent_space = "\t") - : stream_(s), indent_(indent_space), has_naked_endline_(false) - { - // nothing - } - - public: - /** - * Output a base value of the TOML tree. - */ - template - void visit(const value& v, bool = false) - { - write(v); - } - - /** - * Output a table element of the TOML tree - */ - void visit(const table& t, bool in_array = false) - { - write_table_header(in_array); - std::vector values; - std::vector tables; - - for (const auto& i : t) - { - if (i.second->is_table() || i.second->is_table_array()) - { - tables.push_back(i.first); - } - else - { - values.push_back(i.first); - } - } - - for (unsigned int i = 0; i < values.size(); ++i) - { - path_.push_back(values[i]); - - if (i > 0) - endline(); - - write_table_item_header(*t.get(values[i])); - t.get(values[i])->accept(*this, false); - path_.pop_back(); - } - - for (unsigned int i = 0; i < tables.size(); ++i) - { - path_.push_back(tables[i]); - - if (values.size() > 0 || i > 0) - endline(); - - write_table_item_header(*t.get(tables[i])); - t.get(tables[i])->accept(*this, false); - path_.pop_back(); - } - - endline(); - } - - /** - * Output an array element of the TOML tree - */ - void visit(const array& a, bool = false) - { - write("["); - - for (unsigned int i = 0; i < a.get().size(); ++i) - { - if (i > 0) - write(", "); - - if (a.get()[i]->is_array()) - { - a.get()[i]->as_array()->accept(*this, true); - } - else - { - a.get()[i]->accept(*this, true); - } - } - - write("]"); - } - - /** - * Output a table_array element of the TOML tree - */ - void visit(const table_array& t, bool = false) - { - for (unsigned int j = 0; j < t.get().size(); ++j) - { - if (j > 0) - endline(); - - t.get()[j]->accept(*this, true); - } - - endline(); - } - - /** - * Escape a string for output. 
- */ - static std::string escape_string(const std::string& str) - { - std::string res; - for (auto it = str.begin(); it != str.end(); ++it) - { - if (*it == '\b') - { - res += "\\b"; - } - else if (*it == '\t') - { - res += "\\t"; - } - else if (*it == '\n') - { - res += "\\n"; - } - else if (*it == '\f') - { - res += "\\f"; - } - else if (*it == '\r') - { - res += "\\r"; - } - else if (*it == '"') - { - res += "\\\""; - } - else if (*it == '\\') - { - res += "\\\\"; - } - else if (static_cast(*it) <= UINT32_C(0x001f)) - { - res += "\\u"; - std::stringstream ss; - ss << std::hex << static_cast(*it); - res += ss.str(); - } - else - { - res += *it; - } - } - return res; - } - - protected: - /** - * Write out a string. - */ - void write(const value& v) - { - write("\""); - write(escape_string(v.get())); - write("\""); - } - - /** - * Write out a double. - */ - void write(const value& v) - { - std::stringstream ss; - ss << std::showpoint - << std::setprecision(std::numeric_limits::max_digits10) - << v.get(); - - auto double_str = ss.str(); - auto pos = double_str.find("e0"); - if (pos != std::string::npos) - double_str.replace(pos, 2, "e"); - pos = double_str.find("e-0"); - if (pos != std::string::npos) - double_str.replace(pos, 3, "e-"); - - stream_ << double_str; - has_naked_endline_ = false; - } - - /** - * Write out an integer, local_date, local_time, local_datetime, or - * offset_datetime. - */ - template - typename std::enable_if< - is_one_of::value>::type - write(const value& v) - { - write(v.get()); - } - - /** - * Write out a boolean. - */ - void write(const value& v) - { - write((v.get() ? "true" : "false")); - } - - /** - * Write out the header of a table. - */ - void write_table_header(bool in_array = false) - { - if (!path_.empty()) - { - indent(); - - write("["); - - if (in_array) - { - write("["); - } - - for (unsigned int i = 0; i < path_.size(); ++i) - { - if (i > 0) - { - write("."); - } - - if (path_[i].find_first_not_of("ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" - "fghijklmnopqrstuvwxyz0123456789" - "_-") - == std::string::npos) - { - write(path_[i]); - } - else - { - write("\""); - write(escape_string(path_[i])); - write("\""); - } - } - - if (in_array) - { - write("]"); - } - - write("]"); - endline(); - } - } - - /** - * Write out the identifier for an item in a table. - */ - void write_table_item_header(const base& b) - { - if (!b.is_table() && !b.is_table_array()) - { - indent(); - - if (path_.back().find_first_not_of("ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" - "fghijklmnopqrstuvwxyz0123456789" - "_-") - == std::string::npos) - { - write(path_.back()); - } - else - { - write("\""); - write(escape_string(path_.back())); - write("\""); - } - - write(" = "); - } - } - - private: - /** - * Indent the proper number of tabs given the size of - * the path. - */ - void indent() - { - for (std::size_t i = 1; i < path_.size(); ++i) - write(indent_); - } - - /** - * Write a value out to the stream. 
- */ - template - void write(const T& v) - { - stream_ << v; - has_naked_endline_ = false; - } - - /** - * Write an endline out to the stream - */ - void endline() - { - if (!has_naked_endline_) - { - stream_ << "\n"; - has_naked_endline_ = true; - } - } - - private: - std::ostream& stream_; - const std::string indent_; - std::vector path_; - bool has_naked_endline_; -}; - -inline std::ostream& operator<<(std::ostream& stream, const base& b) -{ - toml_writer writer{stream}; - b.accept(writer); - return stream; -} - -template -std::ostream& operator<<(std::ostream& stream, const value& v) -{ - toml_writer writer{stream}; - v.accept(writer); - return stream; -} - -inline std::ostream& operator<<(std::ostream& stream, const table& t) -{ - toml_writer writer{stream}; - t.accept(writer); - return stream; -} - -inline std::ostream& operator<<(std::ostream& stream, const table_array& t) -{ - toml_writer writer{stream}; - t.accept(writer); - return stream; -} - -inline std::ostream& operator<<(std::ostream& stream, const array& a) -{ - toml_writer writer{stream}; - a.accept(writer); - return stream; -} -} // namespace cpptoml -#endif // CPPTOML_H diff --git a/third_party/nix/src/libexpr/CMakeLists.txt b/third_party/nix/src/libexpr/CMakeLists.txt deleted file mode 100644 index 8cb7143d2c..0000000000 --- a/third_party/nix/src/libexpr/CMakeLists.txt +++ /dev/null @@ -1,85 +0,0 @@ -# -*- mode: cmake; -*- -add_library(nixexpr SHARED) -set_property(TARGET nixexpr PROPERTY CXX_STANDARD 17) -include_directories(${PROJECT_BINARY_DIR}) # for 'generated/' -target_include_directories(nixexpr PUBLIC "${nix_SOURCE_DIR}/src") - -# Generate lexer & parser for inclusion: -find_package(BISON) -find_package(FLEX) - -BISON_TARGET(NixParser parser.y - ${PROJECT_BINARY_DIR}/generated/parser-tab.cc - DEFINES_FILE ${PROJECT_BINARY_DIR}/generated/parser-tab.hh) - -FLEX_TARGET(NixLexer lexer.l - ${PROJECT_BINARY_DIR}/generated/lexer-tab.cc - DEFINES_FILE ${PROJECT_BINARY_DIR}/generated/lexer-tab.hh) - -ADD_FLEX_BISON_DEPENDENCY(NixLexer NixParser) - -set(HEADER_FILES - attr-path.hh - attr-set.hh - common-eval-args.hh - eval.hh - eval-inline.hh - function-trace.hh - get-drvs.hh - json-to-value.hh - names.hh - nixexpr.hh - parser.hh - primops.hh - symbol-table.hh - value.hh - value-to-json.hh - value-to-xml.hh -) - -target_sources(nixexpr - PUBLIC - ${HEADER_FILES} - - PRIVATE - ${PROJECT_BINARY_DIR}/generated/parser-tab.hh - ${PROJECT_BINARY_DIR}/generated/parser-tab.cc - ${PROJECT_BINARY_DIR}/generated/lexer-tab.hh - ${PROJECT_BINARY_DIR}/generated/lexer-tab.cc - primops/context.cc - primops/fetchGit.cc - primops/fetchMercurial.cc - primops/fromTOML.cc - attr-path.cc - attr-set.cc - common-eval-args.cc - eval.cc - function-trace.cc - get-drvs.cc - json-to-value.cc - names.cc - nixexpr.cc - parser.cc - primops.cc - symbol-table.cc - value.cc - value-to-json.cc - value-to-xml.cc -) - -target_link_libraries(nixexpr - nixmain - nixstore - nixutil - - absl::btree - absl::flat_hash_set - absl::node_hash_set - absl::strings -) - -configure_file("nix-expr.pc.in" "${PROJECT_BINARY_DIR}/nix-expr.pc" @ONLY) -INSTALL(FILES "${PROJECT_BINARY_DIR}/nix-expr.pc" DESTINATION "${PKGCONFIG_INSTALL_DIR}") - -INSTALL(FILES ${HEADER_FILES} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/nix/libexpr) -INSTALL(TARGETS nixexpr DESTINATION ${CMAKE_INSTALL_LIBDIR}) diff --git a/third_party/nix/src/libexpr/attr-path.cc b/third_party/nix/src/libexpr/attr-path.cc deleted file mode 100644 index 86ebeec2fb..0000000000 --- 
a/third_party/nix/src/libexpr/attr-path.cc +++ /dev/null @@ -1,109 +0,0 @@ -#include "libexpr/attr-path.hh" - -#include - -#include "libexpr/eval-inline.hh" -#include "libutil/util.hh" - -namespace nix { - -static Strings parseAttrPath(const std::string& s) { - Strings res; - std::string cur; - std::string::const_iterator i = s.begin(); - while (i != s.end()) { - if (*i == '.') { - res.push_back(cur); - cur.clear(); - } else if (*i == '"') { - ++i; - while (true) { - if (i == s.end()) { - throw Error(format("missing closing quote in selection path '%1%'") % - s); - } - if (*i == '"') { - break; - } - cur.push_back(*i++); - } - } else { - cur.push_back(*i); - } - ++i; - } - if (!cur.empty()) { - res.push_back(cur); - } - return res; -} - -Value* findAlongAttrPath(EvalState& state, const std::string& attrPath, - Bindings* autoArgs, Value& vIn) { - Strings tokens = parseAttrPath(attrPath); - - Error attrError = - Error(format("attribute selection path '%1%' does not match expression") % - attrPath); - - Value* v = &vIn; - - for (auto& attr : tokens) { - /* Is i an index (integer) or a normal attribute name? */ - enum { apAttr, apIndex } apType = apAttr; - unsigned int attrIndex; - if (absl::SimpleAtoi(attr, &attrIndex)) { - apType = apIndex; - } - - /* Evaluate the expression. */ - Value* vNew = state.allocValue(); - state.autoCallFunction(autoArgs, *v, *vNew); - v = vNew; - state.forceValue(*v); - - /* It should evaluate to either a set or an expression, - according to what is specified in the attrPath. */ - - if (apType == apAttr) { - if (v->type != tAttrs) { - throw TypeError(format("the expression selected by the selection path " - "'%1%' should be a set but is %2%") % - attrPath % showType(*v)); - } - - if (attr.empty()) { - throw Error(format("empty attribute name in selection path '%1%'") % - attrPath); - } - - Bindings::iterator a = v->attrs->find(state.symbols.Create(attr)); - if (a == v->attrs->end()) { - throw Error( - format("attribute '%1%' in selection path '%2%' not found") % attr % - attrPath); - } - v = &*(a->second).value; - } - - else if (apType == apIndex) { - if (!v->isList()) { - throw TypeError(format("the expression selected by the selection path " - "'%1%' should be a list but is %2%") % - attrPath % showType(*v)); - } - - if (attrIndex >= v->listSize()) { - throw Error( - format("list index %1% in selection path '%2%' is out of range") % - attrIndex % attrPath); - } - - v = (*v->list)[attrIndex]; - } - } - - return v; -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/attr-path.hh b/third_party/nix/src/libexpr/attr-path.hh deleted file mode 100644 index 97170be840..0000000000 --- a/third_party/nix/src/libexpr/attr-path.hh +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once - -#include -#include - -#include "libexpr/eval.hh" - -namespace nix { - -Value* findAlongAttrPath(EvalState& state, const std::string& attrPath, - Bindings* autoArgs, Value& vIn); - -} diff --git a/third_party/nix/src/libexpr/attr-set.cc b/third_party/nix/src/libexpr/attr-set.cc deleted file mode 100644 index b1617c981f..0000000000 --- a/third_party/nix/src/libexpr/attr-set.cc +++ /dev/null @@ -1,111 +0,0 @@ -#include "libexpr/attr-set.hh" - -#include - -#include -#include - -#include "libexpr/eval-inline.hh" - -namespace nix { - -// This function inherits its name from previous implementations, in -// which Bindings was backed by an array of elements which was scanned -// linearly. 
-// -// In that setup, inserting duplicate elements would always yield the -// first element (until the next sort, which wasn't stable, after -// which things are more or less undefined). -// -// This behaviour is mimicked by using .insert(), which will *not* -// override existing values. -void Bindings::push_back(const Attr& attr) { - auto [_, inserted] = attributes_.insert({attr.name, attr}); - - if (!inserted) { - DLOG(WARNING) << "attempted to insert duplicate attribute for key '" - << attr.name << "'"; - } -} - -size_t Bindings::size() const { return attributes_.size(); } - -bool Bindings::empty() { return attributes_.empty(); } - -Bindings::iterator Bindings::find(const Symbol& name) { - return attributes_.find(name); -} - -bool Bindings::Equal(const Bindings* other, EvalState& state) const { - if (this == other) { - return true; - } - - if (this->attributes_.size() != other->attributes_.size()) { - return false; - } - - Bindings::const_iterator i; - Bindings::const_iterator j; - for (i = this->cbegin(), j = other->cbegin(); i != this->cend(); ++i, ++j) { - if (i->second.name != j->second.name || - !state.eqValues(*i->second.value, *j->second.value)) { - return false; - } - } - - return true; -} - -Bindings::iterator Bindings::begin() { return attributes_.begin(); } -Bindings::iterator Bindings::end() { return attributes_.end(); } - -Bindings::const_iterator Bindings::cbegin() const { - return attributes_.cbegin(); -} - -Bindings::const_iterator Bindings::cend() const { return attributes_.cend(); } - -std::unique_ptr Bindings::New(size_t capacity) { - if (capacity == 0) { - // TODO(tazjin): A lot of 0-capacity Bindings are allocated. - // It would be nice to optimize that. - } - - return std::make_unique(); -} - -std::unique_ptr Bindings::Merge(const Bindings& lhs, - const Bindings& rhs) { - auto bindings = New(lhs.size() + rhs.size()); - - // Values are merged by inserting the entire iterator range of both - // input sets. The right-hand set (the values of which take - // precedence) is inserted *first* because the range insertion - // method does not override values. - bindings->attributes_.insert(rhs.attributes_.cbegin(), - rhs.attributes_.cend()); - bindings->attributes_.insert(lhs.attributes_.cbegin(), - lhs.attributes_.cend()); - - return bindings; -} - -void EvalState::mkAttrs(Value& v, size_t capacity) { - clearValue(v); - v.type = tAttrs; - v.attrs = Bindings::New(capacity); - nrAttrsets++; - nrAttrsInAttrsets += capacity; -} - -/* Create a new attribute named 'name' on an existing attribute set stored - in 'vAttrs' and return the newly allocated Value which is associated with - this attribute. */ -Value* EvalState::allocAttr(Value& vAttrs, const Symbol& name) { - Value* v = allocValue(); - vAttrs.attrs->push_back(Attr(name, v)); - return v; -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/attr-set.hh b/third_party/nix/src/libexpr/attr-set.hh deleted file mode 100644 index 5d77e0907c..0000000000 --- a/third_party/nix/src/libexpr/attr-set.hh +++ /dev/null @@ -1,69 +0,0 @@ -// This file implements the underlying structure of Nix attribute sets. -#pragma once - -#include - -#include "libexpr/nixexpr.hh" -#include "libexpr/symbol-table.hh" -#include "libutil/types.hh" - -namespace nix { // TODO(tazjin): ::expr - -class EvalState; -struct Value; - -/* Map one attribute name to its value. */ -struct Attr { - Symbol name; - Value* value; // TODO(tazjin): Who owns this? - Pos* pos; // TODO(tazjin): Who owns this? 
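(Editorial illustration, not part of the removed file: the insert-does-not-overwrite behaviour that Bindings::push_back() and Bindings::Merge() rely on above is a property of the underlying map, not of Nix. A small self-contained sketch with std::map, which for this purpose behaves like the absl::btree_map used by Bindings.)

#include <cassert>
#include <map>
#include <string>

int main() {
    std::map<std::string, int> lhs{{"a", 1}, {"b", 2}};
    std::map<std::string, int> rhs{{"b", 20}, {"c", 30}};

    // Range insert() skips keys that already exist, so inserting the
    // right-hand ("winning") set first makes its values take precedence,
    // mirroring how Merge() implements the `//` operator.
    std::map<std::string, int> merged;
    merged.insert(rhs.begin(), rhs.end());  // rhs first: its values win
    merged.insert(lhs.begin(), lhs.end());  // lhs only fills in the rest

    assert(merged.at("a") == 1);
    assert(merged.at("b") == 20);  // rhs took precedence, like `lhs // rhs`
    assert(merged.at("c") == 30);
    return 0;
}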
- Attr(Symbol name, Value* value, Pos* pos = &noPos) - : name(name), value(value), pos(pos){}; -}; - -using AttributeMap = absl::btree_map; - -class Bindings { - public: - using iterator = AttributeMap::iterator; - using const_iterator = AttributeMap::const_iterator; - - // Allocate a new attribute set that is visible to the garbage - // collector. - static std::unique_ptr New(size_t capacity = 0); - - // Create a new attribute set by merging two others. This is used to - // implement the `//` operator in Nix. - static std::unique_ptr Merge(const Bindings& lhs, - const Bindings& rhs); - - // Return the number of contained elements. - size_t size() const; - - // Is this attribute set empty? - bool empty(); - - // Insert, but do not replace, values in the attribute set. - void push_back(const Attr& attr); - - // Are these two attribute sets deeply equal? - // Note: Does not special-case derivations. Use state.eqValues() to check - // attrsets that may be derivations. - bool Equal(const Bindings* other, EvalState& state) const; - - // Look up a specific element of the attribute set. - iterator find(const Symbol& name); - - iterator begin(); - const_iterator cbegin() const; - iterator end(); - const_iterator cend() const; - - // oh no - friend class EvalState; - - private: - AttributeMap attributes_; -}; - -} // namespace nix diff --git a/third_party/nix/src/libexpr/common-eval-args.cc b/third_party/nix/src/libexpr/common-eval-args.cc deleted file mode 100644 index f63d3f8276..0000000000 --- a/third_party/nix/src/libexpr/common-eval-args.cc +++ /dev/null @@ -1,72 +0,0 @@ -#include "libexpr/common-eval-args.hh" - -#include "libexpr/eval.hh" -#include "libmain/shared.hh" -#include "libstore/download.hh" -#include "libutil/util.hh" - -namespace nix { - -MixEvalArgs::MixEvalArgs() { - mkFlag() - .longName("arg") - .description("argument to be passed to Nix functions") - .labels({"name", "expr"}) - .handler([&](std::vector ss) { - auto_args_[ss[0]] = std::make_pair(kArgTypeExpr, ss[1]); - }); - - mkFlag() - .longName("argstr") - .description("string-valued argument to be passed to Nix functions") - .labels({"name", "string"}) - .handler([&](std::vector ss) { - auto_args_[ss[0]] = std::make_pair(kArgTypeString, ss[1]); - }); - - mkFlag() - .shortName('I') - .longName("include") - .description( - "add a path to the list of locations used to look up <...> file " - "names") - .label("path") - .handler([&](const std::string& s) { searchPath.push_back(s); }); -} - -std::unique_ptr MixEvalArgs::getAutoArgs(EvalState& state) { - auto res = Bindings::New(auto_args_.size()); - for (auto& [arg, arg_value] : auto_args_) { - Value* v = state.allocValue(); - switch (arg_value.first) { - case kArgTypeExpr: { - state.mkThunk_( - *v, state.parseExprFromString(arg_value.second, absPath("."))); - break; - } - case kArgTypeString: { - mkString(*v, arg_value.second); - break; - } - } - - res->push_back(Attr(state.symbols.Create(arg), v)); - } - return res; -} - -Path lookupFileArg(EvalState& state, std::string s) { - if (isUri(s)) { - CachedDownloadRequest request(s); - request.unpack = true; - return getDownloader()->downloadCached(state.store, request).path; - } - if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') { - Path p = s.substr(1, s.size() - 2); - return state.findFile(p); - } else { - return absPath(s); - } -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/common-eval-args.hh b/third_party/nix/src/libexpr/common-eval-args.hh deleted file mode 100644 index 5e0e8af79c..0000000000 
--- a/third_party/nix/src/libexpr/common-eval-args.hh +++ /dev/null @@ -1,26 +0,0 @@ -#pragma once - -#include "libutil/args.hh" - -namespace nix { - -class Store; -class EvalState; -class Bindings; - -enum ArgType { kArgTypeString, kArgTypeExpr }; - -struct MixEvalArgs : virtual Args { - MixEvalArgs(); - - std::unique_ptr getAutoArgs(EvalState& state); - - Strings searchPath; - - private: - std::map> auto_args_; -}; - -Path lookupFileArg(EvalState& state, std::string s); - -} // namespace nix diff --git a/third_party/nix/src/libexpr/eval-inline.hh b/third_party/nix/src/libexpr/eval-inline.hh deleted file mode 100644 index 5162ab3971..0000000000 --- a/third_party/nix/src/libexpr/eval-inline.hh +++ /dev/null @@ -1,90 +0,0 @@ -#pragma once - -#include "libexpr/eval.hh" - -#define LocalNoInline(f) \ - static f __attribute__((noinline)); \ - f -#define LocalNoInlineNoReturn(f) \ - static f __attribute__((noinline, noreturn)); \ - f - -namespace nix { - -LocalNoInlineNoReturn(void throwEvalError(const char* s, const Pos& pos)) { - throw EvalError(format(s) % pos); -} - -LocalNoInlineNoReturn(void throwTypeError(const char* s, const Value& v)) { - throw TypeError(format(s) % showType(v)); -} - -LocalNoInlineNoReturn(void throwTypeError(const char* s, const Value& v, - const Pos& pos)) { - throw TypeError(format(s) % showType(v) % pos); -} - -void EvalState::forceValue(Value& v, const Pos& pos) { - if (v.type == tThunk) { - Env* env = v.thunk.env; - Expr* expr = v.thunk.expr; - try { - v.type = tBlackhole; - // checkInterrupt(); - expr->eval(*this, *env, v); - } catch (...) { - v.type = tThunk; - v.thunk.env = env; - v.thunk.expr = expr; - throw; - } - } else if (v.type == tApp) { - callFunction(*v.app.left, *v.app.right, v, noPos); - } else if (v.type == tBlackhole) { - throwEvalError("infinite recursion encountered, at %1%", pos); - } -} - -inline void EvalState::forceAttrs(Value& v) { - forceValue(v); - if (v.type != tAttrs) { - throwTypeError("value is %1% while a set was expected", v); - } -} - -inline void EvalState::forceAttrs(Value& v, const Pos& pos) { - forceValue(v); - if (v.type != tAttrs) { - throwTypeError("value is %1% while a set was expected, at %2%", v, pos); - } -} - -inline void EvalState::forceList(Value& v) { - forceValue(v); - if (!v.isList()) { - throwTypeError("value is %1% while a list was expected", v); - } -} - -inline void EvalState::forceList(Value& v, const Pos& pos) { - forceValue(v); - if (!v.isList()) { - throwTypeError("value is %1% while a list was expected, at %2%", v, pos); - } -} - -/* Note: Various places expect the allocated memory to be zeroed. 
*/ -inline void* allocBytes(size_t n) { - void* p; -#if HAVE_BOEHMGC - p = GC_MALLOC(n); -#else - p = calloc(n, 1); -#endif - if (!p) { - throw std::bad_alloc(); - } - return p; -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/eval.cc b/third_party/nix/src/libexpr/eval.cc deleted file mode 100644 index 682ea64832..0000000000 --- a/third_party/nix/src/libexpr/eval.cc +++ /dev/null @@ -1,1878 +0,0 @@ -#include "libexpr/eval.hh" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "libexpr/eval-inline.hh" -#include "libexpr/function-trace.hh" -#include "libexpr/value.hh" -#include "libstore/derivations.hh" -#include "libstore/download.hh" -#include "libstore/globals.hh" -#include "libstore/store-api.hh" -#include "libutil/hash.hh" -#include "libutil/json.hh" -#include "libutil/util.hh" -#include "libutil/visitor.hh" - -namespace nix { -namespace { - -void ConfigureGc() { /* This function intentionally left blank. */ -} - -} // namespace - -namespace expr { - -absl::once_flag gc_flag; - -void InitGC() { absl::call_once(gc_flag, &ConfigureGc); } - -} // namespace expr - -static char* dupString(const char* s) { - char* t; - t = strdup(s); - if (t == nullptr) { - throw std::bad_alloc(); - } - return t; -} - -std::shared_ptr allocRootValue(Value* v) { - return std::make_shared(v); -} - -static void printValue(std::ostream& str, std::set& active, - const Value& v) { - checkInterrupt(); - - if (active.find(&v) != active.end()) { - str << ""; - return; - } - active.insert(&v); - - switch (v.type) { - case tInt: - str << v.integer; - break; - case tBool: - str << (v.boolean ? "true" : "false"); - break; - case tString: - str << "\""; - for (const char* i = v.string.s; *i != 0; i++) { - if (*i == '\"' || *i == '\\') { - str << "\\" << *i; - } else if (*i == '\n') { - str << "\\n"; - } else if (*i == '\r') { - str << "\\r"; - } else if (*i == '\t') { - str << "\\t"; - } else { - str << *i; - } - } - str << "\""; - break; - case tPath: - str << v.path; // !!! escaping? - break; - case tNull: - str << "null"; - break; - case tAttrs: { - str << "{ "; - for (const auto& [key, value] : *v.attrs) { - str << key << " = "; - printValue(str, active, *value.value); - str << "; "; - } - str << "}"; - break; - } - case tList: - str << "[ "; - for (unsigned int n = 0; n < v.listSize(); ++n) { - printValue(str, active, *(*v.list)[n]); - str << " "; - } - str << "]"; - break; - case tThunk: - case tApp: - str << ""; - break; - case tLambda: - str << ""; - break; - case tPrimOp: - str << ""; - break; - case tPrimOpApp: - str << ""; - break; - case tFloat: - str << v.fpoint; - break; - default: - throw Error( - absl::StrCat("invalid value of type ", static_cast(v.type))); - } - - active.erase(&v); -} - -std::ostream& operator<<(std::ostream& str, const Value& v) { - std::set active; - printValue(str, active, v); - return str; -} - -const Value* getPrimOp(const Value& v) { - const Value* primOp = &v; - while (primOp->type == tPrimOpApp) { - primOp = primOp->primOpApp.left; - } - assert(primOp->type == tPrimOp); - return primOp; -} - -std::string showType(const Value& v) { - switch (v.type) { - case tInt: - return "an integer"; - case tBool: - return "a boolean"; - case tString: - return v.string.context != nullptr ? 
"a string with context" : "a string"; - case tPath: - return "a path"; - case tNull: - return "null"; - case tAttrs: - return "a set"; - case tList: - return "a list"; - case tThunk: - return "a thunk"; - case tApp: - return "a function application"; - case tLambda: - return "a function"; - case tBlackhole: - return "a black hole"; - case tPrimOp: - return fmt("the built-in function '%s'", std::string(v.primOp->name)); - case tPrimOpApp: - return fmt("the partially applied built-in function '%s'", - std::string(getPrimOp(v)->primOp->name)); - case _reserved1: - LOG(FATAL) << "attempted to show the type string of the deprecated " - "tExternal value"; - break; - case tFloat: - return "a float"; - } - LOG(FATAL) - << "attempted to determine the type string of an unknown type number (" - << static_cast(v.type) << ")"; - abort(); -} - -static Symbol getName(const AttrName& name, EvalState& state, Env& env) { - return std::visit( - util::overloaded{[&](const Symbol& name) -> Symbol { return name; }, - [&](Expr* expr) -> Symbol { - Value nameValue; - expr->eval(state, env, nameValue); - state.forceStringNoCtx(nameValue); - return state.symbols.Create(nameValue.string.s); - }}, - name); -} - -/* Very hacky way to parse $NIX_PATH, which is colon-separated, but - can contain URLs (e.g. "nixpkgs=https://bla...:foo=https://"). */ -static Strings parseNixPath(const std::string& s) { - Strings res; - - auto p = s.begin(); - - while (p != s.end()) { - auto start = p; - auto start2 = p; - - while (p != s.end() && *p != ':') { - if (*p == '=') { - start2 = p + 1; - } - ++p; - } - - if (p == s.end()) { - if (p != start) { - res.push_back(std::string(start, p)); - } - break; - } - - if (*p == ':') { - if (isUri(std::string(start2, s.end()))) { - ++p; - while (p != s.end() && *p != ':') { - ++p; - } - } - res.push_back(std::string(start, p)); - if (p == s.end()) { - break; - } - } - - ++p; - } - - return res; -} - -EvalState::EvalState(const Strings& _searchPath, const ref& store) - : sWith(symbols.Create("")), - sOutPath(symbols.Create("outPath")), - sDrvPath(symbols.Create("drvPath")), - sType(symbols.Create("type")), - sMeta(symbols.Create("meta")), - sName(symbols.Create("name")), - sValue(symbols.Create("value")), - sSystem(symbols.Create("system")), - sOutputs(symbols.Create("outputs")), - sOutputName(symbols.Create("outputName")), - sIgnoreNulls(symbols.Create("__ignoreNulls")), - sFile(symbols.Create("file")), - sLine(symbols.Create("line")), - sColumn(symbols.Create("column")), - sFunctor(symbols.Create("__functor")), - sToString(symbols.Create("__toString")), - sRight(symbols.Create("right")), - sWrong(symbols.Create("wrong")), - sStructuredAttrs(symbols.Create("__structuredAttrs")), - sBuilder(symbols.Create("builder")), - sArgs(symbols.Create("args")), - sOutputHash(symbols.Create("outputHash")), - sOutputHashAlgo(symbols.Create("outputHashAlgo")), - sOutputHashMode(symbols.Create("outputHashMode")), - sDerivationNix(std::nullopt), - repair(NoRepair), - store(store), - baseEnv(allocEnv(128)), - staticBaseEnv(false, nullptr) { - expr::InitGC(); - - countCalls = getEnv("NIX_COUNT_CALLS").value_or("0") != "0"; - - /* Initialise the Nix expression search path. 
*/ - if (!evalSettings.pureEval) { - Strings paths = parseNixPath(getEnv("NIX_PATH").value_or("")); - for (auto& i : _searchPath) { - addToSearchPath(i); - } - for (auto& i : paths) { - addToSearchPath(i); - } - } - addToSearchPath("nix=" + - canonPath(settings.nixDataDir + "/nix/corepkgs", true)); - - if (evalSettings.restrictEval || evalSettings.pureEval) { - allowedPaths = PathSet(); - - for (auto& i : searchPath) { - auto r = resolveSearchPathElem(i); - if (!r.first) { - continue; - } - - auto path = r.second; - - if (store->isInStore(r.second)) { - PathSet closure; - store->computeFSClosure(store->toStorePath(r.second), closure); - for (auto& path : closure) { - allowedPaths->insert(path); - } - } else { - allowedPaths->insert(r.second); - } - } - } - - createBaseEnv(); -} - -EvalState::~EvalState() = default; - -Path EvalState::checkSourcePath(const Path& path_) { - TraceFileAccess(path_); - if (!allowedPaths) { - return path_; - } - - auto i = resolvedPaths.find(path_); - if (i != resolvedPaths.end()) { - return i->second; - } - - bool found = false; - - /* First canonicalize the path without symlinks, so we make sure an - * attacker can't append ../../... to a path that would be in allowedPaths - * and thus leak symlink targets. - */ - Path abspath = canonPath(path_); - - for (auto& i : *allowedPaths) { - if (isDirOrInDir(abspath, i)) { - found = true; - break; - } - } - - if (!found) { - throw RestrictedPathError( - "access to path '%1%' is forbidden in restricted mode", abspath); - } - - /* Resolve symlinks. */ - DLOG(INFO) << "checking access to '" << abspath << "'"; - Path path = canonPath(abspath, true); - - for (auto& i : *allowedPaths) { - if (isDirOrInDir(path, i)) { - resolvedPaths[path_] = path; - return path; - } - } - - throw RestrictedPathError( - "access to path '%1%' is forbidden in restricted mode", path); -} - -void EvalState::checkURI(const std::string& uri) { - if (!evalSettings.restrictEval) { - return; - } - - /* 'uri' should be equal to a prefix, or in a subdirectory of a - prefix. Thus, the prefix https://github.co does not permit - access to https://github.com. Note: this allows 'http://' and - 'https://' as prefixes for any http/https URI. */ - for (auto& prefix : evalSettings.allowedUris.get()) { - if (uri == prefix || - (uri.size() > prefix.size() && !prefix.empty() && - absl::StartsWith(uri, prefix) && - (prefix[prefix.size() - 1] == '/' || uri[prefix.size()] == '/'))) { - return; - } - } - - /* If the URI is a path, then check it against allowedPaths as - well. */ - if (absl::StartsWith(uri, "/")) { - checkSourcePath(uri); - return; - } - - if (absl::StartsWith(uri, "file://")) { - checkSourcePath(std::string(uri, 7)); - return; - } - - throw RestrictedPathError( - "access to URI '%s' is forbidden in restricted mode", uri); -} - -Path EvalState::toRealPath(const Path& path, const PathSet& context) { - // FIXME: check whether 'path' is in 'context'. - return !context.empty() && store->isInStore(path) ? store->toRealPath(path) - : path; -}; - -Value* EvalState::addConstant(const std::string& name, Value& v) { - Value* v2 = allocValue(); - *v2 = v; - staticBaseEnv.vars[symbols.Create(name)] = baseEnvDispl; - baseEnv.values[baseEnvDispl++] = v2; - std::string name2 = - std::string(name, 0, 2) == "__" ? 
std::string(name, 2) : name; - baseEnv.values[0]->attrs->push_back(Attr(symbols.Create(name2), v2)); - return v2; -} - -Value* EvalState::addPrimOp(const std::string& name, size_t arity, - PrimOpFun primOp) { - if (arity == 0) { - Value v; - primOp(*this, noPos, nullptr, v); - return addConstant(name, v); - } - std::string name2 = - std::string(name, 0, 2) == "__" ? std::string(name, 2) : name; - Symbol sym = symbols.Create(name2); - Value* v = allocValue(); - v->type = tPrimOp; - v->primOp = std::make_shared(primOp, arity, sym); - staticBaseEnv.vars[symbols.Create(name)] = baseEnvDispl; - baseEnv.values[baseEnvDispl++] = v; - baseEnv.values[0]->attrs->push_back(Attr(sym, v)); - return v; -} - -Value& EvalState::getBuiltin(const std::string& name) { - return *baseEnv.values[0]->attrs->find(symbols.Create(name))->second.value; -} - -/* Every "format" object (even temporary) takes up a few hundred bytes - of stack space, which is a real killer in the recursive - evaluator. So here are some helper functions for throwing - exceptions. */ - -LocalNoInlineNoReturn(void throwEvalError(const char* s, - const std::string& s2)) { - throw EvalError(format(s) % s2); -} - -LocalNoInlineNoReturn(void throwEvalError(const char* s, const std::string& s2, - const Pos& pos)) { - throw EvalError(format(s) % s2 % pos); -} - -LocalNoInlineNoReturn(void throwEvalError(const char* s, const std::string& s2, - const std::string& s3)) { - throw EvalError(format(s) % s2 % s3); -} - -LocalNoInlineNoReturn(void throwEvalError(const char* s, const std::string& s2, - const std::string& s3, - const Pos& pos)) { - throw EvalError(format(s) % s2 % s3 % pos); -} - -LocalNoInlineNoReturn(void throwEvalError(const char* s, const Symbol& sym, - const Pos& p1, const Pos& p2)) { - throw EvalError(format(s) % sym % p1 % p2); -} - -LocalNoInlineNoReturn(void throwTypeError(const char* s, const Pos& pos)) { - throw TypeError(format(s) % pos); -} - -LocalNoInlineNoReturn(void throwTypeError(const char* s, - const std::string& s1)) { - throw TypeError(format(s) % s1); -} - -LocalNoInlineNoReturn(void throwTypeError(const char* s, const ExprLambda& fun, - const Symbol& s2, const Pos& pos)) { - throw TypeError(format(s) % fun.showNamePos() % s2 % pos); -} - -LocalNoInlineNoReturn(void throwAssertionError(const char* s, - const std::string& s1, - const Pos& pos)) { - throw AssertionError(format(s) % s1 % pos); -} - -LocalNoInlineNoReturn(void throwUndefinedVarError(const char* s, - const std::string& s1, - const Pos& pos)) { - throw UndefinedVarError(format(s) % s1 % pos); -} - -LocalNoInline(void addErrorPrefix(Error& e, const char* s, - const std::string& s2)) { - e.addPrefix(format(s) % s2); -} - -LocalNoInline(void addErrorPrefix(Error& e, const char* s, - const ExprLambda& fun, const Pos& pos)) { - e.addPrefix(format(s) % fun.showNamePos() % pos); -} - -LocalNoInline(void addErrorPrefix(Error& e, const char* s, - const std::string& s2, const Pos& pos)) { - e.addPrefix(format(s) % s2 % pos); -} - -void mkString(Value& v, const char* s) { mkStringNoCopy(v, dupString(s)); } - -Value& mkString(Value& v, const std::string& s, const PathSet& context) { - mkString(v, s.c_str()); - if (!context.empty()) { - size_t n = 0; - v.string.context = static_cast( - allocBytes((context.size() + 1) * sizeof(char*))); - for (auto& i : context) { - v.string.context[n++] = dupString(i.c_str()); - } - v.string.context[n] = nullptr; - } - return v; -} - -void mkPath(Value& v, const char* s) { mkPathNoCopy(v, dupString(s)); } - -inline Value* 
EvalState::lookupVar(Env* env, const ExprVar& var, bool noEval) { - for (size_t l = var.level; l != 0u; --l, env = env->up) { - ; - } - - if (!var.fromWith) { - return env->values[var.displ]; - } - - while (true) { - if (env->type == Env::HasWithExpr) { - if (noEval) { - return nullptr; - } - if (!env->withAttrsExpr) { - CHECK(false) << "HasWithExpr evaluated twice"; - } - Value* v = allocValue(); - evalAttrs(*env->up, env->withAttrsExpr, *v); - env->values[0] = v; - env->withAttrsExpr = nullptr; - env->type = Env::HasWithAttrs; - } - Bindings::iterator j = env->values[0]->attrs->find(var.name); - if (j != env->values[0]->attrs->end()) { - if (countCalls && (j->second.pos != nullptr)) { - attrSelects[*j->second.pos]++; - } - return j->second.value; - } - if (env->prevWith == 0u) { - throwUndefinedVarError("undefined variable '%1%' at %2%", var.name, - var.pos); - } - for (size_t l = env->prevWith; l != 0u; --l, env = env->up) { - } - } -} - -Value* EvalState::allocValue() { - nrValues++; - return new Value; -} - -Env& EvalState::allocEnv(size_t size) { - if (size > std::numeric_limits::max()) { - throw Error("environment size %d is too big", size); - } - - nrEnvs++; - nrValuesInEnvs += size; - Env* env = new Env(size); - env->type = Env::Plain; - - return *env; -} - -void EvalState::mkList(Value& v, std::shared_ptr list) { - nrListElems += list->size(); - clearValue(v); - v.type = tList; - v.list = list; -} - -void EvalState::mkList(Value& v, size_t size) { - EvalState::mkList(v, std::make_shared(size)); -} - -unsigned long nrThunks = 0; - -static inline void mkThunk(Value& v, Env& env, Expr* expr) { - v.type = tThunk; - v.thunk.env = &env; - v.thunk.expr = expr; - nrThunks++; -} - -void EvalState::mkThunk_(Value& v, Expr* expr) { mkThunk(v, baseEnv, expr); } - -void EvalState::mkPos(Value& v, Pos* pos) { - if ((pos != nullptr) && pos->file.has_value() && pos->file.value().set()) { - mkAttrs(v, 3); - mkString(*allocAttr(v, sFile), pos->file.value()); - mkInt(*allocAttr(v, sLine), pos->line); - mkInt(*allocAttr(v, sColumn), pos->column); - } else { - mkNull(v); - } -} - -/* Create a thunk for the delayed computation of the given expression - in the given environment. But if the expression is a variable, - then look it up right away. This significantly reduces the number - of thunks allocated. */ -Value* Expr::maybeThunk(EvalState& state, Env& env) { - Value* v = state.allocValue(); - mkThunk(*v, env, this); - return v; -} - -unsigned long nrAvoided = 0; - -Value* ExprVar::maybeThunk(EvalState& state, Env& env) { - Value* v = state.lookupVar(&env, *this, true); - /* The value might not be initialised in the environment yet. - In that case, ignore it. 
*/ - if (v != nullptr) { - nrAvoided++; - return v; - } - return Expr::maybeThunk(state, env); -} - -Value* ExprString::maybeThunk(EvalState& state, Env& env) { - nrAvoided++; - return &v; -} - -Value* ExprInt::maybeThunk(EvalState& state, Env& env) { - nrAvoided++; - return &v; -} - -Value* ExprFloat::maybeThunk(EvalState& state, Env& env) { - nrAvoided++; - return &v; -} - -Value* ExprPath::maybeThunk(EvalState& state, Env& env) { - nrAvoided++; - return &v; -} - -void EvalState::evalFile(const Path& path_, Value& v) { - auto path = checkSourcePath(path_); - - FileEvalCache::iterator i; - if ((i = fileEvalCache.find(path)) != fileEvalCache.end()) { - v = i->second; - return; - } - - Path path2 = resolveExprPath(path); - if ((i = fileEvalCache.find(path2)) != fileEvalCache.end()) { - v = i->second; - return; - } - - VLOG(2) << "evaluating file '" << path2 << "'"; - Expr* e = nullptr; - - auto j = fileParseCache.find(path2); - if (j != fileParseCache.end()) { - e = j->second; - } - - if (e == nullptr) { - e = parseExprFromFile(checkSourcePath(path2)); - } - - fileParseCache[path2] = e; - - try { - eval(e, v); - } catch (Error& e) { - addErrorPrefix(e, "while evaluating the file '%1%':\n", path2); - throw; - } - - fileEvalCache[path2] = v; - if (path != path2) { - fileEvalCache[path] = v; - } -} - -void EvalState::resetFileCache() { - fileEvalCache.clear(); - fileParseCache.clear(); -} - -void EvalState::eval(Expr* e, Value& v) { e->eval(*this, baseEnv, v); } - -inline bool EvalState::evalBool(Env& env, Expr* e) { - Value v; - e->eval(*this, env, v); - if (v.type != tBool) { - throwTypeError("value is %1% while a Boolean was expected", v); - } - return v.boolean; -} - -inline bool EvalState::evalBool(Env& env, Expr* e, const Pos& pos) { - Value v; - e->eval(*this, env, v); - if (v.type != tBool) { - throwTypeError("value is %1% while a Boolean was expected, at %2%", v, pos); - } - return v.boolean; -} - -inline void EvalState::evalAttrs(Env& env, Expr* e, Value& v) { - e->eval(*this, env, v); - if (v.type != tAttrs) { - throwTypeError("value is %1% while a set was expected", v); - } -} - -void Expr::eval(EvalState& state, Env& env, Value& v) { abort(); } - -void ExprInt::eval(EvalState& state, Env& env, Value& v) { v = this->v; } - -void ExprFloat::eval(EvalState& state, Env& env, Value& v) { v = this->v; } - -void ExprString::eval(EvalState& state, Env& env, Value& v) { v = this->v; } - -void ExprPath::eval(EvalState& state, Env& env, Value& v) { v = this->v; } - -void ExprAttrs::eval(EvalState& state, Env& env, Value& value) { - state.mkAttrs(value, attrs.size() + dynamicAttrs.size()); - Env* dynamicEnv = &env; - - if (recursive) { - /* Create a new environment that contains the attributes in - this `rec'. */ - Env& env2(state.allocEnv(attrs.size())); - env2.up = &env; - dynamicEnv = &env2; - - /* The recursive attributes are evaluated in the new - environment, while the inherited attributes are evaluated - in the original environment. */ - size_t displ = 0; - for (auto& attr : attrs) { - Value* vAttr; - vAttr = - attr.second.e->maybeThunk(state, attr.second.inherited ? env : env2); - env2.values[displ++] = vAttr; - value.attrs->push_back(Attr(attr.first, vAttr, &attr.second.pos)); - } - } else { - // TODO(tazjin): insert range - for (auto& i : attrs) { - value.attrs->push_back( - Attr(i.first, i.second.e->maybeThunk(state, env), &i.second.pos)); - } - } - - /* Dynamic attrs apply *after* rec. 
*/ - for (auto& i : dynamicAttrs) { - Value nameVal; - i.nameExpr->eval(state, *dynamicEnv, nameVal); - state.forceValue(nameVal, i.pos); - if (nameVal.type == tNull) { - continue; - } - state.forceStringNoCtx(nameVal); - Symbol nameSym = state.symbols.Create(nameVal.string.s); - Bindings::iterator j = value.attrs->find(nameSym); - if (j != value.attrs->end()) { - throwEvalError("dynamic attribute '%1%' at %2% already defined at %3%", - nameSym, i.pos, *j->second.pos); - } - - value.attrs->push_back( - Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), &i.pos)); - } -} - -void ExprLet::eval(EvalState& state, Env& env, Value& v) { - /* Create a new environment that contains the attributes in this - `let'. */ - Env& env2(state.allocEnv(attrs->attrs.size())); - env2.up = &env; - - /* The recursive attributes are evaluated in the new environment, - while the inherited attributes are evaluated in the original - environment. */ - size_t displ = 0; - for (auto& i : attrs->attrs) { - env2.values[displ++] = - i.second.e->maybeThunk(state, i.second.inherited ? env : env2); - } - - body->eval(state, env2, v); -} - -void ExprList::eval(EvalState& state, Env& env, Value& v) { - state.mkList(v, elems.size()); - for (size_t n = 0; n < elems.size(); ++n) { - (*v.list)[n] = elems[n]->maybeThunk(state, env); - } -} - -void ExprVar::eval(EvalState& state, Env& env, Value& v) { - Value* v2 = state.lookupVar(&env, *this, false); - state.forceValue(*v2, pos); - v = *v2; -} - -static std::string showAttrPath(EvalState& state, Env& env, - const AttrPath& attrPath) { - std::ostringstream out; - bool first = true; - for (auto& i : attrPath) { - if (!first) { - out << '.'; - } else { - first = false; - } - out << getName(i, state, env); - } - return out.str(); -} - -uint64_t nrLookups = 0; - -void ExprSelect::eval(EvalState& state, Env& env, Value& v) { - Value vTmp; - Pos* pos2 = nullptr; - Value* vAttrs = &vTmp; - - e->eval(state, env, vTmp); - - try { - for (auto& i : attrPath) { - nrLookups++; - Bindings::iterator j; - Symbol name = getName(i, state, env); - if (def != nullptr) { - state.forceValue(*vAttrs, pos); - if (vAttrs->type != tAttrs || - (j = vAttrs->attrs->find(name)) == vAttrs->attrs->end()) { - def->eval(state, env, v); - return; - } - } else { - state.forceAttrs(*vAttrs, pos); - if ((j = vAttrs->attrs->find(name)) == vAttrs->attrs->end()) { - throwEvalError("attribute '%1%' missing, at %2%", name, pos); - } - } - vAttrs = j->second.value; - pos2 = j->second.pos; - if (state.countCalls && (pos2 != nullptr)) { - state.attrSelects[*pos2]++; - } - } - - state.forceValue(*vAttrs, (pos2 != nullptr ? *pos2 : this->pos)); - - } catch (Error& e) { - // This code relies on 'sDerivationNix' being correcty mutated at - // some prior point (it would previously otherwise have been a - // nullptr). - // - // We haven't seen this fail, so for now the contained value is - // just accessed at the risk of potentially crashing. 
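(Editorial illustration, not part of the removed file: ExprSelect::eval() above handles both plain selection, a.b.c, and selection with a fallback, a.b.c or def. A toy sketch of just that control flow over nested string-keyed maps; the names here are hypothetical and stand in for Value/Bindings.)

#include <iostream>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <vector>

struct Attrs {
    std::map<std::string, std::shared_ptr<Attrs>> children;
    std::optional<std::string> leaf;
};

// Walk the attribute path; if a component is missing and a default was
// supplied (the `or` case), return the default instead of failing.
std::optional<std::string> select(const Attrs& root,
                                  const std::vector<std::string>& path,
                                  std::optional<std::string> def) {
    const Attrs* cur = &root;
    for (const auto& name : path) {
        auto it = cur->children.find(name);
        if (it == cur->children.end()) {
            return def;
        }
        cur = it->second.get();
    }
    return cur->leaf ? cur->leaf : def;
}

int main() {
    auto b = std::make_shared<Attrs>();
    b->leaf = "hello";
    auto a = std::make_shared<Attrs>();
    a->children["b"] = b;
    Attrs root;
    root.children["a"] = a;

    std::cout << *select(root, {"a", "b"}, std::nullopt) << "\n";  // hello
    std::cout << *select(root, {"a", "x"}, "fallback") << "\n";    // fallback
    return 0;
}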
- if ((pos2 != nullptr) && pos2->file != state.sDerivationNix.value()) { - addErrorPrefix(e, "while evaluating the attribute '%1%' at %2%:\n", - showAttrPath(state, env, attrPath), *pos2); - } - throw; - } - - v = *vAttrs; -} - -void ExprOpHasAttr::eval(EvalState& state, Env& env, Value& v) { - Value vTmp; - Value* vAttrs = &vTmp; - - e->eval(state, env, vTmp); - - for (auto& i : attrPath) { - state.forceValue(*vAttrs); - Bindings::iterator j; - Symbol name = getName(i, state, env); - if (vAttrs->type != tAttrs || - (j = vAttrs->attrs->find(name)) == vAttrs->attrs->end()) { - mkBool(v, false); - return; - } - vAttrs = j->second.value; - } - - mkBool(v, true); -} - -void ExprLambda::eval(EvalState& state, Env& env, Value& v) { - v.type = tLambda; - v.lambda.env = &env; - v.lambda.fun = this; -} - -void ExprApp::eval(EvalState& state, Env& env, Value& v) { - /* FIXME: vFun prevents GCC from doing tail call optimisation. */ - Value vFun; - e1->eval(state, env, vFun); - state.callFunction(vFun, *(e2->maybeThunk(state, env)), v, pos); -} - -void EvalState::callPrimOp(Value& fun, Value& arg, Value& v, const Pos& pos) { - /* Figure out the number of arguments still needed. */ - size_t argsDone = 0; - Value* primOp = &fun; - while (primOp->type == tPrimOpApp) { - argsDone++; - primOp = primOp->primOpApp.left; - } - assert(primOp->type == tPrimOp); - auto arity = primOp->primOp->arity; - auto argsLeft = arity - argsDone; - - if (argsLeft == 1) { - /* We have all the arguments, so call the primop. */ - - /* Put all the arguments in an array. */ - Value* vArgs[arity]; - auto n = arity - 1; - vArgs[n--] = &arg; - for (Value* arg = &fun; arg->type == tPrimOpApp; - arg = arg->primOpApp.left) { - vArgs[n--] = arg->primOpApp.right; - } - - /* And call the primop. */ - nrPrimOpCalls++; - if (countCalls) { - primOpCalls[primOp->primOp->name]++; - } - primOp->primOp->fun(*this, pos, vArgs, v); - } else { - Value* fun2 = allocValue(); - *fun2 = fun; - v.type = tPrimOpApp; - v.primOpApp.left = fun2; - v.primOpApp.right = &arg; - } -} - -void EvalState::callFunction(Value& fun, Value& arg, Value& v, const Pos& pos) { - auto trace = evalSettings.traceFunctionCalls - ? std::make_unique(pos) - : nullptr; - - forceValue(fun, pos); - - if (fun.type == tPrimOp || fun.type == tPrimOpApp) { - callPrimOp(fun, arg, v, pos); - return; - } - - // If the value to be called is an attribute set, check whether it - // contains an appropriate function in the '__functor' element and - // use that. - if (fun.type == tAttrs) { - auto found = fun.attrs->find(sFunctor); - if (found != fun.attrs->end()) { - // fun may be allocated on the stack of the calling function, - // but for functors we may keep a reference, so heap-allocate a - // copy and use that instead - auto& fun2 = *allocValue(); - fun2 = fun; - /* !!! Should we use the attr pos here? */ - Value v2; - // functors are called with the element itself as the first - // parameter, which is partially applied here - callFunction(*found->second.value, fun2, v2, pos); - return callFunction(v2, arg, v, pos); - } - } - - if (fun.type != tLambda) { - throwTypeError( - "attempt to call something which is not a function but %1%, at %2%", - fun, pos); - } - - ExprLambda& lambda(*fun.lambda.fun); - - auto size = (lambda.arg.empty() ? 0 : 1) + - (lambda.matchAttrs ? 
lambda.formals->formals.size() : 0); - Env& env2(allocEnv(size)); - env2.up = fun.lambda.env; - - size_t displ = 0; - - if (!lambda.matchAttrs) { - env2.values[displ++] = &arg; - - } else { - forceAttrs(arg, pos); - - if (!lambda.arg.empty()) { - env2.values[displ++] = &arg; - } - - /* For each formal argument, get the actual argument. If - there is no matching actual argument but the formal - argument has a default, use the default. */ - size_t attrsUsed = 0; - for (auto& i : lambda.formals->formals) { - Bindings::iterator j = arg.attrs->find(i.name); - if (j == arg.attrs->end()) { - if (i.def == nullptr) { - throwTypeError("%1% called without required argument '%2%', at %3%", - lambda, i.name, pos); - } - env2.values[displ++] = i.def->maybeThunk(*this, env2); - } else { - attrsUsed++; - env2.values[displ++] = j->second.value; - } - } - - /* Check that each actual argument is listed as a formal - argument (unless the attribute match specifies a `...'). */ - if (!lambda.formals->ellipsis && attrsUsed != arg.attrs->size()) { - /* Nope, so show the first unexpected argument to the - user. */ - for (auto& i : *arg.attrs) { - if (lambda.formals->argNames.find(i.second.name) == - lambda.formals->argNames.end()) { - throwTypeError("%1% called with unexpected argument '%2%', at %3%", - lambda, i.second.name, pos); - } - } - abort(); // shouldn't happen - } - } - - nrFunctionCalls++; - if (countCalls) { - incrFunctionCall(&lambda); - } - - /* Evaluate the body. This is conditional on showTrace, because - catching exceptions makes this function not tail-recursive. */ - if (settings.showTrace) { - try { - lambda.body->eval(*this, env2, v); - } catch (Error& e) { - addErrorPrefix(e, "while evaluating %1%, called from %2%:\n", lambda, - pos); - throw; - } - } else { - fun.lambda.fun->body->eval(*this, env2, v); - } -} - -// Lifted out of callFunction() because it creates a temporary that -// prevents tail-call optimisation. 
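Editor's note: the formal-argument handling in callFunction above binds each declared formal to either the caller's value or its default, and rejects any actual argument that is not a declared formal unless the formals end in an ellipsis. A simplified sketch of that matching step using plain standard-library maps; `Formal` and `matchFormals` are hypothetical illustration names, not the evaluator's Env/Bindings machinery.

#include <cstddef>
#include <map>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

struct Formal {
  std::string name;
  std::optional<int> def;   // default value, if any
};

std::map<std::string, int> matchFormals(const std::vector<Formal>& formals,
                                        bool ellipsis,
                                        const std::map<std::string, int>& args) {
  std::map<std::string, int> env;
  std::size_t used = 0;

  for (const auto& f : formals) {
    auto it = args.find(f.name);
    if (it == args.end()) {
      if (!f.def)
        throw std::runtime_error("missing required argument '" + f.name + "'");
      env[f.name] = *f.def;                    // fall back to the declared default
    } else {
      env[f.name] = it->second;
      ++used;
    }
  }

  // Unless the formals end in `...`, every actual argument must be declared.
  if (!ellipsis && used != args.size()) {
    for (const auto& kv : args) {
      bool declared = false;
      for (const auto& f : formals)
        if (f.name == kv.first) { declared = true; break; }
      if (!declared)
        throw std::runtime_error("unexpected argument '" + kv.first + "'");
    }
  }
  return env;
}

int main() {
  // `x` is required, `y` defaults to 5; the call supplies only `x`.
  auto env = matchFormals({{"x", std::nullopt}, {"y", 5}}, false, {{"x", 1}});
  return env["x"] + env["y"] == 6 ? 0 : 1;
}

As in the original, the unexpected-argument scan only runs when the count of consumed arguments differs from the number supplied, so the common all-matching case stays on the cheap path.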
-void EvalState::incrFunctionCall(ExprLambda* fun) { functionCalls[fun]++; } - -void EvalState::autoCallFunction(Bindings* args, Value& fun, Value& res) { - forceValue(fun); - - if (fun.type == tAttrs) { - auto found = fun.attrs->find(sFunctor); - if (found != fun.attrs->end()) { - Value* v = allocValue(); - callFunction(*found->second.value, fun, *v, noPos); - forceValue(*v); - return autoCallFunction(args, *v, res); - } - } - - if (fun.type != tLambda || !fun.lambda.fun->matchAttrs) { - res = fun; - return; - } - - Value* actualArgs = allocValue(); - mkAttrs(*actualArgs, fun.lambda.fun->formals->formals.size()); - - if (fun.lambda.fun->formals->ellipsis) { - // If the formals have an ellipsis (eg the function accepts extra args) pass - // all available automatic arguments (which includes arguments specified on - // the command line via --arg/--argstr) - for (auto& [_, v] : *args) { - actualArgs->attrs->push_back(v); - } - } else { - // Otherwise, only pass the arguments that the function accepts - for (auto& i : fun.lambda.fun->formals->formals) { - Bindings::iterator j = args->find(i.name); - if (j != args->end()) { - actualArgs->attrs->push_back(j->second); - } else if (i.def == nullptr) { - throwTypeError( - "cannot auto-call a function that has an argument without a " - "default " - "value ('%1%')", - i.name); - } - } - } - - callFunction(fun, *actualArgs, res, noPos); -} - -void ExprWith::eval(EvalState& state, Env& env, Value& v) { - Env& env2(state.allocEnv(1)); - env2.up = &env; - env2.prevWith = prevWith; - env2.type = Env::HasWithExpr; - /* placeholder for result of attrs */ - env2.values[0] = nullptr; - env2.withAttrsExpr = this->attrs; - - body->eval(state, env2, v); -} - -void ExprIf::eval(EvalState& state, Env& env, Value& v) { - (state.evalBool(env, cond) ? 
then : else_)->eval(state, env, v); -} - -void ExprAssert::eval(EvalState& state, Env& env, Value& v) { - if (!state.evalBool(env, cond, pos)) { - std::ostringstream out; - cond->show(out); - throwAssertionError("assertion %1% failed at %2%", out.str(), pos); - } - body->eval(state, env, v); -} - -void ExprOpNot::eval(EvalState& state, Env& env, Value& v) { - mkBool(v, !state.evalBool(env, e)); -} - -void ExprOpEq::eval(EvalState& state, Env& env, Value& v) { - Value v1; - e1->eval(state, env, v1); - Value v2; - e2->eval(state, env, v2); - mkBool(v, state.eqValues(v1, v2)); -} - -void ExprOpNEq::eval(EvalState& state, Env& env, Value& v) { - Value v1; - e1->eval(state, env, v1); - Value v2; - e2->eval(state, env, v2); - mkBool(v, !state.eqValues(v1, v2)); -} - -void ExprOpAnd::eval(EvalState& state, Env& env, Value& v) { - mkBool(v, state.evalBool(env, e1, pos) && state.evalBool(env, e2, pos)); -} - -void ExprOpOr::eval(EvalState& state, Env& env, Value& v) { - mkBool(v, state.evalBool(env, e1, pos) || state.evalBool(env, e2, pos)); -} - -void ExprOpImpl::eval(EvalState& state, Env& env, Value& v) { - mkBool(v, !state.evalBool(env, e1, pos) || state.evalBool(env, e2, pos)); -} - -void ExprOpUpdate::eval(EvalState& state, Env& env, Value& dest) { - Value v1; - Value v2; - state.evalAttrs(env, e1, v1); - state.evalAttrs(env, e2, v2); - - state.nrOpUpdates++; - - clearValue(dest); - dest.type = tAttrs; - dest.attrs = Bindings::Merge(*v1.attrs, *v2.attrs); -} - -void ExprOpConcatLists::eval(EvalState& state, Env& env, Value& v) { - Value v1; - e1->eval(state, env, v1); - Value v2; - e2->eval(state, env, v2); - state.concatLists(v, {&v1, &v2}, pos); -} - -void EvalState::concatLists(Value& v, const NixList& lists, const Pos& pos) { - nrListConcats++; - - auto outlist = std::make_shared(); - - for (Value* list : lists) { - forceList(*list, pos); - outlist->insert(outlist->end(), list->list->begin(), list->list->end()); - } - - mkList(v, outlist); -} - -void ExprConcatStrings::eval(EvalState& state, Env& env, Value& v) { - PathSet context; - std::ostringstream s; - NixInt n = 0; - NixFloat nf = 0; - - bool first = !forceString; - ValueType firstType = tString; - - for (auto& i : *es) { - Value vTmp; - i->eval(state, env, vTmp); - - /* If the first element is a path, then the result will also - be a path, we don't copy anything (yet - that's done later, - since paths are copied when they are used in a derivation), - and none of the strings are allowed to have contexts. 
*/ - if (first) { - firstType = vTmp.type; - first = false; - } - - if (firstType == tInt) { - if (vTmp.type == tInt) { - n += vTmp.integer; - } else if (vTmp.type == tFloat) { - // Upgrade the type from int to float; - firstType = tFloat; - nf = n; - nf += vTmp.fpoint; - } else { - throwEvalError("cannot add %1% to an integer, at %2%", showType(vTmp), - pos); - } - } else if (firstType == tFloat) { - if (vTmp.type == tInt) { - nf += vTmp.integer; - } else if (vTmp.type == tFloat) { - nf += vTmp.fpoint; - } else { - throwEvalError("cannot add %1% to a float, at %2%", showType(vTmp), - pos); - } - } else { - s << state.coerceToString(pos, vTmp, context, false, - firstType == tString); - } - } - - if (firstType == tInt) { - mkInt(v, n); - } else if (firstType == tFloat) { - mkFloat(v, nf); - } else if (firstType == tPath) { - if (!context.empty()) { - throwEvalError( - "a string that refers to a store path cannot be appended to a path, " - "at %1%", - pos); - } - auto path = canonPath(s.str()); - mkPath(v, path.c_str()); - } else { - mkString(v, s.str(), context); - } -} - -void ExprPos::eval(EvalState& state, Env& env, Value& v) { - state.mkPos(v, &pos); -} - -template -using traceable_flat_hash_set = absl::flat_hash_set; - -void EvalState::forceValueDeep(Value& v) { - traceable_flat_hash_set seen; - - std::function recurse; - - recurse = [&](Value& v) { - if (seen.find(&v) != seen.end()) { - return; - } - seen.insert(&v); - - forceValue(v); - - if (v.type == tAttrs) { - for (auto& i : *v.attrs) { - try { - recurse(*i.second.value); - } catch (Error& e) { - addErrorPrefix(e, "while evaluating the attribute '%1%' at %2%:\n", - i.second.name, *i.second.pos); - throw; - } - } - } else if (v.isList()) { - for (size_t n = 0; n < v.listSize(); ++n) { - recurse(*(*v.list)[n]); - } - } - }; - - recurse(v); -} - -NixInt EvalState::forceInt(Value& v, const Pos& pos) { - forceValue(v, pos); - if (v.type != tInt) { - throwTypeError("value is %1% while an integer was expected, at %2%", v, - pos); - } - return v.integer; -} - -NixFloat EvalState::forceFloat(Value& v, const Pos& pos) { - forceValue(v, pos); - if (v.type == tInt) { - return static_cast(v.integer); - } - if (v.type != tFloat) { - throwTypeError("value is %1% while a float was expected, at %2%", v, pos); - } - return v.fpoint; -} - -bool EvalState::forceBool(Value& v, const Pos& pos) { - forceValue(v); - if (v.type != tBool) { - throwTypeError("value is %1% while a Boolean was expected, at %2%", v, pos); - } - return v.boolean; -} - -bool EvalState::isFunctor(Value& fun) { - return fun.type == tAttrs && fun.attrs->find(sFunctor) != fun.attrs->end(); -} - -void EvalState::forceFunction(Value& v, const Pos& pos) { - forceValue(v); - if (v.type != tLambda && v.type != tPrimOp && v.type != tPrimOpApp && - !isFunctor(v)) { - throwTypeError("value is %1% while a function was expected, at %2%", v, - pos); - } -} - -std::string EvalState::forceString(Value& v, const Pos& pos) { - forceValue(v, pos); - if (v.type != tString) { - if (pos) { - throwTypeError("value is %1% while a string was expected, at %2%", v, - pos); - } else { - throwTypeError("value is %1% while a string was expected", v); - } - } - return std::string(v.string.s); -} - -void copyContext(const Value& v, PathSet& context) { - if (v.string.context != nullptr) { - for (const char** p = v.string.context; *p != nullptr; ++p) { - context.insert(*p); - } - } -} - -std::string EvalState::forceString(Value& v, PathSet& context, const Pos& pos) { - std::string s = forceString(v, pos); - 
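Editor's note: the concatenation logic in ExprConcatStrings::eval above follows a first-operand promotion rule: an integer result stays integral until a float appears (then the running sum is upgraded), a float result stays a float, and anything else is treated as string concatenation. A compact standalone sketch of that rule, assuming a hypothetical `concatPieces` over a std::variant; the real evaluator additionally handles paths and string contexts, which are omitted here.

#include <iostream>
#include <stdexcept>
#include <string>
#include <variant>
#include <vector>

using Piece = std::variant<long, double, std::string>;

// The first element decides the result type.
Piece concatPieces(const std::vector<Piece>& pieces) {
  if (pieces.empty()) return std::string{};

  if (std::holds_alternative<std::string>(pieces.front())) {
    std::string out;
    for (const auto& p : pieces) {
      const auto* s = std::get_if<std::string>(&p);
      if (s == nullptr)
        throw std::runtime_error("cannot concatenate a number with a string");
      out += *s;
    }
    return out;
  }

  long n = 0;
  double nf = 0;
  bool isFloat = std::holds_alternative<double>(pieces.front());
  for (const auto& p : pieces) {
    if (const auto* i = std::get_if<long>(&p)) {
      if (isFloat) nf += *i; else n += *i;
    } else if (const auto* f = std::get_if<double>(&p)) {
      if (!isFloat) { isFloat = true; nf = n; }   // upgrade int -> float
      nf += *f;
    } else {
      throw std::runtime_error("cannot add a string to a number");
    }
  }
  if (isFloat) return nf;
  return n;
}

int main() {
  Piece r = concatPieces({1L, 2L, 0.5});
  std::cout << std::get<double>(r) << "\n";   // prints 3.5: int sum upgraded to float
}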
copyContext(v, context); - return s; -} - -std::string EvalState::forceStringNoCtx(Value& v, const Pos& pos) { - std::string s = forceString(v, pos); - if (v.string.context != nullptr) { - if (pos) { - throwEvalError( - "the string '%1%' is not allowed to refer to a store path (such as " - "'%2%'), at %3%", - v.string.s, v.string.context[0], pos); - } else { - throwEvalError( - "the string '%1%' is not allowed to refer to a store path (such as " - "'%2%')", - v.string.s, v.string.context[0]); - } - } - return s; -} - -bool EvalState::isDerivation(Value& v) { - if (v.type != tAttrs) { - return false; - } - Bindings::iterator i = v.attrs->find(sType); - if (i == v.attrs->end()) { - return false; - } - forceValue(*i->second.value); - if (i->second.value->type != tString) { - return false; - } - return strcmp(i->second.value->string.s, "derivation") == 0; -} - -std::optional EvalState::tryAttrsToString(const Pos& pos, Value& v, - PathSet& context, - bool coerceMore, - bool copyToStore) { - auto i = v.attrs->find(sToString); - if (i != v.attrs->end()) { - Value v1; - callFunction(*i->second.value, v, v1, pos); - return coerceToString(pos, v1, context, coerceMore, copyToStore); - } - - return {}; -} - -std::string EvalState::coerceToString(const Pos& pos, Value& v, - PathSet& context, bool coerceMore, - bool copyToStore) { - forceValue(v); - - std::string s; - - if (v.type == tString) { - copyContext(v, context); - return v.string.s; - } - - if (v.type == tPath) { - Path path(canonPath(v.path)); - return copyToStore ? copyPathToStore(context, path) : path; - } - - if (v.type == tAttrs) { - auto maybeString = - tryAttrsToString(pos, v, context, coerceMore, copyToStore); - if (maybeString) { - return *maybeString; - } - auto i = v.attrs->find(sOutPath); - if (i == v.attrs->end()) { - throwTypeError("cannot coerce a set to a string, at %1%", pos); - } - return coerceToString(pos, *i->second.value, context, coerceMore, - copyToStore); - } - - if (coerceMore) { - /* Note that `false' is represented as an empty string for - shell scripting convenience, just like `null'. */ - if (v.type == tBool && v.boolean) { - return "1"; - } - if (v.type == tBool && !v.boolean) { - return ""; - } - if (v.type == tInt) { - return std::to_string(v.integer); - } - if (v.type == tFloat) { - return std::to_string(v.fpoint); - } - if (v.type == tNull) { - return ""; - } - - if (v.isList()) { - std::string result; - for (size_t n = 0; n < v.listSize(); ++n) { - result += coerceToString(pos, *(*v.list)[n], context, coerceMore, - copyToStore); - if (n < v.listSize() - 1 - /* !!! not quite correct */ - && (!(*v.list)[n]->isList() || (*v.list)[n]->listSize() != 0)) { - result += " "; - } - } - return result; - } - } - - throwTypeError("cannot coerce %1% to a string, at %2%", v, pos); -} - -std::string EvalState::copyPathToStore(PathSet& context, const Path& path) { - if (nix::isDerivation(path)) { - throwEvalError("file names are not allowed to end in '%1%'", drvExtension); - } - - Path dstPath; - if (!srcToStore[path].empty()) { - dstPath = srcToStore[path]; - } else { - dstPath = - settings.readOnlyMode - ? 
store - ->computeStorePathForPath(baseNameOf(path), - checkSourcePath(path)) - .first - : store->addToStore(baseNameOf(path), checkSourcePath(path), true, - htSHA256, defaultPathFilter, repair); - srcToStore[path] = dstPath; - VLOG(2) << "copied source '" << path << "' -> '" << dstPath << "'"; - } - - context.insert(dstPath); - return dstPath; -} - -Path EvalState::coerceToPath(const Pos& pos, Value& v, PathSet& context) { - std::string path = coerceToString(pos, v, context, false, false); - if (path.empty() || path[0] != '/') { - throwEvalError("string '%1%' doesn't represent an absolute path, at %2%", - path, pos); - } - return path; -} - -bool EvalState::eqValues(Value& v1, Value& v2) { - forceValue(v1); - forceValue(v2); - - /* !!! Hack to support some old broken code that relies on pointer - equality tests between sets. (Specifically, builderDefs calls - uniqList on a list of sets.) Will remove this eventually. */ - if (&v1 == &v2) { - return true; - } - - // Special case type-compatibility between float and int - if (v1.type == tInt && v2.type == tFloat) { - return v1.integer == v2.fpoint; - } - if (v1.type == tFloat && v2.type == tInt) { - return v1.fpoint == v2.integer; - } - - // All other types are not compatible with each other. - if (v1.type != v2.type) { - return false; - } - - switch (v1.type) { - case tInt: - return v1.integer == v2.integer; - - case tBool: - return v1.boolean == v2.boolean; - - case tString: - return strcmp(v1.string.s, v2.string.s) == 0; - - case tPath: - return strcmp(v1.path, v2.path) == 0; - - case tNull: - return true; - - case tList: - if (v1.listSize() != v2.listSize()) { - return false; - } - for (size_t n = 0; n < v1.listSize(); ++n) { - if (!eqValues(*(*v1.list)[n], *(*v2.list)[n])) { - return false; - } - } - return true; - - case tAttrs: { - // As an optimisation if both values are pointing towards the - // same attribute set, we can skip all this extra work. - if (v1.attrs == v2.attrs) { - return true; - } - - /* If both sets denote a derivation (type = "derivation"), - then compare their outPaths. */ - if (isDerivation(v1) && isDerivation(v2)) { - Bindings::iterator i = v1.attrs->find(sOutPath); - Bindings::iterator j = v2.attrs->find(sOutPath); - if (i != v1.attrs->end() && j != v2.attrs->end()) { - return eqValues(*i->second.value, *j->second.value); - } - } - - return v1.attrs->Equal(v2.attrs.get(), *this); - } - - /* Functions are incomparable. */ - case tLambda: - case tPrimOp: - case tPrimOpApp: - return false; - - case tFloat: - return v1.fpoint == v2.fpoint; - - default: - throwEvalError("cannot compare %1% with %2%", showType(v1), showType(v2)); - } -} - -void EvalState::printStats() { - bool showStats = getEnv("NIX_SHOW_STATS").value_or("0") != "0"; - - struct rusage buf; - getrusage(RUSAGE_SELF, &buf); - float cpuTime = buf.ru_utime.tv_sec + - (static_cast(buf.ru_utime.tv_usec) / 1000000); - - uint64_t bEnvs = nrEnvs * sizeof(Env) + nrValuesInEnvs * sizeof(Value*); - uint64_t bLists = nrListElems * sizeof(Value*); - uint64_t bValues = nrValues * sizeof(Value); - uint64_t bAttrsets = - nrAttrsets * sizeof(Bindings) + nrAttrsInAttrsets * sizeof(Attr); - - if (showStats) { - auto outPath = getEnv("NIX_SHOW_STATS_PATH").value_or("-"); - std::fstream fs; - if (outPath != "-") { - fs.open(outPath, std::fstream::out); - } - JSONObject topObj(outPath == "-" ? 
std::cerr : fs, true); - topObj.attr("cpuTime", cpuTime); - { - auto envs = topObj.object("envs"); - envs.attr("number", nrEnvs); - envs.attr("elements", nrValuesInEnvs); - envs.attr("bytes", bEnvs); - } - { - auto lists = topObj.object("list"); - lists.attr("elements", nrListElems); - lists.attr("bytes", bLists); - lists.attr("concats", nrListConcats); - } - { - auto values = topObj.object("values"); - values.attr("number", nrValues); - values.attr("bytes", bValues); - } - { - auto syms = topObj.object("symbols"); - syms.attr("number", symbols.Size()); - syms.attr("bytes", symbols.TotalSize()); - } - { - auto sets = topObj.object("sets"); - sets.attr("number", nrAttrsets); - sets.attr("bytes", bAttrsets); - sets.attr("elements", nrAttrsInAttrsets); - } - { - auto sizes = topObj.object("sizes"); - sizes.attr("Env", sizeof(Env)); - sizes.attr("Value", sizeof(Value)); - sizes.attr("Bindings", sizeof(Bindings)); - sizes.attr("Attr", sizeof(Attr)); - } - topObj.attr("nrOpUpdates", nrOpUpdates); - topObj.attr("nrOpUpdateValuesCopied", nrOpUpdateValuesCopied); - topObj.attr("nrThunks", nrThunks); - topObj.attr("nrAvoided", nrAvoided); - topObj.attr("nrLookups", nrLookups); - topObj.attr("nrPrimOpCalls", nrPrimOpCalls); - topObj.attr("nrFunctionCalls", nrFunctionCalls); - - if (countCalls) { - { - auto obj = topObj.object("primops"); - for (auto& i : primOpCalls) { - obj.attr(i.first, i.second); - } - } - { - auto list = topObj.list("functions"); - for (auto& i : functionCalls) { - auto obj = list.object(); - if (i.first->name.has_value()) { - obj.attr("name", (const std::string&)i.first->name.value()); - } else { - obj.attr("name", nullptr); - } - if (i.first->pos) { - obj.attr("file", (const std::string&)i.first->pos.file); - obj.attr("line", i.first->pos.line); - obj.attr("column", i.first->pos.column); - } - obj.attr("count", i.second); - } - } - { - auto list = topObj.list("attributes"); - for (auto& i : attrSelects) { - auto obj = list.object(); - if (i.first) { - obj.attr("file", (const std::string&)i.first.file); - obj.attr("line", i.first.line); - obj.attr("column", i.first.column); - } - obj.attr("count", i.second); - } - } - } - - // TODO(tazjin): what is this? commented out because .dump() is gone. - // if (getEnv("NIX_SHOW_SYMBOLS", "0") != "0") { - // auto list = topObj.list("symbols"); - // symbols.dump([&](const std::string& s) { list.elem(s); }); - // } - } -} - -void EvalState::TraceFileAccess(const Path& realPath) { - if (file_access_trace_fn) { - if (last_traced_file != realPath) { - file_access_trace_fn(realPath); - // Basic deduplication. 
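Editor's note: the assignment that follows implements the deduplication mentioned in the comment above it, so the trace callback fires only when the accessed path differs from the last one reported. A standalone sketch of the same callback-plus-dedup pattern; `FileTracer` is a hypothetical illustration class, not the EvalState member functions.

#include <functional>
#include <iostream>
#include <string>

// Sketch of a file-access tracer that suppresses immediately repeated paths,
// mirroring the last_traced_file check in EvalState::TraceFileAccess.
class FileTracer {
 public:
  void enable(std::function<void(const std::string&)> fn) { trace_fn_ = std::move(fn); }

  void access(const std::string& path) {
    if (!trace_fn_) return;              // tracing disabled
    if (path == last_) return;           // basic deduplication of consecutive repeats
    trace_fn_(path);
    last_ = path;
  }

 private:
  std::function<void(const std::string&)> trace_fn_;
  std::string last_;
};

int main() {
  FileTracer tracer;
  tracer.enable([](const std::string& p) { std::cout << "access: " << p << "\n"; });
  tracer.access("/tmp/a.nix");
  tracer.access("/tmp/a.nix");           // suppressed by the dedup check
  tracer.access("/tmp/b.nix");
}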
- last_traced_file = std::string(realPath); - } - } -} - -void EvalState::EnableFileAccessTracing(std::function fn) { - file_access_trace_fn = fn; -} - -size_t valueSize(const Value& v) { - traceable_flat_hash_set seenBindings; - traceable_flat_hash_set seenEnvs; - traceable_flat_hash_set seenLists; - traceable_flat_hash_set seenStrings; - traceable_flat_hash_set seenValues; - - auto doString = [&](const char* s) -> size_t { - if (seenStrings.find(s) != seenStrings.end()) { - return 0; - } - seenStrings.insert(s); - return strlen(s) + 1; - }; - - std::function doValue; - std::function doEnv; - - doValue = [&](const Value& v) -> size_t { - if (seenValues.find(&v) != seenValues.end()) { - return 0; - } - seenValues.insert(&v); - - size_t sz = sizeof(Value); - - switch (v.type) { - case tString: - sz += doString(v.string.s); - if (v.string.context != nullptr) { - for (const char** p = v.string.context; *p != nullptr; ++p) { - sz += doString(*p); - } - } - break; - case tPath: - sz += doString(v.path); - break; - case tAttrs: - if (seenBindings.find(v.attrs.get()) == seenBindings.end()) { - seenBindings.insert(v.attrs.get()); - sz += sizeof(Bindings); - for (const auto& i : *v.attrs) { - sz += doValue(*i.second.value); - } - } - break; - case tList: - if (seenLists.find(v.list.get()) == seenLists.end()) { - seenLists.insert(v.list.get()); - sz += v.listSize() * sizeof(Value*); - for (const Value* v : *v.list) { - sz += doValue(*v); - } - } - break; - case tThunk: - sz += doEnv(*v.thunk.env); - break; - case tApp: - sz += doValue(*v.app.left); - sz += doValue(*v.app.right); - break; - case tLambda: - sz += doEnv(*v.lambda.env); - break; - case tPrimOpApp: - sz += doValue(*v.primOpApp.left); - sz += doValue(*v.primOpApp.right); - break; - default:; - } - - return sz; - }; - - doEnv = [&](const Env& env) -> size_t { - if (seenEnvs.find(&env) != seenEnvs.end()) { - return 0; - } - seenEnvs.insert(&env); - - size_t sz = sizeof(Env) + sizeof(Value*) * env.size; - - if (env.type != Env::HasWithExpr) { - for (const Value* v : env.values) { - if (v != nullptr) { - sz += doValue(*v); - } - } - } else { - // TODO(kanepyork): trace ExprWith? how important is this accounting? - } - - if (env.up != nullptr) { - sz += doEnv(*env.up); - } - - return sz; - }; - - return doValue(v); -} - -EvalSettings evalSettings; - -static GlobalConfig::Register r1(&evalSettings); - -} // namespace nix diff --git a/third_party/nix/src/libexpr/eval.hh b/third_party/nix/src/libexpr/eval.hh deleted file mode 100644 index 0352a89a2a..0000000000 --- a/third_party/nix/src/libexpr/eval.hh +++ /dev/null @@ -1,365 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#include "libexpr/attr-set.hh" -#include "libexpr/nixexpr.hh" -#include "libexpr/symbol-table.hh" -#include "libexpr/value.hh" -#include "libutil/config.hh" -#include "libutil/hash.hh" - -namespace nix { -namespace expr { - -// Initialise the Boehm GC once per program instance. This should be -// called in places that require the garbage collector. 
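Editor's note: the valueSize helper above (in eval.cc) traverses a potentially cyclic graph of values and environments while counting every node at most once, using per-kind "seen" sets so that shared bindings, lists, and strings are not double-counted. A cut-down sketch of that cycle-safe accounting over a toy graph; `Node` and `graphSize` are hypothetical illustration names, not the evaluator's Value/Env types.

#include <functional>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

// A toy graph node: some payload plus edges that may form cycles.
struct Node {
  std::string payload;
  std::vector<const Node*> children;
};

// Count each reachable node's size exactly once, even in the presence of
// cycles, by remembering which nodes were already visited.
size_t graphSize(const Node& root) {
  std::unordered_set<const Node*> seen;
  size_t total = 0;

  std::function<void(const Node&)> visit = [&](const Node& n) {
    if (!seen.insert(&n).second) return;       // already accounted for
    total += sizeof(Node) + n.payload.size();
    for (const Node* c : n.children) visit(*c);
  };

  visit(root);
  return total;
}

int main() {
  Node a{"a", {}};
  Node b{"b", {&a}};
  a.children.push_back(&b);                    // introduce a cycle
  std::cout << graphSize(a) << "\n";           // each node still counted once
}

The original keeps separate sets per pointer kind only because Values, Envs, Bindings, lists, and strings live in different allocations; the visited-set idea is the same.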
-void InitGC(); - -} // namespace expr - -class Store; -class EvalState; -enum RepairFlag : bool; - -typedef void (*PrimOpFun)(EvalState& state, const Pos& pos, Value** args, - Value& v); - -struct PrimOp { - PrimOpFun fun; - size_t arity; - Symbol name; - PrimOp(PrimOpFun fun, size_t arity, Symbol name) - : fun(fun), arity(arity), name(name) {} -}; - -struct Env { - Env(unsigned short size) : size(size) { values = std::vector(size); } - - Env* up; - unsigned short size; // used by ‘valueSize’ - unsigned short prevWith : 14; // nr of levels up to next `with' environment - enum { Plain = 0, HasWithExpr, HasWithAttrs } type : 2; - std::vector values; - Expr* withAttrsExpr = nullptr; -}; - -Value& mkString(Value& v, const std::string& s, - const PathSet& context = PathSet()); - -void copyContext(const Value& v, PathSet& context); - -/* Cache for calls to addToStore(); maps source paths to the store - paths. */ -using SrcToStore = std::map; - -std::ostream& operator<<(std::ostream& str, const Value& v); - -using SearchPathElem = std::pair; -using SearchPath = std::list; - -using FileParseCache = std::map; - -class EvalState { - public: - SymbolTable symbols; - - const Symbol sWith, sOutPath, sDrvPath, sType, sMeta, sName, sValue, sSystem, - sOutputs, sOutputName, sIgnoreNulls, sFile, sLine, sColumn, sFunctor, - sToString, sRight, sWrong, sStructuredAttrs, sBuilder, sArgs, sOutputHash, - sOutputHashAlgo, sOutputHashMode; - - // Symbol representing the path to the built-in 'derivation.nix' - // file, set during primops initialisation. - std::optional sDerivationNix; - - /* If set, force copying files to the Nix store even if they - already exist there. */ - RepairFlag repair; - - /* The allowed filesystem paths in restricted or pure evaluation - mode. */ - std::optional allowedPaths; - - const ref store; - - private: - SrcToStore srcToStore; - - /* A cache from path names to parse trees. */ - FileParseCache fileParseCache; - - /* A cache from path names to values. */ - using FileEvalCache = std::map; - FileEvalCache fileEvalCache; - - SearchPath searchPath; - - std::map> searchPathResolved; - - /* Cache used by checkSourcePath(). */ - std::unordered_map resolvedPaths; - - public: - EvalState(const Strings& _searchPath, const ref& store); - ~EvalState(); - - void addToSearchPath(const std::string& s); - - SearchPath getSearchPath() { return searchPath; } - - Path checkSourcePath(const Path& path); - - void checkURI(const std::string& uri); - - /* When using a diverted store and 'path' is in the Nix store, map - 'path' to the diverted location (e.g. /nix/store/foo is mapped - to /home/alice/my-nix/nix/store/foo). However, this is only - done if the context is not empty, since otherwise we're - probably trying to read from the actual /nix/store. This is - intended to distinguish between import-from-derivation and - sources stored in the actual /nix/store. */ - Path toRealPath(const Path& path, const PathSet& context); - - /* Parse a Nix expression from the specified file. */ - Expr* parseExprFromFile(const Path& path); - Expr* parseExprFromFile(const Path& path, StaticEnv& staticEnv); - - /* Parse a Nix expression from the specified string. */ - Expr* parseExprFromString(const std::string& s, const Path& basePath, - StaticEnv& staticEnv); - Expr* parseExprFromString(const std::string& s, const Path& basePath); - - Expr* parseStdin(); - - /* Evaluate an expression read from the given file to normal - form. 
*/ - void evalFile(const Path& path, Value& v); - - void resetFileCache(); - - /* Look up a file in the search path. */ - Path findFile(const std::string& path); - Path findFile(SearchPath& searchPath, const std::string& path, - const Pos& pos = noPos); - - /* If the specified search path element is a URI, download it. */ - std::pair resolveSearchPathElem( - const SearchPathElem& elem); - - /* Evaluate an expression to normal form, storing the result in - value `v'. */ - void eval(Expr* e, Value& v); - - /* Evaluation the expression, then verify that it has the expected - type. */ - inline bool evalBool(Env& env, Expr* e); - inline bool evalBool(Env& env, Expr* e, const Pos& pos); - inline void evalAttrs(Env& env, Expr* e, Value& v); - - /* If `v' is a thunk, enter it and overwrite `v' with the result - of the evaluation of the thunk. If `v' is a delayed function - application, call the function and overwrite `v' with the - result. Otherwise, this is a no-op. */ - inline void forceValue(Value& v, const Pos& pos = noPos); - - /* Force a value, then recursively force list elements and - attributes. */ - void forceValueDeep(Value& v); - - /* Force `v', and then verify that it has the expected type. */ - NixInt forceInt(Value& v, const Pos& pos); - NixFloat forceFloat(Value& v, const Pos& pos); - bool forceBool(Value& v, const Pos& pos); - inline void forceAttrs(Value& v); - inline void forceAttrs(Value& v, const Pos& pos); - inline void forceList(Value& v); - inline void forceList(Value& v, const Pos& pos); - void forceFunction(Value& v, const Pos& pos); // either lambda or primop - std::string forceString(Value& v, const Pos& pos = noPos); - std::string forceString(Value& v, PathSet& context, const Pos& pos = noPos); - std::string forceStringNoCtx(Value& v, const Pos& pos = noPos); - - /* Return true iff the value `v' denotes a derivation (i.e. a - set with attribute `type = "derivation"'). */ - bool isDerivation(Value& v); - - std::optional tryAttrsToString(const Pos& pos, Value& v, - PathSet& context, - bool coerceMore = false, - bool copyToStore = true); - - /* String coercion. Converts strings, paths and derivations to a - string. If `coerceMore' is set, also converts nulls, integers, - booleans and lists to a string. If `copyToStore' is set, - referenced paths are copied to the Nix store as a side effect. */ - std::string coerceToString(const Pos& pos, Value& v, PathSet& context, - bool coerceMore = false, bool copyToStore = true); - - std::string copyPathToStore(PathSet& context, const Path& path); - - /* Path coercion. Converts strings, paths and derivations to a - path. The result is guaranteed to be a canonicalised, absolute - path. Nothing is copied to the store. */ - Path coerceToPath(const Pos& pos, Value& v, PathSet& context); - - public: - /* The base environment, containing the builtin functions and - values. */ - Env& baseEnv; - - /* The same, but used during parsing to resolve variables. */ - StaticEnv staticBaseEnv; // !!! 
should be private - - private: - unsigned int baseEnvDispl = 0; - - void createBaseEnv(); - - Value* addConstant(const std::string& name, Value& v); - - Value* addPrimOp(const std::string& name, size_t arity, PrimOpFun primOp); - - public: - Value& getBuiltin(const std::string& name); - - private: - inline Value* lookupVar(Env* env, const ExprVar& var, bool noEval); - - friend struct ExprVar; - friend struct ExprAttrs; - friend struct ExprLet; - - Expr* parse(const char* text, const Path& path, const Path& basePath, - StaticEnv& staticEnv); - - public: - /* Do a deep equality test between two values. That is, list - elements and attributes are compared recursively. */ - bool eqValues(Value& v1, Value& v2); - - bool isFunctor(Value& fun); - - void callFunction(Value& fun, Value& arg, Value& v, const Pos& pos); - void callPrimOp(Value& fun, Value& arg, Value& v, const Pos& pos); - - /* Automatically call a function for which each argument has a - default value or has a binding in the `args' map. 'args' need - not live past the end of the call. */ - void autoCallFunction(Bindings* args, Value& fun, Value& res); - - /* Allocation primitives. */ - Value* allocValue(); - Env& allocEnv(size_t size); - - Value* allocAttr(Value& vAttrs, const Symbol& name); - - // Create a list value from the specified vector. - void mkList(Value& v, std::shared_ptr list); - - // Create a list value, allocating as many elements as specified in - // size. This is used for the many cases in this codebase where - // assignment happens into the preallocated list. - void mkList(Value& v, size_t size = 0); - - void mkAttrs(Value& v, size_t capacity); - void mkThunk_(Value& v, Expr* expr); - void mkPos(Value& v, Pos* pos); - - void concatLists(Value& v, const NixList& lists, const Pos& pos); - - /* Print statistics. */ - void printStats(); - - void realiseContext(const PathSet& context); - - /* File access tracing. */ - void TraceFileAccess(const Path& path); - void EnableFileAccessTracing(std::function fn); - - private: - unsigned long nrEnvs = 0; - unsigned long nrValuesInEnvs = 0; - unsigned long nrValues = 0; - unsigned long nrListElems = 0; - unsigned long nrAttrsets = 0; - unsigned long nrAttrsInAttrsets = 0; - unsigned long nrOpUpdates = 0; - unsigned long nrOpUpdateValuesCopied = 0; - unsigned long nrListConcats = 0; - unsigned long nrPrimOpCalls = 0; - unsigned long nrFunctionCalls = 0; - - bool countCalls; - - std::function file_access_trace_fn = nullptr; - Path last_traced_file = ""; - - using PrimOpCalls = std::map; - PrimOpCalls primOpCalls; - - using FunctionCalls = std::map; - FunctionCalls functionCalls; - - void incrFunctionCall(ExprLambda* fun); - - using AttrSelects = std::map; - AttrSelects attrSelects; - - friend struct ExprOpUpdate; - friend struct ExprOpConcatLists; - friend struct ExprSelect; - friend void prim_getAttr(EvalState& state, const Pos& pos, Value** args, - Value& v); -}; - -/* Return a string representing the type of the value `v'. */ -std::string showType(const Value& v); - -/* Decode a context string ‘!!’ into a pair . */ -std::pair decodeContext(const std::string& s); - -/* If `path' refers to a directory, then append "/default.nix". 
*/ -Path resolveExprPath(Path path); - -struct InvalidPathError : EvalError { - Path path; - InvalidPathError(const Path& path); -#ifdef EXCEPTION_NEEDS_THROW_SPEC - ~InvalidPathError() noexcept {}; -#endif -}; - -struct EvalSettings : Config { - Setting restrictEval{ - this, false, "restrict-eval", - "Whether to restrict file system access to paths in $NIX_PATH, " - "and network access to the URI prefixes listed in 'allowed-uris'."}; - - Setting pureEval{this, false, "pure-eval", - "Whether to restrict file system and network access " - "to files specified by cryptographic hash."}; - - Setting enableImportFromDerivation{ - this, true, "allow-import-from-derivation", - "Whether the evaluator allows importing the result of a derivation."}; - - Setting allowedUris{ - this, - {}, - "allowed-uris", - "Prefixes of URIs that builtin functions such as fetchurl and fetchGit " - "are allowed to fetch."}; - - Setting traceFunctionCalls{this, false, "trace-function-calls", - "Emit log messages for each function entry " - "and exit at the 'vomit' log level (-vvvv)"}; -}; - -extern EvalSettings evalSettings; - -} // namespace nix diff --git a/third_party/nix/src/libexpr/function-trace.cc b/third_party/nix/src/libexpr/function-trace.cc deleted file mode 100644 index b1b856965c..0000000000 --- a/third_party/nix/src/libexpr/function-trace.cc +++ /dev/null @@ -1,19 +0,0 @@ -#include "libexpr/function-trace.hh" - -#include - -namespace nix { - -FunctionCallTrace::FunctionCallTrace(const Pos& pos) : pos(pos) { - auto duration = std::chrono::high_resolution_clock::now().time_since_epoch(); - auto ns = std::chrono::duration_cast(duration); - LOG(INFO) << "function-trace entered " << pos << " at " << ns.count(); -} - -FunctionCallTrace::~FunctionCallTrace() { - auto duration = std::chrono::high_resolution_clock::now().time_since_epoch(); - auto ns = std::chrono::duration_cast(duration); - LOG(INFO) << "function-trace exited " << pos << " at " << ns.count(); -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/function-trace.hh b/third_party/nix/src/libexpr/function-trace.hh deleted file mode 100644 index 6b810159b8..0000000000 --- a/third_party/nix/src/libexpr/function-trace.hh +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once - -#include - -#include "libexpr/eval.hh" - -namespace nix { - -struct FunctionCallTrace { - const Pos& pos; - FunctionCallTrace(const Pos& pos); - ~FunctionCallTrace(); -}; -} // namespace nix diff --git a/third_party/nix/src/libexpr/get-drvs.cc b/third_party/nix/src/libexpr/get-drvs.cc deleted file mode 100644 index 164c1e54f3..0000000000 --- a/third_party/nix/src/libexpr/get-drvs.cc +++ /dev/null @@ -1,446 +0,0 @@ -#include "libexpr/get-drvs.hh" - -#include -#include -#include - -#include -#include -#include - -#include "libexpr/eval-inline.hh" -#include "libstore/derivations.hh" -#include "libutil/util.hh" - -namespace nix { - -DrvInfo::DrvInfo(EvalState& state, std::string attrPath, - std::shared_ptr attrs) - : state(&state), attrs(attrs), attrPath(std::move(attrPath)) {} - -DrvInfo::DrvInfo(EvalState& state, const ref& store, - const std::string& drvPathWithOutputs) - : state(&state), attrPath("") { - auto spec = parseDrvPathWithOutputs(drvPathWithOutputs); - - drvPath = spec.first; - - auto drv = store->derivationFromPath(drvPath); - - name = storePathToName(drvPath); - - if (spec.second.size() > 1) { - throw Error( - "building more than one derivation output is not supported, in '%s'", - drvPathWithOutputs); - } - - outputName = spec.second.empty() ? 
get(drv.env, "outputName", "out") - : *spec.second.begin(); - - auto i = drv.outputs.find(outputName); - if (i == drv.outputs.end()) { - throw Error("derivation '%s' does not have output '%s'", drvPath, - outputName); - } - - outPath = i->second.path; -} - -std::string DrvInfo::queryName() const { - if (name.empty() && (attrs != nullptr)) { - auto i = attrs->find(state->sName); - if (i == attrs->end()) { - throw TypeError("derivation name missing"); - } - name = state->forceStringNoCtx(*i->second.value); - } - return name; -} - -std::string DrvInfo::querySystem() const { - if (system.empty() && (attrs != nullptr)) { - auto i = attrs->find(state->sSystem); - system = i == attrs->end() - ? "unknown" - : state->forceStringNoCtx(*i->second.value, *i->second.pos); - } - return system; -} - -std::string DrvInfo::queryDrvPath() const { - if (drvPath.empty() && (attrs != nullptr)) { - Bindings::iterator i = attrs->find(state->sDrvPath); - PathSet context; - drvPath = i != attrs->end() ? state->coerceToPath(*i->second.pos, - *i->second.value, context) - : ""; - } - return drvPath; -} - -std::string DrvInfo::queryOutPath() const { - if (outPath.empty() && (attrs != nullptr)) { - Bindings::iterator i = attrs->find(state->sOutPath); - PathSet context; - outPath = i != attrs->end() ? state->coerceToPath(*i->second.pos, - *i->second.value, context) - : ""; - } - return outPath; -} - -DrvInfo::Outputs DrvInfo::queryOutputs(bool onlyOutputsToInstall) { - if (outputs.empty()) { - /* Get the ‘outputs’ list. */ - Bindings::iterator i; - if ((attrs != nullptr) && - (i = attrs->find(state->sOutputs)) != attrs->end()) { - state->forceList(*i->second.value, *i->second.pos); - - /* For each output... */ - for (unsigned int j = 0; j < i->second.value->listSize(); ++j) { - /* Evaluate the corresponding set. */ - std::string name = state->forceStringNoCtx(*(*i->second.value->list)[j], - *i->second.pos); - Bindings::iterator out = attrs->find(state->symbols.Create(name)); - if (out == attrs->end()) { - continue; // FIXME: throw error? - } - state->forceAttrs(*out->second.value); - - /* And evaluate its ‘outPath’ attribute. */ - Bindings::iterator outPath = - out->second.value->attrs->find(state->sOutPath); - if (outPath == out->second.value->attrs->end()) { - continue; // FIXME: throw error? - } - PathSet context; - outputs[name] = state->coerceToPath(*outPath->second.pos, - *outPath->second.value, context); - } - } else { - outputs["out"] = queryOutPath(); - } - } - if (!onlyOutputsToInstall || (attrs == nullptr)) { - return outputs; - } - - /* Check for `meta.outputsToInstall` and return `outputs` reduced to that. */ - const Value* outTI = queryMeta("outputsToInstall"); - if (outTI == nullptr) { - return outputs; - } - const auto errMsg = Error("this derivation has bad 'meta.outputsToInstall'"); - /* ^ this shows during `nix-env -i` right under the bad derivation */ - if (!outTI->isList()) { - throw errMsg; - } - Outputs result; - - for (Value* i : *outTI->list) { - if (i->type != tString) { - throw errMsg; - } - auto out = outputs.find(i->string.s); - if (out == outputs.end()) { - throw errMsg; - } - result.insert(*out); - } - return result; -} - -std::string DrvInfo::queryOutputName() const { - if (outputName.empty() && (attrs != nullptr)) { - Bindings::iterator i = attrs->find(state->sOutputName); - outputName = - i != attrs->end() ? 
state->forceStringNoCtx(*i->second.value) : ""; - } - return outputName; -} - -Bindings* DrvInfo::getMeta() { - if (meta != nullptr) { - return meta.get(); - } - if (attrs == nullptr) { - return nullptr; - } - Bindings::iterator a = attrs->find(state->sMeta); - if (a == attrs->end()) { - return nullptr; - } - state->forceAttrs(*a->second.value, *a->second.pos); - meta = a->second.value->attrs; - return meta.get(); -} - -StringSet DrvInfo::queryMetaNames() { - StringSet res; - if (getMeta() == nullptr) { - return res; - } - for (auto& i : *meta) { - res.insert(i.second.name); - } - return res; -} - -bool DrvInfo::checkMeta(Value& v) { - state->forceValue(v); - if (v.isList()) { - for (unsigned int n = 0; n < v.listSize(); ++n) { - if (!checkMeta(*(*v.list)[n])) { - return false; - } - } - return true; - } - if (v.type == tAttrs) { - Bindings::iterator i = v.attrs->find(state->sOutPath); - if (i != v.attrs->end()) { - return false; - } - for (auto& i : *v.attrs) { - if (!checkMeta(*i.second.value)) { - return false; - } - } - return true; - } else { - return v.type == tInt || v.type == tBool || v.type == tString || - v.type == tFloat; - } -} - -Value* DrvInfo::queryMeta(const std::string& name) { - if (getMeta() == nullptr) { - return nullptr; - } - Bindings::iterator a = meta->find(state->symbols.Create(name)); - if (a == meta->end() || !checkMeta(*a->second.value)) { - return nullptr; - } - return a->second.value; -} - -std::string DrvInfo::queryMetaString(const std::string& name) { - Value* v = queryMeta(name); - if ((v == nullptr) || v->type != tString) { - return ""; - } - return v->string.s; -} - -NixInt DrvInfo::queryMetaInt(const std::string& name, NixInt def) { - Value* v = queryMeta(name); - if (v == nullptr) { - return def; - } - if (v->type == tInt) { - return v->integer; - } - if (v->type == tString) { - /* Backwards compatibility with before we had support for - integer meta fields. */ - NixInt n; - if (absl::SimpleAtoi(v->string.s, &n)) { - return n; - } - } - return def; -} - -NixFloat DrvInfo::queryMetaFloat(const std::string& name, NixFloat def) { - Value* v = queryMeta(name); - if (v == nullptr) { - return def; - } - if (v->type == tFloat) { - return v->fpoint; - } - if (v->type == tString) { - /* Backwards compatibility with before we had support for - float meta fields. */ - NixFloat n; - if (string2Float(v->string.s, n)) { - return n; - } - } - return def; -} - -bool DrvInfo::queryMetaBool(const std::string& name, bool def) { - Value* v = queryMeta(name); - if (v == nullptr) { - return def; - } - if (v->type == tBool) { - return v->boolean; - } - if (v->type == tString) { - /* Backwards compatibility with before we had support for - Boolean meta fields. */ - if (strcmp(v->string.s, "true") == 0) { - return true; - } - if (strcmp(v->string.s, "false") == 0) { - return false; - } - } - return def; -} - -void DrvInfo::setMeta(const std::string& name, Value* v) { - std::shared_ptr old = meta; - meta = std::shared_ptr(Bindings::New(old->size() + 1).release()); - Symbol sym = state->symbols.Create(name); - if (old != nullptr) { - for (auto i : *old) { - if (i.second.name != sym) { - meta->push_back(i.second); - } - } - } - if (v != nullptr) { - meta->push_back(Attr(sym, v)); - } -} - -/* Cache for already considered attrsets. */ -using Done = absl::flat_hash_set>; - -/* Evaluate value `v'. If it evaluates to a set of type `derivation', - then put information about it in `drvs' (unless it's already in `done'). 
- The result boolean indicates whether it makes sense - for the caller to recursively search for derivations in `v'. */ -static bool getDerivation(EvalState& state, Value& v, - const std::string& attrPath, DrvInfos& drvs, - Done& done, bool ignoreAssertionFailures) { - try { - state.forceValue(v); - if (!state.isDerivation(v)) { - return true; - } - - /* Remove spurious duplicates (e.g., a set like `rec { x = - derivation {...}; y = x;}'. */ - if (done.find(v.attrs) != done.end()) { - return false; - } - done.insert(v.attrs); - - DrvInfo drv(state, attrPath, v.attrs); - - drv.queryName(); - - drvs.push_back(drv); - - return false; - - } catch (AssertionError& e) { - if (ignoreAssertionFailures) { - return false; - } - throw; - } -} - -std::optional getDerivation(EvalState& state, Value& v, - bool ignoreAssertionFailures) { - Done done; - DrvInfos drvs; - getDerivation(state, v, "", drvs, done, ignoreAssertionFailures); - if (drvs.size() != 1) { - return {}; - } - return std::move(drvs.front()); -} - -static std::string addToPath(const std::string& s1, const std::string& s2) { - return s1.empty() ? s2 : s1 + "." + s2; -} - -static std::regex attrRegex("[A-Za-z_][A-Za-z0-9-_+]*"); - -static void getDerivations(EvalState& state, Value& vIn, - const std::string& pathPrefix, Bindings* autoArgs, - DrvInfos& drvs, Done& done, - bool ignoreAssertionFailures) { - Value v; - state.autoCallFunction(autoArgs, vIn, v); - - /* Process the expression. */ - if (!getDerivation(state, v, pathPrefix, drvs, done, - ignoreAssertionFailures)) { - ; - - } else if (v.type == tAttrs) { - /* !!! undocumented hackery to support combining channels in - nix-env.cc. */ - bool combineChannels = - v.attrs->find(state.symbols.Create("_combineChannels")) != - v.attrs->end(); - - /* Consider the attributes in sorted order to get more - deterministic behaviour in nix-env operations (e.g. when - there are names clashes between derivations, the derivation - bound to the attribute with the "lower" name should take - precedence). */ - for (auto& [_, i] : *v.attrs) { - DLOG(INFO) << "evaluating attribute '" << i.name << "'"; - if (!std::regex_match(std::string(i.name), attrRegex)) { - continue; - } - std::string pathPrefix2 = addToPath(pathPrefix, i.name); - if (combineChannels) { - getDerivations(state, *i.value, pathPrefix2, autoArgs, drvs, done, - ignoreAssertionFailures); - } else if (getDerivation(state, *i.value, pathPrefix2, drvs, done, - ignoreAssertionFailures)) { - /* If the value of this attribute is itself a set, - should we recurse into it? => Only if it has a - `recurseForDerivations = true' attribute. 
*/ - if (i.value->type == tAttrs) { - Bindings::iterator j = i.value->attrs->find( - state.symbols.Create("recurseForDerivations")); - if (j != i.value->attrs->end() && - state.forceBool(*j->second.value, *j->second.pos)) { - getDerivations(state, *i.value, pathPrefix2, autoArgs, drvs, done, - ignoreAssertionFailures); - } - } - } - } - } - - else if (v.isList()) { - for (unsigned int n = 0; n < v.listSize(); ++n) { - std::string pathPrefix2 = - addToPath(pathPrefix, (format("%1%") % n).str()); - if (getDerivation(state, *(*v.list)[n], pathPrefix2, drvs, done, - ignoreAssertionFailures)) { - getDerivations(state, *(*v.list)[n], pathPrefix2, autoArgs, drvs, done, - ignoreAssertionFailures); - } - } - } - - else { - throw TypeError( - "expression does not evaluate to a derivation (or a set or list of " - "those)"); - } -} - -void getDerivations(EvalState& state, Value& v, const std::string& pathPrefix, - Bindings* autoArgs, DrvInfos& drvs, - bool ignoreAssertionFailures) { - Done done; - getDerivations(state, v, pathPrefix, autoArgs, drvs, done, - ignoreAssertionFailures); -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/get-drvs.hh b/third_party/nix/src/libexpr/get-drvs.hh deleted file mode 100644 index 3de266d0c0..0000000000 --- a/third_party/nix/src/libexpr/get-drvs.hh +++ /dev/null @@ -1,83 +0,0 @@ -#pragma once - -#include -#include - -#include "libexpr/eval.hh" - -namespace nix { - -struct DrvInfo { - public: - typedef std::map Outputs; - - private: - EvalState* state; - - mutable std::string name; - mutable std::string system; - mutable std::string drvPath; - mutable std::string outPath; - mutable std::string outputName; - Outputs outputs; - - bool failed = false; // set if we get an AssertionError - - std::shared_ptr attrs = nullptr; - std::shared_ptr meta = nullptr; - - Bindings* getMeta(); - - bool checkMeta(Value& v); - - public: - std::string attrPath; /* path towards the derivation */ - - DrvInfo(EvalState& state) : state(&state){}; - DrvInfo(EvalState& state, std::string attrPath, - std::shared_ptr attrs); - DrvInfo(EvalState& state, const ref& store, - const std::string& drvPathWithOutputs); - - std::string queryName() const; - std::string querySystem() const; - std::string queryDrvPath() const; - std::string queryOutPath() const; - std::string queryOutputName() const; - /** Return the list of outputs. The "outputs to install" are determined by - * `meta.outputsToInstall`. */ - Outputs queryOutputs(bool onlyOutputsToInstall = false); - - StringSet queryMetaNames(); - Value* queryMeta(const std::string& name); - std::string queryMetaString(const std::string& name); - NixInt queryMetaInt(const std::string& name, NixInt def); - NixFloat queryMetaFloat(const std::string& name, NixFloat def); - bool queryMetaBool(const std::string& name, bool def); - void setMeta(const std::string& name, Value* v); - - /* - MetaInfo queryMetaInfo(EvalState & state) const; - MetaValue queryMetaInfo(EvalState & state, const std::string & name) const; - */ - - void setName(const std::string& s) { name = s; } - void setDrvPath(const std::string& s) { drvPath = s; } - void setOutPath(const std::string& s) { outPath = s; } - - void setFailed() { failed = true; }; - bool hasFailed() { return failed; }; -}; - -using DrvInfos = std::list; - -/* If value `v' denotes a derivation, return a DrvInfo object - describing it. Otherwise return nothing. 
*/ -std::optional getDerivation(EvalState& state, Value& v, - bool ignoreAssertionFailures); - -void getDerivations(EvalState& state, Value& v, const std::string& pathPrefix, - Bindings* autoArgs, DrvInfos& drvs, - bool ignoreAssertionFailures); - -} // namespace nix diff --git a/third_party/nix/src/libexpr/json-to-value.cc b/third_party/nix/src/libexpr/json-to-value.cc deleted file mode 100644 index 043f8c64cd..0000000000 --- a/third_party/nix/src/libexpr/json-to-value.cc +++ /dev/null @@ -1,152 +0,0 @@ -#include "libexpr/json-to-value.hh" - -#include -#include -#include - -#include "libexpr/value.hh" - -using json = nlohmann::json; - -namespace nix { - -// for more information, refer to -// https://github.com/nlohmann/json/blob/master/include/nlohmann/detail/input/json_sax.hpp -class JSONSax : nlohmann::json_sax { - class JSONState { - protected: - std::unique_ptr parent; - std::shared_ptr v; - - public: - virtual std::unique_ptr resolve(EvalState&) { - throw std::logic_error("tried to close toplevel json parser state"); - } - explicit JSONState(std::unique_ptr&& p) : parent(std::move(p)) {} - explicit JSONState(Value* v) : v(allocRootValue(v)) {} - JSONState(JSONState& p) = delete; - Value& value(EvalState& state) { - if (!v) v = allocRootValue(state.allocValue()); - return **v; - } - virtual ~JSONState() {} - virtual void add() {} - }; - - class JSONObjectState : public JSONState { - using JSONState::JSONState; - ValueMap attrs = ValueMap(); - std::unique_ptr resolve(EvalState& state) override { - Value& v = parent->value(state); - state.mkAttrs(v, attrs.size()); - for (auto& i : attrs) v.attrs->push_back(Attr(i.first, i.second)); - return std::move(parent); - } - void add() override { v = nullptr; }; - - public: - void key(string_t& name, EvalState& state) { - attrs[state.symbols.Create(name)] = &value(state); - } - }; - - class JSONListState : public JSONState { - using JSONState::JSONState; - std::vector values; - std::unique_ptr resolve(EvalState& state) override { - Value& v = parent->value(state); - state.mkList(v, values.size()); - for (size_t n = 0; n < values.size(); ++n) { - (*v.list)[n] = values[n]; - } - return std::move(parent); - } - void add() override { - values.push_back(*v); - v = nullptr; - }; - - public: - JSONListState(std::unique_ptr&& p, std::size_t reserve) - : JSONState(std::move(p)) { - values.reserve(reserve); - } - }; - - EvalState& state; - std::unique_ptr rs; - - template - inline bool handle_value(T f, Args... 
args) { - f(rs->value(state), args...); - rs->add(); - return true; - } - - public: - JSONSax(EvalState& state, Value& v) : state(state), rs(new JSONState(&v)){}; - - bool null() override { return handle_value(mkNull); } - - bool boolean(bool val) override { return handle_value(mkBool, val); } - - bool number_integer(number_integer_t val) override { - return handle_value(mkInt, val); - } - - bool number_unsigned(number_unsigned_t val) override { - return handle_value(mkInt, val); - } - - bool number_float(number_float_t val, const string_t&) override { - return handle_value(mkFloat, val); - } - - bool string(string_t& val) override { - return handle_value(mkString, val.c_str()); - } - -#if NLOHMANN_JSON_VERSION_MAJOR >= 3 && NLOHMANN_JSON_VERSION_MINOR >= 8 - bool binary(binary_t&) { - // This function ought to be unreachable - assert(false); - return true; - } -#endif - - bool start_object(std::size_t) override { - rs = std::make_unique(std::move(rs)); - return true; - } - - bool key(string_t& name) override { - dynamic_cast(rs.get())->key(name, state); - return true; - } - - bool end_object() override { - rs = rs->resolve(state); - rs->add(); - return true; - } - - bool end_array() override { return end_object(); } - - bool start_array(size_t len) override { - rs = std::make_unique( - std::move(rs), len != std::numeric_limits::max() ? len : 128); - return true; - } - - bool parse_error(std::size_t, const std::string&, - const nlohmann::detail::exception& ex) override { - throw JSONParseError(ex.what()); - } -}; - -void parseJSON(EvalState& state, const std::string& s_, Value& v) { - JSONSax parser(state, v); - bool res = json::sax_parse(s_, &parser); - if (!res) throw JSONParseError("Invalid JSON Value"); -} -} // namespace nix diff --git a/third_party/nix/src/libexpr/json-to-value.hh b/third_party/nix/src/libexpr/json-to-value.hh deleted file mode 100644 index 7f258f2137..0000000000 --- a/third_party/nix/src/libexpr/json-to-value.hh +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once - -#include - -#include "libexpr/eval.hh" - -namespace nix { - -MakeError(JSONParseError, EvalError); - -void parseJSON(EvalState& state, const std::string& s, Value& v); - -} // namespace nix diff --git a/third_party/nix/src/libexpr/lexer.l b/third_party/nix/src/libexpr/lexer.l deleted file mode 100644 index d5b8a45936..0000000000 --- a/third_party/nix/src/libexpr/lexer.l +++ /dev/null @@ -1,193 +0,0 @@ -%option reentrant bison-bridge bison-locations -%option noyywrap -%option never-interactive -%option stack -%option nodefault -%option nounput noyy_top_state - - -%s DEFAULT -%x STRING -%x IND_STRING - - -%{ -#include - -#include "generated/parser-tab.hh" -#include "libexpr/nixexpr.hh" -#include "libexpr/parser.hh" - -using namespace nix; - -namespace nix { - -static void initLoc(YYLTYPE* loc) { - loc->first_line = loc->last_line = 1; - loc->first_column = loc->last_column = 1; -} - -static void adjustLoc(YYLTYPE* loc, const char* s, size_t len) { - loc->first_line = loc->last_line; - loc->first_column = loc->last_column; - - while (len--) { - switch (*s++) { - case '\r': - if (*s == '\n') /* cr/lf */ - s++; - /* fall through */ - case '\n': - ++loc->last_line; - loc->last_column = 1; - break; - default: - ++loc->last_column; - } - } -} - -} - -#define YY_USER_INIT initLoc(yylloc) -#define YY_USER_ACTION adjustLoc(yylloc, yytext, yyleng); - -#define PUSH_STATE(state) yy_push_state(state, yyscanner) -#define POP_STATE() yy_pop_state(yyscanner) - -%} - - -ANY .|\n -ID [a-zA-Z\_][a-zA-Z0-9\_\'\-]* -INT [0-9]+ -FLOAT 
(([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)? -PATH [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+\/? -HPATH \~(\/[a-zA-Z0-9\.\_\-\+]+)+\/? -SPATH \<[a-zA-Z0-9\.\_\-\+]+(\/[a-zA-Z0-9\.\_\-\+]+)*\> -URI [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+ - - -%% - - -if { return IF; } -then { return THEN; } -else { return ELSE; } -assert { return ASSERT; } -with { return WITH; } -let { return LET; } -in { return IN; } -rec { return REC; } -inherit { return INHERIT; } -or { return OR_KW; } -\.\.\. { return ELLIPSIS; } - -\=\= { return EQ; } -\!\= { return NEQ; } -\<\= { return LEQ; } -\>\= { return GEQ; } -\&\& { return AND; } -\|\| { return OR; } -\-\> { return IMPL; } -\/\/ { return UPDATE; } -\+\+ { return CONCAT; } - -{ID} { yylval->id = strdup(yytext); return ID; } -{INT} { errno = 0; - try { - yylval->n = boost::lexical_cast(yytext); - } catch (const boost::bad_lexical_cast &) { - throw ParseError(format("invalid integer '%1%'") % yytext); - } - return INT; - } -{FLOAT} { errno = 0; - yylval->nf = strtod(yytext, 0); - if (errno != 0) - throw ParseError(format("invalid float '%1%'") % yytext); - return FLOAT; - } - -\$\{ { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; } - -\} { /* State INITIAL only exists at the bottom of the stack and is - used as a marker. DEFAULT replaces it everywhere else. - Popping when in INITIAL state causes an empty stack exception, - so don't */ - if (YYSTATE != INITIAL) - POP_STATE(); - return '}'; - } -\{ { PUSH_STATE(DEFAULT); return '{'; } - -\" { PUSH_STATE(STRING); return '"'; } -([^\$\"\\]|\$[^\{\"\\]|\\{ANY}|\$\\{ANY})*\$/\" | -([^\$\"\\]|\$[^\{\"\\]|\\{ANY}|\$\\{ANY})+ { - /* It is impossible to match strings ending with '$' with one - regex because trailing contexts are only valid at the end - of a rule. (A sane but undocumented limitation.) */ - yylval->e = unescapeStr(data->symbols, yytext, yyleng); - return STR; - } -\$\{ { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; } -\" { POP_STATE(); return '"'; } -\$|\\|\$\\ { - /* This can only occur when we reach EOF, otherwise the above - (...|\$[^\{\"\\]|\\.|\$\\.)+ would have triggered. - This is technically invalid, but we leave the problem to the - parser who fails with exact location. */ - return STR; - } - -\'\'(\ *\n)? { PUSH_STATE(IND_STRING); return IND_STRING_OPEN; } -([^\$\']|\$[^\{\']|\'[^\'\$])+ { - yylval->e = new ExprIndStr(yytext); - return IND_STR; - } -\'\'\$ | -\$ { - yylval->e = new ExprIndStr("$"); - return IND_STR; - } -\'\'\' { - yylval->e = new ExprIndStr("''"); - return IND_STR; - } -\'\'\\{ANY} { - yylval->e = unescapeStr(data->symbols, yytext + 2, yyleng - 2); - return IND_STR; - } -\$\{ { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; } -\'\' { POP_STATE(); return IND_STRING_CLOSE; } -\' { - yylval->e = new ExprIndStr("'"); - return IND_STR; - } - - -{PATH} { if (yytext[yyleng-1] == '/') - throw ParseError("path '%s' has a trailing slash", yytext); - yylval->path = strdup(yytext); - return PATH; - } -{HPATH} { if (yytext[yyleng-1] == '/') - throw ParseError("path '%s' has a trailing slash", yytext); - yylval->path = strdup(yytext); - return HPATH; - } -{SPATH} { yylval->path = strdup(yytext); return SPATH; } -{URI} { yylval->uri = strdup(yytext); return URI; } - -[ \t\r\n]+ /* eat up whitespace */ -\#[^\r\n]* /* single-line comments */ -\/\*([^*]|\*+[^*/])*\*+\/ /* long comments */ - -{ANY} { - /* Don't return a negative number, as this will cause - Bison to stop parsing without an error. 
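The {INT} rule above goes through boost::lexical_cast (presumably instantiated at int64_t, i.e. NixInt) precisely so that out-of-range literals are rejected instead of silently wrapping. A standalone sketch of that check; parseNixInt is an illustrative name, not part of the removed lexer:

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>

#include <boost/lexical_cast.hpp>

// boost::lexical_cast throws bad_lexical_cast on overflow as well as on
// malformed input, which is what makes it suitable here.
int64_t parseNixInt(const std::string& s) {
  try {
    return boost::lexical_cast<int64_t>(s);
  } catch (const boost::bad_lexical_cast&) {
    throw std::invalid_argument("invalid integer '" + s + "'");
  }
}

int main() {
  std::cout << parseNixInt("42") << "\n";
  try {
    parseNixInt("9223372036854775808");  // INT64_MAX + 1
  } catch (const std::invalid_argument& e) {
    std::cout << e.what() << "\n";
  }
}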
*/ - return (unsigned char) yytext[0]; - } - -%% - diff --git a/third_party/nix/src/libexpr/names.cc b/third_party/nix/src/libexpr/names.cc deleted file mode 100644 index 1e9c2f2f4a..0000000000 --- a/third_party/nix/src/libexpr/names.cc +++ /dev/null @@ -1,121 +0,0 @@ -#include "libexpr/names.hh" - -#include - -#include - -#include "libutil/util.hh" - -namespace nix { - -DrvName::DrvName() { name = ""; } - -/* Parse a derivation name. The `name' part of a derivation name is - everything up to but not including the first dash *not* followed by - a letter. The `version' part is the rest (excluding the separating - dash). E.g., `apache-httpd-2.0.48' is parsed to (`apache-httpd', - '2.0.48'). */ -DrvName::DrvName(const std::string& s) : hits(0) { - name = fullName = s; - for (unsigned int i = 0; i < s.size(); ++i) { - /* !!! isalpha/isdigit are affected by the locale. */ - if (s[i] == '-' && i + 1 < s.size() && (isalpha(s[i + 1]) == 0)) { - name = std::string(s, 0, i); - version = std::string(s, i + 1); - break; - } - } -} - -bool DrvName::matches(DrvName& n) { - if (name != "*") { - if (!regex) { - regex = std::make_unique(name, std::regex::extended); - } - if (!std::regex_match(n.name, *regex)) { - return false; - } - } - return !(!version.empty() && version != n.version); -} - -std::string nextComponent(std::string::const_iterator& p, - const std::string::const_iterator end) { - /* Skip any dots and dashes (component separators). */ - while (p != end && (*p == '.' || *p == '-')) { - ++p; - } - - if (p == end) { - return ""; - } - - /* If the first character is a digit, consume the longest sequence - of digits. Otherwise, consume the longest sequence of - non-digit, non-separator characters. */ - std::string s; - if (isdigit(*p) != 0) { - while (p != end && (isdigit(*p) != 0)) { - s += *p++; - } - } else { - while (p != end && ((isdigit(*p) == 0) && *p != '.' && *p != '-')) { - s += *p++; - } - } - - return s; -} - -static bool componentsLT(const std::string& c1, const std::string& c2) { - int n1; - int n2; - bool c1Num = absl::SimpleAtoi(c1, &n1); - bool c2Num = absl::SimpleAtoi(c2, &n2); - - if (c1Num && c2Num) { - return n1 < n2; - } - if (c1.empty() && c2Num) { - return true; - } else if (c1 == "pre" && c2 != "pre") { - return true; - } else if (c2 == "pre") { - return false; - /* Assume that `2.3a' < `2.3.1'. 
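The DrvName constructor above splits a store-style name at the first dash that is not followed by a letter, so trailing version digits end up in `version` while dashed words stay in `name`. A self-contained sketch of that rule (splitDrvName is an illustrative name, not part of the removed code):

#include <cctype>
#include <iostream>
#include <string>
#include <utility>

// "apache-httpd-2.0.48" -> ("apache-httpd", "2.0.48"): the version starts at
// the first '-' whose next character is not a letter.
std::pair<std::string, std::string> splitDrvName(const std::string& s) {
  for (std::size_t i = 0; i + 1 < s.size(); ++i) {
    if (s[i] == '-' && !std::isalpha(static_cast<unsigned char>(s[i + 1]))) {
      return {s.substr(0, i), s.substr(i + 1)};
    }
  }
  return {s, ""};  // no version part
}

int main() {
  auto [name, version] = splitDrvName("apache-httpd-2.0.48");
  std::cout << name << " / " << version << "\n";  // apache-httpd / 2.0.48
}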
*/ - } else if (c2Num) { - return true; - } else if (c1Num) { - return false; - } else { - return c1 < c2; - } -} - -int compareVersions(const std::string& v1, const std::string& v2) { - std::string::const_iterator p1 = v1.begin(); - std::string::const_iterator p2 = v2.begin(); - - while (p1 != v1.end() || p2 != v2.end()) { - std::string c1 = nextComponent(p1, v1.end()); - std::string c2 = nextComponent(p2, v2.end()); - if (componentsLT(c1, c2)) { - return -1; - } - if (componentsLT(c2, c1)) { - return 1; - } - } - - return 0; -} - -DrvNames drvNamesFromArgs(const Strings& opArgs) { - DrvNames result; - for (auto& i : opArgs) { - result.push_back(DrvName(i)); - } - return result; -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/names.hh b/third_party/nix/src/libexpr/names.hh deleted file mode 100644 index 061388d517..0000000000 --- a/third_party/nix/src/libexpr/names.hh +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#include -#include - -#include "libutil/types.hh" - -namespace nix { - -struct DrvName { - std::string fullName; - std::string name; - std::string version; - unsigned int hits; - - DrvName(); - DrvName(const std::string& s); - bool matches(DrvName& n); - - private: - std::unique_ptr regex; -}; - -typedef std::list DrvNames; - -std::string nextComponent(std::string::const_iterator& p, - const std::string::const_iterator end); -int compareVersions(const std::string& v1, const std::string& v2); -DrvNames drvNamesFromArgs(const Strings& opArgs); - -} // namespace nix diff --git a/third_party/nix/src/libexpr/nix-expr.pc.in b/third_party/nix/src/libexpr/nix-expr.pc.in deleted file mode 100644 index 99b0ae2c68..0000000000 --- a/third_party/nix/src/libexpr/nix-expr.pc.in +++ /dev/null @@ -1,10 +0,0 @@ -prefix=@CMAKE_INSTALL_PREFIX@ -libdir=@CMAKE_INSTALL_FULL_LIBDIR@ -includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ - -Name: Nix -Description: Nix Package Manager -Version: @PACKAGE_VERSION@ -Requires: nix-store bdw-gc -Libs: -L${libdir} -lnixexpr -Cflags: -I${includedir}/nix diff --git a/third_party/nix/src/libexpr/nixexpr.cc b/third_party/nix/src/libexpr/nixexpr.cc deleted file mode 100644 index 391f068205..0000000000 --- a/third_party/nix/src/libexpr/nixexpr.cc +++ /dev/null @@ -1,414 +0,0 @@ -#include "libexpr/nixexpr.hh" - -#include -#include - -#include "libstore/derivations.hh" -#include "libutil/util.hh" -#include "libutil/visitor.hh" - -namespace nix { - -/* Displaying abstract syntax trees. 
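compareVersions above implements the ordering nix-env uses for upgrades: components are compared numerically when both sides are numbers, the literal component "pre" sorts before everything else, and a purely alphabetic component sorts before a new numeric one. A usage sketch against the removed header, showing a few orderings the implementation above produces:

#include <cassert>

#include "libexpr/names.hh"

int main() {
  assert(nix::compareVersions("2.3", "2.3.1") < 0);    // shorter version is older
  assert(nix::compareVersions("2.3a", "2.3.1") < 0);   // per the "2.3a < 2.3.1" assumption above
  assert(nix::compareVersions("1.0pre1", "1.0") < 0);  // "pre" releases come first
  assert(nix::compareVersions("2.10", "2.9") > 0);     // numeric, not lexicographic
  return 0;
}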
*/ - -std::ostream& operator<<(std::ostream& str, const Expr& e) { - e.show(str); - return str; -} - -static void showString(std::ostream& str, const std::string& s) { - str << '"'; - for (auto c : std::string(s)) { - if (c == '"' || c == '\\' || c == '$') { - str << "\\" << c; - } else if (c == '\n') { - str << "\\n"; - } else if (c == '\r') { - str << "\\r"; - } else if (c == '\t') { - str << "\\t"; - } else { - str << c; - } - } - str << '"'; -} - -static void showId(std::ostream& str, const std::string& s) { - if (s.empty()) { - str << "\"\""; - } else if (s == "if") { // FIXME: handle other keywords - str << '"' << s << '"'; - } else { - char c = s[0]; - if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_')) { - showString(str, s); - return; - } - for (auto c : s) { - if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || - (c >= '0' && c <= '9') || c == '_' || c == '\'' || c == '-')) { - showString(str, s); - return; - } - } - str << s; - } -} - -std::ostream& operator<<(std::ostream& str, const Symbol& sym) { - showId(str, *sym.s); - return str; -} - -void Expr::show(std::ostream& str) const { abort(); } - -void ExprInt::show(std::ostream& str) const { str << n; } - -void ExprFloat::show(std::ostream& str) const { str << nf; } - -void ExprString::show(std::ostream& str) const { showString(str, s); } - -void ExprPath::show(std::ostream& str) const { str << s; } - -void ExprVar::show(std::ostream& str) const { str << name; } - -void ExprSelect::show(std::ostream& str) const { - str << "(" << *e << ")." << showAttrPath(attrPath); - if (def != nullptr) { - str << " or (" << *def << ")"; - } -} - -void ExprOpHasAttr::show(std::ostream& str) const { - str << "((" << *e << ") ? " << showAttrPath(attrPath) << ")"; -} - -void ExprAttrs::show(std::ostream& str) const { - if (recursive) { - str << "rec "; - } - str << "{ "; - for (auto& i : attrs) { - if (i.second.inherited) { - str << "inherit " << i.first << " " - << "; "; - } else { - str << i.first << " = " << *i.second.e << "; "; - } - } - for (auto& i : dynamicAttrs) { - str << "\"${" << *i.nameExpr << "}\" = " << *i.valueExpr << "; "; - } - str << "}"; -} - -void ExprList::show(std::ostream& str) const { - str << "[ "; - for (auto& i : elems) { - str << "(" << *i << ") "; - } - str << "]"; -} - -void ExprLambda::show(std::ostream& str) const { - str << "("; - if (matchAttrs) { - str << "{ "; - bool first = true; - for (auto& i : formals->formals) { - if (first) { - first = false; - } else { - str << ", "; - } - str << i.name; - if (i.def != nullptr) { - str << " ? " << *i.def; - } - } - if (formals->ellipsis) { - if (!first) { - str << ", "; - } - str << "..."; - } - str << " }"; - if (!arg.empty()) { - str << " @ "; - } - } - if (!arg.empty()) { - str << arg; - } - str << ": " << *body << ")"; -} - -void ExprLet::show(std::ostream& str) const { - str << "(let "; - for (auto& i : attrs->attrs) { - if (i.second.inherited) { - str << "inherit " << i.first << "; "; - } else { - str << i.first << " = " << *i.second.e << "; "; - } - } - str << "in " << *body << ")"; -} - -void ExprWith::show(std::ostream& str) const { - str << "(with " << *attrs << "; " << *body << ")"; -} - -void ExprIf::show(std::ostream& str) const { - str << "(if " << *cond << " then " << *then << " else " << *else_ << ")"; -} - -void ExprAssert::show(std::ostream& str) const { - str << "assert " << *cond << "; " << *body; -} - -void ExprOpNot::show(std::ostream& str) const { str << "(! 
" << *e << ")"; } - -void ExprConcatStrings::show(std::ostream& str) const { - bool first = true; - str << "("; - for (auto& i : *es) { - if (first) { - first = false; - } else { - str << " + "; - } - str << *i; - } - str << ")"; -} - -void ExprPos::show(std::ostream& str) const { str << "__curPos"; } - -std::ostream& operator<<(std::ostream& str, const Pos& pos) { - if (!pos || !pos.file.has_value()) { - str << "undefined position"; - } else { - str << (format(ANSI_BOLD "%1%" ANSI_NORMAL ":%2%:%3%") % - std::string(pos.file.value()) % pos.line % pos.column) - .str(); - } - return str; -} - -std::string showAttrPath(const AttrPath& attrPath) { - std::ostringstream out; - bool first = true; - for (auto& attr : attrPath) { - if (!first) { - out << '.'; - } else { - first = false; - } - - std::visit(util::overloaded{ - [&](const Symbol& sym) { out << sym; }, - [&](const Expr* expr) { out << "\"${" << *expr << "}\""; }}, - attr); - } - return out.str(); -} - -Pos noPos; - -/* Computing levels/displacements for variables. */ - -void Expr::bindVars(const StaticEnv& env) { abort(); } - -void ExprInt::bindVars(const StaticEnv& env) {} - -void ExprFloat::bindVars(const StaticEnv& env) {} - -void ExprString::bindVars(const StaticEnv& env) {} - -void ExprPath::bindVars(const StaticEnv& env) {} - -void ExprVar::bindVars(const StaticEnv& env) { - /* Check whether the variable appears in the environment. If so, - set its level and displacement. */ - const StaticEnv* curEnv; - unsigned int level; - std::optional withLevel = std::nullopt; - for (curEnv = &env, level = 0; curEnv != nullptr; - curEnv = curEnv->up, level++) { - if (curEnv->isWith) { - if (!withLevel.has_value()) { - withLevel = level; - } - } else { - auto i = curEnv->vars.find(name); - if (i != curEnv->vars.end()) { - fromWith = false; - this->level = level; - displ = i->second; - return; - } - } - } - - /* Otherwise, the variable must be obtained from the nearest - enclosing `with'. If there is no `with', then we can issue an - "undefined variable" error now. */ - if (!withLevel.has_value()) { - throw UndefinedVarError(format("undefined variable '%1%' at %2%") % name % - pos); - } - - fromWith = true; - this->level = withLevel.value(); -} - -void ExprSelect::bindVars(const StaticEnv& env) { - e->bindVars(env); - if (def != nullptr) { - def->bindVars(env); - } - for (auto& i : attrPath) { - if (auto* expr = std::get_if(&i)) { - (*expr)->bindVars(env); - } - } -} - -void ExprOpHasAttr::bindVars(const StaticEnv& env) { - e->bindVars(env); - for (auto& i : attrPath) { - if (auto* expr = std::get_if(&i)) { - (*expr)->bindVars(env); - } - } -} - -void ExprAttrs::bindVars(const StaticEnv& env) { - const StaticEnv* dynamicEnv = &env; - StaticEnv newEnv(/* isWith = */ false, &env); - - if (recursive) { - dynamicEnv = &newEnv; - - unsigned int displ = 0; - for (auto& i : attrs) { - newEnv.vars[i.first] = i.second.displ = displ++; - } - - for (auto& i : attrs) { - i.second.e->bindVars(i.second.inherited ? 
env : newEnv); - } - } - - else { - for (auto& i : attrs) { - i.second.e->bindVars(env); - } - } - - for (auto& i : dynamicAttrs) { - i.nameExpr->bindVars(*dynamicEnv); - i.valueExpr->bindVars(*dynamicEnv); - } -} - -void ExprList::bindVars(const StaticEnv& env) { - for (auto& i : elems) { - i->bindVars(env); - } -} - -void ExprLambda::bindVars(const StaticEnv& env) { - StaticEnv newEnv(false, &env); - - unsigned int displ = 0; - - if (!arg.empty()) { - newEnv.vars[arg] = displ++; - } - - if (matchAttrs) { - for (auto& i : formals->formals) { - newEnv.vars[i.name] = displ++; - } - - for (auto& i : formals->formals) { - if (i.def != nullptr) { - i.def->bindVars(newEnv); - } - } - } - - body->bindVars(newEnv); -} - -void ExprLet::bindVars(const StaticEnv& env) { - StaticEnv newEnv(false, &env); - - unsigned int displ = 0; - for (auto& i : attrs->attrs) { - newEnv.vars[i.first] = i.second.displ = displ++; - } - - for (auto& i : attrs->attrs) { - i.second.e->bindVars(i.second.inherited ? env : newEnv); - } - - body->bindVars(newEnv); -} - -void ExprWith::bindVars(const StaticEnv& env) { - /* Does this `with' have an enclosing `with'? If so, record its - level so that `lookupVar' can look up variables in the previous - `with' if this one doesn't contain the desired attribute. */ - const StaticEnv* curEnv; - unsigned int level; - prevWith = 0; - for (curEnv = &env, level = 1; curEnv != nullptr; - curEnv = curEnv->up, level++) { - if (curEnv->isWith) { - prevWith = level; - break; - } - } - - attrs->bindVars(env); - StaticEnv newEnv(true, &env); - body->bindVars(newEnv); -} - -void ExprIf::bindVars(const StaticEnv& env) { - cond->bindVars(env); - then->bindVars(env); - else_->bindVars(env); -} - -void ExprAssert::bindVars(const StaticEnv& env) { - cond->bindVars(env); - body->bindVars(env); -} - -void ExprOpNot::bindVars(const StaticEnv& env) { e->bindVars(env); } - -void ExprConcatStrings::bindVars(const StaticEnv& env) { - for (auto& i : *es) { - i->bindVars(env); - } -} - -void ExprPos::bindVars(const StaticEnv& env) {} - -/* Storing function names. */ -void ExprLambda::setName(Symbol& name) { this->name = name; } - -std::string ExprLambda::showNamePos() const { - return (format("%1% at %2%") % - (name.has_value() ? "'" + std::string(name.value()) + "'" - : "anonymous function") % - pos) - .str(); -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/nixexpr.hh b/third_party/nix/src/libexpr/nixexpr.hh deleted file mode 100644 index 16b58dec2e..0000000000 --- a/third_party/nix/src/libexpr/nixexpr.hh +++ /dev/null @@ -1,361 +0,0 @@ -#pragma once - -#include -#include -#include - -#include - -#include "libexpr/symbol-table.hh" -#include "libexpr/value.hh" -#include "libutil/types.hh" // TODO(tazjin): audit this include - -namespace nix { - -MakeError(EvalError, Error); -MakeError(ParseError, Error); -MakeError(AssertionError, EvalError); -MakeError(ThrownError, AssertionError); -MakeError(Abort, EvalError); -MakeError(TypeError, EvalError); -MakeError(UndefinedVarError, Error); -MakeError(RestrictedPathError, Error); - -/* Position objects. 
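showAttrPath above dispatches on the Symbol-or-Expr* variant with util::overloaded, i.e. the usual C++17 overloaded-lambdas idiom for std::visit. A minimal standalone version of that idiom (the std::string/int variant here is only a stand-in for Symbol/Expr*):

#include <iostream>
#include <string>
#include <variant>

// The classic "overloaded" helper, presumably what util::overloaded provides.
template <class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template <class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

int main() {
  // An attribute-path element is either a plain symbol or an interpolated expression.
  using AttrName = std::variant<std::string, int /* stand-in for Expr* */>;
  AttrName a = std::string("foo"), b = 42;
  for (const auto& n : {a, b}) {
    std::visit(overloaded{
                   [](const std::string& sym) { std::cout << sym << "\n"; },
                   [](int expr) { std::cout << "\"${...}\" (#" << expr << ")\n"; }},
               n);
  }
}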
*/ - -struct Pos { - std::optional file; - unsigned int line, column; - Pos(const std::optional& file, unsigned int line, unsigned int column) - : file(file), line(line), column(column){}; - - // TODO(tazjin): remove this - empty pos is never useful - Pos() : file(std::nullopt), line(0), column(0){}; - - operator bool() const { return line != 0; } - - bool operator<(const Pos& p2) const { - if (!file.has_value()) { - return true; - } - - if (!line) { - return p2.line; - } - if (!p2.line) { - return false; - } - int d = ((std::string)file.value()).compare((std::string)p2.file.value()); - if (d < 0) { - return true; - } - if (d > 0) { - return false; - } - if (line < p2.line) { - return true; - } - if (line > p2.line) { - return false; - } - return column < p2.column; - } -}; - -extern Pos noPos; - -std::ostream& operator<<(std::ostream& str, const Pos& pos); - -struct Env; -struct Value; -class EvalState; -struct StaticEnv; - -/* An attribute path is a sequence of attribute names. */ -using AttrName = std::variant; -using AttrPath = std::vector; -using AttrNameVector = std::vector; - -using VectorExprs = std::vector; - -std::string showAttrPath(const AttrPath& attrPath); - -/* Abstract syntax of Nix expressions. */ - -struct Expr { - virtual ~Expr(){}; - virtual void show(std::ostream& str) const; - virtual void bindVars(const StaticEnv& env); - virtual void eval(EvalState& state, Env& env, Value& v); - virtual Value* maybeThunk(EvalState& state, Env& env); -}; - -std::ostream& operator<<(std::ostream& str, const Expr& e); - -#define COMMON_METHODS \ - void show(std::ostream& str) const; \ - void eval(EvalState& state, Env& env, Value& v); \ - void bindVars(const StaticEnv& env); - -struct ExprInt : Expr { - NixInt n; - Value v; - ExprInt(NixInt n) : n(n) { mkInt(v, n); }; - COMMON_METHODS - Value* maybeThunk(EvalState& state, Env& env); -}; - -struct ExprFloat : Expr { - NixFloat nf; - Value v; - ExprFloat(NixFloat nf) : nf(nf) { mkFloat(v, nf); }; - COMMON_METHODS - Value* maybeThunk(EvalState& state, Env& env); -}; - -struct ExprString : Expr { - Symbol s; - Value v; - ExprString(const Symbol& s) : s(s) { mkString(v, s); }; - COMMON_METHODS - Value* maybeThunk(EvalState& state, Env& env); -}; - -/* Temporary class used during parsing of indented strings. */ -struct ExprIndStr : Expr { - std::string s; - ExprIndStr(const std::string& s) : s(s){}; -}; - -struct ExprPath : Expr { - std::string s; - Value v; - ExprPath(const std::string& s) : s(s) { mkPathNoCopy(v, this->s.c_str()); }; - COMMON_METHODS - Value* maybeThunk(EvalState& state, Env& env); -}; - -struct ExprVar : Expr { - Pos pos; - Symbol name; - - /* Whether the variable comes from an environment (e.g. a rec, let - or function argument) or from a "with". */ - bool fromWith; - - /* In the former case, the value is obtained by going `level' - levels up from the current environment and getting the - `displ'th value in that environment. In the latter case, the - value is obtained by getting the attribute named `name' from - the set stored in the environment that is `level' levels up - from the current one.*/ - unsigned int level; - unsigned int displ; - - ExprVar(const Symbol& name) : name(name){}; - ExprVar(const Pos& pos, const Symbol& name) : pos(pos), name(name){}; - COMMON_METHODS - Value* maybeThunk(EvalState& state, Env& env); -}; - -// [tazjin] I *think* that this struct describes the syntactic -// construct for "selecting" something out of an attribute set, e.g. -// `a.b.c` => ExprSelect{"b", "c"}. 
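The level/displacement scheme documented in ExprVar above resolves every non-`with` variable at bind time to "walk `level` environments up, then take slot `displ`". A standalone sketch of that resolution, with names and types simplified relative to the removed StaticEnv:

#include <cassert>
#include <map>
#include <stdexcept>
#include <string>

struct StaticEnv {
  const StaticEnv* up = nullptr;
  std::map<std::string, unsigned> vars;  // name -> displacement within this env
};

struct VarRef { unsigned level, displ; };

VarRef resolve(const StaticEnv& env, const std::string& name) {
  unsigned level = 0;
  for (const StaticEnv* e = &env; e != nullptr; e = e->up, ++level) {
    auto it = e->vars.find(name);
    if (it != e->vars.end()) return {level, it->second};
  }
  throw std::runtime_error("undefined variable '" + name + "'");
}

int main() {
  StaticEnv outer;                        // let x = ...; y = ...;
  outer.vars = {{"x", 0}, {"y", 1}};
  StaticEnv inner{&outer, {{"z", 0}}};    // z: ...  (lambda argument)
  assert(resolve(inner, "z").level == 0);
  assert(resolve(inner, "x").level == 1 && resolve(inner, "x").displ == 0);
}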
-// -// Each path element has got a pointer to an expression, which seems -// to be the thing preceding its period, but afaict that is only set -// for the first one in a path. -struct ExprSelect : Expr { - Pos pos; - Expr *e, *def; - AttrPath attrPath; - ExprSelect(const Pos& pos, Expr* e, const AttrPath& attrPath, Expr* def) - : pos(pos), e(e), def(def), attrPath(attrPath){}; - ExprSelect(const Pos& pos, Expr* e, const Symbol& name) - : pos(pos), e(e), def(0) { - attrPath.push_back(AttrName(name)); - }; - COMMON_METHODS -}; - -struct ExprOpHasAttr : Expr { - Pos pos; - Expr* e; - AttrPath attrPath; - ExprOpHasAttr(Expr* e, const AttrPath& attrPath) : e(e), attrPath(attrPath){}; - ExprOpHasAttr(const Pos& pos, Expr* e, const AttrPath& attrPath) - : pos(pos), e(e), attrPath(attrPath){}; - COMMON_METHODS -}; - -struct ExprAttrs : Expr { - bool recursive; - - struct AttrDef { - bool inherited; - Expr* e; - Pos pos; - unsigned int displ; // displacement - AttrDef(Expr* e, const Pos& pos, bool inherited = false) - : inherited(inherited), e(e), pos(pos), displ(0){}; - AttrDef(){}; - }; - - using AttrDefs = absl::flat_hash_map; - AttrDefs attrs; - - struct DynamicAttrDef { - Expr *nameExpr, *valueExpr; - Pos pos; - DynamicAttrDef(Expr* nameExpr, Expr* valueExpr, const Pos& pos) - : nameExpr(nameExpr), valueExpr(valueExpr), pos(pos){}; - }; - - using DynamicAttrDefs = std::vector; - DynamicAttrDefs dynamicAttrs; - - ExprAttrs() : recursive(false){}; - COMMON_METHODS -}; - -struct ExprList : Expr { - VectorExprs elems; - ExprList(){}; - COMMON_METHODS -}; - -struct Formal { - Symbol name; - Expr* def; // def = default, not definition - Formal(const Symbol& name, Expr* def) : name(name), def(def){}; -}; - -// Describes structured function arguments (e.g. `{ a }: ...`) -struct Formals { - using Formals_ = std::list; - Formals_ formals; - std::set argNames; // used during parsing - bool ellipsis; -}; - -struct ExprLambda : Expr { - public: - Pos pos; - std::optional name; - Symbol arg; - bool matchAttrs; - Formals* formals; - Expr* body; - ExprLambda(const Pos& pos, const Symbol& arg, bool matchAttrs, - Formals* formals, Expr* body) - : pos(pos), - arg(arg), - matchAttrs(matchAttrs), - formals(formals), - body(body) { - if (!arg.empty() && formals && - formals->argNames.find(arg) != formals->argNames.end()) { - throw ParseError( - format("duplicate formal function argument '%1%' at %2%") % arg % - pos); - } - }; - void setName(Symbol& name); - std::string showNamePos() const; - COMMON_METHODS -}; - -struct ExprLet : Expr { - ExprAttrs* attrs; - Expr* body; - ExprLet(ExprAttrs* attrs, Expr* body) : attrs(attrs), body(body){}; - COMMON_METHODS -}; - -struct ExprWith : Expr { - Pos pos; - Expr *attrs, *body; - size_t prevWith; - ExprWith(const Pos& pos, Expr* attrs, Expr* body) - : pos(pos), attrs(attrs), body(body){}; - COMMON_METHODS -}; - -struct ExprIf : Expr { - Pos pos; - Expr *cond, *then, *else_; - ExprIf(Expr* cond, Expr* then, Expr* else_) - : cond(cond), then(then), else_(else_){}; - ExprIf(const Pos& pos, Expr* cond, Expr* then, Expr* else_) - : pos(pos), cond(cond), then(then), else_(else_){}; - COMMON_METHODS -}; - -struct ExprAssert : Expr { - Pos pos; - Expr *cond, *body; - ExprAssert(const Pos& pos, Expr* cond, Expr* body) - : pos(pos), cond(cond), body(body){}; - COMMON_METHODS -}; - -struct ExprOpNot : Expr { - Pos pos; - Expr* e; - explicit ExprOpNot(Expr* e) : e(e){}; - ExprOpNot(const Pos& pos, Expr* e) : pos(pos), e(e){}; - COMMON_METHODS -}; - -#define MakeBinOp(name, s) \ - 
struct name : Expr { \ - Pos pos; \ - Expr *e1, *e2; \ - name(Expr* e1, Expr* e2) : e1(e1), e2(e2){}; \ - name(const Pos& pos, Expr* e1, Expr* e2) : pos(pos), e1(e1), e2(e2){}; \ - void show(std::ostream& str) const { \ - str << "(" << *e1 << " " s " " << *e2 << ")"; \ - } \ - void bindVars(const StaticEnv& env) { \ - e1->bindVars(env); \ - e2->bindVars(env); \ - } \ - void eval(EvalState& state, Env& env, Value& v); \ - }; - -MakeBinOp(ExprApp, ""); -MakeBinOp(ExprOpEq, "=="); -MakeBinOp(ExprOpNEq, "!="); -MakeBinOp(ExprOpAnd, "&&"); -MakeBinOp(ExprOpOr, "||"); -MakeBinOp(ExprOpImpl, "->"); -MakeBinOp(ExprOpUpdate, "//"); -MakeBinOp(ExprOpConcatLists, "++"); - -struct ExprConcatStrings : Expr { - Pos pos; - bool forceString; - nix::VectorExprs* es; - ExprConcatStrings(const Pos& pos, bool forceString, nix::VectorExprs* es) - : pos(pos), forceString(forceString), es(es){}; - COMMON_METHODS -}; - -struct ExprPos : Expr { - Pos pos; - ExprPos(const Pos& pos) : pos(pos){}; - COMMON_METHODS -}; - -/* Static environments are used to map variable names onto (level, - displacement) pairs used to obtain the value of the variable at - runtime. */ -struct StaticEnv { - bool isWith; - const StaticEnv* up; - typedef absl::flat_hash_map Vars; - Vars vars; - StaticEnv(bool isWith, const StaticEnv* up) : isWith(isWith), up(up){}; -}; - -} // namespace nix diff --git a/third_party/nix/src/libexpr/parser.cc b/third_party/nix/src/libexpr/parser.cc deleted file mode 100644 index aea6cec7e4..0000000000 --- a/third_party/nix/src/libexpr/parser.cc +++ /dev/null @@ -1,332 +0,0 @@ -#include "libexpr/parser.hh" - -#include -#include -#include -#include - -#include "libexpr/eval.hh" -#include "libstore/download.hh" -#include "libstore/store-api.hh" - -namespace nix { - -void addAttr(ExprAttrs* attrs, AttrPath& attrPath, Expr* e, const Pos& pos) { - AttrPath::iterator i; - // All attrpaths have at least one attr - assert(!attrPath.empty()); - // Checking attrPath validity. - // =========================== - for (i = attrPath.begin(); i + 1 < attrPath.end(); i++) { - if (const auto* sym = std::get_if(&(*i)); sym && sym->set()) { - ExprAttrs::AttrDefs::iterator j = attrs->attrs.find(*sym); - if (j != attrs->attrs.end()) { - if (!j->second.inherited) { - ExprAttrs* attrs2 = dynamic_cast(j->second.e); - if (!attrs2) { - dupAttr(attrPath, pos, j->second.pos); - } - attrs = attrs2; - } else { - dupAttr(attrPath, pos, j->second.pos); - } - } else { - ExprAttrs* nested = new ExprAttrs; - attrs->attrs[*sym] = ExprAttrs::AttrDef(nested, pos); - attrs = nested; - } - } else { - // Yes, this code does not handle all conditions - // exhaustively. We use std::get to throw if the condition - // that isn't covered happens, which is potentially a - // behaviour change from the previous default constructed - // Symbol. It should alert us about anything untoward going - // on here. - auto* expr = std::get(*i); - - ExprAttrs* nested = new ExprAttrs; - attrs->dynamicAttrs.push_back( - ExprAttrs::DynamicAttrDef(expr, nested, pos)); - attrs = nested; - } - } - // Expr insertion. - // ========================== - if (auto* sym = std::get_if(&(*i)); sym && sym->set()) { - ExprAttrs::AttrDefs::iterator j = attrs->attrs.find(*sym); - if (j != attrs->attrs.end()) { - // This attr path is already defined. However, if both - // e and the expr pointed by the attr path are two attribute sets, - // we want to merge them. - // Otherwise, throw an error. 
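MakeBinOp above stamps out one Expr subclass per binary operator; because `s` is always a string literal, the `" " s " "` in show() pastes into a single literal at compile time. Hand-expanding MakeBinOp(ExprOpAnd, "&&") against the declarations in this header gives roughly:

struct ExprOpAnd : Expr {
  Pos pos;
  Expr *e1, *e2;
  ExprOpAnd(Expr* e1, Expr* e2) : e1(e1), e2(e2){};
  ExprOpAnd(const Pos& pos, Expr* e1, Expr* e2) : pos(pos), e1(e1), e2(e2){};
  void show(std::ostream& str) const {
    str << "(" << *e1 << " && " << *e2 << ")";  // " " "&&" " " concatenated
  }
  void bindVars(const StaticEnv& env) {
    e1->bindVars(env);
    e2->bindVars(env);
  }
  void eval(EvalState& state, Env& env, Value& v);  // defined with the other eval() methods
};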
- auto ae = dynamic_cast(e); - auto jAttrs = dynamic_cast(j->second.e); - if (jAttrs && ae) { - for (auto& ad : ae->attrs) { - auto j2 = jAttrs->attrs.find(ad.first); - if (j2 != - jAttrs->attrs.end()) { // Attr already defined in iAttrs, error. - dupAttr(ad.first, j2->second.pos, ad.second.pos); - } - jAttrs->attrs[ad.first] = ad.second; - } - } else { - dupAttr(attrPath, pos, j->second.pos); - } - } else { - // This attr path is not defined. Let's create it. - attrs->attrs[*sym] = ExprAttrs::AttrDef(e, pos); - } - } else { - // Same caveat as the identical line above. - auto* expr = std::get(*i); - attrs->dynamicAttrs.push_back(ExprAttrs::DynamicAttrDef(expr, e, pos)); - } -} - -void addFormal(const Pos& pos, Formals* formals, const Formal& formal) { - if (formals->argNames.find(formal.name) != formals->argNames.end()) { - throw ParseError(format("duplicate formal function argument '%1%' at %2%") % - formal.name % pos); - } - formals->formals.push_front(formal); - formals->argNames.insert(formal.name); -} - -Expr* stripIndentation(const Pos& pos, SymbolTable& symbols, VectorExprs& es) { - if (es.empty()) { - return new ExprString(symbols.Create("")); - } - - /* Figure out the minimum indentation. Note that by design - whitespace-only final lines are not taken into account. (So - the " " in "\n ''" is ignored, but the " " in "\n foo''" is.) */ - bool atStartOfLine = true; /* = seen only whitespace in the current line */ - size_t minIndent = 1000000; - size_t curIndent = 0; - for (auto& i : es) { - ExprIndStr* e = dynamic_cast(i); - if (!e) { - /* Anti-quotations end the current start-of-line whitespace. */ - if (atStartOfLine) { - atStartOfLine = false; - if (curIndent < minIndent) { - minIndent = curIndent; - } - } - continue; - } - for (size_t j = 0; j < e->s.size(); ++j) { - if (atStartOfLine) { - if (e->s[j] == ' ') { - curIndent++; - } else if (e->s[j] == '\n') { - /* Empty line, doesn't influence minimum - indentation. */ - curIndent = 0; - } else { - atStartOfLine = false; - if (curIndent < minIndent) { - minIndent = curIndent; - } - } - } else if (e->s[j] == '\n') { - atStartOfLine = true; - curIndent = 0; - } - } - } - - /* Strip spaces from each line. */ - VectorExprs* es2 = new VectorExprs; - atStartOfLine = true; - size_t curDropped = 0; - size_t n = es.size(); - for (VectorExprs::iterator i = es.begin(); i != es.end(); ++i, --n) { - ExprIndStr* e = dynamic_cast(*i); - if (!e) { - atStartOfLine = false; - curDropped = 0; - es2->push_back(*i); - continue; - } - - std::string s2; - for (size_t j = 0; j < e->s.size(); ++j) { - if (atStartOfLine) { - if (e->s[j] == ' ') { - if (curDropped++ >= minIndent) { - s2 += e->s[j]; - } - } else if (e->s[j] == '\n') { - curDropped = 0; - s2 += e->s[j]; - } else { - atStartOfLine = false; - curDropped = 0; - s2 += e->s[j]; - } - } else { - s2 += e->s[j]; - if (e->s[j] == '\n') { - atStartOfLine = true; - } - } - } - - /* Remove the last line if it is empty and consists only of - spaces. */ - if (n == 1) { - std::string::size_type p = s2.find_last_of('\n'); - if (p != std::string::npos && - s2.find_first_not_of(' ', p + 1) == std::string::npos) { - s2 = std::string(s2, 0, p + 1); - } - } - - es2->push_back(new ExprString(symbols.Create(s2))); - } - - /* If this is a single string, then don't do a concatenation. */ - return es2->size() == 1 && dynamic_cast((*es2)[0]) - ? (*es2)[0] - : new ExprConcatStrings(pos, true, es2); -} - -Path resolveExprPath(Path path) { - assert(path[0] == '/'); - - /* If `path' is a symlink, follow it. 
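stripIndentation above is a two-pass algorithm: the first pass finds the minimum indentation over all lines that contain a non-space character, the second strips that many leading spaces from every line. A standalone sketch for plain strings (the real function additionally steps over interpolations and trims a trailing whitespace-only line):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <limits>
#include <string>

std::string stripIndentation(const std::string& s) {
  // Pass 1: minimum indentation of non-empty lines.
  std::size_t minIndent = std::numeric_limits<std::size_t>::max();
  std::size_t cur = 0;
  bool atStart = true;
  for (char c : s) {
    if (atStart) {
      if (c == ' ') { cur++; }
      else if (c == '\n') { cur = 0; }  // whitespace-only line: ignored
      else { atStart = false; minIndent = std::min(minIndent, cur); }
    } else if (c == '\n') { atStart = true; cur = 0; }
  }
  if (minIndent == std::numeric_limits<std::size_t>::max()) minIndent = 0;

  // Pass 2: drop up to minIndent leading spaces from each line.
  std::string out;
  std::size_t dropped = 0;
  atStart = true;
  for (char c : s) {
    if (atStart && c == ' ' && dropped < minIndent) { dropped++; continue; }
    if (c == '\n') { atStart = true; dropped = 0; } else { atStart = false; }
    out += c;
  }
  return out;
}

int main() {
  std::cout << stripIndentation("  foo\n    bar\n  baz\n");
  // foo / "  bar" / baz, i.e. two spaces stripped from every line
}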
This is so that relative - path references work. */ - struct stat st; - while (true) { - if (lstat(path.c_str(), &st)) { - throw SysError(format("getting status of '%1%'") % path); - } - if (!S_ISLNK(st.st_mode)) { - break; - } - path = absPath(readLink(path), dirOf(path)); - } - - /* If `path' refers to a directory, append `/default.nix'. */ - if (S_ISDIR(st.st_mode)) { - path = canonPath(path + "/default.nix"); - } - - return path; -} - -// These methods are actually declared in eval.hh, and were - for some -// reason - previously implemented in parser.y. - -Expr* EvalState::parseExprFromFile(const Path& path) { - return parseExprFromFile(path, staticBaseEnv); -} - -Expr* EvalState::parseExprFromFile(const Path& path, StaticEnv& staticEnv) { - return parse(readFile(path).c_str(), path, dirOf(path), staticEnv); -} - -Expr* EvalState::parseExprFromString(const std::string& s, const Path& basePath, - StaticEnv& staticEnv) { - return parse(s.c_str(), "(std::string)", basePath, staticEnv); -} - -Expr* EvalState::parseExprFromString(const std::string& s, - const Path& basePath) { - return parseExprFromString(s, basePath, staticBaseEnv); -} - -Expr* EvalState::parseStdin() { - // Activity act(*logger, lvlTalkative, format("parsing standard input")); - return parseExprFromString(drainFD(0), absPath(".")); -} - -void EvalState::addToSearchPath(const std::string& s) { - size_t pos = s.find('='); - std::string prefix; - Path path; - if (pos == std::string::npos) { - path = s; - } else { - prefix = std::string(s, 0, pos); - path = std::string(s, pos + 1); - } - - searchPath.emplace_back(prefix, path); -} - -Path EvalState::findFile(const std::string& path) { - return findFile(searchPath, path); -} - -Path EvalState::findFile(SearchPath& searchPath, const std::string& path, - const Pos& pos) { - for (auto& i : searchPath) { - std::string suffix; - if (i.first.empty()) { - suffix = "/" + path; - } else { - auto s = i.first.size(); - if (path.compare(0, s, i.first) != 0 || - (path.size() > s && path[s] != '/')) { - continue; - } - suffix = path.size() == s ? "" : "/" + std::string(path, s); - } - auto r = resolveSearchPathElem(i); - if (!r.first) { - continue; - } - Path res = r.second + suffix; - if (pathExists(res)) { - return canonPath(res); - } - } - format f = format( - "file '%1%' was not found in the Nix search path (add it using $NIX_PATH " - "or -I)" + - std::string(pos ? 
", at %2%" : "")); - f.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit); - throw ThrownError(f % path % pos); -} - -std::pair EvalState::resolveSearchPathElem( - const SearchPathElem& elem) { - auto i = searchPathResolved.find(elem.second); - if (i != searchPathResolved.end()) { - return i->second; - } - - std::pair res; - - if (isUri(elem.second)) { - try { - CachedDownloadRequest request(elem.second); - request.unpack = true; - res = {true, getDownloader()->downloadCached(store, request).path}; - } catch (DownloadError& e) { - LOG(WARNING) << "Nix search path entry '" << elem.second - << "' cannot be downloaded, ignoring"; - res = {false, ""}; - } - } else { - auto path = absPath(elem.second); - if (pathExists(path)) { - res = {true, path}; - } else { - LOG(WARNING) << "Nix search path entry '" << elem.second - << "' does not exist, ignoring"; - res = {false, ""}; - } - } - - VLOG(2) << "resolved search path element '" << elem.second << "' to '" - << res.second << "'"; - - searchPathResolved[elem.second] = res; - return res; -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/parser.hh b/third_party/nix/src/libexpr/parser.hh deleted file mode 100644 index 70b5450b5a..0000000000 --- a/third_party/nix/src/libexpr/parser.hh +++ /dev/null @@ -1,100 +0,0 @@ -// Parser utilities for use in parser.y -#pragma once - -// TODO(tazjin): Audit these includes, they were in parser.y -#include -#include - -#include - -#include "libexpr/eval.hh" -#include "libexpr/nixexpr.hh" -#include "libutil/util.hh" - -#define YY_DECL \ - int yylex(YYSTYPE* yylval_param, YYLTYPE* yylloc_param, yyscan_t yyscanner, \ - nix::ParseData* data) - -#define CUR_POS makeCurPos(*yylocp, data) - -namespace nix { - -struct ParseData { - EvalState& state; - SymbolTable& symbols; - Expr* result; - Path basePath; - std::optional path; - std::string error; - Symbol sLetBody; - - ParseData(EvalState& state) - : state(state), - symbols(state.symbols), - sLetBody(symbols.Create("")){}; -}; - -// Clang fails to identify these functions as used, probably because -// of some interaction between the lexer/parser codegen and something -// else. -// -// To avoid warnings for that we disable -Wunused-function in this block. - -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunused-function" - -// TODO(tazjin): move dupAttr to anonymous namespace -static void dupAttr(const AttrPath& attrPath, const Pos& pos, - const Pos& prevPos) { - throw ParseError(format("attribute '%1%' at %2% already defined at %3%") % - showAttrPath(attrPath) % pos % prevPos); -} - -static void dupAttr(Symbol attr, const Pos& pos, const Pos& prevPos) { - throw ParseError(format("attribute '%1%' at %2% already defined at %3%") % - attr % pos % prevPos); -} - -void addAttr(ExprAttrs* attrs, AttrPath& attrPath, Expr* e, const Pos& pos); - -void addFormal(const Pos& pos, Formals* formals, const Formal& formal); - -Expr* stripIndentation(const Pos& pos, SymbolTable& symbols, VectorExprs& es); - -Path resolveExprPath(Path path); - -// implementations originally from lexer.l - -static Expr* unescapeStr(SymbolTable& symbols, const char* s, size_t length) { - std::string t; - t.reserve(length); - char c; - while ((c = *s++)) { - if (c == '\\') { - assert(*s); - c = *s++; - if (c == 'n') { - t += '\n'; - } else if (c == 'r') { - t += '\r'; - } else if (c == 't') { - t += '\t'; - } else { - t += c; - } - } else if (c == '\r') { - /* Normalise CR and CR/LF into LF. 
*/ - t += '\n'; - if (*s == '\n') { - s++; - } /* cr/lf */ - } else { - t += c; - } - } - return new ExprString(symbols.Create(t)); -} - -#pragma clang diagnostic pop // re-enable -Wunused-function - -} // namespace nix diff --git a/third_party/nix/src/libexpr/parser.y b/third_party/nix/src/libexpr/parser.y deleted file mode 100644 index a8af06802f..0000000000 --- a/third_party/nix/src/libexpr/parser.y +++ /dev/null @@ -1,359 +0,0 @@ -%glr-parser -%locations -%define parse.error verbose -%define api.pure true -%defines -/* %no-lines */ -%parse-param { void * scanner } -%parse-param { nix::ParseData * data } -%lex-param { void * scanner } -%lex-param { nix::ParseData * data } -%expect 1 -%expect-rr 1 - -%code requires { -#define YY_NO_INPUT 1 // disable unused yyinput features -#include "libexpr/parser.hh" - -struct YYSTYPE { - union { - nix::Expr * e; - nix::ExprList * list; - nix::ExprAttrs * attrs; - nix::Formals * formals; - nix::Formal * formal; - nix::NixInt n; - nix::NixFloat nf; - const char * id; // !!! -> Symbol - char * path; - char * uri; - nix::AttrNameVector * attrNames; - nix::VectorExprs * string_parts; - }; -}; - -} - -%{ - -#include "generated/parser-tab.hh" -#include "generated/lexer-tab.hh" - -YY_DECL; - -using namespace nix; - -namespace nix { - -static inline Pos makeCurPos(const YYLTYPE& loc, ParseData* data) { - return Pos(data->path, loc.first_line, loc.first_column); -} - -void yyerror(YYLTYPE* loc, yyscan_t scanner, ParseData* data, - const char* error) { - data->error = (format("%1%, at %2%") % error % makeCurPos(*loc, data)).str(); -} - -} - -%} - -%type start expr expr_function expr_if expr_op -%type expr_app expr_select expr_simple -%type expr_list -%type binds -%type formals -%type formal -%type attrs attrpath -%type string_parts_interpolated ind_string_parts -%type string_parts string_attr -%type attr -%token ID ATTRPATH -%token STR IND_STR -%token INT -%token FLOAT -%token PATH HPATH SPATH -%token URI -%token IF THEN ELSE ASSERT WITH LET IN REC INHERIT EQ NEQ AND OR IMPL OR_KW -%token DOLLAR_CURLY /* == ${ */ -%token IND_STRING_OPEN IND_STRING_CLOSE -%token ELLIPSIS - -%right IMPL -%left OR -%left AND -%nonassoc EQ NEQ -%nonassoc '<' '>' LEQ GEQ -%right UPDATE -%left NOT -%left '+' '-' -%left '*' '/' -%right CONCAT -%nonassoc '?' -%nonassoc NEGATE - -%% - -start: expr { data->result = $1; }; - -expr: expr_function; - -expr_function - : ID ':' expr_function - { $$ = new ExprLambda(CUR_POS, data->symbols.Create($1), false, 0, $3); } - | '{' formals '}' ':' expr_function - { $$ = new ExprLambda(CUR_POS, data->symbols.Create(""), true, $2, $5); } - | '{' formals '}' '@' ID ':' expr_function - { $$ = new ExprLambda(CUR_POS, data->symbols.Create($5), true, $2, $7); } - | ID '@' '{' formals '}' ':' expr_function - { $$ = new ExprLambda(CUR_POS, data->symbols.Create($1), true, $4, $7); } - | ASSERT expr ';' expr_function - { $$ = new ExprAssert(CUR_POS, $2, $4); } - | WITH expr ';' expr_function - { $$ = new ExprWith(CUR_POS, $2, $4); } - | LET binds IN expr_function - { if (!$2->dynamicAttrs.empty()) - throw ParseError(format("dynamic attributes not allowed in let at %1%") - % CUR_POS); - $$ = new ExprLet($2, $4); - } - | expr_if - ; - -expr_if - : IF expr THEN expr ELSE expr { $$ = new ExprIf(CUR_POS, $2, $4, $6); } - | expr_op - ; - -expr_op - : '!' 
expr_op %prec NOT { $$ = new ExprOpNot(CUR_POS, $2); } - | '-' expr_op %prec NEGATE { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.Create("__sub")), new ExprInt(0)), $2); } - | expr_op EQ expr_op { $$ = new ExprOpEq(CUR_POS, $1, $3); } - | expr_op NEQ expr_op { $$ = new ExprOpNEq(CUR_POS, $1, $3); } - | expr_op '<' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.Create("__lessThan")), $1), $3); } - | expr_op LEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.Create("__lessThan")), $3), $1)); } - | expr_op '>' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.Create("__lessThan")), $3), $1); } - | expr_op GEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.Create("__lessThan")), $1), $3)); } - | expr_op AND expr_op { $$ = new ExprOpAnd(CUR_POS, $1, $3); } - | expr_op OR expr_op { $$ = new ExprOpOr(CUR_POS, $1, $3); } - | expr_op IMPL expr_op { $$ = new ExprOpImpl(CUR_POS, $1, $3); } - | expr_op UPDATE expr_op { $$ = new ExprOpUpdate(CUR_POS, $1, $3); } - | expr_op '?' attrpath { $$ = new ExprOpHasAttr(CUR_POS, $1, *$3); } - | expr_op '+' expr_op - { $$ = new ExprConcatStrings(CUR_POS, false, new nix::VectorExprs({$1, $3})); } - | expr_op '-' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.Create("__sub")), $1), $3); } - | expr_op '*' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.Create("__mul")), $1), $3); } - | expr_op '/' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.Create("__div")), $1), $3); } - | expr_op CONCAT expr_op { $$ = new ExprOpConcatLists(CUR_POS, $1, $3); } - | expr_app - ; - -expr_app - : expr_app expr_select - { $$ = new ExprApp(CUR_POS, $1, $2); } - | expr_select { $$ = $1; } - ; - -expr_select - : expr_simple '.' attrpath - { $$ = new ExprSelect(CUR_POS, $1, *$3, 0); } - | expr_simple '.' attrpath OR_KW expr_select - { $$ = new ExprSelect(CUR_POS, $1, *$3, $5); } - | /* Backwards compatibility: because Nixpkgs has a rarely used - function named ‘or’, allow stuff like ‘map or [...]’. */ - expr_simple OR_KW - { $$ = new ExprApp(CUR_POS, $1, new ExprVar(CUR_POS, data->symbols.Create("or"))); } - | expr_simple { $$ = $1; } - ; - -expr_simple - : ID { - if (strcmp($1, "__curPos") == 0) - $$ = new ExprPos(CUR_POS); - else - $$ = new ExprVar(CUR_POS, data->symbols.Create($1)); - } - | INT { $$ = new ExprInt($1); } - | FLOAT { $$ = new ExprFloat($1); } - | '"' string_parts '"' { $$ = $2; } - | IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE { - $$ = stripIndentation(CUR_POS, data->symbols, *$2); - } - | PATH { $$ = new ExprPath(absPath($1, data->basePath)); } - | HPATH { $$ = new ExprPath(getHome() + std::string{$1 + 1}); } - | SPATH { - std::string path($1 + 1, strlen($1) - 2); - $$ = new ExprApp(CUR_POS, - new ExprApp(new ExprVar(data->symbols.Create("__findFile")), - new ExprVar(data->symbols.Create("__nixPath"))), - new ExprString(data->symbols.Create(path))); - } - | URI { $$ = new ExprString(data->symbols.Create($1)); } - | '(' expr ')' { $$ = $2; } - /* Let expressions `let {..., body = ...}' are just desugared - into `(rec {..., body = ...}).body'. 
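The grammar above keeps only __lessThan as a comparison primitive; '>', '<=' and '>=' are desugared in the parser actions by swapping operands and/or negating the result. The resulting semantics, sketched over plain ints:

#include <cassert>

bool lessThan(int a, int b) { return a < b; }               // __lessThan a b
bool greaterThan(int a, int b) { return lessThan(b, a); }   // a >  b  =>  __lessThan b a
bool lessOrEq(int a, int b) { return !lessThan(b, a); }     // a <= b  =>  ! (__lessThan b a)
bool greaterOrEq(int a, int b) { return !lessThan(a, b); }  // a >= b  =>  ! (__lessThan a b)

int main() {
  assert(greaterThan(3, 2) && lessOrEq(2, 2) && greaterOrEq(5, 5));
}

The same trick is used for arithmetic: '-', '*' and '/' are emitted as applications of __sub, __mul and __div, and unary minus becomes `__sub 0 x`.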
*/ - | LET '{' binds '}' - { $3->recursive = true; $$ = new ExprSelect(noPos, $3, data->symbols.Create("body")); } - | REC '{' binds '}' - { $3->recursive = true; $$ = $3; } - | '{' binds '}' - { $$ = $2; } - | '[' expr_list ']' { $$ = $2; } - ; - -string_parts - : STR - | string_parts_interpolated { $$ = new ExprConcatStrings(CUR_POS, true, $1); } - | { $$ = new ExprString(data->symbols.Create("")); } - ; - -string_parts_interpolated - : string_parts_interpolated STR { $$ = $1; $1->push_back($2); } - | string_parts_interpolated DOLLAR_CURLY expr '}' { $$ = $1; $1->push_back($3); } - | DOLLAR_CURLY expr '}' { $$ = new nix::VectorExprs; $$->push_back($2); } - | STR DOLLAR_CURLY expr '}' { - $$ = new nix::VectorExprs; - $$->push_back($1); - $$->push_back($3); - } - ; - -ind_string_parts - : ind_string_parts IND_STR { $$ = $1; $1->push_back($2); } - | ind_string_parts DOLLAR_CURLY expr '}' { $$ = $1; $1->push_back($3); } - | { $$ = new nix::VectorExprs; } - ; - -binds - : binds attrpath '=' expr ';' { $$ = $1; addAttr($$, *$2, $4, makeCurPos(@2, data)); } - | binds INHERIT attrs ';' - { $$ = $1; - for (auto & i : *$3) { - auto sym = std::get(i); - if ($$->attrs.find(sym) != $$->attrs.end()) { - dupAttr(sym, makeCurPos(@3, data), $$->attrs[sym].pos); - } - Pos pos = makeCurPos(@3, data); - $$->attrs[sym] = ExprAttrs::AttrDef(new ExprVar(CUR_POS, sym), pos, true); - } - } - | binds INHERIT '(' expr ')' attrs ';' - { $$ = $1; - /* !!! Should ensure sharing of the expression in $4. */ - for (auto & i : *$6) { - auto sym = std::get(i); - if ($$->attrs.find(sym) != $$->attrs.end()) { - dupAttr(sym, makeCurPos(@6, data), $$->attrs[sym].pos); - } - $$->attrs[sym] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, sym), makeCurPos(@6, data)); - } - } - | { $$ = new ExprAttrs; } - ; - -attrs - : attrs attr { $$ = $1; $1->push_back(AttrName(data->symbols.Create($2))); } - | attrs string_attr - { $$ = $1; - ExprString * str = dynamic_cast($2); - if (str) { - $$->push_back(AttrName(str->s)); - delete str; - } else - throw ParseError(format("dynamic attributes not allowed in inherit at %1%") - % makeCurPos(@2, data)); - } - | { $$ = new AttrPath; } - ; - -attrpath - : attrpath '.' attr { $$ = $1; $1->push_back(AttrName(data->symbols.Create($3))); } - | attrpath '.' string_attr - { $$ = $1; - ExprString * str = dynamic_cast($3); - if (str) { - $$->push_back(AttrName(str->s)); - delete str; - } else { - $$->push_back(AttrName($3)); - } - } - | attr { $$ = new nix::AttrNameVector; $$->push_back(AttrName(data->symbols.Create($1))); } - | string_attr - { $$ = new nix::AttrNameVector; - ExprString *str = dynamic_cast($1); - if (str) { - $$->push_back(AttrName(str->s)); - delete str; - } else - $$->push_back(AttrName($1)); - } - ; - -attr - : ID { $$ = $1; } - | OR_KW { $$ = "or"; } - ; - -string_attr - : '"' string_parts '"' { $$ = $2; } - | DOLLAR_CURLY expr '}' { $$ = $2; } - ; - -expr_list - : expr_list expr_select { $$ = $1; $1->elems.push_back($2); /* !!! dangerous */ } - | { $$ = new ExprList; } - ; - -formals - : formal ',' formals - { $$ = $3; addFormal(CUR_POS, $$, *$1); } - | formal - { $$ = new Formals; addFormal(CUR_POS, $$, *$1); $$->ellipsis = false; } - | - { $$ = new Formals; $$->ellipsis = false; } - | ELLIPSIS - { $$ = new Formals; $$->ellipsis = true; } - ; - -formal - : ID { $$ = new Formal(data->symbols.Create($1), 0); } - | ID '?' 
expr { $$ = new Formal(data->symbols.Create($1), $3); } - ; - -%% - - -#include -#include -#include -#include - -#include "libexpr/eval.hh" -#include "libstore/store-api.hh" - - -namespace nix { - -Expr* EvalState::parse(const char* text, const Path& path, const Path& basePath, - StaticEnv& staticEnv) { - yyscan_t scanner; - ParseData data(*this); - data.basePath = basePath; - data.path = data.symbols.Create(path); - - yylex_init(&scanner); - yy_scan_string(text, scanner); - int res = yyparse(scanner, &data); - yylex_destroy(scanner); - - if (res) { - throw ParseError(data.error); - } - - data.result->bindVars(staticEnv); - - return data.result; -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/primops.cc b/third_party/nix/src/libexpr/primops.cc deleted file mode 100644 index f196c5ed72..0000000000 --- a/third_party/nix/src/libexpr/primops.cc +++ /dev/null @@ -1,2335 +0,0 @@ -#include "libexpr/primops.hh" - -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "libexpr/eval-inline.hh" -#include "libexpr/eval.hh" -#include "libexpr/json-to-value.hh" -#include "libexpr/names.hh" -#include "libexpr/value-to-json.hh" -#include "libexpr/value-to-xml.hh" -#include "libstore/derivations.hh" -#include "libstore/download.hh" -#include "libstore/globals.hh" -#include "libstore/store-api.hh" -#include "libutil/archive.hh" -#include "libutil/json.hh" -#include "libutil/status.hh" -#include "libutil/util.hh" - -namespace nix { - -/************************************************************* - * Miscellaneous - *************************************************************/ - -/* Decode a context string ‘!!’ into a pair . */ -std::pair decodeContext(const std::string& s) { - if (s.at(0) == '!') { - size_t index = s.find('!', 1); - return std::pair(std::string(s, index + 1), - std::string(s, 1, index - 1)); - } - return std::pair( - s.at(0) == '/' ? s : std::string(s, 1), ""); -} - -InvalidPathError::InvalidPathError(const Path& path) - : EvalError(format("path '%1%' is not valid") % path), path(path) {} - -void EvalState::realiseContext(const PathSet& context) { - PathSet drvs; - - for (auto& i : context) { - std::pair decoded = decodeContext(i); - Path ctx = decoded.first; - assert(store->isStorePath(ctx)); - if (!store->isValidPath(ctx)) { - throw InvalidPathError(ctx); - } - if (!decoded.second.empty() && nix::isDerivation(ctx)) { - drvs.insert(decoded.first + "!" + decoded.second); - - /* Add the output of this derivation to the allowed - paths. */ - if (allowedPaths) { - auto drv = store->derivationFromPath(decoded.first); - auto i = drv.outputs.find(decoded.second); - if (i == drv.outputs.end()) { - throw Error("derivation '%s' does not have an output named '%s'", - decoded.first, decoded.second); - } - allowedPaths->insert(i->second.path); - } - } - } - - if (drvs.empty()) { - return; - } - - if (!evalSettings.enableImportFromDerivation) { - throw EvalError(format("attempted to realize '%1%' during evaluation but " - "'allow-import-from-derivation' is false") % - *(drvs.begin())); - } - - /* For performance, prefetch all substitute info. */ - PathSet willBuild; - PathSet willSubstitute; - PathSet unknown; - unsigned long long downloadSize; - unsigned long long narSize; - store->queryMissing(drvs, willBuild, willSubstitute, unknown, downloadSize, - narSize); - - nix::util::OkOrThrow(store->buildPaths(std::cerr, drvs)); -} - -/* Load and evaluate an expression from path specified by the - argument. 
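decodeContext above parses the string-context entries used throughout primops.cc: "!output!drvPath" names a single derivation output, a leading '/' is a plain store path, and any other leading marker character (such as the '=' used for drvPath contexts) is stripped. A standalone sketch with the same behaviour:

#include <cassert>
#include <string>
#include <utility>

std::pair<std::string, std::string> decodeContext(const std::string& s) {
  if (s.at(0) == '!') {
    auto i = s.find('!', 1);
    return {s.substr(i + 1), s.substr(1, i - 1)};  // (path, output)
  }
  return {s.at(0) == '/' ? s : s.substr(1), ""};   // plain path, no output
}

int main() {
  auto [path, output] = decodeContext("!out!/nix/store/abc-foo.drv");
  assert(path == "/nix/store/abc-foo.drv" && output == "out");
}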
*/ -static void prim_scopedImport(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - Path path = state.coerceToPath(pos, *args[1], context); - - try { - state.realiseContext(context); - } catch (InvalidPathError& e) { - throw EvalError( - format("cannot import '%1%', since path '%2%' is not valid, at %3%") % - path % e.path % pos); - } - - Path realPath = state.checkSourcePath(state.toRealPath(path, context)); - - if (state.store->isStorePath(path) && state.store->isValidPath(path) && - isDerivation(path)) { - Derivation drv = readDerivation(realPath); - Value& w = *state.allocValue(); - state.mkAttrs(w, 3 + drv.outputs.size()); - Value* v2 = state.allocAttr(w, state.sDrvPath); - mkString(*v2, path, {"=" + path}); - v2 = state.allocAttr(w, state.sName); - mkString(*v2, drv.env["name"]); - Value* outputsVal = state.allocAttr(w, state.symbols.Create("outputs")); - state.mkList(*outputsVal, drv.outputs.size()); - unsigned int outputs_index = 0; - - for (const auto& o : drv.outputs) { - v2 = state.allocAttr(w, state.symbols.Create(o.first)); - mkString(*v2, o.second.path, {"!" + o.first + "!" + path}); - (*outputsVal->list)[outputs_index] = state.allocValue(); - mkString(*((*outputsVal->list)[outputs_index++]), o.first); - } - - Value fun; - state.evalFile( - settings.nixDataDir + "/nix/corepkgs/imported-drv-to-derivation.nix", - fun); - state.forceFunction(fun, pos); - mkApp(v, fun, w); - state.forceAttrs(v, pos); - } else { - state.forceAttrs(*args[0]); - if (args[0]->attrs->empty()) { - state.evalFile(realPath, v); - } else { - Env* env = &state.allocEnv(args[0]->attrs->size()); - env->up = &state.baseEnv; - - StaticEnv staticEnv(false, &state.staticBaseEnv); - - unsigned int displ = 0; - for (auto& attr : *args[0]->attrs) { - staticEnv.vars[attr.second.name] = displ; - env->values[displ++] = attr.second.value; - } - - DLOG(INFO) << "evaluating file '" << realPath << "'"; - Expr* e = state.parseExprFromFile(resolveExprPath(realPath), staticEnv); - - e->eval(state, *env, v); - } - } -} - -/* Return a string representing the type of the expression. */ -static void prim_typeOf(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - std::string t; - switch (args[0]->type) { - case tInt: - t = "int"; - break; - case tBool: - t = "bool"; - break; - case tString: - t = "string"; - break; - case tPath: - t = "path"; - break; - case tNull: - t = "null"; - break; - case tAttrs: - t = "set"; - break; - case tList: - t = "list"; - break; - case tLambda: - case tPrimOp: - case tPrimOpApp: - t = "lambda"; - break; - case tFloat: - t = "float"; - break; - default: - abort(); - } - mkString(v, state.symbols.Create(t)); -} - -/* Determine whether the argument is the null value. */ -static void prim_isNull(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - mkBool(v, args[0]->type == tNull); -} - -/* Determine whether the argument is a function. */ -static void prim_isFunction(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - bool res; - switch (args[0]->type) { - case tLambda: - case tPrimOp: - case tPrimOpApp: - res = true; - break; - default: - res = false; - break; - } - mkBool(v, res); -} - -/* Determine whether the argument is an integer. */ -static void prim_isInt(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - mkBool(v, args[0]->type == tInt); -} - -/* Determine whether the argument is a float. 
*/ -static void prim_isFloat(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - mkBool(v, args[0]->type == tFloat); -} - -/* Determine whether the argument is a string. */ -static void prim_isString(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - mkBool(v, args[0]->type == tString); -} - -/* Determine whether the argument is a Boolean. */ -static void prim_isBool(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - mkBool(v, args[0]->type == tBool); -} - -/* Determine whether the argument is a path. */ -static void prim_isPath(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - mkBool(v, args[0]->type == tPath); -} - -struct CompareValues { - bool operator()(const Value* v1, const Value* v2) const { - if (v1->type == tFloat && v2->type == tInt) { - return v1->fpoint < v2->integer; - } - if (v1->type == tInt && v2->type == tFloat) { - return v1->integer < v2->fpoint; - } - if (v1->type != v2->type) { - throw EvalError(format("cannot compare %1% with %2%") % showType(*v1) % - showType(*v2)); - } - switch (v1->type) { - case tInt: - return v1->integer < v2->integer; - case tFloat: - return v1->fpoint < v2->fpoint; - case tString: - return strcmp(v1->string.s, v2->string.s) < 0; - case tPath: - return strcmp(v1->path, v2->path) < 0; - default: - throw EvalError(format("cannot compare %1% with %2%") % showType(*v1) % - showType(*v2)); - } - } -}; - -typedef std::list ValueList; - -static void prim_genericClosure(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceAttrs(*args[0], pos); - - /* Get the start set. */ - Bindings::iterator startSet = - args[0]->attrs->find(state.symbols.Create("startSet")); - if (startSet == args[0]->attrs->end()) { - throw EvalError(format("attribute 'startSet' required, at %1%") % pos); - } - state.forceList(*startSet->second.value, pos); - - ValueList workSet; - for (Value* elem : *startSet->second.value->list) { - workSet.push_back(elem); - } - - /* Get the operator. */ - Bindings::iterator op = - args[0]->attrs->find(state.symbols.Create("operator")); - if (op == args[0]->attrs->end()) { - throw EvalError(format("attribute 'operator' required, at %1%") % pos); - } - state.forceValue(*op->second.value); - - /* Construct the closure by applying the operator to element of - `workSet', adding the result to `workSet', continuing until - no new elements are found. */ - ValueList res; - // `doneKeys' doesn't need to be a GC root, because its values are - // reachable from res. - std::set doneKeys; - while (!workSet.empty()) { - Value* e = *(workSet.begin()); - workSet.pop_front(); - - state.forceAttrs(*e, pos); - - Bindings::iterator key = e->attrs->find(state.symbols.Create("key")); - if (key == e->attrs->end()) { - throw EvalError(format("attribute 'key' required, at %1%") % pos); - } - state.forceValue(*key->second.value); - - if (doneKeys.find(key->second.value) != doneKeys.end()) { - continue; - } - doneKeys.insert(key->second.value); - res.push_back(e); - - /* Call the `operator' function with `e' as argument. */ - Value call; - mkApp(call, *op->second.value, *e); - state.forceList(call, pos); - - /* Add the values returned by the operator to the work set. */ - for (unsigned int n = 0; n < call.listSize(); ++n) { - state.forceValue(*(*call.list)[n]); - workSet.push_back((*call.list)[n]); - } - } - - /* Create the result list. 
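prim_genericClosure above is a plain worklist computation: pop an element, skip it if its `key` was already seen, otherwise emit it and enqueue whatever `operator` returns for it, until the worklist is empty. A standalone sketch with integer keys (the real builtin compares arbitrary key values via CompareValues):

#include <iostream>
#include <list>
#include <set>
#include <vector>

struct Elem { int key; };

std::vector<Elem> genericClosure(std::vector<Elem> startSet,
                                 std::vector<Elem> (*op)(const Elem&)) {
  std::list<Elem> work(startSet.begin(), startSet.end());
  std::set<int> doneKeys;
  std::vector<Elem> res;
  while (!work.empty()) {
    Elem e = work.front();
    work.pop_front();
    if (!doneKeys.insert(e.key).second) continue;  // key already seen
    res.push_back(e);
    for (const Elem& next : op(e)) work.push_back(next);
  }
  return res;
}

int main() {
  // operator: each n yields n/2, so the closure of {12} is {12, 6, 3, 1, 0}.
  auto halve = [](const Elem& e) { return std::vector<Elem>{{e.key / 2}}; };
  for (const Elem& e : genericClosure({{12}}, halve)) std::cout << e.key << " ";
  std::cout << "\n";
}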
*/ - state.mkList(v, res.size()); - unsigned int n = 0; - for (auto& i : res) { - (*v.list)[n++] = i; - } -} - -static void prim_abort(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - std::string s = state.coerceToString(pos, *args[0], context); - throw Abort( - format("evaluation aborted with the following error message: '%1%'") % s); -} - -static void prim_throw(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - std::string s = state.coerceToString(pos, *args[0], context); - throw ThrownError(s); -} - -static void prim_addErrorContext(EvalState& state, const Pos& pos, Value** args, - Value& v) { - try { - state.forceValue(*args[1]); - v = *args[1]; - } catch (Error& e) { - PathSet context; - e.addPrefix(format("%1%\n") % state.coerceToString(pos, *args[0], context)); - throw; - } -} - -/* Try evaluating the argument. Success => {success=true; value=something;}, - * else => {success=false; value=false;} */ -static void prim_tryEval(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.mkAttrs(v, 2); - try { - state.forceValue(*args[0]); - v.attrs->push_back(Attr(state.sValue, args[0])); - mkBool(*state.allocAttr(v, state.symbols.Create("success")), true); - } catch (AssertionError& e) { - mkBool(*state.allocAttr(v, state.sValue), false); - mkBool(*state.allocAttr(v, state.symbols.Create("success")), false); - } -} - -/* Return an environment variable. Use with care. */ -static void prim_getEnv(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string name = state.forceStringNoCtx(*args[0], pos); - mkString(v, evalSettings.restrictEval || evalSettings.pureEval - ? "" - : getEnv(name).value_or("")); -} - -/* Evaluate the first argument, then return the second argument. */ -static void prim_seq(EvalState& state, const Pos& pos, Value** args, Value& v) { - state.forceValue(*args[0]); - state.forceValue(*args[1]); - v = *args[1]; -} - -/* Evaluate the first argument deeply (i.e. recursing into lists and - attrsets), then return the second argument. */ -static void prim_deepSeq(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValueDeep(*args[0]); - state.forceValue(*args[1]); - v = *args[1]; -} - -/* Evaluate the first expression and print it on standard error. Then - return the second expression. Useful for debugging. */ -static void prim_trace(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - if (args[0]->type == tString) { - LOG(INFO) << "trace: " << args[0]->string.s; - } else { - LOG(INFO) << "trace: " << *args[0]; - } - state.forceValue(*args[1]); - v = *args[1]; -} - -void prim_valueSize(EvalState& state, const Pos& pos, Value** args, Value& v) { - /* We're not forcing the argument on purpose. */ - mkInt(v, valueSize(*args[0])); -} - -/************************************************************* - * Derivations - *************************************************************/ - -/* Construct (as a unobservable side effect) a Nix derivation - expression that performs the derivation described by the argument - set. Returns the original set extended with the following - attributes: `outPath' containing the primary output path of the - derivation; `drvPath' containing the path of the Nix expression; - and `type' set to `derivation' to indicate that this is a - derivation. 
*/ -static void prim_derivationStrict(EvalState& state, const Pos& pos, - Value** args, Value& v) { - state.forceAttrs(*args[0], pos); - - /* Figure out the name first (for stack backtraces). */ - Bindings::iterator attr = args[0]->attrs->find(state.sName); - if (attr == args[0]->attrs->end()) { - throw EvalError(format("required attribute 'name' missing, at %1%") % pos); - } - std::string drvName; - Pos& posDrvName(*attr->second.pos); - try { - drvName = state.forceStringNoCtx(*attr->second.value, pos); - } catch (Error& e) { - e.addPrefix( - format("while evaluating the derivation attribute 'name' at %1%:\n") % - posDrvName); - throw; - } - - /* Check whether attributes should be passed as a JSON file. */ - std::ostringstream jsonBuf; - std::unique_ptr jsonObject; - attr = args[0]->attrs->find(state.sStructuredAttrs); - if (attr != args[0]->attrs->end() && - state.forceBool(*attr->second.value, pos)) { - jsonObject = std::make_unique(jsonBuf); - } - - /* Check whether null attributes should be ignored. */ - bool ignoreNulls = false; - attr = args[0]->attrs->find(state.sIgnoreNulls); - if (attr != args[0]->attrs->end()) { - ignoreNulls = state.forceBool(*attr->second.value, pos); - } - - /* Build the derivation expression by processing the attributes. */ - Derivation drv; - - PathSet context; - - std::optional outputHash; - std::string outputHashAlgo; - bool outputHashRecursive = false; - - StringSet outputs; - outputs.insert("out"); - - for (auto& [_, i] : *args[0]->attrs) { - if (i.name == state.sIgnoreNulls) { - continue; - } - const std::string& key = i.name; - - auto handleHashMode = [&](const std::string& s) { - if (s == "recursive") { - outputHashRecursive = true; - } else if (s == "flat") { - outputHashRecursive = false; - } else { - throw EvalError( - "invalid value '%s' for 'outputHashMode' attribute, at %s", s, - posDrvName); - } - }; - - auto handleOutputs = [&](const Strings& ss) { - outputs.clear(); - for (auto& j : ss) { - if (outputs.find(j) != outputs.end()) { - throw EvalError(format("duplicate derivation output '%1%', at %2%") % - j % posDrvName); - } - /* !!! Check whether j is a valid attribute - name. */ - /* Derivations cannot be named ‘drv’, because - then we'd have an attribute ‘drvPath’ in - the resulting set. */ - if (j == "drv") { - throw EvalError( - format("invalid derivation output name 'drv', at %1%") % - posDrvName); - } - outputs.insert(j); - } - if (outputs.empty()) { - throw EvalError( - format("derivation cannot have an empty set of outputs, at %1%") % - posDrvName); - } - }; - - try { - if (ignoreNulls) { - state.forceValue(*i.value); - if (i.value->type == tNull) { - continue; - } - } - - /* The `args' attribute is special: it supplies the - command-line arguments to the builder. */ - if (i.name == state.sArgs) { - state.forceList(*i.value, pos); - for (unsigned int n = 0; n < i.value->listSize(); ++n) { - std::string s = state.coerceToString(posDrvName, *(*i.value->list)[n], - context, true); - drv.args.push_back(s); - } - } - - /* All other attributes are passed to the builder through - the environment. 
*/ - else { - if (jsonObject) { - if (i.name == state.sStructuredAttrs) { - continue; - } - - auto placeholder(jsonObject->placeholder(key)); - printValueAsJSON(state, true, *i.value, placeholder, context); - - if (i.name == state.sBuilder) { - drv.builder = state.forceString(*i.value, context, posDrvName); - } else if (i.name == state.sSystem) { - drv.platform = state.forceStringNoCtx(*i.value, posDrvName); - } else if (i.name == state.sOutputHash) { - outputHash = state.forceStringNoCtx(*i.value, posDrvName); - } else if (i.name == state.sOutputHashAlgo) { - outputHashAlgo = state.forceStringNoCtx(*i.value, posDrvName); - } else if (i.name == state.sOutputHashMode) { - handleHashMode(state.forceStringNoCtx(*i.value, posDrvName)); - } else if (i.name == state.sOutputs) { - /* Require ‘outputs’ to be a list of strings. */ - state.forceList(*i.value, posDrvName); - Strings ss; - for (unsigned int n = 0; n < i.value->listSize(); ++n) { - ss.emplace_back( - state.forceStringNoCtx(*(*i.value->list)[n], posDrvName)); - } - handleOutputs(ss); - } - - } else { - auto s = state.coerceToString(posDrvName, *i.value, context, true); - drv.env.emplace(key, s); - if (i.name == state.sBuilder) { - drv.builder = s; - } else if (i.name == state.sSystem) { - drv.platform = s; - } else if (i.name == state.sOutputHash) { - outputHash = s; - } else if (i.name == state.sOutputHashAlgo) { - outputHashAlgo = s; - } else if (i.name == state.sOutputHashMode) { - handleHashMode(s); - } else if (i.name == state.sOutputs) { - handleOutputs(absl::StrSplit(s, absl::ByAnyChar(" \t\n\r"), - absl::SkipEmpty())); - } - } - } - - } catch (Error& e) { - e.addPrefix(format("while evaluating the attribute '%1%' of the " - "derivation '%2%' at %3%:\n") % - key % drvName % posDrvName); - throw; - } - } - - if (jsonObject) { - jsonObject.reset(); - drv.env.emplace("__json", jsonBuf.str()); - } - - /* Everything in the context of the strings in the derivation - attributes should be added as dependencies of the resulting - derivation. */ - for (auto& path : context) { - /* Paths marked with `=' denote that the path of a derivation - is explicitly passed to the builder. Since that allows the - builder to gain access to every path in the dependency - graph of the derivation (including all outputs), all paths - in the graph must be added to this derivation's list of - inputs to ensure that they are available when the builder - runs. */ - if (path.at(0) == '=') { - /* !!! This doesn't work if readOnlyMode is set. */ - PathSet refs; - state.store->computeFSClosure(std::string(path, 1), refs); - for (auto& j : refs) { - drv.inputSrcs.insert(j); - if (isDerivation(j)) { - drv.inputDrvs[j] = state.store->queryDerivationOutputNames(j); - } - } - } - - /* Handle derivation outputs of the form ‘!!’. */ - else if (path.at(0) == '!') { - std::pair ctx = decodeContext(path); - drv.inputDrvs[ctx.first].insert(ctx.second); - } - - /* Otherwise it's a source file. */ - else { - drv.inputSrcs.insert(path); - } - } - - /* Do we have all required attributes? */ - if (drv.builder.empty()) { - throw EvalError(format("required attribute 'builder' missing, at %1%") % - posDrvName); - } - if (drv.platform.empty()) { - throw EvalError(format("required attribute 'system' missing, at %1%") % - posDrvName); - } - - /* Check whether the derivation name is valid. 
*/ - checkStoreName(drvName); - if (isDerivation(drvName)) { - throw EvalError( - format("derivation names are not allowed to end in '%1%', at %2%") % - drvExtension % posDrvName); - } - - if (outputHash) { - /* Handle fixed-output derivations. */ - if (outputs.size() != 1 || *(outputs.begin()) != "out") { - throw Error(format("multiple outputs are not supported in fixed-output " - "derivations, at %1%") % - posDrvName); - } - - HashType ht = - outputHashAlgo.empty() ? htUnknown : parseHashType(outputHashAlgo); - auto hash_ = Hash::deserialize(*outputHash, ht); - auto h = Hash::unwrap_throw(hash_); - - Path outPath = - state.store->makeFixedOutputPath(outputHashRecursive, h, drvName); - if (!jsonObject) { - drv.env["out"] = outPath; - } - drv.outputs["out"] = DerivationOutput( - outPath, (outputHashRecursive ? "r:" : "") + printHashType(h.type), - h.to_string(Base16, false)); - } - - else { - /* Construct the "masked" store derivation, which is the final - one except that in the list of outputs, the output paths - are empty, and the corresponding environment variables have - an empty value. This ensures that changes in the set of - output names do get reflected in the hash. */ - for (auto& i : outputs) { - if (!jsonObject) { - drv.env[i] = ""; - } - drv.outputs[i] = DerivationOutput("", "", ""); - } - - /* Use the masked derivation expression to compute the output - path. */ - Hash h = hashDerivationModulo(*state.store, drv); - - for (auto& i : drv.outputs) { - if (i.second.path.empty()) { - Path outPath = state.store->makeOutputPath(i.first, h, drvName); - if (!jsonObject) { - drv.env[i.first] = outPath; - } - i.second.path = outPath; - } - } - } - - /* Write the resulting term into the Nix store directory. */ - Path drvPath = writeDerivation(state.store, drv, drvName, state.repair); - - VLOG(2) << "instantiated '" << drvName << "' -> '" << drvPath << "'"; - - /* Optimisation, but required in read-only mode! because in that - case we don't actually write store derivations, so we can't - read them later. */ - drvHashes[drvPath] = hashDerivationModulo(*state.store, drv); - - state.mkAttrs(v, 1 + drv.outputs.size()); - mkString(*state.allocAttr(v, state.sDrvPath), drvPath, {"=" + drvPath}); - for (auto& i : drv.outputs) { - mkString(*state.allocAttr(v, state.symbols.Create(i.first)), i.second.path, - {"!" + i.first + "!" + drvPath}); - } -} - -/* Return a placeholder string for the specified output that will be - substituted by the corresponding output path at build time. For - example, 'placeholder "out"' returns the string - /1rz4g4znpzjwh1xymhjpm42vipw92pr73vdgl6xs1hycac8kf2n9. At build - time, any occurence of this string in an derivation attribute will - be replaced with the concrete path in the Nix store of the output - ‘out’. */ -static void prim_placeholder(EvalState& state, const Pos& pos, Value** args, - Value& v) { - mkString(v, hashPlaceholder(state.forceStringNoCtx(*args[0], pos))); -} - -/************************************************************* - * Paths - *************************************************************/ - -/* Convert the argument to a path. !!! obsolete? */ -static void prim_toPath(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - Path path = state.coerceToPath(pos, *args[0], context); - mkString(v, canonPath(path), context); -} - -/* Allow a valid store path to be used in an expression. 
This is - useful in some generated expressions such as in nix-push, which - generates a call to a function with an already existing store path - as argument. You don't want to use `toPath' here because it copies - the path to the Nix store, which yields a copy like - /nix/store/newhash-oldhash-oldname. In the past, `toPath' had - special case behaviour for store paths, but that created weird - corner cases. */ -static void prim_storePath(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - Path path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context)); - /* Resolve symlinks in ‘path’, unless ‘path’ itself is a symlink - directly in the store. The latter condition is necessary so - e.g. nix-push does the right thing. */ - if (!state.store->isStorePath(path)) { - path = canonPath(path, true); - } - if (!state.store->isInStore(path)) { - throw EvalError(format("path '%1%' is not in the Nix store, at %2%") % - path % pos); - } - Path path2 = state.store->toStorePath(path); - if (!settings.readOnlyMode) { - state.store->ensurePath(path2); - } - context.insert(path2); - mkString(v, path, context); -} - -static void prim_pathExists(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - Path path = state.coerceToPath(pos, *args[0], context); - try { - state.realiseContext(context); - } catch (InvalidPathError& e) { - throw EvalError(format("cannot check the existence of '%1%', since path " - "'%2%' is not valid, at %3%") % - path % e.path % pos); - } - - try { - mkBool(v, pathExists(state.checkSourcePath(path))); - } catch (SysError& e) { - /* Don't give away info from errors while canonicalising - ‘path’ in restricted mode. */ - mkBool(v, false); - } catch (RestrictedPathError& e) { - mkBool(v, false); - } -} - -/* Return the base name of the given string, i.e., everything - following the last slash. */ -static void prim_baseNameOf(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - mkString( - v, baseNameOf(state.coerceToString(pos, *args[0], context, false, false)), - context); -} - -/* Return the directory of the given path, i.e., everything before the - last slash. Return either a path or a string depending on the type - of the argument. */ -static void prim_dirOf(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - Path dir = dirOf(state.coerceToString(pos, *args[0], context, false, false)); - if (args[0]->type == tPath) { - mkPath(v, dir.c_str()); - } else { - mkString(v, dir, context); - } -} - -/* Return the contents of a file as a string. */ -static void prim_readFile(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - Path path = state.coerceToPath(pos, *args[0], context); - try { - state.realiseContext(context); - } catch (InvalidPathError& e) { - throw EvalError( - format("cannot read '%1%', since path '%2%' is not valid, at %3%") % - path % e.path % pos); - } - std::string s = - readFile(state.checkSourcePath(state.toRealPath(path, context))); - if (s.find(static_cast(0)) != std::string::npos) { - throw Error(format("the contents of the file '%1%' cannot be represented " - "as a Nix string") % - path); - } - mkString(v, s.c_str()); -} - -/* Find a file in the Nix search path. Used to implement paths, - which are desugared to 'findFile __nixPath "x"'. 
*/ -static void prim_findFile(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceList(*args[0], pos); - - SearchPath searchPath; - - for (unsigned int n = 0; n < args[0]->listSize(); ++n) { - Value& v2(*(*args[0]->list)[n]); - state.forceAttrs(v2, pos); - - std::string prefix; - Bindings::iterator i = v2.attrs->find(state.symbols.Create("prefix")); - if (i != v2.attrs->end()) { - prefix = state.forceStringNoCtx(*i->second.value, pos); - } - - i = v2.attrs->find(state.symbols.Create("path")); - if (i == v2.attrs->end()) { - throw EvalError(format("attribute 'path' missing, at %1%") % pos); - } - - PathSet context; - std::string path = - state.coerceToString(pos, *i->second.value, context, false, false); - - try { - state.realiseContext(context); - } catch (InvalidPathError& e) { - throw EvalError( - format("cannot find '%1%', since path '%2%' is not valid, at %3%") % - path % e.path % pos); - } - - searchPath.emplace_back(prefix, path); - } - - std::string path = state.forceStringNoCtx(*args[1], pos); - - mkPath(v, - state.checkSourcePath(state.findFile(searchPath, path, pos)).c_str()); -} - -/* Return the cryptographic hash of a file in base-16. */ -static void prim_hashFile(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string type = state.forceStringNoCtx(*args[0], pos); - HashType ht = parseHashType(type); - if (ht == htUnknown) { - throw Error(format("unknown hash type '%1%', at %2%") % type % pos); - } - - PathSet context; // discarded - Path p = state.coerceToPath(pos, *args[1], context); - - mkString(v, hashFile(ht, state.checkSourcePath(p)).to_string(Base16, false), - context); -} - -/* Read a directory (without . or ..) */ -static void prim_readDir(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet ctx; - Path path = state.coerceToPath(pos, *args[0], ctx); - try { - state.realiseContext(ctx); - } catch (InvalidPathError& e) { - throw EvalError( - format("cannot read '%1%', since path '%2%' is not valid, at %3%") % - path % e.path % pos); - } - - DirEntries entries = readDirectory(state.checkSourcePath(path)); - state.mkAttrs(v, entries.size()); - - for (auto& ent : entries) { - Value* ent_val = state.allocAttr(v, state.symbols.Create(ent.name)); - if (ent.type == DT_UNKNOWN) { - ent.type = getFileType(path + "/" + ent.name); - } - mkStringNoCopy(*ent_val, ent.type == DT_REG ? "regular" - : ent.type == DT_DIR ? "directory" - : ent.type == DT_LNK ? "symlink" - : "unknown"); - } -} - -/************************************************************* - * Creating files - *************************************************************/ - -/* Convert the argument (which can be any Nix expression) to an XML - representation returned in a string. Not all Nix expressions can - be sensibly or completely represented (e.g., functions). */ -static void prim_toXML(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::ostringstream out; - PathSet context; - printValueAsXML(state, true, false, *args[0], out, context); - mkString(v, out.str(), context); -} - -/* Convert the argument (which can be any Nix expression) to a JSON - string. Not all Nix expressions can be sensibly or completely - represented (e.g., functions). */ -static void prim_toJSON(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::ostringstream out; - PathSet context; - printValueAsJSON(state, true, *args[0], out, context); - mkString(v, out.str(), context); -} - -/* Parse a JSON string to a value. 
*/ -static void prim_fromJSON(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string s = state.forceStringNoCtx(*args[0], pos); - parseJSON(state, s, v); -} - -/* Store a string in the Nix store as a source file that can be used - as an input by derivations. */ -static void prim_toFile(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - std::string name = state.forceStringNoCtx(*args[0], pos); - std::string contents = state.forceString(*args[1], context, pos); - - PathSet refs; - - for (auto path : context) { - if (path.at(0) != '/') { - throw EvalError(format("in 'toFile': the file '%1%' cannot refer to " - "derivation outputs, at %2%") % - name % pos); - } - refs.insert(path); - } - - Path storePath = - settings.readOnlyMode - ? state.store->computeStorePathForText(name, contents, refs) - : state.store->addTextToStore(name, contents, refs, state.repair); - - /* Note: we don't need to add `context' to the context of the - result, since `storePath' itself has references to the paths - used in args[1]. */ - - mkString(v, storePath, {storePath}); -} - -static void addPath(EvalState& state, const Pos& pos, const std::string& name, - const Path& path_, Value* filterFun, bool recursive, - const Hash& expectedHash, Value& v) { - const auto path = evalSettings.pureEval && expectedHash - ? path_ - : state.checkSourcePath(path_); - PathFilter filter = filterFun != nullptr ? ([&](const Path& path) { - auto st = lstat(path); - - /* Call the filter function. The first argument is the path, - the second is a string indicating the type of the file. */ - Value arg1; - mkString(arg1, path); - - Value fun2; - state.callFunction(*filterFun, arg1, fun2, noPos); - - Value arg2; - mkString(arg2, S_ISREG(st.st_mode) ? "regular" - : S_ISDIR(st.st_mode) ? "directory" - : S_ISLNK(st.st_mode) - ? "symlink" - : "unknown" /* not supported, will fail! */); - - Value res; - state.callFunction(fun2, arg2, res, noPos); - - return state.forceBool(res, pos); - }) - : defaultPathFilter; - - Path expectedStorePath; - if (expectedHash) { - expectedStorePath = - state.store->makeFixedOutputPath(recursive, expectedHash, name); - } - Path dstPath; - if (!expectedHash || !state.store->isValidPath(expectedStorePath)) { - dstPath = settings.readOnlyMode - ? 
state.store - ->computeStorePathForPath(name, path, recursive, - htSHA256, filter) - .first - : state.store->addToStore(name, path, recursive, htSHA256, - filter, state.repair); - if (expectedHash && expectedStorePath != dstPath) { - throw Error(format("store path mismatch in (possibly filtered) path " - "added from '%1%'") % - path); - } - } else { - dstPath = expectedStorePath; - } - - mkString(v, dstPath, {dstPath}); -} - -static void prim_filterSource(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - Path path = state.coerceToPath(pos, *args[1], context); - if (!context.empty()) { - throw EvalError(format("string '%1%' cannot refer to other paths, at %2%") % - path % pos); - } - - state.forceValue(*args[0]); - if (args[0]->type != tLambda) { - throw TypeError(format("first argument in call to 'filterSource' is not a " - "function but %1%, at %2%") % - showType(*args[0]) % pos); - } - - addPath(state, pos, baseNameOf(path), path, args[0], true, Hash(), v); -} - -static void prim_path(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceAttrs(*args[0], pos); - Path path; - std::string name; - Value* filterFun = nullptr; - auto recursive = true; - Hash expectedHash; - - for (auto& attr : *args[0]->attrs) { - const std::string& n(attr.second.name); - if (n == "path") { - PathSet context; - path = state.coerceToPath(*attr.second.pos, *attr.second.value, context); - if (!context.empty()) { - throw EvalError( - format("string '%1%' cannot refer to other paths, at %2%") % path % - *attr.second.pos); - } - } else if (attr.second.name == state.sName) { - name = state.forceStringNoCtx(*attr.second.value, *attr.second.pos); - } else if (n == "filter") { - state.forceValue(*attr.second.value); - filterFun = attr.second.value; - } else if (n == "recursive") { - recursive = state.forceBool(*attr.second.value, *attr.second.pos); - } else if (n == "sha256") { - auto hash_ = Hash::deserialize( - state.forceStringNoCtx(*attr.second.value, *attr.second.pos), - htSHA256); - expectedHash = Hash::unwrap_throw(hash_); - } else { - throw EvalError( - format("unsupported argument '%1%' to 'addPath', at %2%") % - attr.second.name % *attr.second.pos); - } - } - if (path.empty()) { - throw EvalError(format("'path' required, at %1%") % pos); - } - if (name.empty()) { - name = baseNameOf(path); - } - - addPath(state, pos, name, path, filterFun, recursive, expectedHash, v); -} - -/************************************************************* - * Sets - *************************************************************/ - -/* Return the names of the attributes in a set as a sorted list of - strings. */ -static void prim_attrNames(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceAttrs(*args[0], pos); - - state.mkList(v, args[0]->attrs->size()); - - unsigned int n = 0; - for (auto& [key, value] : *args[0]->attrs) { - mkString(*((*v.list)[n++] = state.allocValue()), key); - } -} - -/* Return the values of the attributes in a set as a list, in the same - order as attrNames. */ -static void prim_attrValues(EvalState& state, const Pos& pos, Value** input, - Value& output) { - state.forceAttrs(*input[0], pos); - - state.mkList(output, input[0]->attrs->size()); - - unsigned int n = 0; - for (auto& [key, value] : *input[0]->attrs) { - (*output.list)[n++] = value.value; - } -} - -/* Dynamic version of the `.' operator. 
*/ -void prim_getAttr(EvalState& state, const Pos& pos, Value** args, Value& v) { - std::string attr = state.forceStringNoCtx(*args[0], pos); - state.forceAttrs(*args[1], pos); - // !!! Should we create a symbol here or just do a lookup? - Bindings::iterator i = args[1]->attrs->find(state.symbols.Create(attr)); - if (i == args[1]->attrs->end()) { - throw EvalError(format("attribute '%1%' missing, at %2%") % attr % pos); - } - // !!! add to stack trace? - if (state.countCalls && (i->second.pos != nullptr)) { - state.attrSelects[*i->second.pos]++; - } - state.forceValue(*i->second.value); - v = *i->second.value; -} - -/* Return position information of the specified attribute. */ -void prim_unsafeGetAttrPos(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string attr = state.forceStringNoCtx(*args[0], pos); - state.forceAttrs(*args[1], pos); - Bindings::iterator i = args[1]->attrs->find(state.symbols.Create(attr)); - if (i == args[1]->attrs->end()) { - mkNull(v); - } else { - state.mkPos(v, i->second.pos); - } -} - -/* Dynamic version of the `?' operator. */ -static void prim_hasAttr(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string attr = state.forceStringNoCtx(*args[0], pos); - state.forceAttrs(*args[1], pos); - mkBool(v, args[1]->attrs->find(state.symbols.Create(attr)) != - args[1]->attrs->end()); -} - -/* Determine whether the argument is a set. */ -static void prim_isAttrs(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - mkBool(v, args[0]->type == tAttrs); -} - -static void prim_removeAttrs(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceAttrs(*args[0], pos); - state.forceList(*args[1], pos); - - /* Get the attribute names to be removed. */ - std::set names; - for (unsigned int i = 0; i < args[1]->listSize(); ++i) { - state.forceStringNoCtx(*(*args[1]->list)[i], pos); - names.insert(state.symbols.Create((*args[1]->list)[i]->string.s)); - } - - /* Copy all attributes not in that set. Note that we don't need - to sort v.attrs because it's a subset of an already sorted - vector. */ - state.mkAttrs(v, args[0]->attrs->size()); - for (auto& i : *args[0]->attrs) { - if (names.find(i.second.name) == names.end()) { - v.attrs->push_back(i.second); - } - } -} - -/* Builds a set from a list specifying (name, value) pairs. To be - precise, a list [{name = "name1"; value = value1;} ... {name = - "nameN"; value = valueN;}] is transformed to {name1 = value1; - ... nameN = valueN;}. In case of duplicate occurences of the same - name, the first takes precedence. */ -static void prim_listToAttrs(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceList(*args[0], pos); - - state.mkAttrs(v, args[0]->listSize()); - - std::set seen; - - for (unsigned int i = 0; i < args[0]->listSize(); ++i) { - Value& v2(*(*args[0]->list)[i]); - state.forceAttrs(v2, pos); - - Bindings::iterator j = v2.attrs->find(state.sName); - if (j == v2.attrs->end()) { - throw TypeError( - format( - "'name' attribute missing in a call to 'listToAttrs', at %1%") % - pos); - } - std::string name = state.forceStringNoCtx(*j->second.value, pos); - - Symbol sym = state.symbols.Create(name); - if (seen.find(sym) == seen.end()) { - Bindings::iterator j2 = - // TODO(tazjin): this line used to construct the symbol again: - // state.symbols.Create(state.sValue)); - // Why? 
- v2.attrs->find(state.sValue); - if (j2 == v2.attrs->end()) { - throw TypeError(format("'value' attribute missing in a call to " - "'listToAttrs', at %1%") % - pos); - } - - v.attrs->push_back(Attr(sym, j2->second.value, j2->second.pos)); - seen.insert(sym); - } - } -} - -/* Return the right-biased intersection of two sets as1 and as2, - i.e. a set that contains every attribute from as2 that is also a - member of as1. */ -static void prim_intersectAttrs(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceAttrs(*args[0], pos); - state.forceAttrs(*args[1], pos); - - state.mkAttrs(v, std::min(args[0]->attrs->size(), args[1]->attrs->size())); - - for (auto& i : *args[0]->attrs) { - Bindings::iterator j = args[1]->attrs->find(i.second.name); - if (j != args[1]->attrs->end()) { - v.attrs->push_back(j->second); - } - } -} - -/* Collect each attribute named `attr' from a list of attribute sets. - Sets that don't contain the named attribute are ignored. - - Example: - catAttrs "a" [{a = 1;} {b = 0;} {a = 2;}] - => [1 2] -*/ -static void prim_catAttrs(EvalState& state, const Pos& pos, Value** args, - Value& v) { - Symbol attrName = state.symbols.Create(state.forceStringNoCtx(*args[0], pos)); - state.forceList(*args[1], pos); - - Value* res[args[1]->listSize()]; - unsigned int found = 0; - - for (unsigned int n = 0; n < args[1]->listSize(); ++n) { - Value& v2(*(*args[1]->list)[n]); - state.forceAttrs(v2, pos); - Bindings::iterator i = v2.attrs->find(attrName); - if (i != v2.attrs->end()) { - res[found++] = i->second.value; - } - } - - state.mkList(v, found); - for (unsigned int n = 0; n < found; ++n) { - (*v.list)[n] = res[n]; - } -} - -/* Return a set containing the names of the formal arguments expected - by the function `f'. The value of each attribute is a Boolean - denoting whether the corresponding argument has a default value. For - instance, - - functionArgs ({ x, y ? 123}: ...) - => { x = false; y = true; } - - "Formal argument" here refers to the attributes pattern-matched by - the function. Plain lambdas are not included, e.g. - - functionArgs (x: ...) - => { } -*/ -static void prim_functionArgs(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - if (args[0]->type == tPrimOpApp || args[0]->type == tPrimOp) { - state.mkAttrs(v, 0); - return; - } - if (args[0]->type != tLambda) { - throw TypeError(format("'functionArgs' requires a function, at %1%") % pos); - } - - if (!args[0]->lambda.fun->matchAttrs) { - state.mkAttrs(v, 0); - return; - } - - state.mkAttrs(v, args[0]->lambda.fun->formals->formals.size()); - for (auto& i : args[0]->lambda.fun->formals->formals) { - // !!! should optimise booleans (allocate only once) - // TODO(tazjin): figure out what the above comment means - mkBool(*state.allocAttr(v, i.name), i.def != nullptr); - } -} - -/* Apply a function to every element of an attribute set. */ -static void prim_mapAttrs(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceAttrs(*args[1], pos); - - state.mkAttrs(v, args[1]->attrs->size()); - - for (auto& i : *args[1]->attrs) { - Value* vName = state.allocValue(); - Value* vFun2 = state.allocValue(); - mkString(*vName, i.second.name); - mkApp(*vFun2, *args[0], *vName); - mkApp(*state.allocAttr(v, i.second.name), *vFun2, *i.second.value); - } -} - -/************************************************************* - * Lists - *************************************************************/ - -/* Determine whether the argument is a list. 
*/ -static void prim_isList(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - mkBool(v, args[0]->isList()); -} - -static void elemAt(EvalState& state, const Pos& pos, Value& list, int n, - Value& v) { - state.forceList(list, pos); - if (n < 0 || static_cast(n) >= list.listSize()) { - throw Error(format("list index %1% is out of bounds, at %2%") % n % pos); - } - state.forceValue(*(*list.list)[n]); - v = *(*list.list)[n]; -} - -/* Return the n-1'th element of a list. */ -static void prim_elemAt(EvalState& state, const Pos& pos, Value** args, - Value& v) { - elemAt(state, pos, *args[0], state.forceInt(*args[1], pos), v); -} - -/* Return the first element of a list. */ -static void prim_head(EvalState& state, const Pos& pos, Value** args, - Value& v) { - elemAt(state, pos, *args[0], 0, v); -} - -/* Return a list consisting of everything but the first element of - a list. Warning: this function takes O(n) time, so you probably - don't want to use it! */ -static void prim_tail(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceList(*args[0], pos); - if (args[0]->listSize() == 0) { - throw Error(format("'tail' called on an empty list, at %1%") % pos); - } - state.mkList(v, args[0]->listSize() - 1); - for (unsigned int n = 0; n < v.listSize(); ++n) { - (*v.list)[n] = (*args[0]->list)[n + 1]; - } -} - -/* Apply a function to every element of a list. */ -static void prim_map(EvalState& state, const Pos& pos, Value** args, Value& v) { - state.forceList(*args[1], pos); - - state.mkList(v, args[1]->listSize()); - - for (unsigned int n = 0; n < v.listSize(); ++n) { - mkApp(*((*v.list)[n] = state.allocValue()), *args[0], *(*args[1]->list)[n]); - } -} - -/* Filter a list using a predicate; that is, return a list containing - every element from the list for which the predicate function - returns true. */ -static void prim_filter(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceFunction(*args[0], pos); - state.forceList(*args[1], pos); - - // FIXME: putting this on the stack is risky. - Value* vs[args[1]->listSize()]; - unsigned int k = 0; - - bool same = true; - for (unsigned int n = 0; n < args[1]->listSize(); ++n) { - Value res; - state.callFunction(*args[0], *(*args[1]->list)[n], res, noPos); - if (state.forceBool(res, pos)) { - vs[k++] = (*args[1]->list)[n]; - } else { - same = false; - } - } - - if (same) { - v = *args[1]; - } else { - state.mkList(v, k); - for (unsigned int n = 0; n < k; ++n) { - (*v.list)[n] = vs[n]; - } - } -} - -/* Return true if a list contains a given element. */ -static void prim_elem(EvalState& state, const Pos& pos, Value** args, - Value& v) { - bool res = false; - state.forceList(*args[1], pos); - for (unsigned int n = 0; n < args[1]->listSize(); ++n) { - if (state.eqValues(*args[0], *(*args[1]->list)[n])) { - res = true; - break; - } - } - mkBool(v, res); -} - -/* Concatenate a list of lists. */ -static void prim_concatLists(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceList(*args[0], pos); - state.concatLists(v, *args[0]->list, pos); -} - -/* Return the length of a list. This is an O(1) time operation. */ -static void prim_length(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceList(*args[0], pos); - mkInt(v, args[0]->listSize()); -} - -/* Reduce a list by applying a binary operator, from left to - right. The operator is applied strictly. 
*/ -static void prim_foldlStrict(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceFunction(*args[0], pos); - state.forceList(*args[2], pos); - - if (args[2]->listSize() != 0u) { - Value* vCur = args[1]; - - for (unsigned int n = 0; n < args[2]->listSize(); ++n) { - Value vTmp; - state.callFunction(*args[0], *vCur, vTmp, pos); - vCur = n == args[2]->listSize() - 1 ? &v : state.allocValue(); - state.callFunction(vTmp, *(*args[2]->list)[n], *vCur, pos); - } - state.forceValue(v); - } else { - state.forceValue(*args[1]); - v = *args[1]; - } -} - -static void anyOrAll(bool any, EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceFunction(*args[0], pos); - state.forceList(*args[1], pos); - - Value vTmp; - for (unsigned int n = 0; n < args[1]->listSize(); ++n) { - state.callFunction(*args[0], *(*args[1]->list)[n], vTmp, pos); - bool res = state.forceBool(vTmp, pos); - if (res == any) { - mkBool(v, any); - return; - } - } - - mkBool(v, !any); -} - -static void prim_any(EvalState& state, const Pos& pos, Value** args, Value& v) { - anyOrAll(true, state, pos, args, v); -} - -static void prim_all(EvalState& state, const Pos& pos, Value** args, Value& v) { - anyOrAll(false, state, pos, args, v); -} - -static void prim_genList(EvalState& state, const Pos& pos, Value** args, - Value& v) { - auto len = state.forceInt(*args[1], pos); - - if (len < 0) { - throw EvalError(format("cannot create list of size %1%, at %2%") % len % - pos); - } - - state.mkList(v, len); - - for (unsigned int n = 0; n < static_cast(len); ++n) { - Value* arg = state.allocValue(); - mkInt(*arg, n); - mkApp(*((*v.list)[n] = state.allocValue()), *args[0], *arg); - } -} - -static void prim_lessThan(EvalState& state, const Pos& pos, Value** args, - Value& v); - -static void prim_sort(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceFunction(*args[0], pos); - state.forceList(*args[1], pos); - - // Copy of the input list which can be sorted in place. - v.type = tList; - v.list = std::make_shared(*args[1]->list); - - std::for_each(v.list->begin(), v.list->end(), - [&](Value* val) { state.forceValue(*val); }); - - auto comparator = [&](Value* a, Value* b) { - /* Optimization: if the comparator is lessThan, bypass - callFunction. */ - if (args[0]->type == tPrimOp && args[0]->primOp->fun == prim_lessThan) { - return CompareValues()(a, b); - } - - Value vTmp1{}; - Value vTmp2{}; - state.callFunction(*args[0], *a, vTmp1, pos); - state.callFunction(vTmp1, *b, vTmp2, pos); - return state.forceBool(vTmp2, pos); - }; - - /* FIXME: std::sort can segfault if the comparator is not a strict - weak ordering. What to do? std::stable_sort() seems more - resilient, but no guarantees... 
*/ - std::stable_sort(v.list->begin(), v.list->end(), comparator); -} - -static void prim_partition(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceFunction(*args[0], pos); - state.forceList(*args[1], pos); - - std::shared_ptr right = std::make_shared(); - std::shared_ptr wrong = std::make_shared(); - - for (Value* elem : *args[1]->list) { - state.forceValue(*elem, pos); - - Value res; - state.callFunction(*args[0], *elem, res, pos); - if (state.forceBool(res, pos)) { - right->push_back(elem); - } else { - wrong->push_back(elem); - } - } - - state.mkAttrs(v, 2); - - Value* vRight = state.allocAttr(v, state.sRight); - state.mkList(*vRight, right); - - Value* vWrong = state.allocAttr(v, state.sWrong); - state.mkList(*vWrong, wrong); -} - -/* concatMap = f: list: concatLists (map f list); */ -/* C++-version is to avoid allocating `mkApp', call `f' eagerly */ -static void prim_concatMap(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceFunction(*args[0], pos); - state.forceList(*args[1], pos); - - std::shared_ptr outlist = std::make_shared(); - - for (Value* elem : *args[1]->list) { - auto out = state.allocValue(); - state.callFunction(*args[0], *elem, *out, pos); - state.forceList(*out, pos); - - outlist->insert(outlist->end(), out->list->begin(), out->list->end()); - } - - state.mkList(v, outlist); -} - -/************************************************************* - * Integer arithmetic - *************************************************************/ - -static void prim_add(EvalState& state, const Pos& pos, Value** args, Value& v) { - state.forceValue(*args[0], pos); - state.forceValue(*args[1], pos); - if (args[0]->type == tFloat || args[1]->type == tFloat) { - mkFloat(v, - state.forceFloat(*args[0], pos) + state.forceFloat(*args[1], pos)); - } else { - mkInt(v, state.forceInt(*args[0], pos) + state.forceInt(*args[1], pos)); - } -} - -static void prim_sub(EvalState& state, const Pos& pos, Value** args, Value& v) { - state.forceValue(*args[0], pos); - state.forceValue(*args[1], pos); - if (args[0]->type == tFloat || args[1]->type == tFloat) { - mkFloat(v, - state.forceFloat(*args[0], pos) - state.forceFloat(*args[1], pos)); - } else { - mkInt(v, state.forceInt(*args[0], pos) - state.forceInt(*args[1], pos)); - } -} - -static void prim_mul(EvalState& state, const Pos& pos, Value** args, Value& v) { - state.forceValue(*args[0], pos); - state.forceValue(*args[1], pos); - if (args[0]->type == tFloat || args[1]->type == tFloat) { - mkFloat(v, - state.forceFloat(*args[0], pos) * state.forceFloat(*args[1], pos)); - } else { - mkInt(v, state.forceInt(*args[0], pos) * state.forceInt(*args[1], pos)); - } -} - -static void prim_div(EvalState& state, const Pos& pos, Value** args, Value& v) { - state.forceValue(*args[0], pos); - state.forceValue(*args[1], pos); - - NixFloat f2 = state.forceFloat(*args[1], pos); - if (f2 == 0) { - throw EvalError(format("division by zero, at %1%") % pos); - } - - if (args[0]->type == tFloat || args[1]->type == tFloat) { - mkFloat(v, - state.forceFloat(*args[0], pos) / state.forceFloat(*args[1], pos)); - } else { - NixInt i1 = state.forceInt(*args[0], pos); - NixInt i2 = state.forceInt(*args[1], pos); - /* Avoid division overflow as it might raise SIGFPE. 
*/ - if (i1 == std::numeric_limits::min() && i2 == -1) { - throw EvalError(format("overflow in integer division, at %1%") % pos); - } - mkInt(v, i1 / i2); - } -} - -static void prim_bitAnd(EvalState& state, const Pos& pos, Value** args, - Value& v) { - mkInt(v, state.forceInt(*args[0], pos) & state.forceInt(*args[1], pos)); -} - -static void prim_bitOr(EvalState& state, const Pos& pos, Value** args, - Value& v) { - mkInt(v, state.forceInt(*args[0], pos) | state.forceInt(*args[1], pos)); -} - -static void prim_bitXor(EvalState& state, const Pos& pos, Value** args, - Value& v) { - mkInt(v, state.forceInt(*args[0], pos) ^ state.forceInt(*args[1], pos)); -} - -static void prim_lessThan(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceValue(*args[0]); - state.forceValue(*args[1]); - CompareValues comp; - mkBool(v, comp(args[0], args[1])); -} - -/************************************************************* - * String manipulation - *************************************************************/ - -/* Convert the argument to a string. Paths are *not* copied to the - store, so `toString /foo/bar' yields `"/foo/bar"', not - `"/nix/store/whatever..."'. */ -static void prim_toString(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - std::string s = state.coerceToString(pos, *args[0], context, true, false); - mkString(v, s, context); -} - -/* `substring start len str' returns the substring of `str' starting - at character position `min(start, stringLength str)' inclusive and - ending at `min(start + len, stringLength str)'. `start' must be - non-negative. */ -static void prim_substring(EvalState& state, const Pos& pos, Value** args, - Value& v) { - int start = state.forceInt(*args[0], pos); - int len = state.forceInt(*args[1], pos); - PathSet context; - std::string s = state.coerceToString(pos, *args[2], context); - - if (start < 0) { - throw EvalError(format("negative start position in 'substring', at %1%") % - pos); - } - - mkString(v, - static_cast(start) >= s.size() - ? "" - : std::string(s, start, len), - context); -} - -static void prim_stringLength(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - std::string s = state.coerceToString(pos, *args[0], context); - mkInt(v, s.size()); -} - -/* Return the cryptographic hash of a string in base-16. */ -static void prim_hashString(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string type = state.forceStringNoCtx(*args[0], pos); - HashType ht = parseHashType(type); - if (ht == htUnknown) { - throw Error(format("unknown hash type '%1%', at %2%") % type % pos); - } - - PathSet context; // discarded - std::string s = state.forceString(*args[1], context, pos); - - mkString(v, hashString(ht, s).to_string(Base16, false), context); -} - -/* Match a regular expression against a string and return either - ‘null’ or a list containing substring matches. 
*/ -static void prim_match(EvalState& state, const Pos& pos, Value** args, - Value& v) { - auto re = state.forceStringNoCtx(*args[0], pos); - - try { - std::regex regex(re, std::regex::extended); - - PathSet context; - const std::string str = state.forceString(*args[1], context, pos); - - std::smatch match; - if (!std::regex_match(str, match, regex)) { - mkNull(v); - return; - } - - // the first match is the whole string - const size_t len = match.size() - 1; - state.mkList(v, len); - for (size_t i = 0; i < len; ++i) { - if (!match[i + 1].matched) { - mkNull(*((*v.list)[i] = state.allocValue())); - } else { - mkString(*((*v.list)[i] = state.allocValue()), - match[i + 1].str().c_str()); - } - } - - } catch (std::regex_error& e) { - if (e.code() == std::regex_constants::error_space) { - // limit is _GLIBCXX_REGEX_STATE_LIMIT for libstdc++ - throw EvalError("memory limit exceeded by regular expression '%s', at %s", - re, pos); - } - throw EvalError("invalid regular expression '%s', at %s", re, pos); - } -} - -/* Split a std::string with a regular expression, and return a list of the - non-matching parts interleaved by the lists of the matching groups. */ -static void prim_split(EvalState& state, const Pos& pos, Value** args, - Value& v) { - auto re = state.forceStringNoCtx(*args[0], pos); - - try { - std::regex regex(re, std::regex::extended); - - PathSet context; - const std::string str = state.forceString(*args[1], context, pos); - - auto begin = std::sregex_iterator(str.begin(), str.end(), regex); - auto end = std::sregex_iterator(); - - // Any matches results are surrounded by non-matching results. - const size_t len = std::distance(begin, end); - state.mkList(v, 2 * len + 1); - size_t idx = 0; - Value* elem; - - if (len == 0) { - (*v.list)[idx++] = args[1]; - return; - } - - for (std::sregex_iterator i = begin; i != end; ++i) { - assert(idx <= 2 * len + 1 - 3); - std::smatch match = *i; - - // Add a string for non-matched characters. - elem = (*v.list)[idx++] = state.allocValue(); - mkString(*elem, match.prefix().str().c_str()); - - // Add a list for matched substrings. - const size_t slen = match.size() - 1; - elem = (*v.list)[idx++] = state.allocValue(); - - // Start at 1, beacause the first match is the whole string. - state.mkList(*elem, slen); - for (size_t si = 0; si < slen; ++si) { - if (!match[si + 1].matched) { - mkNull(*((*elem->list)[si] = state.allocValue())); - } else { - mkString(*((*elem->list)[si] = state.allocValue()), - match[si + 1].str().c_str()); - } - } - - // Add a string for non-matched suffix characters. 
- if (idx == 2 * len) { - elem = (*v.list)[idx++] = state.allocValue(); - mkString(*elem, match.suffix().str().c_str()); - } - } - assert(idx == 2 * len + 1); - - } catch (std::regex_error& e) { - if (e.code() == std::regex_constants::error_space) { - // limit is _GLIBCXX_REGEX_STATE_LIMIT for libstdc++ - throw EvalError("memory limit exceeded by regular expression '%s', at %s", - re, pos); - } - throw EvalError("invalid regular expression '%s', at %s", re, pos); - } -} - -static void prim_concatStringSep(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - - auto sep = state.forceString(*args[0], context, pos); - state.forceList(*args[1], pos); - - std::string res; - res.reserve((args[1]->listSize() + 32) * sep.size()); - bool first = true; - - for (unsigned int n = 0; n < args[1]->listSize(); ++n) { - if (first) { - first = false; - } else { - res += sep; - } - - res += state.coerceToString(pos, *(*args[1]->list)[n], context); - } - - mkString(v, res, context); -} - -static void prim_replaceStrings(EvalState& state, const Pos& pos, Value** args, - Value& v) { - state.forceList(*args[0], pos); - state.forceList(*args[1], pos); - if (args[0]->listSize() != args[1]->listSize()) { - throw EvalError(format("'from' and 'to' arguments to 'replaceStrings' have " - "different lengths, at %1%") % - pos); - } - - std::vector from; - from.reserve(args[0]->listSize()); - for (unsigned int n = 0; n < args[0]->listSize(); ++n) { - from.push_back(state.forceString(*(*args[0]->list)[n], pos)); - } - - std::vector> to; - to.reserve(args[1]->listSize()); - for (unsigned int n = 0; n < args[1]->listSize(); ++n) { - PathSet ctx; - auto s = state.forceString(*(*args[1]->list)[n], ctx, pos); - to.emplace_back(std::move(s), std::move(ctx)); - } - - PathSet context; - auto s = state.forceString(*args[2], context, pos); - - std::string res; - // Loops one past last character to handle the case where 'from' contains an - // empty string. 
- for (size_t p = 0; p <= s.size();) { - bool found = false; - auto i = from.begin(); - auto j = to.begin(); - for (; i != from.end(); ++i, ++j) { - if (s.compare(p, i->size(), *i) == 0) { - found = true; - res += j->first; - if (i->empty()) { - if (p < s.size()) { - res += s[p]; - } - p++; - } else { - p += i->size(); - } - for (auto& path : j->second) { - context.insert(path); - } - j->second.clear(); - break; - } - } - if (!found) { - if (p < s.size()) { - res += s[p]; - } - p++; - } - } - - mkString(v, res, context); -} - -/************************************************************* - * Versions - *************************************************************/ - -static void prim_parseDrvName(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string name = state.forceStringNoCtx(*args[0], pos); - DrvName parsed(name); - state.mkAttrs(v, 2); - mkString(*state.allocAttr(v, state.sName), parsed.name); - mkString(*state.allocAttr(v, state.symbols.Create("version")), - parsed.version); -} - -static void prim_compareVersions(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string version1 = state.forceStringNoCtx(*args[0], pos); - std::string version2 = state.forceStringNoCtx(*args[1], pos); - mkInt(v, compareVersions(version1, version2)); -} - -static void prim_splitVersion(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string version = state.forceStringNoCtx(*args[0], pos); - auto iter = version.cbegin(); - Strings components; - while (iter != version.cend()) { - auto component = nextComponent(iter, version.cend()); - if (component.empty()) { - break; - } - components.emplace_back(std::move(component)); - } - state.mkList(v, components.size()); - unsigned int n = 0; - for (auto& component : components) { - auto listElem = (*v.list)[n++] = state.allocValue(); - mkString(*listElem, component); - } -} - -/************************************************************* - * Networking - *************************************************************/ - -void fetch(EvalState& state, const Pos& pos, Value** args, Value& v, - const std::string& who, bool unpack, - const std::string& defaultName) { - CachedDownloadRequest request(""); - request.unpack = unpack; - request.name = defaultName; - - state.forceValue(*args[0]); - - if (args[0]->type == tAttrs) { - state.forceAttrs(*args[0], pos); - - for (auto& attr : *args[0]->attrs) { - std::string n(attr.second.name); - if (n == "url") { - request.uri = - state.forceStringNoCtx(*attr.second.value, *attr.second.pos); - } else if (n == "sha256") { - auto hash_ = Hash::deserialize( - state.forceStringNoCtx(*attr.second.value, *attr.second.pos), - htSHA256); - request.expectedHash = Hash::unwrap_throw(hash_); - } else if (n == "name") { - request.name = - state.forceStringNoCtx(*attr.second.value, *attr.second.pos); - } else { - throw EvalError(format("unsupported argument '%1%' to '%2%', at %3%") % - attr.second.name % who % attr.second.pos); - } - } - - if (request.uri.empty()) { - throw EvalError(format("'url' argument required, at %1%") % pos); - } - - } else { - request.uri = state.forceStringNoCtx(*args[0], pos); - } - - state.checkURI(request.uri); - - if (evalSettings.pureEval && !request.expectedHash) { - throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", - who); - } - - auto res = getDownloader()->downloadCached(state.store, request); - - if (state.allowedPaths) { - state.allowedPaths->insert(res.path); - } - - mkString(v, res.storePath, PathSet({res.storePath})); -} 
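/* The two network primops that follow are thin wrappers around the shared
   fetch() helper above, which accepts either a plain URL string or an
   attrset with 'url', 'sha256' and 'name', checks the URI, and enforces
   the 'sha256' requirement in pure evaluation mode. As a hedged
   illustration only: a further fetcher variant would be expected to follow
   the same wrapper pattern. The name 'fetchzip' below is hypothetical and
   does not exist in this source tree. */
static void prim_fetchzip(EvalState& state, const Pos& pos, Value** args,
                          Value& v) {
  // Delegate all argument handling and downloading to fetch(); unpack the
  // result and default the store name to "source", as fetchTarball does.
  fetch(state, pos, args, v, "fetchzip", true, "source");
}
// Such a primop would then be registered either in createBaseEnv() via
// addPrimOp("fetchzip", 1, prim_fetchzip) or, from another translation
// unit, with a static RegisterPrimOp instance.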
- -static void prim_fetchurl(EvalState& state, const Pos& pos, Value** args, - Value& v) { - fetch(state, pos, args, v, "fetchurl", false, ""); -} - -static void prim_fetchTarball(EvalState& state, const Pos& pos, Value** args, - Value& v) { - fetch(state, pos, args, v, "fetchTarball", true, "source"); -} - -/************************************************************* - * Primop registration - *************************************************************/ - -RegisterPrimOp::PrimOps* RegisterPrimOp::primOps; - -RegisterPrimOp::RegisterPrimOp(const std::string& name, size_t arity, - PrimOpFun fun) { - if (primOps == nullptr) { - primOps = new PrimOps; - } - primOps->emplace_back(name, arity, fun); -} - -void EvalState::createBaseEnv() { - baseEnv.up = nullptr; - - /* Add global constants such as `true' to the base environment. */ - Value v; - - /* `builtins' must be first! */ - mkAttrs(v, 128); - addConstant("builtins", v); - - mkBool(v, true); - addConstant("true", v); - - mkBool(v, false); - addConstant("false", v); - - mkNull(v); - addConstant("null", v); - - auto vThrow = addPrimOp("throw", 1, prim_throw); - - auto addPurityError = [&](const std::string& name) { - Value* v2 = allocValue(); - mkString(*v2, fmt("'%s' is not allowed in pure evaluation mode", name)); - mkApp(v, *vThrow, *v2); - addConstant(name, v); - }; - - if (!evalSettings.pureEval) { - mkInt(v, time(nullptr)); - addConstant("__currentTime", v); - } - - if (!evalSettings.pureEval) { - mkString(v, settings.thisSystem); - addConstant("__currentSystem", v); - } - - mkString(v, nixVersion); - addConstant("__nixVersion", v); - - mkString(v, store->storeDir); - addConstant("__storeDir", v); - - /* Language version. This should be increased every time a new - language feature gets added. It's not necessary to increase it - when primops get added, because you can just use `builtins ? - primOp' to check. 
*/ - mkInt(v, 5); - addConstant("__langVersion", v); - - // Miscellaneous - auto vScopedImport = addPrimOp("scopedImport", 2, prim_scopedImport); - Value* v2 = allocValue(); - mkAttrs(*v2, 0); - mkApp(v, *vScopedImport, *v2); - forceValue(v); - addConstant("import", v); - addPrimOp("__typeOf", 1, prim_typeOf); - addPrimOp("isNull", 1, prim_isNull); - addPrimOp("__isFunction", 1, prim_isFunction); - addPrimOp("__isString", 1, prim_isString); - addPrimOp("__isInt", 1, prim_isInt); - addPrimOp("__isFloat", 1, prim_isFloat); - addPrimOp("__isBool", 1, prim_isBool); - addPrimOp("__isPath", 1, prim_isPath); - addPrimOp("__genericClosure", 1, prim_genericClosure); - addPrimOp("abort", 1, prim_abort); - addPrimOp("__addErrorContext", 2, prim_addErrorContext); - addPrimOp("__tryEval", 1, prim_tryEval); - addPrimOp("__getEnv", 1, prim_getEnv); - - // Strictness - addPrimOp("__seq", 2, prim_seq); - addPrimOp("__deepSeq", 2, prim_deepSeq); - - // Debugging - addPrimOp("__trace", 2, prim_trace); - addPrimOp("__valueSize", 1, prim_valueSize); - - // Paths - addPrimOp("__toPath", 1, prim_toPath); - if (evalSettings.pureEval) { - addPurityError("__storePath"); - } else { - addPrimOp("__storePath", 1, prim_storePath); - } - addPrimOp("__pathExists", 1, prim_pathExists); - addPrimOp("baseNameOf", 1, prim_baseNameOf); - addPrimOp("dirOf", 1, prim_dirOf); - addPrimOp("__readFile", 1, prim_readFile); - addPrimOp("__readDir", 1, prim_readDir); - addPrimOp("__findFile", 2, prim_findFile); - addPrimOp("__hashFile", 2, prim_hashFile); - - // Creating files - addPrimOp("__toXML", 1, prim_toXML); - addPrimOp("__toJSON", 1, prim_toJSON); - addPrimOp("__fromJSON", 1, prim_fromJSON); - addPrimOp("__toFile", 2, prim_toFile); - addPrimOp("__filterSource", 2, prim_filterSource); - addPrimOp("__path", 1, prim_path); - - // Sets - addPrimOp("__attrNames", 1, prim_attrNames); - addPrimOp("__attrValues", 1, prim_attrValues); - addPrimOp("__getAttr", 2, prim_getAttr); - addPrimOp("__unsafeGetAttrPos", 2, prim_unsafeGetAttrPos); - addPrimOp("__hasAttr", 2, prim_hasAttr); - addPrimOp("__isAttrs", 1, prim_isAttrs); - addPrimOp("removeAttrs", 2, prim_removeAttrs); - addPrimOp("__listToAttrs", 1, prim_listToAttrs); - addPrimOp("__intersectAttrs", 2, prim_intersectAttrs); - addPrimOp("__catAttrs", 2, prim_catAttrs); - addPrimOp("__functionArgs", 1, prim_functionArgs); - addPrimOp("__mapAttrs", 2, prim_mapAttrs); - - // Lists - addPrimOp("__isList", 1, prim_isList); - addPrimOp("__elemAt", 2, prim_elemAt); - addPrimOp("__head", 1, prim_head); - addPrimOp("__tail", 1, prim_tail); - addPrimOp("map", 2, prim_map); - addPrimOp("__filter", 2, prim_filter); - addPrimOp("__elem", 2, prim_elem); - addPrimOp("__concatLists", 1, prim_concatLists); - addPrimOp("__length", 1, prim_length); - addPrimOp("__foldl'", 3, prim_foldlStrict); - addPrimOp("__any", 2, prim_any); - addPrimOp("__all", 2, prim_all); - addPrimOp("__genList", 2, prim_genList); - addPrimOp("__sort", 2, prim_sort); - addPrimOp("__partition", 2, prim_partition); - addPrimOp("__concatMap", 2, prim_concatMap); - - // Integer arithmetic - addPrimOp("__add", 2, prim_add); - addPrimOp("__sub", 2, prim_sub); - addPrimOp("__mul", 2, prim_mul); - addPrimOp("__div", 2, prim_div); - addPrimOp("__bitAnd", 2, prim_bitAnd); - addPrimOp("__bitOr", 2, prim_bitOr); - addPrimOp("__bitXor", 2, prim_bitXor); - addPrimOp("__lessThan", 2, prim_lessThan); - - // String manipulation - addPrimOp("toString", 1, prim_toString); - addPrimOp("__substring", 3, prim_substring); - addPrimOp("__stringLength", 
1, prim_stringLength); - addPrimOp("__hashString", 2, prim_hashString); - addPrimOp("__match", 2, prim_match); - addPrimOp("__split", 2, prim_split); - addPrimOp("__concatStringsSep", 2, prim_concatStringSep); - addPrimOp("__replaceStrings", 3, prim_replaceStrings); - - // Versions - addPrimOp("__parseDrvName", 1, prim_parseDrvName); - addPrimOp("__compareVersions", 2, prim_compareVersions); - addPrimOp("__splitVersion", 1, prim_splitVersion); - - // Derivations - addPrimOp("derivationStrict", 1, prim_derivationStrict); - addPrimOp("placeholder", 1, prim_placeholder); - - // Networking - addPrimOp("__fetchurl", 1, prim_fetchurl); - addPrimOp("fetchTarball", 1, prim_fetchTarball); - - /* Add a wrapper around the derivation primop that computes the - `drvPath' and `outPath' attributes lazily. */ - std::string path = - canonPath(settings.nixDataDir + "/nix/corepkgs/derivation.nix", true); - sDerivationNix = symbols.Create(path); - evalFile(path, v); - addConstant("derivation", v); - - /* Add a value containing the current Nix expression search path. */ - mkList(v, searchPath.size()); - int n = 0; - for (auto& i : searchPath) { - v2 = (*v.list)[n++] = allocValue(); - mkAttrs(*v2, 2); - mkString(*allocAttr(*v2, symbols.Create("path")), i.second); - mkString(*allocAttr(*v2, symbols.Create("prefix")), i.first); - } - addConstant("__nixPath", v); - - if (RegisterPrimOp::primOps != nullptr) { - for (auto& primOp : *RegisterPrimOp::primOps) { - addPrimOp(std::get<0>(primOp), std::get<1>(primOp), std::get<2>(primOp)); - } - } -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/primops.hh b/third_party/nix/src/libexpr/primops.hh deleted file mode 100644 index ab5f647202..0000000000 --- a/third_party/nix/src/libexpr/primops.hh +++ /dev/null @@ -1,17 +0,0 @@ -#include -#include - -#include "libexpr/eval.hh" - -namespace nix { - -struct RegisterPrimOp { - using PrimOps = std::vector >; - static PrimOps* primOps; - /* You can register a constant by passing an arity of 0. fun - will get called during EvalState initialization, so there - may be primops not yet added and builtins is not yet sorted. */ - RegisterPrimOp(const std::string& name, size_t arity, PrimOpFun fun); -}; - -} // namespace nix diff --git a/third_party/nix/src/libexpr/primops/context.cc b/third_party/nix/src/libexpr/primops/context.cc deleted file mode 100644 index fb8879ead1..0000000000 --- a/third_party/nix/src/libexpr/primops/context.cc +++ /dev/null @@ -1,202 +0,0 @@ -#include "libexpr/eval-inline.hh" -#include "libexpr/primops.hh" -#include "libstore/derivations.hh" - -namespace nix { - -static void prim_unsafeDiscardStringContext(EvalState& state, const Pos& pos, - Value** args, Value& v) { - PathSet context; - std::string s = state.coerceToString(pos, *args[0], context); - mkString(v, s, PathSet()); -} - -static RegisterPrimOp r1("__unsafeDiscardStringContext", 1, - prim_unsafeDiscardStringContext); - -static void prim_hasContext(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - state.forceString(*args[0], context, pos); - mkBool(v, !context.empty()); -} - -static RegisterPrimOp r2("__hasContext", 1, prim_hasContext); - -/* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a - builder without causing the derivation to be built (for instance, - in the derivation that builds NARs in nix-push, when doing - source-only deployment). This primop marks the string context so - that builtins.derivation adds the path to drv.inputSrcs rather than - drv.inputDrvs. 
*/ -static void prim_unsafeDiscardOutputDependency(EvalState& state, const Pos& pos, - Value** args, Value& v) { - PathSet context; - std::string s = state.coerceToString(pos, *args[0], context); - - PathSet context2; - for (auto& p : context) { - context2.insert(p.at(0) == '=' ? std::string(p, 1) : p); - } - - mkString(v, s, context2); -} - -static RegisterPrimOp r3("__unsafeDiscardOutputDependency", 1, - prim_unsafeDiscardOutputDependency); - -/* Extract the context of a string as a structured Nix value. - - The context is represented as an attribute set whose keys are the - paths in the context set and whose values are attribute sets with - the following keys: - path: True if the relevant path is in the context as a plain store - path (i.e. the kind of context you get when interpolating - a Nix path (e.g. ./.) into a string). False if missing. - allOutputs: True if the relevant path is a derivation and it is - in the context as a drv file with all of its outputs - (i.e. the kind of context you get when referencing - .drvPath of some derivation). False if missing. - outputs: If a non-empty list, the relevant path is a derivation - and the provided outputs are referenced in the context - (i.e. the kind of context you get when referencing - .outPath of some derivation). Empty list if missing. - Note that for a given path any combination of the above attributes - may be present. -*/ -static void prim_getContext(EvalState& state, const Pos& pos, Value** args, - Value& v) { - struct ContextInfo { - bool path = false; - bool allOutputs = false; - Strings outputs; - }; - PathSet context; - state.forceString(*args[0], context, pos); - auto contextInfos = std::map(); - for (const auto& p : context) { - Path drv; - std::string output; - const Path* path = &p; - if (p.at(0) == '=') { - drv = std::string(p, 1); - path = &drv; - } else if (p.at(0) == '!') { - std::pair ctx = decodeContext(p); - drv = ctx.first; - output = ctx.second; - path = &drv; - } - auto isPath = drv.empty(); - auto isAllOutputs = (!drv.empty()) && output.empty(); - - auto iter = contextInfos.find(*path); - if (iter == contextInfos.end()) { - contextInfos.emplace( - *path, - ContextInfo{isPath, isAllOutputs, - output.empty() ? Strings{} : Strings{std::move(output)}}); - } else { - if (isPath) { - iter->second.path = true; - } else if (isAllOutputs) { - iter->second.allOutputs = true; - } else { - iter->second.outputs.emplace_back(std::move(output)); - } - } - } - - state.mkAttrs(v, contextInfos.size()); - - auto sPath = state.symbols.Create("path"); - auto sAllOutputs = state.symbols.Create("allOutputs"); - for (const auto& info : contextInfos) { - auto& infoVal = *state.allocAttr(v, state.symbols.Create(info.first)); - state.mkAttrs(infoVal, 3); - if (info.second.path) { - mkBool(*state.allocAttr(infoVal, sPath), true); - } - if (info.second.allOutputs) { - mkBool(*state.allocAttr(infoVal, sAllOutputs), true); - } - if (!info.second.outputs.empty()) { - auto& outputsVal = *state.allocAttr(infoVal, state.sOutputs); - state.mkList(outputsVal, info.second.outputs.size()); - size_t i = 0; - for (const auto& output : info.second.outputs) { - mkString(*((*outputsVal.list)[i++] = state.allocValue()), output); - } - } - } -} - -static RegisterPrimOp r4("__getContext", 1, prim_getContext); - -/* Append the given context to a given string. - - See the commentary above unsafeGetContext for details of the - context representation. 
-*/ -static void prim_appendContext(EvalState& state, const Pos& pos, Value** args, - Value& v) { - PathSet context; - auto orig = state.forceString(*args[0], context, pos); - - state.forceAttrs(*args[1], pos); - - auto sPath = state.symbols.Create("path"); - auto sAllOutputs = state.symbols.Create("allOutputs"); - for (const auto& attr_iter : *args[1]->attrs) { - const Attr* i = &attr_iter.second; // TODO(tazjin): get rid of this - if (!state.store->isStorePath(i->name)) { - throw EvalError("Context key '%s' is not a store path, at %s", i->name, - i->pos); - } - if (!settings.readOnlyMode) { - state.store->ensurePath(i->name); - } - state.forceAttrs(*i->value, *i->pos); - auto iter = i->value->attrs->find(sPath); - if (iter != i->value->attrs->end()) { - if (state.forceBool(*iter->second.value, *iter->second.pos)) { - context.insert(i->name); - } - } - - iter = i->value->attrs->find(sAllOutputs); - if (iter != i->value->attrs->end()) { - if (state.forceBool(*iter->second.value, *iter->second.pos)) { - if (!isDerivation(i->name)) { - throw EvalError( - "Tried to add all-outputs context of %s, which is not a " - "derivation, to a string, at %s", - i->name, i->pos); - } - context.insert("=" + std::string(i->name)); - } - } - - iter = i->value->attrs->find(state.sOutputs); - if (iter != i->value->attrs->end()) { - state.forceList(*iter->second.value, *iter->second.pos); - if (iter->second.value->listSize() && !isDerivation(i->name)) { - throw EvalError( - "Tried to add derivation output context of %s, which is not a " - "derivation, to a string, at %s", - i->name, i->pos); - } - for (unsigned int n = 0; n < iter->second.value->listSize(); ++n) { - auto name = state.forceStringNoCtx(*(*iter->second.value->list)[n], - *iter->second.pos); - context.insert("!" + name + "!" + std::string(i->name)); - } - } - } - - mkString(v, orig, context); -} - -static RegisterPrimOp r5("__appendContext", 2, prim_appendContext); - -} // namespace nix diff --git a/third_party/nix/src/libexpr/primops/fetchGit.cc b/third_party/nix/src/libexpr/primops/fetchGit.cc deleted file mode 100644 index da4d683401..0000000000 --- a/third_party/nix/src/libexpr/primops/fetchGit.cc +++ /dev/null @@ -1,277 +0,0 @@ -#include -#include - -#include -#include -#include -#include -#include - -#include "libexpr/eval-inline.hh" -#include "libexpr/primops.hh" -#include "libstore/download.hh" -#include "libstore/pathlocks.hh" -#include "libstore/store-api.hh" -#include "libutil/hash.hh" - -using namespace std::string_literals; - -namespace nix { - -struct GitInfo { - Path storePath; - std::string rev; - std::string shortRev; - uint64_t revCount = 0; -}; - -std::regex revRegex("^[0-9a-fA-F]{40}$"); - -GitInfo exportGit(ref store, const std::string& uri, - std::optional ref, std::string rev, - const std::string& name) { - if (evalSettings.pureEval && rev == "") { - throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision"); - } - - if (!ref && rev == "" && absl::StartsWith(uri, "/") && - pathExists(uri + "/.git")) { - bool clean = true; - - try { - runProgram("git", true, - {"-C", uri, "diff-index", "--quiet", "HEAD", "--"}); - } catch (ExecError& e) { - if (!WIFEXITED(e.status) || WEXITSTATUS(e.status) != 1) { - throw; - } - clean = false; - } - - if (!clean) { - /* This is an unclean working tree. So copy all tracked - files. 
*/ - - GitInfo gitInfo; - gitInfo.rev = "0000000000000000000000000000000000000000"; - gitInfo.shortRev = std::string(gitInfo.rev, 0, 7); - - std::set files = - absl::StrSplit(runProgram("git", true, {"-C", uri, "ls-files", "-z"}), - absl::ByChar('\0'), absl::SkipEmpty()); - - PathFilter filter = [&](const Path& p) -> bool { - assert(absl::StartsWith(p, uri)); - std::string file(p, uri.size() + 1); - - auto st = lstat(p); - - if (S_ISDIR(st.st_mode)) { - auto prefix = file + "/"; - auto i = files.lower_bound(prefix); - return i != files.end() && absl::StartsWith(*i, prefix); - } - - return files.count(file); - }; - - gitInfo.storePath = - store->addToStore("source", uri, true, htSHA256, filter); - - return gitInfo; - } - - // clean working tree, but no ref or rev specified. Use 'HEAD'. - rev = absl::StripTrailingAsciiWhitespace( - runProgram("git", true, {"-C", uri, "rev-parse", "HEAD"})); - ref = "HEAD"s; - } - - if (!ref) { - ref = "HEAD"s; - } - - if (rev != "" && !std::regex_match(rev, revRegex)) { - throw Error("invalid Git revision '%s'", rev); - } - - deletePath(getCacheDir() + "/nix/git"); - - Path cacheDir = getCacheDir() + "/nix/gitv2/" + - hashString(htSHA256, uri).to_string(Base32, false); - - if (!pathExists(cacheDir)) { - createDirs(dirOf(cacheDir)); - runProgram("git", true, {"init", "--bare", cacheDir}); - } - - Path localRefFile; - if (ref->compare(0, 5, "refs/") == 0) { - localRefFile = cacheDir + "/" + *ref; - } else { - localRefFile = cacheDir + "/refs/heads/" + *ref; - } - - bool doFetch; - time_t now = time(0); - /* If a rev was specified, we need to fetch if it's not in the - repo. */ - if (rev != "") { - try { - runProgram("git", true, {"-C", cacheDir, "cat-file", "-e", rev}); - doFetch = false; - } catch (ExecError& e) { - if (WIFEXITED(e.status)) { - doFetch = true; - } else { - throw; - } - } - } else { - /* If the local ref is older than ‘tarball-ttl’ seconds, do a - git fetch to update the local ref to the remote ref. */ - struct stat st; - doFetch = stat(localRefFile.c_str(), &st) != 0 || - static_cast(st.st_mtime) + settings.tarballTtl <= - static_cast(now); - } - if (doFetch) { - DLOG(INFO) << "fetching Git repository '" << uri << "'"; - - // FIXME: git stderr messes up our progress indicator, so - // we're using --quiet for now. Should process its stderr. - runProgram("git", true, - {"-C", cacheDir, "fetch", "--quiet", "--force", "--", uri, - fmt("%s:%s", *ref, *ref)}); - - struct timeval times[2]; - times[0].tv_sec = now; - times[0].tv_usec = 0; - times[1].tv_sec = now; - times[1].tv_usec = 0; - - utimes(localRefFile.c_str(), times); - } - - // FIXME: check whether rev is an ancestor of ref. - GitInfo gitInfo; - gitInfo.rev = - rev != "" ? 
rev - : absl::StripTrailingAsciiWhitespace(readFile(localRefFile)); - gitInfo.shortRev = std::string(gitInfo.rev, 0, 7); - - VLOG(2) << "using revision " << gitInfo.rev << " of repo '" << uri << "'"; - - std::string storeLinkName = - hashString(htSHA512, name + std::string("\0"s) + gitInfo.rev) - .to_string(Base32, false); - Path storeLink = cacheDir + "/" + storeLinkName + ".link"; - PathLocks storeLinkLock({storeLink}, fmt("waiting for lock on '%1%'...", - storeLink)); // FIXME: broken - - try { - auto json = nlohmann::json::parse(readFile(storeLink)); - - assert(json["name"] == name && json["rev"] == gitInfo.rev); - - gitInfo.storePath = json["storePath"]; - - if (store->isValidPath(gitInfo.storePath)) { - gitInfo.revCount = json["revCount"]; - return gitInfo; - } - - } catch (SysError& e) { - if (e.errNo != ENOENT) { - throw; - } - } - - // FIXME: should pipe this, or find some better way to extract a - // revision. - auto tar = runProgram("git", true, {"-C", cacheDir, "archive", gitInfo.rev}); - - Path tmpDir = createTempDir(); - AutoDelete delTmpDir(tmpDir, true); - - runProgram("tar", true, {"x", "-C", tmpDir}, tar); - - gitInfo.storePath = store->addToStore(name, tmpDir); - - gitInfo.revCount = std::stoull(runProgram( - "git", true, {"-C", cacheDir, "rev-list", "--count", gitInfo.rev})); - - nlohmann::json json; - json["storePath"] = gitInfo.storePath; - json["uri"] = uri; - json["name"] = name; - json["rev"] = gitInfo.rev; - json["revCount"] = gitInfo.revCount; - - writeFile(storeLink, json.dump()); - - return gitInfo; -} - -static void prim_fetchGit(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string url; - std::optional ref; - std::string rev; - std::string name = "source"; - PathSet context; - - state.forceValue(*args[0]); - - if (args[0]->type == tAttrs) { - state.forceAttrs(*args[0], pos); - - for (auto& attr_iter : *args[0]->attrs) { - auto& attr = attr_iter.second; - std::string n(attr.name); - if (n == "url") { - url = - state.coerceToString(*attr.pos, *attr.value, context, false, false); - } else if (n == "ref") { - ref = state.forceStringNoCtx(*attr.value, *attr.pos); - } else if (n == "rev") { - rev = state.forceStringNoCtx(*attr.value, *attr.pos); - } else if (n == "name") { - name = state.forceStringNoCtx(*attr.value, *attr.pos); - } else { - throw EvalError("unsupported argument '%s' to 'fetchGit', at %s", - attr.name, *attr.pos); - } - } - - if (url.empty()) { - throw EvalError(format("'url' argument required, at %1%") % pos); - } - - } else { - url = state.coerceToString(pos, *args[0], context, false, false); - } - - // FIXME: git externals probably can be used to bypass the URI - // whitelist. Ah well. 
- state.checkURI(url); - - auto gitInfo = exportGit(state.store, url, ref, rev, name); - - state.mkAttrs(v, 8); - mkString(*state.allocAttr(v, state.sOutPath), gitInfo.storePath, - PathSet({gitInfo.storePath})); - mkString(*state.allocAttr(v, state.symbols.Create("rev")), gitInfo.rev); - mkString(*state.allocAttr(v, state.symbols.Create("shortRev")), - gitInfo.shortRev); - mkInt(*state.allocAttr(v, state.symbols.Create("revCount")), - gitInfo.revCount); - - if (state.allowedPaths) { - state.allowedPaths->insert(state.store->toRealPath(gitInfo.storePath)); - } -} - -static RegisterPrimOp r("fetchGit", 1, prim_fetchGit); - -} // namespace nix diff --git a/third_party/nix/src/libexpr/primops/fetchMercurial.cc b/third_party/nix/src/libexpr/primops/fetchMercurial.cc deleted file mode 100644 index 13dc61766f..0000000000 --- a/third_party/nix/src/libexpr/primops/fetchMercurial.cc +++ /dev/null @@ -1,246 +0,0 @@ -#include -#include - -#include -#include -#include -#include -#include - -#include "libexpr/eval-inline.hh" -#include "libexpr/primops.hh" -#include "libstore/download.hh" -#include "libstore/pathlocks.hh" -#include "libstore/store-api.hh" - -using namespace std::string_literals; - -namespace nix { - -struct HgInfo { - Path storePath; - std::string branch; - std::string rev; - uint64_t revCount = 0; -}; - -std::regex commitHashRegex("^[0-9a-fA-F]{40}$"); - -HgInfo exportMercurial(ref store, const std::string& uri, - std::string rev, const std::string& name) { - if (evalSettings.pureEval && rev == "") { - throw Error( - "in pure evaluation mode, 'fetchMercurial' requires a Mercurial " - "revision"); - } - - if (rev == "" && absl::StartsWith(uri, "/") && pathExists(uri + "/.hg")) { - bool clean = runProgram("hg", true, - {"status", "-R", uri, "--modified", "--added", - "--removed"}) == ""; - - if (!clean) { - /* This is an unclean working tree. So copy all tracked - files. */ - - DLOG(INFO) << "copying unclean Mercurial working tree '" << uri << "'"; - - HgInfo hgInfo; - hgInfo.rev = "0000000000000000000000000000000000000000"; - hgInfo.branch = absl::StripTrailingAsciiWhitespace( - runProgram("hg", true, {"branch", "-R", uri})); - - std::set files = absl::StrSplit( - runProgram("hg", true, - {"status", "-R", uri, "--clean", "--modified", "--added", - "--no-status", "--print0"}), - absl::ByChar('\0'), absl::SkipEmpty()); - - PathFilter filter = [&](const Path& p) -> bool { - assert(absl::StartsWith(p, uri)); - std::string file(p, uri.size() + 1); - - auto st = lstat(p); - - if (S_ISDIR(st.st_mode)) { - auto prefix = file + "/"; - auto i = files.lower_bound(prefix); - return i != files.end() && absl::StartsWith(*i, prefix); - } - - return files.count(file); - }; - - hgInfo.storePath = - store->addToStore("source", uri, true, htSHA256, filter); - - return hgInfo; - } - } - - if (rev == "") { - rev = "default"; - } - - Path cacheDir = fmt("%s/nix/hg/%s", getCacheDir(), - hashString(htSHA256, uri).to_string(Base32, false)); - - Path stampFile = fmt("%s/.hg/%s.stamp", cacheDir, - hashString(htSHA512, rev).to_string(Base32, false)); - - /* If we haven't pulled this repo less than ‘tarball-ttl’ seconds, - do so now. */ - time_t now = time(0); - struct stat st; - if (stat(stampFile.c_str(), &st) != 0 || - static_cast(st.st_mtime) + settings.tarballTtl <= - static_cast(now)) { - /* Except that if this is a commit hash that we already have, - we don't have to pull again. 
*/ - if (!(std::regex_match(rev, commitHashRegex) && pathExists(cacheDir) && - runProgram(RunOptions("hg", {"log", "-R", cacheDir, "-r", rev, - "--template", "1"}) - .killStderr(true)) - .second == "1")) { - DLOG(INFO) << "fetching Mercurial repository '" << uri << "'"; - - if (pathExists(cacheDir)) { - try { - runProgram("hg", true, {"pull", "-R", cacheDir, "--", uri}); - } catch (ExecError& e) { - std::string transJournal = cacheDir + "/.hg/store/journal"; - /* hg throws "abandoned transaction" error only if this file exists */ - if (pathExists(transJournal)) { - runProgram("hg", true, {"recover", "-R", cacheDir}); - runProgram("hg", true, {"pull", "-R", cacheDir, "--", uri}); - } else { - throw ExecError(e.status, - fmt("'hg pull' %s", statusToString(e.status))); - } - } - } else { - createDirs(dirOf(cacheDir)); - runProgram("hg", true, {"clone", "--noupdate", "--", uri, cacheDir}); - } - } - - writeFile(stampFile, ""); - } - - std::vector tokens = - absl::StrSplit(runProgram("hg", true, - {"log", "-R", cacheDir, "-r", rev, "--template", - "{node} {rev} {branch}"}), - absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty()); - assert(tokens.size() == 3); - - HgInfo hgInfo; - hgInfo.rev = tokens[0]; - hgInfo.revCount = std::stoull(tokens[1]); - hgInfo.branch = tokens[2]; - - std::string storeLinkName = - hashString(htSHA512, name + std::string("\0"s) + hgInfo.rev) - .to_string(Base32, false); - Path storeLink = fmt("%s/.hg/%s.link", cacheDir, storeLinkName); - - try { - auto json = nlohmann::json::parse(readFile(storeLink)); - - assert(json["name"] == name && json["rev"] == hgInfo.rev); - - hgInfo.storePath = json["storePath"]; - - if (store->isValidPath(hgInfo.storePath)) { - DLOG(INFO) << "using cached Mercurial store path '" << hgInfo.storePath - << "'"; - return hgInfo; - } - - } catch (SysError& e) { - if (e.errNo != ENOENT) { - throw; - } - } - - Path tmpDir = createTempDir(); - AutoDelete delTmpDir(tmpDir, true); - - runProgram("hg", true, {"archive", "-R", cacheDir, "-r", rev, tmpDir}); - - deletePath(tmpDir + "/.hg_archival.txt"); - - hgInfo.storePath = store->addToStore(name, tmpDir); - - nlohmann::json json; - json["storePath"] = hgInfo.storePath; - json["uri"] = uri; - json["name"] = name; - json["branch"] = hgInfo.branch; - json["rev"] = hgInfo.rev; - json["revCount"] = hgInfo.revCount; - - writeFile(storeLink, json.dump()); - - return hgInfo; -} - -static void prim_fetchMercurial(EvalState& state, const Pos& pos, Value** args, - Value& v) { - std::string url; - std::string rev; - std::string name = "source"; - PathSet context; - - state.forceValue(*args[0]); - - if (args[0]->type == tAttrs) { - state.forceAttrs(*args[0], pos); - - for (auto& attr_iter : *args[0]->attrs) { - auto& attr = attr_iter.second; - std::string n(attr.name); - if (n == "url") { - url = - state.coerceToString(*attr.pos, *attr.value, context, false, false); - } else if (n == "rev") { - rev = state.forceStringNoCtx(*attr.value, *attr.pos); - } else if (n == "name") { - name = state.forceStringNoCtx(*attr.value, *attr.pos); - } else { - throw EvalError("unsupported argument '%s' to 'fetchMercurial', at %s", - attr.name, *attr.pos); - } - } - - if (url.empty()) { - throw EvalError(format("'url' argument required, at %1%") % pos); - } - - } else { - url = state.coerceToString(pos, *args[0], context, false, false); - } - - // FIXME: git externals probably can be used to bypass the URI - // whitelist. Ah well. 
- state.checkURI(url); - - auto hgInfo = exportMercurial(state.store, url, rev, name); - - state.mkAttrs(v, 8); - mkString(*state.allocAttr(v, state.sOutPath), hgInfo.storePath, - PathSet({hgInfo.storePath})); - mkString(*state.allocAttr(v, state.symbols.Create("branch")), hgInfo.branch); - mkString(*state.allocAttr(v, state.symbols.Create("rev")), hgInfo.rev); - mkString(*state.allocAttr(v, state.symbols.Create("shortRev")), - std::string(hgInfo.rev, 0, 12)); - mkInt(*state.allocAttr(v, state.symbols.Create("revCount")), hgInfo.revCount); - - if (state.allowedPaths) { - state.allowedPaths->insert(state.store->toRealPath(hgInfo.storePath)); - } -} - -static RegisterPrimOp r("fetchMercurial", 1, prim_fetchMercurial); - -} // namespace nix diff --git a/third_party/nix/src/libexpr/primops/fromTOML.cc b/third_party/nix/src/libexpr/primops/fromTOML.cc deleted file mode 100644 index e3d2a49407..0000000000 --- a/third_party/nix/src/libexpr/primops/fromTOML.cc +++ /dev/null @@ -1,94 +0,0 @@ -#include "cpptoml/cpptoml.h" -#include "libexpr/eval-inline.hh" -#include "libexpr/primops.hh" - -namespace nix { - -static void prim_fromTOML(EvalState& state, const Pos& pos, Value** args, - Value& v) { - using namespace cpptoml; - - auto toml = state.forceStringNoCtx(*args[0], pos); - - std::istringstream tomlStream(toml); - - std::function)> visit; - - visit = [&](Value& v, std::shared_ptr t) { - if (auto t2 = t->as_table()) { - size_t size = 0; - for (auto& i : *t2) { - (void)i; - size++; - } - - state.mkAttrs(v, size); - - for (auto& i : *t2) { - auto& v2 = *state.allocAttr(v, state.symbols.Create(i.first)); - - if (auto i2 = i.second->as_table_array()) { - size_t size2 = i2->get().size(); - state.mkList(v2, size2); - for (size_t j = 0; j < size2; ++j) { - visit(*((*v2.list)[j] = state.allocValue()), i2->get()[j]); - } - } else { - visit(v2, i.second); - } - } - } - - else if (auto t2 = t->as_array()) { - size_t size = t2->get().size(); - - state.mkList(v, size); - - for (size_t i = 0; i < size; ++i) { - visit(*((*v.list)[i] = state.allocValue()), t2->get()[i]); - } - } - - // Handle cases like 'a = [[{ a = true }]]', which IMHO should be - // parsed as a array containing an array containing a table, - // but instead are parsed as an array containing a table array - // containing a table. 
- else if (auto t2 = t->as_table_array()) { - size_t size = t2->get().size(); - - state.mkList(v, size); - - for (size_t j = 0; j < size; ++j) { - visit(*((*v.list)[j] = state.allocValue()), t2->get()[j]); - } - } - - else if (t->is_value()) { - if (auto val = t->as()) { - mkInt(v, val->get()); - } else if (auto val = t->as()) { - mkFloat(v, val->get()); - } else if (auto val = t->as()) { - mkBool(v, val->get()); - } else if (auto val = t->as()) { - mkString(v, val->get()); - } else { - throw EvalError("unsupported value type in TOML"); - } - } - - else { - abort(); - } - }; - - try { - visit(v, parser(tomlStream).parse()); - } catch (std::runtime_error& e) { - throw EvalError("while parsing a TOML string at %s: %s", pos, e.what()); - } -} - -static RegisterPrimOp r("fromTOML", 1, prim_fromTOML); - -} // namespace nix diff --git a/third_party/nix/src/libexpr/symbol-table.cc b/third_party/nix/src/libexpr/symbol-table.cc deleted file mode 100644 index 2b27ca54c2..0000000000 --- a/third_party/nix/src/libexpr/symbol-table.cc +++ /dev/null @@ -1,24 +0,0 @@ -#include "libexpr/symbol-table.hh" - -#include -#include - -namespace nix { - -Symbol SymbolTable::Create(absl::string_view sym) { - auto it = symbols_.emplace(sym); - const std::string* ptr = &(*it.first); - return Symbol(ptr); -} - -size_t SymbolTable::Size() const { return symbols_.size(); } - -size_t SymbolTable::TotalSize() const { - size_t n = 0; - for (auto& i : symbols_) { - n += i.size(); - } - return n; -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/symbol-table.hh b/third_party/nix/src/libexpr/symbol-table.hh deleted file mode 100644 index c259965885..0000000000 --- a/third_party/nix/src/libexpr/symbol-table.hh +++ /dev/null @@ -1,69 +0,0 @@ -#pragma once - -#include -#include - -namespace nix { // TODO(tazjin): ::expr - -// TODO(tazjin): Replace with a simpler struct, or get rid of. -class Symbol { - private: - const std::string* s; // pointer into SymbolTable - Symbol(const std::string* s) : s(s){}; - friend class SymbolTable; - - public: - bool operator==(const Symbol& s2) const { return s == s2.s; } - - bool operator!=(const Symbol& s2) const { return s != s2.s; } - - bool operator<(const Symbol& s2) const { return *s < *s2.s; } - - operator const std::string&() const { return *s; } - - bool set() const { return s; } - - bool empty() const { return s->empty(); } - - friend std::ostream& operator<<(std::ostream& str, const Symbol& sym); - - template - friend H AbslHashValue(H h, const Symbol& c) { - return H::combine(std::move(h), c.s); - } -}; - -// SymbolTable is a hash-set based symbol-interning mechanism. -// -// TODO(tazjin): Figure out which things use this. AttrSets, ...? -// Is it possible this only exists because AttrSet wasn't a map? -// -// Original comment: -// -// Symbol table used by the parser and evaluator to represent and look -// up identifiers and attributes efficiently. SymbolTable::create() -// converts a string into a symbol. Symbols have the property that -// they can be compared efficiently (using a pointer equality test), -// because the symbol table stores only one copy of each string. -class SymbolTable { - public: - // Create a new symbol in this table by emplacing the provided - // string into it. - // - // The symbol will reference an existing symbol if the symbol is - // already interned. - Symbol Create(absl::string_view sym); - - // Return the number of symbols interned. 
- size_t Size() const; - - // Return the total size (in bytes) - size_t TotalSize() const; - - private: - // flat_hash_set does not retain pointer stability on rehashing, - // hence "interned" strings/symbols are stored on the heap. - absl::node_hash_set symbols_; -}; - -} // namespace nix diff --git a/third_party/nix/src/libexpr/value-to-json.cc b/third_party/nix/src/libexpr/value-to-json.cc deleted file mode 100644 index a338d4eed7..0000000000 --- a/third_party/nix/src/libexpr/value-to-json.cc +++ /dev/null @@ -1,91 +0,0 @@ -#include "libexpr/value-to-json.hh" - -#include -#include - -#include "libexpr/eval-inline.hh" -#include "libutil/json.hh" -#include "libutil/util.hh" - -namespace nix { - -void printValueAsJSON(EvalState& state, bool strict, Value& v, - JSONPlaceholder& out, PathSet& context) { - checkInterrupt(); - - if (strict) { - state.forceValue(v); - } - - switch (v.type) { - case tInt: - out.write(v.integer); - break; - - case tBool: - out.write(v.boolean); - break; - - case tString: - copyContext(v, context); - out.write(v.string.s); - break; - - case tPath: - out.write(state.copyPathToStore(context, v.path)); - break; - - case tNull: - out.write(nullptr); - break; - - case tAttrs: { - auto maybeString = - state.tryAttrsToString(noPos, v, context, false, false); - if (maybeString) { - out.write(*maybeString); - break; - } - auto i = v.attrs->find(state.sOutPath); - if (i == v.attrs->end()) { - auto obj(out.object()); - StringSet names; - for (auto& j : *v.attrs) { - names.insert(j.second.name); - } - for (auto& j : names) { - auto [_, a] = *v.attrs->find(state.symbols.Create(j)); - auto placeholder(obj.placeholder(j)); - printValueAsJSON(state, strict, *a.value, placeholder, context); - } - } else { - printValueAsJSON(state, strict, *i->second.value, out, context); - } - break; - } - - case tList: { - auto list(out.list()); - for (unsigned int n = 0; n < v.listSize(); ++n) { - auto placeholder(list.placeholder()); - printValueAsJSON(state, strict, *(*v.list)[n], placeholder, context); - } - break; - } - - case tFloat: - out.write(v.fpoint); - break; - - default: - throw TypeError(format("cannot convert %1% to JSON") % showType(v)); - } -} - -void printValueAsJSON(EvalState& state, bool strict, Value& v, - std::ostream& str, PathSet& context) { - JSONPlaceholder out(str); - printValueAsJSON(state, strict, v, out, context); -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/value-to-json.hh b/third_party/nix/src/libexpr/value-to-json.hh deleted file mode 100644 index 294d776045..0000000000 --- a/third_party/nix/src/libexpr/value-to-json.hh +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include -#include - -#include "libexpr/eval.hh" -#include "libexpr/nixexpr.hh" - -namespace nix { - -class JSONPlaceholder; - -void printValueAsJSON(EvalState& state, bool strict, Value& v, - JSONPlaceholder& out, PathSet& context); - -void printValueAsJSON(EvalState& state, bool strict, Value& v, - std::ostream& str, PathSet& context); - -} // namespace nix diff --git a/third_party/nix/src/libexpr/value-to-xml.cc b/third_party/nix/src/libexpr/value-to-xml.cc deleted file mode 100644 index 921973881f..0000000000 --- a/third_party/nix/src/libexpr/value-to-xml.cc +++ /dev/null @@ -1,184 +0,0 @@ -#include "libexpr/value-to-xml.hh" - -#include - -#include "libexpr/eval-inline.hh" -#include "libutil/util.hh" -#include "libutil/xml-writer.hh" - -namespace nix { - -static XMLAttrs singletonAttrs(const std::string& name, - const std::string& value) { - XMLAttrs attrs; - attrs[name] = 
value; - return attrs; -} - -static void printValueAsXML(EvalState& state, bool strict, bool location, - Value& v, XMLWriter& doc, PathSet& context, - PathSet& drvsSeen); - -static void posToXML(XMLAttrs& xmlAttrs, const Pos& pos) { - xmlAttrs["path"] = pos.file.value(); - xmlAttrs["line"] = (format("%1%") % pos.line).str(); - xmlAttrs["column"] = (format("%1%") % pos.column).str(); -} - -static void showAttrs(EvalState& state, bool strict, bool location, - Bindings& attrs, XMLWriter& doc, PathSet& context, - PathSet& drvsSeen) { - StringSet names; - - for (auto& i : attrs) { - names.insert(i.second.name); - } - - for (auto& i : names) { - auto& [_, a] = *attrs.find(state.symbols.Create(i)); - - XMLAttrs xmlAttrs; - xmlAttrs["name"] = i; - if (location && a.pos != &noPos) { - posToXML(xmlAttrs, *a.pos); - } - - XMLOpenElement elem(doc, "attr", xmlAttrs); - printValueAsXML(state, strict, location, *a.value, doc, context, drvsSeen); - } -} - -static void printValueAsXML(EvalState& state, bool strict, bool location, - Value& v, XMLWriter& doc, PathSet& context, - PathSet& drvsSeen) { - checkInterrupt(); - - if (strict) { - state.forceValue(v); - } - - switch (v.type) { - case tInt: - doc.writeEmptyElement( - "int", singletonAttrs("value", (format("%1%") % v.integer).str())); - break; - - case tBool: - doc.writeEmptyElement( - "bool", singletonAttrs("value", v.boolean ? "true" : "false")); - break; - - case tString: - /* !!! show the context? */ - copyContext(v, context); - doc.writeEmptyElement("string", singletonAttrs("value", v.string.s)); - break; - - case tPath: - doc.writeEmptyElement("path", singletonAttrs("value", v.path)); - break; - - case tNull: - doc.writeEmptyElement("null"); - break; - - case tAttrs: - if (state.isDerivation(v)) { - XMLAttrs xmlAttrs; - - Bindings::iterator a = - v.attrs->find(state.symbols.Create("derivation")); - - Path drvPath; - a = v.attrs->find(state.sDrvPath); - if (a != v.attrs->end()) { - if (strict) { - state.forceValue(*a->second.value); - } - if (a->second.value->type == tString) { - xmlAttrs["drvPath"] = drvPath = a->second.value->string.s; - } - } - - a = v.attrs->find(state.sOutPath); - if (a != v.attrs->end()) { - if (strict) { - state.forceValue(*a->second.value); - } - if (a->second.value->type == tString) { - xmlAttrs["outPath"] = a->second.value->string.s; - } - } - - XMLOpenElement _(doc, "derivation", xmlAttrs); - - if (!drvPath.empty() && drvsSeen.find(drvPath) == drvsSeen.end()) { - drvsSeen.insert(drvPath); - showAttrs(state, strict, location, *v.attrs, doc, context, drvsSeen); - } else { - doc.writeEmptyElement("repeated"); - } - } - - else { - XMLOpenElement _(doc, "attrs"); - showAttrs(state, strict, location, *v.attrs, doc, context, drvsSeen); - } - - break; - - case tList: { - XMLOpenElement _(doc, "list"); - for (unsigned int n = 0; n < v.listSize(); ++n) { - printValueAsXML(state, strict, location, *(*v.list)[n], doc, context, - drvsSeen); - } - break; - } - - case tLambda: { - XMLAttrs xmlAttrs; - if (location) { - posToXML(xmlAttrs, v.lambda.fun->pos); - } - XMLOpenElement _(doc, "function", xmlAttrs); - - if (v.lambda.fun->matchAttrs) { - XMLAttrs attrs; - if (!v.lambda.fun->arg.empty()) { - attrs["name"] = v.lambda.fun->arg; - } - if (v.lambda.fun->formals->ellipsis) { - attrs["ellipsis"] = "1"; - } - XMLOpenElement _(doc, "attrspat", attrs); - for (auto& i : v.lambda.fun->formals->formals) { - doc.writeEmptyElement("attr", singletonAttrs("name", i.name)); - } - } else { - doc.writeEmptyElement("varpat", - 
singletonAttrs("name", v.lambda.fun->arg)); - } - - break; - } - - case tFloat: - doc.writeEmptyElement( - "float", singletonAttrs("value", (format("%1%") % v.fpoint).str())); - break; - - default: - doc.writeEmptyElement("unevaluated"); - } -} - -void printValueAsXML(EvalState& state, bool strict, bool location, Value& v, - std::ostream& out, PathSet& context) { - XMLWriter doc(true, out); - XMLOpenElement root(doc, "expr"); - PathSet drvsSeen; - printValueAsXML(state, strict, location, v, doc, context, drvsSeen); -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/value-to-xml.hh b/third_party/nix/src/libexpr/value-to-xml.hh deleted file mode 100644 index 18c5279236..0000000000 --- a/third_party/nix/src/libexpr/value-to-xml.hh +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once - -#include -#include - -#include "libexpr/eval.hh" -#include "libexpr/nixexpr.hh" - -namespace nix { - -void printValueAsXML(EvalState& state, bool strict, bool location, Value& v, - std::ostream& out, PathSet& context); - -} diff --git a/third_party/nix/src/libexpr/value.cc b/third_party/nix/src/libexpr/value.cc deleted file mode 100644 index 93fe187478..0000000000 --- a/third_party/nix/src/libexpr/value.cc +++ /dev/null @@ -1,121 +0,0 @@ -#include "libexpr/value.hh" - -#include - -namespace nix { - -Value::Value(const Value& copy) { *this = copy; } - -Value::Value(Value&& move) { *this = move; } - -Value& Value::operator=(const Value& copy) { - if (type != copy.type) { - memset(this, 0, sizeof(*this)); - } - type = copy.type; - switch (type) { - case tInt: - integer = copy.integer; - break; - case tBool: - boolean = copy.boolean; - break; - case tString: - string = copy.string; - break; - case tPath: - path = copy.path; - break; - case tNull: - /* no fields */ - break; - case tAttrs: - attrs = copy.attrs; - break; - case tList: - list = copy.list; - break; - case tThunk: - thunk = copy.thunk; - break; - case tApp: - app = copy.app; - break; - case tLambda: - lambda = copy.lambda; - break; - case tBlackhole: - /* no fields */ - break; - case tPrimOp: - primOp = copy.primOp; - break; - case tPrimOpApp: - primOpApp = copy.primOpApp; - break; - case _reserved1: - LOG(FATAL) << "attempted to assign a tExternal value"; - break; - case tFloat: - fpoint = copy.fpoint; - break; - } - return *this; -} - -Value& Value::operator=(Value&& move) { - if (type != move.type) { - memset(this, 0, sizeof(*this)); - } - type = move.type; - switch (type) { - case tInt: - integer = move.integer; - break; - case tBool: - boolean = move.boolean; - break; - case tString: - string = move.string; - break; - case tPath: - path = move.path; - break; - case tNull: - /* no fields */ - break; - case tAttrs: - attrs = move.attrs; - break; - case tList: - list = move.list; - break; - case tThunk: - thunk = move.thunk; - break; - case tApp: - app = move.app; - break; - case tLambda: - lambda = move.lambda; - break; - case tBlackhole: - /* no fields */ - break; - case tPrimOp: - primOp = move.primOp; - break; - case tPrimOpApp: - primOpApp = move.primOpApp; - break; - case _reserved1: - LOG(FATAL) << "attempted to assign a tExternal value"; - break; - case tFloat: - fpoint = move.fpoint; - break; - } - return *this; -} - -} // namespace nix diff --git a/third_party/nix/src/libexpr/value.hh b/third_party/nix/src/libexpr/value.hh deleted file mode 100644 index 82021c77c4..0000000000 --- a/third_party/nix/src/libexpr/value.hh +++ /dev/null @@ -1,191 +0,0 @@ -#pragma once - -#include -#include - -#include "libexpr/symbol-table.hh" -#include 
"libutil/types.hh" - -namespace nix { - -using ValueType = enum { - tInt = 1, - tBool, - tString, - tPath, - tNull, - tAttrs, - tList, - tThunk, - tApp, - tLambda, - tBlackhole, - tPrimOp, - tPrimOpApp, - _reserved1, // formerly tExternal - tFloat -}; - -class Bindings; -struct Env; -struct Expr; -struct ExprLambda; -struct PrimOp; -struct PrimOp; -class Symbol; - -typedef int64_t NixInt; -typedef double NixFloat; - -// Forward declaration of Value is required because the following -// types are mutually recursive. -// -// TODO(tazjin): Really, these types need some serious refactoring. -struct Value; - -/* Strings in the evaluator carry a so-called `context' which - is a list of strings representing store paths. This is to - allow users to write things like - - "--with-freetype2-library=" + freetype + "/lib" - - where `freetype' is a derivation (or a source to be copied - to the store). If we just concatenated the strings without - keeping track of the referenced store paths, then if the - string is used as a derivation attribute, the derivation - will not have the correct dependencies in its inputDrvs and - inputSrcs. - - The semantics of the context is as follows: when a string - with context C is used as a derivation attribute, then the - derivations in C will be added to the inputDrvs of the - derivation, and the other store paths in C will be added to - the inputSrcs of the derivations. - - For canonicity, the store paths should be in sorted order. */ -struct NixString { - const char* s; - const char** context; // must be in sorted order -}; - -struct NixThunk { - Env* env; - Expr* expr; -}; - -struct NixApp { - Value *left, *right; -}; - -struct NixLambda { - Env* env; - ExprLambda* fun; -}; - -struct NixPrimOpApp { - Value *left, *right; -}; - -using NixList = std::vector; - -struct Value { - ValueType type; - union { // TODO(tazjin): std::variant - NixInt integer; - bool boolean; - NixString string; - const char* path; - std::shared_ptr attrs; - std::shared_ptr list; - NixThunk thunk; - NixApp app; // TODO(tazjin): "app"? - NixLambda lambda; - std::shared_ptr primOp; - NixPrimOpApp primOpApp; - NixFloat fpoint; - }; - - Value() : type(tInt), attrs(nullptr) { - static_assert(offsetof(Value, attrs) + sizeof(attrs) == sizeof(Value)); - } - - Value(const Value& copy); - Value(Value&& move); - ~Value() {} - Value& operator=(const Value& copy); - Value& operator=(Value&& move); - - bool isList() const { return type == tList; } - - size_t listSize() const { return list->size(); } -}; - -/* After overwriting an app node, be sure to clear pointers in the - Value to ensure that the target isn't kept alive unnecessarily. 
*/ -static inline void clearValue(Value& v) { v.app.left = v.app.right = 0; } - -static inline void mkInt(Value& v, NixInt n) { - clearValue(v); - v.type = tInt; - v.integer = n; -} - -static inline void mkFloat(Value& v, NixFloat n) { - clearValue(v); - v.type = tFloat; - v.fpoint = n; -} - -static inline void mkBool(Value& v, bool b) { - clearValue(v); - v.type = tBool; - v.boolean = b; -} - -static inline void mkNull(Value& v) { - clearValue(v); - v.type = tNull; -} - -static inline void mkApp(Value& v, Value& left, Value& right) { - v.type = tApp; - v.app.left = &left; - v.app.right = &right; -} - -static inline void mkPrimOpApp(Value& v, Value& left, Value& right) { - v.type = tPrimOpApp; - v.app.left = &left; - v.app.right = &right; -} - -static inline void mkStringNoCopy(Value& v, const char* s) { - v.type = tString; - v.string.s = s; - v.string.context = 0; -} - -static inline void mkString(Value& v, const Symbol& s) { - mkStringNoCopy(v, ((const std::string&)s).c_str()); -} - -void mkString(Value& v, const char* s); - -static inline void mkPathNoCopy(Value& v, const char* s) { - clearValue(v); - v.type = tPath; - v.path = s; -} - -void mkPath(Value& v, const char* s); - -/* Compute the size in bytes of the given value, including all values - and environments reachable from it. Static expressions (Exprs) are - not included. */ -size_t valueSize(const Value& v); - -using ValueMap = std::map; - -std::shared_ptr allocRootValue(Value* v); - -} // namespace nix diff --git a/third_party/nix/src/libmain/CMakeLists.txt b/third_party/nix/src/libmain/CMakeLists.txt deleted file mode 100644 index a95128c131..0000000000 --- a/third_party/nix/src/libmain/CMakeLists.txt +++ /dev/null @@ -1,33 +0,0 @@ -# -*- mode: cmake; -*- -add_library(nixmain SHARED) -set_property(TARGET nixmain PROPERTY CXX_STANDARD 17) -include_directories(${PROJECT_BINARY_DIR}) # for config.h -target_include_directories(nixmain PUBLIC "${nix_SOURCE_DIR}/src") - -set(HEADER_FILES - common-args.hh - shared.hh -) - -target_sources(nixmain - PUBLIC - ${HEADER_FILES} - PRIVATE - common-args.cc - shared.cc - stack.cc -) - -target_link_libraries(nixmain - nixstore - nixutil - - absl::strings - glog -) - -configure_file("nix-main.pc.in" "${PROJECT_BINARY_DIR}/nix-main.pc" @ONLY) -INSTALL(FILES "${PROJECT_BINARY_DIR}/nix-main.pc" DESTINATION "${PKGCONFIG_INSTALL_DIR}") - -INSTALL(FILES ${HEADER_FILES} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/nix/libmain) -INSTALL(TARGETS nixmain DESTINATION ${CMAKE_INSTALL_LIBDIR}) diff --git a/third_party/nix/src/libmain/common-args.cc b/third_party/nix/src/libmain/common-args.cc deleted file mode 100644 index 729e026f19..0000000000 --- a/third_party/nix/src/libmain/common-args.cc +++ /dev/null @@ -1,56 +0,0 @@ -#include "libmain/common-args.hh" - -#include - -#include "libstore/globals.hh" - -namespace nix { - -MixCommonArgs::MixCommonArgs(const std::string& programName) - : programName(programName) { - mkFlag() - .longName("verbose") - .shortName('v') - .description("increase verbosity level") - .handler([]() { - FLAGS_stderrthreshold = google::GLOG_INFO; - FLAGS_v += 1; - }); - - mkFlag() - .longName("quiet") - .description("silence all log output") - .handler([]() { FLAGS_stderrthreshold = google::GLOG_FATAL; }); - - mkFlag() - .longName("option") - .labels({"name", "value"}) - .description("set a Nix configuration option (overriding nix.conf)") - .arity(2) - .handler([](std::vector ss) { - try { - globalConfig.set(ss[0], ss[1]); - } catch (UsageError& e) { - LOG(WARNING) << e.what(); - } - }); 
- - mkFlag() - .longName("max-jobs") - .shortName('j') - .label("jobs") - .description("maximum number of parallel builds") - .handler([=](const std::string& s) { settings.set("max-jobs", s); }); - - std::string cat = "config"; - globalConfig.convertToArgs(*this, cat); - - // Backward compatibility hack: nix-env already had a --system flag. - if (programName == "nix-env") { - longFlags.erase("system"); - } - - hiddenCategories.insert(cat); -} - -} // namespace nix diff --git a/third_party/nix/src/libmain/common-args.hh b/third_party/nix/src/libmain/common-args.hh deleted file mode 100644 index f1c7c84813..0000000000 --- a/third_party/nix/src/libmain/common-args.hh +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include "libutil/args.hh" - -namespace nix { - -struct MixCommonArgs : virtual Args { - std::string programName; - MixCommonArgs(const std::string& programName); -}; - -struct MixDryRun : virtual Args { - bool dryRun = false; - - MixDryRun() { - mkFlag(0, "dry-run", "show what this command would do without doing it", - &dryRun); - } -}; - -struct MixJSON : virtual Args { - bool json = false; - - MixJSON() { mkFlag(0, "json", "produce JSON output", &json); } -}; - -} // namespace nix diff --git a/third_party/nix/src/libmain/nix-main.pc.in b/third_party/nix/src/libmain/nix-main.pc.in deleted file mode 100644 index 9876a3d1b7..0000000000 --- a/third_party/nix/src/libmain/nix-main.pc.in +++ /dev/null @@ -1,9 +0,0 @@ -prefix=@CMAKE_INSTALL_PREFIX@ -libdir=@CMAKE_INSTALL_FULL_LIBDIR@ -includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ - -Name: Nix -Description: Nix Package Manager -Version: @PACKAGE_VERSION@ -Libs: -L${libdir} -lnixmain -Cflags: -I${includedir}/nix diff --git a/third_party/nix/src/libmain/shared.cc b/third_party/nix/src/libmain/shared.cc deleted file mode 100644 index 331ea6b3a9..0000000000 --- a/third_party/nix/src/libmain/shared.cc +++ /dev/null @@ -1,386 +0,0 @@ -#include "libmain/shared.hh" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "libstore/globals.hh" -#include "libstore/store-api.hh" -#include "libutil/util.hh" - -namespace nix { - -static bool gcWarning = true; - -void printGCWarning() { - if (!gcWarning) { - return; - } - - static bool haveWarned = false; - if (!haveWarned) { - haveWarned = true; - LOG(WARNING) << "you did not specify '--add-root'; " - << "the result might be removed by the garbage collector"; - } -} - -void printMissing(const ref& store, const PathSet& paths) { - unsigned long long downloadSize; - unsigned long long narSize; - PathSet willBuild; - PathSet willSubstitute; - PathSet unknown; - store->queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, - narSize); - printMissing(store, willBuild, willSubstitute, unknown, downloadSize, - narSize); -} - -void printMissing(const ref& store, const PathSet& willBuild, - const PathSet& willSubstitute, const PathSet& unknown, - unsigned long long downloadSize, unsigned long long narSize) { - if (!willBuild.empty()) { - LOG(INFO) << "these derivations will be built:"; - Paths sorted = store->topoSortPaths(willBuild); - reverse(sorted.begin(), sorted.end()); - for (auto& i : sorted) { - LOG(INFO) << " " << i; - } - } - - if (!willSubstitute.empty()) { - LOG(INFO) << "these paths will be fetched (" - << (downloadSize / (1024.0 * 1024.0)) << " MiB download, " - << (narSize / (1024.0 * 1024.0)) << "MiB unpacked):"; - - for (auto& i : willSubstitute) { - LOG(INFO) << i; - } - } - - if 
(!unknown.empty()) { - LOG(INFO) << "don't know how to build these paths" - << (settings.readOnlyMode - ? " (may be caused by read-only store access)" - : "") - << ":"; - - for (auto& i : unknown) { - LOG(INFO) << i; - } - } -} - -std::string getArg(const std::string& opt, Strings::iterator& i, - const Strings::iterator& end) { - ++i; - if (i == end) { - throw UsageError(format("'%1%' requires an argument") % opt); - } - - return *i; -} - -#if OPENSSL_VERSION_NUMBER < 0x10101000L -/* OpenSSL is not thread-safe by default - it will randomly crash - unless the user supplies a mutex locking function. So let's do - that. */ -static std::vector opensslLocks; - -static void opensslLockCallback(int mode, int type, const char* file, - int line) { - if (mode & CRYPTO_LOCK) - opensslLocks[type].lock(); - else - opensslLocks[type].unlock(); -} -#endif - -static void sigHandler(int signo) {} - -void initNix() { - /* Turn on buffering for cerr. */ -#if HAVE_PUBSETBUF - static char buf[1024]; - std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf)); -#endif - -#if OPENSSL_VERSION_NUMBER < 0x10101000L - /* Initialise OpenSSL locking. */ - opensslLocks = std::vector(CRYPTO_num_locks()); - CRYPTO_set_locking_callback(opensslLockCallback); -#endif - - loadConfFile(); - - startSignalHandlerThread(); - - /* Reset SIGCHLD to its default. */ - struct sigaction act; - sigemptyset(&act.sa_mask); - act.sa_handler = SIG_DFL; - act.sa_flags = 0; - if (sigaction(SIGCHLD, &act, nullptr) != 0) { - throw SysError("resetting SIGCHLD"); - } - - /* Install a dummy SIGUSR1 handler for use with pthread_kill(). */ - act.sa_handler = sigHandler; - if (sigaction(SIGUSR1, &act, nullptr) != 0) { - throw SysError("handling SIGUSR1"); - } - - /* Register a SIGSEGV handler to detect stack overflows. */ - detectStackOverflow(); - - /* There is no privacy in the Nix system ;-) At least not for - now. In particular, store objects should be readable by - everybody. */ - umask(0022); - - /* Initialise the PRNG. 
*/ - struct timeval tv; - gettimeofday(&tv, nullptr); - srandom(tv.tv_usec); -} - -LegacyArgs::LegacyArgs( - const std::string& programName, - std::function - parseArg) - : MixCommonArgs(programName), parseArg(std::move(parseArg)) { - mkFlag() - .longName("no-build-output") - .shortName('Q') - .description("do not show build output") - .set(&settings.verboseBuild, false); - - mkFlag() - .longName("keep-failed") - .shortName('K') - .description("keep temporary directories of failed builds") - .set(&(bool&)settings.keepFailed, true); - - mkFlag() - .longName("keep-going") - .shortName('k') - .description("keep going after a build fails") - .set(&(bool&)settings.keepGoing, true); - - mkFlag() - .longName("fallback") - .description("build from source if substitution fails") - .set(&(bool&)settings.tryFallback, true); - - auto intSettingAlias = [&](char shortName, const std::string& longName, - const std::string& description, - const std::string& dest) { - mkFlag(shortName, longName, description, [=](unsigned int n) { - settings.set(dest, std::to_string(n)); - }); - }; - - intSettingAlias(0, "cores", - "maximum number of CPU cores to use inside a build", "cores"); - intSettingAlias(0, "max-silent-time", - "number of seconds of silence before a build is killed", - "max-silent-time"); - intSettingAlias(0, "timeout", "number of seconds before a build is killed", - "timeout"); - - mkFlag(0, "readonly-mode", "do not write to the Nix store", - &settings.readOnlyMode); - - mkFlag(0, "no-gc-warning", "disable warning about not using '--add-root'", - &gcWarning, false); - - mkFlag() - .longName("store") - .label("store-uri") - .description("URI of the Nix store to use") - .dest(&(std::string&)settings.storeUri); -} - -bool LegacyArgs::processFlag(Strings::iterator& pos, Strings::iterator end) { - if (MixCommonArgs::processFlag(pos, end)) { - return true; - } - bool res = parseArg(pos, end); - if (res) { - ++pos; - } - return res; -} - -bool LegacyArgs::processArgs(const Strings& args, bool finish) { - if (args.empty()) { - return true; - } - assert(args.size() == 1); - Strings ss(args); - auto pos = ss.begin(); - if (!parseArg(pos, ss.end())) { - throw UsageError(format("unexpected argument '%1%'") % args.front()); - } - return true; -} - -void parseCmdLine( - int argc, char** argv, - std::function - parseArg) { - parseCmdLine(baseNameOf(argv[0]), argvToStrings(argc, argv), - std::move(parseArg)); -} - -void parseCmdLine( - const std::string& programName, const Strings& args, - std::function - parseArg) { - LegacyArgs(programName, std::move(parseArg)).parseCmdline(args); -} - -void printVersion(const std::string& programName) { - std::cout << format("%1% (Tvix) %2%") % programName % nixVersion << std::endl; - - // TODO(tazjin): figure out what the fuck this is - /*if (verbosity > lvlInfo) { - Strings cfg; -#if HAVE_BOEHMGC - cfg.push_back("gc"); -#endif -#if HAVE_SODIUM - cfg.push_back("signed-caches"); -#endif - std::cout << "Features: " << concatStringsSep(", ", cfg) << "\n"; - std::cout << "Configuration file: " << settings.nixConfDir + "/nix.conf" - << "\n"; - std::cout << "Store directory: " << settings.nixStore << "\n"; - std::cout << "State directory: " << settings.nixStateDir << "\n"; - } */ - throw Exit(); -} - -void showManPage(const std::string& name) { - restoreSignals(); - setenv("MANPATH", settings.nixManDir.c_str(), 1); - execlp("man", "man", name.c_str(), nullptr); - throw SysError(format("command 'man %1%' failed") % name.c_str()); -} - -int handleExceptions(const std::string& 
programName, - const std::function& fun) { - ReceiveInterrupts receiveInterrupts; // FIXME: need better place for this - - std::string error = ANSI_RED "error:" ANSI_NORMAL " "; - try { - try { - fun(); - } catch (...) { - /* Subtle: we have to make sure that any `interrupted' - condition is discharged before we reach printMsg() - below, since otherwise it will throw an (uncaught) - exception. */ - setInterruptThrown(); - throw; - } - } catch (Exit& e) { - return e.status; - } catch (UsageError& e) { - LOG(INFO) << e.what(); - LOG(INFO) << "Try '" << programName << " --help' for more information."; - return 1; - } catch (BaseError& e) { - LOG(ERROR) << error << (settings.showTrace ? e.prefix() : "") << e.msg(); - if (!e.prefix().empty() && !settings.showTrace) { - LOG(INFO) << "(use '--show-trace' to show detailed location information)"; - } - return static_cast(e.status); - } catch (std::bad_alloc& e) { - LOG(ERROR) << error << "failed to allocate: " << e.what(); - return 1; - } catch (std::exception& e) { - LOG(ERROR) << error << e.what(); - return 1; - } - - return 0; -} - -RunPager::RunPager() { - if (isatty(STDOUT_FILENO) == 0) { - return; - } - char* pager = getenv("NIX_PAGER"); - if (pager == nullptr) { - pager = getenv("PAGER"); - } - if (pager && (std::string(pager) == "" || std::string(pager) == "cat")) { - return; - } - - Pipe toPager; - toPager.create(); - - pid = startProcess([&]() { - if (dup2(toPager.readSide.get(), STDIN_FILENO) == -1) { - throw SysError("dupping stdin"); - } - if (getenv("LESS") == nullptr) { - setenv("LESS", "FRSXMK", 1); - } - restoreSignals(); - if (pager != nullptr) { - execl("/bin/sh", "sh", "-c", pager, nullptr); - } - execlp("pager", "pager", nullptr); - execlp("less", "less", nullptr); - execlp("more", "more", nullptr); - throw SysError(format("executing '%1%'") % pager); - }); - - pid.setKillSignal(SIGINT); - - if (dup2(toPager.writeSide.get(), STDOUT_FILENO) == -1) { - throw SysError("dupping stdout"); - } -} - -RunPager::~RunPager() { - try { - if (pid != Pid(-1)) { - std::cout.flush(); - close(STDOUT_FILENO); - pid.wait(); - } - } catch (...) { - ignoreException(); - } -} - -std::string showBytes(unsigned long long bytes) { - return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str(); -} - -PrintFreed::~PrintFreed() { - if (show) { - std::cout << format("%1% store paths deleted, %2% freed\n") % - results.paths.size() % showBytes(results.bytesFreed); - } -} - -Exit::~Exit() = default; - -} // namespace nix diff --git a/third_party/nix/src/libmain/shared.hh b/third_party/nix/src/libmain/shared.hh deleted file mode 100644 index d1061d5e04..0000000000 --- a/third_party/nix/src/libmain/shared.hh +++ /dev/null @@ -1,134 +0,0 @@ -#pragma once - -#include - -#include -#include - -#include "libmain/common-args.hh" -#include "libutil/args.hh" -#include "libutil/util.hh" - -namespace nix { - -class Exit : public std::exception { - public: - int status; - Exit() : status(0) {} - Exit(int status) : status(status) {} - virtual ~Exit(); -}; - -int handleExceptions(const std::string& programName, - const std::function& fun); - -void initNix(); - -void parseCmdLine( - int argc, char** argv, - std::function - parseArg); - -void parseCmdLine( - const std::string& programName, const Strings& args, - std::function - parseArg); - -void printVersion(const std::string& programName); - -/* Ugh. No better place to put this. 
*/ -void printGCWarning(); - -class Store; - -void printMissing(const ref& store, const PathSet& paths); - -void printMissing(const ref& store, const PathSet& willBuild, - const PathSet& willSubstitute, const PathSet& unknown, - unsigned long long downloadSize, unsigned long long narSize); - -std::string getArg(const std::string& opt, Strings::iterator& i, - const Strings::iterator& end); - -template -N getIntArg(const std::string& opt, Strings::iterator& i, - const Strings::iterator& end, bool allowUnit) { - ++i; - if (i == end) { - throw UsageError(format("'%1%' requires an argument") % opt); - } - std::string s = *i; - N multiplier = 1; - if (allowUnit && !s.empty()) { - char u = std::toupper(*s.rbegin()); - if (std::isalpha(u)) { - if (u == 'K') { - multiplier = 1ULL << 10; - } else if (u == 'M') { - multiplier = 1ULL << 20; - } else if (u == 'G') { - multiplier = 1ULL << 30; - } else if (u == 'T') { - multiplier = 1ULL << 40; - } else { - throw UsageError(format("invalid unit specifier '%1%'") % u); - } - - s.resize(s.size() - 1); - } - } - N n; - if (!absl::SimpleAtoi(s, &n)) { - throw UsageError(format("'%1%' requires an integer argument") % opt); - } - return n * multiplier; -} - -struct LegacyArgs : public MixCommonArgs { - std::function - parseArg; - - LegacyArgs( - const std::string& programName, - std::function - parseArg); - - bool processFlag(Strings::iterator& pos, Strings::iterator end) override; - - bool processArgs(const Strings& args, bool finish) override; -}; - -/* Show the manual page for the specified program. */ -void showManPage(const std::string& name); - -/* The constructor of this class starts a pager if stdout is a - terminal and $PAGER is set. Stdout is redirected to the pager. */ -class RunPager { - public: - RunPager(); - ~RunPager(); - - private: - Pid pid; -}; - -extern volatile ::sig_atomic_t blockInt; - -/* GC helpers. */ - -std::string showBytes(unsigned long long bytes); - -struct GCResults; - -struct PrintFreed { - bool show; - const GCResults& results; - PrintFreed(bool show, const GCResults& results) - : show(show), results(results) {} - ~PrintFreed(); -}; - -/* Install a SIGSEGV handler to detect stack overflows. */ -void detectStackOverflow(); - -} // namespace nix diff --git a/third_party/nix/src/libmain/stack.cc b/third_party/nix/src/libmain/stack.cc deleted file mode 100644 index 628b6313a8..0000000000 --- a/third_party/nix/src/libmain/stack.cc +++ /dev/null @@ -1,75 +0,0 @@ -#include -#include -#include -#include - -#include - -#include "libutil/types.hh" - -namespace nix { - -static void sigsegvHandler(int signo, siginfo_t* info, void* ctx) { - /* Detect stack overflows by comparing the faulting address with - the stack pointer. Unfortunately, getting the stack pointer is - not portable. */ - bool haveSP = true; - char* sp = nullptr; -#if defined(__x86_64__) && defined(REG_RSP) - sp = (char*)(static_cast(ctx))->uc_mcontext.gregs[REG_RSP]; -#elif defined(REG_ESP) - sp = (char*)((ucontext_t*)ctx)->uc_mcontext.gregs[REG_ESP]; -#else - haveSP = false; -#endif - - if (haveSP) { - ptrdiff_t diff = static_cast(info->si_addr) - sp; - if (diff < 0) { - diff = -diff; - } - if (diff < 4096) { - char msg[] = "error: stack overflow (possible infinite recursion)\n"; - [[gnu::unused]] auto res = write(2, msg, strlen(msg)); - _exit(1); // maybe abort instead? - } - } - - /* Restore default behaviour (i.e. segfault and dump core). 
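
The size-suffix parsing in the getIntArg template above can be shown in isolation. The parseSize helper below is a hypothetical, self-contained sketch of the same rule ('K', 'M', 'G', 'T' multiply by powers of 1024); it is not part of the deleted interface and uses std::stoull instead of absl::SimpleAtoi to stay dependency-free:

    #include <cctype>
    #include <cstdint>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Parse "8M", "2G", ... into a byte count, mirroring getIntArg's rule.
    uint64_t parseSize(std::string s) {
      uint64_t multiplier = 1;
      if (!s.empty() && std::isalpha(static_cast<unsigned char>(s.back()))) {
        switch (std::toupper(static_cast<unsigned char>(s.back()))) {
          case 'K': multiplier = 1ULL << 10; break;
          case 'M': multiplier = 1ULL << 20; break;
          case 'G': multiplier = 1ULL << 30; break;
          case 'T': multiplier = 1ULL << 40; break;
          default: throw std::invalid_argument("invalid unit specifier");
        }
        s.pop_back();
      }
      return std::stoull(s) * multiplier;
    }

    int main() {
      std::cout << parseSize("8M") << "\n";  // 8388608
      std::cout << parseSize("2G") << "\n";  // 2147483648
    }
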
*/ - struct sigaction act; - sigfillset(&act.sa_mask); - act.sa_handler = SIG_DFL; - act.sa_flags = 0; - if (sigaction(SIGSEGV, &act, nullptr) != 0) { - abort(); - } -} - -void detectStackOverflow() { -#if defined(SA_SIGINFO) && defined(SA_ONSTACK) - /* Install a SIGSEGV handler to detect stack overflows. This - requires an alternative stack, otherwise the signal cannot be - delivered when we're out of stack space. */ - stack_t stack; - stack.ss_size = 4096 * 4 + MINSIGSTKSZ; - static auto stackBuf = std::make_unique>(stack.ss_size); - stack.ss_sp = stackBuf->data(); - if (stack.ss_sp == nullptr) { - throw Error("cannot allocate alternative stack"); - } - stack.ss_flags = 0; - if (sigaltstack(&stack, nullptr) == -1) { - throw SysError("cannot set alternative stack"); - } - - struct sigaction act; - sigfillset(&act.sa_mask); - act.sa_sigaction = sigsegvHandler; - act.sa_flags = SA_SIGINFO | SA_ONSTACK; - if (sigaction(SIGSEGV, &act, nullptr) != 0) { - throw SysError("resetting SIGSEGV"); - } -#endif -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/CMakeLists.txt b/third_party/nix/src/libstore/CMakeLists.txt deleted file mode 100644 index 246377cc9b..0000000000 --- a/third_party/nix/src/libstore/CMakeLists.txt +++ /dev/null @@ -1,127 +0,0 @@ -# -*- mode: cmake; -*- -add_library(nixstore SHARED) -add_library(nixstoremock SHARED) -set_property(TARGET nixstore PROPERTY CXX_STANDARD 17) -set_property(TARGET nixstoremock PROPERTY CXX_STANDARD 17) -include_directories(${PROJECT_BINARY_DIR}) # for config.h -target_include_directories(nixstore PUBLIC "${nix_SOURCE_DIR}/src") -target_include_directories(nixstoremock PUBLIC "${nix_SOURCE_DIR}/src") - -# The database schema is stored in schema.sql, but needs to be -# available during the build as static data. -# -# These commands create an includeable source-file out of it. 
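
The mechanism behind detectStackOverflow above is easiest to see in a standalone program: register an alternate signal stack with sigaltstack() and install a SIGSEGV handler with SA_ONSTACK, so the handler can still run when the normal stack is exhausted. This sketch deliberately omits the non-portable fault-address/stack-pointer comparison from the deleted code and only demonstrates the delivery mechanism:

    #include <signal.h>
    #include <unistd.h>

    static char altStack[64 * 1024];  // alternate stack for the handler

    static void handler(int) {
      const char msg[] = "caught SIGSEGV (likely stack overflow)\n";
      write(STDERR_FILENO, msg, sizeof(msg) - 1);  // async-signal-safe
      _exit(1);
    }

    // Deep recursion with a per-frame buffer, to exhaust the stack.
    static int recurse(int n) {
      volatile char pad[1024];
      pad[0] = static_cast<char>(n);
      return recurse(n + 1) + pad[0];
    }

    int main() {
      stack_t ss{};
      ss.ss_sp = altStack;
      ss.ss_size = sizeof(altStack);
      if (sigaltstack(&ss, nullptr) == -1) return 1;

      struct sigaction act{};
      act.sa_handler = handler;
      act.sa_flags = SA_ONSTACK;  // deliver SIGSEGV on the alternate stack
      sigfillset(&act.sa_mask);
      if (sigaction(SIGSEGV, &act, nullptr) == -1) return 1;

      return recurse(0);
    }
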
-file(READ "schema.sql" NIX_SCHEMA) - -string(CONFIGURE - "#pragma once - namespace nix { - constexpr char kNixSqlSchema[] = R\"(${NIX_SCHEMA})\"; - }" - NIX_SCHEMA_GEN) - -file(WRITE ${PROJECT_BINARY_DIR}/generated/schema.sql.hh "${NIX_SCHEMA_GEN}") - -set(HEADER_FILES - binary-cache-store.hh - builtins.hh - crypto.hh - derivations.hh - download.hh - fs-accessor.hh - globals.hh - local-store.hh - machines.hh - nar-accessor.hh - nar-info-disk-cache.hh - nar-info.hh - parsed-derivations.hh - pathlocks.hh - profiles.hh - references.hh - remote-fs-accessor.hh - remote-store.hh - rpc-store.hh - s3-binary-cache-store.hh - s3.hh - serve-protocol.hh - sqlite.hh - ssh.hh - store-api.hh - worker-protocol.hh -) - -target_sources(nixstore - PUBLIC - ${HEADER_FILES} - - PRIVATE - ${PROJECT_BINARY_DIR}/generated/schema.sql.hh - binary-cache-store.cc - build.cc - crypto.cc - derivations.cc - download.cc - export-import.cc - gc.cc - globals.cc - http-binary-cache-store.cc - legacy-ssh-store.cc - local-binary-cache-store.cc - local-fs-store.cc - local-store.cc - machines.cc - misc.cc - nar-accessor.cc - nar-info.cc - nar-info-disk-cache.cc - optimise-store.cc - parsed-derivations.cc - pathlocks.cc - profiles.cc - references.cc - remote-fs-accessor.cc - remote-store.cc - rpc-store.cc - s3-binary-cache-store.cc - sqlite.cc - ssh.cc - ssh-store.cc - store-api.cc - builtins/buildenv.cc - builtins/fetchurl.cc -) - -target_link_libraries(nixstore - nixproto - nixutil - - CURL::libcurl - SQLite::SQLite3 - absl::strings - glog - seccomp - sodium -) - -target_sources(nixstoremock - PUBLIC - mock-binary-cache-store.hh - - PRIVATE - mock-binary-cache-store.cc -) - -target_link_libraries(nixstoremock - nixstore - - absl::btree - absl::flat_hash_map - glog -) - -configure_file("nix-store.pc.in" "${PROJECT_BINARY_DIR}/nix-store.pc" @ONLY) -INSTALL(FILES "${PROJECT_BINARY_DIR}/nix-store.pc" DESTINATION "${PKGCONFIG_INSTALL_DIR}") - -INSTALL(FILES ${HEADER_FILES} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/nix/libstore) -INSTALL(TARGETS nixstore nixstoremock DESTINATION ${CMAKE_INSTALL_LIBDIR}) diff --git a/third_party/nix/src/libstore/binary-cache-store.cc b/third_party/nix/src/libstore/binary-cache-store.cc deleted file mode 100644 index 0b04e972da..0000000000 --- a/third_party/nix/src/libstore/binary-cache-store.cc +++ /dev/null @@ -1,396 +0,0 @@ -#include "libstore/binary-cache-store.hh" - -#include -#include -#include - -#include -#include -#include -#include - -#include "libstore/derivations.hh" -#include "libstore/fs-accessor.hh" -#include "libstore/globals.hh" -#include "libstore/nar-accessor.hh" -#include "libstore/nar-info-disk-cache.hh" -#include "libstore/nar-info.hh" -#include "libstore/remote-fs-accessor.hh" -#include "libutil/archive.hh" -#include "libutil/compression.hh" -#include "libutil/json.hh" -#include "libutil/sync.hh" - -namespace nix { - -BinaryCacheStore::BinaryCacheStore(const Params& params) : Store(params) { - if (secretKeyFile != "") { - const std::string& secret_key_file = secretKeyFile; - secretKey = std::make_unique(readFile(secret_key_file)); - } - - StringSink sink; - sink << std::string(kNarVersionMagic1); - narMagic = *sink.s; -} - -void BinaryCacheStore::init() { - std::string cacheInfoFile = "nix-cache-info"; - - auto cacheInfo = getFile(cacheInfoFile); - if (!cacheInfo) { - upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", - "text/x-nix-cache-info"); - } else { - for (auto& line : - absl::StrSplit(*cacheInfo, absl::ByChar('\n'), absl::SkipEmpty())) { - size_t colon = 
line.find(':'); - if (colon == std::string::npos) { - continue; - } - auto name = line.substr(0, colon); - auto value = - absl::StripAsciiWhitespace(line.substr(colon + 1, std::string::npos)); - if (name == "StoreDir") { - if (value != storeDir) { - throw Error(format("binary cache '%s' is for Nix stores with prefix " - "'%s', not '%s'") % - getUri() % value % storeDir); - } - } else if (name == "WantMassQuery") { - wantMassQuery_ = value == "1"; - } else if (name == "Priority") { - if (!absl::SimpleAtoi(value, &priority)) { - LOG(WARNING) << "Invalid 'Priority' value: " << value; - } - } - } - } -} - -void BinaryCacheStore::getFile( - const std::string& path, - Callback> callback) noexcept { - try { - callback(getFile(path)); - } catch (...) { - callback.rethrow(); - } -} - -void BinaryCacheStore::getFile(const std::string& path, Sink& sink) { - std::promise> promise; - getFile(path, Callback>{ - [&](std::future> result) { - try { - promise.set_value(result.get()); - } catch (...) { - promise.set_exception(std::current_exception()); - } - }}); - auto data = promise.get_future().get(); - sink(reinterpret_cast(data->data()), data->size()); -} - -std::shared_ptr BinaryCacheStore::getFile( - const std::string& path) { - StringSink sink; - try { - getFile(path, sink); - } catch (NoSuchBinaryCacheFile&) { - return nullptr; - } - return sink.s; -} - -Path BinaryCacheStore::narInfoFileFor(const Path& storePath) { - assertStorePath(storePath); - return storePathToHash(storePath) + ".narinfo"; -} - -void BinaryCacheStore::writeNarInfo(const ref& narInfo) { - auto narInfoFile = narInfoFileFor(narInfo->path); - - upsertFile(narInfoFile, narInfo->to_string(), "text/x-nix-narinfo"); - - auto hashPart = storePathToHash(narInfo->path); - - { - auto state_(state.lock()); - state_->pathInfoCache.upsert(hashPart, std::shared_ptr(narInfo)); - } - - if (diskCache) { - diskCache->upsertNarInfo(getUri(), hashPart, - std::shared_ptr(narInfo)); - } -} - -void BinaryCacheStore::addToStore(const ValidPathInfo& info, - const ref& nar, - RepairFlag repair, CheckSigsFlag checkSigs, - std::shared_ptr accessor) { - if ((repair == 0u) && isValidPath(info.path)) { - return; - } - - /* Verify that all references are valid. This may do some .narinfo - reads, but typically they'll already be cached. */ - for (auto& ref : info.references) { - try { - if (ref != info.path) { - queryPathInfo(ref); - } - } catch (InvalidPath&) { - throw Error(format("cannot add '%s' to the binary cache because the " - "reference '%s' is not valid") % - info.path % ref); - } - } - - assert(nar->compare(0, narMagic.size(), narMagic) == 0); - - auto narInfo = make_ref(info); - - narInfo->narSize = nar->size(); - narInfo->narHash = hashString(htSHA256, *nar); - - if (info.narHash && info.narHash != narInfo->narHash) { - throw Error( - format("refusing to copy corrupted path '%1%' to binary cache") % - info.path); - } - - auto accessor_ = std::dynamic_pointer_cast(accessor); - - /* Optionally write a JSON file containing a listing of the - contents of the NAR. 
*/ - if (writeNARListing) { - std::ostringstream jsonOut; - - { - JSONObject jsonRoot(jsonOut); - jsonRoot.attr("version", 1); - - auto narAccessor = makeNarAccessor(nar); - - if (accessor_) { - accessor_->addToCache(info.path, *nar, narAccessor); - } - - { - auto res = jsonRoot.placeholder("root"); - listNar(res, narAccessor, "", true); - } - } - - upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(), - "application/json"); - } - - else { - if (accessor_) { - accessor_->addToCache(info.path, *nar, makeNarAccessor(nar)); - } - } - - /* Compress the NAR. */ - narInfo->compression = compression; - auto now1 = std::chrono::steady_clock::now(); - auto narCompressed = compress(compression, *nar, parallelCompression); - auto now2 = std::chrono::steady_clock::now(); - narInfo->fileHash = hashString(htSHA256, *narCompressed); - narInfo->fileSize = narCompressed->size(); - - auto duration = - std::chrono::duration_cast(now2 - now1) - .count(); - DLOG(INFO) << "copying path '" << narInfo->path << "' (" << narInfo->narSize - << " bytes, compressed " - << ((1.0 - - static_cast(narCompressed->size()) / nar->size()) * - 100.0) - << "% in " << duration << "ms) to binary cache"; - - /* Atomically write the NAR file. */ - narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar" + - (compression == "xz" ? ".xz" - : compression == "bzip2" ? ".bz2" - : compression == "br" ? ".br" - : ""); - if ((repair != 0u) || !fileExists(narInfo->url)) { - stats.narWrite++; - upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar"); - } else { - stats.narWriteAverted++; - } - - stats.narWriteBytes += nar->size(); - stats.narWriteCompressedBytes += narCompressed->size(); - stats.narWriteCompressionTimeMs += duration; - - /* Atomically write the NAR info file.*/ - if (secretKey) { - narInfo->sign(*secretKey); - } - - writeNarInfo(narInfo); - - stats.narInfoWrite++; -} - -bool BinaryCacheStore::isValidPathUncached(const Path& storePath) { - // FIXME: this only checks whether a .narinfo with a matching hash - // part exists. So ‘f4kb...-foo’ matches ‘f4kb...-bar’, even - // though they shouldn't. Not easily fixed. - return fileExists(narInfoFileFor(storePath)); -} - -void BinaryCacheStore::narFromPath(const Path& storePath, Sink& sink) { - auto info = queryPathInfo(storePath).cast(); - - uint64_t narSize = 0; - - LambdaSink wrapperSink([&](const unsigned char* data, size_t len) { - sink(data, len); - narSize += len; - }); - - auto decompressor = makeDecompressionSink(info->compression, wrapperSink); - - try { - getFile(info->url, *decompressor); - } catch (NoSuchBinaryCacheFile& e) { - throw SubstituteGone(e.what()); - } - - decompressor->finish(); - - stats.narRead++; - // stats.narReadCompressedBytes += nar->size(); // FIXME - stats.narReadBytes += narSize; -} - -void BinaryCacheStore::queryPathInfoUncached( - const Path& storePath, - Callback> callback) noexcept { - auto uri = getUri(); - LOG(INFO) << "querying info about '" << storePath << "' on '" << uri << "'"; - - auto narInfoFile = narInfoFileFor(storePath); - - auto callbackPtr = std::make_shared(std::move(callback)); - - getFile(narInfoFile, - Callback>( - [=](std::future> fut) { - try { - auto data = fut.get(); - - if (!data) { - return (*callbackPtr)(nullptr); - } - - stats.narInfoRead++; - - (*callbackPtr)(std::shared_ptr( - std::make_shared(*this, *data, narInfoFile))); - - } catch (...) 
{ - callbackPtr->rethrow(); - } - })); -} - -Path BinaryCacheStore::addToStore(const std::string& name, const Path& srcPath, - bool recursive, HashType hashAlgo, - PathFilter& filter, RepairFlag repair) { - // FIXME: some cut&paste from LocalStore::addToStore(). - - /* Read the whole path into memory. This is not a very scalable - method for very large paths, but `copyPath' is mainly used for - small files. */ - StringSink sink; - Hash h; - if (recursive) { - dumpPath(srcPath, sink, filter); - h = hashString(hashAlgo, *sink.s); - } else { - auto s = readFile(srcPath); - dumpString(s, sink); - h = hashString(hashAlgo, s); - } - - ValidPathInfo info; - info.path = makeFixedOutputPath(recursive, h, name); - - addToStore(info, sink.s, repair, CheckSigs, nullptr); - - return info.path; -} - -Path BinaryCacheStore::addTextToStore(const std::string& name, - const std::string& s, - const PathSet& references, - RepairFlag repair) { - ValidPathInfo info; - info.path = computeStorePathForText(name, s, references); - info.references = references; - - if ((repair != 0u) || !isValidPath(info.path)) { - StringSink sink; - dumpString(s, sink); - addToStore(info, sink.s, repair, CheckSigs, nullptr); - } - - return info.path; -} - -ref BinaryCacheStore::getFSAccessor() { - return make_ref(ref(shared_from_this()), - localNarCache); -} - -void BinaryCacheStore::addSignatures(const Path& storePath, - const StringSet& sigs) { - /* Note: this is inherently racy since there is no locking on - binary caches. In particular, with S3 this unreliable, even - when addSignatures() is called sequentially on a path, because - S3 might return an outdated cached version. */ - - auto narInfo = make_ref((NarInfo&)*queryPathInfo(storePath)); - - narInfo->sigs.insert(sigs.begin(), sigs.end()); - - auto narInfoFile = narInfoFileFor(narInfo->path); - - writeNarInfo(narInfo); -} - -std::shared_ptr BinaryCacheStore::getBuildLog(const Path& path) { - Path drvPath; - - if (isDerivation(path)) { - drvPath = path; - } else { - try { - auto info = queryPathInfo(path); - // FIXME: add a "Log" field to .narinfo - if (info->deriver.empty()) { - return nullptr; - } - drvPath = info->deriver; - } catch (InvalidPath&) { - return nullptr; - } - } - - auto logPath = "log/" + baseNameOf(drvPath); - - DLOG(INFO) << "fetching build log from binary cache '" << getUri() << "/" - << logPath << "'"; - - return getFile(logPath); -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/binary-cache-store.hh b/third_party/nix/src/libstore/binary-cache-store.hh deleted file mode 100644 index 40c636f60a..0000000000 --- a/third_party/nix/src/libstore/binary-cache-store.hh +++ /dev/null @@ -1,115 +0,0 @@ -#pragma once - -#include - -#include "libstore/crypto.hh" -#include "libstore/store-api.hh" -#include "libutil/pool.hh" - -namespace nix { - -struct NarInfo; - -class BinaryCacheStore : public Store { - public: - const Setting compression{ - this, "xz", "compression", - "NAR compression method ('xz', 'bzip2', or 'none')"}; - const Setting writeNARListing{ - this, false, "write-nar-listing", - "whether to write a JSON file listing the files in each NAR"}; - const Setting secretKeyFile{ - this, "", "secret-key", - "path to secret key used to sign the binary cache"}; - const Setting localNarCache{this, "", "local-nar-cache", - "path to a local cache of NARs"}; - const Setting parallelCompression{ - this, false, "parallel-compression", - "enable multi-threading compression, available for xz only currently"}; - - private: - std::unique_ptr secretKey; - 
- protected: - BinaryCacheStore(const Params& params); - - public: - virtual bool fileExists(const std::string& path) = 0; - - virtual void upsertFile(const std::string& path, const std::string& data, - const std::string& mimeType) = 0; - - /* Note: subclasses must implement at least one of the two - following getFile() methods. */ - - /* Dump the contents of the specified file to a sink. */ - virtual void getFile(const std::string& path, Sink& sink); - - /* Fetch the specified file and call the specified callback with - the result. A subclass may implement this asynchronously. */ - virtual void getFile( - const std::string& path, - Callback> callback) noexcept; - - std::shared_ptr getFile(const std::string& path); - - protected: - bool wantMassQuery_ = false; - int priority = 50; - - public: - virtual void init(); - - private: - std::string narMagic; - - std::string narInfoFileFor(const Path& storePath); - - void writeNarInfo(const ref& narInfo); - - public: - bool isValidPathUncached(const Path& path) override; - - void queryPathInfoUncached( - const Path& path, - Callback> callback) noexcept override; - - Path queryPathFromHashPart(const std::string& hashPart) override { - unsupported("queryPathFromHashPart"); - } - - bool wantMassQuery() override { return wantMassQuery_; } - - void addToStore(const ValidPathInfo& info, const ref& nar, - RepairFlag repair, CheckSigsFlag checkSigs, - std::shared_ptr accessor) override; - - Path addToStore(const std::string& name, const Path& srcPath, bool recursive, - HashType hashAlgo, PathFilter& filter, - RepairFlag repair) override; - - Path addTextToStore(const std::string& name, const std::string& s, - const PathSet& references, RepairFlag repair) override; - - void narFromPath(const Path& path, Sink& sink) override; - - BuildResult buildDerivation(std::ostream& /*log_sink*/, const Path& drvPath, - const BasicDerivation& drv, - BuildMode buildMode) override { - unsupported("buildDerivation"); - } - - void ensurePath(const Path& path) override { unsupported("ensurePath"); } - - ref getFSAccessor() override; - - void addSignatures(const Path& storePath, const StringSet& sigs) override; - - std::shared_ptr getBuildLog(const Path& path) override; - - int getPriority() override { return priority; } -}; - -MakeError(NoSuchBinaryCacheFile, Error); - -} // namespace nix diff --git a/third_party/nix/src/libstore/build.cc b/third_party/nix/src/libstore/build.cc deleted file mode 100644 index 1f5752a168..0000000000 --- a/third_party/nix/src/libstore/build.cc +++ /dev/null @@ -1,4820 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libstore/builtins.hh" -#include "libstore/download.hh" -#include "libstore/globals.hh" -#include "libstore/local-store.hh" -#include "libstore/machines.hh" -#include "libstore/nar-info.hh" -#include "libstore/parsed-derivations.hh" -#include "libstore/pathlocks.hh" -#include "libstore/references.hh" -#include "libstore/store-api.hh" -#include "libutil/affinity.hh" -#include "libutil/archive.hh" -#include "libutil/compression.hh" -#include "libutil/finally.hh" -#include "libutil/json.hh" -#include "libutil/util.hh" - -/* Includes required for chroot support. 
*/ -#if __linux__ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#if HAVE_SECCOMP -#include -#endif -#define pivot_root(new_root, put_old) \ - (syscall(SYS_pivot_root, new_root, put_old)) -#endif - -#if HAVE_STATVFS -#include -#endif - -#include -#include - -namespace nix { - -constexpr std::string_view kPathNullDevice = "/dev/null"; - -/* Forward definition. */ -class Worker; -struct HookInstance; - -/* A pointer to a goal. */ -class Goal; -class DerivationGoal; -using GoalPtr = std::shared_ptr; -using WeakGoalPtr = std::weak_ptr; - -struct CompareGoalPtrs { - bool operator()(const GoalPtr& a, const GoalPtr& b) const; -}; - -/* Set of goals. */ -using Goals = std::set; -using WeakGoals = std::list; - -/* A map of paths to goals (and the other way around). */ -using WeakGoalMap = std::map; - -class Goal : public std::enable_shared_from_this { - public: - using ExitCode = enum { - ecBusy, - ecSuccess, - ecFailed, - ecNoSubstituters, - ecIncompleteClosure - }; - - protected: - /* Backlink to the worker. */ - Worker& worker; - - /* Goals that this goal is waiting for. */ - Goals waitees; - - /* Goals waiting for this one to finish. Must use weak pointers - here to prevent cycles. */ - WeakGoals waiters; - - /* Number of goals we are/were waiting for that have failed. */ - unsigned int nrFailed; - - /* Number of substitution goals we are/were waiting for that - failed because there are no substituters. */ - unsigned int nrNoSubstituters; - - /* Number of substitution goals we are/were waiting for that - failed because othey had unsubstitutable references. */ - unsigned int nrIncompleteClosure; - - /* Name of this goal for debugging purposes. */ - std::string name; - - /* Whether the goal is finished. */ - ExitCode exitCode; - - // Output stream for build logs. - // TODO(tazjin): Rename all build_log instances to log_sink. - std::ostream& log_sink() const; - - explicit Goal(Worker& worker) : worker(worker) { - nrFailed = nrNoSubstituters = nrIncompleteClosure = 0; - exitCode = ecBusy; - } - - virtual ~Goal() { trace("goal destroyed"); } - - public: - virtual void work() = 0; - - void addWaitee(const GoalPtr& waitee); - - virtual void waiteeDone(GoalPtr waitee, ExitCode result); - - virtual void handleChildOutput(int fd, const std::string& data) { abort(); } - - virtual void handleEOF(int fd) { abort(); } - - void trace(const FormatOrString& fs); - - std::string getName() { return name; } - - ExitCode getExitCode() { return exitCode; } - - /* Callback in case of a timeout. It should wake up its waiters, - get rid of any running child processes that are being monitored - by the worker (important!), etc. */ - virtual void timedOut() = 0; - - virtual std::string key() = 0; - - protected: - virtual void amDone(ExitCode result); -}; - -bool CompareGoalPtrs::operator()(const GoalPtr& a, const GoalPtr& b) const { - std::string s1 = a->key(); - std::string s2 = b->key(); - return s1 < s2; -} - -using steady_time_point = std::chrono::time_point; - -/* A mapping used to remember for each child process to what goal it - belongs, and file descriptors for receiving log data and output - path creation commands. */ -struct Child { - WeakGoalPtr goal; - Goal* goal2; // ugly hackery - std::set fds; - bool respectTimeouts; - bool inBuildSlot; - steady_time_point lastOutput; /* time we last got output on stdout/stderr */ - steady_time_point timeStarted; -}; - -/* The worker class. 
*/ -class Worker { - private: - /* Note: the worker should only have strong pointers to the - top-level goals. */ - - /* The top-level goals of the worker. */ - Goals topGoals; - - /* Goals that are ready to do some work. */ - WeakGoals awake; - - /* Goals waiting for a build slot. */ - WeakGoals wantingToBuild; - - /* Child processes currently running. */ - std::list children; - - /* Number of build slots occupied. This includes local builds and - substitutions but not remote builds via the build hook. */ - unsigned int nrLocalBuilds; - - /* Maps used to prevent multiple instantiations of a goal for the - same derivation / path. */ - WeakGoalMap derivationGoals; - WeakGoalMap substitutionGoals; - - /* Goals waiting for busy paths to be unlocked. */ - WeakGoals waitingForAnyGoal; - - /* Goals sleeping for a few seconds (polling a lock). */ - WeakGoals waitingForAWhile; - - /* Last time the goals in `waitingForAWhile' where woken up. */ - steady_time_point lastWokenUp; - - /* Cache for pathContentsGood(). */ - std::map pathContentsGoodCache; - - std::ostream& log_sink_; - - public: - /* Set if at least one derivation had a BuildError (i.e. permanent - failure). */ - bool permanentFailure; - - /* Set if at least one derivation had a timeout. */ - bool timedOut; - - /* Set if at least one derivation fails with a hash mismatch. */ - bool hashMismatch; - - /* Set if at least one derivation is not deterministic in check mode. */ - bool checkMismatch; - - LocalStore& store; - - std::unique_ptr hook; - - uint64_t expectedBuilds = 0; - uint64_t doneBuilds = 0; - uint64_t failedBuilds = 0; - uint64_t runningBuilds = 0; - - uint64_t expectedSubstitutions = 0; - uint64_t doneSubstitutions = 0; - uint64_t failedSubstitutions = 0; - uint64_t runningSubstitutions = 0; - uint64_t expectedDownloadSize = 0; - uint64_t doneDownloadSize = 0; - uint64_t expectedNarSize = 0; - uint64_t doneNarSize = 0; - - /* Whether to ask the build hook if it can build a derivation. If - it answers with "decline-permanently", we don't try again. */ - bool tryBuildHook = true; - - Worker(LocalStore& store, std::ostream& log_sink); - ~Worker(); - - /* Make a goal (with caching). */ - GoalPtr makeDerivationGoal(const Path& drvPath, - const StringSet& wantedOutputs, - BuildMode buildMode); - - std::shared_ptr makeBasicDerivationGoal( - const Path& drvPath, const BasicDerivation& drv, BuildMode buildMode); - - GoalPtr makeSubstitutionGoal(const Path& storePath, - RepairFlag repair = NoRepair); - - /* Remove a dead goal. */ - void removeGoal(const GoalPtr& goal); - - /* Wake up a goal (i.e., there is something for it to do). */ - void wakeUp(const GoalPtr& goal); - - /* Return the number of local build and substitution processes - currently running (but not remote builds via the build - hook). */ - unsigned int getNrLocalBuilds(); - - /* Registers a running child process. `inBuildSlot' means that - the process counts towards the jobs limit. */ - void childStarted(const GoalPtr& goal, const std::set& fds, - bool inBuildSlot, bool respectTimeouts); - - /* Unregisters a running child process. `wakeSleepers' should be - false if there is no sense in waking up goals that are sleeping - because they can't run yet (e.g., there is no free build slot, - or the hook would still say `postpone'). */ - void childTerminated(Goal* goal, bool wakeSleepers = true); - - /* Put `goal' to sleep until a build slot becomes available (which - might be right away). */ - void waitForBuildSlot(const GoalPtr& goal); - - /* Wait for any goal to finish. 
Pretty indiscriminate way to - wait for some resource that some other goal is holding. */ - void waitForAnyGoal(GoalPtr goal); - - /* Wait for a few seconds and then retry this goal. Used when - waiting for a lock held by another process. This kind of - polling is inefficient, but POSIX doesn't really provide a way - to wait for multiple locks in the main select() loop. */ - void waitForAWhile(GoalPtr goal); - - /* Loop until the specified top-level goals have finished. */ - void run(const Goals& topGoals); - - /* Wait for input to become available. */ - void waitForInput(); - - unsigned int exitStatus(); - - /* Check whether the given valid path exists and has the right - contents. */ - bool pathContentsGood(const Path& path); - - void markContentsGood(const Path& path); - - std::ostream& log_sink() const { return log_sink_; }; -}; - -////////////////////////////////////////////////////////////////////// - -void addToWeakGoals(WeakGoals& goals, const GoalPtr& p) { - // FIXME: necessary? - // FIXME: O(n) - for (auto& i : goals) { - if (i.lock() == p) { - return; - } - } - goals.push_back(p); -} - -std::ostream& Goal::log_sink() const { return worker.log_sink(); } - -void Goal::addWaitee(const GoalPtr& waitee) { - waitees.insert(waitee); - addToWeakGoals(waitee->waiters, shared_from_this()); -} - -void Goal::waiteeDone(GoalPtr waitee, ExitCode result) { - assert(waitees.find(waitee) != waitees.end()); - waitees.erase(waitee); - - trace(format("waitee '%1%' done; %2% left") % waitee->name % waitees.size()); - - if (result == ecFailed || result == ecNoSubstituters || - result == ecIncompleteClosure) { - ++nrFailed; - } - - if (result == ecNoSubstituters) { - ++nrNoSubstituters; - } - - if (result == ecIncompleteClosure) { - ++nrIncompleteClosure; - } - - if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) { - /* If we failed and keepGoing is not set, we remove all - remaining waitees. */ - for (auto& goal : waitees) { - WeakGoals waiters2; - for (auto& j : goal->waiters) { - if (j.lock() != shared_from_this()) { - waiters2.push_back(j); - } - } - goal->waiters = waiters2; - } - waitees.clear(); - - worker.wakeUp(shared_from_this()); - } -} - -void Goal::amDone(ExitCode result) { - trace("done"); - assert(exitCode == ecBusy); - assert(result == ecSuccess || result == ecFailed || - result == ecNoSubstituters || result == ecIncompleteClosure); - exitCode = result; - for (auto& i : waiters) { - GoalPtr goal = i.lock(); - if (goal) { - goal->waiteeDone(shared_from_this(), result); - } - } - waiters.clear(); - worker.removeGoal(shared_from_this()); -} - -void Goal::trace(const FormatOrString& fs) { - DLOG(INFO) << name << ": " << fs.s; -} - -////////////////////////////////////////////////////////////////////// - -/* Common initialisation performed in child processes. */ -static void commonChildInit(Pipe& logPipe) { - restoreSignals(); - - /* Put the child in a separate session (and thus a separate - process group) so that it has no controlling terminal (meaning - that e.g. ssh cannot open /dev/tty) and it doesn't receive - terminal signals. */ - if (setsid() == -1) { - throw SysError(format("creating a new session")); - } - - /* Dup the write side of the logger pipe into stderr. */ - if (dup2(logPipe.writeSide.get(), STDERR_FILENO) == -1) { - throw SysError("cannot pipe standard error into log file"); - } - - /* Dup stderr to stdout. */ - if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1) { - throw SysError("cannot dup stderr into stdout"); - } - - /* Reroute stdin to /dev/null. 
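
The waitee/waiter bookkeeping in Goal above follows a common pattern: a goal keeps strong (shared_ptr) references to the goals it is waiting for, while waitees keep only weak references back to their waiters, so the dependency graph cannot form ownership cycles. The MiniGoal type below is a hypothetical, much reduced sketch of that idea, not the real Goal class:

    #include <iostream>
    #include <memory>
    #include <set>
    #include <string>
    #include <vector>

    struct MiniGoal : std::enable_shared_from_this<MiniGoal> {
      std::string name;
      std::set<std::shared_ptr<MiniGoal>> waitees;   // strong: keeps deps alive
      std::vector<std::weak_ptr<MiniGoal>> waiters;  // weak: avoids cycles

      explicit MiniGoal(std::string n) : name(std::move(n)) {}

      void addWaitee(const std::shared_ptr<MiniGoal>& w) {
        waitees.insert(w);
        w->waiters.push_back(shared_from_this());
      }

      void done() {  // notify everything waiting on this goal
        for (auto& weak : waiters) {
          if (auto g = weak.lock()) {
            g->waitees.erase(shared_from_this());
            if (g->waitees.empty())
              std::cout << g->name << " is now runnable\n";
          }
        }
        waiters.clear();
      }
    };

    int main() {
      auto build = std::make_shared<MiniGoal>("build foo.drv");
      auto fetch = std::make_shared<MiniGoal>("substitute bar");
      build->addWaitee(fetch);
      fetch->done();  // prints: build foo.drv is now runnable
    }
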
*/ - int fdDevNull = open(kPathNullDevice.begin(), O_RDWR); - if (fdDevNull == -1) { - throw SysError(format("cannot open '%1%'") % kPathNullDevice); - } - if (dup2(fdDevNull, STDIN_FILENO) == -1) { - throw SysError("cannot dup null device into stdin"); - } - close(fdDevNull); -} - -void handleDiffHook(uid_t uid, uid_t gid, Path tryA, Path tryB, Path drvPath, - Path tmpDir, std::ostream& log_sink) { - auto diffHook = settings.diffHook; - if (diffHook != "" && settings.runDiffHook) { - try { - RunOptions diffHookOptions( - diffHook, {std::move(tryA), std::move(tryB), std::move(drvPath), - std::move(tmpDir)}); - diffHookOptions.searchPath = true; - diffHookOptions.uid = uid; - diffHookOptions.gid = gid; - diffHookOptions.chdir = "/"; - - auto diffRes = runProgram(diffHookOptions); - if (!statusOk(diffRes.first)) { - throw ExecError(diffRes.first, - fmt("diff-hook program '%1%' %2%", diffHook, - statusToString(diffRes.first))); - } - - if (!diffRes.second.empty()) { - log_sink << absl::StripTrailingAsciiWhitespace(diffRes.second); - } - } catch (Error& error) { - log_sink << "diff hook execution failed: " << error.what(); - } - } -} - -////////////////////////////////////////////////////////////////////// - -class UserLock { - private: - /* POSIX locks suck. If we have a lock on a file, and we open and - close that file again (without closing the original file - descriptor), we lose the lock. So we have to be *very* careful - not to open a lock file on which we are holding a lock. */ - static Sync lockedPaths_; - - Path fnUserLock; - AutoCloseFD fdUserLock; - - std::string user; - uid_t uid; - gid_t gid; - std::vector supplementaryGIDs; - - public: - UserLock(); - ~UserLock(); - - void kill(); - - std::string getUser() { return user; } - uid_t getUID() { - assert(uid); - return uid; - } - uid_t getGID() { - assert(gid); - return gid; - } - std::vector getSupplementaryGIDs() { return supplementaryGIDs; } - - bool enabled() { return uid != 0; } -}; - -Sync UserLock::lockedPaths_; - -UserLock::UserLock() { - assert(settings.buildUsersGroup != ""); - - /* Get the members of the build-users-group. */ - struct group* gr = getgrnam(settings.buildUsersGroup.get().c_str()); - if (gr == nullptr) { - throw Error( - format( - "the group '%1%' specified in 'build-users-group' does not exist") % - settings.buildUsersGroup); - } - gid = gr->gr_gid; - - /* Copy the result of getgrnam. */ - Strings users; - for (char** p = gr->gr_mem; *p != nullptr; ++p) { - DLOG(INFO) << "found build user " << *p; - users.push_back(*p); - } - - if (users.empty()) { - throw Error(format("the build users group '%1%' has no members") % - settings.buildUsersGroup); - } - - /* Find a user account that isn't currently in use for another - build. */ - for (auto& i : users) { - DLOG(INFO) << "trying user " << i; - - struct passwd* pw = getpwnam(i.c_str()); - if (pw == nullptr) { - throw Error(format("the user '%1%' in the group '%2%' does not exist") % - i % settings.buildUsersGroup); - } - - createDirs(settings.nixStateDir + "/userpool"); - - fnUserLock = - (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str(); - - { - auto lockedPaths(lockedPaths_.lock()); - if (lockedPaths->count(fnUserLock) != 0u) { - /* We already have a lock on this one. 
*/ - continue; - } - lockedPaths->insert(fnUserLock); - } - - try { - AutoCloseFD fd( - open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600)); - if (!fd) { - throw SysError(format("opening user lock '%1%'") % fnUserLock); - } - - if (lockFile(fd.get(), ltWrite, false)) { - fdUserLock = std::move(fd); - user = i; - uid = pw->pw_uid; - - /* Sanity check... */ - if (uid == getuid() || uid == geteuid()) { - throw Error(format("the Nix user should not be a member of '%1%'") % - settings.buildUsersGroup); - } - -#if __linux__ - /* Get the list of supplementary groups of this build user. This - is usually either empty or contains a group such as "kvm". */ - supplementaryGIDs.resize(10); - int ngroups = supplementaryGIDs.size(); - int err = getgrouplist(pw->pw_name, pw->pw_gid, - supplementaryGIDs.data(), &ngroups); - if (err == -1) { - throw Error( - format("failed to get list of supplementary groups for '%1%'") % - pw->pw_name); - } - - supplementaryGIDs.resize(ngroups); -#endif - - return; - } - - } catch (...) { - lockedPaths_.lock()->erase(fnUserLock); - } - } - - throw Error(format("all build users are currently in use; " - "consider creating additional users and adding them to " - "the '%1%' group") % - settings.buildUsersGroup); -} - -UserLock::~UserLock() { - auto lockedPaths(lockedPaths_.lock()); - assert(lockedPaths->count(fnUserLock)); - lockedPaths->erase(fnUserLock); -} - -void UserLock::kill() { killUser(uid); } - -////////////////////////////////////////////////////////////////////// - -struct HookInstance { - /* Pipes for talking to the build hook. */ - Pipe toHook; - - /* Pipe for the hook's standard output/error. */ - Pipe fromHook; - - /* Pipe for the builder's standard output/error. */ - Pipe builderOut; - - /* The process ID of the hook. */ - Pid pid; - - FdSink sink; - - HookInstance(); - - ~HookInstance(); -}; - -HookInstance::HookInstance() { - DLOG(INFO) << "starting build hook " << settings.buildHook; - - /* Create a pipe to get the output of the child. */ - fromHook.create(); - - /* Create the communication pipes. */ - toHook.create(); - - /* Create a pipe to get the output of the builder. */ - builderOut.create(); - - /* Fork the hook. */ - pid = startProcess([&]() { - commonChildInit(fromHook); - - if (chdir("/") == -1) { - throw SysError("changing into /"); - } - - /* Dup the communication pipes. */ - if (dup2(toHook.readSide.get(), STDIN_FILENO) == -1) { - throw SysError("dupping to-hook read side"); - } - - /* Use fd 4 for the builder's stdout/stderr. */ - if (dup2(builderOut.writeSide.get(), 4) == -1) { - throw SysError("dupping builder's stdout/stderr"); - } - - /* Hack: pass the read side of that fd to allow build-remote - to read SSH error messages. */ - if (dup2(builderOut.readSide.get(), 5) == -1) { - throw SysError("dupping builder's stdout/stderr"); - } - - Strings args = { - baseNameOf(settings.buildHook), - // std::to_string(verbosity), // TODO(tazjin): what? 
- }; - - execv(settings.buildHook.get().c_str(), stringsToCharPtrs(args).data()); - - throw SysError("executing '%s'", settings.buildHook); - }); - - pid.setSeparatePG(true); - fromHook.writeSide = AutoCloseFD(-1); - toHook.readSide = AutoCloseFD(-1); - - sink = FdSink(toHook.writeSide.get()); - std::map settings; - globalConfig.getSettings(settings); - for (auto& setting : settings) { - sink << 1 << setting.first << setting.second.value; - } - sink << 0; -} - -HookInstance::~HookInstance() { - try { - toHook.writeSide = AutoCloseFD(-1); - if (pid != Pid(-1)) { - pid.kill(); - } - } catch (...) { - ignoreException(); - } -} - -////////////////////////////////////////////////////////////////////// - -using StringRewrites = std::map; - -std::string rewriteStrings(std::string s, const StringRewrites& rewrites) { - for (auto& i : rewrites) { - size_t j = 0; - while ((j = s.find(i.first, j)) != std::string::npos) { - s.replace(j, i.first.size(), i.second); - } - } - return s; -} - -////////////////////////////////////////////////////////////////////// - -using HookReply = enum { rpAccept, rpDecline, rpPostpone }; - -class SubstitutionGoal; - -class DerivationGoal : public Goal { - private: - /* Whether to use an on-disk .drv file. */ - bool useDerivation; - - /* The path of the derivation. */ - Path drvPath; - - /* The specific outputs that we need to build. Empty means all of - them. */ - StringSet wantedOutputs; - - /* Whether additional wanted outputs have been added. */ - bool needRestart = false; - - /* Whether to retry substituting the outputs after building the - inputs. */ - bool retrySubstitution; - - /* The derivation stored at drvPath. */ - std::unique_ptr drv; - - std::unique_ptr parsedDrv; - - /* The remainder is state held during the build. */ - - /* Locks on the output paths. */ - PathLocks outputLocks; - - /* All input paths (that is, the union of FS closures of the - immediate input paths). */ - PathSet inputPaths; - - /* Referenceable paths (i.e., input and output paths). */ - PathSet allPaths; - - /* Outputs that are already valid. If we're repairing, these are - the outputs that are valid *and* not corrupt. */ - PathSet validPaths; - - /* Outputs that are corrupt or not valid. */ - PathSet missingPaths; - - /* User selected for running the builder. */ - std::unique_ptr buildUser; - - /* The process ID of the builder. */ - Pid pid; - - /* The temporary directory. */ - Path tmpDir; - - /* The path of the temporary directory in the sandbox. */ - Path tmpDirInSandbox; - - /* File descriptor for the log file. */ - AutoCloseFD fdLogFile; - std::shared_ptr logFileSink, logSink; - - /* Number of bytes received from the builder's stdout/stderr. */ - unsigned long logSize; - - /* The most recent log lines. */ - std::list logTail; - - std::string currentLogLine; - size_t currentLogLinePos = 0; // to handle carriage return - - std::string currentHookLine; - - /* Pipe for the builder's standard output/error. */ - Pipe builderOut; - - /* Pipe for synchronising updates to the builder user namespace. */ - Pipe userNamespaceSync; - - /* The build hook. */ - std::unique_ptr hook; - - /* Whether we're currently doing a chroot build. */ - bool useChroot = false; - - Path chrootRootDir; - - /* RAII object to delete the chroot directory. */ - std::shared_ptr autoDelChroot; - - /* Whether this is a fixed-output derivation. */ - bool fixedOutput; - - /* Whether to run the build in a private network namespace. 
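
A short usage example for the rewriteStrings helper defined above: every occurrence of each key in the rewrite map is replaced by its value, which is how temporary output paths get rewritten to their final store paths. The store paths below are made up for the example, and this self-contained copy advances the scan position past each replacement, a small safety tweak over the deleted version:

    #include <iostream>
    #include <map>
    #include <string>

    std::string rewriteStrings(std::string s,
                               const std::map<std::string, std::string>& rewrites) {
      for (auto& i : rewrites) {
        size_t j = 0;
        while ((j = s.find(i.first, j)) != std::string::npos) {
          s.replace(j, i.first.size(), i.second);
          j += i.second.size();  // continue after the replacement
        }
      }
      return s;
    }

    int main() {
      std::map<std::string, std::string> rewrites{
          {"/nix/store/aaaa-out", "/nix/store/bbbb-out"}};
      std::cout << rewriteStrings("wrapper for /nix/store/aaaa-out/bin/foo",
                                  rewrites)
                << "\n";  // wrapper for /nix/store/bbbb-out/bin/foo
    }
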
*/ - bool privateNetwork = false; - - using GoalState = void (DerivationGoal::*)(); - GoalState state; - - /* Stuff we need to pass to initChild(). */ - struct ChrootPath { - Path source; - bool optional; - explicit ChrootPath(Path source = "", bool optional = false) - : source(std::move(source)), optional(optional) {} - }; - using DirsInChroot = - std::map; // maps target path to source path - DirsInChroot dirsInChroot; - - using Environment = std::map; - Environment env; - - /* Hash rewriting. */ - StringRewrites inputRewrites, outputRewrites; - using RedirectedOutputs = std::map; - RedirectedOutputs redirectedOutputs; - - BuildMode buildMode; - - /* If we're repairing without a chroot, there may be outputs that - are valid but corrupt. So we redirect these outputs to - temporary paths. */ - PathSet redirectedBadOutputs; - - BuildResult result; - - /* The current round, if we're building multiple times. */ - size_t curRound = 1; - - size_t nrRounds; - - /* Path registration info from the previous round, if we're - building multiple times. Since this contains the hash, it - allows us to compare whether two rounds produced the same - result. */ - std::map prevInfos; - - const uid_t sandboxUid = 1000; - const gid_t sandboxGid = 100; - - const static Path homeDir; - - std::unique_ptr> mcExpectedBuilds, mcRunningBuilds; - - /* The remote machine on which we're building. */ - std::string machineName; - - public: - DerivationGoal(Worker& worker, const Path& drvPath, StringSet wantedOutputs, - BuildMode buildMode); - - DerivationGoal(Worker& worker, const Path& drvPath, - const BasicDerivation& drv, BuildMode buildMode); - - ~DerivationGoal() override; - - /* Whether we need to perform hash rewriting if there are valid output paths. - */ - bool needsHashRewrite(); - - void timedOut() override; - - std::string key() override { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before - "baboon". And substitution goals always happen before - derivation goals (due to "b$"). */ - return "b$" + storePathToName(drvPath) + "$" + drvPath; - } - - void work() override; - - Path getDrvPath() { return drvPath; } - - /* Add wanted outputs to an already existing derivation goal. */ - void addWantedOutputs(const StringSet& outputs); - - BuildResult getResult() { return result; } - - private: - /* The states. */ - void getDerivation(); - void loadDerivation(); - void haveDerivation(); - void outputsSubstituted(); - void closureRepaired(); - void inputsRealised(); - void tryToBuild(); - void buildDone(); - - /* Is the build hook willing to perform the build? */ - HookReply tryBuildHook(); - - /* Start building a derivation. */ - void startBuilder(); - - /* Fill in the environment for the builder. */ - void initEnv(); - - /* Setup tmp dir location. */ - void initTmpDir(); - - /* Write a JSON file containing the derivation attributes. */ - void writeStructuredAttrs(); - - /* Make a file owned by the builder. */ - void chownToBuilder(const Path& path); - - /* Run the builder's process. */ - void runChild(); - - friend int childEntry(void* /*arg*/); - - /* Check that the derivation outputs all exist and register them - as valid. */ - void registerOutputs(); - - /* Check that an output meets the requirements specified by the - 'outputChecks' attribute (or the legacy - '{allowed,disallowed}{References,Requisites}' attributes). */ - void checkOutputs(const std::map& outputs); - - /* Open a log file and a pipe to it. 
*/ - Path openLogFile(); - - /* Close the log file. */ - void closeLogFile(); - - /* Delete the temporary directory, if we have one. */ - void deleteTmpDir(bool force); - - /* Callback used by the worker to write to the log. */ - void handleChildOutput(int fd, const std::string& data) override; - void handleEOF(int fd) override; - void flushLine(); - - /* Return the set of (in)valid paths. */ - PathSet checkPathValidity(bool returnValid, bool checkHash); - - /* Abort the goal if `path' failed to build. */ - bool pathFailed(const Path& path); - - /* Forcibly kill the child process, if any. */ - void killChild(); - - Path addHashRewrite(const Path& path); - - void repairClosure(); - - void amDone(ExitCode result) override { Goal::amDone(result); } - - void done(BuildResult::Status status, const std::string& msg = ""); - - PathSet exportReferences(const PathSet& storePaths); -}; - -const Path DerivationGoal::homeDir = "/homeless-shelter"; - -DerivationGoal::DerivationGoal(Worker& worker, const Path& drvPath, - StringSet wantedOutputs, BuildMode buildMode) - : Goal(worker), - useDerivation(true), - drvPath(drvPath), - wantedOutputs(std::move(wantedOutputs)), - buildMode(buildMode) { - state = &DerivationGoal::getDerivation; - name = (format("building of '%1%'") % drvPath).str(); - trace("created"); - - mcExpectedBuilds = - std::make_unique>(worker.expectedBuilds); -} - -DerivationGoal::DerivationGoal(Worker& worker, const Path& drvPath, - const BasicDerivation& drv, BuildMode buildMode) - : Goal(worker), - useDerivation(false), - drvPath(drvPath), - buildMode(buildMode) { - this->drv = std::make_unique(drv); - state = &DerivationGoal::haveDerivation; - name = (format("building of %1%") % showPaths(drv.outputPaths())).str(); - trace("created"); - - mcExpectedBuilds = - std::make_unique>(worker.expectedBuilds); - - /* Prevent the .chroot directory from being - garbage-collected. (See isActiveTempFile() in gc.cc.) */ - worker.store.addTempRoot(drvPath); -} - -DerivationGoal::~DerivationGoal() { - /* Careful: we should never ever throw an exception from a - destructor. */ - try { - killChild(); - } catch (...) { - ignoreException(); - } - try { - deleteTmpDir(false); - } catch (...) { - ignoreException(); - } - try { - closeLogFile(); - } catch (...) { - ignoreException(); - } -} - -inline bool DerivationGoal::needsHashRewrite() { return !useChroot; } - -void DerivationGoal::killChild() { - if (pid != Pid(-1)) { - worker.childTerminated(this); - - if (buildUser) { - /* If we're using a build user, then there is a tricky - race condition: if we kill the build user before the - child has done its setuid() to the build user uid, then - it won't be killed, and we'll potentially lock up in - pid.wait(). So also send a conventional kill to the - child. */ - ::kill(-static_cast(pid), SIGKILL); /* ignore the result */ - buildUser->kill(); - pid.wait(); - } else { - pid.kill(); - } - - assert(pid == Pid(-1)); - } - - hook.reset(); -} - -void DerivationGoal::timedOut() { - killChild(); - done(BuildResult::TimedOut); -} - -void DerivationGoal::work() { (this->*state)(); } - -void DerivationGoal::addWantedOutputs(const StringSet& outputs) { - /* If we already want all outputs, there is nothing to do. 
*/ - if (wantedOutputs.empty()) { - return; - } - - if (outputs.empty()) { - wantedOutputs.clear(); - needRestart = true; - } else { - for (auto& i : outputs) { - if (wantedOutputs.find(i) == wantedOutputs.end()) { - wantedOutputs.insert(i); - needRestart = true; - } - } - } -} - -void DerivationGoal::getDerivation() { - trace("init"); - - /* The first thing to do is to make sure that the derivation - exists. If it doesn't, it may be created through a - substitute. */ - if (buildMode == bmNormal && worker.store.isValidPath(drvPath)) { - loadDerivation(); - return; - } - - addWaitee(worker.makeSubstitutionGoal(drvPath)); - - state = &DerivationGoal::loadDerivation; -} - -void DerivationGoal::loadDerivation() { - trace("loading derivation"); - - if (nrFailed != 0) { - log_sink() << "cannot build missing derivation '" << drvPath << "'" - << std::endl; - done(BuildResult::MiscFailure); - return; - } - - /* `drvPath' should already be a root, but let's be on the safe - side: if the user forgot to make it a root, we wouldn't want - things being garbage collected while we're busy. */ - worker.store.addTempRoot(drvPath); - - assert(worker.store.isValidPath(drvPath)); - - /* Get the derivation. */ - drv = std::unique_ptr( - new Derivation(worker.store.derivationFromPath(drvPath))); - - haveDerivation(); -} - -void DerivationGoal::haveDerivation() { - trace("have derivation"); - - retrySubstitution = false; - - for (auto& i : drv->outputs) { - worker.store.addTempRoot(i.second.path); - } - - /* Check what outputs paths are not already valid. */ - PathSet invalidOutputs = checkPathValidity(false, buildMode == bmRepair); - - /* If they are all valid, then we're done. */ - if (invalidOutputs.empty() && buildMode == bmNormal) { - done(BuildResult::AlreadyValid); - return; - } - - parsedDrv = std::make_unique(drvPath, *drv); - - /* We are first going to try to create the invalid output paths - through substitutes. If that doesn't work, we'll build - them. */ - if (settings.useSubstitutes && parsedDrv->substitutesAllowed()) { - for (auto& i : invalidOutputs) { - addWaitee(worker.makeSubstitutionGoal( - i, buildMode == bmRepair ? Repair : NoRepair)); - } - } - - if (waitees.empty()) { /* to prevent hang (no wake-up event) */ - outputsSubstituted(); - } else { - state = &DerivationGoal::outputsSubstituted; - } -} - -void DerivationGoal::outputsSubstituted() { - trace("all outputs substituted (maybe)"); - - if (nrFailed > 0 && nrFailed > nrNoSubstituters + nrIncompleteClosure && - !settings.tryFallback) { - done(BuildResult::TransientFailure, - (format("some substitutes for the outputs of derivation '%1%' failed " - "(usually happens due to networking issues); try '--fallback' " - "to build derivation from source ") % - drvPath) - .str()); - return; - } - - /* If the substitutes form an incomplete closure, then we should - build the dependencies of this derivation, but after that, we - can still use the substitutes for this derivation itself. 
*/ - if (nrIncompleteClosure > 0) { - retrySubstitution = true; - } - - nrFailed = nrNoSubstituters = nrIncompleteClosure = 0; - - if (needRestart) { - needRestart = false; - haveDerivation(); - return; - } - - auto nrInvalid = checkPathValidity(false, buildMode == bmRepair).size(); - if (buildMode == bmNormal && nrInvalid == 0) { - done(BuildResult::Substituted); - return; - } - if (buildMode == bmRepair && nrInvalid == 0) { - repairClosure(); - return; - } - if (buildMode == bmCheck && nrInvalid > 0) { - throw Error(format("some outputs of '%1%' are not valid, so checking is " - "not possible") % - drvPath); - } - - /* Otherwise, at least one of the output paths could not be - produced using a substitute. So we have to build instead. */ - - /* Make sure checkPathValidity() from now on checks all - outputs. */ - wantedOutputs = PathSet(); - - /* The inputs must be built before we can build this goal. */ - if (useDerivation) { - for (auto& i : dynamic_cast(drv.get())->inputDrvs) { - addWaitee(worker.makeDerivationGoal( - i.first, i.second, buildMode == bmRepair ? bmRepair : bmNormal)); - } - } - - for (auto& i : drv->inputSrcs) { - if (worker.store.isValidPath(i)) { - continue; - } - if (!settings.useSubstitutes) { - throw Error(format("dependency '%1%' of '%2%' does not exist, and " - "substitution is disabled") % - i % drvPath); - } - addWaitee(worker.makeSubstitutionGoal(i)); - } - - if (waitees.empty()) { /* to prevent hang (no wake-up event) */ - inputsRealised(); - } else { - state = &DerivationGoal::inputsRealised; - } -} - -void DerivationGoal::repairClosure() { - /* If we're repairing, we now know that our own outputs are valid. - Now check whether the other paths in the outputs closure are - good. If not, then start derivation goals for the derivations - that produced those outputs. */ - - /* Get the output closure. */ - PathSet outputClosure; - for (auto& i : drv->outputs) { - if (!wantOutput(i.first, wantedOutputs)) { - continue; - } - worker.store.computeFSClosure(i.second.path, outputClosure); - } - - /* Filter out our own outputs (which we have already checked). */ - for (auto& i : drv->outputs) { - outputClosure.erase(i.second.path); - } - - /* Get all dependencies of this derivation so that we know which - derivation is responsible for which path in the output - closure. */ - PathSet inputClosure; - if (useDerivation) { - worker.store.computeFSClosure(drvPath, inputClosure); - } - std::map outputsToDrv; - for (auto& i : inputClosure) { - if (isDerivation(i)) { - Derivation drv = worker.store.derivationFromPath(i); - for (auto& j : drv.outputs) { - outputsToDrv[j.second.path] = i; - } - } - } - - /* Check each path (slow!). 
*/ - PathSet broken; - for (auto& i : outputClosure) { - if (worker.pathContentsGood(i)) { - continue; - } - log_sink() << "found corrupted or missing path '" << i - << "' in the output closure of '" << drvPath << "'" << std::endl; - Path drvPath2 = outputsToDrv[i]; - if (drvPath2.empty()) { - addWaitee(worker.makeSubstitutionGoal(i, Repair)); - } else { - addWaitee(worker.makeDerivationGoal(drvPath2, PathSet(), bmRepair)); - } - } - - if (waitees.empty()) { - done(BuildResult::AlreadyValid); - return; - } - - state = &DerivationGoal::closureRepaired; -} - -void DerivationGoal::closureRepaired() { - trace("closure repaired"); - if (nrFailed > 0) { - throw Error(format("some paths in the output closure of derivation '%1%' " - "could not be repaired") % - drvPath); - } - done(BuildResult::AlreadyValid); -} - -void DerivationGoal::inputsRealised() { - trace("all inputs realised"); - - if (nrFailed != 0) { - if (!useDerivation) { - throw Error(format("some dependencies of '%1%' are missing") % drvPath); - } - log_sink() << "cannot build derivation '" << drvPath << "': " << nrFailed - << " dependencies couldn't be built" << std::endl; - done(BuildResult::DependencyFailed); - return; - } - - if (retrySubstitution) { - haveDerivation(); - return; - } - - /* Gather information necessary for computing the closure and/or - running the build hook. */ - - /* The outputs are referenceable paths. */ - for (auto& i : drv->outputs) { - log_sink() << "building path " << i.second.path << std::endl; - allPaths.insert(i.second.path); - } - - /* Determine the full set of input paths. */ - - /* First, the input derivations. */ - if (useDerivation) { - for (auto& i : dynamic_cast(drv.get())->inputDrvs) { - /* Add the relevant output closures of the input derivation - `i' as input paths. Only add the closures of output paths - that are specified as inputs. */ - assert(worker.store.isValidPath(i.first)); - Derivation inDrv = worker.store.derivationFromPath(i.first); - for (auto& j : i.second) { - if (inDrv.outputs.find(j) != inDrv.outputs.end()) { - worker.store.computeFSClosure(inDrv.outputs[j].path, inputPaths); - } else { - throw Error(format("derivation '%1%' requires non-existent output " - "'%2%' from input derivation '%3%'") % - drvPath % j % i.first); - } - } - } - } - - /* Second, the input sources. */ - worker.store.computeFSClosure(drv->inputSrcs, inputPaths); - - DLOG(INFO) << "added input paths " << showPaths(inputPaths); - - allPaths.insert(inputPaths.begin(), inputPaths.end()); - - /* Is this a fixed-output derivation? */ - fixedOutput = drv->isFixedOutput(); - - /* Don't repeat fixed-output derivations since they're already - verified by their output hash.*/ - nrRounds = fixedOutput ? 1 : settings.buildRepeat + 1; - - /* Okay, try to build. Note that here we don't wait for a build - slot to become available, since we don't need one if there is a - build hook. */ - state = &DerivationGoal::tryToBuild; - worker.wakeUp(shared_from_this()); - - result = BuildResult(); -} - -void DerivationGoal::tryToBuild() { - trace("trying to build"); - - /* Obtain locks on all output paths. The locks are automatically - released when we exit this function or Nix crashes. If we - can't acquire the lock, then continue; hopefully some other - goal can start a build, and if not, the main loop will sleep a - few seconds and then retry this goal. 
*/ - PathSet lockFiles; - for (auto& outPath : drv->outputPaths()) { - lockFiles.insert(worker.store.toRealPath(outPath)); - } - - if (!outputLocks.lockPaths(lockFiles, "", false)) { - worker.waitForAWhile(shared_from_this()); - return; - } - - /* Now check again whether the outputs are valid. This is because - another process may have started building in parallel. After - it has finished and released the locks, we can (and should) - reuse its results. (Strictly speaking the first check can be - omitted, but that would be less efficient.) Note that since we - now hold the locks on the output paths, no other process can - build this derivation, so no further checks are necessary. */ - validPaths = checkPathValidity(true, buildMode == bmRepair); - if (buildMode != bmCheck && validPaths.size() == drv->outputs.size()) { - DLOG(INFO) << "skipping build of derivation '" << drvPath - << "', someone beat us to it"; - outputLocks.setDeletion(true); - done(BuildResult::AlreadyValid); - return; - } - - missingPaths = drv->outputPaths(); - if (buildMode != bmCheck) { - for (auto& i : validPaths) { - missingPaths.erase(i); - } - } - - /* If any of the outputs already exist but are not valid, delete - them. */ - for (auto& i : drv->outputs) { - Path path = i.second.path; - if (worker.store.isValidPath(path)) { - continue; - } - DLOG(INFO) << "removing invalid path " << path; - deletePath(worker.store.toRealPath(path)); - } - - /* Don't do a remote build if the derivation has the attribute - `preferLocalBuild' set. Also, check and repair modes are only - supported for local builds. */ - bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(); - - auto started = [&]() { - std::string msg; - if (buildMode == bmRepair) { - msg = absl::StrFormat("repairing outputs of '%s'", drvPath); - } else if (buildMode == bmCheck) { - msg = absl::StrFormat("checking outputs of '%s'", drvPath); - } else if (nrRounds > 1) { - msg = absl::StrFormat("building '%s' (round %d/%d)", drvPath, curRound, - nrRounds); - } else { - msg = absl::StrFormat("building '%s'", drvPath); - } - - if (hook) { - absl::StrAppend(&msg, absl::StrFormat(" on '%s'", machineName)); - } - - log_sink() << msg << std::endl; - mcRunningBuilds = - std::make_unique>(worker.runningBuilds); - }; - - /* Is the build hook willing to accept this job? */ - if (!buildLocally) { - switch (tryBuildHook()) { - case rpAccept: - /* Yes, it has started doing so. Wait until we get - EOF from the hook. */ - result.startTime = time(nullptr); // inexact - state = &DerivationGoal::buildDone; - started(); - return; - case rpPostpone: - /* Not now; wait until at least one child finishes or - the wake-up timeout expires. */ - worker.waitForAWhile(shared_from_this()); - outputLocks.unlock(); - return; - case rpDecline: - /* We should do it ourselves. */ - break; - } - } - - /* Make sure that we are allowed to start a build. If this - derivation prefers to be done locally, do it even if - maxBuildJobs is 0. */ - unsigned int curBuilds = worker.getNrLocalBuilds(); - if (curBuilds >= settings.maxBuildJobs && !(buildLocally && curBuilds == 0)) { - worker.waitForBuildSlot(shared_from_this()); - outputLocks.unlock(); - return; - } - - try { - /* Okay, we have to build. 
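The slot check a few lines up lets a derivation that must be built locally proceed even when maxBuildJobs is 0, provided no other local build is running. The same decision, condensed into a hypothetical helper:

/* Decide whether a local build may start now. A 'maxJobs' of 0 normally
   means "never build locally", but a derivation that has to be built
   locally is still allowed through when nothing else is building. */
bool mayStartLocalBuild(unsigned int runningLocalBuilds, unsigned int maxJobs,
                        bool mustBuildLocally) {
  if (runningLocalBuilds < maxJobs) return true;
  return mustBuildLocally && runningLocalBuilds == 0;
}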
*/ - startBuilder(); - - } catch (BuildError& e) { - log_sink() << e.msg() << std::endl; - outputLocks.unlock(); - buildUser.reset(); - worker.permanentFailure = true; - done(BuildResult::InputRejected, e.msg()); - return; - } - - /* This state will be reached when we get EOF on the child's - log pipe. */ - state = &DerivationGoal::buildDone; - - started(); -} - -void replaceValidPath(const Path& storePath, const Path& tmpPath) { - /* We can't atomically replace storePath (the original) with - tmpPath (the replacement), so we have to move it out of the - way first. We'd better not be interrupted here, because if - we're repairing (say) Glibc, we end up with a broken system. */ - Path oldPath = - (format("%1%.old-%2%-%3%") % storePath % getpid() % random()).str(); - if (pathExists(storePath)) { - rename(storePath.c_str(), oldPath.c_str()); - } - if (rename(tmpPath.c_str(), storePath.c_str()) == -1) { - throw SysError(format("moving '%1%' to '%2%'") % tmpPath % storePath); - } - deletePath(oldPath); -} - -MakeError(NotDeterministic, BuildError); - -void DerivationGoal::buildDone() { - trace("build done"); - - /* Release the build user at the end of this function. We don't do - it right away because we don't want another build grabbing this - uid and then messing around with our output. */ - Finally releaseBuildUser([&]() { buildUser.reset(); }); - - /* Since we got an EOF on the logger pipe, the builder is presumed - to have terminated. In fact, the builder could also have - simply have closed its end of the pipe, so just to be sure, - kill it. */ - int status = hook ? hook->pid.kill() : pid.kill(); - - DLOG(INFO) << "builder process for '" << drvPath << "' finished"; - - result.timesBuilt++; - result.stopTime = time(nullptr); - - /* So the child is gone now. */ - worker.childTerminated(this); - - /* Close the read side of the logger pipe. */ - if (hook) { - hook->builderOut.readSide = AutoCloseFD(-1); - hook->fromHook.readSide = AutoCloseFD(-1); - } else { - builderOut.readSide = AutoCloseFD(-1); - } - - /* Close the log file. */ - closeLogFile(); - - /* When running under a build user, make sure that all processes - running under that uid are gone. This is to prevent a - malicious user from leaving behind a process that keeps files - open and modifies them after they have been chown'ed to - root. */ - if (buildUser) { - buildUser->kill(); - } - - bool diskFull = false; - - try { - /* Check the exit status. */ - if (!statusOk(status)) { - /* Heuristically check whether the build failure may have - been caused by a disk full condition. We have no way - of knowing whether the build actually got an ENOSPC. - So instead, check if the disk is (nearly) full now. If - so, we don't mark this build as a permanent failure. */ -#if HAVE_STATVFS - unsigned long long required = - 8ULL * 1024 * 1024; // FIXME: make configurable - struct statvfs st; - if (statvfs(worker.store.realStoreDir.c_str(), &st) == 0 && - static_cast(st.f_bavail) * st.f_bsize < - required) { - diskFull = true; - } - if (statvfs(tmpDir.c_str(), &st) == 0 && - static_cast(st.f_bavail) * st.f_bsize < - required) { - diskFull = true; - } -#endif - - deleteTmpDir(false); - - /* Move paths out of the chroot for easier debugging of - build failures. 
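The disk-space heuristic above is worth seeing in isolation: a failed build is treated as possibly transient when the store or the temporary directory is (nearly) out of space, since the builder's real error is unknowable after the fact. A standalone version with the same 8 MiB threshold; diskNearlyFull is an illustrative name, not a function from this codebase.

#include <sys/statvfs.h>

/* Returns true if the filesystem containing 'path' has fewer than
   'required' bytes available to unprivileged processes. Mirrors the check
   used above to decide whether a failure might be due to a full disk. */
bool diskNearlyFull(const char* path,
                    unsigned long long required = 8ULL * 1024 * 1024) {
  struct statvfs st;
  if (statvfs(path, &st) != 0) return false;  // cannot tell; assume it is fine
  return static_cast<unsigned long long>(st.f_bavail) * st.f_bsize < required;
}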
*/ - if (useChroot && buildMode == bmNormal) { - for (auto& i : missingPaths) { - if (pathExists(chrootRootDir + i)) { - rename((chrootRootDir + i).c_str(), i.c_str()); - } - } - } - - std::string msg = - (format("builder for '%1%' %2%") % drvPath % statusToString(status)) - .str(); - - if (!settings.verboseBuild && !logTail.empty()) { - msg += (format("; last %d log lines:") % logTail.size()).str(); - for (auto& line : logTail) { - msg += "\n " + line; - } - } - - if (diskFull) { - msg += - "\nnote: build failure may have been caused by lack of free disk " - "space"; - } - - throw BuildError(msg); - } - - /* Compute the FS closure of the outputs and register them as - being valid. */ - registerOutputs(); - - if (settings.postBuildHook != "") { - log_sink() << "running post-build-hook '" << settings.postBuildHook - << "' [" << drvPath << "]" << std::endl; - auto outputPaths = drv->outputPaths(); - std::map hookEnvironment = getEnv(); - - hookEnvironment.emplace("DRV_PATH", drvPath); - hookEnvironment.emplace("OUT_PATHS", - absl::StripTrailingAsciiWhitespace( - concatStringsSep(" ", outputPaths))); - - RunOptions opts(settings.postBuildHook, {}); - opts.environment = hookEnvironment; - - struct LogSink : Sink { - std::string currentLine; - - void operator()(const unsigned char* data, size_t len) override { - for (size_t i = 0; i < len; i++) { - auto c = data[i]; - - if (c == '\n') { - flushLine(); - } else { - currentLine += c; - } - } - } - - void flushLine() { - if (settings.verboseBuild) { - LOG(ERROR) << "post-build-hook: " << currentLine; - } - currentLine.clear(); - } - - ~LogSink() override { - if (!currentLine.empty()) { - currentLine += '\n'; - flushLine(); - } - } - }; - LogSink sink; - - opts.standardOut = &sink; - opts.mergeStderrToStdout = true; - runProgram2(opts); - } - - if (buildMode == bmCheck) { - done(BuildResult::Built); - return; - } - - /* Delete unused redirected outputs (when doing hash rewriting). */ - for (auto& i : redirectedOutputs) { - deletePath(i.second); - } - - /* Delete the chroot (if we were using one). */ - autoDelChroot.reset(); /* this runs the destructor */ - - deleteTmpDir(true); - - /* Repeat the build if necessary. */ - if (curRound++ < nrRounds) { - outputLocks.unlock(); - state = &DerivationGoal::tryToBuild; - worker.wakeUp(shared_from_this()); - return; - } - - /* It is now safe to delete the lock files, since all future - lockers will see that the output paths are valid; they will - not create new lock files with the same names as the old - (unlinked) lock files. */ - outputLocks.setDeletion(true); - outputLocks.unlock(); - - } catch (BuildError& e) { - log_sink() << e.msg() << std::endl; - - outputLocks.unlock(); - - BuildResult::Status st = BuildResult::MiscFailure; - - if (hook && WIFEXITED(status) && WEXITSTATUS(status) == 101) { - st = BuildResult::TimedOut; - - } else if (hook && (!WIFEXITED(status) || WEXITSTATUS(status) != 100)) { - } - - else { - st = dynamic_cast(&e) != nullptr - ? BuildResult::NotDeterministic - : statusOk(status) ? BuildResult::OutputRejected - : fixedOutput || diskFull ? BuildResult::TransientFailure - : BuildResult::PermanentFailure; - } - - done(st, e.msg()); - return; - } - - done(BuildResult::Built); -} - -HookReply DerivationGoal::tryBuildHook() { - if (!worker.tryBuildHook || !useDerivation) { - return rpDecline; - } - - if (!worker.hook) { - worker.hook = std::make_unique(); - } - - try { - /* Send the request to the hook. 
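The exchange that follows is a simple line protocol: after the "try" request, everything the hook prints is forwarded to our own stderr until a line starting with "# " arrives, and the rest of that line is the verdict (accept, postpone, decline, decline-permanently). A sketch of that read loop against a generic std::istream rather than the worker's pipe; readHookReply is a hypothetical helper.

#include <iostream>
#include <string>

/* Read the hook's reply from 'in'. Lines that do not start with "# " are
   build-hook log output and are echoed to stderr; the first line that does
   carries the actual verdict. */
std::string readHookReply(std::istream& in) {
  std::string line;
  while (std::getline(in, line)) {
    if (line.rfind("# ", 0) == 0) {
      return line.substr(2);
    }
    std::cerr << line << "\n";
  }
  // EOF with no verdict: this sketch falls back to declining; the real
  // code treats a broken hook pipe as an error and disables the hook.
  return "decline";
}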
*/ - worker.hook->sink << "try" - << (worker.getNrLocalBuilds() < settings.maxBuildJobs ? 1 - : 0) - << drv->platform << drvPath - << parsedDrv->getRequiredSystemFeatures(); - worker.hook->sink.flush(); - - /* Read the first line of input, which should be a word indicating - whether the hook wishes to perform the build. */ - std::string reply; - while (true) { - std::string s = readLine(worker.hook->fromHook.readSide.get()); - if (std::string(s, 0, 2) == "# ") { - reply = std::string(s, 2); - break; - } - s += "\n"; - std::cerr << s; - } - - DLOG(INFO) << "hook reply is " << reply; - - if (reply == "decline") { - return rpDecline; - } - if (reply == "decline-permanently") { - worker.tryBuildHook = false; - worker.hook = nullptr; - return rpDecline; - } else if (reply == "postpone") { - return rpPostpone; - } else if (reply != "accept") { - throw Error(format("bad hook reply '%1%'") % reply); - } - } catch (SysError& e) { - if (e.errNo == EPIPE) { - log_sink() << "build hook died unexpectedly: " - << absl::StripTrailingAsciiWhitespace( - drainFD(worker.hook->fromHook.readSide.get())) - << std::endl; - worker.hook = nullptr; - return rpDecline; - } - throw; - } - - hook = std::move(worker.hook); - - machineName = readLine(hook->fromHook.readSide.get()); - - /* Tell the hook all the inputs that have to be copied to the - remote system. */ - hook->sink << inputPaths; - - /* Tell the hooks the missing outputs that have to be copied back - from the remote system. */ - hook->sink << missingPaths; - - hook->sink = FdSink(); - hook->toHook.writeSide = AutoCloseFD(-1); - - /* Create the log file and pipe. */ - Path logFile = openLogFile(); - - std::set fds; - fds.insert(hook->fromHook.readSide.get()); - fds.insert(hook->builderOut.readSide.get()); - worker.childStarted(shared_from_this(), fds, false, false); - - return rpAccept; -} - -void chmod_(const Path& path, mode_t mode) { - if (chmod(path.c_str(), mode) == -1) { - throw SysError(format("setting permissions on '%1%'") % path); - } -} - -int childEntry(void* arg) { - (static_cast(arg))->runChild(); - return 1; -} - -PathSet DerivationGoal::exportReferences(const PathSet& storePaths) { - PathSet paths; - - for (auto storePath : storePaths) { - /* Check that the store path is valid. */ - if (!worker.store.isInStore(storePath)) { - throw BuildError( - format("'exportReferencesGraph' contains a non-store path '%1%'") % - storePath); - } - - storePath = worker.store.toStorePath(storePath); - - if (inputPaths.count(storePath) == 0u) { - throw BuildError( - "cannot export references of path '%s' because it is not in the " - "input closure of the derivation", - storePath); - } - - worker.store.computeFSClosure(storePath, paths); - } - - /* If there are derivations in the graph, then include their - outputs as well. This is useful if you want to do things - like passing all build-time dependencies of some path to a - derivation that builds a NixOS DVD image. */ - PathSet paths2(paths); - - for (auto& j : paths2) { - if (isDerivation(j)) { - Derivation drv = worker.store.derivationFromPath(j); - for (auto& k : drv.outputs) { - worker.store.computeFSClosure(k.second.path, paths); - } - } - } - - return paths; -} - -static std::once_flag dns_resolve_flag; - -static void preloadNSS() { - /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a - dynamic library load of one of the glibc NSS libraries in a sandboxed - child, which will fail unless the library's already been loaded in the - parent. 
So we force a lookup of an invalid domain to force the NSS - machinery to - load its lookup libraries in the parent before any child gets a chance to. - */ - std::call_once(dns_resolve_flag, []() { - struct addrinfo* res = nullptr; - - if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", - nullptr, &res) != 0) { - if (res != nullptr) { - freeaddrinfo(res); - } - } - }); -} - -void DerivationGoal::startBuilder() { - /* Right platform? */ - if (!parsedDrv->canBuildLocally()) { - throw Error( - "a '%s' with features {%s} is required to build '%s', but I am a '%s' " - "with features {%s}", - drv->platform, - concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()), drvPath, - settings.thisSystem, concatStringsSep(", ", settings.systemFeatures)); - } - - if (drv->isBuiltin()) { - preloadNSS(); - } - - /* Are we doing a chroot build? */ - { - auto noChroot = parsedDrv->getBoolAttr("__noChroot"); - if (settings.sandboxMode == smEnabled) { - if (noChroot) { - throw Error(format("derivation '%1%' has '__noChroot' set, " - "but that's not allowed when 'sandbox' is 'true'") % - drvPath); - } - useChroot = true; - } else if (settings.sandboxMode == smDisabled) { - useChroot = false; - } else if (settings.sandboxMode == smRelaxed) { - useChroot = !fixedOutput && !noChroot; - } - } - - if (worker.store.storeDir != worker.store.realStoreDir) { - useChroot = true; - } - - /* If `build-users-group' is not empty, then we have to build as - one of the members of that group. */ - if (settings.buildUsersGroup != "" && getuid() == 0) { - buildUser = std::make_unique(); - - /* Make sure that no other processes are executing under this - uid. */ - buildUser->kill(); - } - - /* Create a temporary directory where the build will take - place. */ - auto drvName = storePathToName(drvPath); - tmpDir = createTempDir("", "nix-build-" + drvName, false, false, 0700); - - chownToBuilder(tmpDir); - - /* Substitute output placeholders with the actual output paths. */ - for (auto& output : drv->outputs) { - inputRewrites[hashPlaceholder(output.first)] = output.second.path; - } - - /* Construct the environment passed to the builder. */ - initEnv(); - - writeStructuredAttrs(); - - /* Handle exportReferencesGraph(), if set. */ - if (!parsedDrv->getStructuredAttrs()) { - /* The `exportReferencesGraph' feature allows the references graph - to be passed to a builder. This attribute should be a list of - pairs [name1 path1 name2 path2 ...]. The references graph of - each `pathN' will be stored in a text file `nameN' in the - temporary build directory. The text files have the format used - by `nix-store --register-validity'. However, the deriver - fields are left empty. */ - std::string s = get(drv->env, "exportReferencesGraph"); - std::vector ss = - absl::StrSplit(s, absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty()); - if (ss.size() % 2 != 0) { - throw BuildError(absl::StrFormat( - "odd number of tokens %d in 'exportReferencesGraph': '%s'", ss.size(), - s)); - } - for (auto i = ss.begin(); i != ss.end();) { - std::string fileName = *i++; - checkStoreName(fileName); /* !!! abuse of this function */ - Path storePath = *i++; - - /* Write closure info to . */ - writeFile(tmpDir + "/" + fileName, - worker.store.makeValidityRegistration( - exportReferences({storePath}), false, false)); - } - } - - if (useChroot) { - /* Allow a user-configurable set of directories from the - host file system. 
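Each sandbox-paths entry handled in the loop that follows can be written as "path", as "target=source", and with a trailing "?" marking the mount optional. A tiny standalone parser for one entry; SandboxEntry and parseSandboxEntry are illustrative names, not the ChrootPath type used below.

#include <string>

struct SandboxEntry {
  std::string target;      // where the path appears inside the sandbox
  std::string source;      // what is bind-mounted there from the host
  bool optional = false;   // "path?" means: skip silently if missing
};

/* Parse "source", "target=source", or either form with a trailing '?'. */
SandboxEntry parseSandboxEntry(std::string spec) {
  SandboxEntry e;
  if (!spec.empty() && spec.back() == '?') {
    e.optional = true;
    spec.pop_back();
  }
  auto eq = spec.find('=');
  if (eq == std::string::npos) {
    e.target = e.source = spec;
  } else {
    e.target = spec.substr(0, eq);
    e.source = spec.substr(eq + 1);
  }
  return e;
}

For example, parseSandboxEntry("/etc/nix?") yields an optional mount of /etc/nix at itself, which is then skipped without error if the host path does not exist.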
*/ - PathSet dirs = settings.sandboxPaths; - PathSet dirs2 = settings.extraSandboxPaths; - dirs.insert(dirs2.begin(), dirs2.end()); - - dirsInChroot.clear(); - - for (auto i : dirs) { - if (i.empty()) { - continue; - } - bool optional = false; - if (i[i.size() - 1] == '?') { - optional = true; - i.pop_back(); - } - size_t p = i.find('='); - if (p == std::string::npos) { - dirsInChroot[i] = ChrootPath(i, optional); - } else { - dirsInChroot[std::string(i, 0, p)] = - ChrootPath(std::string(i, p + 1), optional); - } - } - dirsInChroot[tmpDirInSandbox] = ChrootPath(tmpDir); - - /* Add the closure of store paths to the chroot. */ - PathSet closure; - for (auto& i : dirsInChroot) { - try { - if (worker.store.isInStore(i.second.source)) { - worker.store.computeFSClosure( - worker.store.toStorePath(i.second.source), closure); - } - } catch (InvalidPath& e) { - } catch (Error& e) { - throw Error(format("while processing 'sandbox-paths': %s") % e.what()); - } - } - for (auto& i : closure) { - dirsInChroot[i] = ChrootPath(i); - } - - PathSet allowedPaths = settings.allowedImpureHostPrefixes; - - /* This works like the above, except on a per-derivation level */ - auto impurePaths = - parsedDrv->getStringsAttr("__impureHostDeps").value_or(Strings()); - - for (auto& i : impurePaths) { - bool found = false; - /* Note: we're not resolving symlinks here to prevent - giving a non-root user info about inaccessible - files. */ - Path canonI = canonPath(i); - /* If only we had a trie to do this more efficiently :) luckily, these are - * generally going to be pretty small */ - for (auto& a : allowedPaths) { - Path canonA = canonPath(a); - if (canonI == canonA || isInDir(canonI, canonA)) { - found = true; - break; - } - } - if (!found) { - throw Error(format("derivation '%1%' requested impure path '%2%', but " - "it was not in allowed-impure-host-deps") % - drvPath % i); - } - - dirsInChroot[i] = ChrootPath(i); - } - - /* Create a temporary directory in which we set up the chroot - environment using bind-mounts. We put it in the Nix store - to ensure that we can create hard-links to non-directory - inputs in the fake Nix store in the chroot (see below). */ - chrootRootDir = worker.store.toRealPath(drvPath) + ".chroot"; - deletePath(chrootRootDir); - - /* Clean up the chroot directory automatically. */ - autoDelChroot = std::make_shared(chrootRootDir); - - DLOG(INFO) << "setting up chroot environment in '" << chrootRootDir << "'"; - - if (mkdir(chrootRootDir.c_str(), 0750) == -1) { - throw SysError(format("cannot create '%1%'") % chrootRootDir); - } - - if (buildUser && - chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1) { - throw SysError(format("cannot change ownership of '%1%'") % - chrootRootDir); - } - - /* Create a writable /tmp in the chroot. Many builders need - this. (Of course they should really respect $TMPDIR - instead.) */ - Path chrootTmpDir = chrootRootDir + "/tmp"; - createDirs(chrootTmpDir); - chmod_(chrootTmpDir, 01777); - - /* Create a /etc/passwd with entries for the build user and the - nobody account. The latter is kind of a hack to support - Samba-in-QEMU. */ - createDirs(chrootRootDir + "/etc"); - - writeFile(chrootRootDir + "/etc/passwd", - fmt("root:x:0:0:Nix build user:%3%:/noshell\n" - "nixbld:x:%1%:%2%:Nix build user:%3%:/noshell\n" - "nobody:x:65534:65534:Nobody:/:/noshell\n", - sandboxUid, sandboxGid, settings.sandboxBuildDir)); - - /* Declare the build user's group so that programs get a consistent - view of the system (e.g., "id -gn"). 
*/ - writeFile(chrootRootDir + "/etc/group", (format("root:x:0:\n" - "nixbld:!:%1%:\n" - "nogroup:x:65534:\n") % - sandboxGid) - .str()); - - /* Create /etc/hosts with localhost entry. */ - if (!fixedOutput) { - writeFile(chrootRootDir + "/etc/hosts", - "127.0.0.1 localhost\n::1 localhost\n"); - } - - /* Make the closure of the inputs available in the chroot, - rather than the whole Nix store. This prevents any access - to undeclared dependencies. Directories are bind-mounted, - while other inputs are hard-linked (since only directories - can be bind-mounted). !!! As an extra security - precaution, make the fake Nix store only writable by the - build user. */ - Path chrootStoreDir = chrootRootDir + worker.store.storeDir; - createDirs(chrootStoreDir); - chmod_(chrootStoreDir, 01775); - - if (buildUser && - chown(chrootStoreDir.c_str(), 0, buildUser->getGID()) == -1) { - throw SysError(format("cannot change ownership of '%1%'") % - chrootStoreDir); - } - - for (auto& i : inputPaths) { - Path r = worker.store.toRealPath(i); - struct stat st; - if (lstat(r.c_str(), &st) != 0) { - throw SysError(format("getting attributes of path '%1%'") % i); - } - if (S_ISDIR(st.st_mode)) { - dirsInChroot[i] = ChrootPath(r); - } else { - Path p = chrootRootDir + i; - DLOG(INFO) << "linking '" << p << "' to '" << r << "'"; - if (link(r.c_str(), p.c_str()) == -1) { - /* Hard-linking fails if we exceed the maximum - link count on a file (e.g. 32000 of ext3), - which is quite possible after a `nix-store - --optimise'. */ - if (errno != EMLINK) { - throw SysError(format("linking '%1%' to '%2%'") % p % i); - } - StringSink sink; - dumpPath(r, sink); - StringSource source(*sink.s); - restorePath(p, source); - } - } - } - - /* If we're repairing, checking or rebuilding part of a - multiple-outputs derivation, it's possible that we're - rebuilding a path that is in settings.dirsInChroot - (typically the dependencies of /bin/sh). Throw them - out. */ - for (auto& i : drv->outputs) { - dirsInChroot.erase(i.second.path); - } - } - - if (needsHashRewrite()) { - if (pathExists(homeDir)) { - throw Error(format("directory '%1%' exists; please remove it") % homeDir); - } - - /* We're not doing a chroot build, but we have some valid - output paths. Since we can't just overwrite or delete - them, we have to do hash rewriting: i.e. in the - environment/arguments passed to the build, we replace the - hashes of the valid outputs with unique dummy strings; - after the build, we discard the redirected outputs - corresponding to the valid outputs, and rewrite the - contents of the new outputs to replace the dummy strings - with the actual hashes. */ - if (!validPaths.empty()) { - for (auto& i : validPaths) { - addHashRewrite(i); - } - } - - /* If we're repairing, then we don't want to delete the - corrupt outputs in advance. So rewrite them as well. */ - if (buildMode == bmRepair) { - for (auto& i : missingPaths) { - if (worker.store.isValidPath(i) && pathExists(i)) { - addHashRewrite(i); - redirectedBadOutputs.insert(i); - } - } - } - } - - if (useChroot && settings.preBuildHook != "" && - (dynamic_cast(drv.get()) != nullptr)) { - DLOG(INFO) << "executing pre-build hook '" << settings.preBuildHook << "'"; - auto args = - useChroot ? 
Strings({drvPath, chrootRootDir}) : Strings({drvPath}); - enum BuildHookState { stBegin, stExtraChrootDirs }; - auto state = stBegin; - auto lines = runProgram(settings.preBuildHook, false, args); - auto lastPos = std::string::size_type{0}; - for (auto nlPos = lines.find('\n'); nlPos != std::string::npos; - nlPos = lines.find('\n', lastPos)) { - auto line = std::string{lines, lastPos, nlPos - lastPos}; - lastPos = nlPos + 1; - if (state == stBegin) { - if (line == "extra-sandbox-paths" || line == "extra-chroot-dirs") { - state = stExtraChrootDirs; - } else { - throw Error(format("unknown pre-build hook command '%1%'") % line); - } - } else if (state == stExtraChrootDirs) { - if (line.empty()) { - state = stBegin; - } else { - auto p = line.find('='); - if (p == std::string::npos) { - dirsInChroot[line] = ChrootPath(line); - } else { - dirsInChroot[std::string(line, 0, p)] = - ChrootPath(std::string(line, p + 1)); - } - } - } - } - } - - /* Run the builder. */ - DLOG(INFO) << "executing builder '" << drv->builder << "'"; - - /* Create the log file. */ - Path logFile = openLogFile(); - - /* Create a pipe to get the output of the builder. */ - // builderOut.create(); - - builderOut.readSide = AutoCloseFD(posix_openpt(O_RDWR | O_NOCTTY)); - if (!builderOut.readSide) { - throw SysError("opening pseudoterminal master"); - } - - std::string slaveName(ptsname(builderOut.readSide.get())); - - if (buildUser) { - if (chmod(slaveName.c_str(), 0600) != 0) { - throw SysError("changing mode of pseudoterminal slave"); - } - - if (chown(slaveName.c_str(), buildUser->getUID(), 0) != 0) { - throw SysError("changing owner of pseudoterminal slave"); - } - } else { - if (grantpt(builderOut.readSide.get()) != 0) { - throw SysError("granting access to pseudoterminal slave"); - } - } - -#if 0 - // Mount the pt in the sandbox so that the "tty" command works. - // FIXME: this doesn't work with the new devpts in the sandbox. - if (useChroot) - dirsInChroot[slaveName] = {slaveName, false}; -#endif - - if (unlockpt(builderOut.readSide.get()) != 0) { - throw SysError("unlocking pseudoterminal"); - } - - builderOut.writeSide = - AutoCloseFD(open(slaveName.c_str(), O_RDWR | O_NOCTTY)); - if (!builderOut.writeSide) { - throw SysError("opening pseudoterminal slave"); - } - - // Put the pt into raw mode to prevent \n -> \r\n translation. - struct termios term; - if (tcgetattr(builderOut.writeSide.get(), &term) != 0) { - throw SysError("getting pseudoterminal attributes"); - } - - cfmakeraw(&term); - - if (tcsetattr(builderOut.writeSide.get(), TCSANOW, &term) != 0) { - throw SysError("putting pseudoterminal into raw mode"); - } - - result.startTime = time(nullptr); - - /* Fork a child to build the package. */ - ProcessOptions options; - -#if __linux__ - if (useChroot) { - /* Set up private namespaces for the build: - - - The PID namespace causes the build to start as PID 1. - Processes outside of the chroot are not visible to those - on the inside, but processes inside the chroot are - visible from the outside (though with different PIDs). - - - The private mount namespace ensures that all the bind - mounts we do will only show up in this process and its - children, and will disappear automatically when we're - done. - - - The private network namespace ensures that the builder - cannot talk to the outside world (or vice versa). It - only has a private loopback interface. (Fixed-output - derivations are not run in a private network namespace - to allow functions like fetchurl to work.) 
- - - The IPC namespace prevents the builder from communicating - with outside processes using SysV IPC mechanisms (shared - memory, message queues, semaphores). It also ensures - that all IPC objects are destroyed when the builder - exits. - - - The UTS namespace ensures that builders see a hostname of - localhost rather than the actual hostname. - - We use a helper process to do the clone() to work around - clone() being broken in multi-threaded programs due to - at-fork handlers not being run. Note that we use - CLONE_PARENT to ensure that the real builder is parented to - us. - */ - - if (!fixedOutput) { - privateNetwork = true; - } - - userNamespaceSync.create(); - - Pid helper(startProcess( - [&]() { - /* Drop additional groups here because we can't do it - after we've created the new user namespace. FIXME: - this means that if we're not root in the parent - namespace, we can't drop additional groups; they will - be mapped to nogroup in the child namespace. There does - not seem to be a workaround for this. (But who can tell - from reading user_namespaces(7)?) - See also https://lwn.net/Articles/621612/. */ - if (getuid() == 0 && setgroups(0, nullptr) == -1) { - throw SysError("setgroups failed"); - } - - size_t stackSize = 1 * 1024 * 1024; - char* stack = static_cast( - mmap(nullptr, stackSize, PROT_WRITE | PROT_READ, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0)); - if (stack == MAP_FAILED) { - throw SysError("allocating stack"); - } - - int flags = CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNS | - CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD; - if (privateNetwork) { - flags |= CLONE_NEWNET; - } - - pid_t child = clone(childEntry, stack + stackSize, flags, this); - if (child == -1 && errno == EINVAL) { - /* Fallback for Linux < 2.13 where CLONE_NEWPID and - CLONE_PARENT are not allowed together. */ - flags &= ~CLONE_NEWPID; - child = clone(childEntry, stack + stackSize, flags, this); - } - if (child == -1 && (errno == EPERM || errno == EINVAL)) { - /* Some distros patch Linux to not allow unpriveleged - * user namespaces. If we get EPERM or EINVAL, try - * without CLONE_NEWUSER and see if that works. - */ - flags &= ~CLONE_NEWUSER; - child = clone(childEntry, stack + stackSize, flags, this); - } - /* Otherwise exit with EPERM so we can handle this in the - parent. This is only done when sandbox-fallback is set - to true (the default). */ - if (child == -1 && (errno == EPERM || errno == EINVAL) && - settings.sandboxFallback) { - _exit(1); - } - if (child == -1) { - throw SysError("cloning builder process"); - } - - writeFull(builderOut.writeSide.get(), std::to_string(child) + "\n"); - _exit(0); - }, - options)); - - int res = helper.wait(); - if (res != 0 && settings.sandboxFallback) { - useChroot = false; - initTmpDir(); - goto fallback; - } else if (res != 0) { - throw Error("unable to start build process"); - } - - userNamespaceSync.readSide = AutoCloseFD(-1); - - pid_t tmp; - if (!absl::SimpleAtoi(readLine(builderOut.readSide.get()), &tmp)) { - abort(); - } - pid = tmp; - - /* Set the UID/GID mapping of the builder's user namespace - such that the sandbox user maps to the build user, or to - the calling user (if build users are disabled). */ - uid_t hostUid = buildUser ? buildUser->getUID() : getuid(); - uid_t hostGid = buildUser ? 
buildUser->getGID() : getgid(); - - writeFile("/proc/" + std::to_string(static_cast(pid)) + "/uid_map", - (format("%d %d 1") % sandboxUid % hostUid).str()); - - writeFile("/proc/" + std::to_string(static_cast(pid)) + "/setgroups", - "deny"); - - writeFile("/proc/" + std::to_string(static_cast(pid)) + "/gid_map", - (format("%d %d 1") % sandboxGid % hostGid).str()); - - /* Signal the builder that we've updated its user - namespace. */ - writeFull(userNamespaceSync.writeSide.get(), "1"); - userNamespaceSync.writeSide = AutoCloseFD(-1); - - } else -#endif - { - fallback: - pid = startProcess([&]() { runChild(); }, options); - } - - /* parent */ - pid.setSeparatePG(true); - builderOut.writeSide = AutoCloseFD(-1); - worker.childStarted(shared_from_this(), {builderOut.readSide.get()}, true, - true); - - /* Check if setting up the build environment failed. */ - while (true) { - std::string msg = readLine(builderOut.readSide.get()); - if (std::string(msg, 0, 1) == "\1") { - if (msg.size() == 1) { - break; - } - throw Error(std::string(msg, 1)); - } - DLOG(INFO) << msg; - } -} - -void DerivationGoal::initTmpDir() { - /* In a sandbox, for determinism, always use the same temporary - directory. */ -#if __linux__ - tmpDirInSandbox = useChroot ? settings.sandboxBuildDir : tmpDir; -#else - tmpDirInSandbox = tmpDir; -#endif - - /* In non-structured mode, add all bindings specified in the - derivation via the environment, except those listed in the - passAsFile attribute. Those are passed as file names pointing - to temporary files containing the contents. Note that - passAsFile is ignored in structure mode because it's not - needed (attributes are not passed through the environment, so - there is no size constraint). */ - if (!parsedDrv->getStructuredAttrs()) { - std::set passAsFile = - absl::StrSplit(get(drv->env, "passAsFile"), absl::ByAnyChar(" \t\n\r"), - absl::SkipEmpty()); - for (auto& i : drv->env) { - if (passAsFile.find(i.first) == passAsFile.end()) { - env[i.first] = i.second; - } else { - auto hash = hashString(htSHA256, i.first); - std::string fn = ".attr-" + hash.to_string(Base32, false); - Path p = tmpDir + "/" + fn; - writeFile(p, rewriteStrings(i.second, inputRewrites)); - chownToBuilder(p); - env[i.first + "Path"] = tmpDirInSandbox + "/" + fn; - } - } - } - - /* For convenience, set an environment pointing to the top build - directory. */ - env["NIX_BUILD_TOP"] = tmpDirInSandbox; - - /* Also set TMPDIR and variants to point to this directory. */ - env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDirInSandbox; - - /* Explicitly set PWD to prevent problems with chroot builds. In - particular, dietlibc cannot figure out the cwd because the - inode of the current directory doesn't appear in .. (because - getdents returns the inode of the mount point). */ - env["PWD"] = tmpDirInSandbox; -} - -void DerivationGoal::initEnv() { - env.clear(); - - /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when - PATH is not set. We don't want this, so we fill it in with some dummy - value. */ - env["PATH"] = "/path-not-set"; - - /* Set HOME to a non-existing path to prevent certain programs from using - /etc/passwd (or NIS, or whatever) to locate the home directory (for - example, wget looks for ~/.wgetrc). I.e., these tools use /etc/passwd - if HOME is not set, but they will just assume that the settings file - they are looking for does not exist if HOME is set but points to some - non-existing path. */ - env["HOME"] = homeDir; - - /* Tell the builder where the Nix store is. 
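initEnv() and initTmpDir() here start from an empty environment and put back only fixed, derivation-controlled values, which is a large part of what makes builds reproducible. A toy version of that construction; makeBuilderEnv and its parameters are hypothetical, and the literal values simply echo the assignments in the surrounding code.

#include <map>
#include <string>

/* Build a minimal, deterministic environment for a builder. Everything not
   set here is simply absent; PATH and HOME get dummy values so tools cannot
   fall back to the host configuration. */
std::map<std::string, std::string> makeBuilderEnv(const std::string& storeDir,
                                                  const std::string& tmpDir,
                                                  unsigned int buildCores) {
  std::map<std::string, std::string> env;
  env["PATH"] = "/path-not-set";
  env["HOME"] = "/homeless-shelter";  // any non-existent path; the code here uses its homeDir constant
  env["NIX_STORE"] = storeDir;
  env["NIX_BUILD_CORES"] = std::to_string(buildCores);
  env["NIX_BUILD_TOP"] = tmpDir;
  env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDir;
  env["PWD"] = tmpDir;
  env["TERM"] = "xterm-256color";
  return env;
}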
Usually they - shouldn't care, but this is useful for purity checking (e.g., - the compiler or linker might only want to accept paths to files - in the store or in the build directory). */ - env["NIX_STORE"] = worker.store.storeDir; - - /* The maximum number of cores to utilize for parallel building. */ - env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str(); - - initTmpDir(); - - /* Compatibility hack with Nix <= 0.7: if this is a fixed-output - derivation, tell the builder, so that for instance `fetchurl' - can skip checking the output. On older Nixes, this environment - variable won't be set, so `fetchurl' will do the check. */ - if (fixedOutput) { - env["NIX_OUTPUT_CHECKED"] = "1"; - } - - /* *Only* if this is a fixed-output derivation, propagate the - values of the environment variables specified in the - `impureEnvVars' attribute to the builder. This allows for - instance environment variables for proxy configuration such as - `http_proxy' to be easily passed to downloaders like - `fetchurl'. Passing such environment variables from the caller - to the builder is generally impure, but the output of - fixed-output derivations is by definition pure (since we - already know the cryptographic hash of the output). */ - if (fixedOutput) { - for (auto& i : - parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings())) { - env[i] = getEnv(i).value_or(""); - } - } - - /* Currently structured log messages piggyback on stderr, but we - may change that in the future. So tell the builder which file - descriptor to use for that. */ - env["NIX_LOG_FD"] = "2"; - - /* Trigger colored output in various tools. */ - env["TERM"] = "xterm-256color"; -} - -static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*"); - -void DerivationGoal::writeStructuredAttrs() { - auto& structuredAttrs = parsedDrv->getStructuredAttrs(); - if (!structuredAttrs) { - return; - } - - auto json = *structuredAttrs; - - /* Add an "outputs" object containing the output paths. */ - nlohmann::json outputs; - for (auto& i : drv->outputs) { - outputs[i.first] = rewriteStrings(i.second.path, inputRewrites); - } - json["outputs"] = outputs; - - /* Handle exportReferencesGraph. */ - auto e = json.find("exportReferencesGraph"); - if (e != json.end() && e->is_object()) { - for (auto i = e->begin(); i != e->end(); ++i) { - std::ostringstream str; - { - JSONPlaceholder jsonRoot(str, true); - PathSet storePaths; - for (auto& p : *i) { - storePaths.insert(p.get()); - } - worker.store.pathInfoToJSON(jsonRoot, exportReferences(storePaths), - false, true); - } - json[i.key()] = nlohmann::json::parse(str.str()); // urgh - } - } - - writeFile(tmpDir + "/.attrs.json", - rewriteStrings(json.dump(), inputRewrites)); - chownToBuilder(tmpDir + "/.attrs.json"); - - /* As a convenience to bash scripts, write a shell file that - maps all attributes that are representable in bash - - namely, strings, integers, nulls, Booleans, and arrays and - objects consisting entirely of those values. (So nested - arrays or objects are not supported.) */ - - auto handleSimpleType = - [](const nlohmann::json& value) -> std::optional { - if (value.is_string()) { - return shellEscape(value); - } - - if (value.is_number()) { - auto f = value.get(); - if (std::ceil(f) == f) { - return std::to_string(value.get()); - } - } - - if (value.is_null()) { - return std::string("''"); - } - - if (value.is_boolean()) { - return value.get() ? 
std::string("1") : std::string(""); - } - - return {}; - }; - - std::string jsonSh; - - for (auto i = json.begin(); i != json.end(); ++i) { - if (!std::regex_match(i.key(), shVarName)) { - continue; - } - - auto& value = i.value(); - - auto s = handleSimpleType(value); - if (s) { - jsonSh += fmt("declare %s=%s\n", i.key(), *s); - - } else if (value.is_array()) { - std::string s2; - bool good = true; - - for (auto i = value.begin(); i != value.end(); ++i) { - auto s3 = handleSimpleType(i.value()); - if (!s3) { - good = false; - break; - } - s2 += *s3; - s2 += ' '; - } - - if (good) { - jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2); - } - } - - else if (value.is_object()) { - std::string s2; - bool good = true; - - for (auto i = value.begin(); i != value.end(); ++i) { - auto s3 = handleSimpleType(i.value()); - if (!s3) { - good = false; - break; - } - s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3); - } - - if (good) { - jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2); - } - } - } - - writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites)); - chownToBuilder(tmpDir + "/.attrs.sh"); -} - -void DerivationGoal::chownToBuilder(const Path& path) { - if (!buildUser) { - return; - } - if (chown(path.c_str(), buildUser->getUID(), buildUser->getGID()) == -1) { - throw SysError(format("cannot change ownership of '%1%'") % path); - } -} - -void setupSeccomp() { -#if __linux__ - if (!settings.filterSyscalls) { - return; - } -#if HAVE_SECCOMP - scmp_filter_ctx ctx; - - if ((ctx = seccomp_init(SCMP_ACT_ALLOW)) == nullptr) { - throw SysError("unable to initialize seccomp mode 2"); - } - - Finally cleanup([&]() { seccomp_release(ctx); }); - - if (nativeSystem == "x86_64-linux" && - seccomp_arch_add(ctx, SCMP_ARCH_X86) != 0) { - throw SysError("unable to add 32-bit seccomp architecture"); - } - - if (nativeSystem == "x86_64-linux" && - seccomp_arch_add(ctx, SCMP_ARCH_X32) != 0) { - throw SysError("unable to add X32 seccomp architecture"); - } - - if (nativeSystem == "aarch64-linux" && - seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0) { - LOG(ERROR) << "unable to add ARM seccomp architecture; this may result in " - << "spurious build failures if running 32-bit ARM processes"; - } - - /* Prevent builders from creating setuid/setgid binaries. */ - for (int perm : {S_ISUID, S_ISGID}) { - if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1, - SCMP_A1(SCMP_CMP_MASKED_EQ, (scmp_datum_t)perm, - (scmp_datum_t)perm)) != 0) { - throw SysError("unable to add seccomp rule"); - } - - if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(fchmod), 1, - SCMP_A1(SCMP_CMP_MASKED_EQ, (scmp_datum_t)perm, - (scmp_datum_t)perm)) != 0) { - throw SysError("unable to add seccomp rule"); - } - - if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(fchmodat), 1, - SCMP_A2(SCMP_CMP_MASKED_EQ, (scmp_datum_t)perm, - (scmp_datum_t)perm)) != 0) { - throw SysError("unable to add seccomp rule"); - } - } - - /* Prevent builders from creating EAs or ACLs. Not all filesystems - support these, and they're not allowed in the Nix store because - they're not representable in the NAR serialisation. */ - if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(setxattr), 0) != - 0 || - seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(lsetxattr), 0) != - 0 || - seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(fsetxattr), 0) != - 0) { - throw SysError("unable to add seccomp rule"); - } - - if (seccomp_attr_set(ctx, SCMP_FLTATR_CTL_NNP, - settings.allowNewPrivileges ? 
0 : 1) != 0) { - throw SysError("unable to set 'no new privileges' seccomp attribute"); - } - - if (seccomp_load(ctx) != 0) { - throw SysError("unable to load seccomp BPF program"); - } -#else - throw Error( - "seccomp is not supported on this platform; " - "you can bypass this error by setting the option 'filter-syscalls' to " - "false, but note that untrusted builds can then create setuid binaries!"); -#endif -#endif -} - -void DerivationGoal::runChild() { - /* Warning: in the child we should absolutely not make any SQLite - calls! */ - - try { /* child */ - - commonChildInit(builderOut); - - try { - setupSeccomp(); - } catch (...) { - if (buildUser) { - throw; - } - } - - bool setUser = true; - - /* Make the contents of netrc available to builtin:fetchurl - (which may run under a different uid and/or in a sandbox). */ - std::string netrcData; - try { - if (drv->isBuiltin() && drv->builder == "builtin:fetchurl") { - const std::string& netrc_file = settings.netrcFile; - netrcData = readFile(netrc_file); - } - } catch (SysError&) { - } - -#if __linux__ - if (useChroot) { - userNamespaceSync.writeSide = AutoCloseFD(-1); - - if (drainFD(userNamespaceSync.readSide.get()) != "1") { - throw Error("user namespace initialisation failed"); - } - - userNamespaceSync.readSide = AutoCloseFD(-1); - - if (privateNetwork) { - /* Initialise the loopback interface. */ - AutoCloseFD fd(socket(PF_INET, SOCK_DGRAM, IPPROTO_IP)); - if (!fd) { - throw SysError("cannot open IP socket"); - } - - struct ifreq ifr; - strncpy(ifr.ifr_name, "lo", sizeof("lo")); - ifr.ifr_flags = IFF_UP | IFF_LOOPBACK | IFF_RUNNING; - if (ioctl(fd.get(), SIOCSIFFLAGS, &ifr) == -1) { - throw SysError("cannot set loopback interface flags"); - } - } - - /* Set the hostname etc. to fixed values. */ - char hostname[] = "localhost"; - if (sethostname(hostname, sizeof(hostname)) == -1) { - throw SysError("cannot set host name"); - } - char domainname[] = "(none)"; // kernel default - if (setdomainname(domainname, sizeof(domainname)) == -1) { - throw SysError("cannot set domain name"); - } - - /* Make all filesystems private. This is necessary - because subtrees may have been mounted as "shared" - (MS_SHARED). (Systemd does this, for instance.) Even - though we have a private mount namespace, mounting - filesystems on top of a shared subtree still propagates - outside of the namespace. Making a subtree private is - local to the namespace, though, so setting MS_PRIVATE - does not affect the outside world. */ - if (mount(nullptr, "/", nullptr, MS_REC | MS_PRIVATE, nullptr) == -1) { - throw SysError("unable to make '/' private mount"); - } - - /* Bind-mount chroot directory to itself, to treat it as a - different filesystem from /, as needed for pivot_root. */ - if (mount(chrootRootDir.c_str(), chrootRootDir.c_str(), nullptr, MS_BIND, - nullptr) == -1) { - throw SysError(format("unable to bind mount '%1%'") % chrootRootDir); - } - - /* Set up a nearly empty /dev, unless the user asked to - bind-mount the host /dev. 
*/ - Strings ss; - if (dirsInChroot.find("/dev") == dirsInChroot.end()) { - createDirs(chrootRootDir + "/dev/shm"); - createDirs(chrootRootDir + "/dev/pts"); - ss.push_back("/dev/full"); - if ((settings.systemFeatures.get().count("kvm") != 0u) && - pathExists("/dev/kvm")) { - ss.push_back("/dev/kvm"); - } - ss.push_back("/dev/null"); - ss.push_back("/dev/random"); - ss.push_back("/dev/tty"); - ss.push_back("/dev/urandom"); - ss.push_back("/dev/zero"); - createSymlink("/proc/self/fd", chrootRootDir + "/dev/fd"); - createSymlink("/proc/self/fd/0", chrootRootDir + "/dev/stdin"); - createSymlink("/proc/self/fd/1", chrootRootDir + "/dev/stdout"); - createSymlink("/proc/self/fd/2", chrootRootDir + "/dev/stderr"); - } - - /* Fixed-output derivations typically need to access the - network, so give them access to /etc/resolv.conf and so - on. */ - if (fixedOutput) { - ss.push_back("/etc/resolv.conf"); - - // Only use nss functions to resolve hosts and - // services. Don’t use it for anything else that may - // be configured for this system. This limits the - // potential impurities introduced in fixed outputs. - writeFile(chrootRootDir + "/etc/nsswitch.conf", - "hosts: files dns\nservices: files\n"); - - ss.push_back("/etc/services"); - ss.push_back("/etc/hosts"); - if (pathExists("/var/run/nscd/socket")) { - ss.push_back("/var/run/nscd/socket"); - } - } - - for (auto& i : ss) { - dirsInChroot.emplace(i, i); - } - - /* Bind-mount all the directories from the "host" - filesystem that we want in the chroot - environment. */ - auto doBind = [&](const Path& source, const Path& target, - bool optional = false) { - DLOG(INFO) << "bind mounting '" << source << "' to '" << target << "'"; - struct stat st; - if (stat(source.c_str(), &st) == -1) { - if (optional && errno == ENOENT) { - return; - } - throw SysError("getting attributes of path '%1%'", source); - } - if (S_ISDIR(st.st_mode)) { - createDirs(target); - } else { - createDirs(dirOf(target)); - writeFile(target, ""); - } - if (mount(source.c_str(), target.c_str(), "", MS_BIND | MS_REC, - nullptr) == -1) { - throw SysError("bind mount from '%1%' to '%2%' failed", source, - target); - } - }; - - for (auto& i : dirsInChroot) { - if (i.second.source == "/proc") { - continue; - } // backwards compatibility - doBind(i.second.source, chrootRootDir + i.first, i.second.optional); - } - - /* Bind a new instance of procfs on /proc. */ - createDirs(chrootRootDir + "/proc"); - if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, - nullptr) == -1) { - throw SysError("mounting /proc"); - } - - /* Mount a new tmpfs on /dev/shm to ensure that whatever - the builder puts in /dev/shm is cleaned up automatically. */ - if (pathExists("/dev/shm") && - mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0, - fmt("size=%s", settings.sandboxShmSize).c_str()) == -1) { - throw SysError("mounting /dev/shm"); - } - - /* Mount a new devpts on /dev/pts. Note that this - requires the kernel to be compiled with - CONFIG_DEVPTS_MULTIPLE_INSTANCES=y (which is the case - if /dev/ptx/ptmx exists). */ - if (pathExists("/dev/pts/ptmx") && - !pathExists(chrootRootDir + "/dev/ptmx") && - (dirsInChroot.count("/dev/pts") == 0u)) { - if (mount("none", (chrootRootDir + "/dev/pts").c_str(), "devpts", 0, - "newinstance,mode=0620") == 0) { - createSymlink("/dev/pts/ptmx", chrootRootDir + "/dev/ptmx"); - - /* Make sure /dev/pts/ptmx is world-writable. With some - Linux versions, it is created with permissions 0. 
*/ - chmod_(chrootRootDir + "/dev/pts/ptmx", 0666); - } else { - if (errno != EINVAL) { - throw SysError("mounting /dev/pts"); - } - doBind("/dev/pts", chrootRootDir + "/dev/pts"); - doBind("/dev/ptmx", chrootRootDir + "/dev/ptmx"); - } - } - - /* Do the chroot(). */ - if (chdir(chrootRootDir.c_str()) == -1) { - throw SysError(format("cannot change directory to '%1%'") % - chrootRootDir); - } - - if (mkdir("real-root", 0) == -1) { - throw SysError("cannot create real-root directory"); - } - - if (pivot_root(".", "real-root") == -1) { - throw SysError(format("cannot pivot old root directory onto '%1%'") % - (chrootRootDir + "/real-root")); - } - - if (chroot(".") == -1) { - throw SysError(format("cannot change root directory to '%1%'") % - chrootRootDir); - } - - if (umount2("real-root", MNT_DETACH) == -1) { - throw SysError("cannot unmount real root filesystem"); - } - - if (rmdir("real-root") == -1) { - throw SysError("cannot remove real-root directory"); - } - - /* Switch to the sandbox uid/gid in the user namespace, - which corresponds to the build user or calling user in - the parent namespace. */ - if (setgid(sandboxGid) == -1) { - throw SysError("setgid failed"); - } - if (setuid(sandboxUid) == -1) { - throw SysError("setuid failed"); - } - - setUser = false; - } -#endif - - if (chdir(tmpDirInSandbox.c_str()) == -1) { - throw SysError(format("changing into '%1%'") % tmpDir); - } - - /* Close all other file descriptors. */ - closeMostFDs({STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO}); - -#if __linux__ - /* Change the personality to 32-bit if we're doing an - i686-linux build on an x86_64-linux machine. */ - struct utsname utsbuf; - uname(&utsbuf); - if (drv->platform == "i686-linux" && - (settings.thisSystem == "x86_64-linux" || - ((strcmp(utsbuf.sysname, "Linux") == 0) && - (strcmp(utsbuf.machine, "x86_64") == 0)))) { - if (personality(PER_LINUX32) == -1) { - throw SysError("cannot set i686-linux personality"); - } - } - - /* Impersonate a Linux 2.6 machine to get some determinism in - builds that depend on the kernel version. */ - if ((drv->platform == "i686-linux" || drv->platform == "x86_64-linux") && - settings.impersonateLinux26) { - int cur = personality(0xffffffff); - if (cur != -1) { - personality(cur | 0x0020000 /* == UNAME26 */); - } - } - - /* Disable address space randomization for improved - determinism. */ - int cur = personality(0xffffffff); - if (cur != -1) { - personality(cur | ADDR_NO_RANDOMIZE); - } -#endif - - /* Disable core dumps by default. */ - struct rlimit limit = {0, RLIM_INFINITY}; - setrlimit(RLIMIT_CORE, &limit); - - // FIXME: set other limits to deterministic values? - - /* Fill in the environment. */ - Strings envStrs; - for (auto& i : env) { - envStrs.push_back( - rewriteStrings(i.first + "=" + i.second, inputRewrites)); - } - - /* If we are running in `build-users' mode, then switch to the - user we allocated above. Make sure that we drop all root - privileges. Note that above we have closed all file - descriptors except std*, so that's safe. Also note that - setuid() when run as root sets the real, effective and - saved UIDs. */ - if (setUser && buildUser) { - /* Preserve supplementary groups of the build user, to allow - admins to specify groups such as "kvm". 
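The block that follows drops root privileges in the conventional order: supplementary groups first, then the gid, then the uid, verifying each step, because once setuid() succeeds the process can no longer change its groups. A compact standalone version with error handling reduced to a bool; dropPrivileges is a hypothetical helper.

#include <grp.h>
#include <unistd.h>
#include <vector>

/* Permanently drop to the given uid/gid. Order matters: groups and gid must
   be changed while we still have the privileges to do so; setuid() comes
   last. Returns false if any step fails or does not stick. */
bool dropPrivileges(uid_t uid, gid_t gid,
                    const std::vector<gid_t>& supplementary) {
  if (setgroups(supplementary.size(), supplementary.data()) == -1) return false;
  if (setgid(gid) == -1 || getgid() != gid || getegid() != gid) return false;
  if (setuid(uid) == -1 || getuid() != uid || geteuid() != uid) return false;
  return true;
}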
*/ - if (!buildUser->getSupplementaryGIDs().empty() && - setgroups(buildUser->getSupplementaryGIDs().size(), - buildUser->getSupplementaryGIDs().data()) == -1) { - throw SysError("cannot set supplementary groups of build user"); - } - - if (setgid(buildUser->getGID()) == -1 || - getgid() != buildUser->getGID() || getegid() != buildUser->getGID()) { - throw SysError("setgid failed"); - } - - if (setuid(buildUser->getUID()) == -1 || - getuid() != buildUser->getUID() || geteuid() != buildUser->getUID()) { - throw SysError("setuid failed"); - } - } - - /* Fill in the arguments. */ - Strings args; - - const char* builder = "invalid"; - - if (!drv->isBuiltin()) { - builder = drv->builder.c_str(); - std::string builderBasename = baseNameOf(drv->builder); - args.push_back(builderBasename); - } - - for (auto& i : drv->args) { - args.push_back(rewriteStrings(i, inputRewrites)); - } - - /* Indicate that we managed to set up the build environment. */ - writeFull(STDERR_FILENO, std::string("\1\n")); - - /* Execute the program. This should not return. */ - if (drv->isBuiltin()) { - try { - BasicDerivation drv2(*drv); - for (auto& e : drv2.env) { - e.second = rewriteStrings(e.second, inputRewrites); - } - - if (drv->builder == "builtin:fetchurl") { - builtinFetchurl(drv2, netrcData); - } else if (drv->builder == "builtin:buildenv") { - builtinBuildenv(drv2); - } else { - throw Error(format("unsupported builtin function '%1%'") % - std::string(drv->builder, 8)); - } - _exit(0); - } catch (std::exception& e) { - writeFull(STDERR_FILENO, "error: " + std::string(e.what()) + "\n"); - _exit(1); - } - } - - execve(builder, stringsToCharPtrs(args).data(), - stringsToCharPtrs(envStrs).data()); - - throw SysError(format("executing '%1%'") % drv->builder); - - } catch (std::exception& e) { - writeFull(STDERR_FILENO, "\1while setting up the build environment: " + - std::string(e.what()) + "\n"); - _exit(1); - } -} - -/* Parse a list of reference specifiers. Each element must either be - a store path, or the symbolic name of the output of the derivation - (such as `out'). */ -PathSet parseReferenceSpecifiers(Store& store, const BasicDerivation& drv, - const Strings& paths) { - PathSet result; - for (auto& i : paths) { - if (store.isStorePath(i)) { - result.insert(i); - } else if (drv.outputs.find(i) != drv.outputs.end()) { - result.insert(drv.outputs.find(i)->second.path); - } else { - throw BuildError( - format("derivation contains an illegal reference specifier '%1%'") % - i); - } - } - return result; -} - -void DerivationGoal::registerOutputs() { - /* When using a build hook, the build hook can register the output - as valid (by doing `nix-store --import'). If so we don't have - to do anything here. */ - if (hook) { - bool allValid = true; - for (auto& i : drv->outputs) { - if (!worker.store.isValidPath(i.second.path)) { - allValid = false; - } - } - if (allValid) { - return; - } - } - - std::map infos; - - /* Set of inodes seen during calls to canonicalisePathMetaData() - for this build's outputs. This needs to be shared between - outputs to allow hard links between outputs. */ - InodesSeen inodesSeen; - - Path checkSuffix = ".check"; - bool keepPreviousRound = settings.keepFailed || settings.runDiffHook; - - std::exception_ptr delayedException; - - /* Check whether the output paths were created, and grep each - output path to determine what other paths it references. Also make all - output paths read-only. 
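The "grep" mentioned in the comment above works because every store path embeds a distinctive hash part, so scanning an output's serialisation for those hash strings reveals which inputs it still refers to at run time. A much-reduced sketch of that idea over an in-memory blob; findReferencedPaths and the hashPartOf map are stand-ins for the real scanForReferences machinery, which streams a NAR dump instead.

#include <map>
#include <set>
#include <string>

using Path = std::string;
using PathSet = std::set<Path>;

/* Return the subset of 'candidates' whose hash part occurs somewhere in
   'contents'. 'hashPartOf' maps a store path to its base-32 hash part and
   here stands in for the real helper that extracts it. */
PathSet findReferencedPaths(const std::string& contents,
                            const PathSet& candidates,
                            const std::map<Path, std::string>& hashPartOf) {
  PathSet found;
  for (const Path& candidate : candidates) {
    auto it = hashPartOf.find(candidate);
    if (it == hashPartOf.end()) continue;
    if (contents.find(it->second) != std::string::npos) {
      found.insert(candidate);
    }
  }
  return found;
}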
*/ - for (auto& i : drv->outputs) { - Path path = i.second.path; - if (missingPaths.find(path) == missingPaths.end()) { - continue; - } - - ValidPathInfo info; - - Path actualPath = path; - if (useChroot) { - actualPath = chrootRootDir + path; - if (pathExists(actualPath)) { - /* Move output paths from the chroot to the Nix store. */ - if (buildMode == bmRepair) { - replaceValidPath(path, actualPath); - } else if (buildMode != bmCheck && - rename(actualPath.c_str(), - worker.store.toRealPath(path).c_str()) == -1) { - throw SysError(format("moving build output '%1%' from the sandbox to " - "the Nix store") % - path); - } - } - if (buildMode != bmCheck) { - actualPath = worker.store.toRealPath(path); - } - } - - if (needsHashRewrite()) { - Path redirected = redirectedOutputs[path]; - if (buildMode == bmRepair && - redirectedBadOutputs.find(path) != redirectedBadOutputs.end() && - pathExists(redirected)) { - replaceValidPath(path, redirected); - } - if (buildMode == bmCheck && !redirected.empty()) { - actualPath = redirected; - } - } - - struct stat st; - if (lstat(actualPath.c_str(), &st) == -1) { - if (errno == ENOENT) { - throw BuildError( - format("builder for '%1%' failed to produce output path '%2%'") % - drvPath % path); - } - throw SysError(format("getting attributes of path '%1%'") % actualPath); - } - -#ifndef __CYGWIN__ - /* Check that the output is not group or world writable, as - that means that someone else can have interfered with the - build. Also, the output should be owned by the build - user. */ - if ((!S_ISLNK(st.st_mode) && ((st.st_mode & (S_IWGRP | S_IWOTH)) != 0u)) || - (buildUser && st.st_uid != buildUser->getUID())) { - throw BuildError(format("suspicious ownership or permission on '%1%'; " - "rejecting this build output") % - path); - } -#endif - - /* Apply hash rewriting if necessary. */ - bool rewritten = false; - if (!outputRewrites.empty()) { - LOG(WARNING) << "rewriting hashes in '" << path << "'; cross fingers"; - - /* Canonicalise first. This ensures that the path we're - rewriting doesn't contain a hard link to /etc/shadow or - something like that. */ - canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, - inodesSeen); - - /* FIXME: this is in-memory. */ - StringSink sink; - dumpPath(actualPath, sink); - deletePath(actualPath); - sink.s = make_ref(rewriteStrings(*sink.s, outputRewrites)); - StringSource source(*sink.s); - restorePath(actualPath, source); - - rewritten = true; - } - - /* Check that fixed-output derivations produced the right - outputs (i.e., the content hash should match the specified - hash). */ - if (fixedOutput) { - bool recursive; - Hash h; - i.second.parseHashInfo(recursive, h); - - if (!recursive) { - /* The output path should be a regular file without - execute permission. */ - if (!S_ISREG(st.st_mode) || (st.st_mode & S_IXUSR) != 0) { - throw BuildError( - format( - "output path '%1%' should be a non-executable regular file") % - path); - } - } - - /* Check the hash. In hash mode, move the path produced by - the derivation to its content-addressed location. */ - Hash h2 = recursive ? hashPath(h.type, actualPath).first - : hashFile(h.type, actualPath); - - Path dest = worker.store.makeFixedOutputPath(recursive, h2, - storePathToName(path)); - - if (h != h2) { - /* Throw an error after registering the path as - valid. 
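The hash-rewriting branch a little further up dumps the output to a NAR held in memory, substitutes the placeholder hashes via rewriteStrings(), and restores the result. The substitution itself is just a multi-pattern replace-all; rewriteAll below is an illustrative stand-in for the real libutil helper.

#include <map>
#include <string>

/* Replace every occurrence of each key in 'rewrites' with its value.
   Replacements are applied left to right and the scan continues after the
   inserted text, so a substituted value is never rewritten again by the
   same key. */
std::string rewriteAll(std::string s,
                       const std::map<std::string, std::string>& rewrites) {
  for (const auto& [from, to] : rewrites) {
    if (from.empty()) continue;
    std::string::size_type pos = 0;
    while ((pos = s.find(from, pos)) != std::string::npos) {
      s.replace(pos, from.size(), to);
      pos += to.size();  // skip past the replacement
    }
  }
  return s;
}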
*/ - worker.hashMismatch = true; - delayedException = std::make_exception_ptr( - BuildError("hash mismatch in fixed-output derivation '%s':\n " - "wanted: %s\n got: %s", - dest, h.to_string(), h2.to_string())); - - Path actualDest = worker.store.toRealPath(dest); - - if (worker.store.isValidPath(dest)) { - std::rethrow_exception(delayedException); - } - - if (actualPath != actualDest) { - PathLocks outputLocks({actualDest}); - deletePath(actualDest); - if (rename(actualPath.c_str(), actualDest.c_str()) == -1) { - throw SysError(format("moving '%1%' to '%2%'") % actualPath % dest); - } - } - - path = dest; - actualPath = actualDest; - } else { - assert(path == dest); - } - - info.ca = makeFixedOutputCA(recursive, h2); - } - - /* Get rid of all weird permissions. This also checks that - all files are owned by the build user, if applicable. */ - canonicalisePathMetaData(actualPath, - buildUser && !rewritten ? buildUser->getUID() : -1, - inodesSeen); - - /* For this output path, find the references to other paths - contained in it. Compute the SHA-256 NAR hash at the same - time. The hash is stored in the database so that we can - verify later on whether nobody has messed with the store. */ - DLOG(INFO) << "scanning for references inside '" << path << "'"; - HashResult hash; - PathSet references = scanForReferences(actualPath, allPaths, hash); - - if (buildMode == bmCheck) { - if (!worker.store.isValidPath(path)) { - continue; - } - auto info = *worker.store.queryPathInfo(path); - if (hash.first != info.narHash) { - worker.checkMismatch = true; - if (settings.runDiffHook || settings.keepFailed) { - Path dst = worker.store.toRealPath(path + checkSuffix); - deletePath(dst); - if (rename(actualPath.c_str(), dst.c_str()) != 0) { - throw SysError(format("renaming '%1%' to '%2%'") % actualPath % - dst); - } - - handleDiffHook(buildUser ? buildUser->getUID() : getuid(), - buildUser ? buildUser->getGID() : getgid(), path, dst, - drvPath, tmpDir, log_sink()); - - throw NotDeterministic( - format("derivation '%1%' may not be deterministic: output '%2%' " - "differs from '%3%'") % - drvPath % path % dst); - } - throw NotDeterministic(format("derivation '%1%' may not be " - "deterministic: output '%2%' differs") % - drvPath % path); - } - - /* Since we verified the build, it's now ultimately - trusted. */ - if (!info.ultimate) { - info.ultimate = true; - worker.store.signPathInfo(info); - worker.store.registerValidPaths({info}); - } - - continue; - } - - /* For debugging, print out the referenced and unreferenced - paths. */ - for (auto& i : inputPaths) { - auto j = references.find(i); - if (j == references.end()) { - DLOG(INFO) << "unreferenced input: '" << i << "'"; - } else { - DLOG(INFO) << "referenced input: '" << i << "'"; - } - } - - if (curRound == nrRounds) { - worker.store.optimisePath( - actualPath); // FIXME: combine with scanForReferences() - worker.markContentsGood(path); - } - - info.path = path; - info.narHash = hash.first; - info.narSize = hash.second; - info.references = references; - info.deriver = drvPath; - info.ultimate = true; - worker.store.signPathInfo(info); - - if (!info.references.empty()) { - info.ca.clear(); - } - - infos[i.first] = info; - } - - if (buildMode == bmCheck) { - return; - } - - /* Apply output checks. 
*/ - checkOutputs(infos); - - /* Compare the result with the previous round, and report which - path is different, if any.*/ - if (curRound > 1 && prevInfos != infos) { - assert(prevInfos.size() == infos.size()); - for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); - ++i, ++j) { - if (!(*i == *j)) { - result.isNonDeterministic = true; - Path prev = i->second.path + checkSuffix; - bool prevExists = keepPreviousRound && pathExists(prev); - auto msg = - prevExists - ? fmt("output '%1%' of '%2%' differs from '%3%' from previous " - "round", - i->second.path, drvPath, prev) - : fmt("output '%1%' of '%2%' differs from previous round", - i->second.path, drvPath); - - handleDiffHook(buildUser ? buildUser->getUID() : getuid(), - buildUser ? buildUser->getGID() : getgid(), prev, - i->second.path, drvPath, tmpDir, log_sink()); - - if (settings.enforceDeterminism) { - throw NotDeterministic(msg); - } - - log_sink() << msg << std::endl; - curRound = nrRounds; // we know enough, bail out early - } - } - } - - /* If this is the first round of several, then move the output out - of the way. */ - if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && - keepPreviousRound) { - for (auto& i : drv->outputs) { - Path prev = i.second.path + checkSuffix; - deletePath(prev); - Path dst = i.second.path + checkSuffix; - if (rename(i.second.path.c_str(), dst.c_str()) != 0) { - throw SysError(format("renaming '%1%' to '%2%'") % i.second.path % dst); - } - } - } - - if (curRound < nrRounds) { - prevInfos = infos; - return; - } - - /* Remove the .check directories if we're done. FIXME: keep them - if the result was not determistic? */ - if (curRound == nrRounds) { - for (auto& i : drv->outputs) { - Path prev = i.second.path + checkSuffix; - deletePath(prev); - } - } - - /* Register each output path as valid, and register the sets of - paths referenced by each of them. If there are cycles in the - outputs, this will fail. */ - { - ValidPathInfos infos2; - for (auto& i : infos) { - infos2.push_back(i.second); - } - worker.store.registerValidPaths(infos2); - } - - /* In case of a fixed-output derivation hash mismatch, throw an - exception now that we have registered the output as valid. */ - if (delayedException) { - std::rethrow_exception(delayedException); - } -} - -void DerivationGoal::checkOutputs( - const std::map& outputs) { - std::map outputsByPath; - for (auto& output : outputs) { - outputsByPath.emplace(output.second.path, output.second); - } - - for (auto& output : outputs) { - auto& outputName = output.first; - auto& info = output.second; - - struct Checks { - bool ignoreSelfRefs = false; - std::optional maxSize, maxClosureSize; - std::optional allowedReferences, allowedRequisites, - disallowedReferences, disallowedRequisites; - }; - - /* Compute the closure and closure size of some output. This - is slightly tricky because some of its references (namely - other outputs) may not be valid yet. 
*/ - auto getClosure = [&](const Path& path) { - uint64_t closureSize = 0; - PathSet pathsDone; - std::queue pathsLeft; - pathsLeft.push(path); - - while (!pathsLeft.empty()) { - auto path = pathsLeft.front(); - pathsLeft.pop(); - if (!pathsDone.insert(path).second) { - continue; - } - - auto i = outputsByPath.find(path); - if (i != outputsByPath.end()) { - closureSize += i->second.narSize; - for (auto& ref : i->second.references) { - pathsLeft.push(ref); - } - } else { - auto info = worker.store.queryPathInfo(path); - closureSize += info->narSize; - for (auto& ref : info->references) { - pathsLeft.push(ref); - } - } - } - - return std::make_pair(pathsDone, closureSize); - }; - - auto applyChecks = [&](const Checks& checks) { - if (checks.maxSize && info.narSize > *checks.maxSize) { - throw BuildError( - "path '%s' is too large at %d bytes; limit is %d bytes", info.path, - info.narSize, *checks.maxSize); - } - - if (checks.maxClosureSize) { - uint64_t closureSize = getClosure(info.path).second; - if (closureSize > *checks.maxClosureSize) { - throw BuildError( - "closure of path '%s' is too large at %d bytes; limit is %d " - "bytes", - info.path, closureSize, *checks.maxClosureSize); - } - } - - auto checkRefs = [&](const std::optional& value, bool allowed, - bool recursive) { - if (!value) { - return; - } - - PathSet spec = parseReferenceSpecifiers(worker.store, *drv, *value); - - PathSet used = - recursive ? getClosure(info.path).first : info.references; - - if (recursive && checks.ignoreSelfRefs) { - used.erase(info.path); - } - - PathSet badPaths; - - for (auto& i : used) { - if (allowed) { - if (spec.count(i) == 0u) { - badPaths.insert(i); - } - } else { - if (spec.count(i) != 0u) { - badPaths.insert(i); - } - } - } - - if (!badPaths.empty()) { - std::string badPathsStr; - for (auto& i : badPaths) { - badPathsStr += "\n "; - badPathsStr += i; - } - throw BuildError( - "output '%s' is not allowed to refer to the following paths:%s", - info.path, badPathsStr); - } - }; - - checkRefs(checks.allowedReferences, true, false); - checkRefs(checks.allowedRequisites, true, true); - checkRefs(checks.disallowedReferences, false, false); - checkRefs(checks.disallowedRequisites, false, true); - }; - - if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) { - auto outputChecks = structuredAttrs->find("outputChecks"); - if (outputChecks != structuredAttrs->end()) { - auto output = outputChecks->find(outputName); - - if (output != outputChecks->end()) { - Checks checks; - - auto maxSize = output->find("maxSize"); - if (maxSize != output->end()) { - checks.maxSize = maxSize->get(); - } - - auto maxClosureSize = output->find("maxClosureSize"); - if (maxClosureSize != output->end()) { - checks.maxClosureSize = maxClosureSize->get(); - } - - auto get = [&](const std::string& name) -> std::optional { - auto i = output->find(name); - if (i != output->end()) { - Strings res; - for (auto& j : *i) { - if (!j.is_string()) { - throw Error( - "attribute '%s' of derivation '%s' must be a list of " - "strings", - name, drvPath); - } - res.push_back(j.get()); - } - checks.disallowedRequisites = res; - return res; - } - return {}; - }; - - checks.allowedReferences = get("allowedReferences"); - checks.allowedRequisites = get("allowedRequisites"); - checks.disallowedReferences = get("disallowedReferences"); - checks.disallowedRequisites = get("disallowedRequisites"); - - applyChecks(checks); - } - } - } else { - // legacy non-structured-attributes case - Checks checks; - checks.ignoreSelfRefs = true; - 
checks.allowedReferences = parsedDrv->getStringsAttr("allowedReferences"); - checks.allowedRequisites = parsedDrv->getStringsAttr("allowedRequisites"); - checks.disallowedReferences = - parsedDrv->getStringsAttr("disallowedReferences"); - checks.disallowedRequisites = - parsedDrv->getStringsAttr("disallowedRequisites"); - applyChecks(checks); - } - } -} - -Path DerivationGoal::openLogFile() { - logSize = 0; - - if (!settings.keepLog) { - return ""; - } - - std::string baseName = baseNameOf(drvPath); - - /* Create a log file. */ - Path dir = fmt("%s/%s/%s/", worker.store.logDir, nix::LocalStore::drvsLogDir, - std::string(baseName, 0, 2)); - createDirs(dir); - - Path logFileName = fmt("%s/%s%s", dir, std::string(baseName, 2), - settings.compressLog ? ".bz2" : ""); - - fdLogFile = AutoCloseFD(open(logFileName.c_str(), - O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0666)); - if (!fdLogFile) { - throw SysError(format("creating log file '%1%'") % logFileName); - } - - logFileSink = std::make_shared(fdLogFile.get()); - - if (settings.compressLog) { - logSink = std::shared_ptr( - makeCompressionSink("bzip2", *logFileSink)); - } else { - logSink = logFileSink; - } - - return logFileName; -} - -void DerivationGoal::closeLogFile() { - auto logSink2 = std::dynamic_pointer_cast(logSink); - if (logSink2) { - logSink2->finish(); - } - if (logFileSink) { - logFileSink->flush(); - } - logSink = logFileSink = nullptr; - fdLogFile = AutoCloseFD(-1); -} - -void DerivationGoal::deleteTmpDir(bool force) { - if (!tmpDir.empty()) { - /* Don't keep temporary directories for builtins because they - might have privileged stuff (like a copy of netrc). */ - if (settings.keepFailed && !force && !drv->isBuiltin()) { - log_sink() << "note: keeping build directory '" << tmpDir << "'" - << std::endl; - chmod(tmpDir.c_str(), 0755); - } else { - deletePath(tmpDir); - } - tmpDir = ""; - } -} - -// TODO(tazjin): What ... what does this function ... do? 
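Judging from the body that follows, the TODO largely answers itself: handleChildOutput appears to stream builder and hook output, enforce settings.maxLogSize, and buffer bytes into the current log line, treating '\r' as an in-place overwrite (so progress bars do not flood the log) and '\n' as a flush. A standalone sketch of just that buffering rule; bufferLines is an illustrative name, not something from the original code:

#include <string>
#include <vector>

// Sketch of the carriage-return/newline handling the function below appears
// to implement; illustrative only.
std::vector<std::string> bufferLines(const std::string& data) {
  std::vector<std::string> lines;
  std::string current;
  size_t pos = 0;
  for (char c : data) {
    if (c == '\r') {
      pos = 0;                   // '\r': overwrite the current line in place
    } else if (c == '\n') {
      lines.push_back(current);  // '\n': the line is complete, flush it
      current.clear();
      pos = 0;
    } else {
      if (pos >= current.size()) current.resize(pos + 1);
      current[pos++] = c;
    }
  }
  // A trailing partial line is simply dropped in this sketch; the real code
  // keeps it buffered until handleEOF() flushes it.
  return lines;
}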
-void DerivationGoal::handleChildOutput(int fd, const std::string& data) { - if ((hook && fd == hook->builderOut.readSide.get()) || - (!hook && fd == builderOut.readSide.get())) { - logSize += data.size(); - if (settings.maxLogSize && logSize > settings.maxLogSize) { - log_sink() << getName() << " killed after writing more than " - << settings.maxLogSize << " bytes of log output" << std::endl; - killChild(); - done(BuildResult::LogLimitExceeded); - return; - } - - for (auto c : data) { - if (c == '\r') { - currentLogLinePos = 0; - } else if (c == '\n') { - flushLine(); - } else { - if (currentLogLinePos >= currentLogLine.size()) { - currentLogLine.resize(currentLogLinePos + 1); - } - currentLogLine[currentLogLinePos++] = c; - } - } - - if (logSink) { - (*logSink)(data); - } - } - - if (hook && fd == hook->fromHook.readSide.get()) { - for (auto c : data) { - if (c == '\n') { - currentHookLine.clear(); - } else { - currentHookLine += c; - } - } - } -} - -void DerivationGoal::handleEOF(int /* fd */) { - if (!currentLogLine.empty()) { - flushLine(); - } - worker.wakeUp(shared_from_this()); -} - -void DerivationGoal::flushLine() { - if (settings.verboseBuild && - (settings.printRepeatedBuilds || curRound == 1)) { - log_sink() << currentLogLine << std::endl; - } else { - logTail.push_back(currentLogLine); - if (logTail.size() > settings.logLines) { - logTail.pop_front(); - } - } - - currentLogLine = ""; - currentLogLinePos = 0; -} - -PathSet DerivationGoal::checkPathValidity(bool returnValid, bool checkHash) { - PathSet result; - for (auto& i : drv->outputs) { - if (!wantOutput(i.first, wantedOutputs)) { - continue; - } - bool good = worker.store.isValidPath(i.second.path) && - (!checkHash || worker.pathContentsGood(i.second.path)); - if (good == returnValid) { - result.insert(i.second.path); - } - } - return result; -} - -Path DerivationGoal::addHashRewrite(const Path& path) { - std::string h1 = std::string(path, worker.store.storeDir.size() + 1, 32); - std::string h2 = - std::string(hashString(htSHA256, "rewrite:" + drvPath + ":" + path) - .to_string(Base32, false), - 0, 32); - Path p = worker.store.storeDir + "/" + h2 + - std::string(path, worker.store.storeDir.size() + 33); - deletePath(p); - assert(path.size() == p.size()); - inputRewrites[h1] = h2; - outputRewrites[h2] = h1; - redirectedOutputs[path] = p; - return p; -} - -void DerivationGoal::done(BuildResult::Status status, const std::string& msg) { - result.status = status; - result.errorMsg = msg; - amDone(result.success() ? ecSuccess : ecFailed); - if (result.status == BuildResult::TimedOut) { - worker.timedOut = true; - } - if (result.status == BuildResult::PermanentFailure) { - worker.permanentFailure = true; - } - - mcExpectedBuilds.reset(); - mcRunningBuilds.reset(); - - if (result.success()) { - if (status == BuildResult::Built) { - worker.doneBuilds++; - } - } else { - if (status != BuildResult::DependencyFailed) { - worker.failedBuilds++; - } - } -} - -////////////////////////////////////////////////////////////////////// - -class SubstitutionGoal : public Goal { - friend class Worker; - - private: - /* The store path that should be realised through a substitute. */ - Path storePath; - - /* The remaining substituters. */ - std::list> subs; - - /* The current substituter. */ - std::shared_ptr sub; - - /* Whether a substituter failed. */ - bool substituterFailed = false; - - /* Path info returned by the substituter's query info operation. */ - std::shared_ptr info; - - /* Pipe for the substituter's standard output. 
*/ - Pipe outPipe; - - /* The substituter thread. */ - std::thread thr; - - std::promise promise; - - /* Whether to try to repair a valid path. */ - RepairFlag repair; - - /* Location where we're downloading the substitute. Differs from - storePath when doing a repair. */ - Path destPath; - - std::unique_ptr> maintainExpectedSubstitutions, - maintainRunningSubstitutions, maintainExpectedNar, - maintainExpectedDownload; - - using GoalState = void (SubstitutionGoal::*)(); - GoalState state; - - public: - SubstitutionGoal(Worker& worker, const Path& storePath, - RepairFlag repair = NoRepair); - - ~SubstitutionGoal() override; - - void timedOut() override { abort(); }; - - std::string key() override { - /* "a$" ensures substitution goals happen before derivation - goals. */ - return "a$" + storePathToName(storePath) + "$" + storePath; - } - - void work() override; - - /* The states. */ - void init(); - void tryNext(); - void gotInfo(); - void referencesValid(); - void tryToRun(); - void finished(); - - /* Callback used by the worker to write to the log. */ - void handleChildOutput(int fd, const std::string& data) override; - void handleEOF(int fd) override; - - Path getStorePath() { return storePath; } - - void amDone(ExitCode result) override { Goal::amDone(result); } -}; - -SubstitutionGoal::SubstitutionGoal(Worker& worker, const Path& storePath, - RepairFlag repair) - : Goal(worker), repair(repair) { - this->storePath = storePath; - state = &SubstitutionGoal::init; - name = absl::StrCat("substitution of ", storePath); - trace("created"); - maintainExpectedSubstitutions = - std::make_unique>(worker.expectedSubstitutions); -} - -SubstitutionGoal::~SubstitutionGoal() { - try { - if (thr.joinable()) { - // FIXME: signal worker thread to quit. - thr.join(); - worker.childTerminated(this); - } - } catch (...) { - ignoreException(); - } -} - -void SubstitutionGoal::work() { (this->*state)(); } - -void SubstitutionGoal::init() { - trace("init"); - - worker.store.addTempRoot(storePath); - - /* If the path already exists we're done. */ - if ((repair == 0u) && worker.store.isValidPath(storePath)) { - amDone(ecSuccess); - return; - } - - if (settings.readOnlyMode) { - throw Error( - format( - "cannot substitute path '%1%' - no write access to the Nix store") % - storePath); - } - - subs = settings.useSubstitutes ? getDefaultSubstituters() - : std::list>(); - - tryNext(); -} - -void SubstitutionGoal::tryNext() { - trace("trying next substituter"); - - if (subs.empty()) { - /* None left. Terminate this goal and let someone else deal - with it. */ - DLOG(WARNING) - << "path '" << storePath - << "' is required, but there is no substituter that can build it"; - - /* Hack: don't indicate failure if there were no substituters. - In that case the calling derivation should just do a - build. */ - amDone(substituterFailed ? ecFailed : ecNoSubstituters); - - if (substituterFailed) { - worker.failedSubstitutions++; - } - - return; - } - - sub = subs.front(); - subs.pop_front(); - - if (sub->storeDir != worker.store.storeDir) { - tryNext(); - return; - } - - try { - // FIXME: make async - info = sub->queryPathInfo(storePath); - } catch (InvalidPath&) { - tryNext(); - return; - } catch (SubstituterDisabled&) { - if (settings.tryFallback) { - tryNext(); - return; - } - throw; - } catch (Error& e) { - if (settings.tryFallback) { - log_sink() << e.what() << std::endl; - tryNext(); - return; - } - throw; - } - - /* Update the total expected download size. 
*/ - auto narInfo = std::dynamic_pointer_cast(info); - - maintainExpectedNar = std::make_unique>( - worker.expectedNarSize, info->narSize); - - maintainExpectedDownload = - narInfo && (narInfo->fileSize != 0u) - ? std::make_unique>( - worker.expectedDownloadSize, narInfo->fileSize) - : nullptr; - - /* Bail out early if this substituter lacks a valid - signature. LocalStore::addToStore() also checks for this, but - only after we've downloaded the path. */ - if (worker.store.requireSigs && !sub->isTrusted && - (info->checkSignatures(worker.store, worker.store.getPublicKeys()) == - 0u)) { - log_sink() << "substituter '" << sub->getUri() - << "' does not have a valid signature for path '" << storePath - << "'" << std::endl; - tryNext(); - return; - } - - /* To maintain the closure invariant, we first have to realise the - paths referenced by this one. */ - for (auto& i : info->references) { - if (i != storePath) { /* ignore self-references */ - addWaitee(worker.makeSubstitutionGoal(i)); - } - } - - if (waitees.empty()) { /* to prevent hang (no wake-up event) */ - referencesValid(); - } else { - state = &SubstitutionGoal::referencesValid; - } -} - -void SubstitutionGoal::referencesValid() { - trace("all references realised"); - - if (nrFailed > 0) { - DLOG(WARNING) << "some references of path '" << storePath - << "' could not be realised"; - amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure - : ecFailed); - return; - } - - for (auto& i : info->references) { - if (i != storePath) { /* ignore self-references */ - assert(worker.store.isValidPath(i)); - } - } - - state = &SubstitutionGoal::tryToRun; - worker.wakeUp(shared_from_this()); -} - -void SubstitutionGoal::tryToRun() { - trace("trying to run"); - - /* Make sure that we are allowed to start a build. Note that even - if maxBuildJobs == 0 (no local builds allowed), we still allow - a substituter to run. This is because substitutions cannot be - distributed to another machine via the build hook. */ - if (worker.getNrLocalBuilds() >= - std::max(1U, (unsigned int)settings.maxBuildJobs)) { - worker.waitForBuildSlot(shared_from_this()); - return; - } - - maintainRunningSubstitutions = - std::make_unique>(worker.runningSubstitutions); - - outPipe.create(); - - promise = std::promise(); - - thr = std::thread([this]() { - try { - /* Wake up the worker loop when we're done. */ - Finally updateStats([this]() { outPipe.writeSide = AutoCloseFD(-1); }); - - copyStorePath(ref(sub), - ref(worker.store.shared_from_this()), storePath, - repair, sub->isTrusted ? NoCheckSigs : CheckSigs); - - promise.set_value(); - } catch (...) { - promise.set_exception(std::current_exception()); - } - }); - - worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, - false); - - state = &SubstitutionGoal::finished; -} - -void SubstitutionGoal::finished() { - trace("substitute finished"); - - thr.join(); - worker.childTerminated(this); - - try { - promise.get_future().get(); - } catch (std::exception& e) { - log_sink() << e.what() << std::endl; - - /* Cause the parent build to fail unless --fallback is given, - or the substitute has disappeared. The latter case behaves - the same as the substitute never having existed in the - first place. */ - try { - throw; - } catch (SubstituteGone&) { - } catch (...) { - substituterFailed = true; - } - - /* Try the next substitute. 
*/ - state = &SubstitutionGoal::tryNext; - worker.wakeUp(shared_from_this()); - return; - } - - worker.markContentsGood(storePath); - - DLOG(INFO) << "substitution of path '" << storePath << "' succeeded"; - - maintainRunningSubstitutions.reset(); - - maintainExpectedSubstitutions.reset(); - worker.doneSubstitutions++; - - if (maintainExpectedDownload) { - auto fileSize = maintainExpectedDownload->delta; - maintainExpectedDownload.reset(); - worker.doneDownloadSize += fileSize; - } - - worker.doneNarSize += maintainExpectedNar->delta; - maintainExpectedNar.reset(); - - amDone(ecSuccess); -} - -void SubstitutionGoal::handleChildOutput(int fd, const std::string& data) {} - -void SubstitutionGoal::handleEOF(int fd) { - if (fd == outPipe.readSide.get()) { - worker.wakeUp(shared_from_this()); - } -} - -////////////////////////////////////////////////////////////////////// - -ABSL_CONST_INIT static thread_local bool working = false; - -Worker::Worker(LocalStore& store, std::ostream& log_sink) - : log_sink_(log_sink), store(store) { - // Debugging: prevent recursive workers. - // TODO(grfn): Do we need this? - CHECK(!working) << "Worker initialized during execution of a worker"; - working = true; - nrLocalBuilds = 0; - lastWokenUp = steady_time_point::min(); - permanentFailure = false; - timedOut = false; - hashMismatch = false; - checkMismatch = false; -} - -Worker::~Worker() { - working = false; - - /* Explicitly get rid of all strong pointers now. After this all - goals that refer to this worker should be gone. (Otherwise we - are in trouble, since goals may call childTerminated() etc. in - their destructors). */ - topGoals.clear(); - - assert(expectedSubstitutions == 0); - assert(expectedDownloadSize == 0); - assert(expectedNarSize == 0); -} - -GoalPtr Worker::makeDerivationGoal(const Path& drv_path, - const StringSet& wantedOutputs, - BuildMode buildMode) { - GoalPtr goal = derivationGoals[drv_path].lock(); - if (!goal) { - goal = std::make_shared(*this, drv_path, wantedOutputs, - buildMode); - derivationGoals[drv_path] = goal; - wakeUp(goal); - } else { - (dynamic_cast(goal.get())) - ->addWantedOutputs(wantedOutputs); - } - return goal; -} - -std::shared_ptr Worker::makeBasicDerivationGoal( - const Path& drvPath, const BasicDerivation& drv, BuildMode buildMode) { - std::shared_ptr goal = - std::make_shared(*this, drvPath, drv, buildMode); - wakeUp(goal); - return goal; -} - -GoalPtr Worker::makeSubstitutionGoal(const Path& path, RepairFlag repair) { - GoalPtr goal = substitutionGoals[path].lock(); - if (!goal) { - goal = std::make_shared(*this, path, repair); - substitutionGoals[path] = goal; - wakeUp(goal); - } - return goal; -} - -static void removeGoal(const GoalPtr& goal, WeakGoalMap& goalMap) { - /* !!! inefficient */ - for (auto i = goalMap.begin(); i != goalMap.end();) { - if (i->second.lock() == goal) { - auto j = i; - ++j; - goalMap.erase(i); - i = j; - } else { - ++i; - } - } -} - -void Worker::removeGoal(const GoalPtr& goal) { - nix::removeGoal(goal, derivationGoals); - nix::removeGoal(goal, substitutionGoals); - if (topGoals.find(goal) != topGoals.end()) { - topGoals.erase(goal); - /* If a top-level goal failed, then kill all other goals - (unless keepGoing was set). */ - if (goal->getExitCode() == Goal::ecFailed && !settings.keepGoing) { - topGoals.clear(); - } - } - - /* Wake up goals waiting for any goal to finish. 
*/ - for (auto& i : waitingForAnyGoal) { - GoalPtr goal = i.lock(); - if (goal) { - wakeUp(goal); - } - } - - waitingForAnyGoal.clear(); -} - -void Worker::wakeUp(const GoalPtr& goal) { - goal->trace("woken up"); - addToWeakGoals(awake, goal); -} - -unsigned Worker::getNrLocalBuilds() { return nrLocalBuilds; } - -void Worker::childStarted(const GoalPtr& goal, const std::set& fds, - bool inBuildSlot, bool respectTimeouts) { - Child child; - child.goal = goal; - child.goal2 = goal.get(); - child.fds = fds; - child.timeStarted = child.lastOutput = steady_time_point::clock::now(); - child.inBuildSlot = inBuildSlot; - child.respectTimeouts = respectTimeouts; - children.emplace_back(child); - if (inBuildSlot) { - nrLocalBuilds++; - } -} - -void Worker::childTerminated(Goal* goal, bool wakeSleepers) { - auto i = - std::find_if(children.begin(), children.end(), - [&](const Child& child) { return child.goal2 == goal; }); - if (i == children.end()) { - return; - } - - if (i->inBuildSlot) { - assert(nrLocalBuilds > 0); - nrLocalBuilds--; - } - - children.erase(i); - - if (wakeSleepers) { - /* Wake up goals waiting for a build slot. */ - for (auto& j : wantingToBuild) { - GoalPtr goal = j.lock(); - if (goal) { - wakeUp(goal); - } - } - - wantingToBuild.clear(); - } -} - -void Worker::waitForBuildSlot(const GoalPtr& goal) { - DLOG(INFO) << "wait for build slot"; - if (getNrLocalBuilds() < settings.maxBuildJobs) { - wakeUp(goal); /* we can do it right away */ - } else { - addToWeakGoals(wantingToBuild, goal); - } -} - -void Worker::waitForAnyGoal(GoalPtr goal) { - DLOG(INFO) << "wait for any goal"; - addToWeakGoals(waitingForAnyGoal, std::move(goal)); -} - -void Worker::waitForAWhile(GoalPtr goal) { - DLOG(INFO) << "wait for a while"; - addToWeakGoals(waitingForAWhile, std::move(goal)); -} - -void Worker::run(const Goals& _topGoals) { - for (auto& i : _topGoals) { - topGoals.insert(i); - } - - DLOG(INFO) << "entered goal loop"; - - while (true) { - checkInterrupt(); - - store.autoGC(false); - - /* Call every wake goal (in the ordering established by - CompareGoalPtrs). */ - while (!awake.empty() && !topGoals.empty()) { - Goals awake2; - for (auto& i : awake) { - GoalPtr goal = i.lock(); - if (goal) { - awake2.insert(goal); - } - } - awake.clear(); - for (auto& goal : awake2) { - checkInterrupt(); - goal->work(); - if (topGoals.empty()) { - break; - } // stuff may have been cancelled - } - } - - if (topGoals.empty()) { - break; - } - - /* Wait for input. */ - if (!children.empty() || !waitingForAWhile.empty()) { - waitForInput(); - } else { - if (awake.empty() && 0 == settings.maxBuildJobs) { - throw Error( - "unable to start any build; either increase '--max-jobs' " - "or enable remote builds"); - } - assert(!awake.empty()); - } - } - - /* If --keep-going is not set, it's possible that the main goal - exited while some of its subgoals were still active. But if - --keep-going *is* set, then they must all be finished now. */ - assert(!settings.keepGoing || awake.empty()); - assert(!settings.keepGoing || wantingToBuild.empty()); - assert(!settings.keepGoing || children.empty()); -} - -void Worker::waitForInput() { - DLOG(INFO) << "waiting for children"; - - /* Process output from the file descriptors attached to the - children, namely log output and output path creation commands. - We also use this to detect child termination: if we get EOF on - the logger pipe of a build, we assume that the builder has - terminated. 
*/ - - bool useTimeout = false; - struct timeval timeout; - timeout.tv_usec = 0; - auto before = steady_time_point::clock::now(); - - /* If we're monitoring for silence on stdout/stderr, or if there - is a build timeout, then wait for input until the first - deadline for any child. */ - auto nearest = steady_time_point::max(); // nearest deadline - if (settings.minFree.get() != 0) { - // Periodicallty wake up to see if we need to run the garbage collector. - nearest = before + std::chrono::seconds(10); - } - for (auto& i : children) { - if (!i.respectTimeouts) { - continue; - } - if (0 != settings.maxSilentTime) { - nearest = std::min( - nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime)); - } - if (0 != settings.buildTimeout) { - nearest = std::min( - nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout)); - } - } - if (nearest != steady_time_point::max()) { - timeout.tv_sec = std::max( - 1L, static_cast(std::chrono::duration_cast( - nearest - before) - .count())); - useTimeout = true; - } - - /* If we are polling goals that are waiting for a lock, then wake - up after a few seconds at most. */ - if (!waitingForAWhile.empty()) { - useTimeout = true; - if (lastWokenUp == steady_time_point::min()) { - DLOG(WARNING) << "waiting for locks or build slots..."; - } - if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) { - lastWokenUp = before; - } - timeout.tv_sec = std::max( - 1L, static_cast(std::chrono::duration_cast( - lastWokenUp + - std::chrono::seconds(settings.pollInterval) - - before) - .count())); - } else { - lastWokenUp = steady_time_point::min(); - } - - if (useTimeout) { - DLOG(INFO) << "sleeping " << timeout.tv_sec << " seconds"; - } - - /* Use select() to wait for the input side of any logger pipe to - become `available'. Note that `available' (i.e., non-blocking) - includes EOF. */ - fd_set fds; - FD_ZERO(&fds); - int fdMax = 0; - for (auto& i : children) { - for (auto& j : i.fds) { - if (j >= FD_SETSIZE) { - throw Error("reached FD_SETSIZE limit"); - } - FD_SET(j, &fds); - if (j >= fdMax) { - fdMax = j + 1; - } - } - } - - if (select(fdMax, &fds, nullptr, nullptr, useTimeout ? &timeout : nullptr) == - -1) { - if (errno == EINTR) { - return; - } - throw SysError("waiting for input"); - } - - auto after = steady_time_point::clock::now(); - - /* Process all available file descriptors. FIXME: this is - O(children * fds). */ - decltype(children)::iterator i; - for (auto j = children.begin(); j != children.end(); j = i) { - i = std::next(j); - - checkInterrupt(); - - GoalPtr goal = j->goal.lock(); - assert(goal); - - std::set fds2(j->fds); - std::vector buffer(4096); - for (auto& k : fds2) { - if (FD_ISSET(k, &fds)) { - ssize_t rd = read(k, buffer.data(), buffer.size()); - // FIXME: is there a cleaner way to handle pt close - // than EIO? Is this even standard? 
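The FIXME above points at a real quirk: on Linux, reading from a pseudo-terminal master after the slave side has closed typically fails with EIO rather than returning 0, so the code below treats both outcomes as end-of-file. A tiny helper expressing that convention; isEofRead is a hypothetical name, not part of the original sources:

#include <cerrno>
#include <sys/types.h>

// Hypothetical helper: a read() result counts as EOF either on a plain
// zero-byte return or on the EIO a closed pty master commonly produces.
inline bool isEofRead(ssize_t rd, int err) {
  return rd == 0 || (rd == -1 && err == EIO);
}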
- if (rd == 0 || (rd == -1 && errno == EIO)) { - DLOG(WARNING) << goal->getName() << ": got EOF"; - goal->handleEOF(k); - j->fds.erase(k); - } else if (rd == -1) { - if (errno != EINTR) { - throw SysError("%s: read failed", goal->getName()); - } - } else { - DLOG(INFO) << goal->getName() << ": read " << rd << " bytes"; - std::string data(reinterpret_cast(buffer.data()), rd); - j->lastOutput = after; - goal->handleChildOutput(k, data); - } - } - } - - if (goal->getExitCode() == Goal::ecBusy && 0 != settings.maxSilentTime && - j->respectTimeouts && - after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime)) { - log_sink_ << goal->getName() << " timed out after " - << settings.maxSilentTime << " seconds of silence"; - goal->timedOut(); - } - - else if (goal->getExitCode() == Goal::ecBusy && - 0 != settings.buildTimeout && j->respectTimeouts && - after - j->timeStarted >= - std::chrono::seconds(settings.buildTimeout)) { - log_sink_ << goal->getName() << " timed out after " - << settings.buildTimeout << " seconds"; - goal->timedOut(); - } - } - - if (!waitingForAWhile.empty() && - lastWokenUp + std::chrono::seconds(settings.pollInterval) <= after) { - lastWokenUp = after; - for (auto& i : waitingForAWhile) { - GoalPtr goal = i.lock(); - if (goal) { - wakeUp(goal); - } - } - waitingForAWhile.clear(); - } -} - -unsigned int Worker::exitStatus() { - /* - * 1100100 - * ^^^^ - * |||`- timeout - * ||`-- output hash mismatch - * |`--- build failure - * `---- not deterministic - */ - unsigned int mask = 0; - bool buildFailure = permanentFailure || timedOut || hashMismatch; - if (buildFailure) { - mask |= 0x04; // 100 - } - if (timedOut) { - mask |= 0x01; // 101 - } - if (hashMismatch) { - mask |= 0x02; // 102 - } - if (checkMismatch) { - mask |= 0x08; // 104 - } - - if (mask != 0u) { - mask |= 0x60; - } - return mask != 0u ? 
mask : 1; -} - -bool Worker::pathContentsGood(const Path& path) { - auto i = pathContentsGoodCache.find(path); - if (i != pathContentsGoodCache.end()) { - return i->second; - } - log_sink_ << "checking path '" << path << "'..."; - auto info = store.queryPathInfo(path); - bool res; - if (!pathExists(path)) { - res = false; - } else { - HashResult current = hashPath(info->narHash.type, path); - Hash nullHash(htSHA256); - res = info->narHash == nullHash || info->narHash == current.first; - } - pathContentsGoodCache[path] = res; - if (!res) { - log_sink_ << "path '" << path << "' is corrupted or missing!"; - } - return res; -} - -void Worker::markContentsGood(const Path& path) { - pathContentsGoodCache[path] = true; -} - -////////////////////////////////////////////////////////////////////// - -static void primeCache(Store& store, const PathSet& paths) { - PathSet willBuild; - PathSet willSubstitute; - PathSet unknown; - unsigned long long downloadSize; - unsigned long long narSize; - store.queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, - narSize); - - if (!willBuild.empty() && 0 == settings.maxBuildJobs && - getMachines().empty()) { - throw Error( - "%d derivations need to be built, but neither local builds " - "('--max-jobs') " - "nor remote builds ('--builders') are enabled", - willBuild.size()); - } -} - -absl::Status LocalStore::buildPaths(std::ostream& log_sink, - const PathSet& drvPaths, - BuildMode build_mode) { - Worker worker(*this, log_sink); - - primeCache(*this, drvPaths); - - Goals goals; - for (auto& i : drvPaths) { - DrvPathWithOutputs i2 = parseDrvPathWithOutputs(i); - if (isDerivation(i2.first)) { - goals.insert(worker.makeDerivationGoal(i2.first, i2.second, build_mode)); - } else { - goals.insert(worker.makeSubstitutionGoal( - i, build_mode == bmRepair ? Repair : NoRepair)); - } - } - - worker.run(goals); - - PathSet failed; - for (auto& i : goals) { - if (i->getExitCode() != Goal::ecSuccess) { - auto* i2 = dynamic_cast(i.get()); - if (i2 != nullptr) { - failed.insert(i2->getDrvPath()); - } else { - failed.insert(dynamic_cast(i.get())->getStorePath()); - } - } - } - - if (!failed.empty()) { - return absl::Status( - absl::StatusCode::kInternal, - absl::StrFormat("build of %s failed (exit code %d)", showPaths(failed), - worker.exitStatus())); - } - return absl::OkStatus(); -} - -BuildResult LocalStore::buildDerivation(std::ostream& log_sink, - const Path& drvPath, - const BasicDerivation& drv, - BuildMode buildMode) { - Worker worker(*this, log_sink); - auto goal = worker.makeBasicDerivationGoal(drvPath, drv, buildMode); - - BuildResult result; - - try { - worker.run(Goals{goal}); - result = goal->getResult(); - } catch (Error& e) { - result.status = BuildResult::MiscFailure; - result.errorMsg = e.msg(); - } - - return result; -} - -void LocalStore::ensurePath(const Path& path) { - /* If the path is already valid, we're done. 
*/ - if (isValidPath(path)) { - return; - } - - primeCache(*this, {path}); - - auto discard_logs = DiscardLogsSink(); - Worker worker(*this, discard_logs); - GoalPtr goal = worker.makeSubstitutionGoal(path); - Goals goals = {goal}; - - worker.run(goals); - - if (goal->getExitCode() != Goal::ecSuccess) { - throw Error(worker.exitStatus(), - "path '%s' does not exist and cannot be created", path); - } -} - -void LocalStore::repairPath(const Path& path) { - auto discard_logs = DiscardLogsSink(); - Worker worker(*this, discard_logs); - GoalPtr goal = worker.makeSubstitutionGoal(path, Repair); - Goals goals = {goal}; - - worker.run(goals); - - if (goal->getExitCode() != Goal::ecSuccess) { - /* Since substituting the path didn't work, if we have a valid - deriver, then rebuild the deriver. */ - auto deriver = queryPathInfo(path)->deriver; - if (!deriver.empty() && isValidPath(deriver)) { - goals.clear(); - goals.insert(worker.makeDerivationGoal(deriver, StringSet(), bmRepair)); - worker.run(goals); - } else { - throw Error(worker.exitStatus(), "cannot repair path '%s'", path); - } - } -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/builtins.hh b/third_party/nix/src/libstore/builtins.hh deleted file mode 100644 index bc53e78ebc..0000000000 --- a/third_party/nix/src/libstore/builtins.hh +++ /dev/null @@ -1,11 +0,0 @@ -#pragma once - -#include "libstore/derivations.hh" - -namespace nix { - -// TODO: make pluggable. -void builtinFetchurl(const BasicDerivation& drv, const std::string& netrcData); -void builtinBuildenv(const BasicDerivation& drv); - -} // namespace nix diff --git a/third_party/nix/src/libstore/builtins/buildenv.cc b/third_party/nix/src/libstore/builtins/buildenv.cc deleted file mode 100644 index 433082a0f9..0000000000 --- a/third_party/nix/src/libstore/builtins/buildenv.cc +++ /dev/null @@ -1,240 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include - -#include "libstore/builtins.hh" - -namespace nix { - -typedef std::map Priorities; - -// FIXME: change into local variables. - -static Priorities priorities; - -static unsigned long symlinks; - -/* For each activated package, create symlinks */ -static void createLinks(const Path& srcDir, const Path& dstDir, int priority) { - DirEntries srcFiles; - - try { - srcFiles = readDirectory(srcDir); - } catch (SysError& e) { - if (e.errNo == ENOTDIR) { - LOG(ERROR) << "warning: not including '" << srcDir - << "' in the user environment because it's not a directory"; - return; - } - throw; - } - - for (const auto& ent : srcFiles) { - if (ent.name[0] == '.') { /* not matched by glob */ - continue; - } - auto srcFile = srcDir + "/" + ent.name; - auto dstFile = dstDir + "/" + ent.name; - - struct stat srcSt; - try { - if (stat(srcFile.c_str(), &srcSt) == -1) { - throw SysError("getting status of '%1%'", srcFile); - } - } catch (SysError& e) { - if (e.errNo == ENOENT || e.errNo == ENOTDIR) { - LOG(ERROR) << "warning: skipping dangling symlink '" << dstFile << "'"; - continue; - } - throw; - } - - /* The files below are special-cased to that they don't show up - * in user profiles, either because they are useless, or - * because they would cauase pointless collisions (e.g., each - * Python package brings its own - * `$out/lib/pythonX.Y/site-packages/easy-install.pth'.) 
- */ - if (absl::EndsWith(srcFile, "/propagated-build-inputs") || - absl::EndsWith(srcFile, "/nix-support") || - absl::EndsWith(srcFile, "/perllocal.pod") || - absl::EndsWith(srcFile, "/info/dir") || - absl::EndsWith(srcFile, "/log")) { - continue; - - } else if (S_ISDIR(srcSt.st_mode)) { - struct stat dstSt; - auto res = lstat(dstFile.c_str(), &dstSt); - if (res == 0) { - if (S_ISDIR(dstSt.st_mode)) { - createLinks(srcFile, dstFile, priority); - continue; - } else if (S_ISLNK(dstSt.st_mode)) { - auto target = canonPath(dstFile, true); - if (!S_ISDIR(lstat(target).st_mode)) { - throw Error("collision between '%1%' and non-directory '%2%'", - srcFile, target); - } - if (unlink(dstFile.c_str()) == -1) { - throw SysError(format("unlinking '%1%'") % dstFile); - } - if (mkdir(dstFile.c_str(), 0755) == -1) { - throw SysError(format("creating directory '%1%'")); - } - createLinks(target, dstFile, priorities[dstFile]); - createLinks(srcFile, dstFile, priority); - continue; - } - } else if (errno != ENOENT) { - throw SysError(format("getting status of '%1%'") % dstFile); - } - } - - else { - struct stat dstSt; - auto res = lstat(dstFile.c_str(), &dstSt); - if (res == 0) { - if (S_ISLNK(dstSt.st_mode)) { - auto prevPriority = priorities[dstFile]; - if (prevPriority == priority) { - throw Error( - "packages '%1%' and '%2%' have the same priority %3%; " - "use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' " - "to change the priority of one of the conflicting packages" - " (0 being the highest priority)", - srcFile, readLink(dstFile), priority); - } - if (prevPriority < priority) { - continue; - } - if (unlink(dstFile.c_str()) == -1) { - throw SysError(format("unlinking '%1%'") % dstFile); - } - } else if (S_ISDIR(dstSt.st_mode)) { - throw Error( - "collision between non-directory '%1%' and directory '%2%'", - srcFile, dstFile); - } - } else if (errno != ENOENT) { - throw SysError(format("getting status of '%1%'") % dstFile); - } - } - - createSymlink(srcFile, dstFile); - priorities[dstFile] = priority; - symlinks++; - } -} - -using FileProp = std::set; - -static FileProp done; -static FileProp postponed = FileProp{}; - -static Path out; - -static void addPkg(const Path& pkgDir, int priority) { - if (done.count(pkgDir)) { - return; - } - done.insert(pkgDir); - createLinks(pkgDir, out, priority); - - try { - for (auto p : absl::StrSplit( - readFile(pkgDir + "/nix-support/propagated-user-env-packages"), - absl::ByAnyChar(" \n"), absl::SkipEmpty())) { - auto pkg = std::string(p); - if (!done.count(pkg)) { - postponed.insert(pkg); - } - } - } catch (SysError& e) { - if (e.errNo != ENOENT && e.errNo != ENOTDIR) { - throw; - } - } -} - -struct Package { - Path path; - bool active; - int priority; - Package(Path path, bool active, int priority) - : path{path}, active{active}, priority{priority} {} -}; - -using Packages = std::vector; - -void builtinBuildenv(const BasicDerivation& drv) { - auto getAttr = [&](const std::string& name) { - auto i = drv.env.find(name); - if (i == drv.env.end()) { - throw Error("attribute '%s' missing", name); - } - return i->second; - }; - - out = getAttr("out"); - createDirs(out); - - /* Convert the stuff we get from the environment back into a - * coherent data type. */ - Packages pkgs; - Strings derivations = absl::StrSplit( - getAttr("derivations"), absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty()); - while (!derivations.empty()) { - /* !!! 
We're trusting the caller to structure derivations env var correctly - */ - auto active = derivations.front(); - derivations.pop_front(); - auto priority = stoi(derivations.front()); - derivations.pop_front(); - auto outputs = stoi(derivations.front()); - derivations.pop_front(); - for (auto n = 0; n < outputs; n++) { - auto path = derivations.front(); - derivations.pop_front(); - pkgs.emplace_back(path, active != "false", priority); - } - } - - /* Symlink to the packages that have been installed explicitly by the - * user. Process in priority order to reduce unnecessary - * symlink/unlink steps. - */ - std::sort(pkgs.begin(), pkgs.end(), [](const Package& a, const Package& b) { - return a.priority < b.priority || - (a.priority == b.priority && a.path < b.path); - }); - for (const auto& pkg : pkgs) { - if (pkg.active) { - addPkg(pkg.path, pkg.priority); - } - } - - /* Symlink to the packages that have been "propagated" by packages - * installed by the user (i.e., package X declares that it wants Y - * installed as well). We do these later because they have a lower - * priority in case of collisions. - */ - auto priorityCounter = 1000; - while (!postponed.empty()) { - auto pkgDirs = postponed; - postponed = FileProp{}; - for (const auto& pkgDir : pkgDirs) { - addPkg(pkgDir, priorityCounter++); - } - } - - LOG(INFO) << "created " << symlinks << " symlinks in user environment"; - - createSymlink(getAttr("manifest"), out + "/manifest.nix"); -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/builtins/fetchurl.cc b/third_party/nix/src/libstore/builtins/fetchurl.cc deleted file mode 100644 index 961d081423..0000000000 --- a/third_party/nix/src/libstore/builtins/fetchurl.cc +++ /dev/null @@ -1,93 +0,0 @@ -#include -#include - -#include "libstore/builtins.hh" -#include "libstore/download.hh" -#include "libstore/store-api.hh" -#include "libutil/archive.hh" -#include "libutil/compression.hh" - -namespace nix { - -void builtinFetchurl(const BasicDerivation& drv, const std::string& netrcData) { - /* Make the host's netrc data available. Too bad curl requires - this to be stored in a file. It would be nice if we could just - pass a pointer to the data. */ - if (netrcData != "") { - settings.netrcFile = "netrc"; - writeFile(settings.netrcFile, netrcData, 0600); - } - - auto getAttr = [&](const std::string& name) { - auto i = drv.env.find(name); - if (i == drv.env.end()) { - throw Error(format("attribute '%s' missing") % name); - } - return i->second; - }; - - Path storePath = getAttr("out"); - auto mainUrl = getAttr("url"); - bool unpack = get(drv.env, "unpack", "") == "1"; - - /* Note: have to use a fresh downloader here because we're in - a forked process. */ - auto downloader = makeDownloader(); - - auto fetch = [&](const std::string& url) { - auto source = sinkToSource([&](Sink& sink) { - /* No need to do TLS verification, because we check the hash of - the result anyway. */ - DownloadRequest request(url); - request.verifyTLS = false; - request.decompress = false; - - auto decompressor = makeDecompressionSink( - unpack && absl::EndsWith(mainUrl, ".xz") ? 
"xz" : "none", sink); - downloader->download(std::move(request), *decompressor); - decompressor->finish(); - }); - - if (unpack) { - restorePath(storePath, *source); - } else { - writeFile(storePath, *source); - } - - auto executable = drv.env.find("executable"); - if (executable != drv.env.end() && executable->second == "1") { - if (chmod(storePath.c_str(), 0755) == -1) { - throw SysError(format("making '%1%' executable") % storePath); - } - } - }; - - /* Try the hashed mirrors first. */ - if (getAttr("outputHashMode") == "flat") { - auto hash_ = Hash::deserialize(getAttr("outputHash"), - parseHashType(getAttr("outputHashAlgo"))); - if (hash_.ok()) { - auto h = *hash_; - for (auto hashedMirror : settings.hashedMirrors.get()) { - try { - if (!absl::EndsWith(hashedMirror, "/")) { - hashedMirror += '/'; - } - fetch(hashedMirror + printHashType(h.type) + "/" + - h.to_string(Base16, false)); - return; - } catch (Error& e) { - LOG(ERROR) << e.what(); - } - } - } else { - LOG(ERROR) << "checking mirrors for '" << mainUrl - << "': " << hash_.status().ToString(); - } - } - - /* Otherwise try the specified URL. */ - fetch(mainUrl); -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/crypto.cc b/third_party/nix/src/libstore/crypto.cc deleted file mode 100644 index 0a2795cb0a..0000000000 --- a/third_party/nix/src/libstore/crypto.cc +++ /dev/null @@ -1,138 +0,0 @@ -#include "libstore/crypto.hh" - -#include - -#include "libstore/globals.hh" -#include "libutil/util.hh" - -#if HAVE_SODIUM -#include -#endif - -namespace nix { - -// TODO(riking): convert to string_view to reduce allocations -static std::pair split(const std::string& s) { - size_t colon = s.find(':'); - if (colon == std::string::npos || colon == 0) { - return {"", ""}; - } - return {std::string(s, 0, colon), std::string(s, colon + 1)}; -} - -Key::Key(const std::string& s) { - auto ss = split(s); - - name = ss.first; - std::string keyb64 = ss.second; - - if (name.empty() || keyb64.empty()) { - throw Error("secret key is corrupt"); - } - - if (!absl::Base64Unescape(keyb64, &key)) { - // TODO(grfn): replace this with StatusOr - throw Error("Invalid Base64"); - } -} - -SecretKey::SecretKey(const std::string& s) : Key(s) { -#if HAVE_SODIUM - if (key.size() != crypto_sign_SECRETKEYBYTES) { - throw Error("secret key is not valid"); - } -#endif -} - -#if !HAVE_SODIUM -[[noreturn]] static void noSodium() { - throw Error( - "Nix was not compiled with libsodium, required for signed binary cache " - "support"); -} -#endif - -std::string SecretKey::signDetached(const std::string& data) const { -#if HAVE_SODIUM - unsigned char sig[crypto_sign_BYTES]; - unsigned long long sigLen; - crypto_sign_detached(sig, &sigLen, (unsigned char*)data.data(), data.size(), - (unsigned char*)key.data()); - return name + ":" + - absl::Base64Escape(std::string(reinterpret_cast(sig), sigLen)); -#else - noSodium(); -#endif -} - -PublicKey SecretKey::toPublicKey() const { -#if HAVE_SODIUM - unsigned char pk[crypto_sign_PUBLICKEYBYTES]; - crypto_sign_ed25519_sk_to_pk(pk, (unsigned char*)key.data()); - return PublicKey(name, std::string(reinterpret_cast(pk), - crypto_sign_PUBLICKEYBYTES)); -#else - noSodium(); -#endif -} - -PublicKey::PublicKey(const std::string& s) : Key(s) { -#if HAVE_SODIUM - if (key.size() != crypto_sign_PUBLICKEYBYTES) { - throw Error("public key is not valid"); - } -#endif -} - -bool verifyDetached(const std::string& data, const std::string& sig, - const PublicKeys& publicKeys) { -#if HAVE_SODIUM - auto ss = split(sig); - - auto key = 
publicKeys.find(ss.first); - if (key == publicKeys.end()) { - return false; - } - - std::string sig2; - if (!absl::Base64Unescape(ss.second, &sig2)) { - // TODO(grfn): replace this with StatusOr - throw Error("Invalid Base64"); - } - if (sig2.size() != crypto_sign_BYTES) { - throw Error("signature is not valid"); - } - - return crypto_sign_verify_detached( - reinterpret_cast(sig2.data()), - (unsigned char*)data.data(), data.size(), - (unsigned char*)key->second.key.data()) == 0; -#else - noSodium(); -#endif -} - -PublicKeys getDefaultPublicKeys() { - PublicKeys publicKeys; - - // FIXME: filter duplicates - - for (const auto& s : settings.trustedPublicKeys.get()) { - PublicKey key(s); - publicKeys.emplace(key.name, key); - } - - for (const auto& secretKeyFile : settings.secretKeyFiles.get()) { - try { - SecretKey secretKey(readFile(secretKeyFile)); - publicKeys.emplace(secretKey.name, secretKey.toPublicKey()); - } catch (SysError& e) { - /* Ignore unreadable key files. That's normal in a - multi-user installation. */ - } - } - - return publicKeys; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/crypto.hh b/third_party/nix/src/libstore/crypto.hh deleted file mode 100644 index e282f4f8ef..0000000000 --- a/third_party/nix/src/libstore/crypto.hh +++ /dev/null @@ -1,49 +0,0 @@ -#pragma once - -#include - -#include "libutil/types.hh" - -namespace nix { - -struct Key { - std::string name; - std::string key; - - /* Construct Key from a string in the format - ‘:’. */ - Key(const std::string& s); - - protected: - Key(const std::string& name, const std::string& key) : name(name), key(key) {} -}; - -struct PublicKey; - -struct SecretKey : Key { - SecretKey(const std::string& s); - - /* Return a detached signature of the given string. */ - std::string signDetached(const std::string& data) const; - - PublicKey toPublicKey() const; -}; - -struct PublicKey : Key { - PublicKey(const std::string& s); - - private: - PublicKey(const std::string& name, const std::string& key) : Key(name, key) {} - friend struct SecretKey; -}; - -typedef std::map PublicKeys; - -/* Return true iff ‘sig’ is a correct signature over ‘data’ using one - of the given public keys. 
*/ -bool verifyDetached(const std::string& data, const std::string& sig, - const PublicKeys& publicKeys); - -PublicKeys getDefaultPublicKeys(); - -} // namespace nix diff --git a/third_party/nix/src/libstore/derivations.cc b/third_party/nix/src/libstore/derivations.cc deleted file mode 100644 index 9c344502f3..0000000000 --- a/third_party/nix/src/libstore/derivations.cc +++ /dev/null @@ -1,520 +0,0 @@ -#include "libstore/derivations.hh" - -#include -#include -#include -#include - -#include "libproto/worker.pb.h" -#include "libstore/fs-accessor.hh" -#include "libstore/globals.hh" -#include "libstore/store-api.hh" -#include "libstore/worker-protocol.hh" -#include "libutil/istringstream_nocopy.hh" -#include "libutil/util.hh" - -namespace nix { - -// TODO(#statusor): looks like easy absl::Status conversion -void DerivationOutput::parseHashInfo(bool& recursive, Hash& hash) const { - recursive = false; - std::string algo = hashAlgo; - - if (std::string(algo, 0, 2) == "r:") { - recursive = true; - algo = std::string(algo, 2); - } - - HashType hashType = parseHashType(algo); - if (hashType == htUnknown) { - throw Error(format("unknown hash algorithm '%1%'") % algo); - } - - auto hash_ = Hash::deserialize(this->hash, hashType); - hash = Hash::unwrap_throw(hash_); -} - -nix::proto::Derivation_DerivationOutput DerivationOutput::to_proto() const { - nix::proto::Derivation_DerivationOutput result; - result.mutable_path()->set_path(path); - result.set_hash_algo(hashAlgo); - result.set_hash(hash); - return result; -} - -BasicDerivation BasicDerivation::from_proto( - const nix::proto::Derivation* proto_derivation) { - BasicDerivation result; - result.platform = proto_derivation->platform(); - result.builder = proto_derivation->builder().path(); - - for (auto [k, v] : proto_derivation->outputs()) { - result.outputs.emplace(k, v); - } - - result.inputSrcs.insert(proto_derivation->input_sources().paths().begin(), - proto_derivation->input_sources().paths().end()); - - result.args.insert(result.args.end(), proto_derivation->args().begin(), - proto_derivation->args().end()); - - for (auto [k, v] : proto_derivation->env()) { - result.env.emplace(k, v); - } - - return result; -} - -nix::proto::Derivation BasicDerivation::to_proto() const { - nix::proto::Derivation result; - for (const auto& [key, output] : outputs) { - result.mutable_outputs()->insert({key, output.to_proto()}); - } - for (const auto& input_src : inputSrcs) { - *result.mutable_input_sources()->add_paths() = input_src; - } - result.set_platform(platform); - result.mutable_builder()->set_path(builder); - for (const auto& arg : args) { - result.add_args(arg); - } - - for (const auto& [key, value] : env) { - result.mutable_env()->insert({key, value}); - } - - return result; -} - -Path BasicDerivation::findOutput(const std::string& id) const { - auto i = outputs.find(id); - if (i == outputs.end()) { - throw Error(format("derivation has no output '%1%'") % id); - } - return i->second.path; -} - -bool BasicDerivation::isBuiltin() const { - return std::string(builder, 0, 8) == "builtin:"; -} - -Path writeDerivation(const ref& store, const Derivation& drv, - const std::string& name, RepairFlag repair) { - PathSet references; - references.insert(drv.inputSrcs.begin(), drv.inputSrcs.end()); - for (auto& i : drv.inputDrvs) { - references.insert(i.first); - } - /* Note that the outputs of a derivation are *not* references - (that can be missing (of course) and should not necessarily be - held during a garbage collection). 
*/ - std::string suffix = name + drvExtension; - std::string contents = drv.unparse(); - return settings.readOnlyMode - ? store->computeStorePathForText(suffix, contents, references) - : store->addTextToStore(suffix, contents, references, repair); -} - -/* Read string `s' from stream `str'. */ -static void expect(std::istream& str, const std::string& s) { - char s2[s.size()]; - str.read(s2, s.size()); - if (std::string(s2, s.size()) != s) { - throw FormatError(format("expected string '%1%'") % s); - } -} - -/* Read a C-style string from stream `str'. */ -static std::string parseString(std::istream& str) { - std::string res; - expect(str, "\""); - int c; - while ((c = str.get()) != '"' && c != EOF) { - if (c == '\\') { - c = str.get(); - if (c == 'n') { - res += '\n'; - } else if (c == 'r') { - res += '\r'; - } else if (c == 't') { - res += '\t'; - } else if (c == EOF) { - throw FormatError("unexpected EOF while parsing C-style escape"); - } else { - res += static_cast(c); - } - } else { - res += static_cast(c); - } - } - return res; -} - -static Path parsePath(std::istream& str) { - std::string s = parseString(str); - if (s.empty() || s[0] != '/') { - throw FormatError(format("bad path '%1%' in derivation") % s); - } - return s; -} - -static bool endOfList(std::istream& str) { - if (str.peek() == ',') { - str.get(); - return false; - } - if (str.peek() == ']') { - str.get(); - return true; - } - return false; -} - -static StringSet parseStrings(std::istream& str, bool arePaths) { - StringSet res; - while (!endOfList(str)) { - res.insert(arePaths ? parsePath(str) : parseString(str)); - } - return res; -} - -Derivation parseDerivation(const std::string& s) { - Derivation drv; - istringstream_nocopy str(s); - expect(str, "Derive(["); - - /* Parse the list of outputs. */ - while (!endOfList(str)) { - DerivationOutput out; - expect(str, "("); - std::string id = parseString(str); - expect(str, ","); - out.path = parsePath(str); - expect(str, ","); - out.hashAlgo = parseString(str); - expect(str, ","); - out.hash = parseString(str); - expect(str, ")"); - drv.outputs[id] = out; - } - - /* Parse the list of input derivations. */ - expect(str, ",["); - while (!endOfList(str)) { - expect(str, "("); - Path drvPath = parsePath(str); - expect(str, ",["); - drv.inputDrvs[drvPath] = parseStrings(str, false); - expect(str, ")"); - } - - expect(str, ",["); - drv.inputSrcs = parseStrings(str, true); - expect(str, ","); - drv.platform = parseString(str); - expect(str, ","); - drv.builder = parseString(str); - - /* Parse the builder arguments. */ - expect(str, ",["); - while (!endOfList(str)) { - drv.args.push_back(parseString(str)); - } - - /* Parse the environment variables. 
*/ - expect(str, ",["); - while (!endOfList(str)) { - expect(str, "("); - std::string name = parseString(str); - expect(str, ","); - std::string value = parseString(str); - expect(str, ")"); - drv.env[name] = value; - } - - expect(str, ")"); - return drv; -} - -Derivation readDerivation(const Path& drvPath) { - try { - return parseDerivation(readFile(drvPath)); - } catch (FormatError& e) { - throw Error(format("error parsing derivation '%1%': %2%") % drvPath % - e.msg()); - } -} - -Derivation Store::derivationFromPath(const Path& drvPath) { - assertStorePath(drvPath); - ensurePath(drvPath); - auto accessor = getFSAccessor(); - try { - return parseDerivation(accessor->readFile(drvPath)); - } catch (FormatError& e) { - throw Error(format("error parsing derivation '%1%': %2%") % drvPath % - e.msg()); - } -} - -const char* findChunk(const char* begin) { - while (*begin != 0 && *begin != '\"' && *begin != '\\' && *begin != '\n' && - *begin != '\r' && *begin != '\t') { - begin++; - } - - return begin; -} - -static void printString(std::string& res, const std::string& s) { - res += '"'; - - const char* it = s.c_str(); - while (*it != 0) { - const char* end = findChunk(it); - std::copy(it, end, std::back_inserter(res)); - - it = end; - - switch (*it) { - case '"': - case '\\': - res += "\\"; - res += *it; - break; - case '\n': - res += "\\n"; - break; - case '\r': - res += "\\r"; - break; - case '\t': - res += "\\t"; - break; - default: - continue; - } - - it++; - } - - res += '"'; -} - -template -static void printStrings(std::string& res, ForwardIterator i, - ForwardIterator j) { - res += '['; - bool first = true; - for (; i != j; ++i) { - if (first) { - first = false; - } else { - res += ','; - } - printString(res, *i); - } - res += ']'; -} - -std::string Derivation::unparse() const { - std::string s; - s.reserve(65536); - s += "Derive(["; - - bool first = true; - for (auto& i : outputs) { - if (first) { - first = false; - } else { - s += ','; - } - s += '('; - printString(s, i.first); - s += ','; - printString(s, i.second.path); - s += ','; - printString(s, i.second.hashAlgo); - s += ','; - printString(s, i.second.hash); - s += ')'; - } - - s += "],["; - first = true; - for (auto& i : inputDrvs) { - if (first) { - first = false; - } else { - s += ','; - } - s += '('; - printString(s, i.first); - s += ','; - printStrings(s, i.second.begin(), i.second.end()); - s += ')'; - } - - s += "],"; - printStrings(s, inputSrcs.begin(), inputSrcs.end()); - - s += ','; - printString(s, platform); - s += ','; - printString(s, builder); - s += ','; - printStrings(s, args.begin(), args.end()); - - s += ",["; - first = true; - for (auto& i : env) { - if (first) { - first = false; - } else { - s += ','; - } - s += '('; - printString(s, i.first); - s += ','; - printString(s, i.second); - s += ')'; - } - - s += "])"; - - return s; -} - -bool isDerivation(const std::string& fileName) { - return absl::EndsWith(fileName, drvExtension); -} - -bool BasicDerivation::isFixedOutput() const { - return outputs.size() == 1 && outputs.begin()->first == "out" && - !outputs.begin()->second.hash.empty(); -} - -DrvHashes drvHashes; - -/* Returns the hash of a derivation modulo fixed-output - subderivations. A fixed-output derivation is a derivation with one - output (`out') for which an expected hash and hash algorithm are - specified (using the `outputHash' and `outputHashAlgo' - attributes). We don't want changes to such derivations to - propagate upwards through the dependency graph, changing output - paths everywhere. 
- - For instance, if we change the url in a call to the `fetchurl' - function, we do not want to rebuild everything depending on it - (after all, (the hash of) the file being downloaded is unchanged). - So the *output paths* should not change. On the other hand, the - *derivation paths* should change to reflect the new dependency - graph. - - That's what this function does: it returns a hash which is just the - hash of the derivation ATerm, except that any input derivation - paths have been replaced by the result of a recursive call to this - function, and that for fixed-output derivations we return a hash of - its output path. */ -Hash hashDerivationModulo(Store& store, Derivation drv) { - /* Return a fixed hash for fixed-output derivations. */ - if (drv.isFixedOutput()) { - auto i = drv.outputs.begin(); - return hashString(htSHA256, "fixed:out:" + i->second.hashAlgo + ":" + - i->second.hash + ":" + i->second.path); - } - - /* For other derivations, replace the inputs paths with recursive - calls to this function.*/ - DerivationInputs inputs2; - for (auto& i : drv.inputDrvs) { - Hash h = drvHashes[i.first]; - if (!h) { - assert(store.isValidPath(i.first)); - Derivation drv2 = readDerivation(store.toRealPath(i.first)); - h = hashDerivationModulo(store, drv2); - drvHashes[i.first] = h; - } - inputs2[h.to_string(Base16, false)] = i.second; - } - drv.inputDrvs = inputs2; - - return hashString(htSHA256, drv.unparse()); -} - -DrvPathWithOutputs parseDrvPathWithOutputs(absl::string_view path) { - auto pos = path.find('!'); - if (pos == absl::string_view::npos) { - return DrvPathWithOutputs(path, std::set()); - } - - return DrvPathWithOutputs( - path.substr(0, pos), - absl::StrSplit(path.substr(pos + 1), absl::ByChar(','), - absl::SkipEmpty())); -} - -Path makeDrvPathWithOutputs(const Path& drvPath, - const std::set& outputs) { - return outputs.empty() ? drvPath - : drvPath + "!" + concatStringsSep(",", outputs); -} - -bool wantOutput(const std::string& output, - const std::set& wanted) { - return wanted.empty() || wanted.find(output) != wanted.end(); -} - -PathSet BasicDerivation::outputPaths() const { - PathSet paths; - for (auto& i : outputs) { - paths.insert(i.second.path); - } - return paths; -} - -Source& readDerivation(Source& in, Store& store, BasicDerivation& drv) { - drv.outputs.clear(); - auto nr = readNum(in); - for (size_t n = 0; n < nr; n++) { - auto name = readString(in); - DerivationOutput o; - in >> o.path >> o.hashAlgo >> o.hash; - store.assertStorePath(o.path); - drv.outputs[name] = o; - } - - drv.inputSrcs = readStorePaths(store, in); - in >> drv.platform >> drv.builder; - drv.args = readStrings(in); - - nr = readNum(in); - for (size_t n = 0; n < nr; n++) { - auto key = readString(in); - auto value = readString(in); - drv.env[key] = value; - } - - return in; -} - -Sink& operator<<(Sink& out, const BasicDerivation& drv) { - out << drv.outputs.size(); - for (auto& i : drv.outputs) { - out << i.first << i.second.path << i.second.hashAlgo << i.second.hash; - } - out << drv.inputSrcs << drv.platform << drv.builder << drv.args; - out << drv.env.size(); - for (auto& i : drv.env) { - out << i.first << i.second; - } - return out; -} - -std::string hashPlaceholder(const std::string& outputName) { - // FIXME: memoize? 
- return "/" + hashString(htSHA256, "nix-output:" + outputName) - .to_string(Base32, false); -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/derivations.hh b/third_party/nix/src/libstore/derivations.hh deleted file mode 100644 index 4966b858d3..0000000000 --- a/third_party/nix/src/libstore/derivations.hh +++ /dev/null @@ -1,130 +0,0 @@ -#pragma once - -#include - -#include - -#include "libproto/worker.pb.h" -#include "libstore/store-api.hh" -#include "libutil/hash.hh" -#include "libutil/types.hh" - -namespace nix { - -/* Extension of derivations in the Nix store. */ -const std::string drvExtension = ".drv"; - -/* Abstract syntax of derivations. */ - -struct DerivationOutput { - Path path; - // TODO(grfn): make these two fields a Hash - std::string hashAlgo; /* hash used for expected hash computation */ - std::string hash; /* expected hash, may be null */ - DerivationOutput() {} - DerivationOutput(Path path, std::string hashAlgo, std::string hash) { - this->path = path; - this->hashAlgo = hashAlgo; - this->hash = hash; - } - - explicit DerivationOutput( - const nix::proto::Derivation_DerivationOutput& proto_derivation_output) - : path(proto_derivation_output.path().path()), - hashAlgo(proto_derivation_output.hash_algo()), - hash(proto_derivation_output.hash()) {} - - void parseHashInfo(bool& recursive, Hash& hash) const; - - [[nodiscard]] nix::proto::Derivation_DerivationOutput to_proto() const; -}; - -// TODO(tazjin): Determine whether this actually needs to be ordered. -using DerivationOutputs = absl::btree_map; - -/* For inputs that are sub-derivations, we specify exactly which - output IDs we are interested in. */ -using DerivationInputs = absl::btree_map; - -using StringPairs = absl::btree_map; - -struct BasicDerivation { - DerivationOutputs outputs; /* keyed on symbolic IDs */ - PathSet inputSrcs; /* inputs that are sources */ - std::string platform; - Path builder; - Strings args; - StringPairs env; - - BasicDerivation() = default; - - // Convert the given proto derivation to a BasicDerivation - static BasicDerivation from_proto( - const nix::proto::Derivation* proto_derivation); - - [[nodiscard]] nix::proto::Derivation to_proto() const; - - virtual ~BasicDerivation(){}; - - /* Return the path corresponding to the output identifier `id' in - the given derivation. */ - Path findOutput(const std::string& id) const; - - bool isBuiltin() const; - - /* Return true iff this is a fixed-output derivation. */ - bool isFixedOutput() const; - - /* Return the output paths of a derivation. */ - PathSet outputPaths() const; -}; - -struct Derivation : BasicDerivation { - DerivationInputs inputDrvs; /* inputs that are sub-derivations */ - - /* Print a derivation. */ - std::string unparse() const; -}; - -class Store; - -/* Write a derivation to the Nix store, and return its path. */ -Path writeDerivation(const ref& store, const Derivation& drv, - const std::string& name, RepairFlag repair = NoRepair); - -/* Read a derivation from a file. */ -Derivation readDerivation(const Path& drvPath); - -Derivation parseDerivation(const std::string& s); - -/* Check whether a file name ends with the extension for - derivations. */ -bool isDerivation(const std::string& fileName); - -Hash hashDerivationModulo(Store& store, Derivation drv); - -/* Memoisation of hashDerivationModulo(). */ -typedef std::map DrvHashes; - -extern DrvHashes drvHashes; // FIXME: global, not thread-safe - -/* Split a string specifying a derivation and a set of outputs - (/nix/store/hash-foo!out1,out2,...) 
into the derivation path and - the outputs. */ -using DrvPathWithOutputs = std::pair >; -DrvPathWithOutputs parseDrvPathWithOutputs(absl::string_view path); - -Path makeDrvPathWithOutputs(const Path& drvPath, - const std::set& outputs); - -bool wantOutput(const std::string& output, const std::set& wanted); - -struct Source; -struct Sink; - -Source& readDerivation(Source& in, Store& store, BasicDerivation& drv); -Sink& operator<<(Sink& out, const BasicDerivation& drv); - -std::string hashPlaceholder(const std::string& outputName); - -} // namespace nix diff --git a/third_party/nix/src/libstore/download.cc b/third_party/nix/src/libstore/download.cc deleted file mode 100644 index fd472713e6..0000000000 --- a/third_party/nix/src/libstore/download.cc +++ /dev/null @@ -1,1024 +0,0 @@ -#include "libstore/download.hh" - -#include -#include -#include -#include - -#include "libstore/globals.hh" -#include "libstore/pathlocks.hh" -#include "libstore/s3.hh" -#include "libstore/store-api.hh" -#include "libutil/archive.hh" -#include "libutil/compression.hh" -#include "libutil/finally.hh" -#include "libutil/hash.hh" -#include "libutil/util.hh" - -#ifdef ENABLE_S3 -#include -#endif - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -using namespace std::string_literals; - -namespace nix { - -DownloadSettings downloadSettings; - -static GlobalConfig::Register r1(&downloadSettings); - -std::string resolveUri(const std::string& uri) { - if (uri.compare(0, 8, "channel:") == 0) { - return "https://nixos.org/channels/" + std::string(uri, 8) + - "/nixexprs.tar.xz"; - } - return uri; -} - -struct CurlDownloader : public Downloader { - CURLM* curlm = nullptr; - - std::random_device rd; - std::mt19937 mt19937; - - struct DownloadItem : public std::enable_shared_from_this { - CurlDownloader& downloader; - DownloadRequest request; - DownloadResult result; - bool done = false; // whether either the success or failure function has - // been called - Callback callback; - CURL* req = nullptr; - bool active = - false; // whether the handle has been added to the multi object - std::string status; - - unsigned int attempt = 0; - - /* Don't start this download until the specified time point - has been reached. */ - std::chrono::steady_clock::time_point embargo; - - struct curl_slist* requestHeaders = nullptr; - - std::string encoding; - - bool acceptRanges = false; - - curl_off_t writtenToSink = 0; - - DownloadItem(CurlDownloader& downloader, const DownloadRequest& request, - Callback&& callback) - : downloader(downloader), - request(request), - callback(std::move(callback)), - finalSink([this](const unsigned char* data, size_t len) { - if (this->request.dataCallback) { - long httpStatus = 0; - curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus); - - /* Only write data to the sink if this is a - successful response. */ - if (httpStatus == 0 || httpStatus == 200 || httpStatus == 201 || - httpStatus == 206) { - writtenToSink += len; - this->request.dataCallback((char*)data, len); - } - } else { - this->result.data->append((char*)data, len); - } - }) { - LOG(INFO) << (request.data ? 
"uploading '" : "downloading '") - << request.uri << "'"; - - if (!request.expectedETag.empty()) { - requestHeaders = curl_slist_append( - requestHeaders, ("If-None-Match: " + request.expectedETag).c_str()); - } - if (!request.mimeType.empty()) { - requestHeaders = curl_slist_append( - requestHeaders, ("Content-Type: " + request.mimeType).c_str()); - } - } - - ~DownloadItem() { - if (req != nullptr) { - if (active) { - curl_multi_remove_handle(downloader.curlm, req); - } - curl_easy_cleanup(req); - } - if (requestHeaders != nullptr) { - curl_slist_free_all(requestHeaders); - } - try { - if (!done) { - fail(DownloadError( - Interrupted, - format("download of '%s' was interrupted") % request.uri)); - } - } catch (...) { - ignoreException(); - } - } - - void failEx(const std::exception_ptr& ex) { - assert(!done); - done = true; - callback.rethrow(ex); - } - - template - void fail(const T& e) { - failEx(std::make_exception_ptr(e)); - } - - LambdaSink finalSink; - std::shared_ptr decompressionSink; - - std::exception_ptr writeException; - - size_t writeCallback(void* contents, size_t size, size_t nmemb) { - try { - size_t realSize = size * nmemb; - result.bodySize += realSize; - - if (!decompressionSink) { - decompressionSink = makeDecompressionSink(encoding, finalSink); - } - - (*decompressionSink)(static_cast(contents), realSize); - - return realSize; - } catch (...) { - writeException = std::current_exception(); - return 0; - } - } - - static size_t writeCallbackWrapper(void* contents, size_t size, - size_t nmemb, void* userp) { - return (static_cast(userp)) - ->writeCallback(contents, size, nmemb); - } - - size_t headerCallback(void* contents, size_t size, size_t nmemb) { - size_t realSize = size * nmemb; - std::string line(static_cast(contents), realSize); - DLOG(INFO) << "got header for '" << request.uri - << "': " << absl::StripAsciiWhitespace(line); - if (line.compare(0, 5, "HTTP/") == 0) { // new response starts - result.etag = ""; - std::vector ss = - absl::StrSplit(line, absl::ByChar(' '), absl::SkipEmpty()); - status = ss.size() >= 2 ? ss[1] : ""; - result.data = std::make_shared(); - result.bodySize = 0; - acceptRanges = false; - encoding = ""; - } else { - auto i = line.find(':'); - if (i != std::string::npos) { - std::string name = absl::AsciiStrToLower( - absl::StripAsciiWhitespace(std::string(line, 0, i))); - if (name == "etag") { - result.etag = absl::StripAsciiWhitespace(std::string(line, i + 1)); - /* Hack to work around a GitHub bug: it sends - ETags, but ignores If-None-Match. So if we get - the expected ETag on a 200 response, then shut - down the connection because we already have the - data. 
*/ - if (result.etag == request.expectedETag && status == "200") { - DLOG(INFO) - << "shutting down on 200 HTTP response with expected ETag"; - return 0; - } - } else if (name == "content-encoding") { - encoding = absl::StripAsciiWhitespace(std::string(line, i + 1)); - } else if (name == "accept-ranges" && - absl::AsciiStrToLower(absl::StripAsciiWhitespace( - std::string(line, i + 1))) == "bytes") { - acceptRanges = true; - } - } - } - return realSize; - } - - static size_t headerCallbackWrapper(void* contents, size_t size, - size_t nmemb, void* userp) { - return (static_cast(userp)) - ->headerCallback(contents, size, nmemb); - } - - static int debugCallback(CURL* handle, curl_infotype type, char* data, - size_t size, void* userptr) { - if (type == CURLINFO_TEXT) { - DLOG(INFO) << "curl: " - << absl::StripTrailingAsciiWhitespace( - std::string(data, size)); - } - return 0; - } - - size_t readOffset = 0; - size_t readCallback(char* buffer, size_t size, size_t nitems) { - if (readOffset == request.data->length()) { - return 0; - } - auto count = std::min(size * nitems, request.data->length() - readOffset); - assert(count); - memcpy(buffer, request.data->data() + readOffset, count); - readOffset += count; - return count; - } - - static size_t readCallbackWrapper(char* buffer, size_t size, size_t nitems, - void* userp) { - return (static_cast(userp)) - ->readCallback(buffer, size, nitems); - } - - void init() { - if (req == nullptr) { - req = curl_easy_init(); - } - - curl_easy_reset(req); - - // TODO(tazjin): Add an Abseil flag for this - // if (verbosity >= lvlVomit) { - // curl_easy_setopt(req, CURLOPT_VERBOSE, 1); - // curl_easy_setopt(req, CURLOPT_DEBUGFUNCTION, - // DownloadItem::debugCallback); - // } - - curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str()); - curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L); - curl_easy_setopt(req, CURLOPT_MAXREDIRS, 10); - curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1); - curl_easy_setopt(req, CURLOPT_USERAGENT, - ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + - (downloadSettings.userAgentSuffix != "" - ? 
" " + downloadSettings.userAgentSuffix.get() - : "")) - .c_str()); -#if LIBCURL_VERSION_NUM >= 0x072b00 - curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1); -#endif -#if LIBCURL_VERSION_NUM >= 0x072f00 - if (downloadSettings.enableHttp2) { - curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS); - } else { - curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1); - } -#endif - curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, - DownloadItem::writeCallbackWrapper); - curl_easy_setopt(req, CURLOPT_WRITEDATA, this); - curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, - DownloadItem::headerCallbackWrapper); - curl_easy_setopt(req, CURLOPT_HEADERDATA, this); - - curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders); - - if (request.head) { - curl_easy_setopt(req, CURLOPT_NOBODY, 1); - } - - if (request.data) { - curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); - curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper); - curl_easy_setopt(req, CURLOPT_READDATA, this); - curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, - (curl_off_t)request.data->length()); - } - - if (request.verifyTLS) { - if (!settings.caFile.empty()) { - curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str()); - } - } else { - curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0); - curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0); - } - - curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, - downloadSettings.connectTimeout.get()); - - curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L); - curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, - downloadSettings.stalledDownloadTimeout.get()); - - /* If no file exist in the specified path, curl continues to work - anyway as if netrc support was disabled. */ - curl_easy_setopt(req, CURLOPT_NETRC_FILE, - settings.netrcFile.get().c_str()); - curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL); - - if (writtenToSink != 0) { - curl_easy_setopt(req, CURLOPT_RESUME_FROM_LARGE, writtenToSink); - } - - result.data = std::make_shared(); - result.bodySize = 0; - } - - void finish(CURLcode code) { - long httpStatus = 0; - curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus); - - char* effectiveUriCStr; - curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr); - if (effectiveUriCStr != nullptr) { - result.effectiveUri = effectiveUriCStr; - } - - DLOG(INFO) << "finished " << request.verb() << " of " << request.uri - << "; curl status = " << code - << ", HTTP status = " << httpStatus - << ", body = " << result.bodySize << " bytes"; - - if (decompressionSink) { - try { - decompressionSink->finish(); - } catch (...) 
{ - writeException = std::current_exception(); - } - } - - if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) { - code = CURLE_OK; - httpStatus = 304; - } - - if (writeException) { - failEx(writeException); - - } else if (code == CURLE_OK && - (httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || - httpStatus == 206 || httpStatus == 304 || - httpStatus == 226 /* FTP */ || - httpStatus == 0 /* other protocol */)) { - result.cached = httpStatus == 304; - done = true; - callback(std::move(result)); - } - - else { - // We treat most errors as transient, but won't retry when hopeless - Error err = Transient; - - if (httpStatus == 404 || httpStatus == 410 || - code == CURLE_FILE_COULDNT_READ_FILE) { - // The file is definitely not there - err = NotFound; - } else if (httpStatus == 401 || httpStatus == 403 || - httpStatus == 407) { - // Don't retry on authentication/authorization failures - err = Forbidden; - } else if (httpStatus >= 400 && httpStatus < 500 && httpStatus != 408 && - httpStatus != 429) { - // Most 4xx errors are client errors and are probably not worth - // retrying: - // * 408 means the server timed out waiting for us, so we try again - // * 429 means too many requests, so we retry (with a delay) - err = Misc; - } else if (httpStatus == 501 || httpStatus == 505 || - httpStatus == 511) { - // Let's treat most 5xx (server) errors as transient, except for a - // handful: - // * 501 not implemented - // * 505 http version not supported - // * 511 we're behind a captive portal - err = Misc; - } else { - // Don't bother retrying on certain cURL errors either - switch (code) { - case CURLE_FAILED_INIT: - case CURLE_URL_MALFORMAT: - case CURLE_NOT_BUILT_IN: - case CURLE_REMOTE_ACCESS_DENIED: - case CURLE_FILE_COULDNT_READ_FILE: - case CURLE_FUNCTION_NOT_FOUND: - case CURLE_ABORTED_BY_CALLBACK: - case CURLE_BAD_FUNCTION_ARGUMENT: - case CURLE_INTERFACE_FAILED: - case CURLE_UNKNOWN_OPTION: - case CURLE_SSL_CACERT_BADFILE: - case CURLE_TOO_MANY_REDIRECTS: - case CURLE_WRITE_ERROR: - case CURLE_UNSUPPORTED_PROTOCOL: - err = Misc; - break; - default: // Shut up warnings - break; - } - } - - attempt++; - - auto exc = - code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted - ? DownloadError(Interrupted, fmt("%s of '%s' was interrupted", - request.verb(), request.uri)) - : httpStatus != 0 - ? DownloadError( - err, - fmt("unable to %s '%s': HTTP error %d", request.verb(), - request.uri, httpStatus) + - (code == CURLE_OK ? "" - : fmt(" (curl error: %s)", - curl_easy_strerror(code)))) - : DownloadError( - err, fmt("unable to %s '%s': %s (%d)", request.verb(), - request.uri, curl_easy_strerror(code), code)); - - /* If this is a transient error, then maybe retry the - download after a while. If we're writing to a - sink, we can only retry if the server supports - ranged requests. 
*/ - if (err == Transient && attempt < request.tries && - (!this->request.dataCallback || writtenToSink == 0 || - (acceptRanges && encoding.empty()))) { - int ms = request.baseRetryTimeMs * - std::pow(2.0F, attempt - 1 + - std::uniform_real_distribution<>( - 0.0, 0.5)(downloader.mt19937)); - if (writtenToSink != 0) { - LOG(WARNING) << exc.what() << "; retrying from offset " - << writtenToSink << " in " << ms << "ms"; - } else { - LOG(WARNING) << exc.what() << "; retrying in " << ms << "ms"; - } - embargo = - std::chrono::steady_clock::now() + std::chrono::milliseconds(ms); - downloader.enqueueItem(shared_from_this()); - } else { - fail(exc); - } - } - } - }; - - struct State { - struct EmbargoComparator { - bool operator()(const std::shared_ptr& i1, - const std::shared_ptr& i2) { - return i1->embargo > i2->embargo; - } - }; - bool quit = false; - std::priority_queue, - std::vector>, - EmbargoComparator> - incoming; - }; - - Sync state_; - - /* We can't use a std::condition_variable to wake up the curl - thread, because it only monitors file descriptors. So use a - pipe instead. */ - Pipe wakeupPipe; - - std::thread workerThread; - - CurlDownloader() : mt19937(rd()) { - static std::once_flag globalInit; - std::call_once(globalInit, curl_global_init, CURL_GLOBAL_ALL); - - curlm = curl_multi_init(); - -#if LIBCURL_VERSION_NUM >= 0x072b00 // Multiplex requires >= 7.43.0 - curl_multi_setopt(curlm, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX); -#endif -#if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0 - curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS, - downloadSettings.httpConnections.get()); -#endif - - wakeupPipe.create(); - fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK); - - workerThread = std::thread([&]() { workerThreadEntry(); }); - } - - ~CurlDownloader() override { - stopWorkerThread(); - - workerThread.join(); - - if (curlm != nullptr) { - curl_multi_cleanup(curlm); - } - } - - void stopWorkerThread() { - /* Signal the worker thread to exit. */ - { - auto state(state_.lock()); - state->quit = true; - } - writeFull(wakeupPipe.writeSide.get(), " ", false); - } - - void workerThreadMain() { - /* Cause this thread to be notified on SIGINT. */ - auto callback = createInterruptCallback([&]() { stopWorkerThread(); }); - - std::map> items; - - bool quit = false; - - std::chrono::steady_clock::time_point nextWakeup; - - while (!quit) { - checkInterrupt(); - - /* Let curl do its thing. */ - int running; - CURLMcode mc = curl_multi_perform(curlm, &running); - if (mc != CURLM_OK) { - throw nix::Error( - format("unexpected error from curl_multi_perform(): %s") % - curl_multi_strerror(mc)); - } - - /* Set the promises of any finished requests. */ - CURLMsg* msg; - int left; - while ((msg = curl_multi_info_read(curlm, &left)) != nullptr) { - if (msg->msg == CURLMSG_DONE) { - auto i = items.find(msg->easy_handle); - assert(i != items.end()); - i->second->finish(msg->data.result); - curl_multi_remove_handle(curlm, i->second->req); - i->second->active = false; - items.erase(i); - } - } - - /* Wait for activity, including wakeup events. */ - int numfds = 0; - struct curl_waitfd extraFDs[1]; - extraFDs[0].fd = wakeupPipe.readSide.get(); - extraFDs[0].events = CURL_WAIT_POLLIN; - extraFDs[0].revents = 0; - long maxSleepTimeMs = items.empty() ? 10000 : 100; - auto sleepTimeMs = - nextWakeup != std::chrono::steady_clock::time_point() - ? 
std::max( - 0, - static_cast( - std::chrono::duration_cast( - nextWakeup - std::chrono::steady_clock::now()) - .count())) - : maxSleepTimeMs; - VLOG(2) << "download thread waiting for " << sleepTimeMs << " ms"; - mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds); - if (mc != CURLM_OK) { - throw nix::Error(format("unexpected error from curl_multi_wait(): %s") % - curl_multi_strerror(mc)); - } - - nextWakeup = std::chrono::steady_clock::time_point(); - - /* Add new curl requests from the incoming requests queue, - except for requests that are embargoed (waiting for a - retry timeout to expire). */ - if ((extraFDs[0].revents & CURL_WAIT_POLLIN) != 0) { - char buf[1024]; - auto res = read(extraFDs[0].fd, buf, sizeof(buf)); - if (res == -1 && errno != EINTR) { - throw SysError("reading curl wakeup socket"); - } - } - - std::vector> incoming; - auto now = std::chrono::steady_clock::now(); - - { - auto state(state_.lock()); - while (!state->incoming.empty()) { - auto item = state->incoming.top(); - if (item->embargo <= now) { - incoming.push_back(item); - state->incoming.pop(); - } else { - if (nextWakeup == std::chrono::steady_clock::time_point() || - item->embargo < nextWakeup) { - nextWakeup = item->embargo; - } - break; - } - } - quit = state->quit; - } - - for (auto& item : incoming) { - DLOG(INFO) << "starting " << item->request.verb() << " of " - << item->request.uri; - item->init(); - curl_multi_add_handle(curlm, item->req); - item->active = true; - items[item->req] = item; - } - } - - DLOG(INFO) << "download thread shutting down"; - } - - void workerThreadEntry() { - try { - workerThreadMain(); - } catch (nix::Interrupted& e) { - } catch (std::exception& e) { - LOG(ERROR) << "unexpected error in download thread: " << e.what(); - } - - { - auto state(state_.lock()); - while (!state->incoming.empty()) { - state->incoming.pop(); - } - state->quit = true; - } - } - - void enqueueItem(const std::shared_ptr& item) { - if (item->request.data && !absl::StartsWith(item->request.uri, "http://") && - !absl::StartsWith(item->request.uri, "https://")) { - throw nix::Error("uploading to '%s' is not supported", item->request.uri); - } - - { - auto state(state_.lock()); - if (state->quit) { - throw nix::Error( - "cannot enqueue download request because the download thread is " - "shutting down"); - } - state->incoming.push(item); - } - writeFull(wakeupPipe.writeSide.get(), " "); - } - -#ifdef ENABLE_S3 - std::tuple parseS3Uri( - std::string uri) { - auto [path, params] = splitUriAndParams(uri); - - auto slash = path.find('/', 5); // 5 is the length of "s3://" prefix - if (slash == std::string::npos) { - throw nix::Error("bad S3 URI '%s'", path); - } - - std::string bucketName(path, 5, slash - 5); - std::string key(path, slash + 1); - - return {bucketName, key, params}; - } -#endif - - void enqueueDownload(const DownloadRequest& request, - Callback callback) override { - /* Ugly hack to support s3:// URIs. 
*/ - if (absl::StartsWith(request.uri, "s3://")) { - // FIXME: do this on a worker thread - try { -#ifdef ENABLE_S3 - auto [bucketName, key, params] = parseS3Uri(request.uri); - - std::string profile = get(params, "profile", ""); - std::string region = get(params, "region", Aws::Region::US_EAST_1); - std::string scheme = get(params, "scheme", ""); - std::string endpoint = get(params, "endpoint", ""); - - S3Helper s3Helper(profile, region, scheme, endpoint); - - // FIXME: implement ETag - auto s3Res = s3Helper.getObject(bucketName, key); - DownloadResult res; - if (!s3Res.data) - throw DownloadError( - NotFound, fmt("S3 object '%s' does not exist", request.uri)); - res.data = s3Res.data; - callback(std::move(res)); -#else - throw nix::Error( - "cannot download '%s' because Nix is not built with S3 support", - request.uri); -#endif - } catch (...) { - callback.rethrow(); - } - return; - } - - enqueueItem( - std::make_shared(*this, request, std::move(callback))); - } -}; - -ref getDownloader() { - static ref downloader = makeDownloader(); - return downloader; -} - -ref makeDownloader() { return make_ref(); } - -std::future Downloader::enqueueDownload( - const DownloadRequest& request) { - auto promise = std::make_shared>(); - enqueueDownload( - request, - Callback([promise](std::future fut) { - try { - promise->set_value(fut.get()); - } catch (...) { - promise->set_exception(std::current_exception()); - } - })); - return promise->get_future(); -} - -DownloadResult Downloader::download(const DownloadRequest& request) { - return enqueueDownload(request).get(); -} - -void Downloader::download(DownloadRequest&& request, Sink& sink) { - /* Note: we can't call 'sink' via request.dataCallback, because - that would cause the sink to execute on the downloader - thread. If 'sink' is a coroutine, this will fail. Also, if the - sink is expensive (e.g. one that does decompression and writing - to the Nix store), it would stall the download thread too much. - Therefore we use a buffer to communicate data between the - download thread and the calling thread. */ - - struct State { - bool quit = false; - std::exception_ptr exc; - std::string data; - std::condition_variable avail, request; - }; - - auto _state = std::make_shared>(); - - /* In case of an exception, wake up the download thread. FIXME: - abort the download request. */ - Finally finally([&]() { - auto state(_state->lock()); - state->quit = true; - state->request.notify_one(); - }); - - request.dataCallback = [_state](char* buf, size_t len) { - auto state(_state->lock()); - - if (state->quit) { - return; - } - - /* If the buffer is full, then go to sleep until the calling - thread wakes us up (i.e. when it has removed data from the - buffer). We don't wait forever to prevent stalling the - download thread. (Hopefully sleeping will throttle the - sender.) */ - if (state->data.size() > 1024 * 1024) { - DLOG(INFO) << "download buffer is full; going to sleep"; - state.wait_for(state->request, std::chrono::seconds(10)); - } - - /* Append data to the buffer and wake up the calling - thread. */ - state->data.append(buf, len); - state->avail.notify_one(); - }; - - enqueueDownload(request, Callback( - [_state](std::future fut) { - auto state(_state->lock()); - state->quit = true; - try { - fut.get(); - } catch (...) 
{ - state->exc = std::current_exception(); - } - state->avail.notify_one(); - state->request.notify_one(); - })); - - while (true) { - checkInterrupt(); - - std::string chunk; - - /* Grab data if available, otherwise wait for the download - thread to wake us up. */ - { - auto state(_state->lock()); - - while (state->data.empty()) { - if (state->quit) { - if (state->exc) { - std::rethrow_exception(state->exc); - } - return; - } - - state.wait(state->avail); - } - - chunk = std::move(state->data); - state->data = std::string(); - - state->request.notify_one(); - } - - /* Flush the data to the sink and wake up the download thread - if it's blocked on a full buffer. We don't hold the state - lock while doing this to prevent blocking the download - thread if sink() takes a long time. */ - sink(reinterpret_cast(chunk.data()), chunk.size()); - } -} - -CachedDownloadResult Downloader::downloadCached( - const ref& store, const CachedDownloadRequest& request) { - auto url = resolveUri(request.uri); - - auto name = request.name; - if (name.empty()) { - auto p = url.rfind('/'); - if (p != std::string::npos) { - name = std::string(url, p + 1); - } - } - - Path expectedStorePath; - if (request.expectedHash) { - expectedStorePath = - store->makeFixedOutputPath(request.unpack, request.expectedHash, name); - if (store->isValidPath(expectedStorePath)) { - CachedDownloadResult result; - result.storePath = expectedStorePath; - result.path = store->toRealPath(expectedStorePath); - return result; - } - } - - Path cacheDir = getCacheDir() + "/nix/tarballs"; - createDirs(cacheDir); - - std::string urlHash = hashString(htSHA256, name + std::string("\0"s) + url) - .to_string(Base32, false); - - Path dataFile = cacheDir + "/" + urlHash + ".info"; - Path fileLink = cacheDir + "/" + urlHash + "-file"; - - PathLocks lock({fileLink}, fmt("waiting for lock on '%1%'...", fileLink)); - - Path storePath; - - std::string expectedETag; - - bool skip = false; - - CachedDownloadResult result; - - if (pathExists(fileLink) && pathExists(dataFile)) { - storePath = readLink(fileLink); - store->addTempRoot(storePath); - if (store->isValidPath(storePath)) { - std::vector ss = absl::StrSplit( - readFile(dataFile), absl::ByChar('\n'), absl::SkipEmpty()); - if (ss.size() >= 3 && ss[0] == url) { - time_t lastChecked; - if (absl::SimpleAtoi(ss[2], &lastChecked) && - static_cast(lastChecked) + request.ttl >= - static_cast(time(nullptr))) { - skip = true; - result.effectiveUri = request.uri; - result.etag = ss[1]; - } else if (!ss[1].empty()) { - DLOG(INFO) << "verifying previous ETag: " << ss[1]; - expectedETag = ss[1]; - } - } - } else { - storePath = ""; - } - } - - if (!skip) { - try { - DownloadRequest request2(url); - request2.expectedETag = expectedETag; - auto res = download(request2); - result.effectiveUri = res.effectiveUri; - result.etag = res.etag; - - if (!res.cached) { - ValidPathInfo info; - StringSink sink; - dumpString(*res.data, sink); - Hash hash = hashString( - request.expectedHash ? 
request.expectedHash.type : htSHA256, - *res.data); - info.path = store->makeFixedOutputPath(false, hash, name); - info.narHash = hashString(htSHA256, *sink.s); - info.narSize = sink.s->size(); - info.ca = makeFixedOutputCA(false, hash); - store->addToStore(info, sink.s, NoRepair, NoCheckSigs); - storePath = info.path; - } - - assert(!storePath.empty()); - replaceSymlink(storePath, fileLink); - - writeFile(dataFile, url + "\n" + res.etag + "\n" + - std::to_string(time(nullptr)) + "\n"); - } catch (DownloadError& e) { - if (storePath.empty()) { - throw; - } - LOG(WARNING) << e.msg() << "; using cached result"; - result.etag = expectedETag; - } - } - - if (request.unpack) { - Path unpackedLink = cacheDir + "/" + baseNameOf(storePath) + "-unpacked"; - PathLocks lock2({unpackedLink}, - fmt("waiting for lock on '%1%'...", unpackedLink)); - Path unpackedStorePath; - if (pathExists(unpackedLink)) { - unpackedStorePath = readLink(unpackedLink); - store->addTempRoot(unpackedStorePath); - if (!store->isValidPath(unpackedStorePath)) { - unpackedStorePath = ""; - } - } - if (unpackedStorePath.empty()) { - LOG(INFO) << "unpacking '" << url << "' ..."; - Path tmpDir = createTempDir(); - AutoDelete autoDelete(tmpDir, true); - // FIXME: this requires GNU tar for decompression. - runProgram("tar", true, - {"xf", store->toRealPath(storePath), "-C", tmpDir, - "--strip-components", "1"}); - unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, - defaultPathFilter, NoRepair); - } - replaceSymlink(unpackedStorePath, unpackedLink); - storePath = unpackedStorePath; - } - - if (!expectedStorePath.empty() && storePath != expectedStorePath) { - unsigned int statusCode = 102; - Hash gotHash = - request.unpack - ? hashPath(request.expectedHash.type, store->toRealPath(storePath)) - .first - : hashFile(request.expectedHash.type, store->toRealPath(storePath)); - throw nix::Error(statusCode, - "hash mismatch in file downloaded from '%s':\n wanted: " - "%s\n got: %s", - url, request.expectedHash.to_string(), - gotHash.to_string()); - } - - result.storePath = storePath; - result.path = store->toRealPath(storePath); - return result; -} - -bool isUri(const std::string& s) { - if (s.compare(0, 8, "channel:") == 0) { - return true; - } - size_t pos = s.find("://"); - if (pos == std::string::npos) { - return false; - } - std::string scheme(s, 0, pos); - return scheme == "http" || scheme == "https" || scheme == "file" || - scheme == "channel" || scheme == "git" || scheme == "s3" || - scheme == "ssh"; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/download.hh b/third_party/nix/src/libstore/download.hh deleted file mode 100644 index cbfab5f40d..0000000000 --- a/third_party/nix/src/libstore/download.hh +++ /dev/null @@ -1,133 +0,0 @@ -#pragma once - -#include -#include - -#include "libstore/globals.hh" -#include "libutil/hash.hh" -#include "libutil/types.hh" - -namespace nix { - -struct DownloadSettings : Config { - Setting enableHttp2{this, true, "http2", - "Whether to enable HTTP/2 support."}; - - Setting userAgentSuffix{ - this, "", "user-agent-suffix", - "String appended to the user agent in HTTP requests."}; - - Setting httpConnections{this, - 25, - "http-connections", - "Number of parallel HTTP connections.", - {"binary-caches-parallel-connections"}}; - - Setting connectTimeout{ - this, 0, "connect-timeout", - "Timeout for connecting to servers during downloads. 
0 means use curl's " - "builtin default."}; - - Setting stalledDownloadTimeout{ - this, 300, "stalled-download-timeout", - "Timeout (in seconds) for receiving data from servers during download. " - "Nix cancels idle downloads after this timeout's duration."}; - - Setting tries{ - this, 5, "download-attempts", - "How often Nix will attempt to download a file before giving up."}; -}; - -extern DownloadSettings downloadSettings; - -struct DownloadRequest { - std::string uri; - std::string expectedETag; - bool verifyTLS = true; - bool head = false; - size_t tries = downloadSettings.tries; - unsigned int baseRetryTimeMs = 250; - bool decompress = true; - std::shared_ptr data; - std::string mimeType; - std::function dataCallback; - - DownloadRequest(const std::string& uri) : uri(uri) {} - - std::string verb() { return data ? "upload" : "download"; } -}; - -struct DownloadResult { - bool cached = false; - std::string etag; - std::string effectiveUri; - std::shared_ptr data; - uint64_t bodySize = 0; -}; - -struct CachedDownloadRequest { - std::string uri; - bool unpack = false; - std::string name; - Hash expectedHash; - unsigned int ttl = settings.tarballTtl; - - CachedDownloadRequest(const std::string& uri) : uri(uri) {} -}; - -struct CachedDownloadResult { - // Note: 'storePath' may be different from 'path' when using a - // chroot store. - Path storePath; - Path path; - std::optional etag; - std::string effectiveUri; -}; - -class Store; - -struct Downloader { - virtual ~Downloader() {} - - /* Enqueue a download request, returning a future to the result of - the download. The future may throw a DownloadError - exception. */ - virtual void enqueueDownload(const DownloadRequest& request, - Callback callback) = 0; - - std::future enqueueDownload(const DownloadRequest& request); - - /* Synchronously download a file. */ - DownloadResult download(const DownloadRequest& request); - - /* Download a file, writing its data to a sink. The sink will be - invoked on the thread of the caller. */ - void download(DownloadRequest&& request, Sink& sink); - - /* Check if the specified file is already in ~/.cache/nix/tarballs - and is more recent than ‘tarball-ttl’ seconds. Otherwise, - use the recorded ETag to verify if the server has a more - recent version, and if so, download it to the Nix store. */ - CachedDownloadResult downloadCached(const ref& store, - const CachedDownloadRequest& request); - - enum Error { NotFound, Forbidden, Misc, Transient, Interrupted }; -}; - -/* Return a shared Downloader object. Using this object is preferred - because it enables connection reuse and HTTP/2 multiplexing. */ -ref getDownloader(); - -/* Return a new Downloader object. 
*/ -ref makeDownloader(); - -class DownloadError : public Error { - public: - Downloader::Error error; - DownloadError(Downloader::Error error, const FormatOrString& fs) - : Error(fs), error(error) {} -}; - -bool isUri(const std::string& s); - -} // namespace nix diff --git a/third_party/nix/src/libstore/export-import.cc b/third_party/nix/src/libstore/export-import.cc deleted file mode 100644 index 8e93144339..0000000000 --- a/third_party/nix/src/libstore/export-import.cc +++ /dev/null @@ -1,111 +0,0 @@ -#include - -#include "libstore/store-api.hh" -#include "libstore/worker-protocol.hh" -#include "libutil/archive.hh" - -namespace nix { - -struct HashAndWriteSink : Sink { - Sink& writeSink; - HashSink hashSink; - explicit HashAndWriteSink(Sink& writeSink) - : writeSink(writeSink), hashSink(htSHA256) {} - void operator()(const unsigned char* data, size_t len) override { - writeSink(data, len); - hashSink(data, len); - } - Hash currentHash() { return hashSink.currentHash().first; } -}; - -void Store::exportPaths(const Paths& paths, Sink& sink) { - Paths sorted = topoSortPaths(PathSet(paths.begin(), paths.end())); - std::reverse(sorted.begin(), sorted.end()); - - std::string doneLabel("paths exported"); - // logger->incExpected(doneLabel, sorted.size()); - - for (auto& path : sorted) { - // Activity act(*logger, lvlInfo, format("exporting path '%s'") % path); - sink << 1; - exportPath(path, sink); - // logger->incProgress(doneLabel); - } - - sink << 0; -} - -void Store::exportPath(const Path& path, Sink& sink) { - auto info = queryPathInfo(path); - - HashAndWriteSink hashAndWriteSink(sink); - - narFromPath(path, hashAndWriteSink); - - /* Refuse to export paths that have changed. This prevents - filesystem corruption from spreading to other machines. - Don't complain if the stored hash is zero (unknown). */ - Hash hash = hashAndWriteSink.currentHash(); - if (hash != info->narHash && info->narHash != Hash(info->narHash.type)) { - throw Error(format("hash of path '%1%' has changed from '%2%' to '%3%'!") % - path % info->narHash.to_string() % hash.to_string()); - } - - hashAndWriteSink << exportMagic << path << info->references << info->deriver - << 0; -} - -Paths Store::importPaths(Source& source, - const std::shared_ptr& accessor, - CheckSigsFlag checkSigs) { - Paths res; - while (true) { - auto n = readNum(source); - if (n == 0) { - break; - } - if (n != 1) { - throw Error( - "input doesn't look like something created by 'nix-store --export'"); - } - - /* Extract the NAR from the source. */ - TeeSink tee(source); - parseDump(tee, tee.source); - - uint32_t magic = readInt(source); - if (magic != exportMagic) { - throw Error("Nix archive cannot be imported; wrong format"); - } - - ValidPathInfo info; - - info.path = readStorePath(*this, source); - - // Activity act(*logger, lvlInfo, format("importing path '%s'") % - // info.path); - - info.references = readStorePaths(*this, source); - - info.deriver = readString(source); - if (!info.deriver.empty()) { - assertStorePath(info.deriver); - } - - info.narHash = hashString(htSHA256, *tee.source.data); - info.narSize = tee.source.data->size(); - - // Ignore optional legacy signature. 
- if (readInt(source) == 1) { - readString(source); - } - - addToStore(info, tee.source.data, NoRepair, checkSigs, accessor); - - res.push_back(info.path); - } - - return res; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/fs-accessor.hh b/third_party/nix/src/libstore/fs-accessor.hh deleted file mode 100644 index 1bc1373dcb..0000000000 --- a/third_party/nix/src/libstore/fs-accessor.hh +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#include "libutil/types.hh" - -namespace nix { - -/* An abstract class for accessing a filesystem-like structure, such - as a (possibly remote) Nix store or the contents of a NAR file. */ -class FSAccessor { - public: - enum Type { tMissing, tRegular, tSymlink, tDirectory }; - - struct Stat { - Type type = tMissing; - uint64_t fileSize = 0; // regular files only - bool isExecutable = false; // regular files only - uint64_t narOffset = 0; // regular files only - }; - - virtual ~FSAccessor() {} - - virtual Stat stat(const Path& path) = 0; - - virtual StringSet readDirectory(const Path& path) = 0; - - virtual std::string readFile(const Path& path) = 0; - - virtual std::string readLink(const Path& path) = 0; -}; - -} // namespace nix diff --git a/third_party/nix/src/libstore/gc.cc b/third_party/nix/src/libstore/gc.cc deleted file mode 100644 index 07dc10629a..0000000000 --- a/third_party/nix/src/libstore/gc.cc +++ /dev/null @@ -1,997 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/local-store.hh" -#include "libutil/finally.hh" - -namespace nix { - -constexpr std::string_view kGcLockName = "gc.lock"; -constexpr std::string_view kGcRootsDir = "gcroots"; - -/* Acquire the global GC lock. This is used to prevent new Nix - processes from starting after the temporary root files have been - read. To be precise: when they try to create a new temporary root - file, they will block until the garbage collector has finished / - yielded the GC lock. */ -AutoCloseFD LocalStore::openGCLock(LockType lockType) { - Path fnGCLock = absl::StrCat(stateDir.get(), "/", kGcLockName); - - DLOG(INFO) << "acquiring global GC lock " << fnGCLock; - - AutoCloseFD fdGCLock( - open(fnGCLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600)); - - if (!fdGCLock) { - throw SysError(format("opening global GC lock '%1%'") % fnGCLock); - } - - if (!lockFile(fdGCLock.get(), lockType, false)) { - LOG(ERROR) << "waiting for the big garbage collector lock..."; - lockFile(fdGCLock.get(), lockType, true); - } - - /* !!! Restrict read permission on the GC root. Otherwise any - process that can open the file for reading can DoS the - collector. */ - - return fdGCLock; -} - -static void makeSymlink(const Path& link, const Path& target) { - /* Create directories up to `gcRoot'. */ - createDirs(dirOf(link)); - - /* Create the new symlink. */ - Path tempLink = - (format("%1%.tmp-%2%-%3%") % link % getpid() % random()).str(); - createSymlink(target, tempLink); - - /* Atomically replace the old one. 
*/ - if (rename(tempLink.c_str(), link.c_str()) == -1) { - throw SysError(format("cannot rename '%1%' to '%2%'") % tempLink % link); - } -} - -void LocalStore::syncWithGC() { AutoCloseFD fdGCLock = openGCLock(ltRead); } - -void LocalStore::addIndirectRoot(const Path& path) { - std::string hash = hashString(htSHA1, path).to_string(Base32, false); - Path realRoot = - canonPath(absl::StrCat(stateDir.get(), "/", kGcRootsDir, "/auto/", hash)); - makeSymlink(realRoot, path); -} - -Path LocalFSStore::addPermRoot(const Path& _storePath, const Path& _gcRoot, - bool indirect, bool allowOutsideRootsDir) { - Path storePath(canonPath(_storePath)); - Path gcRoot(canonPath(_gcRoot)); - assertStorePath(storePath); - - if (isInStore(gcRoot)) { - throw Error(format("creating a garbage collector root (%1%) in the Nix " - "store is forbidden " - "(are you running nix-build inside the store?)") % - gcRoot); - } - - if (indirect) { - /* Don't clobber the link if it already exists and doesn't - point to the Nix store. */ - if (pathExists(gcRoot) && - (!isLink(gcRoot) || !isInStore(readLink(gcRoot)))) { - throw Error(format("cannot create symlink '%1%'; already exists") % - gcRoot); - } - makeSymlink(gcRoot, storePath); - addIndirectRoot(gcRoot); - } - - else { - if (!allowOutsideRootsDir) { - Path rootsDir = canonPath(absl::StrCat(stateDir.get(), "/", kGcRootsDir)); - - if (std::string(gcRoot, 0, rootsDir.size() + 1) != rootsDir + "/") { - throw Error(format("path '%1%' is not a valid garbage collector root; " - "it's not in the directory '%2%'") % - gcRoot % rootsDir); - } - } - - if (baseNameOf(gcRoot) == baseNameOf(storePath)) { - writeFile(gcRoot, ""); - } else { - makeSymlink(gcRoot, storePath); - } - } - - /* Check that the root can be found by the garbage collector. - !!! This can be very slow on machines that have many roots. - Instead of reading all the roots, it would be more efficient to - check if the root is in a directory in or linked from the - gcroots directory. */ - if (settings.checkRootReachability) { - Roots roots = findRoots(false); - if (roots[storePath].count(gcRoot) == 0) { - LOG(ERROR) << "warning: '" << gcRoot - << "' is not in a directory where the garbage " - << "collector looks for roots; therefore, '" << storePath - << "' might be removed by the garbage collector"; - } - } - - /* Grab the global GC root, causing us to block while a GC is in - progress. This prevents the set of permanent roots from - increasing while a GC is in progress. */ - syncWithGC(); - - return gcRoot; -} - -void LocalStore::addTempRoot(const Path& path) { - auto state(_state.lock()); - - /* Create the temporary roots file for this process. */ - if (!state->fdTempRoots) { - while (true) { - AutoCloseFD fdGCLock = openGCLock(ltRead); - - if (pathExists(fnTempRoots)) { - /* It *must* be stale, since there can be no two - processes with the same pid. */ - unlink(fnTempRoots.c_str()); - } - - state->fdTempRoots = openLockFile(fnTempRoots, true); - - fdGCLock = AutoCloseFD(-1); - - DLOG(INFO) << "acquiring read lock on " << fnTempRoots; - lockFile(state->fdTempRoots.get(), ltRead, true); - - /* Check whether the garbage collector didn't get in our - way. */ - struct stat st; - if (fstat(state->fdTempRoots.get(), &st) == -1) { - throw SysError(format("statting '%1%'") % fnTempRoots); - } - if (st.st_size == 0) { - break; - } - - /* The garbage collector deleted this file before we could - get a lock. (It won't delete the file after we get a - lock.) Try again. */ - } - } - - /* Upgrade the lock to a write lock. 
This will cause us to block - if the garbage collector is holding our lock. */ - DLOG(INFO) << "acquiring write lock on " << fnTempRoots; - lockFile(state->fdTempRoots.get(), ltWrite, true); - - std::string s = path + '\0'; - writeFull(state->fdTempRoots.get(), s); - - /* Downgrade to a read lock. */ - DLOG(INFO) << "downgrading to read lock on " << fnTempRoots; - lockFile(state->fdTempRoots.get(), ltRead, true); -} - -constexpr std::string_view kCensored = "{censored}"; - -void LocalStore::findTempRoots(FDs& fds, Roots& tempRoots, bool censor) { - /* Read the `temproots' directory for per-process temporary root - files. */ - for (auto& i : readDirectory(tempRootsDir)) { - Path path = tempRootsDir + "/" + i.name; - - pid_t pid = std::stoi(i.name); - - DLOG(INFO) << "reading temporary root file " << path; - FDPtr fd(new AutoCloseFD(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666))); - if (!*fd) { - /* It's okay if the file has disappeared. */ - if (errno == ENOENT) { - continue; - } - throw SysError(format("opening temporary roots file '%1%'") % path); - } - - /* This should work, but doesn't, for some reason. */ - // FDPtr fd(new AutoCloseFD(openLockFile(path, false))); - // if (*fd == -1) { continue; } - - /* Try to acquire a write lock without blocking. This can - only succeed if the owning process has died. In that case - we don't care about its temporary roots. */ - if (lockFile(fd->get(), ltWrite, false)) { - LOG(ERROR) << "removing stale temporary roots file " << path; - unlink(path.c_str()); - writeFull(fd->get(), "d"); - continue; - } - - /* Acquire a read lock. This will prevent the owning process - from upgrading to a write lock, therefore it will block in - addTempRoot(). */ - DLOG(INFO) << "waiting for read lock on " << path; - lockFile(fd->get(), ltRead, true); - - /* Read the entire file. */ - std::string contents = readFile(fd->get()); - - /* Extract the roots. */ - std::string::size_type pos = 0; - std::string::size_type end; - - while ((end = contents.find(static_cast(0), pos)) != - std::string::npos) { - Path root(contents, pos, end - pos); - DLOG(INFO) << "got temporary root " << root; - assertStorePath(root); - tempRoots[root].emplace(censor ? kCensored : fmt("{temp:%d}", pid)); - pos = end + 1; - } - - fds.push_back(fd); /* keep open */ - } -} - -void LocalStore::findRoots(const Path& path, unsigned char type, Roots& roots) { - auto foundRoot = [&](const Path& path, const Path& target) { - Path storePath = toStorePath(target); - if (isStorePath(storePath) && isValidPath(storePath)) { - roots[storePath].emplace(path); - } else { - LOG(INFO) << "skipping invalid root from '" << path << "' to '" - << storePath << "'"; - } - }; - - try { - if (type == DT_UNKNOWN) { - type = getFileType(path); - } - - if (type == DT_DIR) { - for (auto& i : readDirectory(path)) { - findRoots(path + "/" + i.name, i.type, roots); - } - } - - else if (type == DT_LNK) { - Path target = readLink(path); - if (isInStore(target)) { - foundRoot(path, target); - } - - /* Handle indirect roots. 
*/ - else { - target = absPath(target, dirOf(path)); - if (!pathExists(target)) { - if (isInDir(path, absl::StrCat(stateDir.get(), "/", kGcRootsDir, - "/auto"))) { - LOG(INFO) << "removing stale link from '" << path << "' to '" - << target << "'"; - unlink(path.c_str()); - } - } else { - struct stat st2 = lstat(target); - if (!S_ISLNK(st2.st_mode)) { - return; - } - Path target2 = readLink(target); - if (isInStore(target2)) { - foundRoot(target, target2); - } - } - } - } - - else if (type == DT_REG) { - Path storePath = storeDir + "/" + baseNameOf(path); - if (isStorePath(storePath) && isValidPath(storePath)) { - roots[storePath].emplace(path); - } - } - - } - - catch (SysError& e) { - /* We only ignore permanent failures. */ - if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR) { - LOG(INFO) << "cannot read potential root '" << path << "'"; - } else { - throw; - } - } -} - -void LocalStore::findRootsNoTemp(Roots& roots, bool censor) { - /* Process direct roots in {gcroots,profiles}. */ - findRoots(absl::StrCat(stateDir.get(), "/", kGcRootsDir), DT_UNKNOWN, roots); - findRoots(stateDir + "/profiles", DT_UNKNOWN, roots); - - /* Add additional roots returned by different platforms-specific - heuristics. This is typically used to add running programs to - the set of roots (to prevent them from being garbage collected). */ - findRuntimeRoots(roots, censor); -} - -Roots LocalStore::findRoots(bool censor) { - Roots roots; - findRootsNoTemp(roots, censor); - - FDs fds; - findTempRoots(fds, roots, censor); - - return roots; -} - -static void readProcLink(const std::string& file, Roots& roots) { - /* 64 is the starting buffer size gnu readlink uses... */ - auto bufsiz = ssize_t{64}; -try_again: - char buf[bufsiz]; - auto res = readlink(file.c_str(), buf, bufsiz); - if (res == -1) { - if (errno == ENOENT || errno == EACCES || errno == ESRCH) { - return; - } - throw SysError("reading symlink"); - } - if (res == bufsiz) { - if (SSIZE_MAX / 2 < bufsiz) { - throw Error("stupidly long symlink"); - } - bufsiz *= 2; - goto try_again; - } - if (res > 0 && buf[0] == '/') { - roots[std::string(static_cast(buf), res)].emplace(file); - } -} - -static std::string quoteRegexChars(const std::string& raw) { - static auto specialRegex = std::regex(R"([.^$\\*+?()\[\]{}|])"); - return std::regex_replace(raw, specialRegex, R"(\$&)"); -} - -static void readFileRoots(const char* path, Roots& roots) { - try { - roots[readFile(path)].emplace(path); - } catch (SysError& e) { - if (e.errNo != ENOENT && e.errNo != EACCES) { - throw; - } - } -} - -void LocalStore::findRuntimeRoots(Roots& roots, bool censor) { - Roots unchecked; - - auto procDir = AutoCloseDir{opendir("/proc")}; - if (procDir) { - struct dirent* ent; - auto digitsRegex = std::regex(R"(^\d+$)"); - auto mapRegex = - std::regex(R"(^\s*\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(/\S+)\s*$)"); - auto storePathRegex = std::regex(quoteRegexChars(storeDir) + - R"(/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*)"); - while (errno = 0, ent = readdir(procDir.get())) { - checkInterrupt(); - if (std::regex_match(ent->d_name, digitsRegex)) { - readProcLink(fmt("/proc/%s/exe", ent->d_name), unchecked); - readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked); - - auto fdStr = fmt("/proc/%s/fd", ent->d_name); - auto fdDir = AutoCloseDir(opendir(fdStr.c_str())); - if (!fdDir) { - if (errno == ENOENT || errno == EACCES) { - continue; - } - throw SysError(format("opening %1%") % fdStr); - } - struct dirent* fd_ent; - while (errno = 0, fd_ent = readdir(fdDir.get())) { - if (fd_ent->d_name[0] 
!= '.') { - readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked); - } - } - if (errno) { - if (errno == ESRCH) { - continue; - } - throw SysError(format("iterating /proc/%1%/fd") % ent->d_name); - } - fdDir.reset(); - - try { - auto mapFile = fmt("/proc/%s/maps", ent->d_name); - std::vector mapLines = absl::StrSplit( - readFile(mapFile, true), absl::ByChar('\n'), absl::SkipEmpty()); - for (const auto& line : mapLines) { - auto match = std::smatch{}; - if (std::regex_match(line, match, mapRegex)) { - unchecked[match[1]].emplace(mapFile); - } - } - - auto envFile = fmt("/proc/%s/environ", ent->d_name); - auto envString = readFile(envFile, true); - auto env_end = std::sregex_iterator{}; - for (auto i = std::sregex_iterator{envString.begin(), envString.end(), - storePathRegex}; - i != env_end; ++i) { - unchecked[i->str()].emplace(envFile); - } - } catch (SysError& e) { - if (errno == ENOENT || errno == EACCES || errno == ESRCH) { - continue; - } - throw; - } - } - } - if (errno) { - throw SysError("iterating /proc"); - } - } - - readFileRoots("/proc/sys/kernel/modprobe", unchecked); - readFileRoots("/proc/sys/kernel/fbsplash", unchecked); - readFileRoots("/proc/sys/kernel/poweroff_cmd", unchecked); - - for (auto& [target, links] : unchecked) { - if (isInStore(target)) { - Path path = toStorePath(target); - if (isStorePath(path) && isValidPath(path)) { - DLOG(INFO) << "got additional root " << path; - if (censor) { - roots[path].insert(std::string(kCensored)); - } else { - roots[path].insert(links.begin(), links.end()); - } - } - } - } -} - -struct GCLimitReached {}; - -struct LocalStore::GCState { - GCOptions options; - GCResults& results; - PathSet roots; - PathSet tempRoots; - PathSet dead; - PathSet alive; - bool gcKeepOutputs; - bool gcKeepDerivations; - unsigned long long bytesInvalidated; - bool moveToTrash = true; - bool shouldDelete; - explicit GCState(GCResults& results_) - : results(results_), bytesInvalidated(0) {} -}; - -bool LocalStore::isActiveTempFile(const GCState& state, const Path& path, - const std::string& suffix) { - return absl::EndsWith(path, suffix) && - state.tempRoots.find(std::string( - path, 0, path.size() - suffix.size())) != state.tempRoots.end(); -} - -void LocalStore::deleteGarbage(GCState& state, const Path& path) { - unsigned long long bytesFreed; - deletePath(path, bytesFreed); - state.results.bytesFreed += bytesFreed; -} - -void LocalStore::deletePathRecursive(GCState& state, const Path& path) { - checkInterrupt(); - - unsigned long long size = 0; - - if (isStorePath(path) && isValidPath(path)) { - PathSet referrers; - queryReferrers(path, referrers); - for (auto& i : referrers) { - if (i != path) { - deletePathRecursive(state, i); - } - } - size = queryPathInfo(path)->narSize; - invalidatePathChecked(path); - } - - Path realPath = realStoreDir + "/" + baseNameOf(path); - - struct stat st; - if (lstat(realPath.c_str(), &st) != 0) { - if (errno == ENOENT) { - return; - } - throw SysError(format("getting status of %1%") % realPath); - } - - LOG(INFO) << "deleting '" << path << "'"; - - state.results.paths.insert(path); - - /* If the path is not a regular file or symlink, move it to the - trash directory. The move is to ensure that later (when we're - not holding the global GC lock) we can delete the path without - being afraid that the path has become alive again. Otherwise - delete it right away. */ - if (state.moveToTrash && S_ISDIR(st.st_mode)) { - // Estimate the amount freed using the narSize field. 
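
The /proc scan above (readProcLink) resolves each process's exe, cwd and fd symlinks, retrying readlink with a doubled buffer because readlink neither null-terminates nor reports the required size up front. A standalone sketch of that retry loop in plain POSIX/C++; the function name and error type are illustrative, and the original additionally gives up on absurdly long targets.

    #include <unistd.h>
    #include <cerrno>
    #include <string>
    #include <system_error>
    #include <vector>

    // Read a symlink target, growing the buffer until the result fits.
    static std::string readLinkTarget(const std::string& path) {
      std::vector<char> buf(64);   // same starting size GNU readlink uses
      for (;;) {
        ssize_t n = readlink(path.c_str(), buf.data(), buf.size());
        if (n < 0)
          throw std::system_error(errno, std::generic_category(),
                                  "readlink " + path);
        if (static_cast<size_t>(n) < buf.size())
          return std::string(buf.data(), static_cast<size_t>(n));
        buf.resize(buf.size() * 2);   // possibly truncated, retry with more room
      }
    }
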
FIXME: - // if the path was not valid, need to determine the actual - // size. - try { - if (chmod(realPath.c_str(), st.st_mode | S_IWUSR) == -1) { - throw SysError(format("making '%1%' writable") % realPath); - } - Path tmp = trashDir + "/" + baseNameOf(path); - if (rename(realPath.c_str(), tmp.c_str()) != 0) { - throw SysError(format("unable to rename '%1%' to '%2%'") % realPath % - tmp); - } - state.bytesInvalidated += size; - } catch (SysError& e) { - if (e.errNo == ENOSPC) { - LOG(INFO) << "note: can't create move '" << realPath - << "': " << e.msg(); - deleteGarbage(state, realPath); - } - } - } else { - deleteGarbage(state, realPath); - } - - if (state.results.bytesFreed + state.bytesInvalidated > - state.options.maxFreed) { - LOG(INFO) << "deleted or invalidated more than " << state.options.maxFreed - << " bytes; stopping"; - throw GCLimitReached(); - } -} - -bool LocalStore::canReachRoot(GCState& state, PathSet& visited, - const Path& path) { - if (visited.count(path) != 0u) { - return false; - } - - if (state.alive.count(path) != 0u) { - return true; - } - - if (state.dead.count(path) != 0u) { - return false; - } - - if (state.roots.count(path) != 0u) { - DLOG(INFO) << "cannot delete '" << path << "' because it's a root"; - state.alive.insert(path); - return true; - } - - visited.insert(path); - - if (!isStorePath(path) || !isValidPath(path)) { - return false; - } - - PathSet incoming; - - /* Don't delete this path if any of its referrers are alive. */ - queryReferrers(path, incoming); - - /* If keep-derivations is set and this is a derivation, then - don't delete the derivation if any of the outputs are alive. */ - if (state.gcKeepDerivations && isDerivation(path)) { - PathSet outputs = queryDerivationOutputs(path); - for (auto& i : outputs) { - if (isValidPath(i) && queryPathInfo(i)->deriver == path) { - incoming.insert(i); - } - } - } - - /* If keep-outputs is set, then don't delete this path if there - are derivers of this path that are not garbage. */ - if (state.gcKeepOutputs) { - PathSet derivers = queryValidDerivers(path); - for (auto& i : derivers) { - incoming.insert(i); - } - } - - for (auto& i : incoming) { - if (i != path) { - if (canReachRoot(state, visited, i)) { - state.alive.insert(path); - return true; - } - } - } - - return false; -} - -void LocalStore::tryToDelete(GCState& state, const Path& path) { - checkInterrupt(); - - auto realPath = realStoreDir + "/" + baseNameOf(path); - if (realPath == linksDir || realPath == trashDir) { - return; - } - - // Activity act(*logger, lvlDebug, format("considering whether to delete - // '%1%'") % path); - - if (!isStorePath(path) || !isValidPath(path)) { - /* A lock file belonging to a path that we're building right - now isn't garbage. */ - if (isActiveTempFile(state, path, ".lock")) { - return; - } - - /* Don't delete .chroot directories for derivations that are - currently being built. */ - if (isActiveTempFile(state, path, ".chroot")) { - return; - } - - /* Don't delete .check directories for derivations that are - currently being built, because we may need to run - diff-hook. */ - if (isActiveTempFile(state, path, ".check")) { - return; - } - } - - PathSet visited; - - if (canReachRoot(state, visited, path)) { - DLOG(INFO) << "cannot delete '" << path << "' because it's still reachable"; - } else { - /* No path we visited was a root, so everything is garbage. - But we only delete ‘path’ and its referrers here so that - ‘nix-store --delete’ doesn't have the unexpected effect of - recursing into derivations and outputs. 
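
The liveness test above (canReachRoot) walks referrer edges, plus derivation outputs and derivers when keep-derivations or keep-outputs are set, until it either reaches a root or exhausts the closure, memoising results in the alive and dead sets. A minimal standalone sketch of that walk over an in-memory referrer map; the GcView struct is an illustrative stand-in for the store queries, and the keep-* refinements are omitted.

    #include <map>
    #include <set>
    #include <string>

    using Path = std::string;
    using PathSet = std::set<Path>;

    struct GcView {
      PathSet roots;                      // permanent + temporary roots
      std::map<Path, PathSet> referrers;  // path -> paths that refer to it
      PathSet alive, dead;                // memoisation across queries
    };

    // Returns true if `path` can reach a root by following referrer edges.
    static bool canReachRoot(GcView& s, PathSet& visited, const Path& path) {
      if (visited.count(path)) return false;   // already on this query's stack
      if (s.alive.count(path)) return true;    // known live
      if (s.dead.count(path)) return false;    // known garbage
      if (s.roots.count(path)) { s.alive.insert(path); return true; }

      visited.insert(path);
      auto it = s.referrers.find(path);
      if (it != s.referrers.end()) {
        for (const Path& r : it->second) {
          if (r != path && canReachRoot(s, visited, r)) {
            s.alive.insert(path);
            return true;
          }
        }
      }
      return false;   // caller may merge `visited` into the dead set
    }

As in tryToDelete above, everything visited by a failed search can then be marked dead in one step.
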
*/ - state.dead.insert(visited.begin(), visited.end()); - if (state.shouldDelete) { - deletePathRecursive(state, path); - } - } -} - -/* Unlink all files in /nix/store/.links that have a link count of 1, - which indicates that there are no other links and so they can be - safely deleted. FIXME: race condition with optimisePath(): we - might see a link count of 1 just before optimisePath() increases - the link count. */ -void LocalStore::removeUnusedLinks(const GCState& state) { - AutoCloseDir dir(opendir(linksDir.c_str())); - if (!dir) { - throw SysError(format("opening directory '%1%'") % linksDir); - } - - long long actualSize = 0; - long long unsharedSize = 0; - - struct dirent* dirent; - while (errno = 0, dirent = readdir(dir.get())) { - checkInterrupt(); - std::string name = dirent->d_name; - if (name == "." || name == "..") { - continue; - } - Path path = linksDir + "/" + name; - - struct stat st; - if (lstat(path.c_str(), &st) == -1) { - throw SysError(format("statting '%1%'") % path); - } - - if (st.st_nlink != 1) { - actualSize += st.st_size; - unsharedSize += (st.st_nlink - 1) * st.st_size; - continue; - } - - LOG(INFO) << "deleting unused link " << path; - - if (unlink(path.c_str()) == -1) { - throw SysError(format("deleting '%1%'") % path); - } - - state.results.bytesFreed += st.st_size; - } - - struct stat st; - if (stat(linksDir.c_str(), &st) == -1) { - throw SysError(format("statting '%1%'") % linksDir); - } - - long long overhead = st.st_blocks * 512ULL; - - // TODO(tazjin): absl::StrFormat %.2f - LOG(INFO) << "note: currently hard linking saves " - << ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)) - << " MiB"; -} - -void LocalStore::collectGarbage(const GCOptions& options, GCResults& results) { - GCState state(results); - state.options = options; - state.gcKeepOutputs = settings.gcKeepOutputs; - state.gcKeepDerivations = settings.gcKeepDerivations; - - /* Using `--ignore-liveness' with `--delete' can have unintended - consequences if `keep-outputs' or `keep-derivations' are true - (the garbage collector will recurse into deleting the outputs - or derivers, respectively). So disable them. */ - if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) { - state.gcKeepOutputs = false; - state.gcKeepDerivations = false; - } - - state.shouldDelete = options.action == GCOptions::gcDeleteDead || - options.action == GCOptions::gcDeleteSpecific; - - if (state.shouldDelete) { - deletePath(reservedPath); - } - - /* Acquire the global GC root. This prevents - a) New roots from being added. - b) Processes from creating new temporary root files. */ - AutoCloseFD fdGCLock = openGCLock(ltWrite); - - /* Find the roots. Since we've grabbed the GC lock, the set of - permanent roots cannot increase now. */ - LOG(INFO) << "finding garbage collector roots..."; - Roots rootMap; - if (!options.ignoreLiveness) { - findRootsNoTemp(rootMap, true); - } - - for (auto& i : rootMap) { - state.roots.insert(i.first); - } - - /* Read the temporary roots. This acquires read locks on all - per-process temporary root files. So after this point no paths - can be added to the set of temporary roots. */ - FDs fds; - Roots tempRoots; - findTempRoots(fds, tempRoots, true); - for (auto& root : tempRoots) { - state.tempRoots.insert(root.first); - } - state.roots.insert(state.tempRoots.begin(), state.tempRoots.end()); - - /* After this point the set of roots or temporary roots cannot - increase, since we hold locks on everything. 
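
removeUnusedLinks above boils down to: scan the .links directory and unlink any entry whose link count has dropped to 1, since no store path shares it any more. A standalone sketch with plain POSIX calls (no AutoCloseDir), returning the bytes freed; it has the same benign race with optimisePath() that the comment above points out.

    #include <dirent.h>
    #include <sys/stat.h>
    #include <unistd.h>
    #include <cstring>
    #include <stdexcept>
    #include <string>

    // Delete every entry in `linksDir` whose link count is 1; report bytes freed.
    static long long removeUnusedLinks(const std::string& linksDir) {
      long long freed = 0;
      DIR* dir = opendir(linksDir.c_str());
      if (!dir) throw std::runtime_error("opening " + linksDir);

      struct dirent* ent;
      while ((ent = readdir(dir))) {
        if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..")) continue;
        std::string path = linksDir + "/" + ent->d_name;

        struct stat st;
        if (lstat(path.c_str(), &st) == -1) continue;  // entry vanished, ignore
        if (st.st_nlink != 1) continue;                // still shared, keep it

        if (unlink(path.c_str()) == 0) freed += st.st_size;
      }
      closedir(dir);
      return freed;
    }
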
So everything - that is not reachable from `roots' is garbage. */ - - if (state.shouldDelete) { - if (pathExists(trashDir)) { - deleteGarbage(state, trashDir); - } - try { - createDirs(trashDir); - } catch (SysError& e) { - if (e.errNo == ENOSPC) { - LOG(INFO) << "note: can't create trash directory: " << e.msg(); - state.moveToTrash = false; - } - } - } - - /* Now either delete all garbage paths, or just the specified - paths (for gcDeleteSpecific). */ - - if (options.action == GCOptions::gcDeleteSpecific) { - for (auto& i : options.pathsToDelete) { - assertStorePath(i); - tryToDelete(state, i); - if (state.dead.find(i) == state.dead.end()) { - throw Error(format("cannot delete path '%1%' since it is still alive") % - i); - } - } - - } else if (options.maxFreed > 0) { - if (state.shouldDelete) { - LOG(INFO) << "deleting garbage..."; - } else { - LOG(ERROR) << "determining live/dead paths..."; - } - - try { - AutoCloseDir dir(opendir(realStoreDir.c_str())); - if (!dir) { - throw SysError(format("opening directory '%1%'") % realStoreDir); - } - - /* Read the store and immediately delete all paths that - aren't valid. When using --max-freed etc., deleting - invalid paths is preferred over deleting unreachable - paths, since unreachable paths could become reachable - again. We don't use readDirectory() here so that GCing - can start faster. */ - Paths entries; - struct dirent* dirent; - while (errno = 0, dirent = readdir(dir.get())) { - checkInterrupt(); - std::string name = dirent->d_name; - if (name == "." || name == "..") { - continue; - } - Path path = storeDir + "/" + name; - if (isStorePath(path) && isValidPath(path)) { - entries.push_back(path); - } else { - tryToDelete(state, path); - } - } - - dir.reset(); - - /* Now delete the unreachable valid paths. Randomise the - order in which we delete entries to make the collector - less biased towards deleting paths that come - alphabetically first (e.g. /nix/store/000...). This - matters when using --max-freed etc. */ - std::vector entries_(entries.begin(), entries.end()); - std::mt19937 gen(1); - std::shuffle(entries_.begin(), entries_.end(), gen); - - for (auto& i : entries_) { - tryToDelete(state, i); - } - - } catch (GCLimitReached& e) { - } - } - - if (state.options.action == GCOptions::gcReturnLive) { - state.results.paths = state.alive; - return; - } - - if (state.options.action == GCOptions::gcReturnDead) { - state.results.paths = state.dead; - return; - } - - /* Allow other processes to add to the store from here on. */ - fdGCLock = AutoCloseFD(-1); - fds.clear(); - - /* Delete the trash directory. */ - LOG(INFO) << "deleting " << trashDir; - deleteGarbage(state, trashDir); - - /* Clean up the links directory. */ - if (options.action == GCOptions::gcDeleteDead || - options.action == GCOptions::gcDeleteSpecific) { - LOG(INFO) << "deleting unused links..."; - removeUnusedLinks(state); - } - - /* While we're at it, vacuum the database. 
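
One detail of collectGarbage above worth highlighting: unreachable valid paths are deleted in a randomised order so that a size-limited run (--max-freed) is not biased towards store paths that happen to sort first alphabetically. A tiny sketch of just that step; the deletion callback is a placeholder, and the deleted code seeds the generator with a constant 1 rather than std::random_device.

    #include <algorithm>
    #include <functional>
    #include <random>
    #include <string>
    #include <vector>

    // Visit candidate store paths in random order so a size-limited GC does not
    // always consume the alphabetically-first entries (e.g. /nix/store/000...).
    static void deleteShuffled(
        std::vector<std::string> entries,
        const std::function<void(const std::string&)>& tryToDelete) {
      std::mt19937 gen(std::random_device{}());
      std::shuffle(entries.begin(), entries.end(), gen);
      for (const auto& path : entries) {
        tryToDelete(path);
      }
    }
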
*/ - // if (options.action == GCOptions::gcDeleteDead) { vacuumDB(); } -} - -void LocalStore::autoGC(bool sync) { - static auto fakeFreeSpaceFile = - getEnv("_NIX_TEST_FREE_SPACE_FILE").value_or(""); - - auto getAvail = [this]() -> uint64_t { - if (!fakeFreeSpaceFile.empty()) { - return std::stoll(readFile(fakeFreeSpaceFile)); - } - - struct statvfs st; - if (statvfs(realStoreDir.c_str(), &st) != 0) { - throw SysError("getting filesystem info about '%s'", realStoreDir); - } - - return static_cast(st.f_bavail) * st.f_bsize; - }; - - std::shared_future future; - - { - auto state(_state.lock()); - - if (state->gcRunning) { - future = state->gcFuture; - DLOG(INFO) << "waiting for auto-GC to finish"; - goto sync; - } - - auto now = std::chrono::steady_clock::now(); - - if (now < state->lastGCCheck + - std::chrono::seconds(settings.minFreeCheckInterval)) { - return; - } - - auto avail = getAvail(); - - state->lastGCCheck = now; - - if (avail >= settings.minFree || avail >= settings.maxFree) { - return; - } - - if (avail > state->availAfterGC * 0.97) { - return; - } - - state->gcRunning = true; - - std::promise promise; - future = state->gcFuture = promise.get_future().share(); - - std::thread([promise{std::move(promise)}, this, avail, getAvail]() mutable { - try { - /* Wake up any threads waiting for the auto-GC to finish. */ - Finally wakeup([&]() { - auto state(_state.lock()); - state->gcRunning = false; - state->lastGCCheck = std::chrono::steady_clock::now(); - promise.set_value(); - }); - - GCOptions options; - options.maxFreed = settings.maxFree - avail; - - LOG(INFO) << "running auto-GC to free " << options.maxFreed << " bytes"; - - GCResults results; - - collectGarbage(options, results); - - _state.lock()->availAfterGC = getAvail(); - - } catch (...) { - // FIXME: we could propagate the exception to the - // future, but we don't really care. - ignoreException(); - } - }).detach(); - } - -sync: - // Wait for the future outside of the state lock. - if (sync) { - future.get(); - } -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/globals.cc b/third_party/nix/src/libstore/globals.cc deleted file mode 100644 index 6babb4589f..0000000000 --- a/third_party/nix/src/libstore/globals.cc +++ /dev/null @@ -1,178 +0,0 @@ -#include "libstore/globals.hh" - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "libutil/archive.hh" -#include "libutil/args.hh" -#include "libutil/util.hh" -#include "nix_config.h" - -namespace nix { - -/* The default location of the daemon socket, relative to nixStateDir. - The socket is in a directory to allow you to control access to the - Nix daemon by setting the mode/ownership of the directory - appropriately. (This wouldn't work on the socket itself since it - must be deleted and recreated on startup.) 
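
autoGC above probes free space on the store filesystem and, once it falls below min-free, kicks off a background collection sized to bring it back up to max-free (and it skips the run when the last GC left roughly the same amount available). A standalone sketch of the probe and the trigger arithmetic using statvfs; the threshold names mirror the settings but the function names are illustrative.

    #include <sys/statvfs.h>
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    // Bytes available to unprivileged processes on the filesystem holding `path`.
    static uint64_t availableBytes(const std::string& path) {
      struct statvfs st;
      if (statvfs(path.c_str(), &st) != 0)
        throw std::runtime_error("getting filesystem info about " + path);
      return static_cast<uint64_t>(st.f_bavail) * st.f_bsize;
    }

    // Decide whether a GC should run and how much it should try to free.
    struct GcPlan { bool run; uint64_t maxFreed; };

    static GcPlan planAutoGc(uint64_t avail, uint64_t minFree, uint64_t maxFree) {
      if (minFree == 0 || avail >= minFree) return {false, 0};
      return {true, maxFree > avail ? maxFree - avail : 0};
    }
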
*/ -#define DEFAULT_SOCKET_PATH "/daemon-socket/socket" - -Settings settings; - -static GlobalConfig::Register r1(&settings); - -Settings::Settings() - : nixPrefix(NIX_PREFIX), - nixStore(canonPath( - getEnv("NIX_STORE_DIR") - .value_or(getEnv("NIX_STORE").value_or(NIX_STORE_DIR)))), - nixDataDir(canonPath(getEnv("NIX_DATA_DIR").value_or(NIX_DATA_DIR))), - nixLogDir(canonPath(getEnv("NIX_LOG_DIR").value_or(NIX_LOG_DIR))), - nixStateDir(canonPath(getEnv("NIX_STATE_DIR").value_or(NIX_STATE_DIR))), - nixConfDir(canonPath(getEnv("NIX_CONF_DIR").value_or(NIX_CONF_DIR))), - nixLibexecDir( - canonPath(getEnv("NIX_LIBEXEC_DIR").value_or(NIX_LIBEXEC_DIR))), - nixBinDir(canonPath(getEnv("NIX_BIN_DIR").value_or(NIX_BIN_DIR))), - nixManDir(canonPath(NIX_MAN_DIR)), - nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH)) { - buildUsersGroup = getuid() == 0 ? "nixbld" : ""; - lockCPU = getEnv("NIX_AFFINITY_HACK").value_or("1") == "1"; - - caFile = getEnv("NIX_SSL_CERT_FILE") - .value_or(getEnv("SSL_CERT_FILE").value_or("")); - if (caFile.empty()) { - for (auto& fn : - {"/etc/ssl/certs/ca-certificates.crt", - "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"}) { - if (pathExists(fn)) { - caFile = fn; - break; - } - } - } - - /* Backwards compatibility. */ - // TODO(tazjin): still? - auto s = getEnv("NIX_REMOTE_SYSTEMS"); - if (s) { - Strings ss; - for (auto p : absl::StrSplit(*s, absl::ByChar(':'), absl::SkipEmpty())) { - ss.push_back(absl::StrCat("@", p)); - } - builders = concatStringsSep(" ", ss); - } - - sandboxPaths = absl::StrSplit("/bin/sh=" SANDBOX_SHELL, - absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty()); -} - -void loadConfFile() { - if (std::filesystem::exists(settings.nixConfDir + "/nix.conf")) { - globalConfig.applyConfigFile(settings.nixConfDir + "/nix.conf"); - } - - /* We only want to send overrides to the daemon, i.e. stuff from - ~/.nix/nix.conf or the command line. */ - globalConfig.resetOverriden(); - - auto dirs = getConfigDirs(); - // Iterate over them in reverse so that the ones appearing first in the path - // take priority - for (auto dir = dirs.rbegin(); dir != dirs.rend(); dir++) { - if (std::filesystem::exists(*dir + "/nix.conf")) { - globalConfig.applyConfigFile(*dir + "/nix/nix.conf"); - } - } -} - -unsigned int Settings::getDefaultCores() { - return std::max(1U, std::thread::hardware_concurrency()); -} - -StringSet Settings::getDefaultSystemFeatures() { - /* For backwards compatibility, accept some "features" that are - used in Nixpkgs to route builds to certain machines but don't - actually require anything special on the machines. 
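
loadConfFile above layers configuration sources: the system-wide nix.conf from the configuration directory is applied first, then the user config directories are walked in reverse so that directories earlier in the search path are applied last and therefore win, with command-line overrides preserved separately. (Note that the deleted loadConfFile tests for <dir>/nix.conf but then applies <dir>/nix/nix.conf, which looks like an oversight.) A standalone sketch of that precedence with a deliberately naive key=value parser; getConfigDirs and the real trimming rules are not reproduced.

    #include <filesystem>
    #include <fstream>
    #include <map>
    #include <string>
    #include <vector>

    using Config = std::map<std::string, std::string>;

    // Merge `file` into `cfg`; later applications override earlier ones.
    static void applyFile(Config& cfg, const std::filesystem::path& file) {
      std::ifstream in(file);
      std::string line;
      while (std::getline(in, line)) {
        auto eq = line.find('=');
        if (eq == std::string::npos) continue;   // whitespace trimming omitted
        cfg[line.substr(0, eq)] = line.substr(eq + 1);
      }
    }

    static Config loadLayeredConf(
        const std::filesystem::path& systemConfDir,
        const std::vector<std::filesystem::path>& userConfDirs) {
      Config cfg;
      if (std::filesystem::exists(systemConfDir / "nix.conf"))
        applyFile(cfg, systemConfDir / "nix.conf");

      // Directories appearing first take priority, so apply them last.
      for (auto dir = userConfDirs.rbegin(); dir != userConfDirs.rend(); ++dir) {
        auto file = *dir / "nix" / "nix.conf";
        if (std::filesystem::exists(file)) applyFile(cfg, file);
      }
      return cfg;
    }
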
*/ - StringSet features{"nixos-test", "benchmark", "big-parallel"}; - -#if __linux__ - if (access("/dev/kvm", R_OK | W_OK) == 0) { - features.insert("kvm"); - } -#endif - - return features; -} - -const std::string nixVersion = PACKAGE_VERSION; - -template <> -void BaseSetting::set(const std::string& str) { - if (str == "true") { - value = smEnabled; - } else if (str == "relaxed") { - value = smRelaxed; - } else if (str == "false") { - value = smDisabled; - } else { - throw UsageError("option '%s' has invalid value '%s'", name, str); - } -} - -template <> -std::string BaseSetting::to_string() { - if (value == smEnabled) { - return "true"; - } - if (value == smRelaxed) { - return "relaxed"; - } else if (value == smDisabled) { - return "false"; - } else { - abort(); - } -} - -template <> -void BaseSetting::toJSON(JSONPlaceholder& out) { - AbstractSetting::toJSON(out); -} - -template <> -void BaseSetting::convertToArg(Args& args, - const std::string& category) { - args.mkFlag() - .longName(name) - .description("Enable sandboxing.") - .handler([=](const std::vector& ss) { override(smEnabled); }) - .category(category); - args.mkFlag() - .longName("no-" + name) - .description("Disable sandboxing.") - .handler( - [=](const std::vector& ss) { override(smDisabled); }) - .category(category); - args.mkFlag() - .longName("relaxed-" + name) - .description("Enable sandboxing, but allow builds to disable it.") - .handler([=](const std::vector& ss) { override(smRelaxed); }) - .category(category); -} - -void MaxBuildJobsSetting::set(const std::string& str) { - if (str == "auto") { - value = std::max(1U, std::thread::hardware_concurrency()); - } else if (!absl::SimpleAtoi(str, &value)) { - throw UsageError( - "configuration setting '%s' should be 'auto' or an integer", name); - } -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/globals.hh b/third_party/nix/src/libstore/globals.hh deleted file mode 100644 index ed9b6a338e..0000000000 --- a/third_party/nix/src/libstore/globals.hh +++ /dev/null @@ -1,464 +0,0 @@ -#pragma once - -#include -#include - -#include - -#include "libutil/config.hh" -#include "libutil/types.hh" -#include "libutil/util.hh" -#include "nix_config.h" - -namespace nix { - -typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode; - -struct MaxBuildJobsSetting : public BaseSetting { - MaxBuildJobsSetting(Config* options, unsigned int def, - const std::string& name, const std::string& description, - const std::set& aliases = {}) - : BaseSetting(def, name, description, aliases) { - options->addSetting(this); - } - - void set(const std::string& str) override; -}; - -class Settings : public Config { - static unsigned int getDefaultCores(); - - static StringSet getDefaultSystemFeatures(); - - public: - Settings(); - - Path nixPrefix; - - /* The directory where we store sources and derived files. */ - Path nixStore; - - Path nixDataDir; /* !!! fix */ - - /* The directory where we log various operations. */ - Path nixLogDir; - - /* The directory where state is stored. */ - Path nixStateDir; - - /* The directory where configuration files are stored. */ - Path nixConfDir; - - /* The directory where internal helper programs are stored. */ - Path nixLibexecDir; - - /* The directory where the main programs are stored. */ - Path nixBinDir; - - /* The directory where the man pages are stored. */ - Path nixManDir; - - /* File name of the socket the daemon listens to. 
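
The sandbox setting handled above is a tri-state rather than a boolean: "true" (always sandbox), "relaxed" (sandbox, but a derivation may opt out) and "false" (never), surfaced on the command line as --sandbox, --relaxed-sandbox and --no-sandbox. A minimal sketch of the parse/print pair for such a setting; the enum and the exception type are placeholders for the Setting machinery.

    #include <stdexcept>
    #include <string>

    enum class SandboxMode { Enabled, Relaxed, Disabled };

    static SandboxMode parseSandboxMode(const std::string& s) {
      if (s == "true") return SandboxMode::Enabled;
      if (s == "relaxed") return SandboxMode::Relaxed;
      if (s == "false") return SandboxMode::Disabled;
      throw std::invalid_argument("invalid sandbox mode '" + s + "'");
    }

    static std::string to_string(SandboxMode m) {
      switch (m) {
        case SandboxMode::Enabled: return "true";
        case SandboxMode::Relaxed: return "relaxed";
        case SandboxMode::Disabled: return "false";
      }
      return "false";   // unreachable; keeps compilers quiet
    }
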
*/ - Path nixDaemonSocketFile; - - Setting storeUri{this, getEnv("NIX_REMOTE").value_or("auto"), - "store", "The default Nix store to use."}; - - Setting keepFailed{ - this, false, "keep-failed", - "Whether to keep temporary directories of failed builds."}; - - Setting keepGoing{ - this, false, "keep-going", - "Whether to keep building derivations when another build fails."}; - - Setting tryFallback{ - this, - false, - "fallback", - "Whether to fall back to building when substitution fails.", - {"build-fallback"}}; - - /* Whether to show build log output in real time. */ - bool verboseBuild = true; - - Setting logLines{ - this, 10, "log-lines", - "If verbose-build is false, the number of lines of the tail of " - "the log to show if a build fails."}; - - MaxBuildJobsSetting maxBuildJobs{this, - 1, - "max-jobs", - "Maximum number of parallel build jobs. " - "\"auto\" means use number of cores.", - {"build-max-jobs"}}; - - Setting buildCores{ - this, - getDefaultCores(), - "cores", - "Number of CPU cores to utilize in parallel within a build, " - "i.e. by passing this number to Make via '-j'. 0 means that the " - "number of actual CPU cores on the local host ought to be " - "auto-detected.", - {"build-cores"}}; - - /* Read-only mode. Don't copy stuff to the store, don't change - the database. */ - bool readOnlyMode = false; - - Setting thisSystem{this, SYSTEM, "system", - "The canonical Nix system name."}; - - Setting maxSilentTime{ - this, - 0, - "max-silent-time", - "The maximum time in seconds that a builer can go without " - "producing any output on stdout/stderr before it is killed. " - "0 means infinity.", - {"build-max-silent-time"}}; - - Setting buildTimeout{ - this, - 0, - "timeout", - "The maximum duration in seconds that a builder can run. " - "0 means infinity.", - {"build-timeout"}}; - - PathSetting buildHook{this, true, nixLibexecDir + "/nix/build-remote", - "build-hook", - "The path of the helper program that executes builds " - "to remote machines."}; - - Setting builders{this, "@" + nixConfDir + "/machines", - "builders", - "A semicolon-separated list of build machines, " - "in the format of nix.machines."}; - - Setting buildersUseSubstitutes{ - this, false, "builders-use-substitutes", - "Whether build machines should use their own substitutes for obtaining " - "build dependencies if possible, rather than waiting for this host to " - "upload them."}; - - Setting reservedSize{ - this, 8 * 1024 * 1024, "gc-reserved-space", - "Amount of reserved disk space for the garbage collector."}; - - Setting fsyncMetadata{this, true, "fsync-metadata", - "Whether SQLite should use fsync()."}; - - Setting useSQLiteWAL{this, true, "use-sqlite-wal", - "Whether SQLite should use WAL mode."}; - - Setting syncBeforeRegistering{ - this, false, "sync-before-registering", - "Whether to call sync() before registering a path as valid."}; - - Setting useSubstitutes{this, - true, - "substitute", - "Whether to use substitutes.", - {"build-use-substitutes"}}; - - Setting buildUsersGroup{ - this, "", "build-users-group", - "The Unix group that contains the build users."}; - - Setting impersonateLinux26{ - this, - false, - "impersonate-linux-26", - "Whether to impersonate a Linux 2.6 machine on newer kernels.", - {"build-impersonate-linux-26"}}; - - Setting keepLog{this, - true, - "keep-build-log", - "Whether to store build logs.", - {"build-keep-log"}}; - - Setting compressLog{this, - true, - "compress-build-log", - "Whether to compress logs.", - {"build-compress-log"}}; - - Setting maxLogSize{ - this, - 
0, - "max-build-log-size", - "Maximum number of bytes a builder can write to stdout/stderr " - "before being killed (0 means no limit).", - {"build-max-log-size"}}; - - /* When buildRepeat > 0 and verboseBuild == true, whether to print - repeated builds (i.e. builds other than the first one) to - stderr. Hack to prevent Hydra logs from being polluted. */ - bool printRepeatedBuilds = true; - - Setting pollInterval{ - this, 5, "build-poll-interval", - "How often (in seconds) to poll for locks."}; - - Setting checkRootReachability{ - this, false, "gc-check-reachability", - "Whether to check if new GC roots can in fact be found by the " - "garbage collector."}; - - Setting gcKeepOutputs{ - this, - false, - "keep-outputs", - "Whether the garbage collector should keep outputs of live derivations.", - {"gc-keep-outputs"}}; - - Setting gcKeepDerivations{ - this, - true, - "keep-derivations", - "Whether the garbage collector should keep derivers of live paths.", - {"gc-keep-derivations"}}; - - Setting autoOptimiseStore{this, false, "auto-optimise-store", - "Whether to automatically replace files with " - "identical contents with hard links."}; - - Setting envKeepDerivations{ - this, - false, - "keep-env-derivations", - "Whether to add derivations as a dependency of user environments " - "(to prevent them from being GCed).", - {"env-keep-derivations"}}; - - /* Whether to lock the Nix client and worker to the same CPU. */ - bool lockCPU; - - /* Whether to show a stack trace if Nix evaluation fails. */ - Setting showTrace{ - this, false, "show-trace", - "Whether to show a stack trace on evaluation errors."}; - - Setting sandboxMode { - this, -#if __linux__ - smEnabled -#else - smDisabled -#endif - , - "sandbox", - "Whether to enable sandboxed builds. Can be \"true\", \"false\" or " - "\"relaxed\".", - { - "build-use-chroot", "build-use-sandbox" - } - }; - - Setting sandboxPaths{ - this, - {}, - "sandbox-paths", - "The paths to make available inside the build sandbox.", - {"build-chroot-dirs", "build-sandbox-paths"}}; - - Setting sandboxFallback{ - this, true, "sandbox-fallback", - "Whether to disable sandboxing when the kernel doesn't allow it."}; - - Setting extraSandboxPaths{ - this, - {}, - "extra-sandbox-paths", - "Additional paths to make available inside the build sandbox.", - {"build-extra-chroot-dirs", "build-extra-sandbox-paths"}}; - - Setting buildRepeat{ - this, - 0, - "repeat", - "The number of times to repeat a build in order to verify determinism.", - {"build-repeat"}}; - -#if __linux__ - Setting sandboxShmSize{ - this, "50%", "sandbox-dev-shm-size", - "The size of /dev/shm in the build sandbox."}; - - Setting sandboxBuildDir{this, "/build", "sandbox-build-dir", - "The build directory inside the sandbox."}; -#endif - - Setting allowedImpureHostPrefixes{ - this, - {}, - "allowed-impure-host-deps", - "Which prefixes to allow derivations to ask for access to (primarily for " - "Darwin)."}; - - Setting runDiffHook{ - this, false, "run-diff-hook", - "Whether to run the program specified by the diff-hook setting " - "repeated builds produce a different result. 
Typically used to " - "plug in diffoscope."}; - - PathSetting diffHook{ - this, true, "", "diff-hook", - "A program that prints out the differences between the two paths " - "specified on its command line."}; - - Setting enforceDeterminism{ - this, true, "enforce-determinism", - "Whether to fail if repeated builds produce different output."}; - - Setting trustedPublicKeys{ - this, - {"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="}, - "trusted-public-keys", - "Trusted public keys for secure substitution.", - {"binary-cache-public-keys"}}; - - Setting secretKeyFiles{ - this, - {}, - "secret-key-files", - "Secret keys with which to sign local builds."}; - - Setting tarballTtl{ - this, 60 * 60, "tarball-ttl", - "How long downloaded files are considered up-to-date."}; - - Setting requireSigs{ - this, true, "require-sigs", - "Whether to check that any non-content-addressed path added to the " - "Nix store has a valid signature (that is, one signed using a key " - "listed in 'trusted-public-keys'."}; - - Setting extraPlatforms{ - this, - std::string{SYSTEM} == "x86_64-linux" ? StringSet{"i686-linux"} - : StringSet{}, - "extra-platforms", - "Additional platforms that can be built on the local system. " - "These may be supported natively (e.g. armv7 on some aarch64 CPUs " - "or using hacks like qemu-user."}; - - Setting systemFeatures{ - this, getDefaultSystemFeatures(), "system-features", - "Optional features that this system implements (like \"kvm\")."}; - - Setting substituters{ - this, - nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} - : Strings(), - "substituters", - "The URIs of substituters (such as https://cache.nixos.org/).", - {"binary-caches"}}; - - // FIXME: provide a way to add to option values. - Setting extraSubstituters{this, - {}, - "extra-substituters", - "Additional URIs of substituters.", - {"extra-binary-caches"}}; - - Setting trustedSubstituters{ - this, - {}, - "trusted-substituters", - "Disabled substituters that may be enabled via the substituters option " - "by untrusted users.", - {"trusted-binary-caches"}}; - - Setting trustedUsers{this, - {"root"}, - "trusted-users", - "Which users or groups are trusted to ask the " - "daemon to do unsafe things."}; - - Setting ttlNegativeNarInfoCache{ - this, 3600, "narinfo-cache-negative-ttl", - "The TTL in seconds for negative lookups in the disk cache i.e binary " - "cache lookups that " - "return an invalid path result"}; - - Setting ttlPositiveNarInfoCache{ - this, 30 * 24 * 3600, "narinfo-cache-positive-ttl", - "The TTL in seconds for positive lookups in the disk cache i.e binary " - "cache lookups that " - "return a valid path result."}; - - /* ?Who we trust to use the daemon in safe ways */ - Setting allowedUsers{ - this, - {"*"}, - "allowed-users", - "Which users or groups are allowed to connect to the daemon."}; - - Setting printMissing{ - this, true, "print-missing", - "Whether to print what paths need to be built or downloaded."}; - - Setting preBuildHook{ - this, "", "pre-build-hook", - "A program to run just before a build to set derivation-specific build " - "settings."}; - - Setting postBuildHook{ - this, "", "post-build-hook", - "A program to run just after each successful build."}; - - Setting netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"), - "netrc-file", - "Path to the netrc file used to obtain " - "usernames/passwords for downloads."}; - - /* Path to the SSL CA file used */ - Path caFile; - -#if __linux__ - Setting filterSyscalls{ - this, true, "filter-syscalls", - "Whether to 
prevent certain dangerous system calls, such as " - "creation of setuid/setgid files or adding ACLs or extended " - "attributes. Only disable this if you're aware of the " - "security implications."}; - - Setting allowNewPrivileges{ - this, false, "allow-new-privileges", - "Whether builders can acquire new privileges by calling programs with " - "setuid/setgid bits or with file capabilities."}; -#endif - - Setting hashedMirrors{ - this, - {"http://tarballs.nixos.org/"}, - "hashed-mirrors", - "A list of servers used by builtins.fetchurl to fetch files by hash."}; - - Setting minFree{this, 0, "min-free", - "Automatically run the garbage collector when free " - "disk space drops below the specified amount."}; - - Setting maxFree{this, std::numeric_limits::max(), - "max-free", - "Stop deleting garbage when free disk space is " - "above the specified amount."}; - - Setting minFreeCheckInterval{ - this, 5, "min-free-check-interval", - "Number of seconds between checking free disk space."}; -}; - -// FIXME: don't use a global variable. -extern Settings settings; - -void loadConfFile(); - -extern const std::string nixVersion; - -} // namespace nix diff --git a/third_party/nix/src/libstore/http-binary-cache-store.cc b/third_party/nix/src/libstore/http-binary-cache-store.cc deleted file mode 100644 index c713ac43c4..0000000000 --- a/third_party/nix/src/libstore/http-binary-cache-store.cc +++ /dev/null @@ -1,171 +0,0 @@ -#include - -#include - -#include "libstore/binary-cache-store.hh" -#include "libstore/download.hh" -#include "libstore/globals.hh" -#include "libstore/nar-info-disk-cache.hh" - -namespace nix { - -MakeError(UploadToHTTP, Error); - -class HttpBinaryCacheStore : public BinaryCacheStore { - private: - Path cacheUri; - - struct State { - bool enabled = true; - std::chrono::steady_clock::time_point disabledUntil; - }; - - Sync _state; - - public: - HttpBinaryCacheStore(const Params& params, Path _cacheUri) - : BinaryCacheStore(params), cacheUri(std::move(_cacheUri)) { - if (cacheUri.back() == '/') { - cacheUri.pop_back(); - } - - diskCache = getNarInfoDiskCache(); - } - - std::string getUri() override { return cacheUri; } - - void init() override { - // FIXME: do this lazily? - if (!diskCache->cacheExists(cacheUri, wantMassQuery_, priority)) { - try { - BinaryCacheStore::init(); - } catch (UploadToHTTP&) { - throw Error("'%s' does not appear to be a binary cache", cacheUri); - } - diskCache->createCache(cacheUri, storeDir, wantMassQuery_, priority); - } - } - - protected: - void maybeDisable() { - auto state(_state.lock()); - if (state->enabled && settings.tryFallback) { - int t = 60; - LOG(WARNING) << "disabling binary cache '" << getUri() << "' for " << t - << " seconds"; - state->enabled = false; - state->disabledUntil = - std::chrono::steady_clock::now() + std::chrono::seconds(t); - } - } - - void checkEnabled() { - auto state(_state.lock()); - if (state->enabled) { - return; - } - if (std::chrono::steady_clock::now() > state->disabledUntil) { - state->enabled = true; - DLOG(INFO) << "re-enabling binary cache '" << getUri() << "'"; - return; - } - throw SubstituterDisabled("substituter '%s' is disabled", getUri()); - } - - bool fileExists(const std::string& path) override { - checkEnabled(); - - try { - DownloadRequest request(cacheUri + "/" + path); - request.head = true; - getDownloader()->download(request); - return true; - } catch (DownloadError& e) { - /* S3 buckets return 403 if a file doesn't exist and the - bucket is unlistable, so treat 403 as 404. 
*/ - if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) { - return false; - } - maybeDisable(); - throw; - } - } - - void upsertFile(const std::string& path, const std::string& data, - const std::string& mimeType) override { - auto req = DownloadRequest(cacheUri + "/" + path); - req.data = std::make_shared(data); // FIXME: inefficient - req.mimeType = mimeType; - try { - getDownloader()->download(req); - } catch (DownloadError& e) { - throw UploadToHTTP("while uploading to HTTP binary cache at '%s': %s", - cacheUri, e.msg()); - } - } - - DownloadRequest makeRequest(const std::string& path) { - DownloadRequest request(cacheUri + "/" + path); - return request; - } - - void getFile(const std::string& path, Sink& sink) override { - checkEnabled(); - auto request(makeRequest(path)); - try { - getDownloader()->download(std::move(request), sink); - } catch (DownloadError& e) { - if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) { - throw NoSuchBinaryCacheFile( - "file '%s' does not exist in binary cache '%s'", path, getUri()); - } - maybeDisable(); - throw; - } - } - - void getFile( - const std::string& path, - Callback> callback) noexcept override { - checkEnabled(); - - auto request(makeRequest(path)); - - auto callbackPtr = - std::make_shared(std::move(callback)); - - getDownloader()->enqueueDownload( - request, - Callback{ - [callbackPtr, this](std::future result) { - try { - (*callbackPtr)(result.get().data); - } catch (DownloadError& e) { - if (e.error == Downloader::NotFound || - e.error == Downloader::Forbidden) { - return (*callbackPtr)(std::shared_ptr()); - } - maybeDisable(); - callbackPtr->rethrow(); - } catch (...) { - callbackPtr->rethrow(); - } - }}); - } -}; - -static RegisterStoreImplementation regStore( - [](const std::string& uri, - const Store::Params& params) -> std::shared_ptr { - if (std::string(uri, 0, 7) != "http://" && - std::string(uri, 0, 8) != "https://" && - (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") != "1" || - std::string(uri, 0, 7) != "file://")) { - return nullptr; - } - auto store = std::make_shared(params, uri); - store->init(); - return store; - }); - -} // namespace nix diff --git a/third_party/nix/src/libstore/legacy-ssh-store.cc b/third_party/nix/src/libstore/legacy-ssh-store.cc deleted file mode 100644 index 8163258179..0000000000 --- a/third_party/nix/src/libstore/legacy-ssh-store.cc +++ /dev/null @@ -1,282 +0,0 @@ -#include -#include -#include - -#include "libstore/derivations.hh" -#include "libstore/remote-store.hh" -#include "libstore/serve-protocol.hh" -#include "libstore/ssh.hh" -#include "libstore/store-api.hh" -#include "libstore/worker-protocol.hh" -#include "libutil/archive.hh" -#include "libutil/pool.hh" - -namespace nix { - -constexpr std::string_view kUriScheme = "ssh://"; - -struct LegacySSHStore : public Store { - const Setting maxConnections{ - this, 1, "max-connections", - "maximum number of concurrent SSH connections"}; - const Setting sshKey{this, "", "ssh-key", "path to an SSH private key"}; - const Setting compress{this, false, "compress", - "whether to compress the connection"}; - const Setting remoteProgram{ - this, "nix-store", "remote-program", - "path to the nix-store executable on the remote system"}; - const Setting remoteStore{ - this, "", "remote-store", "URI of the store on the remote system"}; - - // Hack for getting remote build log output. 
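
Two behaviours of the HTTP binary cache above are worth spelling out: existence checks treat 403 like 404 because unlistable S3 buckets answer 403 for missing objects, and after a download failure (when fallback is enabled) the cache disables itself for about a minute, re-enabling lazily on the next check. A standalone sketch of that time-based gate, with std::chrono and a mutex standing in for the Sync<> wrapper; the class and error names are illustrative.

    #include <chrono>
    #include <mutex>
    #include <stdexcept>

    // A tiny "circuit breaker": after a failure the substituter is skipped for a
    // while, then automatically re-enabled once the deadline passes.
    class SubstituterGate {
      std::mutex m;
      bool enabled = true;
      std::chrono::steady_clock::time_point disabledUntil;

     public:
      void reportFailure(std::chrono::seconds backoff = std::chrono::seconds(60)) {
        std::lock_guard<std::mutex> lock(m);
        enabled = false;
        disabledUntil = std::chrono::steady_clock::now() + backoff;
      }

      // Throws if the substituter is still inside its backoff window.
      void checkEnabled() {
        std::lock_guard<std::mutex> lock(m);
        if (enabled) return;
        if (std::chrono::steady_clock::now() > disabledUntil) {
          enabled = true;
          return;
        }
        throw std::runtime_error("substituter is temporarily disabled");
      }
    };
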
- const Setting logFD{ - this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"}; - - struct Connection { - std::unique_ptr sshConn; - FdSink to; - FdSource from; - int remoteVersion; - bool good = true; - }; - - std::string host; - - ref> connections; - - SSHMaster master; - - LegacySSHStore(const std::string& host, const Params& params) - : Store(params), - host(host), - connections(make_ref>( - std::max(1, (int)maxConnections), - [this]() { return openConnection(); }, - [](const ref& r) { return r->good; })), - master(host, sshKey, - // Use SSH master only if using more than 1 connection. - connections->capacity() > 1, compress, logFD) {} - - ref openConnection() { - auto conn = make_ref(); - conn->sshConn = master.startCommand( - fmt("%s --serve --write", remoteProgram) + - (remoteStore.get().empty() - ? "" - : " --store " + shellEscape(remoteStore.get()))); - conn->to = FdSink(conn->sshConn->in.get()); - conn->from = FdSource(conn->sshConn->out.get()); - - try { - conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION; - conn->to.flush(); - - unsigned int magic = readInt(conn->from); - if (magic != SERVE_MAGIC_2) { - throw Error("protocol mismatch with 'nix-store --serve' on '%s'", host); - } - conn->remoteVersion = readInt(conn->from); - if (GET_PROTOCOL_MAJOR(conn->remoteVersion) != 0x200) { - throw Error("unsupported 'nix-store --serve' protocol version on '%s'", - host); - } - - } catch (EndOfFile& e) { - throw Error("cannot connect to '%1%'", host); - } - - return conn; - }; - - std::string getUri() override { return absl::StrCat(kUriScheme, host); } - - void queryPathInfoUncached( - const Path& path, - Callback> callback) noexcept override { - try { - auto conn(connections->get()); - - DLOG(INFO) << "querying remote host '" << host << "' for info on '" - << path << "'"; - - conn->to << cmdQueryPathInfos << PathSet{path}; - conn->to.flush(); - - auto info = std::make_shared(); - conn->from >> info->path; - if (info->path.empty()) { - return callback(nullptr); - } - assert(path == info->path); - - PathSet references; - conn->from >> info->deriver; - info->references = readStorePaths(*this, conn->from); - readLongLong(conn->from); // download size - info->narSize = readLongLong(conn->from); - - if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4) { - auto s = readString(conn->from); - if (s.empty()) { - info->narHash = Hash(); - } else { - auto hash_ = Hash::deserialize(s); - info->narHash = Hash::unwrap_throw(hash_); - } - conn->from >> info->ca; - info->sigs = readStrings(conn->from); - } - - auto s = readString(conn->from); - assert(s.empty()); - - callback(std::move(info)); - } catch (...) { - callback.rethrow(); - } - } - - void addToStore(const ValidPathInfo& info, Source& source, RepairFlag repair, - CheckSigsFlag checkSigs, - std::shared_ptr accessor) override { - DLOG(INFO) << "adding path '" << info.path << "' to remote host '" << host - << "'"; - - auto conn(connections->get()); - - if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) { - conn->to << cmdAddToStoreNar << info.path << info.deriver - << info.narHash.to_string(Base16, false) << info.references - << info.registrationTime << info.narSize - << static_cast(info.ultimate) << info.sigs << info.ca; - try { - copyNAR(source, conn->to); - } catch (...) { - conn->good = false; - throw; - } - conn->to.flush(); - - } else { - conn->to << cmdImportPaths << 1; - try { - copyNAR(source, conn->to); - } catch (...) 
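
openConnection above performs the usual magic-plus-version handshake of the nix-store --serve protocol: send our magic and version, expect the peer's magic back, then record the peer's version and reject it if the major part is not the one we implement. A schematic sketch of that pattern; the transport callbacks are stand-ins for FdSink/FdSource, the constants are placeholders, and the 0xff00 major mask is an assumption about GET_PROTOCOL_MAJOR.

    #include <cstdint>
    #include <functional>
    #include <stdexcept>

    struct Handshake {
      uint64_t localMagic, expectedPeerMagic, localVersion;

      // `send`/`recv` abstract the wire encoding of unsigned integers.
      uint64_t run(const std::function<void(uint64_t)>& send,
                   const std::function<uint64_t()>& recv) const {
        send(localMagic);
        send(localVersion);
        if (recv() != expectedPeerMagic)
          throw std::runtime_error("protocol mismatch");
        uint64_t peerVersion = recv();
        if ((peerVersion & 0xff00) != (localVersion & 0xff00))
          throw std::runtime_error("unsupported protocol version");
        return peerVersion;   // minor version drives optional features
      }
    };
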
{ - conn->good = false; - throw; - } - conn->to << exportMagic << info.path << info.references << info.deriver - << 0 << 0; - conn->to.flush(); - } - - if (readInt(conn->from) != 1) { - throw Error( - "failed to add path '%s' to remote host '%s', info.path, host"); - } - } - - void narFromPath(const Path& path, Sink& sink) override { - auto conn(connections->get()); - - conn->to << cmdDumpStorePath << path; - conn->to.flush(); - copyNAR(conn->from, sink); - } - - Path queryPathFromHashPart(const std::string& hashPart) override { - unsupported("queryPathFromHashPart"); - } - - Path addToStore(const std::string& name, const Path& srcPath, bool recursive, - HashType hashAlgo, PathFilter& filter, - RepairFlag repair) override { - unsupported("addToStore"); - } - - Path addTextToStore(const std::string& name, const std::string& s, - const PathSet& references, RepairFlag repair) override { - unsupported("addTextToStore"); - } - - BuildResult buildDerivation(std::ostream& /*log_sink*/, const Path& drvPath, - const BasicDerivation& drv, - BuildMode buildMode) override { - auto conn(connections->get()); - - conn->to << cmdBuildDerivation << drvPath << drv << settings.maxSilentTime - << settings.buildTimeout; - if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 2) { - conn->to << settings.maxLogSize; - } - if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3) { - conn->to << settings.buildRepeat - << static_cast(settings.enforceDeterminism); - } - - conn->to.flush(); - - BuildResult status; - status.status = static_cast(readInt(conn->from)); - conn->from >> status.errorMsg; - - if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3) { - conn->from >> status.timesBuilt >> status.isNonDeterministic >> - status.startTime >> status.stopTime; - } - - return status; - } - - void ensurePath(const Path& path) override { unsupported("ensurePath"); } - - void computeFSClosure(const PathSet& paths, PathSet& out, - bool flipDirection = false, bool includeOutputs = false, - bool includeDerivers = false) override { - if (flipDirection || includeDerivers) { - Store::computeFSClosure(paths, out, flipDirection, includeOutputs, - includeDerivers); - return; - } - - auto conn(connections->get()); - - conn->to << cmdQueryClosure << static_cast(includeOutputs) - << paths; - conn->to.flush(); - - auto res = readStorePaths(*this, conn->from); - - out.insert(res.begin(), res.end()); - } - - PathSet queryValidPaths(const PathSet& paths, SubstituteFlag maybeSubstitute = - NoSubstitute) override { - auto conn(connections->get()); - - conn->to << cmdQueryValidPaths << 0u // lock - << maybeSubstitute << paths; - conn->to.flush(); - - return readStorePaths(*this, conn->from); - } - - void connect() override { auto conn(connections->get()); } - - unsigned int getProtocol() override { - auto conn(connections->get()); - return conn->remoteVersion; - } -}; - -static RegisterStoreImplementation regStore( - [](const std::string& uri, - const Store::Params& params) -> std::shared_ptr { - if (!absl::StartsWith(uri, kUriScheme)) { - return nullptr; - } - return std::make_shared( - std::string(uri, kUriScheme.size()), params); - }); - -} // namespace nix diff --git a/third_party/nix/src/libstore/local-binary-cache-store.cc b/third_party/nix/src/libstore/local-binary-cache-store.cc deleted file mode 100644 index 4555de5047..0000000000 --- a/third_party/nix/src/libstore/local-binary-cache-store.cc +++ /dev/null @@ -1,93 +0,0 @@ -#include - -#include - -#include "libstore/binary-cache-store.hh" -#include "libstore/globals.hh" -#include 
"libstore/nar-info-disk-cache.hh" - -namespace nix { - -class LocalBinaryCacheStore : public BinaryCacheStore { - private: - Path binaryCacheDir; - - public: - LocalBinaryCacheStore(const Params& params, Path binaryCacheDir) - : BinaryCacheStore(params), binaryCacheDir(std::move(binaryCacheDir)) {} - - void init() override; - - std::string getUri() override { return "file://" + binaryCacheDir; } - - protected: - bool fileExists(const std::string& path) override; - - void upsertFile(const std::string& path, const std::string& data, - const std::string& mimeType) override; - - void getFile(const std::string& path, Sink& sink) override { - try { - readFile(binaryCacheDir + "/" + path, sink); - } catch (SysError& e) { - if (e.errNo == ENOENT) { - throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", - path); - } - } - } - - PathSet queryAllValidPaths() override { - PathSet paths; - - for (auto& entry : readDirectory(binaryCacheDir)) { - if (entry.name.size() != 40 || !absl::EndsWith(entry.name, ".narinfo")) { - continue; - } - paths.insert(storeDir + "/" + - entry.name.substr(0, entry.name.size() - 8)); - } - - return paths; - } -}; - -void LocalBinaryCacheStore::init() { - createDirs(binaryCacheDir + "/nar"); - BinaryCacheStore::init(); -} - -static void atomicWrite(const Path& path, const std::string& s) { - Path tmp = path + ".tmp." + std::to_string(getpid()); - AutoDelete del(tmp, false); - writeFile(tmp, s); - if (rename(tmp.c_str(), path.c_str()) != 0) { - throw SysError(format("renaming '%1%' to '%2%'") % tmp % path); - } - del.cancel(); -} - -bool LocalBinaryCacheStore::fileExists(const std::string& path) { - return pathExists(binaryCacheDir + "/" + path); -} - -void LocalBinaryCacheStore::upsertFile(const std::string& path, - const std::string& data, - const std::string& mimeType) { - atomicWrite(binaryCacheDir + "/" + path, data); -} - -static RegisterStoreImplementation regStore( - [](const std::string& uri, - const Store::Params& params) -> std::shared_ptr { - if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1" || - std::string(uri, 0, 7) != "file://") { - return nullptr; - } - auto store = - std::make_shared(params, std::string(uri, 7)); - store->init(); - return store; - }); - -} // namespace nix diff --git a/third_party/nix/src/libstore/local-fs-store.cc b/third_party/nix/src/libstore/local-fs-store.cc deleted file mode 100644 index f2235bad76..0000000000 --- a/third_party/nix/src/libstore/local-fs-store.cc +++ /dev/null @@ -1,123 +0,0 @@ -#include "libstore/derivations.hh" -#include "libstore/fs-accessor.hh" -#include "libstore/globals.hh" -#include "libstore/store-api.hh" -#include "libutil/archive.hh" -#include "libutil/compression.hh" - -namespace nix { - -LocalFSStore::LocalFSStore(const Params& params) : Store(params) {} - -struct LocalStoreAccessor : public FSAccessor { - ref store; - - explicit LocalStoreAccessor(const ref& store) : store(store) {} - - Path toRealPath(const Path& path) { - Path storePath = store->toStorePath(path); - if (!store->isValidPath(storePath)) { - throw InvalidPath(format("path '%1%' is not a valid store path") % - storePath); - } - return store->getRealStoreDir() + std::string(path, store->storeDir.size()); - } - - FSAccessor::Stat stat(const Path& path) override { - auto realPath = toRealPath(path); - - struct stat st; - if (lstat(realPath.c_str(), &st) != 0) { - if (errno == ENOENT || errno == ENOTDIR) { - return {Type::tMissing, 0, false}; - } - throw SysError(format("getting status of '%1%'") % path); - } - - if 
(!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode)) { - throw Error(format("file '%1%' has unsupported type") % path); - } - - return {S_ISREG(st.st_mode) ? Type::tRegular - : S_ISLNK(st.st_mode) ? Type::tSymlink - : Type::tDirectory, - S_ISREG(st.st_mode) ? static_cast(st.st_size) : 0, - S_ISREG(st.st_mode) && ((st.st_mode & S_IXUSR) != 0u)}; - } - - StringSet readDirectory(const Path& path) override { - auto realPath = toRealPath(path); - - auto entries = nix::readDirectory(realPath); - - StringSet res; - for (auto& entry : entries) { - res.insert(entry.name); - } - - return res; - } - - std::string readFile(const Path& path) override { - return nix::readFile(toRealPath(path)); - } - - std::string readLink(const Path& path) override { - return nix::readLink(toRealPath(path)); - } -}; - -ref LocalFSStore::getFSAccessor() { - return make_ref(ref( - std::dynamic_pointer_cast(shared_from_this()))); -} - -void LocalFSStore::narFromPath(const Path& path, Sink& sink) { - if (!isValidPath(path)) { - throw Error(format("path '%s' is not valid") % path); - } - dumpPath(getRealStoreDir() + std::string(path, storeDir.size()), sink); -} - -const std::string LocalFSStore::drvsLogDir = "drvs"; - -std::shared_ptr LocalFSStore::getBuildLog(const Path& path_) { - auto path(path_); - - assertStorePath(path); - - if (!isDerivation(path)) { - try { - path = queryPathInfo(path)->deriver; - } catch (InvalidPath&) { - return nullptr; - } - if (path.empty()) { - return nullptr; - } - } - - std::string baseName = baseNameOf(path); - - for (int j = 0; j < 2; j++) { - Path logPath = - j == 0 ? fmt("%s/%s/%s/%s", logDir, drvsLogDir, - std::string(baseName, 0, 2), std::string(baseName, 2)) - : fmt("%s/%s/%s", logDir, drvsLogDir, baseName); - Path logBz2Path = logPath + ".bz2"; - - if (pathExists(logPath)) { - return std::make_shared(readFile(logPath)); - } - if (pathExists(logBz2Path)) { - try { - return decompress("bzip2", readFile(logBz2Path)); - } catch (Error&) { - } - } - } - - return nullptr; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/local-store.cc b/third_party/nix/src/libstore/local-store.cc deleted file mode 100644 index aca305e1a5..0000000000 --- a/third_party/nix/src/libstore/local-store.cc +++ /dev/null @@ -1,1519 +0,0 @@ -#include "libstore/local-store.hh" - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "generated/schema.sql.hh" -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/nar-info.hh" -#include "libstore/pathlocks.hh" -#include "libstore/worker-protocol.hh" -#include "libutil/archive.hh" - -namespace nix { - -LocalStore::LocalStore(const Params& params) - : Store(params), - LocalFSStore(params), - realStoreDir_{this, false, - rootDir != "" ? rootDir + "/nix/store" : storeDir, "real", - "physical path to the Nix store"}, - realStoreDir(realStoreDir_), - dbDir(stateDir + "/db"), - linksDir(realStoreDir + "/.links"), - reservedPath(dbDir + "/reserved"), - schemaPath(dbDir + "/schema"), - trashDir(realStoreDir + "/trash"), - tempRootsDir(stateDir + "/temproots"), - fnTempRoots(fmt("%s/%d", tempRootsDir, getpid())) { - auto state(_state.lock()); - - /* Create missing state directories if they don't already exist. 
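
The local binary cache store above publishes narinfo and NAR files with the classic write-to-temporary-then-rename pattern, so readers never observe a half-written file. A standalone sketch of that helper using std::ofstream and std::rename; unlike the deleted atomicWrite it does not use nix's AutoDelete guard, and it skips fsync for brevity.

    #include <unistd.h>
    #include <cstdio>
    #include <fstream>
    #include <stdexcept>
    #include <string>

    // Write `data` to `path` atomically: readers see either the old contents or
    // the complete new contents, never a truncated file.
    static void atomicWrite(const std::string& path, const std::string& data) {
      std::string tmp = path + ".tmp." + std::to_string(::getpid());
      {
        std::ofstream out(tmp, std::ios::binary | std::ios::trunc);
        if (!out) throw std::runtime_error("cannot open " + tmp);
        out.write(data.data(), static_cast<std::streamsize>(data.size()));
        if (!out) throw std::runtime_error("cannot write " + tmp);
      }   // stream flushed and closed here
      if (std::rename(tmp.c_str(), path.c_str()) != 0) {
        std::remove(tmp.c_str());
        throw std::runtime_error("cannot rename " + tmp + " to " + path);
      }
    }
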
*/ - createDirs(realStoreDir); - makeStoreWritable(); - createDirs(linksDir); - Path profilesDir = stateDir + "/profiles"; - createDirs(profilesDir); - createDirs(tempRootsDir); - createDirs(dbDir); - Path gcRootsDir = stateDir + "/gcroots"; - if (!pathExists(gcRootsDir)) { - createDirs(gcRootsDir); - createSymlink(profilesDir, gcRootsDir + "/profiles"); - } - - for (auto& perUserDir : - {profilesDir + "/per-user", gcRootsDir + "/per-user"}) { - createDirs(perUserDir); - if (chmod(perUserDir.c_str(), 0755) == -1) { - throw SysError("could not set permissions on '%s' to 755", perUserDir); - } - } - - // TODO(kanepyork): migrate to external constructor, this bypasses virtual - // dispatch - // NOLINTNEXTLINE clang-analyzer-optin.cplusplus.VirtualCall - createUser(getUserName(), getuid()); - - /* Optionally, create directories and set permissions for a - multi-user install. */ - if (getuid() == 0 && settings.buildUsersGroup != "") { - mode_t perm = 01775; - - struct group* gr = getgrnam(settings.buildUsersGroup.get().c_str()); - if (gr == nullptr) { - LOG(ERROR) << "warning: the group '" << settings.buildUsersGroup - << "' specified in 'build-users-group' does not exist"; - } else { - struct stat st; - if (stat(realStoreDir.c_str(), &st) != 0) { - throw SysError(format("getting attributes of path '%1%'") % - realStoreDir); - } - - if (st.st_uid != 0 || st.st_gid != gr->gr_gid || - (st.st_mode & ~S_IFMT) != perm) { - if (chown(realStoreDir.c_str(), 0, gr->gr_gid) == -1) { - throw SysError(format("changing ownership of path '%1%'") % - realStoreDir); - } - if (chmod(realStoreDir.c_str(), perm) == -1) { - throw SysError(format("changing permissions on path '%1%'") % - realStoreDir); - } - } - } - } - - /* Ensure that the store and its parents are not symlinks. */ - if (getEnv("NIX_IGNORE_SYMLINK_STORE") != "1") { - Path path = realStoreDir; - struct stat st; - while (path != "/") { - if (lstat(path.c_str(), &st) != 0) { - throw SysError(format("getting status of '%1%'") % path); - } - if (S_ISLNK(st.st_mode)) { - throw Error(format("the path '%1%' is a symlink; " - "this is not allowed for the Nix store and its " - "parent directories") % - path); - } - path = dirOf(path); - } - } - - /* We can't open a SQLite database if the disk is full. Since - this prevents the garbage collector from running when it's most - needed, we reserve some dummy space that we can free just - before doing a garbage collection. */ - try { - struct stat st; - if (stat(reservedPath.c_str(), &st) == -1 || - st.st_size != settings.reservedSize) { - AutoCloseFD fd( - open(reservedPath.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0600)); - int res = -1; -#if HAVE_POSIX_FALLOCATE - res = posix_fallocate(fd.get(), 0, settings.reservedSize); -#endif - if (res == -1) { - writeFull(fd.get(), std::string(settings.reservedSize, 'X')); - [[gnu::unused]] auto res2 = ftruncate(fd.get(), settings.reservedSize); - } - } - } catch (SysError& e) { /* don't care about errors */ - } - - /* Acquire the big fat lock in shared mode to make sure that no - schema upgrade is in progress. */ - Path globalLockPath = dbDir + "/big-lock"; - globalLock = openLockFile(globalLockPath, true); - - if (!lockFile(globalLock.get(), ltRead, false)) { - LOG(INFO) << "waiting for the big Nix store lock..."; - lockFile(globalLock.get(), ltRead, true); - } - - /* Check the current database schema and if necessary do an - upgrade. 
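
The constructor above pre-allocates a small reserved file so that, when the disk is completely full, deleting it frees just enough space for SQLite and the garbage collector to do their work. A standalone sketch of creating such a reservation, preferring posix_fallocate and falling back to writing real bytes; the guard macro differs from the HAVE_POSIX_FALLOCATE check used above, and error handling is simplified.

    #include <fcntl.h>
    #include <unistd.h>
    #include <algorithm>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Ensure `path` exists and occupies `size` bytes of real disk space, so it
    // can later be removed to make room for a garbage collection.
    static void reserveSpace(const std::string& path, off_t size) {
      int fd = open(path.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0600);
      if (fd == -1) throw std::runtime_error("cannot create " + path);

    #ifdef __linux__
      if (posix_fallocate(fd, 0, size) == 0) {   // allocate blocks without writing
        close(fd);
        return;
      }
    #endif
      // Fallback: write real bytes so the space is actually taken.
      std::vector<char> block(64 * 1024, 'X');
      off_t written = 0;
      while (written < size) {
        ssize_t n = write(fd, block.data(),
                          std::min<off_t>(block.size(), size - written));
        if (n <= 0) { close(fd); throw std::runtime_error("cannot write " + path); }
        written += n;
      }
      close(fd);
    }
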
*/ - int curSchema = getSchema(); - if (curSchema > nixSchemaVersion) { - throw Error( - format( - "current Nix store schema is version %1%, but I only support %2%") % - curSchema % nixSchemaVersion); - } - if (curSchema == 0) { /* new store */ - curSchema = nixSchemaVersion; - openDB(*state, true); - writeFile(schemaPath, (format("%1%") % curSchema).str()); - } else if (curSchema < nixSchemaVersion) { - if (curSchema < 5) { - throw Error( - "Your Nix store has a database in Berkeley DB format,\n" - "which is no longer supported. To convert to the new format,\n" - "please upgrade Nix to version 0.12 first."); - } - - if (curSchema < 6) { - throw Error( - "Your Nix store has a database in flat file format,\n" - "which is no longer supported. To convert to the new format,\n" - "please upgrade Nix to version 1.11 first."); - } - - if (!lockFile(globalLock.get(), ltWrite, false)) { - LOG(INFO) << "waiting for exclusive access to the Nix store..."; - lockFile(globalLock.get(), ltWrite, true); - } - - /* Get the schema version again, because another process may - have performed the upgrade already. */ - curSchema = getSchema(); - - if (curSchema < 7) { - upgradeStore7(); - } - - openDB(*state, false); - - if (curSchema < 8) { - SQLiteTxn txn(state->db); - state->db.exec("alter table ValidPaths add column ultimate integer"); - state->db.exec("alter table ValidPaths add column sigs text"); - txn.commit(); - } - - if (curSchema < 9) { - SQLiteTxn txn(state->db); - state->db.exec("drop table FailedPaths"); - txn.commit(); - } - - if (curSchema < 10) { - SQLiteTxn txn(state->db); - state->db.exec("alter table ValidPaths add column ca text"); - txn.commit(); - } - - writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str()); - - lockFile(globalLock.get(), ltRead, true); - } else { - openDB(*state, false); - } - - /* Prepare SQL statements. */ - state->stmtRegisterValidPath.create( - state->db, - "insert into ValidPaths (path, hash, registrationTime, deriver, narSize, " - "ultimate, sigs, ca) values (?, ?, ?, ?, ?, ?, ?, ?);"); - state->stmtUpdatePathInfo.create( - state->db, - "update ValidPaths set narSize = ?, hash = ?, ultimate = ?, sigs = ?, ca " - "= ? where path = ?;"); - state->stmtAddReference.create( - state->db, - "insert or replace into Refs (referrer, reference) values (?, ?);"); - state->stmtQueryPathInfo.create( - state->db, - "select id, hash, registrationTime, deriver, narSize, ultimate, sigs, ca " - "from ValidPaths where path = ?;"); - state->stmtQueryReferences.create(state->db, - "select path from Refs join ValidPaths on " - "reference = id where referrer = ?;"); - state->stmtQueryReferrers.create( - state->db, - "select path from Refs join ValidPaths on referrer = id where reference " - "= (select id from ValidPaths where path = ?);"); - state->stmtInvalidatePath.create(state->db, - "delete from ValidPaths where path = ?;"); - state->stmtAddDerivationOutput.create( - state->db, - "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, " - "?);"); - state->stmtQueryValidDerivers.create( - state->db, - "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv " - "= v.id where d.path = ?;"); - state->stmtQueryDerivationOutputs.create( - state->db, "select id, path from DerivationOutputs where drv = ?;"); - // Use "path >= ?" with limit 1 rather than "path like '?%'" to - // ensure efficient lookup. - state->stmtQueryPathFromHashPart.create( - state->db, "select path from ValidPaths where path >= ? 
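The comment above about preferring "path >= ? limit 1" over a LIKE pattern is worth a standalone illustration: an index on the path column makes the ordered seek cheap, and the caller re-checks the prefix afterwards. The table and rows below are invented.

    #include <sqlite3.h>
    #include <cstdio>
    #include <string>

    int main() {
      sqlite3* db = nullptr;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
                   "create table ValidPaths (path text primary key);"
                   "insert into ValidPaths values ('/nix/store/aaaa-foo'),"
                   "('/nix/store/bbbb-bar');",
                   nullptr, nullptr, nullptr);

      const std::string prefix = "/nix/store/bbbb";  // store dir + hash part
      sqlite3_stmt* stmt = nullptr;
      sqlite3_prepare_v2(db,
                         "select path from ValidPaths where path >= ? limit 1;",
                         -1, &stmt, nullptr);
      sqlite3_bind_text(stmt, 1, prefix.c_str(), -1, SQLITE_TRANSIENT);

      if (sqlite3_step(stmt) == SQLITE_ROW) {
        std::string path =
            reinterpret_cast<const char*>(sqlite3_column_text(stmt, 0));
        // The row is only a hit if it really starts with the prefix.
        if (path.compare(0, prefix.size(), prefix) == 0) {
          std::printf("%s\n", path.c_str());
        }
      }
      sqlite3_finalize(stmt);
      sqlite3_close(db);
    }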
limit 1;"); - state->stmtQueryValidPaths.create(state->db, "select path from ValidPaths"); -} - -LocalStore::~LocalStore() { - std::shared_future future; - - { - auto state(_state.lock()); - if (state->gcRunning) { - future = state->gcFuture; - } - } - - if (future.valid()) { - LOG(INFO) << "waiting for auto-GC to finish on exit..."; - future.get(); - } - - try { - auto state(_state.lock()); - if (state->fdTempRoots) { - state->fdTempRoots = AutoCloseFD(-1); - unlink(fnTempRoots.c_str()); - } - } catch (...) { - ignoreException(); - } -} - -std::string LocalStore::getUri() { return "local"; } - -int LocalStore::getSchema() { - int curSchema = 0; - if (pathExists(schemaPath)) { - std::string s = readFile(schemaPath); - if (!absl::SimpleAtoi(s, &curSchema)) { - throw Error(format("'%1%' is corrupt") % schemaPath); - } - } - return curSchema; -} - -void LocalStore::openDB(State& state, bool create) { - if (access(dbDir.c_str(), R_OK | W_OK) != 0) { - throw SysError(format("Nix database directory '%1%' is not writable") % - dbDir); - } - - /* Open the Nix database. */ - std::string dbPath = dbDir + "/db.sqlite"; - auto& db(state.db); - if (sqlite3_open_v2(dbPath.c_str(), &db.db, - SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), - nullptr) != SQLITE_OK) { - throw Error(format("cannot open Nix database '%1%'") % dbPath); - } - - if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK) { - throwSQLiteError(db, "setting timeout"); - } - - db.exec("pragma foreign_keys = 1"); - - /* !!! check whether sqlite has been built with foreign key - support */ - - /* Whether SQLite should fsync(). "Normal" synchronous mode - should be safe enough. If the user asks for it, don't sync at - all. This can cause database corruption if the system - crashes. */ - std::string syncMode = settings.fsyncMetadata ? "normal" : "off"; - db.exec("pragma synchronous = " + syncMode); - - /* Set the SQLite journal mode. WAL mode is fastest, so it's the - default. */ - std::string mode = settings.useSQLiteWAL ? "wal" : "truncate"; - std::string prevMode; - { - SQLiteStmt stmt; - stmt.create(db, "pragma main.journal_mode;"); - if (sqlite3_step(stmt) != SQLITE_ROW) { - throwSQLiteError(db, "querying journal mode"); - } - prevMode = std::string( - reinterpret_cast(sqlite3_column_text(stmt, 0))); - } - if (prevMode != mode && - sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), - nullptr, nullptr, nullptr) != SQLITE_OK) { - throwSQLiteError(db, "setting journal mode"); - } - - /* Increase the auto-checkpoint interval to 40000 pages. This - seems enough to ensure that instantiating the NixOS system - derivation is done in a single fsync(). */ - if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 40000;", - nullptr, nullptr, nullptr) != SQLITE_OK) { - throwSQLiteError(db, "setting autocheckpoint interval"); - } - - /* Initialise the database schema, if necessary. */ - if (create) { - db.exec(kNixSqlSchema); - } -} - -/* To improve purity, users may want to make the Nix store a read-only - bind mount. So make the Nix store writable for this process. */ -void LocalStore::makeStoreWritable() { - if (getuid() != 0) { - return; - } - /* Check if /nix/store is on a read-only mount. 
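A condensed sketch of the SQLite session setup above: open the database, set a long busy timeout, and apply the synchronous and journal-mode pragmas. The database path is a placeholder, and error handling is trimmed to the essentials.

    #include <sqlite3.h>
    #include <stdexcept>
    #include <string>

    sqlite3* openMetadataDb(const std::string& path, bool useWAL) {
      sqlite3* db = nullptr;
      if (sqlite3_open_v2(path.c_str(), &db,
                          SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
                          nullptr) != SQLITE_OK)
        throw std::runtime_error("cannot open " + path);

      sqlite3_busy_timeout(db, 60 * 60 * 1000);  // wait up to an hour on locks
      sqlite3_exec(db, "pragma foreign_keys = 1;", nullptr, nullptr, nullptr);
      sqlite3_exec(db, "pragma synchronous = normal;", nullptr, nullptr, nullptr);
      const char* mode = useWAL ? "pragma journal_mode = wal;"
                                : "pragma journal_mode = truncate;";
      sqlite3_exec(db, mode, nullptr, nullptr, nullptr);
      return db;
    }

    int main() { sqlite3_close(openMetadataDb("/tmp/example.sqlite", true)); }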
*/ - struct statvfs stat; - if (statvfs(realStoreDir.c_str(), &stat) != 0) { - throw SysError("getting info about the Nix store mount point"); - } - - if ((stat.f_flag & ST_RDONLY) != 0u) { - if (unshare(CLONE_NEWNS) == -1) { - throw SysError("setting up a private mount namespace"); - } - - if (mount(nullptr, realStoreDir.c_str(), "none", MS_REMOUNT | MS_BIND, - nullptr) == -1) { - throw SysError(format("remounting %1% writable") % realStoreDir); - } - } -} - -const time_t mtimeStore = 1; /* 1 second into the epoch */ - -static void canonicaliseTimestampAndPermissions(const Path& path, - const struct stat& st) { - if (!S_ISLNK(st.st_mode)) { - /* Mask out all type related bits. */ - mode_t mode = st.st_mode & ~S_IFMT; - - if (mode != 0444 && mode != 0555) { - mode = (st.st_mode & S_IFMT) | 0444 | - ((st.st_mode & S_IXUSR) != 0u ? 0111 : 0); - if (chmod(path.c_str(), mode) == -1) { - throw SysError(format("changing mode of '%1%' to %2$o") % path % mode); - } - } - } - - if (st.st_mtime != mtimeStore) { - struct timeval times[2]; - times[0].tv_sec = st.st_atime; - times[0].tv_usec = 0; - times[1].tv_sec = mtimeStore; - times[1].tv_usec = 0; -#if HAVE_LUTIMES - if (lutimes(path.c_str(), times) == -1) { - if (errno != ENOSYS || - (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1)) { -#else - if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1) { -#endif - throw SysError(format("changing modification time of '%1%'") % path); - } - } - } // namespace nix -} // namespace nix - -void canonicaliseTimestampAndPermissions(const Path& path) { - struct stat st; - if (lstat(path.c_str(), &st) != 0) { - throw SysError(format("getting attributes of path '%1%'") % path); - } - canonicaliseTimestampAndPermissions(path, st); -} - -static void canonicalisePathMetaData_(const Path& path, uid_t fromUid, - InodesSeen& inodesSeen) { - checkInterrupt(); - - struct stat st; - if (lstat(path.c_str(), &st) != 0) { - throw SysError(format("getting attributes of path '%1%'") % path); - } - - /* Really make sure that the path is of a supported type. */ - if (!(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode))) { - throw Error(format("file '%1%' has an unsupported type") % path); - } - - /* Remove extended attributes / ACLs. */ - ssize_t eaSize = llistxattr(path.c_str(), nullptr, 0); - - if (eaSize < 0) { - if (errno != ENOTSUP && errno != ENODATA) { - throw SysError("querying extended attributes of '%s'", path); - } - } else if (eaSize > 0) { - std::vector eaBuf(eaSize); - - if ((eaSize = llistxattr(path.c_str(), eaBuf.data(), eaBuf.size())) < 0) { - throw SysError("querying extended attributes of '%s'", path); - } - - for (auto& eaName : absl::StrSplit(std::string(eaBuf.data(), eaSize), - absl::ByString(std::string("\000", 1)), - absl::SkipEmpty())) { - /* Ignore SELinux security labels since these cannot be - removed even by root. */ - if (eaName == "security.selinux") { - continue; - } - if (lremovexattr(path.c_str(), std::string(eaName).c_str()) == -1) { - throw SysError("removing extended attribute '%s' from '%s'", eaName, - path); - } - } - } - - /* Fail if the file is not owned by the build user. This prevents - us from messing up the ownership/permissions of files - hard-linked into the output (e.g. "ln /etc/shadow $out/foo"). - However, ignore files that we chown'ed ourselves previously to - ensure that we don't fail on hard links within the same build - (i.e. "touch $out/foo; ln $out/foo $out/bar"). 
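The remount dance above is Linux-specific. A minimal sketch, assuming root privileges and an already bind-mounted, read-only target directory; entering a private mount namespace keeps the change invisible to other processes.

    #include <sched.h>
    #include <sys/mount.h>
    #include <sys/statvfs.h>

    // Note: unshare()/CLONE_NEWNS need _GNU_SOURCE, which g++ on Linux
    // defines by default.
    static bool makeWritable(const char* dir) {
      struct statvfs st{};
      if (statvfs(dir, &st) != 0) return false;
      if ((st.f_flag & ST_RDONLY) == 0) return true;  // already writable
      if (unshare(CLONE_NEWNS) == -1) return false;   // private mount namespace
      return mount(nullptr, dir, "none", MS_REMOUNT | MS_BIND, nullptr) == 0;
    }

    int main() { return makeWritable("/nix/store") ? 0 : 1; }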
*/ - if (fromUid != static_cast(-1) && st.st_uid != fromUid) { - if (S_ISDIR(st.st_mode)) { - throw BuildError(format("invalid file '%1%': is a directory") % path); - } - if (inodesSeen.find(Inode(st.st_dev, st.st_ino)) == inodesSeen.end()) { - throw BuildError(format("invalid ownership on file '%1%'") % path); - } - if (!(S_ISLNK(st.st_mode) || - (st.st_uid == geteuid() && - ((st.st_mode & ~S_IFMT) == 0444 || (st.st_mode & ~S_IFMT) == 0555) && - st.st_mtime == mtimeStore))) { - throw BuildError( - format("invalid permissions on file '%1%', should be 0444/0555") % - path); - } - - return; - } - - inodesSeen.insert(Inode(st.st_dev, st.st_ino)); - - canonicaliseTimestampAndPermissions(path, st); - - /* Change ownership to the current uid. If it's a symlink, use - lchown if available, otherwise don't bother. Wrong ownership - of a symlink doesn't matter, since the owning user can't change - the symlink and can't delete it because the directory is not - writable. The only exception is top-level paths in the Nix - store (since that directory is group-writable for the Nix build - users group); we check for this case below. */ - if (st.st_uid != geteuid()) { -#if HAVE_LCHOWN - if (lchown(path.c_str(), geteuid(), getegid()) == -1) { -#else - if (!S_ISLNK(st.st_mode) && chown(path.c_str(), geteuid(), getegid()) == -1) -#endif - throw SysError(format("changing owner of '%1%' to %2%") % path % - geteuid()); - } - } - - if (S_ISDIR(st.st_mode)) { - DirEntries entries = readDirectory(path); - for (auto& i : entries) { - canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen); - } - } -} - -void canonicalisePathMetaData(const Path& path, uid_t fromUid, - InodesSeen& inodesSeen) { - canonicalisePathMetaData_(path, fromUid, inodesSeen); - - /* On platforms that don't have lchown(), the top-level path can't - be a symlink, since we can't change its ownership. 
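The inodesSeen set above exists so that a second hard link to an already-canonicalised file is not rejected as foreign-owned. The bookkeeping itself is just a set of (device, inode) pairs, sketched here with a hypothetical path:

    #include <sys/stat.h>
    #include <cstdio>
    #include <set>
    #include <string>
    #include <utility>

    using Inode = std::pair<dev_t, ino_t>;

    // Returns true if this path's inode was already recorded, e.g. a hard
    // link created earlier in the same build.
    static bool alreadySeen(std::set<Inode>& seen, const std::string& path) {
      struct stat st{};
      if (lstat(path.c_str(), &st) != 0) return false;
      return !seen.insert(Inode(st.st_dev, st.st_ino)).second;
    }

    int main() {
      std::set<Inode> seen;
      std::printf("%d\n", alreadySeen(seen, "/etc/hostname"));  // 0: first time
      std::printf("%d\n", alreadySeen(seen, "/etc/hostname"));  // 1: same inode
    }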
*/ - struct stat st; - if (lstat(path.c_str(), &st) != 0) { - throw SysError(format("getting attributes of path '%1%'") % path); - } - - if (st.st_uid != geteuid()) { - assert(S_ISLNK(st.st_mode)); - throw Error(format("wrong ownership of top-level store path '%1%'") % path); - } -} - -void canonicalisePathMetaData(const Path& path, uid_t fromUid) { - InodesSeen inodesSeen; - canonicalisePathMetaData(path, fromUid, inodesSeen); -} - -void LocalStore::checkDerivationOutputs(const Path& drvPath, - const Derivation& drv) { - std::string drvName = storePathToName(drvPath); - assert(isDerivation(drvName)); - drvName = std::string(drvName, 0, drvName.size() - drvExtension.size()); - - if (drv.isFixedOutput()) { - auto out = drv.outputs.find("out"); - if (out == drv.outputs.end()) { - throw Error( - format("derivation '%1%' does not have an output named 'out'") % - drvPath); - } - - bool recursive; - Hash h; - out->second.parseHashInfo(recursive, h); - Path outPath = makeFixedOutputPath(recursive, h, drvName); - - auto j = drv.env.find("out"); - if (out->second.path != outPath || j == drv.env.end() || - j->second != outPath) { - throw Error( - format( - "derivation '%1%' has incorrect output '%2%', should be '%3%'") % - drvPath % out->second.path % outPath); - } - } - - else { - Derivation drvCopy(drv); - for (auto& i : drvCopy.outputs) { - i.second.path = ""; - drvCopy.env[i.first] = ""; - } - - Hash h = hashDerivationModulo(*this, drvCopy); - - for (auto& i : drv.outputs) { - Path outPath = makeOutputPath(i.first, h, drvName); - auto j = drv.env.find(i.first); - if (i.second.path != outPath || j == drv.env.end() || - j->second != outPath) { - throw Error(format("derivation '%1%' has incorrect output '%2%', " - "should be '%3%'") % - drvPath % i.second.path % outPath); - } - } - } -} - -uint64_t LocalStore::addValidPath(State& state, const ValidPathInfo& info, - bool checkOutputs) { - if (!info.ca.empty() && !info.isContentAddressed(*this)) { - throw Error( - "cannot add path '%s' to the Nix store because it claims to be " - "content-addressed but isn't", - info.path); - } - - state.stmtRegisterValidPath - .use()(info.path)(info.narHash.to_string(Base16))( - info.registrationTime == 0 ? time(nullptr) : info.registrationTime)( - info.deriver, !info.deriver.empty())(info.narSize, info.narSize != 0)( - info.ultimate ? 1 : 0, info.ultimate)( - concatStringsSep(" ", info.sigs), !info.sigs.empty())( - info.ca, !info.ca.empty()) - .exec(); - uint64_t id = sqlite3_last_insert_rowid(state.db); - - /* If this is a derivation, then store the derivation outputs in - the database. This is useful for the garbage collector: it can - efficiently query whether a path is an output of some - derivation. */ - if (isDerivation(info.path)) { - Derivation drv = readDerivation(realStoreDir + "/" + baseNameOf(info.path)); - - /* Verify that the output paths in the derivation are correct - (i.e., follow the scheme for computing output paths from - derivations). Note that if this throws an error, then the - DB transaction is rolled back, so the path validity - registration above is undone. 
*/ - if (checkOutputs) { - checkDerivationOutputs(info.path, drv); - } - - for (auto& i : drv.outputs) { - state.stmtAddDerivationOutput.use()(id)(i.first)(i.second.path).exec(); - } - } - - { - auto state_(Store::state.lock()); - state_->pathInfoCache.upsert(storePathToHash(info.path), - std::make_shared(info)); - } - - return id; -} - -void LocalStore::queryPathInfoUncached( - const Path& path, - Callback> callback) noexcept { - try { - auto info = std::make_shared(); - info->path = path; - - assertStorePath(path); - - callback(retrySQLite>([&]() { - auto state(_state.lock()); - - /* Get the path info. */ - auto useQueryPathInfo(state->stmtQueryPathInfo.use()(path)); - - if (!useQueryPathInfo.next()) { - return std::shared_ptr(); - } - - info->id = useQueryPathInfo.getInt(0); - - auto hash_ = Hash::deserialize(useQueryPathInfo.getStr(1)); - if (!hash_.ok()) { - throw Error(absl::StrCat("in valid-path entry for '", path, - "': ", hash_.status().ToString())); - } - info->narHash = *hash_; - - info->registrationTime = useQueryPathInfo.getInt(2); - - auto s = reinterpret_cast( - sqlite3_column_text(state->stmtQueryPathInfo, 3)); - if (s != nullptr) { - info->deriver = s; - } - - /* Note that narSize = NULL yields 0. */ - info->narSize = useQueryPathInfo.getInt(4); - - info->ultimate = useQueryPathInfo.getInt(5) == 1; - - s = reinterpret_cast( - sqlite3_column_text(state->stmtQueryPathInfo, 6)); - if (s != nullptr) { - info->sigs = absl::StrSplit(s, absl::ByChar(' '), absl::SkipEmpty()); - } - - s = reinterpret_cast( - sqlite3_column_text(state->stmtQueryPathInfo, 7)); - if (s != nullptr) { - info->ca = s; - } - - /* Get the references. */ - auto useQueryReferences(state->stmtQueryReferences.use()(info->id)); - - while (useQueryReferences.next()) { - info->references.insert(useQueryReferences.getStr(0)); - } - - return info; - })); - - } catch (...) { - callback.rethrow(); - } -} - -/* Update path info in the database. */ -void LocalStore::updatePathInfo(State& state, const ValidPathInfo& info) { - state.stmtUpdatePathInfo - .use()(info.narSize, info.narSize != 0)(info.narHash.to_string(Base16))( - info.ultimate ? 
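queryPathInfoUncached and the other accessors wrap their database work in retrySQLite<>. A generic sketch of that pattern, with an invented SQLiteBusy type and a simple bounded back-off; the real retry policy may differ.

    #include <chrono>
    #include <stdexcept>
    #include <thread>

    struct SQLiteBusy : std::runtime_error {
      using std::runtime_error::runtime_error;
    };

    template <typename T, typename F>
    T retrySQLite(F&& fun) {
      for (int attempt = 0;; ++attempt) {
        try {
          return fun();
        } catch (const SQLiteBusy&) {
          if (attempt > 100) throw;  // give up eventually
          std::this_thread::sleep_for(std::chrono::milliseconds(10 * attempt));
        }
      }
    }

    int main() {
      int calls = 0;
      return retrySQLite<int>([&] {
        if (++calls < 3) throw SQLiteBusy("database is locked");
        return 0;  // succeeds on the third attempt
      });
    }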
1 : 0, info.ultimate)( - concatStringsSep(" ", info.sigs), !info.sigs.empty())( - info.ca, !info.ca.empty())(info.path) - .exec(); -} - -uint64_t LocalStore::queryValidPathId(State& state, const Path& path) { - auto use(state.stmtQueryPathInfo.use()(path)); - if (!use.next()) { - throw Error(format("path '%1%' is not valid") % path); - } - return use.getInt(0); -} - -bool LocalStore::isValidPath_(State& state, const Path& path) { - return state.stmtQueryPathInfo.use()(path).next(); -} - -bool LocalStore::isValidPathUncached(const Path& path) { - return retrySQLite([&]() { - auto state(_state.lock()); - return isValidPath_(*state, path); - }); -} - -PathSet LocalStore::queryValidPaths(const PathSet& paths, - SubstituteFlag maybeSubstitute) { - PathSet res; - for (auto& i : paths) { - if (isValidPath(i)) { - res.insert(i); - } - } - return res; -} - -PathSet LocalStore::queryAllValidPaths() { - return retrySQLite([&]() { - auto state(_state.lock()); - auto use(state->stmtQueryValidPaths.use()); - PathSet res; - while (use.next()) { - res.insert(use.getStr(0)); - } - return res; - }); -} - -void LocalStore::queryReferrers(State& state, const Path& path, - PathSet& referrers) { - auto useQueryReferrers(state.stmtQueryReferrers.use()(path)); - - while (useQueryReferrers.next()) { - referrers.insert(useQueryReferrers.getStr(0)); - } -} - -void LocalStore::queryReferrers(const Path& path, PathSet& referrers) { - assertStorePath(path); - return retrySQLite([&]() { - auto state(_state.lock()); - queryReferrers(*state, path, referrers); - }); -} - -PathSet LocalStore::queryValidDerivers(const Path& path) { - assertStorePath(path); - - return retrySQLite([&]() { - auto state(_state.lock()); - - auto useQueryValidDerivers(state->stmtQueryValidDerivers.use()(path)); - - PathSet derivers; - while (useQueryValidDerivers.next()) { - derivers.insert(useQueryValidDerivers.getStr(1)); - } - - return derivers; - }); -} - -PathSet LocalStore::queryDerivationOutputs(const Path& path) { - return retrySQLite([&]() { - auto state(_state.lock()); - - auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()( - queryValidPathId(*state, path))); - - PathSet outputs; - while (useQueryDerivationOutputs.next()) { - outputs.insert(useQueryDerivationOutputs.getStr(1)); - } - - return outputs; - }); -} - -StringSet LocalStore::queryDerivationOutputNames(const Path& path) { - return retrySQLite([&]() { - auto state(_state.lock()); - - auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()( - queryValidPathId(*state, path))); - - StringSet outputNames; - while (useQueryDerivationOutputs.next()) { - outputNames.insert(useQueryDerivationOutputs.getStr(0)); - } - - return outputNames; - }); -} - -Path LocalStore::queryPathFromHashPart(const std::string& hashPart) { - if (hashPart.size() != storePathHashLen) { - throw Error("invalid hash part"); - } - - Path prefix = storeDir + "/" + hashPart; - - return retrySQLite([&]() -> std::string { - auto state(_state.lock()); - - auto useQueryPathFromHashPart( - state->stmtQueryPathFromHashPart.use()(prefix)); - - if (!useQueryPathFromHashPart.next()) { - return ""; - } - - const char* s = reinterpret_cast( - sqlite3_column_text(state->stmtQueryPathFromHashPart, 0)); - return (s != nullptr) && - prefix.compare(0, prefix.size(), s, prefix.size()) == 0 - ? 
s - : ""; - }); -} - -PathSet LocalStore::querySubstitutablePaths(const PathSet& paths) { - if (!settings.useSubstitutes) { - return PathSet(); - } - - auto remaining = paths; - PathSet res; - - for (auto& sub : getDefaultSubstituters()) { - if (remaining.empty()) { - break; - } - if (sub->storeDir != storeDir) { - continue; - } - if (!sub->wantMassQuery()) { - continue; - } - - auto valid = sub->queryValidPaths(remaining); - - PathSet remaining2; - for (auto& path : remaining) { - if (valid.count(path) != 0u) { - res.insert(path); - } else { - remaining2.insert(path); - } - } - - std::swap(remaining, remaining2); - } - - return res; -} - -void LocalStore::querySubstitutablePathInfos(const PathSet& paths, - SubstitutablePathInfos& infos) { - if (!settings.useSubstitutes) { - return; - } - for (auto& sub : getDefaultSubstituters()) { - if (sub->storeDir != storeDir) { - continue; - } - for (auto& path : paths) { - if (infos.count(path) != 0u) { - continue; - } - DLOG(INFO) << "checking substituter '" << sub->getUri() << "' for path '" - << path << "'"; - try { - auto info = sub->queryPathInfo(path); - auto narInfo = std::dynamic_pointer_cast( - std::shared_ptr(info)); - infos[path] = SubstitutablePathInfo{info->deriver, info->references, - narInfo ? narInfo->fileSize : 0, - info->narSize}; - } catch (InvalidPath&) { - } catch (SubstituterDisabled&) { - } catch (Error& e) { - if (settings.tryFallback) { - LOG(ERROR) << e.what(); - } else { - throw; - } - } - } - } -} - -void LocalStore::registerValidPath(const ValidPathInfo& info) { - ValidPathInfos infos; - infos.push_back(info); - registerValidPaths(infos); -} - -void LocalStore::registerValidPaths(const ValidPathInfos& infos) { - /* SQLite will fsync by default, but the new valid paths may not - be fsync-ed. So some may want to fsync them before registering - the validity, at the expense of some speed of the path - registering operation. */ - if (settings.syncBeforeRegistering) { - sync(); - } - - return retrySQLite([&]() { - auto state(_state.lock()); - - SQLiteTxn txn(state->db); - PathSet paths; - - for (auto& i : infos) { - assert(i.narHash.type == htSHA256); - if (isValidPath_(*state, i.path)) { - updatePathInfo(*state, i); - } else { - addValidPath(*state, i, false); - } - paths.insert(i.path); - } - - for (auto& i : infos) { - auto referrer = queryValidPathId(*state, i.path); - for (auto& j : i.references) { - state->stmtAddReference.use()(referrer)(queryValidPathId(*state, j)) - .exec(); - } - } - - /* Check that the derivation outputs are correct. We can't do - this in addValidPath() above, because the references might - not be valid yet. */ - for (auto& i : infos) { - if (isDerivation(i.path)) { - // FIXME: inefficient; we already loaded the - // derivation in addValidPath(). - Derivation drv = - readDerivation(realStoreDir + "/" + baseNameOf(i.path)); - checkDerivationOutputs(i.path, drv); - } - } - - /* Do a topological sort of the paths. This will throw an - error if a cycle is detected and roll back the - transaction. Cycles can only occur when a derivation - has multiple outputs. */ - topoSortPaths(paths); - - txn.commit(); - }); -} - -/* Invalidate a path. The caller is responsible for checking that - there are no referrers. */ -void LocalStore::invalidatePath(State& state, const Path& path) { - LOG(INFO) << "invalidating path '" << path << "'"; - - state.stmtInvalidatePath.use()(path).exec(); - - /* Note that the foreign key constraints on the Refs table take - care of deleting the references entries for `path'. 
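querySubstitutablePaths above repeatedly partitions the outstanding set: paths confirmed by one substituter move to the result, the rest carry over to the next substituter. One round of that partition step, with placeholder paths:

    #include <set>
    #include <string>
    #include <utility>

    using PathSet = std::set<std::string>;

    // 'valid' is what a single substituter reported for 'remaining'.
    static void partitionRound(PathSet& remaining, const PathSet& valid,
                               PathSet& found) {
      PathSet next;
      for (const auto& path : remaining) {
        if (valid.count(path) != 0) {
          found.insert(path);
        } else {
          next.insert(path);  // still unknown; ask the next substituter
        }
      }
      std::swap(remaining, next);
    }

    int main() {
      PathSet remaining = {"/nix/store/a", "/nix/store/b"};
      PathSet found;
      partitionRound(remaining, /*valid=*/{"/nix/store/a"}, found);
      // remaining == {"/nix/store/b"}, found == {"/nix/store/a"}
      return remaining.size() == 1 && found.size() == 1 ? 0 : 1;
    }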
*/ - - { - auto state_(Store::state.lock()); - state_->pathInfoCache.erase(storePathToHash(path)); - } -} - -const PublicKeys& LocalStore::getPublicKeys() { - auto state(_state.lock()); - if (!state->publicKeys) { - state->publicKeys = std::make_unique(getDefaultPublicKeys()); - } - return *state->publicKeys; -} - -void LocalStore::addToStore(const ValidPathInfo& info, Source& source, - RepairFlag repair, CheckSigsFlag checkSigs, - std::shared_ptr accessor) { - if (!info.narHash) { - throw Error("cannot add path '%s' because it lacks a hash", info.path); - } - - if (requireSigs && (checkSigs != 0u) && - (info.checkSignatures(*this, getPublicKeys()) == 0u)) { - throw Error("cannot add path '%s' because it lacks a valid signature", - info.path); - } - - addTempRoot(info.path); - - if ((repair != 0u) || !isValidPath(info.path)) { - PathLocks outputLock; - - Path realPath = realStoreDir + "/" + baseNameOf(info.path); - - /* Lock the output path. But don't lock if we're being called - from a build hook (whose parent process already acquired a - lock on this path). */ - if (locksHeld.count(info.path) == 0u) { - outputLock.lockPaths({realPath}); - } - - if ((repair != 0u) || !isValidPath(info.path)) { - deletePath(realPath); - - /* While restoring the path from the NAR, compute the hash - of the NAR. */ - HashSink hashSink(htSHA256); - - LambdaSource wrapperSource( - [&](unsigned char* data, size_t len) -> size_t { - size_t n = source.read(data, len); - hashSink(data, n); - return n; - }); - - restorePath(realPath, wrapperSource); - - auto hashResult = hashSink.finish(); - - if (hashResult.first != info.narHash) { - throw Error( - "hash mismatch importing path '%s';\n wanted: %s\n got: %s", - info.path, info.narHash.to_string(), hashResult.first.to_string()); - } - - if (hashResult.second != info.narSize) { - throw Error( - "size mismatch importing path '%s';\n wanted: %s\n got: %s", - info.path, info.narSize, hashResult.second); - } - - autoGC(); - - canonicalisePathMetaData(realPath, -1); - - optimisePath(realPath); // FIXME: combine with hashPath() - - registerValidPath(info); - } - - outputLock.setDeletion(true); - } -} - -Path LocalStore::addToStoreFromDump(const std::string& dump, - const std::string& name, bool recursive, - HashType hashAlgo, RepairFlag repair) { - Hash h = hashString(hashAlgo, dump); - - Path dstPath = makeFixedOutputPath(recursive, h, name); - - addTempRoot(dstPath); - - if ((repair != 0u) || !isValidPath(dstPath)) { - /* The first check above is an optimisation to prevent - unnecessary lock acquisition. */ - - Path realPath = realStoreDir + "/" + baseNameOf(dstPath); - - PathLocks outputLock({realPath}); - - if ((repair != 0u) || !isValidPath(dstPath)) { - deletePath(realPath); - - autoGC(); - - if (recursive) { - StringSource source(dump); - restorePath(realPath, source); - } else { - writeFile(realPath, dump); - } - - canonicalisePathMetaData(realPath, -1); - - /* Register the SHA-256 hash of the NAR serialisation of - the path in the database. We may just have computed it - above (if called with recursive == true and hashAlgo == - sha256); otherwise, compute it here. */ - HashResult hash; - if (recursive) { - hash.first = hashAlgo == htSHA256 ? 
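addToStore above wraps the incoming NAR stream so the hash is computed while the path is being restored rather than in a second pass. A generic sketch of that tee pattern; FNV-1a stands in for the real SHA-256 HashSink purely to keep the example self-contained.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <string>

    // Byte source, analogous to the Source/LambdaSource pair above.
    using ReadFn = std::function<size_t(unsigned char*, size_t)>;

    // Wrap 'inner' so every byte that flows through also updates 'digest'.
    static ReadFn teeInto(ReadFn inner, uint64_t& digest) {
      return [inner, &digest](unsigned char* data, size_t len) -> size_t {
        size_t n = inner(data, len);
        for (size_t i = 0; i < n; ++i)
          digest = (digest ^ data[i]) * 1099511628211ull;
        return n;
      };
    }

    int main() {
      std::string payload = "nar bytes...";
      size_t pos = 0;
      ReadFn source = [&](unsigned char* data, size_t len) -> size_t {
        size_t n = std::min(len, payload.size() - pos);
        payload.copy(reinterpret_cast<char*>(data), n, pos);
        pos += n;
        return n;
      };

      uint64_t digest = 1469598103934665603ull;  // FNV offset basis
      ReadFn wrapped = teeInto(source, digest);

      unsigned char buf[8];
      while (wrapped(buf, sizeof(buf)) > 0) {  // "restore" by draining
      }
      std::printf("%016llx\n", static_cast<unsigned long long>(digest));
    }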
h : hashString(htSHA256, dump); - hash.second = dump.size(); - } else { - hash = hashPath(htSHA256, realPath); - } - - optimisePath(realPath); // FIXME: combine with hashPath() - - ValidPathInfo info; - info.path = dstPath; - info.narHash = hash.first; - info.narSize = hash.second; - info.ca = makeFixedOutputCA(recursive, h); - registerValidPath(info); - } - - outputLock.setDeletion(true); - } - - return dstPath; -} - -Path LocalStore::addToStore(const std::string& name, const Path& _srcPath, - bool recursive, HashType hashAlgo, - PathFilter& filter, RepairFlag repair) { - Path srcPath(absPath(_srcPath)); - - /* Read the whole path into memory. This is not a very scalable - method for very large paths, but `copyPath' is mainly used for - small files. */ - StringSink sink; - if (recursive) { - dumpPath(srcPath, sink, filter); - } else { - sink.s = make_ref(readFile(srcPath)); - } - - return addToStoreFromDump(*sink.s, name, recursive, hashAlgo, repair); -} - -Path LocalStore::addTextToStore(const std::string& name, const std::string& s, - const PathSet& references, RepairFlag repair) { - auto hash = hashString(htSHA256, s); - auto dstPath = makeTextPath(name, hash, references); - - addTempRoot(dstPath); - - if ((repair != 0u) || !isValidPath(dstPath)) { - Path realPath = realStoreDir + "/" + baseNameOf(dstPath); - - PathLocks outputLock({realPath}); - - if ((repair != 0u) || !isValidPath(dstPath)) { - deletePath(realPath); - - autoGC(); - - writeFile(realPath, s); - - canonicalisePathMetaData(realPath, -1); - - StringSink sink; - dumpString(s, sink); - auto narHash = hashString(htSHA256, *sink.s); - - optimisePath(realPath); - - ValidPathInfo info; - info.path = dstPath; - info.narHash = narHash; - info.narSize = sink.s->size(); - info.references = references; - info.ca = "text:" + hash.to_string(); - registerValidPath(info); - } - - outputLock.setDeletion(true); - } - - return dstPath; -} - -/* Create a temporary directory in the store that won't be - garbage-collected. */ -Path LocalStore::createTempDirInStore() { - Path tmpDir; - do { - /* There is a slight possibility that `tmpDir' gets deleted by - the GC between createTempDir() and addTempRoot(), so repeat - until `tmpDir' exists. */ - tmpDir = createTempDir(realStoreDir); - addTempRoot(tmpDir); - } while (!pathExists(tmpDir)); - return tmpDir; -} - -void LocalStore::invalidatePathChecked(const Path& path) { - assertStorePath(path); - - retrySQLite([&]() { - auto state(_state.lock()); - - SQLiteTxn txn(state->db); - - if (isValidPath_(*state, path)) { - PathSet referrers; - queryReferrers(*state, path, referrers); - referrers.erase(path); /* ignore self-references */ - if (!referrers.empty()) { - throw PathInUse( - format("cannot delete path '%1%' because it is in use by %2%") % - path % showPaths(referrers)); - } - invalidatePath(*state, path); - } - - txn.commit(); - }); -} - -bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) { - LOG(INFO) << "reading the Nix store..."; - - bool errors = false; - - /* Acquire the global GC lock to get a consistent snapshot of - existing and valid paths. */ - AutoCloseFD fdGCLock = openGCLock(ltWrite); - - PathSet store; - for (auto& i : readDirectory(realStoreDir)) { - store.insert(i.name); - } - - /* Check whether all valid paths actually exist. 
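createTempDirInStore above loops because the garbage collector may delete the fresh directory before a temporary root has been registered for it. A simplified sketch of that create, register, re-check loop; the helpers here are stand-ins for the store methods used above.

    #include <sys/stat.h>
    #include <cstdlib>
    #include <string>

    static std::string createTempDir(const std::string& parent) {
      std::string templ = parent + "/tmp.XXXXXX";
      char* p = mkdtemp(&templ[0]);
      return p != nullptr ? std::string(p) : std::string();
    }
    static void addTempRoot(const std::string&) { /* register a GC root */ }
    static bool pathExists(const std::string& p) {
      struct stat st{};
      return lstat(p.c_str(), &st) == 0;
    }

    std::string createTempDirInStore(const std::string& storeDir) {
      std::string tmpDir;
      do {
        // The GC may delete tmpDir between these two calls, so re-check and
        // retry until the directory survives with its temporary root set.
        tmpDir = createTempDir(storeDir);
        addTempRoot(tmpDir);
      } while (!pathExists(tmpDir));
      return tmpDir;
    }

    int main() { return createTempDirInStore("/tmp").empty() ? 1 : 0; }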
*/ - LOG(INFO) << "checking path existence..."; - - PathSet validPaths2 = queryAllValidPaths(); - PathSet validPaths; - PathSet done; - - fdGCLock = AutoCloseFD(-1); - - for (auto& i : validPaths2) { - verifyPath(i, store, done, validPaths, repair, errors); - } - - /* Optionally, check the content hashes (slow). */ - if (checkContents) { - LOG(INFO) << "checking hashes..."; - - Hash nullHash(htSHA256); - - for (auto& i : validPaths) { - try { - auto info = std::const_pointer_cast( - std::shared_ptr(queryPathInfo(i))); - - /* Check the content hash (optionally - slow). */ - DLOG(INFO) << "checking contents of '" << i << "'"; - HashResult current = hashPath(info->narHash.type, toRealPath(i)); - - if (info->narHash != nullHash && info->narHash != current.first) { - LOG(ERROR) << "path '" << i << "' was modified! expected hash '" - << info->narHash.to_string() << "', got '" - << current.first.to_string() << "'"; - if (repair != 0u) { - repairPath(i); - } else { - errors = true; - } - } else { - bool update = false; - - /* Fill in missing hashes. */ - if (info->narHash == nullHash) { - LOG(WARNING) << "fixing missing hash on '" << i << "'"; - info->narHash = current.first; - update = true; - } - - /* Fill in missing narSize fields (from old stores). */ - if (info->narSize == 0) { - LOG(ERROR) << "updating size field on '" << i << "' to " - << current.second; - info->narSize = current.second; - update = true; - } - - if (update) { - auto state(_state.lock()); - updatePathInfo(*state, *info); - } - } - - } catch (Error& e) { - /* It's possible that the path got GC'ed, so ignore - errors on invalid paths. */ - if (isValidPath(i)) { - LOG(ERROR) << e.msg(); - } else { - LOG(WARNING) << e.msg(); - } - errors = true; - } - } - } - - return errors; -} - -void LocalStore::verifyPath(const Path& path, const PathSet& store, - PathSet& done, PathSet& validPaths, - RepairFlag repair, bool& errors) { - checkInterrupt(); - - if (done.find(path) != done.end()) { - return; - } - done.insert(path); - - if (!isStorePath(path)) { - LOG(ERROR) << "path '" << path << "' is not in the Nix store"; - auto state(_state.lock()); - invalidatePath(*state, path); - return; - } - - if (store.find(baseNameOf(path)) == store.end()) { - /* Check any referrers first. If we can invalidate them - first, then we can invalidate this path as well. 
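verifyStore's content check above re-hashes each path, compares against the recorded hash, and fills in hashes or sizes that older databases left empty. A tiny sketch of just that compare-and-repair decision, with an empty string playing the role of the null hash:

    #include <cstdint>
    #include <string>

    struct PathInfo {
      std::string narHash;   // empty means "never recorded"
      uint64_t narSize = 0;
    };

    // 'modified' is set when the on-disk contents no longer match the
    // recorded hash; the return value says whether the DB row needs updating.
    static bool checkAndRepair(PathInfo& info, const std::string& currentHash,
                               uint64_t currentSize, bool& modified) {
      modified = !info.narHash.empty() && info.narHash != currentHash;
      if (modified) return false;  // caller decides: repair or just report
      bool update = false;
      if (info.narHash.empty()) { info.narHash = currentHash; update = true; }
      if (info.narSize == 0)    { info.narSize = currentSize; update = true; }
      return update;
    }

    int main() {
      PathInfo info;  // legacy entry: hash and size missing
      bool modified = false;
      bool update = checkAndRepair(info, "sha256:abcd...", 1234, modified);
      return update && !modified ? 0 : 1;
    }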
*/ - bool canInvalidate = true; - PathSet referrers; - queryReferrers(path, referrers); - for (auto& i : referrers) { - if (i != path) { - verifyPath(i, store, done, validPaths, repair, errors); - if (validPaths.find(i) != validPaths.end()) { - canInvalidate = false; - } - } - } - - if (canInvalidate) { - LOG(WARNING) << "path '" << path - << "' disappeared, removing from database..."; - auto state(_state.lock()); - invalidatePath(*state, path); - } else { - LOG(ERROR) << "path '" << path - << "' disappeared, but it still has valid referrers!"; - if (repair != 0u) { - try { - repairPath(path); - } catch (Error& e) { - LOG(WARNING) << e.msg(); - errors = true; - } - } else { - errors = true; - } - } - - return; - } - - validPaths.insert(path); -} - -unsigned int LocalStore::getProtocol() { return PROTOCOL_VERSION; } - -#if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && \ - defined(FS_IMMUTABLE_FL) - -static void makeMutable(const Path& path) { - checkInterrupt(); - - struct stat st = lstat(path); - - if (!S_ISDIR(st.st_mode) && !S_ISREG(st.st_mode)) { - return; - } - - if (S_ISDIR(st.st_mode)) { - for (auto& i : readDirectory(path)) { - makeMutable(path + "/" + i.name); - } - } - - /* The O_NOFOLLOW is important to prevent us from changing the - mutable bit on the target of a symlink (which would be a - security hole). */ - AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_NOFOLLOW | O_CLOEXEC); - if (fd == -1) { - if (errno == ELOOP) { - return; - } // it's a symlink - throw SysError(format("opening file '%1%'") % path); - } - - unsigned int flags = 0, old; - - /* Silently ignore errors getting/setting the immutable flag so - that we work correctly on filesystems that don't support it. */ - if (ioctl(fd, FS_IOC_GETFLAGS, &flags)) { - return; - } - old = flags; - flags &= ~FS_IMMUTABLE_FL; - if (old == flags) { - return; - } - if (ioctl(fd, FS_IOC_SETFLAGS, &flags)) { - return; - } -} - -/* Upgrade from schema 6 (Nix 0.15) to schema 7 (Nix >= 1.3). */ -void LocalStore::upgradeStore7() { - if (getuid() != 0) { - return; - } - printError( - "removing immutable bits from the Nix store (this may take a while)..."); - makeMutable(realStoreDir); -} - -#else - -void LocalStore::upgradeStore7() {} - -#endif - -void LocalStore::vacuumDB() { - auto state(_state.lock()); - state->db.exec("vacuum"); -} - -void LocalStore::addSignatures(const Path& storePath, const StringSet& sigs) { - retrySQLite([&]() { - auto state(_state.lock()); - - SQLiteTxn txn(state->db); - - auto info = std::const_pointer_cast( - std::shared_ptr(queryPathInfo(storePath))); - - info->sigs.insert(sigs.begin(), sigs.end()); - - updatePathInfo(*state, *info); - - txn.commit(); - }); -} - -void LocalStore::signPathInfo(ValidPathInfo& info) { - // FIXME: keep secret keys in memory. 
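The schema-7 upgrade above strips the legacy immutable bit that very old Nix versions set on store files. A Linux-only sketch of clearing that flag on a single file; O_NOFOLLOW guards against symlink targets, as in the code above, and errors are ignored so this still works on filesystems without the flag.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>

    static void clearImmutable(const char* path) {
      int fd = open(path, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
      if (fd == -1) return;  // ELOOP means it was a symlink; skip it
      unsigned int flags = 0;
      if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0 &&
          (flags & FS_IMMUTABLE_FL) != 0) {
        flags &= ~FS_IMMUTABLE_FL;
        (void)ioctl(fd, FS_IOC_SETFLAGS, &flags);
      }
      close(fd);
    }

    int main() { clearImmutable("/tmp/some-file"); }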
- - auto secretKeyFiles = settings.secretKeyFiles; - - for (auto& secretKeyFile : secretKeyFiles.get()) { - SecretKey secretKey(readFile(secretKeyFile)); - info.sign(secretKey); - } -} - -void LocalStore::createUser(const std::string& userName, uid_t userId) { - for (auto& dir : {fmt("%s/profiles/per-user/%s", stateDir, userName), - fmt("%s/gcroots/per-user/%s", stateDir, userName)}) { - createDirs(dir); - if (chmod(dir.c_str(), 0755) == -1) { - throw SysError("changing permissions of directory '%s'", dir); - } - if (chown(dir.c_str(), userId, getgid()) == -1) { - throw SysError("changing owner of directory '%s'", dir); - } - } -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/local-store.hh b/third_party/nix/src/libstore/local-store.hh deleted file mode 100644 index a7c49079d2..0000000000 --- a/third_party/nix/src/libstore/local-store.hh +++ /dev/null @@ -1,319 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#include -#include - -#include "libstore/pathlocks.hh" -#include "libstore/sqlite.hh" -#include "libstore/store-api.hh" -#include "libutil/sync.hh" -#include "libutil/util.hh" - -namespace nix { - -/* Nix store and database schema version. Version 1 (or 0) was Nix <= - 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10. - Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is - Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0. */ -const int nixSchemaVersion = 10; - -struct Derivation; - -struct OptimiseStats { - unsigned long filesLinked = 0; - unsigned long long bytesFreed = 0; - unsigned long long blocksFreed = 0; -}; - -class LocalStore : public LocalFSStore { - private: - /* Lock file used for upgrading. */ - AutoCloseFD globalLock; - - struct State { - /* The SQLite database object. */ - SQLite db; - - /* Some precompiled SQLite statements. */ - SQLiteStmt stmtRegisterValidPath; - SQLiteStmt stmtUpdatePathInfo; - SQLiteStmt stmtAddReference; - SQLiteStmt stmtQueryPathInfo; - SQLiteStmt stmtQueryReferences; - SQLiteStmt stmtQueryReferrers; - SQLiteStmt stmtInvalidatePath; - SQLiteStmt stmtAddDerivationOutput; - SQLiteStmt stmtQueryValidDerivers; - SQLiteStmt stmtQueryDerivationOutputs; - SQLiteStmt stmtQueryPathFromHashPart; - SQLiteStmt stmtQueryValidPaths; - - /* The file to which we write our temporary roots. */ - AutoCloseFD fdTempRoots; - - /* The last time we checked whether to do an auto-GC, or an - auto-GC finished. */ - std::chrono::time_point lastGCCheck; - - /* Whether auto-GC is running. If so, get gcFuture to wait for - the GC to finish. */ - bool gcRunning = false; - std::shared_future gcFuture; - - /* How much disk space was available after the previous - auto-GC. If the current available disk space is below - minFree but not much below availAfterGC, then there is no - point in starting a new GC. */ - uint64_t availAfterGC = std::numeric_limits::max(); - - std::unique_ptr publicKeys; - }; - - Sync _state; - - public: - PathSetting realStoreDir_; - - const Path realStoreDir; - const Path dbDir; - const Path linksDir; - const Path reservedPath; - const Path schemaPath; - const Path trashDir; - const Path tempRootsDir; - const Path fnTempRoots; - - private: - Setting requireSigs{ - (Store*)this, settings.requireSigs, "require-sigs", - "whether store paths should have a trusted signature on import"}; - - const PublicKeys& getPublicKeys(); - - public: - // Hack for build-remote.cc. 
- // TODO(tazjin): remove this when we've got gRPC - PathSet locksHeld = - absl::StrSplit(getEnv("NIX_HELD_LOCKS").value_or(""), - absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty()); - - /* Initialise the local store, upgrading the schema if - necessary. */ - LocalStore(const Params& params); - - ~LocalStore(); - - /* Implementations of abstract store API methods. */ - - std::string getUri() override; - - bool isValidPathUncached(const Path& path) override; - - PathSet queryValidPaths(const PathSet& paths, SubstituteFlag maybeSubstitute = - NoSubstitute) override; - - PathSet queryAllValidPaths() override; - - void queryPathInfoUncached( - const Path& path, - Callback> callback) noexcept override; - - void queryReferrers(const Path& path, PathSet& referrers) override; - - PathSet queryValidDerivers(const Path& path) override; - - PathSet queryDerivationOutputs(const Path& path) override; - - StringSet queryDerivationOutputNames(const Path& path) override; - - Path queryPathFromHashPart(const std::string& hashPart) override; - - PathSet querySubstitutablePaths(const PathSet& paths) override; - - void querySubstitutablePathInfos(const PathSet& paths, - SubstitutablePathInfos& infos) override; - - void addToStore(const ValidPathInfo& info, Source& source, RepairFlag repair, - CheckSigsFlag checkSigs, - std::shared_ptr accessor) override; - - Path addToStore(const std::string& name, const Path& srcPath, bool recursive, - HashType hashAlgo, PathFilter& filter, - RepairFlag repair) override; - - /* Like addToStore(), but the contents of the path are contained - in `dump', which is either a NAR serialisation (if recursive == - true) or simply the contents of a regular file (if recursive == - false). */ - Path addToStoreFromDump(const std::string& dump, const std::string& name, - bool recursive = true, HashType hashAlgo = htSHA256, - RepairFlag repair = NoRepair); - - Path addTextToStore(const std::string& name, const std::string& s, - const PathSet& references, RepairFlag repair) override; - - absl::Status buildPaths(std::ostream& log_sink, const PathSet& paths, - BuildMode build_mode) override; - - BuildResult buildDerivation(std::ostream& log_sink, const Path& drvPath, - const BasicDerivation& drv, - BuildMode buildMode) override; - - void ensurePath(const Path& path) override; - - void addTempRoot(const Path& path) override; - - void addIndirectRoot(const Path& path) override; - - void syncWithGC() override; - - private: - typedef std::shared_ptr FDPtr; - using FDs = std::list; - - void findTempRoots(FDs& fds, Roots& roots, bool censor); - - public: - Roots findRoots(bool censor) override; - - void collectGarbage(const GCOptions& options, GCResults& results) override; - - /* Optimise the disk space usage of the Nix store by hard-linking - files with the same contents. */ - void optimiseStore(OptimiseStats& stats); - - void optimiseStore() override; - - /* Optimise a single store path. */ - void optimisePath(const Path& path); - - bool verifyStore(bool checkContents, RepairFlag repair) override; - - /* Register the validity of a path, i.e., that `path' exists, that - the paths referenced by it exists, and in the case of an output - path of a derivation, that it has been produced by a successful - execution of the derivation (or something equivalent). Also - register the hash of the file system contents of the path. The - hash must be a SHA-256 hash. 
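The locksHeld hack above is populated by splitting an environment variable on whitespace. In isolation, with the same variable name and invented contents, that looks like:

    #include <cstdlib>
    #include <set>
    #include <string>
    #include "absl/strings/str_split.h"

    int main() {
      // e.g. NIX_HELD_LOCKS="/nix/store/aaa-foo /nix/store/bbb-bar"
      const char* env = std::getenv("NIX_HELD_LOCKS");
      std::set<std::string> locksHeld = absl::StrSplit(
          env != nullptr ? std::string(env) : std::string(),
          absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty());
      return static_cast<int>(locksHeld.size());
    }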
*/ - void registerValidPath(const ValidPathInfo& info); - - void registerValidPaths(const ValidPathInfos& infos); - - unsigned int getProtocol() override; - - void vacuumDB(); - - /* Repair the contents of the given path by redownloading it using - a substituter (if available). */ - void repairPath(const Path& path); - - void addSignatures(const Path& storePath, const StringSet& sigs) override; - - /* If free disk space in /nix/store if below minFree, delete - garbage until it exceeds maxFree. */ - void autoGC(bool sync = true); - - private: - int getSchema(); - - void openDB(State& state, bool create); - - void makeStoreWritable(); - - static uint64_t queryValidPathId(State& state, const Path& path); - - uint64_t addValidPath(State& state, const ValidPathInfo& info, - bool checkOutputs = true); - - void invalidatePath(State& state, const Path& path); - - /* Delete a path from the Nix store. */ - void invalidatePathChecked(const Path& path); - - void verifyPath(const Path& path, const PathSet& store, PathSet& done, - PathSet& validPaths, RepairFlag repair, bool& errors); - - static void updatePathInfo(State& state, const ValidPathInfo& info); - - void upgradeStore6(); - void upgradeStore7(); - PathSet queryValidPathsOld(); - ValidPathInfo queryPathInfoOld(const Path& path); - - struct GCState; - - static void deleteGarbage(GCState& state, const Path& path); - - void tryToDelete(GCState& state, const Path& path); - - bool canReachRoot(GCState& state, PathSet& visited, const Path& path); - - void deletePathRecursive(GCState& state, const Path& path); - - static bool isActiveTempFile(const GCState& state, const Path& path, - const std::string& suffix); - - AutoCloseFD openGCLock(LockType lockType); - - void findRoots(const Path& path, unsigned char type, Roots& roots); - - void findRootsNoTemp(Roots& roots, bool censor); - - void findRuntimeRoots(Roots& roots, bool censor); - - void removeUnusedLinks(const GCState& state); - - Path createTempDirInStore(); - - void checkDerivationOutputs(const Path& drvPath, const Derivation& drv); - - using InodeHash = std::unordered_set; - - InodeHash loadInodeHash(); - static Strings readDirectoryIgnoringInodes(const Path& path, - const InodeHash& inodeHash); - void optimisePath_(OptimiseStats& stats, const Path& path, - InodeHash& inodeHash); - - // Internal versions that are not wrapped in retry_sqlite. - static bool isValidPath_(State& state, const Path& path); - static void queryReferrers(State& state, const Path& path, - PathSet& referrers); - - /* Add signatures to a ValidPathInfo using the secret keys - specified by the ‘secret-key-files’ option. */ - static void signPathInfo(ValidPathInfo& info); - - Path getRealStoreDir() override { return realStoreDir; } - - void createUser(const std::string& userName, uid_t userId) override; - - friend class DerivationGoal; - friend class SubstitutionGoal; -}; - -using Inode = std::pair; -using InodesSeen = std::set; - -/* "Fix", or canonicalise, the meta-data of the files in a store path - after it has been built. In particular: - - the last modification date on each file is set to 1 (i.e., - 00:00:01 1/1/1970 UTC) - - the permissions are set of 444 or 555 (i.e., read-only with or - without execute permission; setuid bits etc. are cleared) - - the owner and group are set to the Nix user and group, if we're - running as root. 
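optimiseStore and optimisePath declared above deduplicate identical files by content under the store's .links directory. A much-simplified sketch of the idea, using std::hash over the file contents as a stand-in for the real content hash and skipping the locking and attribute checks the store performs:

    #include <cstdio>
    #include <fstream>
    #include <functional>
    #include <sstream>
    #include <string>
    #include <unistd.h>

    // Replace 'file' with a hard link to a canonical copy under linksDir.
    static void optimiseOne(const std::string& linksDir, const std::string& file) {
      std::ifstream in(file, std::ios::binary);
      std::ostringstream contents;
      contents << in.rdbuf();

      char digest[32];
      std::snprintf(digest, sizeof(digest), "%zx",
                    std::hash<std::string>{}(contents.str()));
      const std::string linkPath = linksDir + "/" + digest;

      if (link(file.c_str(), linkPath.c_str()) == 0) return;  // first copy wins
      // A canonical copy already exists: atomically replace 'file' with a
      // hard link to it via a temporary name.
      const std::string tmp = file + ".tmp-link";
      if (link(linkPath.c_str(), tmp.c_str()) == 0) {
        (void)std::rename(tmp.c_str(), file.c_str());
      }
    }

    int main() { optimiseOne("/tmp/links", "/tmp/example-file"); }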
*/ -void canonicalisePathMetaData(const Path& path, uid_t fromUid, - InodesSeen& inodesSeen); -void canonicalisePathMetaData(const Path& path, uid_t fromUid); - -void canonicaliseTimestampAndPermissions(const Path& path); - -MakeError(PathInUse, Error); - -} // namespace nix diff --git a/third_party/nix/src/libstore/machines.cc b/third_party/nix/src/libstore/machines.cc deleted file mode 100644 index 57c89e0692..0000000000 --- a/third_party/nix/src/libstore/machines.cc +++ /dev/null @@ -1,114 +0,0 @@ -#include "libstore/machines.hh" - -#include - -#include -#include -#include -#include -#include - -#include "libstore/globals.hh" -#include "libutil/util.hh" - -namespace nix { - -Machine::Machine(decltype(storeUri)& storeUri, - decltype(systemTypes)& systemTypes, decltype(sshKey)& sshKey, - decltype(maxJobs) maxJobs, decltype(speedFactor) speedFactor, - decltype(supportedFeatures)& supportedFeatures, - decltype(mandatoryFeatures)& mandatoryFeatures, - decltype(sshPublicHostKey)& sshPublicHostKey) - : storeUri( - // Backwards compatibility: if the URI is a hostname, - // prepend ssh://. - storeUri.find("://") != std::string::npos || - absl::StartsWith(storeUri, "local") || - absl::StartsWith(storeUri, "remote") || - absl::StartsWith(storeUri, "auto") || - absl::StartsWith(storeUri, "/") - ? storeUri - : "ssh://" + storeUri), - systemTypes(systemTypes), - sshKey(sshKey), - maxJobs(maxJobs), - speedFactor(std::max(1U, speedFactor)), - supportedFeatures(supportedFeatures), - mandatoryFeatures(mandatoryFeatures), - sshPublicHostKey(sshPublicHostKey) {} - -bool Machine::allSupported(const std::set& features) const { - return std::all_of(features.begin(), features.end(), - [&](const std::string& feature) { - return (supportedFeatures.count(feature) != 0u) || - (mandatoryFeatures.count(feature) != 0u); - }); -} - -bool Machine::mandatoryMet(const std::set& features) const { - return std::all_of( - mandatoryFeatures.begin(), mandatoryFeatures.end(), - [&](const std::string& feature) { return features.count(feature); }); -} - -void parseMachines(const std::string& s, Machines& machines) { - for (auto line : - absl::StrSplit(s, absl::ByAnyChar("\n;"), absl::SkipEmpty())) { - // Skip empty lines & comments - line = absl::StripAsciiWhitespace(line); - if (line.empty() || line[line.find_first_not_of(" \t")] == '#') { - continue; - } - - if (line[0] == '@') { - auto file = absl::StripAsciiWhitespace(line.substr(1)); - try { - parseMachines(readFile(file), machines); - } catch (const SysError& e) { - if (e.errNo != ENOENT) { - throw; - } - DLOG(INFO) << "cannot find machines file: " << file; - } - continue; - } - - std::vector tokens = - absl::StrSplit(line, absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty()); - auto sz = tokens.size(); - if (sz < 1) { - throw FormatError("bad machine specification '%s'", line); - } - - auto isSet = [&](size_t n) { - return tokens.size() > n && !tokens[n].empty() && tokens[n] != "-"; - }; - - // TODO(tazjin): what??? - machines.emplace_back( - tokens[0], - isSet(1) - ? absl::StrSplit(tokens[1], absl::ByChar(','), absl::SkipEmpty()) - : std::vector{settings.thisSystem}, - isSet(2) ? tokens[2] : "", isSet(3) ? std::stoull(tokens[3]) : 1LL, - isSet(4) ? std::stoull(tokens[4]) : 1LL, - isSet(5) - ? absl::StrSplit(tokens[5], absl::ByChar(','), absl::SkipEmpty()) - : std::set{}, - isSet(6) - ? absl::StrSplit(tokens[6], absl::ByChar(','), absl::SkipEmpty()) - : std::set{}, - isSet(7) ? 
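parseMachines above consumes the whitespace-separated builder specification format; fields after the URI are optional, and "-" leaves a field at its default. An illustrative entry (all values invented) annotated with how its tokens map onto the Machine constructor:

    // Lines are separated by newlines or ';', '#' starts a comment, and a
    // line beginning with '@' includes another machines file.
    //
    //   ssh://builder.example.org x86_64-linux,i686-linux /etc/nix/key 8 2 kvm,big-parallel benchmark -
    //   \__________ 1 __________/ \_________ 2 _________/ \____ 3 ___/ 4 5 \______ 6 ______/ \___7___/ 8
    //
    // 1  store URI (a bare host name gets "ssh://" prepended)
    // 2  system types, comma-separated
    // 3  SSH identity file          4  max jobs         5  speed factor
    // 6  supported features         7  mandatory features
    // 8  base64 SSH public host key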
tokens[7] : ""); - } -} - -Machines getMachines() { - static auto machines = [&]() { - Machines machines; - parseMachines(settings.builders, machines); - return machines; - }(); - return machines; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/machines.hh b/third_party/nix/src/libstore/machines.hh deleted file mode 100644 index 0e72697237..0000000000 --- a/third_party/nix/src/libstore/machines.hh +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -#include "libutil/types.hh" - -namespace nix { - -struct Machine { - const std::string storeUri; - const std::vector systemTypes; - const std::string sshKey; - const unsigned int maxJobs; - const unsigned int speedFactor; - const std::set supportedFeatures; - const std::set mandatoryFeatures; - const std::string sshPublicHostKey; - bool enabled = true; - - bool allSupported(const std::set& features) const; - - bool mandatoryMet(const std::set& features) const; - - Machine(decltype(storeUri)& storeUri, decltype(systemTypes)& systemTypes, - decltype(sshKey)& sshKey, decltype(maxJobs) maxJobs, - decltype(speedFactor) speedFactor, - decltype(supportedFeatures)& supportedFeatures, - decltype(mandatoryFeatures)& mandatoryFeatures, - decltype(sshPublicHostKey)& sshPublicHostKey); -}; - -typedef std::vector Machines; - -void parseMachines(const std::string& s, Machines& machines); - -Machines getMachines(); - -} // namespace nix diff --git a/third_party/nix/src/libstore/misc.cc b/third_party/nix/src/libstore/misc.cc deleted file mode 100644 index 44e67ada36..0000000000 --- a/third_party/nix/src/libstore/misc.cc +++ /dev/null @@ -1,331 +0,0 @@ -#include - -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/local-store.hh" -#include "libstore/parsed-derivations.hh" -#include "libstore/store-api.hh" -#include "libutil/thread-pool.hh" - -namespace nix { - -void Store::computeFSClosure(const PathSet& startPaths, PathSet& paths_, - bool flipDirection, bool includeOutputs, - bool includeDerivers) { - struct State { - size_t pending; - PathSet& paths; - std::exception_ptr exc; - }; - - Sync state_(State{0, paths_, nullptr}); - - std::function enqueue; - - std::condition_variable done; - - enqueue = [&](const Path& path) -> void { - { - auto state(state_.lock()); - if (state->exc) { - return; - } - if (state->paths.count(path) != 0u) { - return; - } - state->paths.insert(path); - state->pending++; - } - - queryPathInfo( - path, - Callback>( - [&, path](std::future> fut) { - // FIXME: calls to isValidPath() should be async - - try { - auto info = fut.get(); - - if (flipDirection) { - PathSet referrers; - queryReferrers(path, referrers); - for (auto& ref : referrers) { - if (ref != path) { - enqueue(ref); - } - } - - if (includeOutputs) { - for (auto& i : queryValidDerivers(path)) { - enqueue(i); - } - } - - if (includeDerivers && isDerivation(path)) { - for (auto& i : queryDerivationOutputs(path)) { - if (isValidPath(i) && queryPathInfo(i)->deriver == path) { - enqueue(i); - } - } - } - - } else { - for (auto& ref : info->references) { - if (ref != path) { - enqueue(ref); - } - } - - if (includeOutputs && isDerivation(path)) { - for (auto& i : queryDerivationOutputs(path)) { - if (isValidPath(i)) { - enqueue(i); - } - } - } - - if (includeDerivers && isValidPath(info->deriver)) { - enqueue(info->deriver); - } - } - - { - auto state(state_.lock()); - assert(state->pending); - if (--state->pending == 0u) { - done.notify_one(); - } - } - - } catch (...) 
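allSupported and mandatoryMet above are the two sides of builder selection: the machine must cover every requested feature, and every feature it insists on must actually have been requested. The same checks in isolation, with hypothetical feature names:

    #include <algorithm>
    #include <set>
    #include <string>

    using Features = std::set<std::string>;

    // The machine can do everything the build asks for.
    static bool allSupported(const Features& supported, const Features& mandatory,
                             const Features& wanted) {
      return std::all_of(wanted.begin(), wanted.end(), [&](const std::string& f) {
        return supported.count(f) != 0 || mandatory.count(f) != 0;
      });
    }

    // Everything the machine insists on was actually asked for.
    static bool mandatoryMet(const Features& mandatory, const Features& wanted) {
      return std::all_of(
          mandatory.begin(), mandatory.end(),
          [&](const std::string& f) { return wanted.count(f) != 0; });
    }

    int main() {
      Features supported = {"kvm"}, mandatory = {"benchmark"}, wanted = {"kvm"};
      return allSupported(supported, mandatory, wanted) &&
                     !mandatoryMet(mandatory, wanted)
                 ? 0
                 : 1;
    }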
{ - auto state(state_.lock()); - if (!state->exc) { - state->exc = std::current_exception(); - } - assert(state->pending); - if (--state->pending == 0u) { - done.notify_one(); - } - }; - })); - }; - - for (auto& startPath : startPaths) { - enqueue(startPath); - } - - { - auto state(state_.lock()); - while (state->pending != 0u) { - state.wait(done); - } - if (state->exc) { - std::rethrow_exception(state->exc); - } - } -} - -void Store::computeFSClosure(const Path& startPath, PathSet& paths_, - bool flipDirection, bool includeOutputs, - bool includeDerivers) { - computeFSClosure(PathSet{startPath}, paths_, flipDirection, includeOutputs, - includeDerivers); -} - -void Store::queryMissing(const PathSet& targets, PathSet& willBuild_, - PathSet& willSubstitute_, PathSet& unknown_, - unsigned long long& downloadSize_, - unsigned long long& narSize_) { - LOG(INFO) << "querying info about missing paths"; - - downloadSize_ = narSize_ = 0; - - ThreadPool pool; - - struct State { - PathSet done; - PathSet &unknown, &willSubstitute, &willBuild; - unsigned long long& downloadSize; - unsigned long long& narSize; - }; - - struct DrvState { - size_t left; - bool done = false; - PathSet outPaths; - explicit DrvState(size_t left) : left(left) {} - }; - - Sync state_(State{PathSet(), unknown_, willSubstitute_, willBuild_, - downloadSize_, narSize_}); - - std::function doPath; - - auto mustBuildDrv = [&](const Path& drvPath, const Derivation& drv) { - { - auto state(state_.lock()); - state->willBuild.insert(drvPath); - } - - for (auto& i : drv.inputDrvs) { - pool.enqueue( - std::bind(doPath, makeDrvPathWithOutputs(i.first, i.second))); - } - }; - - auto checkOutput = [&](const Path& drvPath, const ref& drv, - const Path& outPath, - const ref>& drvState_) { - if (drvState_->lock()->done) { - return; - } - - SubstitutablePathInfos infos; - querySubstitutablePathInfos({outPath}, infos); - - if (infos.empty()) { - drvState_->lock()->done = true; - mustBuildDrv(drvPath, *drv); - } else { - { - auto drvState(drvState_->lock()); - if (drvState->done) { - return; - } - assert(drvState->left); - drvState->left--; - drvState->outPaths.insert(outPath); - if (drvState->left == 0u) { - for (auto& path : drvState->outPaths) { - pool.enqueue(std::bind(doPath, path)); - } - } - } - } - }; - - doPath = [&](const Path& path) { - { - auto state(state_.lock()); - if (state->done.count(path) != 0u) { - return; - } - state->done.insert(path); - } - - DrvPathWithOutputs i2 = parseDrvPathWithOutputs(path); - - if (isDerivation(i2.first)) { - if (!isValidPath(i2.first)) { - // FIXME: we could try to substitute the derivation. 
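computeFSClosure above fans out asynchronous queryPathInfo calls and then blocks until they have all finished; the synchronisation is a pending counter plus a condition variable. A generic, self-contained sketch of that pattern with the per-path work simulated by threads:

    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <vector>

    int main() {
      std::mutex m;
      std::condition_variable done;
      int pending = 0;

      std::vector<std::thread> workers;
      for (int i = 0; i < 4; ++i) {
        {
          std::lock_guard<std::mutex> lk(m);
          ++pending;  // registered before the asynchronous work starts
        }
        workers.emplace_back([&] {
          // ... process one path here ...
          std::lock_guard<std::mutex> lk(m);
          if (--pending == 0) done.notify_one();
        });
      }

      {
        std::unique_lock<std::mutex> lk(m);
        done.wait(lk, [&] { return pending == 0; });
      }
      for (auto& t : workers) t.join();
      return 0;
    }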
- auto state(state_.lock()); - state->unknown.insert(path); - return; - } - - Derivation drv = derivationFromPath(i2.first); - ParsedDerivation parsedDrv(i2.first, drv); - - PathSet invalid; - for (auto& j : drv.outputs) { - if (wantOutput(j.first, i2.second) && !isValidPath(j.second.path)) { - invalid.insert(j.second.path); - } - } - if (invalid.empty()) { - return; - } - - if (settings.useSubstitutes && parsedDrv.substitutesAllowed()) { - auto drvState = make_ref>(DrvState(invalid.size())); - for (auto& output : invalid) { - pool.enqueue(std::bind(checkOutput, i2.first, - make_ref(drv), output, drvState)); - } - } else { - mustBuildDrv(i2.first, drv); - } - - } else { - if (isValidPath(path)) { - return; - } - - SubstitutablePathInfos infos; - querySubstitutablePathInfos({path}, infos); - - if (infos.empty()) { - auto state(state_.lock()); - state->unknown.insert(path); - return; - } - - auto info = infos.find(path); - assert(info != infos.end()); - - { - auto state(state_.lock()); - state->willSubstitute.insert(path); - state->downloadSize += info->second.downloadSize; - state->narSize += info->second.narSize; - } - - for (auto& ref : info->second.references) { - pool.enqueue(std::bind(doPath, ref)); - } - } - }; - - for (auto& path : targets) { - pool.enqueue(std::bind(doPath, path)); - } - - pool.process(); -} - -Paths Store::topoSortPaths(const PathSet& paths) { - Paths sorted; - PathSet visited; - PathSet parents; - - std::function dfsVisit; - - dfsVisit = [&](const Path& path, const Path* parent) { - if (parents.find(path) != parents.end()) { - throw BuildError( - format("cycle detected in the references of '%1%' from '%2%'") % - path % *parent); - } - - if (visited.find(path) != visited.end()) { - return; - } - visited.insert(path); - parents.insert(path); - - PathSet references; - try { - references = queryPathInfo(path)->references; - } catch (InvalidPath&) { - } - - for (auto& i : references) { - /* Don't traverse into paths that don't exist. That can - happen due to substitutes for non-existent paths. */ - if (i != path && paths.find(i) != paths.end()) { - dfsVisit(i, &path); - } - } - - sorted.push_front(path); - parents.erase(path); - }; - - for (auto& i : paths) { - dfsVisit(i, nullptr); - } - - return sorted; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/mock-binary-cache-store.cc b/third_party/nix/src/libstore/mock-binary-cache-store.cc deleted file mode 100644 index 995d61521c..0000000000 --- a/third_party/nix/src/libstore/mock-binary-cache-store.cc +++ /dev/null @@ -1,91 +0,0 @@ -#include "libstore/mock-binary-cache-store.hh" - -#include - -namespace nix { - -MockBinaryCacheStore::MockBinaryCacheStore(const Params& params) - : BinaryCacheStore(params), contents_(), errorInjections_() {} - -std::string MockBinaryCacheStore::getUri() { return "mock://1"; } - -bool MockBinaryCacheStore::fileExists(const std::string& path) { - ThrowInjectedErrors(path); - - return contents_.find(path) != contents_.end(); -}; - -void MockBinaryCacheStore::upsertFile(const std::string& path, - const std::string& data, - const std::string& mimeType) { - ThrowInjectedErrors(path); - - contents_[path] = MemoryFile{data, mimeType}; -} - -void MockBinaryCacheStore::getFile( - const std::string& path, - Callback> callback) noexcept { - auto eit = errorInjections_.find(path); - if (eit != errorInjections_.end()) { - try { - eit->second(); - LOG(FATAL) << "thrower failed to throw"; - } catch (...) 
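topoSortPaths above is a depth-first search that reports a cycle when it re-enters a node still on the current path, and pushes each node to the front once its references are done. The same algorithm over a plain adjacency map, with invented node names:

    #include <list>
    #include <map>
    #include <set>
    #include <stdexcept>
    #include <string>

    using Graph = std::map<std::string, std::set<std::string>>;

    static void visit(const Graph& g, const std::string& node,
                      std::set<std::string>& visited,
                      std::set<std::string>& onPath,
                      std::list<std::string>& sorted) {
      if (onPath.count(node) != 0) throw std::runtime_error("cycle at " + node);
      if (!visited.insert(node).second) return;
      onPath.insert(node);
      auto it = g.find(node);
      if (it != g.end()) {
        for (const auto& ref : it->second) visit(g, ref, visited, onPath, sorted);
      }
      sorted.push_front(node);  // dependencies end up after their dependents
      onPath.erase(node);
    }

    static std::list<std::string> topoSort(const Graph& g) {
      std::list<std::string> sorted;
      std::set<std::string> visited, onPath;
      for (const auto& entry : g) visit(g, entry.first, visited, onPath, sorted);
      return sorted;
    }

    int main() {
      Graph g = {{"app", {"libc"}}, {"libc", {}}};
      return topoSort(g).front() == "app" ? 0 : 1;
    }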
{ - callback.rethrow(); - } - return; - } - - auto it = contents_.find(path); - if (it == contents_.end()) { - try { - throw NoSuchBinaryCacheFile(absl::StrCat( - "file '", path, "' was not added to the MockBinaryCache")); - } catch (...) { - callback.rethrow(); - } - return; - } - callback(std::make_shared(it->second.data)); -} - -PathSet MockBinaryCacheStore::queryAllValidPaths() { - PathSet paths; - - for (auto it : contents_) { - paths.insert(it.first); - } - - return paths; -} - -void MockBinaryCacheStore::DeleteFile(const std::string& path) { - contents_.erase(path); -} - -// Same as upsert, but bypasses injected errors. -void MockBinaryCacheStore::SetFileContentsForTest(const std::string& path, - const std::string& data, - const std::string& mimeType) { - contents_[path] = MemoryFile{data, mimeType}; -} - -void MockBinaryCacheStore::PrepareErrorInjection( - const std::string& path, std::function err_factory) { - errorInjections_[path] = err_factory; -} - -void MockBinaryCacheStore::CancelErrorInjection(const std::string& path) { - errorInjections_.erase(path); -} - -void MockBinaryCacheStore::ThrowInjectedErrors(const std::string& path) { - auto it = errorInjections_.find(path); - if (it != errorInjections_.end()) { - it->second(); - LOG(FATAL) << "thrower failed to throw"; - } -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/mock-binary-cache-store.hh b/third_party/nix/src/libstore/mock-binary-cache-store.hh deleted file mode 100644 index 419077b6bb..0000000000 --- a/third_party/nix/src/libstore/mock-binary-cache-store.hh +++ /dev/null @@ -1,59 +0,0 @@ -#pragma once - -#include -#include - -#include "libstore/binary-cache-store.hh" - -namespace nix { - -// MockBinaryCacheStore implements a memory-based BinaryCacheStore, for use in -// tests. -class MockBinaryCacheStore : public BinaryCacheStore { - public: - MockBinaryCacheStore(const Params& params); - - // Store API - - std::string getUri() override; - - bool fileExists(const std::string& path) override; - - void upsertFile(const std::string& path, const std::string& data, - const std::string& mimeType) override; - - void getFile( - const std::string& path, - Callback> callback) noexcept override; - - PathSet queryAllValidPaths() override; - - // Test API - - // Remove a file from the store. - void DeleteFile(const std::string& path); - - // Same as upsert, but bypasses injected errors. - void SetFileContentsForTest(const std::string& path, const std::string& data, - const std::string& mimeType); - - void PrepareErrorInjection(const std::string& path, - std::function throw_func); - - void CancelErrorInjection(const std::string& path); - - // Internals - - private: - void ThrowInjectedErrors(const std::string& path); - - struct MemoryFile { - std::string data; - std::string mimeType; - }; - - absl::btree_map contents_; - absl::flat_hash_map> errorInjections_; -}; - -} // namespace nix diff --git a/third_party/nix/src/libstore/nar-accessor.cc b/third_party/nix/src/libstore/nar-accessor.cc deleted file mode 100644 index cfd3d50b32..0000000000 --- a/third_party/nix/src/libstore/nar-accessor.cc +++ /dev/null @@ -1,268 +0,0 @@ -#include "libstore/nar-accessor.hh" - -#include -#include -#include -#include -#include - -#include "libutil/archive.hh" -#include "libutil/json.hh" - -namespace nix { - -struct NarMember { - FSAccessor::Type type = FSAccessor::Type::tMissing; - - bool isExecutable = false; - - /* If this is a regular file, position of the contents of this - file in the NAR. 
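The MockBinaryCacheStore removed above is an in-memory test double whose main trick is error injection: a map from path to a `std::function<void()>` that is expected to throw, consulted before every file operation. Stripped of the Nix store types, the pattern looks roughly like this (MemoryStore and its members are invented for the sketch):

    #include <functional>
    #include <map>
    #include <stdexcept>
    #include <string>

    class MemoryStore {
     public:
      void upsert(const std::string& path, const std::string& data) {
        throwInjectedErrors(path);
        contents_[path] = data;
      }

      bool exists(const std::string& path) {
        throwInjectedErrors(path);
        return contents_.count(path) != 0;
      }

      // Test API: make any future access to `path` fail by calling `thrower`.
      void prepareErrorInjection(const std::string& path,
                                 std::function<void()> thrower) {
        errorInjections_[path] = std::move(thrower);
      }

      void cancelErrorInjection(const std::string& path) {
        errorInjections_.erase(path);
      }

     private:
      void throwInjectedErrors(const std::string& path) {
        auto it = errorInjections_.find(path);
        if (it != errorInjections_.end()) it->second();  // expected to throw
      }

      std::map<std::string, std::string> contents_;
      std::map<std::string, std::function<void()>> errorInjections_;
    };

    // Usage in a test:
    //   MemoryStore store;
    //   store.prepareErrorInjection("nar/foo",
    //                               [] { throw std::runtime_error("boom"); });
    //   store.upsert("nar/foo", "...");  // now throws "boom"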
*/ - size_t start = 0, size = 0; - - std::string target; - - /* If this is a directory, all the children of the directory. */ - std::map children; -}; - -struct NarAccessor : public FSAccessor { - std::shared_ptr nar; - - GetNarBytes getNarBytes; - - NarMember root; - - struct NarIndexer : ParseSink, StringSource { - NarAccessor& acc; - - std::stack parents; - - std::string currentStart; - bool isExec = false; - - NarIndexer(NarAccessor& acc, const std::string& nar) - : StringSource(nar), acc(acc) {} - - void createMember(const Path& path, NarMember member) { - size_t level = std::count(path.begin(), path.end(), '/'); - while (parents.size() > level) { - parents.pop(); - } - - if (parents.empty()) { - acc.root = std::move(member); - parents.push(&acc.root); - } else { - if (parents.top()->type != FSAccessor::Type::tDirectory) { - throw Error("NAR file missing parent directory of path '%s'", path); - } - auto result = parents.top()->children.emplace(baseNameOf(path), - std::move(member)); - parents.push(&result.first->second); - } - } - - void createDirectory(const Path& path) override { - createMember(path, {FSAccessor::Type::tDirectory, false, 0, 0}); - } - - void createRegularFile(const Path& path) override { - createMember(path, {FSAccessor::Type::tRegular, false, 0, 0}); - } - - void isExecutable() override { parents.top()->isExecutable = true; } - - void preallocateContents(unsigned long long size) override { - currentStart = std::string(s, pos, 16); - assert(size <= std::numeric_limits::max()); - parents.top()->size = static_cast(size); - parents.top()->start = pos; - } - - void receiveContents(unsigned char* data, unsigned int len) override { - // Sanity check - if (!currentStart.empty()) { - assert(len < 16 || currentStart == std::string((char*)data, 16)); - currentStart.clear(); - } - } - - void createSymlink(const Path& path, const std::string& target) override { - createMember(path, - NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target}); - } - }; - - explicit NarAccessor(const ref& nar) : nar(nar) { - NarIndexer indexer(*this, *nar); - parseDump(indexer, indexer); - } - - NarAccessor(const std::string& listing, GetNarBytes getNarBytes) - : getNarBytes(std::move(getNarBytes)) { - using json = nlohmann::json; - - std::function recurse; - - recurse = [&](NarMember& member, json& v) { - std::string type = v["type"]; - - if (type == "directory") { - member.type = FSAccessor::Type::tDirectory; - for (auto i = v["entries"].begin(); i != v["entries"].end(); ++i) { - const std::string& name = i.key(); - recurse(member.children[name], i.value()); - } - } else if (type == "regular") { - member.type = FSAccessor::Type::tRegular; - member.size = v["size"]; - member.isExecutable = v.value("executable", false); - member.start = v["narOffset"]; - } else if (type == "symlink") { - member.type = FSAccessor::Type::tSymlink; - member.target = v.value("target", ""); - } else { - return; - } - }; - - json v = json::parse(listing); - recurse(root, v); - } - - NarMember* find(const Path& path) { - Path canon = path.empty() ? 
"" : canonPath(path); - NarMember* current = &root; - auto end = path.end(); - for (auto it = path.begin(); it != end;) { - // because it != end, the remaining component is non-empty so we need - // a directory - if (current->type != FSAccessor::Type::tDirectory) { - return nullptr; - } - - // skip slash (canonPath above ensures that this is always a slash) - assert(*it == '/'); - it += 1; - - // lookup current component - auto next = std::find(it, end, '/'); - auto child = current->children.find(std::string(it, next)); - if (child == current->children.end()) { - return nullptr; - } - current = &child->second; - - it = next; - } - - return current; - } - - NarMember& get(const Path& path) { - auto result = find(path); - if (result == nullptr) { - throw Error("NAR file does not contain path '%1%'", path); - } - return *result; - } - - Stat stat(const Path& path) override { - auto i = find(path); - if (i == nullptr) { - return {FSAccessor::Type::tMissing, 0, false}; - } - return {i->type, i->size, i->isExecutable, i->start}; - } - - StringSet readDirectory(const Path& path) override { - auto i = get(path); - - if (i.type != FSAccessor::Type::tDirectory) { - throw Error(format("path '%1%' inside NAR file is not a directory") % - path); - } - - StringSet res; - for (auto& child : i.children) { - res.insert(child.first); - } - - return res; - } - - std::string readFile(const Path& path) override { - auto i = get(path); - if (i.type != FSAccessor::Type::tRegular) { - throw Error(format("path '%1%' inside NAR file is not a regular file") % - path); - } - - if (getNarBytes) { - return getNarBytes(i.start, i.size); - } - - assert(nar); - return std::string(*nar, i.start, i.size); - } - - std::string readLink(const Path& path) override { - auto i = get(path); - if (i.type != FSAccessor::Type::tSymlink) { - throw Error(format("path '%1%' inside NAR file is not a symlink") % path); - } - return i.target; - } -}; - -ref makeNarAccessor(ref nar) { - return make_ref(nar); -} - -ref makeLazyNarAccessor(const std::string& listing, - GetNarBytes getNarBytes) { - return make_ref(listing, getNarBytes); -} - -void listNar(JSONPlaceholder& res, const ref& accessor, - const Path& path, bool recurse) { - auto st = accessor->stat(path); - - auto obj = res.object(); - - switch (st.type) { - case FSAccessor::Type::tRegular: - obj.attr("type", "regular"); - obj.attr("size", st.fileSize); - if (st.isExecutable) { - obj.attr("executable", true); - } - if (st.narOffset != 0u) { - obj.attr("narOffset", st.narOffset); - } - break; - case FSAccessor::Type::tDirectory: - obj.attr("type", "directory"); - { - auto res2 = obj.object("entries"); - for (auto& name : accessor->readDirectory(path)) { - if (recurse) { - auto res3 = res2.placeholder(name); - listNar(res3, accessor, path + "/" + name, true); - } else { - res2.object(name); - } - } - } - break; - case FSAccessor::Type::tSymlink: - obj.attr("type", "symlink"); - obj.attr("target", accessor->readLink(path)); - break; - default: - throw Error("path '%s' does not exist in NAR", path); - } -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/nar-accessor.hh b/third_party/nix/src/libstore/nar-accessor.hh deleted file mode 100644 index 0906a4606e..0000000000 --- a/third_party/nix/src/libstore/nar-accessor.hh +++ /dev/null @@ -1,29 +0,0 @@ -#pragma once - -#include - -#include "libstore/fs-accessor.hh" - -namespace nix { - -/* Return an object that provides access to the contents of a NAR - file. 
*/ -ref makeNarAccessor(ref nar); - -/* Create a NAR accessor from a NAR listing (in the format produced by - listNar()). The callback getNarBytes(offset, length) is used by the - readFile() method of the accessor to get the contents of files - inside the NAR. */ -typedef std::function GetNarBytes; - -ref makeLazyNarAccessor(const std::string& listing, - GetNarBytes getNarBytes); - -class JSONPlaceholder; - -/* Write a JSON representation of the contents of a NAR (except file - contents). */ -void listNar(JSONPlaceholder& res, const ref& accessor, - const Path& path, bool recurse); - -} // namespace nix diff --git a/third_party/nix/src/libstore/nar-info-disk-cache.cc b/third_party/nix/src/libstore/nar-info-disk-cache.cc deleted file mode 100644 index 90ea20a893..0000000000 --- a/third_party/nix/src/libstore/nar-info-disk-cache.cc +++ /dev/null @@ -1,295 +0,0 @@ -#include "libstore/nar-info-disk-cache.hh" - -#include -#include -#include -#include - -#include "libstore/globals.hh" -#include "libstore/sqlite.hh" -#include "libutil/sync.hh" - -namespace nix { - -static const char* schema = R"sql( - -create table if not exists BinaryCaches ( - id integer primary key autoincrement not null, - url text unique not null, - timestamp integer not null, - storeDir text not null, - wantMassQuery integer not null, - priority integer not null -); - -create table if not exists NARs ( - cache integer not null, - hashPart text not null, - namePart text, - url text, - compression text, - fileHash text, - fileSize integer, - narHash text, - narSize integer, - refs text, - deriver text, - sigs text, - ca text, - timestamp integer not null, - present integer not null, - primary key (cache, hashPart), - foreign key (cache) references BinaryCaches(id) on delete cascade -); - -create table if not exists LastPurge ( - dummy text primary key, - value integer -); - -)sql"; - -class NarInfoDiskCacheImpl final : public NarInfoDiskCache { - public: - /* How often to purge expired entries from the cache. */ - const int purgeInterval = 24 * 3600; - - struct Cache { - int id; - Path storeDir; - bool wantMassQuery; - int priority; - }; - - struct State { - SQLite db; - SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR, - purgeCache; - std::map caches; - }; - - Sync _state; - - NarInfoDiskCacheImpl() { - auto state(_state.lock()); - - Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite"; - createDirs(dirOf(dbPath)); - - state->db = SQLite(dbPath); - - if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK) { - throwSQLiteError(state->db, "setting timeout"); - } - - // We can always reproduce the cache. 
- state->db.exec("pragma synchronous = off"); - state->db.exec("pragma main.journal_mode = truncate"); - - state->db.exec(schema); - - state->insertCache.create( - state->db, - "insert or replace into BinaryCaches(url, timestamp, storeDir, " - "wantMassQuery, priority) values (?, ?, ?, ?, ?)"); - - state->queryCache.create(state->db, - "select id, storeDir, wantMassQuery, priority " - "from BinaryCaches where url = ?"); - - state->insertNAR.create( - state->db, - "insert or replace into NARs(cache, hashPart, namePart, url, " - "compression, fileHash, fileSize, narHash, " - "narSize, refs, deriver, sigs, ca, timestamp, present) values (?, ?, " - "?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)"); - - state->insertMissingNAR.create( - state->db, - "insert or replace into NARs(cache, hashPart, timestamp, present) " - "values (?, ?, ?, 0)"); - - state->queryNAR.create( - state->db, - "select present, namePart, url, compression, fileHash, fileSize, " - "narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? " - "and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 " - "and timestamp > ?))"); - - /* Periodically purge expired entries from the database. */ - retrySQLite([&]() { - auto now = time(nullptr); - - SQLiteStmt queryLastPurge(state->db, "select value from LastPurge"); - auto queryLastPurge_(queryLastPurge.use()); - - if (!queryLastPurge_.next() || - queryLastPurge_.getInt(0) < now - purgeInterval) { - SQLiteStmt(state->db, - "delete from NARs where ((present = 0 and timestamp < ?) or " - "(present = 1 and timestamp < ?))") - .use()(now - settings.ttlNegativeNarInfoCache)( - now - settings.ttlPositiveNarInfoCache) - .exec(); - - DLOG(INFO) << "deleted " << sqlite3_changes(state->db) - << " entries from the NAR info disk cache"; - - SQLiteStmt( - state->db, - "insert or replace into LastPurge(dummy, value) values ('', ?)") - .use()(now) - .exec(); - } - }); - } - - static Cache& getCache(State& state, const std::string& uri) { - auto i = state.caches.find(uri); - if (i == state.caches.end()) { - abort(); - } - return i->second; - } - - void createCache(const std::string& uri, const Path& storeDir, - bool wantMassQuery, int priority) override { - retrySQLite([&]() { - auto state(_state.lock()); - - // FIXME: race - - state->insertCache - .use()(uri)(time(nullptr))(storeDir)( - static_cast(wantMassQuery))(priority) - .exec(); - assert(sqlite3_changes(state->db) == 1); - state->caches[uri] = - Cache{static_cast(sqlite3_last_insert_rowid(state->db)), - storeDir, wantMassQuery, priority}; - }); - } - - bool cacheExists(const std::string& uri, bool& wantMassQuery, - int& priority) override { - return retrySQLite([&]() { - auto state(_state.lock()); - - auto i = state->caches.find(uri); - if (i == state->caches.end()) { - auto queryCache(state->queryCache.use()(uri)); - if (!queryCache.next()) { - return false; - } - state->caches.emplace( - uri, Cache{static_cast(queryCache.getInt(0)), - queryCache.getStr(1), queryCache.getInt(2) != 0, - static_cast(queryCache.getInt(3))}); - } - - auto& cache(getCache(*state, uri)); - - wantMassQuery = cache.wantMassQuery; - priority = cache.priority; - - return true; - }); - } - - std::pair> lookupNarInfo( - const std::string& uri, const std::string& hashPart) override { - return retrySQLite>>( - [&]() -> std::pair> { - auto state(_state.lock()); - - auto& cache(getCache(*state, uri)); - - auto now = time(nullptr); - - auto queryNAR(state->queryNAR.use()(cache.id)(hashPart)( - now - settings.ttlNegativeNarInfoCache)( - now - 
settings.ttlPositiveNarInfoCache)); - - if (!queryNAR.next()) { - return {oUnknown, nullptr}; - } - - if (queryNAR.getInt(0) == 0) { - return {oInvalid, nullptr}; - } - - auto narInfo = make_ref(); - - auto namePart = queryNAR.getStr(1); - narInfo->path = cache.storeDir + "/" + hashPart + - (namePart.empty() ? "" : "-" + namePart); - narInfo->url = queryNAR.getStr(2); - narInfo->compression = queryNAR.getStr(3); - if (!queryNAR.isNull(4)) { - auto hash_ = Hash::deserialize(queryNAR.getStr(4)); - // TODO(#statusor): does this throw mess with retrySQLite? - narInfo->fileHash = Hash::unwrap_throw(hash_); - } - narInfo->fileSize = queryNAR.getInt(5); - auto hash_ = Hash::deserialize(queryNAR.getStr(6)); - narInfo->narHash = Hash::unwrap_throw(hash_); - narInfo->narSize = queryNAR.getInt(7); - for (auto r : absl::StrSplit(queryNAR.getStr(8), absl::ByChar(' '), - absl::SkipEmpty())) { - narInfo->references.insert(absl::StrCat(cache.storeDir, "/", r)); - } - if (!queryNAR.isNull(9)) { - narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(9); - } - for (auto& sig : absl::StrSplit( - queryNAR.getStr(10), absl::ByChar(' '), absl::SkipEmpty())) { - narInfo->sigs.insert(std::string(sig)); - } - narInfo->ca = queryNAR.getStr(11); - - return {oValid, narInfo}; - }); - } - - void upsertNarInfo(const std::string& uri, const std::string& hashPart, - std::shared_ptr info) override { - retrySQLite([&]() { - auto state(_state.lock()); - - auto& cache(getCache(*state, uri)); - - if (info) { - auto narInfo = std::dynamic_pointer_cast(info); - - assert(hashPart == storePathToHash(info->path)); - - state->insertNAR - .use()(cache.id)(hashPart)(storePathToName(info->path))( - narInfo ? narInfo->url : "", narInfo != nullptr)( - narInfo ? narInfo->compression : "", narInfo != nullptr)( - narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() - : "", - narInfo && narInfo->fileHash)( - narInfo ? narInfo->fileSize : 0, - narInfo != nullptr && - (narInfo->fileSize != 0u))(info->narHash.to_string())( - info->narSize)(concatStringsSep(" ", info->shortRefs()))( - !info->deriver.empty() ? baseNameOf(info->deriver) : "", - !info->deriver.empty())(concatStringsSep(" ", info->sigs))( - info->ca)(time(nullptr)) - .exec(); - - } else { - state->insertMissingNAR.use()(cache.id)(hashPart)(time(nullptr)).exec(); - } - }); - } -}; - -std::shared_ptr getNarInfoDiskCache() { - static std::shared_ptr cache = - std::make_shared(); - return cache; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/nar-info-disk-cache.hh b/third_party/nix/src/libstore/nar-info-disk-cache.hh deleted file mode 100644 index 8eeab7635a..0000000000 --- a/third_party/nix/src/libstore/nar-info-disk-cache.hh +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once - -#include "libstore/nar-info.hh" -#include "libutil/ref.hh" - -namespace nix { - -class NarInfoDiskCache { - public: - typedef enum { oValid, oInvalid, oUnknown } Outcome; - - virtual void createCache(const std::string& uri, const Path& storeDir, - bool wantMassQuery, int priority) = 0; - - virtual bool cacheExists(const std::string& uri, bool& wantMassQuery, - int& priority) = 0; - - virtual std::pair> lookupNarInfo( - const std::string& uri, const std::string& hashPart) = 0; - - virtual void upsertNarInfo(const std::string& uri, - const std::string& hashPart, - std::shared_ptr info) = 0; -}; - -/* Return a singleton cache object that can be used concurrently by - multiple threads. 
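The NAR info disk cache removed above leans on a handful of SQLite conventions: a long busy timeout, `synchronous = off` and `journal_mode = truncate` because the cache can always be rebuilt, `insert or replace` upserts, and prepared statements reused across calls. A bare sqlite3 C API sketch of the same setup, without the SQLite/SQLiteStmt wrappers from the patch and with a made-up single-table schema:

    #include <sqlite3.h>
    #include <stdexcept>
    #include <string>

    static void check(int rc, sqlite3* db) {
      if (rc != SQLITE_OK) throw std::runtime_error(sqlite3_errmsg(db));
    }

    int main() {
      sqlite3* db = nullptr;
      int rc = sqlite3_open("binary-cache-test.sqlite", &db);
      check(rc, db);
      sqlite3_busy_timeout(db, 60 * 60 * 1000);  // wait instead of failing on SQLITE_BUSY

      // The cache is reproducible, so durability is traded for speed.
      check(sqlite3_exec(db,
                         "pragma synchronous = off;"
                         "pragma journal_mode = truncate;"
                         "create table if not exists NARs ("
                         "  hashPart text primary key, url text, timestamp integer)",
                         nullptr, nullptr, nullptr),
            db);

      // Prepared statement, reusable for many upserts.
      sqlite3_stmt* insert = nullptr;
      check(sqlite3_prepare_v2(db,
                "insert or replace into NARs(hashPart, url, timestamp) "
                "values (?, ?, ?)",
                -1, &insert, nullptr),
            db);

      sqlite3_bind_text(insert, 1, "abc123", -1, SQLITE_TRANSIENT);
      sqlite3_bind_text(insert, 2, "nar/abc123.nar.xz", -1, SQLITE_TRANSIENT);
      sqlite3_bind_int64(insert, 3, 1650000000);
      if (sqlite3_step(insert) != SQLITE_DONE)
        throw std::runtime_error(sqlite3_errmsg(db));

      sqlite3_finalize(insert);
      sqlite3_close(db);
    }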
*/ -std::shared_ptr getNarInfoDiskCache(); - -} // namespace nix diff --git a/third_party/nix/src/libstore/nar-info.cc b/third_party/nix/src/libstore/nar-info.cc deleted file mode 100644 index d42167dbfa..0000000000 --- a/third_party/nix/src/libstore/nar-info.cc +++ /dev/null @@ -1,142 +0,0 @@ -#include "libstore/nar-info.hh" - -#include -#include - -#include "libstore/globals.hh" - -namespace nix { - -NarInfo::NarInfo(const Store& store, const std::string& s, - const std::string& whence) { - auto corrupt = [&]() { - throw Error(format("NAR info file '%1%' is corrupt") % whence); - }; - - auto parseHashField = [&](const std::string& s) { - auto hash_ = Hash::deserialize(s); - if (hash_.ok()) { - return *hash_; - } else { - // TODO(#statusor): return an actual error - corrupt(); - return Hash(); - } - }; - - size_t pos = 0; - while (pos < s.size()) { - size_t colon = s.find(':', pos); - if (colon == std::string::npos) { - corrupt(); - } - - std::string name(s, pos, colon - pos); - - size_t eol = s.find('\n', colon + 2); - if (eol == std::string::npos) { - corrupt(); - } - - std::string value(s, colon + 2, eol - colon - 2); - - if (name == "StorePath") { - if (!store.isStorePath(value)) { - corrupt(); - } - path = value; - } else if (name == "URL") { - url = value; - } else if (name == "Compression") { - compression = value; - } else if (name == "FileHash") { - fileHash = parseHashField(value); - } else if (name == "FileSize") { - if (!absl::SimpleAtoi(value, &fileSize)) { - corrupt(); - } - } else if (name == "NarHash") { - narHash = parseHashField(value); - } else if (name == "NarSize") { - if (!absl::SimpleAtoi(value, &narSize)) { - corrupt(); - } - } else if (name == "References") { - std::vector refs = - absl::StrSplit(value, absl::ByChar(' '), absl::SkipEmpty()); - if (!references.empty()) { - corrupt(); - } - for (auto& r : refs) { - auto r2 = store.storeDir + "/" + r; - if (!store.isStorePath(r2)) { - corrupt(); - } - references.insert(r2); - } - } else if (name == "Deriver") { - if (value != "unknown-deriver") { - auto p = store.storeDir + "/" + value; - if (!store.isStorePath(p)) { - corrupt(); - } - deriver = p; - } - } else if (name == "System") { - system = value; - } else if (name == "Sig") { - sigs.insert(value); - } else if (name == "CA") { - if (!ca.empty()) { - corrupt(); - } - ca = value; - } - - pos = eol + 1; - } - - if (compression.empty()) { - compression = "bzip2"; - } - - if (path.empty() || url.empty() || narSize == 0 || !narHash) { - corrupt(); - } -} - -std::string NarInfo::to_string() const { - std::string res; - res += "StorePath: " + path + "\n"; - res += "URL: " + url + "\n"; - assert(!compression.empty()); - res += "Compression: " + compression + "\n"; - assert(fileHash.type == htSHA256); - res += "FileHash: " + fileHash.to_string(Base32) + "\n"; - res += "FileSize: " + std::to_string(fileSize) + "\n"; - assert(narHash.type == htSHA256); - res += "NarHash: " + narHash.to_string(Base32) + "\n"; - res += "NarSize: " + std::to_string(narSize) + "\n"; - - res += "References: " + concatStringsSep(" ", shortRefs()) + "\n"; - - if (!deriver.empty()) { - res += "Deriver: " + baseNameOf(deriver) + "\n"; - } - - if (!system.empty()) { - res += "System: " + system + "\n"; - } - - for (const auto& sig : sigs) { - res += "Sig: " + sig + "\n"; - } - - if (!ca.empty()) { - res += "CA: " + ca + "\n"; - } - - return res; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/nar-info.hh b/third_party/nix/src/libstore/nar-info.hh deleted file mode 100644 index 
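The NarInfo constructor removed above parses the textual .narinfo format: one `Key: value` pair per line, with known keys such as StorePath, URL, NarHash and NarSize. A stripped-down parser for just the line format, returning a plain map instead of a NarInfo; store-path validation is omitted, and repeated keys such as Sig would need a multimap:

    #include <map>
    #include <stdexcept>
    #include <string>

    // Parse "Key: value\n" pairs, e.g. the body of a .narinfo file.
    std::map<std::string, std::string> parseNarInfo(const std::string& s) {
      std::map<std::string, std::string> fields;
      size_t pos = 0;
      while (pos < s.size()) {
        size_t colon = s.find(':', pos);
        if (colon == std::string::npos) throw std::runtime_error("corrupt narinfo");

        size_t eol = s.find('\n', colon + 2);
        if (eol == std::string::npos) throw std::runtime_error("corrupt narinfo");

        std::string name(s, pos, colon - pos);
        std::string value(s, colon + 2, eol - colon - 2);  // skip ": "
        fields[name] = value;

        pos = eol + 1;
      }
      return fields;
    }

    // parseNarInfo("StorePath: /nix/store/...-hello-2.10\nNarSize: 12345\n")
    //   -> {{"StorePath", "/nix/store/...-hello-2.10"}, {"NarSize", "12345"}}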
48eccf8302..0000000000 --- a/third_party/nix/src/libstore/nar-info.hh +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once - -#include "libstore/store-api.hh" -#include "libutil/hash.hh" -#include "libutil/types.hh" - -namespace nix { - -struct NarInfo : ValidPathInfo { - std::string url; - std::string compression; - Hash fileHash; - uint64_t fileSize = 0; - std::string system; - - NarInfo() {} - NarInfo(const ValidPathInfo& info) : ValidPathInfo(info) {} - NarInfo(const Store& store, const std::string& s, const std::string& whence); - - std::string to_string() const; -}; - -} // namespace nix diff --git a/third_party/nix/src/libstore/nix-store.pc.in b/third_party/nix/src/libstore/nix-store.pc.in deleted file mode 100644 index b204776b37..0000000000 --- a/third_party/nix/src/libstore/nix-store.pc.in +++ /dev/null @@ -1,9 +0,0 @@ -prefix=@CMAKE_INSTALL_PREFIX@ -libdir=@CMAKE_INSTALL_FULL_LIBDIR@ -includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ - -Name: Nix -Description: Nix Package Manager -Version: @PACKAGE_VERSION@ -Libs: -L${libdir} -lnixstore -lnixutil -Cflags: -I${includedir}/nix diff --git a/third_party/nix/src/libstore/optimise-store.cc b/third_party/nix/src/libstore/optimise-store.cc deleted file mode 100644 index eb24633c18..0000000000 --- a/third_party/nix/src/libstore/optimise-store.cc +++ /dev/null @@ -1,296 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "libstore/globals.hh" -#include "libstore/local-store.hh" -#include "libutil/util.hh" - -namespace nix { - -static void makeWritable(const Path& path) { - struct stat st; - if (lstat(path.c_str(), &st) != 0) { - throw SysError(format("getting attributes of path '%1%'") % path); - } - if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1) { - throw SysError(format("changing writability of '%1%'") % path); - } -} - -struct MakeReadOnly { - Path path; - explicit MakeReadOnly(Path path) : path(std::move(path)) {} - ~MakeReadOnly() { - try { - /* This will make the path read-only. */ - if (!path.empty()) { - canonicaliseTimestampAndPermissions(path); - } - } catch (...) { - ignoreException(); - } - } -}; - -LocalStore::InodeHash LocalStore::loadInodeHash() { - DLOG(INFO) << "loading hash inodes in memory"; - InodeHash inodeHash; - - AutoCloseDir dir(opendir(linksDir.c_str())); - if (!dir) { - throw SysError(format("opening directory '%1%'") % linksDir); - } - - struct dirent* dirent; - while (errno = 0, dirent = readdir(dir.get())) { /* sic */ - checkInterrupt(); - // We don't care if we hit non-hash files, anything goes - inodeHash.insert(dirent->d_ino); - } - if (errno) { - throw SysError(format("reading directory '%1%'") % linksDir); - } - - DLOG(INFO) << "loaded " << inodeHash.size() << " hash inodes"; - - return inodeHash; -} - -Strings LocalStore::readDirectoryIgnoringInodes(const Path& path, - const InodeHash& inodeHash) { - Strings names; - - AutoCloseDir dir(opendir(path.c_str())); - if (!dir) { - throw SysError(format("opening directory '%1%'") % path); - } - - struct dirent* dirent; - while (errno = 0, dirent = readdir(dir.get())) { /* sic */ - checkInterrupt(); - - if (inodeHash.count(dirent->d_ino) != 0u) { - DLOG(WARNING) << dirent->d_name << " is already linked"; - continue; - } - - std::string name = dirent->d_name; - if (name == "." 
|| name == "..") { - continue; - } - names.push_back(name); - } - if (errno) { - throw SysError(format("reading directory '%1%'") % path); - } - - return names; -} - -void LocalStore::optimisePath_(OptimiseStats& stats, const Path& path, - InodeHash& inodeHash) { - checkInterrupt(); - - struct stat st; - if (lstat(path.c_str(), &st) != 0) { - throw SysError(format("getting attributes of path '%1%'") % path); - } - - if (S_ISDIR(st.st_mode)) { - Strings names = readDirectoryIgnoringInodes(path, inodeHash); - for (auto& i : names) { - optimisePath_(stats, path + "/" + i, inodeHash); - } - return; - } - - /* We can hard link regular files and maybe symlinks. */ - if (!S_ISREG(st.st_mode) -#if CAN_LINK_SYMLINK - && !S_ISLNK(st.st_mode) -#endif - ) - return; - - /* Sometimes SNAFUs can cause files in the Nix store to be - modified, in particular when running programs as root under - NixOS (example: $fontconfig/var/cache being modified). Skip - those files. FIXME: check the modification time. */ - if (S_ISREG(st.st_mode) && ((st.st_mode & S_IWUSR) != 0u)) { - LOG(WARNING) << "skipping suspicious writable file '" << path << "'"; - return; - } - - /* This can still happen on top-level files. */ - if (st.st_nlink > 1 && (inodeHash.count(st.st_ino) != 0u)) { - DLOG(INFO) << path << " is already linked, with " << (st.st_nlink - 2) - << " other file(s)"; - return; - } - - /* Hash the file. Note that hashPath() returns the hash over the - NAR serialisation, which includes the execute bit on the file. - Thus, executable and non-executable files with the same - contents *won't* be linked (which is good because otherwise the - permissions would be screwed up). - - Also note that if `path' is a symlink, then we're hashing the - contents of the symlink (i.e. the result of readlink()), not - the contents of the target (which may not even exist). */ - Hash hash = hashPath(htSHA256, path).first; - LOG(INFO) << path << " has hash " << hash.to_string(); - - /* Check if this is a known hash. */ - Path linkPath = linksDir + "/" + hash.to_string(Base32, false); - -retry: - if (!pathExists(linkPath)) { - /* Nope, create a hard link in the links directory. */ - if (link(path.c_str(), linkPath.c_str()) == 0) { - inodeHash.insert(st.st_ino); - return; - } - - switch (errno) { - case EEXIST: - /* Fall through if another process created ‘linkPath’ before - we did. */ - break; - - case ENOSPC: - /* On ext4, that probably means the directory index is - full. When that happens, it's fine to ignore it: we - just effectively disable deduplication of this - file. */ - LOG(WARNING) << "cannot link '" << linkPath << " to " << path << ": " - << strerror(errno); - - return; - - default: - throw SysError("cannot link '%1%' to '%2%'", linkPath, path); - } - } - - /* Yes! We've seen a file with the same contents. Replace the - current file with a hard link to that file. */ - struct stat stLink; - if (lstat(linkPath.c_str(), &stLink) != 0) { - throw SysError(format("getting attributes of path '%1%'") % linkPath); - } - - if (st.st_ino == stLink.st_ino) { - DLOG(INFO) << path << " is already linked to " << linkPath; - return; - } - - if (st.st_size != stLink.st_size) { - LOG(WARNING) << "removing corrupted link '" << linkPath << "'"; - unlink(linkPath.c_str()); - goto retry; - } - - DLOG(INFO) << "linking '" << path << "' to '" << linkPath << "'"; - - /* Make the containing directory writable, but only if it's not - the store itself (we don't want or need to mess with its - permissions). 
*/ - bool mustToggle = dirOf(path) != realStoreDir; - if (mustToggle) { - makeWritable(dirOf(path)); - } - - /* When we're done, make the directory read-only again and reset - its timestamp back to 0. */ - MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : ""); - - Path tempLink = - (format("%1%/.tmp-link-%2%-%3%") % realStoreDir % getpid() % random()) - .str(); - - if (link(linkPath.c_str(), tempLink.c_str()) == -1) { - if (errno == EMLINK) { - /* Too many links to the same file (>= 32000 on most file - systems). This is likely to happen with empty files. - Just shrug and ignore. */ - if (st.st_size != 0) { - LOG(WARNING) << linkPath << " has maximum number of links"; - } - return; - } - throw SysError("cannot link '%1%' to '%2%'", tempLink, linkPath); - } - - /* Atomically replace the old file with the new hard link. */ - if (rename(tempLink.c_str(), path.c_str()) == -1) { - if (unlink(tempLink.c_str()) == -1) { - LOG(ERROR) << "unable to unlink '" << tempLink << "'"; - } - if (errno == EMLINK) { - /* Some filesystems generate too many links on the rename, - rather than on the original link. (Probably it - temporarily increases the st_nlink field before - decreasing it again.) */ - DLOG(WARNING) << "'" << linkPath - << "' has reached maximum number of links"; - return; - } - throw SysError(format("cannot rename '%1%' to '%2%'") % tempLink % path); - } - - stats.filesLinked++; - stats.bytesFreed += st.st_size; - stats.blocksFreed += st.st_blocks; -} - -void LocalStore::optimiseStore(OptimiseStats& stats) { - PathSet paths = queryAllValidPaths(); - InodeHash inodeHash = loadInodeHash(); - - uint64_t done = 0; - - for (auto& i : paths) { - addTempRoot(i); - if (!isValidPath(i)) { - continue; - } /* path was GC'ed, probably */ - { - LOG(INFO) << "optimising path '" << i << "'"; - optimisePath_(stats, realStoreDir + "/" + baseNameOf(i), inodeHash); - } - done++; - } -} - -static std::string showBytes(unsigned long long bytes) { - return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str(); -} - -void LocalStore::optimiseStore() { - OptimiseStats stats; - - optimiseStore(stats); - - LOG(INFO) << showBytes(stats.bytesFreed) << " freed by hard-linking " - << stats.filesLinked << " files"; -} - -void LocalStore::optimisePath(const Path& path) { - OptimiseStats stats; - InodeHash inodeHash; - - if (settings.autoOptimiseStore) { - optimisePath_(stats, path, inodeHash); - } -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/parsed-derivations.cc b/third_party/nix/src/libstore/parsed-derivations.cc deleted file mode 100644 index 6989a21fee..0000000000 --- a/third_party/nix/src/libstore/parsed-derivations.cc +++ /dev/null @@ -1,128 +0,0 @@ -#include "libstore/parsed-derivations.hh" - -#include - -namespace nix { - -ParsedDerivation::ParsedDerivation(const Path& drvPath, BasicDerivation& drv) - : drvPath(drvPath), drv(drv) { - /* Parse the __json attribute, if any. 
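optimisePath_() removed above is the content-addressed hard-linking optimisation: hash the file, hard-link it into a links directory keyed by the hash, then atomically swap the original for a temporary hard link via rename(2), tolerating EEXIST, ENOSPC and EMLINK along the way. A much-reduced sketch of the link-then-rename core, with no hashing or permission toggling and assuming `tmpDir` lives on the same filesystem as `path`:

    #include <cerrno>
    #include <cstdio>
    #include <stdexcept>
    #include <string>
    #include <unistd.h>

    // Replace `path` with a hard link to `linkPath`, atomically.
    void replaceWithHardLink(const std::string& linkPath, const std::string& path,
                             const std::string& tmpDir) {
      std::string tempLink = tmpDir + "/.tmp-link-" + std::to_string(getpid());

      if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
        if (errno == EMLINK) return;  // too many links to that inode; just skip
        throw std::runtime_error("cannot link '" + tempLink + "' to '" +
                                 linkPath + "'");
      }

      // rename() atomically replaces the old file with the new hard link.
      if (std::rename(tempLink.c_str(), path.c_str()) == -1) {
        int renameErrno = errno;
        unlink(tempLink.c_str());       // clean up the temporary link
        if (renameErrno == EMLINK) return;  // some filesystems report EMLINK here
        throw std::runtime_error("cannot rename '" + tempLink + "' to '" +
                                 path + "'");
      }
    }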
*/ - auto jsonAttr = drv.env.find("__json"); - if (jsonAttr != drv.env.end()) { - try { - structuredAttrs = nlohmann::json::parse(jsonAttr->second); - } catch (std::exception& e) { - throw Error("cannot process __json attribute of '%s': %s", drvPath, - e.what()); - } - } -} - -std::optional ParsedDerivation::getStringAttr( - const std::string& name) const { - if (structuredAttrs) { - auto i = structuredAttrs->find(name); - if (i == structuredAttrs->end()) { - return {}; - } - if (!i->is_string()) { - throw Error("attribute '%s' of derivation '%s' must be a string", name, - drvPath); - } - return i->get(); - - } else { - auto i = drv.env.find(name); - if (i == drv.env.end()) { - return {}; - } - return i->second; - } -} - -bool ParsedDerivation::getBoolAttr(const std::string& name, bool def) const { - if (structuredAttrs) { - auto i = structuredAttrs->find(name); - if (i == structuredAttrs->end()) { - return def; - } - if (!i->is_boolean()) { - throw Error("attribute '%s' of derivation '%s' must be a Boolean", name, - drvPath); - } - return i->get(); - - } else { - auto i = drv.env.find(name); - if (i == drv.env.end()) { - return def; - } - return i->second == "1"; - } -} - -std::optional ParsedDerivation::getStringsAttr( - const std::string& name) const { - if (structuredAttrs) { - auto i = structuredAttrs->find(name); - if (i == structuredAttrs->end()) { - return {}; - } - if (!i->is_array()) { - throw Error("attribute '%s' of derivation '%s' must be a list of strings", - name, drvPath); - } - Strings res; - for (const auto& j : *i) { - if (!j.is_string()) { - throw Error( - "attribute '%s' of derivation '%s' must be a list of strings", name, - drvPath); - } - res.push_back(j.get()); - } - return res; - - } else { - auto i = drv.env.find(name); - if (i == drv.env.end()) { - return {}; - } - return absl::StrSplit(i->second, absl::ByAnyChar(" \t\n\r"), - absl::SkipEmpty()); - } -} - -StringSet ParsedDerivation::getRequiredSystemFeatures() const { - StringSet res; - for (auto& i : getStringsAttr("requiredSystemFeatures").value_or(Strings())) { - res.insert(i); - } - return res; -} - -bool ParsedDerivation::canBuildLocally() const { - if (drv.platform != settings.thisSystem.get() && - (settings.extraPlatforms.get().count(drv.platform) == 0u) && - !drv.isBuiltin()) { - return false; - } - - for (auto& feature : getRequiredSystemFeatures()) { - if (settings.systemFeatures.get().count(feature) == 0u) { - return false; - } - } - - return true; -} - -bool ParsedDerivation::willBuildLocally() const { - return getBoolAttr("preferLocalBuild") && canBuildLocally(); -} - -bool ParsedDerivation::substitutesAllowed() const { - return getBoolAttr("allowSubstitutes", true); -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/parsed-derivations.hh b/third_party/nix/src/libstore/parsed-derivations.hh deleted file mode 100644 index 7cd3d36f67..0000000000 --- a/third_party/nix/src/libstore/parsed-derivations.hh +++ /dev/null @@ -1,34 +0,0 @@ -#include - -#include "libstore/derivations.hh" - -namespace nix { - -class ParsedDerivation { - Path drvPath; - BasicDerivation& drv; - std::optional structuredAttrs; - - public: - ParsedDerivation(const Path& drvPath, BasicDerivation& drv); - - const std::optional& getStructuredAttrs() const { - return structuredAttrs; - } - - std::optional getStringAttr(const std::string& name) const; - - bool getBoolAttr(const std::string& name, bool def = false) const; - - std::optional getStringsAttr(const std::string& name) const; - - StringSet 
getRequiredSystemFeatures() const; - - bool canBuildLocally() const; - - bool willBuildLocally() const; - - bool substitutesAllowed() const; -}; - -} // namespace nix diff --git a/third_party/nix/src/libstore/pathlocks.cc b/third_party/nix/src/libstore/pathlocks.cc deleted file mode 100644 index 09dec08c45..0000000000 --- a/third_party/nix/src/libstore/pathlocks.cc +++ /dev/null @@ -1,172 +0,0 @@ -#include "libstore/pathlocks.hh" - -#include -#include - -#include -#include -#include -#include -#include - -#include "libutil/sync.hh" -#include "libutil/util.hh" - -namespace nix { - -AutoCloseFD openLockFile(const Path& path, bool create) { - AutoCloseFD fd( - open(path.c_str(), O_CLOEXEC | O_RDWR | (create ? O_CREAT : 0), 0600)); - - if (!fd && (create || errno != ENOENT)) { - throw SysError(format("opening lock file '%1%'") % path); - } - - return fd; -} - -void deleteLockFile(const Path& path, int fd) { - /* Get rid of the lock file. Have to be careful not to introduce - races. Write a (meaningless) token to the file to indicate to - other processes waiting on this lock that the lock is stale - (deleted). */ - unlink(path.c_str()); - writeFull(fd, "d"); - /* Note that the result of unlink() is ignored; removing the lock - file is an optimisation, not a necessity. */ -} - -bool lockFile(int fd, LockType lockType, bool wait) { - int type; - if (lockType == ltRead) { - type = LOCK_SH; - } else if (lockType == ltWrite) { - type = LOCK_EX; - } else if (lockType == ltNone) { - type = LOCK_UN; - } else { - abort(); - } - - if (wait) { - while (flock(fd, type) != 0) { - checkInterrupt(); - if (errno != EINTR) { - throw SysError(format("acquiring/releasing lock")); - } - return false; - } - } else { - while (flock(fd, type | LOCK_NB) != 0) { - checkInterrupt(); - if (errno == EWOULDBLOCK) { - return false; - } - if (errno != EINTR) { - throw SysError(format("acquiring/releasing lock")); - } - } - } - - return true; -} - -PathLocks::PathLocks() : deletePaths(false) {} - -PathLocks::PathLocks(const PathSet& paths, const std::string& waitMsg) - : deletePaths(false) { - lockPaths(paths, waitMsg); -} - -bool PathLocks::lockPaths(const PathSet& paths, const std::string& waitMsg, - bool wait) { - assert(fds.empty()); - - /* Note that `fds' is built incrementally so that the destructor - will only release those locks that we have already acquired. */ - - /* Acquire the lock for each path in sorted order. This ensures - that locks are always acquired in the same order, thus - preventing deadlocks. */ - for (auto& path : paths) { - checkInterrupt(); - Path lockPath = path + ".lock"; - - VLOG(2) << "locking path '" << path << "'"; - - AutoCloseFD fd; - - while (true) { - /* Open/create the lock file. */ - fd = openLockFile(lockPath, true); - - /* Acquire an exclusive lock. */ - if (!lockFile(fd.get(), ltWrite, false)) { - if (wait) { - if (!waitMsg.empty()) { - LOG(WARNING) << waitMsg; - } - lockFile(fd.get(), ltWrite, true); - } else { - /* Failed to lock this path; release all other - locks. */ - unlock(); - return false; - } - } - - VLOG(2) << "lock acquired on '" << lockPath << "'"; - - /* Check that the lock file hasn't become stale (i.e., - hasn't been unlinked). */ - struct stat st; - if (fstat(fd.get(), &st) == -1) { - throw SysError(format("statting lock file '%1%'") % lockPath); - } - if (st.st_size != 0) { - /* This lock file has been unlinked, so we're holding - a lock on a deleted file. This means that other - processes may create and acquire a lock on - `lockPath', and proceed. So we must retry. 
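lockFile() removed above wraps flock(2): LOCK_SH for read locks, LOCK_EX for write locks, LOCK_UN to release, LOCK_NB for the non-blocking case, and retries on EINTR; the surrounding lockPaths() then detects stale locks by checking whether the lock file has been unlinked and marked with a token. A minimal standalone version of just the flock wrapper, without the interrupt checking from the patch:

    #include <cerrno>
    #include <stdexcept>
    #include <sys/file.h>

    enum LockType { ltRead, ltWrite, ltNone };

    // Returns false only in the non-blocking case when someone else holds the lock.
    bool lockFile(int fd, LockType lockType, bool wait) {
      int type = lockType == ltRead    ? LOCK_SH
                 : lockType == ltWrite ? LOCK_EX
                                       : LOCK_UN;
      if (!wait) type |= LOCK_NB;

      while (flock(fd, type) != 0) {
        if (!wait && errno == EWOULDBLOCK) return false;  // lock is busy
        if (errno != EINTR)
          throw std::runtime_error("acquiring/releasing lock failed");
        // EINTR: interrupted by a signal, retry.
      }
      return true;
    }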
*/ - DLOG(INFO) << "open lock file '" << lockPath << "' has become stale"; - } else { - break; - } - } - - /* Use borrow so that the descriptor isn't closed. */ - fds.emplace_back(fd.release(), lockPath); - } - - return true; -} - -PathLocks::~PathLocks() { - try { - unlock(); - } catch (...) { - ignoreException(); - } -} - -void PathLocks::unlock() { - for (auto& i : fds) { - if (deletePaths) { - deleteLockFile(i.second, i.first); - } - - if (close(i.first) == -1) { - LOG(WARNING) << "cannot close lock file on '" << i.second << "'"; - } - - VLOG(2) << "lock released on '" << i.second << "'"; - } - - fds.clear(); -} - -void PathLocks::setDeletion(bool deletePaths) { - this->deletePaths = deletePaths; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/pathlocks.hh b/third_party/nix/src/libstore/pathlocks.hh deleted file mode 100644 index d515963e76..0000000000 --- a/third_party/nix/src/libstore/pathlocks.hh +++ /dev/null @@ -1,35 +0,0 @@ -#pragma once - -#include "libutil/util.hh" - -namespace nix { - -/* Open (possibly create) a lock file and return the file descriptor. - -1 is returned if create is false and the lock could not be opened - because it doesn't exist. Any other error throws an exception. */ -AutoCloseFD openLockFile(const Path& path, bool create); - -/* Delete an open lock file. */ -void deleteLockFile(const Path& path, int fd); - -enum LockType { ltRead, ltWrite, ltNone }; - -bool lockFile(int fd, LockType lockType, bool wait); - -class PathLocks { - private: - typedef std::pair FDPair; - std::list fds; - bool deletePaths; - - public: - PathLocks(); - PathLocks(const PathSet& paths, const std::string& waitMsg = ""); - bool lockPaths(const PathSet& _paths, const std::string& waitMsg = "", - bool wait = true); - ~PathLocks(); - void unlock(); - void setDeletion(bool deletePaths); -}; - -} // namespace nix diff --git a/third_party/nix/src/libstore/profiles.cc b/third_party/nix/src/libstore/profiles.cc deleted file mode 100644 index 0d44c60cc4..0000000000 --- a/third_party/nix/src/libstore/profiles.cc +++ /dev/null @@ -1,252 +0,0 @@ -#include "libstore/profiles.hh" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "libstore/store-api.hh" -#include "libutil/util.hh" - -namespace nix { - -static bool cmpGensByNumber(const Generation& a, const Generation& b) { - return a.number < b.number; -} - -// Parse a generation out of the format -// `--link'. -static int parseName(absl::string_view profileName, absl::string_view name) { - // Consume the `-' prefix and and `-link' suffix. - if (!(absl::ConsumePrefix(&name, profileName) && - absl::ConsumePrefix(&name, "-") && - absl::ConsumeSuffix(&name, "-link"))) { - return -1; - } - - int n; - if (!absl::SimpleAtoi(name, &n) || n < 0) { - return -1; - } - - return n; -} - -Generations findGenerations(const Path& profile, int& curGen) { - Generations gens; - - Path profileDir = dirOf(profile); - std::string profileName = baseNameOf(profile); - - for (auto& i : readDirectory(profileDir)) { - int n; - if ((n = parseName(profileName, i.name)) != -1) { - Generation gen; - gen.path = profileDir + "/" + i.name; - gen.number = n; - struct stat st; - if (lstat(gen.path.c_str(), &st) != 0) { - throw SysError(format("statting '%1%'") % gen.path); - } - gen.creationTime = st.st_mtime; - gens.push_back(gen); - } - } - - gens.sort(cmpGensByNumber); - - curGen = pathExists(profile) ? 
parseName(profileName, readLink(profile)) : -1; - - return gens; -} - -static void makeName(const Path& profile, unsigned int num, Path& outLink) { - Path prefix = (format("%1%-%2%") % profile % num).str(); - outLink = prefix + "-link"; -} - -Path createGeneration(const ref& store, const Path& profile, - const Path& outPath) { - /* The new generation number should be higher than old the - previous ones. */ - int dummy; - Generations gens = findGenerations(profile, dummy); - - unsigned int num; - if (!gens.empty()) { - Generation last = gens.back(); - - if (readLink(last.path) == outPath) { - /* We only create a new generation symlink if it differs - from the last one. - - This helps keeping gratuitous installs/rebuilds from piling - up uncontrolled numbers of generations, cluttering up the - UI like grub. */ - return last.path; - } - - num = gens.back().number; - } else { - num = 0; - } - - /* Create the new generation. Note that addPermRoot() blocks if - the garbage collector is running to prevent the stuff we've - built from moving from the temporary roots (which the GC knows) - to the permanent roots (of which the GC would have a stale - view). If we didn't do it this way, the GC might remove the - user environment etc. we've just built. */ - Path generation; - makeName(profile, num + 1, generation); - store->addPermRoot(outPath, generation, false, true); - - return generation; -} - -static void removeFile(const Path& path) { - if (remove(path.c_str()) == -1) { - throw SysError(format("cannot unlink '%1%'") % path); - } -} - -void deleteGeneration(const Path& profile, unsigned int gen) { - Path generation; - makeName(profile, gen, generation); - removeFile(generation); -} - -static void deleteGeneration2(const Path& profile, unsigned int gen, - bool dryRun) { - if (dryRun) { - LOG(INFO) << "would remove generation " << gen; - } else { - LOG(INFO) << "removing generation " << gen; - deleteGeneration(profile, gen); - } -} - -void deleteGenerations(const Path& profile, - const std::set& gensToDelete, - bool dryRun) { - PathLocks lock; - lockProfile(lock, profile); - - int curGen; - Generations gens = findGenerations(profile, curGen); - - if (gensToDelete.find(curGen) != gensToDelete.end()) { - throw Error(format("cannot delete current generation of profile %1%'") % - profile); - } - - for (auto& i : gens) { - if (gensToDelete.find(i.number) == gensToDelete.end()) { - continue; - } - deleteGeneration2(profile, i.number, dryRun); - } -} - -void deleteGenerationsGreaterThan(const Path& profile, int max, bool dryRun) { - PathLocks lock; - lockProfile(lock, profile); - - int curGen; - bool fromCurGen = false; - Generations gens = findGenerations(profile, curGen); - for (auto i = gens.rbegin(); i != gens.rend(); ++i) { - if (i->number == curGen) { - fromCurGen = true; - max--; - continue; - } - if (fromCurGen) { - if (max != 0) { - max--; - continue; - } - deleteGeneration2(profile, i->number, dryRun); - } - } -} - -void deleteOldGenerations(const Path& profile, bool dryRun) { - PathLocks lock; - lockProfile(lock, profile); - - int curGen; - Generations gens = findGenerations(profile, curGen); - - for (auto& i : gens) { - if (i.number != curGen) { - deleteGeneration2(profile, i.number, dryRun); - } - } -} - -void deleteGenerationsOlderThan(const Path& profile, time_t t, bool dryRun) { - PathLocks lock; - lockProfile(lock, profile); - - int curGen; - Generations gens = findGenerations(profile, curGen); - - bool canDelete = false; - for (auto i = gens.rbegin(); i != gens.rend(); ++i) { - if 
(canDelete) { - assert(i->creationTime < t); - if (i->number != curGen) { - deleteGeneration2(profile, i->number, dryRun); - } - } else if (i->creationTime < t) { - /* We may now start deleting generations, but we don't - delete this generation yet, because this generation was - still the one that was active at the requested point in - time. */ - canDelete = true; - } - } -} - -void deleteGenerationsOlderThan(const Path& profile, - const std::string& timeSpec, bool dryRun) { - time_t curTime = time(nullptr); - std::string strDays = std::string(timeSpec, 0, timeSpec.size() - 1); - int days; - - if (!absl::SimpleAtoi(strDays, &days) || days < 1) { - throw Error(format("invalid number of days specifier '%1%'") % timeSpec); - } - - time_t oldTime = curTime - days * 24 * 3600; - - deleteGenerationsOlderThan(profile, oldTime, dryRun); -} - -void switchLink(const Path& link, Path target) { - /* Hacky. */ - if (dirOf(target) == dirOf(link)) { - target = baseNameOf(target); - } - - replaceSymlink(target, link); -} - -void lockProfile(PathLocks& lock, const Path& profile) { - lock.lockPaths({profile}, - (format("waiting for lock on profile '%1%'") % profile).str()); - lock.setDeletion(true); -} - -std::string optimisticLockProfile(const Path& profile) { - return pathExists(profile) ? readLink(profile) : ""; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/profiles.hh b/third_party/nix/src/libstore/profiles.hh deleted file mode 100644 index ff63990409..0000000000 --- a/third_party/nix/src/libstore/profiles.hh +++ /dev/null @@ -1,61 +0,0 @@ -#pragma once - -#include - -#include "libstore/pathlocks.hh" -#include "libutil/types.hh" - -namespace nix { - -struct Generation { - int number; - Path path; - time_t creationTime; - Generation() { number = -1; } - operator bool() const { return number != -1; } -}; - -typedef std::list Generations; - -/* Returns the list of currently present generations for the specified - profile, sorted by generation number. */ -Generations findGenerations(const Path& profile, int& curGen); - -class LocalFSStore; - -Path createGeneration(const ref& store, const Path& profile, - const Path& outPath); - -void deleteGeneration(const Path& profile, unsigned int gen); - -void deleteGenerations(const Path& profile, - const std::set& gensToDelete, bool dryRun); - -void deleteGenerationsGreaterThan(const Path& profile, const int max, - bool dryRun); - -void deleteOldGenerations(const Path& profile, bool dryRun); - -void deleteGenerationsOlderThan(const Path& profile, time_t t, bool dryRun); - -void deleteGenerationsOlderThan(const Path& profile, - const std::string& timeSpec, bool dryRun); - -void switchLink(const Path& link, Path target); - -/* Ensure exclusive access to a profile. Any command that modifies - the profile first acquires this lock. */ -void lockProfile(PathLocks& lock, const Path& profile); - -/* Optimistic locking is used by long-running operations like `nix-env - -i'. Instead of acquiring the exclusive lock for the entire - duration of the operation, we just perform the operation - optimistically (without an exclusive lock), and check at the end - whether the profile changed while we were busy (i.e., the symlink - target changed). If so, the operation is restarted. Restarting is - generally cheap, since the build results are still in the Nix - store. Most of the time, only the user environment has to be - rebuilt. 
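findGenerations()/parseName() removed above recover the generation number from profile symlink names of the form `<profile>-<N>-link`, using the same absl string helpers the patch pulls in. The parse in isolation:

    #include <absl/strings/numbers.h>
    #include <absl/strings/string_view.h>
    #include <absl/strings/strip.h>

    // Returns the generation number in "<profileName>-<N>-link", or -1.
    int parseGenerationName(absl::string_view profileName, absl::string_view name) {
      // Consume the "<profileName>-" prefix and the "-link" suffix.
      if (!(absl::ConsumePrefix(&name, profileName) &&
            absl::ConsumePrefix(&name, "-") &&
            absl::ConsumeSuffix(&name, "-link"))) {
        return -1;
      }
      int n;
      if (!absl::SimpleAtoi(name, &n) || n < 0) return -1;
      return n;
    }

    // parseGenerationName("profile", "profile-42-link") == 42
    // parseGenerationName("profile", "profile-manifest.nix") == -1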
*/ -std::string optimisticLockProfile(const Path& profile); - -} // namespace nix diff --git a/third_party/nix/src/libstore/references.cc b/third_party/nix/src/libstore/references.cc deleted file mode 100644 index f120439c10..0000000000 --- a/third_party/nix/src/libstore/references.cc +++ /dev/null @@ -1,126 +0,0 @@ -#include "libstore/references.hh" - -#include -#include - -#include - -#include "libutil/archive.hh" -#include "libutil/hash.hh" -#include "libutil/util.hh" - -namespace nix { - -constexpr unsigned int kRefLength = 32; /* characters */ - -static void search(const unsigned char* s, size_t len, StringSet& hashes, - StringSet& seen) { - static bool initialised = false; - static bool isBase32[256]; - if (!initialised) { - for (bool& i : isBase32) { - i = false; - } - for (char base32Char : base32Chars) { - isBase32[static_cast(base32Char)] = true; - } - initialised = true; - } - - for (size_t i = 0; i + kRefLength <= len;) { - int j = 0; - bool match = true; - for (j = kRefLength - 1; j >= 0; --j) { - if (!isBase32[s[i + j]]) { - i += j + 1; - match = false; - break; - } - } - if (!match) { - continue; - } - std::string ref(reinterpret_cast(s) + i, kRefLength); - if (hashes.find(ref) != hashes.end()) { - DLOG(INFO) << "found reference to '" << ref << "' at offset " << i; - seen.insert(ref); - hashes.erase(ref); - } - ++i; - } -} - -struct RefScanSink : Sink { - HashSink hashSink; - StringSet hashes; - StringSet seen; - - std::string tail; - - RefScanSink() : hashSink(htSHA256) {} - - void operator()(const unsigned char* data, size_t len) override; -}; - -void RefScanSink::operator()(const unsigned char* data, size_t len) { - hashSink(data, len); - - /* It's possible that a reference spans the previous and current - fragment, so search in the concatenation of the tail of the - previous fragment and the start of the current fragment. */ - std::string s = tail + std::string(reinterpret_cast(data), - len > kRefLength ? kRefLength : len); - search(reinterpret_cast(s.data()), s.size(), hashes, - seen); - - search(data, len, hashes, seen); - - size_t tailLen = len <= kRefLength ? len : kRefLength; - tail = - std::string(tail, tail.size() < kRefLength - tailLen - ? 0 - : tail.size() - (kRefLength - tailLen)) + - std::string(reinterpret_cast(data) + len - tailLen, tailLen); -} - -PathSet scanForReferences(const std::string& path, const PathSet& refs, - HashResult& hash) { - RefScanSink sink; - std::map backMap; - - /* For efficiency (and a higher hit rate), just search for the - hash part of the file name. (This assumes that all references - have the form `HASH-bla'). */ - for (auto& i : refs) { - std::string baseName = baseNameOf(i); - std::string::size_type pos = baseName.find('-'); - if (pos == std::string::npos) { - throw Error(format("bad reference '%1%'") % i); - } - std::string s = std::string(baseName, 0, pos); - assert(s.size() == kRefLength); - assert(backMap.find(s) == backMap.end()); - // parseHash(htSHA256, s); - sink.hashes.insert(s); - backMap[s] = i; - } - - /* Look for the hashes in the NAR dump of the path. */ - dumpPath(path, sink); - - /* Map the hashes found back to their store paths. 
*/ - PathSet found; - for (auto& i : sink.seen) { - std::map::iterator j; - if ((j = backMap.find(i)) == backMap.end()) { - abort(); - } - found.insert(j->second); - } - - hash = sink.hashSink.finish(); - - return found; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/references.hh b/third_party/nix/src/libstore/references.hh deleted file mode 100644 index 94ac5200bd..0000000000 --- a/third_party/nix/src/libstore/references.hh +++ /dev/null @@ -1,11 +0,0 @@ -#pragma once - -#include "libutil/hash.hh" -#include "libutil/types.hh" - -namespace nix { - -PathSet scanForReferences(const Path& path, const PathSet& refs, - HashResult& hash); - -} diff --git a/third_party/nix/src/libstore/remote-fs-accessor.cc b/third_party/nix/src/libstore/remote-fs-accessor.cc deleted file mode 100644 index 4178030b55..0000000000 --- a/third_party/nix/src/libstore/remote-fs-accessor.cc +++ /dev/null @@ -1,133 +0,0 @@ -#include "libstore/remote-fs-accessor.hh" - -#include -#include -#include - -#include "libstore/nar-accessor.hh" -#include "libutil/json.hh" - -namespace nix { - -RemoteFSAccessor::RemoteFSAccessor(const ref& store, - const Path& cacheDir) - : store(store), cacheDir(cacheDir) { - if (!cacheDir.empty()) { - createDirs(cacheDir); - } -} - -Path RemoteFSAccessor::makeCacheFile(const Path& storePath, - const std::string& ext) { - assert(!cacheDir.empty()); - return fmt("%s/%s.%s", cacheDir, storePathToHash(storePath), ext); -} - -void RemoteFSAccessor::addToCache(const Path& storePath, const std::string& nar, - const ref& narAccessor) { - nars.emplace(storePath, narAccessor); - - if (!cacheDir.empty()) { - try { - std::ostringstream str; - JSONPlaceholder jsonRoot(str); - listNar(jsonRoot, narAccessor, "", true); - writeFile(makeCacheFile(storePath, "ls"), str.str()); - - /* FIXME: do this asynchronously. */ - writeFile(makeCacheFile(storePath, "nar"), nar); - - } catch (...) 
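scanForReferences() removed above searches the NAR dump for the 32-character base32 hash parts of candidate store paths, keeping a tail of the previous chunk so that a reference spanning two fragments is still found. The scanning core reduced to a single buffer and a plain character-class check; kRefLength and the Nix base32 alphabet follow the patch, the streaming tail handling is left out:

    #include <array>
    #include <cstddef>
    #include <set>
    #include <string>

    constexpr size_t kRefLength = 32;
    const std::string kBase32Chars = "0123456789abcdfghijklmnpqrsvwxyz";  // Nix base32

    // Record every 32-character base32 substring of `data` that occurs in `hashes`.
    void searchRefs(const std::string& data, const std::set<std::string>& hashes,
                    std::set<std::string>& seen) {
      std::array<bool, 256> isBase32{};
      for (unsigned char c : kBase32Chars) isBase32[c] = true;

      for (size_t i = 0; i + kRefLength <= data.size();) {
        bool match = true;
        // Scan backwards so a non-base32 character lets us skip past it.
        for (size_t j = kRefLength; j-- > 0;) {
          if (!isBase32[static_cast<unsigned char>(data[i + j])]) {
            i += j + 1;
            match = false;
            break;
          }
        }
        if (!match) continue;
        std::string ref = data.substr(i, kRefLength);
        if (hashes.count(ref)) seen.insert(ref);
        ++i;
      }
    }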
{ - ignoreException(); - } - } -} - -std::pair, Path> RemoteFSAccessor::fetch(const Path& path_) { - auto path = canonPath(path_); - - auto storePath = store->toStorePath(path); - std::string restPath = std::string(path, storePath.size()); - - if (!store->isValidPath(storePath)) { - throw InvalidPath(format("path '%1%' is not a valid store path") % - storePath); - } - - auto i = nars.find(storePath); - if (i != nars.end()) { - return {i->second, restPath}; - } - - StringSink sink; - std::string listing; - Path cacheFile; - - if (!cacheDir.empty() && - pathExists(cacheFile = makeCacheFile(storePath, "nar"))) { - try { - listing = nix::readFile(makeCacheFile(storePath, "ls")); - - auto narAccessor = makeLazyNarAccessor( - listing, [cacheFile](uint64_t offset, uint64_t length) { - AutoCloseFD fd(open(cacheFile.c_str(), O_RDONLY | O_CLOEXEC)); - if (!fd) { - throw SysError("opening NAR cache file '%s'", cacheFile); - } - - if (lseek(fd.get(), offset, SEEK_SET) != - static_cast(offset)) { - throw SysError("seeking in '%s'", cacheFile); - } - - std::string buf(length, 0); - readFull(fd.get(), reinterpret_cast(buf.data()), - length); - - return buf; - }); - - nars.emplace(storePath, narAccessor); - return {narAccessor, restPath}; - - } catch (SysError&) { - } - - try { - *sink.s = nix::readFile(cacheFile); - - auto narAccessor = makeNarAccessor(sink.s); - nars.emplace(storePath, narAccessor); - return {narAccessor, restPath}; - - } catch (SysError&) { - } - } - - store->narFromPath(storePath, sink); - auto narAccessor = makeNarAccessor(sink.s); - addToCache(storePath, *sink.s, narAccessor); - return {narAccessor, restPath}; -} - -FSAccessor::Stat RemoteFSAccessor::stat(const Path& path) { - auto res = fetch(path); - return res.first->stat(res.second); -} - -StringSet RemoteFSAccessor::readDirectory(const Path& path) { - auto res = fetch(path); - return res.first->readDirectory(res.second); -} - -std::string RemoteFSAccessor::readFile(const Path& path) { - auto res = fetch(path); - return res.first->readFile(res.second); -} - -std::string RemoteFSAccessor::readLink(const Path& path) { - auto res = fetch(path); - return res.first->readLink(res.second); -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/remote-fs-accessor.hh b/third_party/nix/src/libstore/remote-fs-accessor.hh deleted file mode 100644 index c4f6e89c97..0000000000 --- a/third_party/nix/src/libstore/remote-fs-accessor.hh +++ /dev/null @@ -1,38 +0,0 @@ -#pragma once - -#include "libstore/fs-accessor.hh" -#include "libstore/store-api.hh" -#include "libutil/ref.hh" - -namespace nix { - -class RemoteFSAccessor : public FSAccessor { - ref store; - - std::map> nars; - - Path cacheDir; - - std::pair, Path> fetch(const Path& path_); - - friend class BinaryCacheStore; - - Path makeCacheFile(const Path& storePath, const std::string& ext); - - void addToCache(const Path& storePath, const std::string& nar, - const ref& narAccessor); - - public: - RemoteFSAccessor(const ref& store, - const /* FIXME: use std::optional */ Path& cacheDir = ""); - - Stat stat(const Path& path) override; - - StringSet readDirectory(const Path& path) override; - - std::string readFile(const Path& path) override; - - std::string readLink(const Path& path) override; -}; - -} // namespace nix diff --git a/third_party/nix/src/libstore/remote-store.cc b/third_party/nix/src/libstore/remote-store.cc deleted file mode 100644 index cb6cc808c6..0000000000 --- a/third_party/nix/src/libstore/remote-store.cc +++ /dev/null @@ -1,686 +0,0 @@ -#include 
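RemoteFSAccessor::fetch() removed above services readFile() on a cached NAR by seeking to an offset in the on-disk cache file and reading `length` bytes (the GetNarBytes callback handed to makeLazyNarAccessor). The same idea as a standalone sketch, here using pread(2) instead of the lseek()+readFull() pair in the patch:

    #include <cstdint>
    #include <fcntl.h>
    #include <stdexcept>
    #include <string>
    #include <unistd.h>

    // Read `length` bytes starting at `offset` from a cached NAR file.
    std::string readNarBytes(const std::string& cacheFile, uint64_t offset,
                             uint64_t length) {
      int fd = open(cacheFile.c_str(), O_RDONLY | O_CLOEXEC);
      if (fd == -1)
        throw std::runtime_error("opening NAR cache file '" + cacheFile + "'");

      std::string buf(length, '\0');
      size_t done = 0;
      while (done < length) {
        ssize_t n = pread(fd, &buf[done], length - done, offset + done);
        if (n <= 0) {  // error or unexpected end of file
          close(fd);
          throw std::runtime_error("reading from '" + cacheFile + "'");
        }
        done += static_cast<size_t>(n);
      }
      close(fd);
      return buf;
    }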
"libstore/remote-store.hh" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/worker-protocol.hh" -#include "libutil/affinity.hh" -#include "libutil/archive.hh" -#include "libutil/finally.hh" -#include "libutil/pool.hh" -#include "libutil/serialise.hh" -#include "libutil/util.hh" - -namespace nix { - -Path readStorePath(Store& store, Source& from) { - Path path = readString(from); - store.assertStorePath(path); - return path; -} - -template -T readStorePaths(Store& store, Source& from) { - T paths = readStrings(from); - for (auto& i : paths) { - store.assertStorePath(i); - } - return paths; -} - -template PathSet readStorePaths(Store& store, Source& from); -template Paths readStorePaths(Store& store, Source& from); - -/* TODO: Separate these store impls into different files, give them better names - */ -RemoteStore::RemoteStore(const Params& params) - : Store(params), - connections(make_ref>( - std::max(1, (int)maxConnections), - [this]() { return openConnectionWrapper(); }, - [this](const ref& r) { - return r->to.good() && r->from.good() && - std::chrono::duration_cast( - std::chrono::steady_clock::now() - r->startTime) - .count() < maxConnectionAge; - })) {} - -ref RemoteStore::openConnectionWrapper() { - if (failed) { - throw Error("opening a connection to remote store '%s' previously failed", - getUri()); - } - try { - return openConnection(); - } catch (...) { - failed = true; - throw; - } -} - -void RemoteStore::initConnection(Connection& conn) { - /* Send the magic greeting, check for the reply. */ - try { - conn.to << WORKER_MAGIC_1; - conn.to.flush(); - unsigned int magic = readInt(conn.from); - if (magic != WORKER_MAGIC_2) { - throw Error("protocol mismatch"); - } - - conn.from >> conn.daemonVersion; - if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != - GET_PROTOCOL_MAJOR(PROTOCOL_VERSION)) { - throw Error("Nix daemon protocol version not supported"); - } - if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10) { - throw Error("the Nix daemon version is too old"); - } - conn.to << PROTOCOL_VERSION; - - if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 14) { - int cpu = sameMachine() && settings.lockCPU ? 
lockToCurrentCPU() : -1; - if (cpu != -1) { - conn.to << 1 << cpu; - } else { - conn.to << 0; - } - } - - if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11) { - conn.to << 0u; - } - - auto ex = conn.processStderr(); - if (ex) { - std::rethrow_exception(ex); - } - } catch (Error& e) { - throw Error("cannot open connection to remote store '%s': %s", getUri(), - e.what()); - } - - setOptions(conn); -} - -void RemoteStore::setOptions(Connection& conn) { - conn.to << wopSetOptions << static_cast(settings.keepFailed) - << static_cast(settings.keepGoing) - << static_cast(settings.tryFallback) - << /* previously: verbosity = */ 0 << settings.maxBuildJobs - << settings.maxSilentTime << 1u - << /* previously: remote verbosity = */ 0 << 0 // obsolete log type - << 0 /* obsolete print build trace */ - << settings.buildCores - << static_cast(settings.useSubstitutes); - - if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) { - std::map overrides; - globalConfig.getSettings(overrides, true); - overrides.erase(settings.keepFailed.name); - overrides.erase(settings.keepGoing.name); - overrides.erase(settings.tryFallback.name); - overrides.erase(settings.maxBuildJobs.name); - overrides.erase(settings.maxSilentTime.name); - overrides.erase(settings.buildCores.name); - overrides.erase(settings.useSubstitutes.name); - overrides.erase(settings.showTrace.name); - conn.to << overrides.size(); - for (auto& i : overrides) { - conn.to << i.first << i.second.value; - } - } - - auto ex = conn.processStderr(); - if (ex) { - std::rethrow_exception(ex); - } -} - -/* A wrapper around Pool::Handle that marks - the connection as bad (causing it to be closed) if a non-daemon - exception is thrown before the handle is closed. Such an exception - causes a deviation from the expected protocol and therefore a - desynchronization between the client and daemon. */ -struct ConnectionHandle { - Pool::Handle handle; - bool daemonException = false; - - explicit ConnectionHandle(Pool::Handle&& handle) - : handle(std::move(handle)) {} - - ConnectionHandle(ConnectionHandle&& h) : handle(std::move(h.handle)) {} - - ~ConnectionHandle() { - if (!daemonException && (std::uncaught_exceptions() != 0)) { - handle.markBad(); - // TODO(tazjin): are these types of things supposed to be DEBUG? 
- DLOG(INFO) << "closing daemon connection because of an exception"; - } - } - - RemoteStore::Connection* operator->() { return &*handle; } - - void processStderr(Sink* sink = nullptr, Source* source = nullptr) { - auto ex = handle->processStderr(sink, source); - if (ex) { - daemonException = true; - std::rethrow_exception(ex); - } - } -}; - -ConnectionHandle RemoteStore::getConnection() { - return ConnectionHandle(connections->get()); -} - -bool RemoteStore::isValidPathUncached(const Path& path) { - auto conn(getConnection()); - conn->to << wopIsValidPath << path; - conn.processStderr(); - return readInt(conn->from) != 0u; -} - -PathSet RemoteStore::queryValidPaths(const PathSet& paths, - SubstituteFlag maybeSubstitute) { - auto conn(getConnection()); - if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { - PathSet res; - for (auto& i : paths) { - if (isValidPath(i)) { - res.insert(i); - } - } - return res; - } - conn->to << wopQueryValidPaths << paths; - conn.processStderr(); - return readStorePaths(*this, conn->from); -} - -PathSet RemoteStore::queryAllValidPaths() { - auto conn(getConnection()); - conn->to << wopQueryAllValidPaths; - conn.processStderr(); - return readStorePaths(*this, conn->from); -} - -PathSet RemoteStore::querySubstitutablePaths(const PathSet& paths) { - auto conn(getConnection()); - if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { - PathSet res; - for (auto& i : paths) { - conn->to << wopHasSubstitutes << i; - conn.processStderr(); - if (readInt(conn->from) != 0u) { - res.insert(i); - } - } - return res; - } - conn->to << wopQuerySubstitutablePaths << paths; - conn.processStderr(); - return readStorePaths(*this, conn->from); -} - -void RemoteStore::querySubstitutablePathInfos(const PathSet& paths, - SubstitutablePathInfos& infos) { - if (paths.empty()) { - return; - } - - auto conn(getConnection()); - - if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { - for (auto& i : paths) { - SubstitutablePathInfo info; - conn->to << wopQuerySubstitutablePathInfo << i; - conn.processStderr(); - unsigned int reply = readInt(conn->from); - if (reply == 0) { - continue; - } - info.deriver = readString(conn->from); - if (!info.deriver.empty()) { - assertStorePath(info.deriver); - } - info.references = readStorePaths(*this, conn->from); - info.downloadSize = readLongLong(conn->from); - info.narSize = readLongLong(conn->from); - infos[i] = info; - } - - } else { - conn->to << wopQuerySubstitutablePathInfos << paths; - conn.processStderr(); - auto count = readNum(conn->from); - for (size_t n = 0; n < count; n++) { - Path path = readStorePath(*this, conn->from); - SubstitutablePathInfo& info(infos[path]); - info.deriver = readString(conn->from); - if (!info.deriver.empty()) { - assertStorePath(info.deriver); - } - info.references = readStorePaths(*this, conn->from); - info.downloadSize = readLongLong(conn->from); - info.narSize = readLongLong(conn->from); - } - } -} - -void RemoteStore::queryPathInfoUncached( - const Path& path, - Callback> callback) noexcept { - try { - std::shared_ptr info; - { - auto conn(getConnection()); - conn->to << wopQueryPathInfo << path; - try { - conn.processStderr(); - } catch (Error& e) { - // Ugly backwards compatibility hack. 
- if (e.msg().find("is not valid") != std::string::npos) { - throw InvalidPath(e.what()); - } - throw; - } - if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) { - bool valid; - conn->from >> valid; - if (!valid) { - throw InvalidPath(format("path '%s' is not valid") % path); - } - } - info = std::make_shared(); - info->path = path; - info->deriver = readString(conn->from); - if (!info->deriver.empty()) { - assertStorePath(info->deriver); - } - auto hash_ = Hash::deserialize(readString(conn->from), htSHA256); - info->narHash = Hash::unwrap_throw(hash_); - info->references = readStorePaths(*this, conn->from); - conn->from >> info->registrationTime >> info->narSize; - if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) { - conn->from >> info->ultimate; - info->sigs = readStrings(conn->from); - conn->from >> info->ca; - } - } - callback(std::move(info)); - } catch (...) { - callback.rethrow(); - } -} - -void RemoteStore::queryReferrers(const Path& path, PathSet& referrers) { - auto conn(getConnection()); - conn->to << wopQueryReferrers << path; - conn.processStderr(); - auto referrers2 = readStorePaths(*this, conn->from); - referrers.insert(referrers2.begin(), referrers2.end()); -} - -PathSet RemoteStore::queryValidDerivers(const Path& path) { - auto conn(getConnection()); - conn->to << wopQueryValidDerivers << path; - conn.processStderr(); - return readStorePaths(*this, conn->from); -} - -PathSet RemoteStore::queryDerivationOutputs(const Path& path) { - auto conn(getConnection()); - conn->to << wopQueryDerivationOutputs << path; - conn.processStderr(); - return readStorePaths(*this, conn->from); -} - -PathSet RemoteStore::queryDerivationOutputNames(const Path& path) { - auto conn(getConnection()); - conn->to << wopQueryDerivationOutputNames << path; - conn.processStderr(); - return readStrings(conn->from); -} - -Path RemoteStore::queryPathFromHashPart(const std::string& hashPart) { - auto conn(getConnection()); - conn->to << wopQueryPathFromHashPart << hashPart; - conn.processStderr(); - Path path = readString(conn->from); - if (!path.empty()) { - assertStorePath(path); - } - return path; -} - -void RemoteStore::addToStore(const ValidPathInfo& info, Source& source, - RepairFlag repair, CheckSigsFlag checkSigs, - std::shared_ptr accessor) { - auto conn(getConnection()); - - if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) { - conn->to << wopImportPaths; - - auto source2 = sinkToSource([&](Sink& sink) { - sink << 1 // == path follows - ; - copyNAR(source, sink); - sink << exportMagic << info.path << info.references << info.deriver - << 0 // == no legacy signature - << 0 // == no path follows - ; - }); - - conn.processStderr(nullptr, source2.get()); - - auto importedPaths = readStorePaths(*this, conn->from); - assert(importedPaths.size() <= 1); - } - - else { - conn->to << wopAddToStoreNar << info.path << info.deriver - << info.narHash.to_string(Base16, false) << info.references - << info.registrationTime << info.narSize << info.ultimate - << info.sigs << info.ca << repair << !checkSigs; - bool tunnel = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21; - if (!tunnel) { - copyNAR(source, conn->to); - } - conn.processStderr(nullptr, tunnel ? 
&source : nullptr); - } -} - -Path RemoteStore::addToStore(const std::string& name, const Path& _srcPath, - bool recursive, HashType hashAlgo, - PathFilter& filter, RepairFlag repair) { - if (repair != 0u) { - throw Error( - "repairing is not supported when building through the Nix daemon"); - } - - auto conn(getConnection()); - - Path srcPath(absPath(_srcPath)); - - conn->to << wopAddToStore << name - << ((hashAlgo == htSHA256 && recursive) - ? 0 - : 1) /* backwards compatibility hack */ - << (recursive ? 1 : 0) << printHashType(hashAlgo); - - try { - conn->to.written = 0; - conn->to.warn = true; - connections->incCapacity(); - { - Finally cleanup([&]() { connections->decCapacity(); }); - dumpPath(srcPath, conn->to, filter); - } - conn->to.warn = false; - conn.processStderr(); - } catch (SysError& e) { - /* Daemon closed while we were sending the path. Probably OOM - or I/O error. */ - if (e.errNo == EPIPE) { - try { - conn.processStderr(); - } catch (EndOfFile& e) { - } - } - throw; - } - - return readStorePath(*this, conn->from); -} - -Path RemoteStore::addTextToStore(const std::string& name, const std::string& s, - const PathSet& references, RepairFlag repair) { - if (repair != 0u) { - throw Error( - "repairing is not supported when building through the Nix daemon"); - } - - auto conn(getConnection()); - conn->to << wopAddTextToStore << name << s << references; - - conn.processStderr(); - return readStorePath(*this, conn->from); -} - -absl::Status RemoteStore::buildPaths(std::ostream& /* log_sink */, - const PathSet& drvPaths, - BuildMode build_mode) { - auto conn(getConnection()); - conn->to << wopBuildPaths; - if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) { - conn->to << drvPaths; - if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15) { - conn->to << build_mode; - } else if (build_mode != bmNormal) { - /* Old daemons did not take a 'buildMode' parameter, so we - need to validate it here on the client side. */ - return absl::Status( - absl::StatusCode::kInvalidArgument, - "repairing or checking is not supported when building through the " - "Nix daemon"); - } - } else { - /* For backwards compatibility with old daemons, strip output - identifiers. 
*/ - PathSet drvPaths2; - for (auto& i : drvPaths) { - drvPaths2.insert(std::string(i, 0, i.find('!'))); - } - conn->to << drvPaths2; - } - conn.processStderr(); - readInt(conn->from); - - return absl::OkStatus(); -} - -BuildResult RemoteStore::buildDerivation(std::ostream& /*log_sink*/, - const Path& drvPath, - const BasicDerivation& drv, - BuildMode buildMode) { - auto conn(getConnection()); - conn->to << wopBuildDerivation << drvPath << drv << buildMode; - conn.processStderr(); - BuildResult res; - unsigned int status; - conn->from >> status >> res.errorMsg; - res.status = static_cast(status); - return res; -} - -void RemoteStore::ensurePath(const Path& path) { - auto conn(getConnection()); - conn->to << wopEnsurePath << path; - conn.processStderr(); - readInt(conn->from); -} - -void RemoteStore::addTempRoot(const Path& path) { - auto conn(getConnection()); - conn->to << wopAddTempRoot << path; - conn.processStderr(); - readInt(conn->from); -} - -void RemoteStore::addIndirectRoot(const Path& path) { - auto conn(getConnection()); - conn->to << wopAddIndirectRoot << path; - conn.processStderr(); - readInt(conn->from); -} - -void RemoteStore::syncWithGC() { - auto conn(getConnection()); - conn->to << wopSyncWithGC; - conn.processStderr(); - readInt(conn->from); -} - -Roots RemoteStore::findRoots(bool censor) { - auto conn(getConnection()); - conn->to << wopFindRoots; - conn.processStderr(); - auto count = readNum(conn->from); - Roots result; - while ((count--) != 0u) { - Path link = readString(conn->from); - Path target = readStorePath(*this, conn->from); - result[target].emplace(link); - } - return result; -} - -void RemoteStore::collectGarbage(const GCOptions& options, GCResults& results) { - auto conn(getConnection()); - - conn->to << wopCollectGarbage << options.action << options.pathsToDelete - << static_cast(options.ignoreLiveness) - << options.maxFreed - /* removed options */ - << 0 << 0 << 0; - - conn.processStderr(); - - results.paths = readStrings(conn->from); - results.bytesFreed = readLongLong(conn->from); - readLongLong(conn->from); // obsolete - - { - auto state_(Store::state.lock()); - state_->pathInfoCache.clear(); - } -} - -void RemoteStore::optimiseStore() { - auto conn(getConnection()); - conn->to << wopOptimiseStore; - conn.processStderr(); - readInt(conn->from); -} - -bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair) { - auto conn(getConnection()); - conn->to << wopVerifyStore << static_cast(checkContents) << repair; - conn.processStderr(); - return readInt(conn->from) != 0u; -} - -void RemoteStore::addSignatures(const Path& storePath, const StringSet& sigs) { - auto conn(getConnection()); - conn->to << wopAddSignatures << storePath << sigs; - conn.processStderr(); - readInt(conn->from); -} - -void RemoteStore::queryMissing(const PathSet& targets, PathSet& willBuild, - PathSet& willSubstitute, PathSet& unknown, - unsigned long long& downloadSize, - unsigned long long& narSize) { - { - auto conn(getConnection()); - if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19) { - // Don't hold the connection handle in the fallback case - // to prevent a deadlock. 
- goto fallback; - } - conn->to << wopQueryMissing << targets; - conn.processStderr(); - willBuild = readStorePaths(*this, conn->from); - willSubstitute = readStorePaths(*this, conn->from); - unknown = readStorePaths(*this, conn->from); - conn->from >> downloadSize >> narSize; - return; - } - -fallback: - return Store::queryMissing(targets, willBuild, willSubstitute, unknown, - downloadSize, narSize); -} - -void RemoteStore::connect() { auto conn(getConnection()); } - -unsigned int RemoteStore::getProtocol() { - auto conn(connections->get()); - return conn->daemonVersion; -} - -void RemoteStore::flushBadConnections() { connections->flushBad(); } - -RemoteStore::Connection::~Connection() { - try { - to.flush(); - } catch (...) { - ignoreException(); - } -} - -std::exception_ptr RemoteStore::Connection::processStderr(Sink* sink, - Source* source) { - to.flush(); - - while (true) { - auto msg = readNum(from); - - if (msg == STDERR_WRITE) { - std::string s = readString(from); - if (sink == nullptr) { - throw Error("no sink"); - } - (*sink)(s); - } - - else if (msg == STDERR_READ) { - if (source == nullptr) { - throw Error("no source"); - } - auto len = readNum(from); - auto buf = std::make_unique(len); - writeString(buf.get(), source->read(buf.get(), len), to); - to.flush(); - } - - else if (msg == STDERR_ERROR) { - std::string error = readString(from); - unsigned int status = readInt(from); - return std::make_exception_ptr(Error(status, error)); - } - - else if (msg == STDERR_NEXT) { - LOG(ERROR) << absl::StripTrailingAsciiWhitespace(readString(from)); - } - - else if (msg == STDERR_START_ACTIVITY) { - LOG(INFO) << readString(from); - } - - else if (msg == STDERR_LAST) { - break; - } - - else { - throw Error("got unknown message type %x from Nix daemon", msg); - } - } - - return nullptr; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/remote-store.hh b/third_party/nix/src/libstore/remote-store.hh deleted file mode 100644 index c360055b6e..0000000000 --- a/third_party/nix/src/libstore/remote-store.hh +++ /dev/null @@ -1,141 +0,0 @@ -#pragma once - -#include -#include - -#include "libstore/store-api.hh" - -namespace nix { - -class Pipe; -class Pid; -struct FdSink; -struct FdSource; -template -class Pool; -struct ConnectionHandle; - -/* FIXME: RemoteStore is a misnomer - should be something like - DaemonStore. */ -class RemoteStore : public virtual Store { - public: - const Setting maxConnections{ - (Store*)this, 1, "max-connections", - "maximum number of concurrent connections to the Nix daemon"}; - - const Setting maxConnectionAge{ - (Store*)this, std::numeric_limits::max(), - "max-connection-age", "number of seconds to reuse a connection"}; - - virtual bool sameMachine() = 0; - - RemoteStore(const Params& params); - - /* Implementations of abstract store API methods. 
*/ - - bool isValidPathUncached(const Path& path) override; - - PathSet queryValidPaths(const PathSet& paths, SubstituteFlag maybeSubstitute = - NoSubstitute) override; - - PathSet queryAllValidPaths() override; - - void queryPathInfoUncached( - const Path& path, - Callback> callback) noexcept override; - - void queryReferrers(const Path& path, PathSet& referrers) override; - - PathSet queryValidDerivers(const Path& path) override; - - PathSet queryDerivationOutputs(const Path& path) override; - - StringSet queryDerivationOutputNames(const Path& path) override; - - Path queryPathFromHashPart(const std::string& hashPart) override; - - PathSet querySubstitutablePaths(const PathSet& paths) override; - - void querySubstitutablePathInfos(const PathSet& paths, - SubstitutablePathInfos& infos) override; - - void addToStore(const ValidPathInfo& info, Source& source, RepairFlag repair, - CheckSigsFlag checkSigs, - std::shared_ptr accessor) override; - - Path addToStore(const std::string& name, const Path& srcPath, - bool recursive = true, HashType hashAlgo = htSHA256, - PathFilter& filter = defaultPathFilter, - RepairFlag repair = NoRepair) override; - - Path addTextToStore(const std::string& name, const std::string& s, - const PathSet& references, RepairFlag repair) override; - - absl::Status buildPaths(std::ostream& log_sink, const PathSet& paths, - BuildMode build_mode) override; - - BuildResult buildDerivation(std::ostream& log_sink, const Path& drvPath, - const BasicDerivation& drv, - BuildMode buildMode) override; - - void ensurePath(const Path& path) override; - - void addTempRoot(const Path& path) override; - - void addIndirectRoot(const Path& path) override; - - void syncWithGC() override; - - Roots findRoots(bool censor) override; - - void collectGarbage(const GCOptions& options, GCResults& results) override; - - void optimiseStore() override; - - bool verifyStore(bool checkContents, RepairFlag repair) override; - - void addSignatures(const Path& storePath, const StringSet& sigs) override; - - void queryMissing(const PathSet& targets, PathSet& willBuild, - PathSet& willSubstitute, PathSet& unknown, - unsigned long long& downloadSize, - unsigned long long& narSize) override; - - void connect() override; - - unsigned int getProtocol() override; - - void flushBadConnections(); - - protected: - struct Connection { - AutoCloseFD fd; - FdSink to; - FdSource from; - unsigned int daemonVersion; - std::chrono::time_point startTime; - - virtual ~Connection(); - - std::exception_ptr processStderr(Sink* sink = 0, Source* source = 0); - }; - - ref openConnectionWrapper(); - - virtual ref openConnection() = 0; - - void initConnection(Connection& conn); - - ref> connections; - - virtual void setOptions(Connection& conn); - - ConnectionHandle getConnection(); - - friend struct ConnectionHandle; - - private: - std::atomic_bool failed{false}; -}; - -} // namespace nix diff --git a/third_party/nix/src/libstore/rpc-store.cc b/third_party/nix/src/libstore/rpc-store.cc deleted file mode 100644 index c29bd059de..0000000000 --- a/third_party/nix/src/libstore/rpc-store.cc +++ /dev/null @@ -1,549 +0,0 @@ -#include "rpc-store.hh" - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libproto/worker.grpc.pb.h" -#include "libproto/worker.pb.h" -#include "libstore/derivations.hh" -#include "libstore/store-api.hh" -#include 
"libstore/worker-protocol.hh" -#include "libutil/archive.hh" -#include "libutil/hash.hh" -#include "libutil/proto.hh" -#include "libutil/types.hh" - -namespace nix { - -namespace store { - -// Should be set to the bandwidth delay product between the client and the -// daemon. The current value, which should eventually be determined dynamically, -// has currently been set to a developer's deskop computer, rounded up -constexpr size_t kChunkSize = 1024 * 64; - -using google::protobuf::util::TimeUtil; -using grpc::ClientContext; -using nix::proto::WorkerService; - -static google::protobuf::Empty kEmpty; - -template -class RPCSink : public BufferedSink { - public: - using Writer = grpc::ClientWriter; - explicit RPCSink(std::unique_ptr&& writer) - : writer_(std::move(writer)), good_(true) {} - - bool good() override { return good_; } - - void write(const unsigned char* data, size_t len) override { - Request req; - req.set_data(data, len); - if (!writer_->Write(req)) { - good_ = false; - } - } - - ~RPCSink() override { flush(); } - - grpc::Status Finish() { - flush(); - return writer_->Finish(); - } - - private: - std::unique_ptr writer_; - bool good_; -}; - -// TODO(grfn): Obviously this should go away and be replaced by StatusOr... but -// that would require refactoring the entire store api, which we don't feel like -// doing right now. We should at some point though -void const RpcStore::SuccessOrThrow(const grpc::Status& status, - const absl::string_view& call) const { - if (!status.ok()) { - auto uri = uri_.value_or("unknown URI"); - switch (status.error_code()) { - case grpc::StatusCode::UNIMPLEMENTED: - throw Unsupported( - absl::StrFormat("operation %s is not supported by store at %s: %s", - call, uri, status.error_message())); - default: - throw Error(absl::StrFormat( - "Rpc call %s to %s failed (%s): %s ", call, uri, - util::proto::GRPCStatusCodeDescription(status.error_code()), - status.error_message())); - } - } -} - -bool RpcStore::isValidPathUncached(const Path& path) { - ClientContext ctx; - proto::IsValidPathResponse resp; - SuccessOrThrow(stub_->IsValidPath(&ctx, util::proto::StorePath(path), &resp), - __FUNCTION__); - return resp.is_valid(); -} - -PathSet RpcStore::queryAllValidPaths() { - ClientContext ctx; - proto::StorePaths paths; - SuccessOrThrow(stub_->QueryAllValidPaths(&ctx, kEmpty, &paths), __FUNCTION__); - return util::proto::FillFrom(paths.paths()); -} - -PathSet RpcStore::queryValidPaths(const PathSet& paths, - SubstituteFlag maybeSubstitute) { - ClientContext ctx; - proto::StorePaths store_paths; - for (const auto& path : paths) { - store_paths.add_paths(path); - } - proto::StorePaths result_paths; - SuccessOrThrow(stub_->QueryValidPaths(&ctx, store_paths, &result_paths), - __FUNCTION__); - return util::proto::FillFrom(result_paths.paths()); -} - -void RpcStore::queryPathInfoUncached( - const Path& path, - Callback> callback) noexcept { - ClientContext ctx; - proto::StorePath store_path; - store_path.set_path(path); - - try { - proto::PathInfo path_info; - auto result = stub_->QueryPathInfo(&ctx, store_path, &path_info); - if (result.error_code() == grpc::INVALID_ARGUMENT) { - throw InvalidPath(absl::StrFormat("path '%s' is not valid", path)); - } - SuccessOrThrow(result); - - std::shared_ptr info; - - if (!path_info.is_valid()) { - throw InvalidPath(absl::StrFormat("path '%s' is not valid", path)); - } - - info = std::make_shared(); - info->path = path; - info->deriver = path_info.deriver().path(); - if (!info->deriver.empty()) { - assertStorePath(info->deriver); 
- } - auto hash_ = Hash::deserialize(path_info.nar_hash(), htSHA256); - info->narHash = Hash::unwrap_throw(hash_); - info->references.insert(path_info.references().begin(), - path_info.references().end()); - info->registrationTime = - TimeUtil::TimestampToTimeT(path_info.registration_time()); - info->narSize = path_info.nar_size(); - info->ultimate = path_info.ultimate(); - info->sigs.insert(path_info.sigs().begin(), path_info.sigs().end()); - info->ca = path_info.ca(); - - callback(std::move(info)); - } catch (...) { - callback.rethrow(); - } -} - -void RpcStore::queryReferrers(const Path& path, PathSet& referrers) { - ClientContext ctx; - proto::StorePaths paths; - SuccessOrThrow( - stub_->QueryReferrers(&ctx, util::proto::StorePath(path), &paths), - __FUNCTION__); - referrers.insert(paths.paths().begin(), paths.paths().end()); -} - -PathSet RpcStore::queryValidDerivers(const Path& path) { - ClientContext ctx; - proto::StorePaths paths; - SuccessOrThrow( - stub_->QueryValidDerivers(&ctx, util::proto::StorePath(path), &paths), - __FUNCTION__); - return util::proto::FillFrom(paths.paths()); -} - -PathSet RpcStore::queryDerivationOutputs(const Path& path) { - ClientContext ctx; - proto::StorePaths paths; - SuccessOrThrow( - stub_->QueryDerivationOutputs(&ctx, util::proto::StorePath(path), &paths), - __FUNCTION__); - return util::proto::FillFrom(paths.paths()); -} - -StringSet RpcStore::queryDerivationOutputNames(const Path& path) { - ClientContext ctx; - proto::DerivationOutputNames output_names; - SuccessOrThrow(stub_->QueryDerivationOutputNames( - &ctx, util::proto::StorePath(path), &output_names)); - return util::proto::FillFrom(output_names.names()); -} - -Path RpcStore::queryPathFromHashPart(const std::string& hashPart) { - ClientContext ctx; - proto::StorePath path; - proto::HashPart proto_hash_part; - proto_hash_part.set_hash_part(hashPart); - SuccessOrThrow(stub_->QueryPathFromHashPart(&ctx, proto_hash_part, &path), - __FUNCTION__); - return path.path(); -} - -PathSet RpcStore::querySubstitutablePaths(const PathSet& paths) { - ClientContext ctx; - proto::StorePaths result; - SuccessOrThrow(stub_->QuerySubstitutablePaths( - &ctx, util::proto::StorePaths(paths), &result)); - return util::proto::FillFrom(result.paths()); -} - -void RpcStore::querySubstitutablePathInfos(const PathSet& paths, - SubstitutablePathInfos& infos) { - ClientContext ctx; - proto::SubstitutablePathInfos result; - SuccessOrThrow(stub_->QuerySubstitutablePathInfos( - &ctx, util::proto::StorePaths(paths), &result)); - - for (const auto& path_info : result.path_infos()) { - auto path = path_info.path().path(); - SubstitutablePathInfo& info(infos[path]); - info.deriver = path_info.deriver().path(); - if (!info.deriver.empty()) { - assertStorePath(info.deriver); - } - info.references = util::proto::FillFrom(path_info.references()); - info.downloadSize = path_info.download_size(); - info.narSize = path_info.nar_size(); - } -} - -void RpcStore::addToStore(const ValidPathInfo& info, Source& narSource, - RepairFlag repair, CheckSigsFlag checkSigs, - std::shared_ptr accessor) { - ClientContext ctx; - google::protobuf::Empty response; - auto writer = stub_->AddToStoreNar(&ctx, &response); - - proto::AddToStoreNarRequest path_info_req; - path_info_req.mutable_path_info()->mutable_path()->set_path(info.path); - path_info_req.mutable_path_info()->mutable_deriver()->set_path(info.deriver); - path_info_req.mutable_path_info()->set_nar_hash( - info.narHash.to_string(Base16, false)); - for (const auto& ref : info.references) { - 
path_info_req.mutable_path_info()->add_references(ref); - } - *path_info_req.mutable_path_info()->mutable_registration_time() = - TimeUtil::TimeTToTimestamp(info.registrationTime); - path_info_req.mutable_path_info()->set_nar_size(info.narSize); - path_info_req.mutable_path_info()->set_ultimate(info.ultimate); - for (const auto& sig : info.sigs) { - path_info_req.mutable_path_info()->add_sigs(sig); - } - path_info_req.mutable_path_info()->set_ca(info.ca); - path_info_req.mutable_path_info()->set_repair(repair); - path_info_req.mutable_path_info()->set_check_sigs(checkSigs); - - if (!writer->Write(path_info_req)) { - throw Error("Could not write to nix daemon"); - } - - RPCSink sink(std::move(writer)); - copyNAR(narSource, sink); - SuccessOrThrow(sink.Finish(), __FUNCTION__); -} - -Path RpcStore::addToStore(const std::string& name, const Path& srcPath, - bool recursive, HashType hashAlgo, PathFilter& filter, - RepairFlag repair) { - if (repair != 0u) { - throw Error( - "repairing is not supported when building through the Nix daemon"); - } - - ClientContext ctx; - proto::StorePath response; - auto writer = stub_->AddToStore(&ctx, &response); - - proto::AddToStoreRequest metadata_req; - metadata_req.mutable_meta()->set_base_name(name); - // TODO(grfn): what is fixed? - metadata_req.mutable_meta()->set_fixed(!(hashAlgo == htSHA256 && recursive)); - metadata_req.mutable_meta()->set_recursive(recursive); - metadata_req.mutable_meta()->set_hash_type(HashTypeToProto(hashAlgo)); - - if (!writer->Write(metadata_req)) { - throw Error("Could not write to nix daemon"); - } - - RPCSink sink(std::move(writer)); - dumpPath(std::filesystem::absolute(srcPath), sink); - sink.flush(); - SuccessOrThrow(sink.Finish(), __FUNCTION__); - - return response.path(); -} - -Path RpcStore::addTextToStore(const std::string& name, - const std::string& content, - const PathSet& references, RepairFlag repair) { - if (repair != 0u) { - throw Error( - "repairing is not supported when building through the Nix daemon"); - } - ClientContext ctx; - proto::StorePath result; - auto writer = stub_->AddTextToStore(&ctx, &result); - - proto::AddTextToStoreRequest meta; - meta.mutable_meta()->set_name(name); - meta.mutable_meta()->set_size(content.size()); - for (const auto& ref : references) { - meta.mutable_meta()->add_references(ref); - } - writer->Write(meta); - - for (int i = 0; i <= content.size(); i += kChunkSize) { - auto len = std::min(kChunkSize, content.size() - i); - proto::AddTextToStoreRequest data; - data.set_data(content.data() + i, len); - if (!writer->Write(data)) { - // Finish() below will error - break; - } - } - - writer->WritesDone(); - SuccessOrThrow(writer->Finish(), __FUNCTION__); - return result.path(); -} - -absl::Status RpcStore::buildPaths(std::ostream& log_sink, const PathSet& paths, - BuildMode build_mode) { - ClientContext ctx; - proto::BuildPathsRequest request; - for (const auto& path : paths) { - request.add_drvs(path); - } - - google::protobuf::Empty response; - request.set_mode(nix::BuildModeToProto(build_mode)); - - std::unique_ptr> reader = - stub_->BuildPaths(&ctx, request); - - proto::BuildEvent event; - while (reader->Read(&event)) { - if (event.has_build_log()) { - // TODO(tazjin): Include .path()? 
- log_sink << event.build_log().line(); - } else { - log_sink << "Building path: " << event.building_path().path() - << std::endl; - } - - // has_result() is not in use in this call (for now) - } - - return nix::util::proto::GRPCStatusToAbsl(reader->Finish()); -} - -BuildResult RpcStore::buildDerivation(std::ostream& log_sink, - const Path& drvPath, - const BasicDerivation& drv, - BuildMode buildMode) { - ClientContext ctx; - proto::BuildDerivationRequest request; - request.mutable_drv_path()->set_path(drvPath); - proto::Derivation proto_drv = drv.to_proto(); - *request.mutable_derivation() = proto_drv; - request.set_build_mode(BuildModeToProto(buildMode)); - - std::unique_ptr> reader = - stub_->BuildDerivation(&ctx, request); - - std::optional result; - - proto::BuildEvent event; - while (reader->Read(&event)) { - if (event.has_build_log()) { - log_sink << event.build_log().line(); - } else if (event.has_result()) { - result = BuildResult::FromProto(event.result()); - } - } - SuccessOrThrow(reader->Finish(), __FUNCTION__); - - if (!result.has_value()) { - throw Error("Invalid response from daemon for buildDerivation"); - } - return result.value(); -} - -void RpcStore::ensurePath(const Path& path) { - ClientContext ctx; - google::protobuf::Empty response; - SuccessOrThrow( - stub_->EnsurePath(&ctx, util::proto::StorePath(path), &response), - __FUNCTION__); -} - -void RpcStore::addTempRoot(const Path& path) { - ClientContext ctx; - google::protobuf::Empty response; - SuccessOrThrow( - stub_->AddTempRoot(&ctx, util::proto::StorePath(path), &response), - __FUNCTION__); -} - -void RpcStore::addIndirectRoot(const Path& path) { - ClientContext ctx; - google::protobuf::Empty response; - SuccessOrThrow( - stub_->AddIndirectRoot(&ctx, util::proto::StorePath(path), &response), - __FUNCTION__); -} - -void RpcStore::syncWithGC() { - ClientContext ctx; - google::protobuf::Empty response; - SuccessOrThrow(stub_->SyncWithGC(&ctx, kEmpty, &response), __FUNCTION__); -} - -Roots RpcStore::findRoots(bool censor) { - ClientContext ctx; - proto::FindRootsResponse response; - SuccessOrThrow(stub_->FindRoots(&ctx, kEmpty, &response), __FUNCTION__); - Roots result; - - for (const auto& [target, links] : response.roots()) { - auto link_paths = - util::proto::FillFrom>(links.paths()); - result.insert({target, link_paths}); - } - - return result; -} - -void RpcStore::collectGarbage(const GCOptions& options, GCResults& results) { - ClientContext ctx; - proto::CollectGarbageRequest request; - request.set_action(options.ActionToProto()); - for (const auto& path : options.pathsToDelete) { - request.add_paths_to_delete(path); - } - request.set_ignore_liveness(options.ignoreLiveness); - request.set_max_freed(options.maxFreed); - - proto::CollectGarbageResponse response; - SuccessOrThrow(stub_->CollectGarbage(&ctx, request, &response), __FUNCTION__); - - for (const auto& path : response.deleted_paths()) { - results.paths.insert(path); - } - results.bytesFreed = response.bytes_freed(); -} - -void RpcStore::optimiseStore() { - ClientContext ctx; - google::protobuf::Empty response; - SuccessOrThrow(stub_->OptimiseStore(&ctx, kEmpty, &response), __FUNCTION__); -} - -bool RpcStore::verifyStore(bool checkContents, RepairFlag repair) { - ClientContext ctx; - proto::VerifyStoreRequest request; - request.set_check_contents(checkContents); - request.set_repair(repair); - proto::VerifyStoreResponse response; - SuccessOrThrow(stub_->VerifyStore(&ctx, request, &response), __FUNCTION__); - return response.errors(); -} - -void 
RpcStore::addSignatures(const Path& storePath, const StringSet& sigs) { - ClientContext ctx; - proto::AddSignaturesRequest request; - request.mutable_path()->set_path(storePath); - for (const auto& sig : sigs) { - request.mutable_sigs()->add_sigs(sig); - } - google::protobuf::Empty response; - SuccessOrThrow(stub_->AddSignatures(&ctx, request, &response), __FUNCTION__); -} - -void RpcStore::queryMissing(const PathSet& targets, PathSet& willBuild, - PathSet& willSubstitute, PathSet& unknown, - unsigned long long& downloadSize, - unsigned long long& narSize) { - ClientContext ctx; - proto::QueryMissingResponse response; - SuccessOrThrow( - stub_->QueryMissing(&ctx, util::proto::StorePaths(targets), &response), - __FUNCTION__); - - willBuild = util::proto::FillFrom(response.will_build()); - willSubstitute = util::proto::FillFrom(response.will_substitute()); - unknown = util::proto::FillFrom(response.unknown()); - downloadSize = response.download_size(); - narSize = response.nar_size(); -} - -std::shared_ptr RpcStore::getBuildLog(const Path& path) { - ClientContext ctx; - proto::BuildLog response; - SuccessOrThrow( - stub_->GetBuildLog(&ctx, util::proto::StorePath(path), &response), - __FUNCTION__); - - auto build_log = response.build_log(); - if (build_log.empty()) { - return nullptr; - } - return std::make_shared(build_log); -} - -unsigned int RpcStore::getProtocol() { return PROTOCOL_VERSION; } - -} // namespace store - -constexpr std::string_view kUriScheme = "unix://"; - -// TODO(grfn): Make this a function that we call from main rather than... this -static RegisterStoreImplementation regStore([](const std::string& uri, - const Store::Params& params) - -> std::shared_ptr { - if (std::string(uri, 0, kUriScheme.size()) != kUriScheme) { - return nullptr; - } - auto channel = grpc::CreateChannel(uri, grpc::InsecureChannelCredentials()); - return std::make_shared( - uri, params, proto::WorkerService::NewStub(channel)); -}); - -} // namespace nix diff --git a/third_party/nix/src/libstore/rpc-store.hh b/third_party/nix/src/libstore/rpc-store.hh deleted file mode 100644 index 679ceac7af..0000000000 --- a/third_party/nix/src/libstore/rpc-store.hh +++ /dev/null @@ -1,129 +0,0 @@ -#pragma once - -#include - -#include "libproto/worker.grpc.pb.h" -#include "libproto/worker.pb.h" -#include "libstore/remote-store.hh" -#include "libstore/store-api.hh" - -namespace nix::store { - -// TODO(grfn): Currently, since the RPCStore is only used for the connection to -// the nix daemon over a unix socket, it inherits from the LocalFSStore because it -// shares a filesystem with the daemon. This will not always be the case, at -// which point we should tease these two things apart.
-class RpcStore : public LocalFSStore, public virtual Store { - public: - RpcStore(const Params& params, - std::unique_ptr stub) - : Store(params), LocalFSStore(params), stub_(std::move(stub)) {} - - RpcStore(std::string uri, const Params& params, - std::unique_ptr stub) - : Store(params), - LocalFSStore(params), - uri_(uri), - stub_(std::move(stub)) {} - - std::string getUri() override { - if (uri_.has_value()) { - return uri_.value(); - } else { - return "daemon"; - } - }; - - virtual PathSet queryAllValidPaths() override; - - virtual void queryReferrers(const Path& path, PathSet& referrers) override; - - virtual PathSet queryValidDerivers(const Path& path) override; - - virtual PathSet queryDerivationOutputs(const Path& path) override; - - virtual StringSet queryDerivationOutputNames(const Path& path) override; - - virtual Path queryPathFromHashPart(const std::string& hashPart) override; - - virtual PathSet querySubstitutablePaths(const PathSet& paths) override; - - virtual void querySubstitutablePathInfos( - const PathSet& paths, SubstitutablePathInfos& infos) override; - - virtual bool wantMassQuery() override { return true; } - - virtual void addToStore(const ValidPathInfo& info, Source& narSource, - RepairFlag repair = NoRepair, - CheckSigsFlag checkSigs = CheckSigs, - std::shared_ptr accessor = 0) override; - - virtual Path addToStore(const std::string& name, const Path& srcPath, - bool recursive = true, HashType hashAlgo = htSHA256, - PathFilter& filter = defaultPathFilter, - RepairFlag repair = NoRepair) override; - - virtual Path addTextToStore(const std::string& name, const std::string& s, - const PathSet& references, - RepairFlag repair = NoRepair) override; - - absl::Status buildPaths(std::ostream& log_sink, const PathSet& paths, - BuildMode build_mode) override; - - virtual BuildResult buildDerivation(std::ostream& log_sink, - const Path& drvPath, - const BasicDerivation& drv, - BuildMode buildMode) override; - - virtual void ensurePath(const Path& path) override; - - virtual void addTempRoot(const Path& path) override; - - virtual void addIndirectRoot(const Path& path) override; - - virtual void syncWithGC() override; - - virtual Roots findRoots(bool censor) override; - - virtual void collectGarbage(const GCOptions& options, - GCResults& results) override; - - virtual void optimiseStore() override; - - virtual bool verifyStore(bool checkContents, - RepairFlag repair = NoRepair) override; - - virtual void addSignatures(const Path& storePath, - const StringSet& sigs) override; - - virtual void queryMissing(const PathSet& targets, PathSet& willBuild, - PathSet& willSubstitute, PathSet& unknown, - unsigned long long& downloadSize, - unsigned long long& narSize) override; - - virtual std::shared_ptr getBuildLog(const Path& path) override; - - void connect() override{}; - - virtual unsigned int getProtocol() override; - - protected: - virtual bool isValidPathUncached(const Path& path) override; - - virtual PathSet queryValidPaths( - const PathSet& paths, - SubstituteFlag maybeSubstitute = NoSubstitute) override; - - virtual void queryPathInfoUncached( - const Path& path, - Callback> callback) noexcept override; - - private: - std::optional uri_; - std::unique_ptr stub_; - - void const SuccessOrThrow(const grpc::Status& status, - const absl::string_view& call = "") const; -}; - -} // namespace nix::store diff --git a/third_party/nix/src/libstore/s3-binary-cache-store.cc b/third_party/nix/src/libstore/s3-binary-cache-store.cc deleted file mode 100644 index 0c13039b52..0000000000 
--- a/third_party/nix/src/libstore/s3-binary-cache-store.cc +++ /dev/null @@ -1,431 +0,0 @@ -#if ENABLE_S3 - -#include "libstore/s3-binary-cache-store.hh" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libstore/download.hh" -#include "libstore/globals.hh" -#include "libstore/nar-info-disk-cache.hh" -#include "libstore/nar-info.hh" -#include "libstore/s3.hh" -#include "libutil/compression.hh" -#include "libutil/istringstream_nocopy.hh" - -using namespace Aws::Transfer; - -namespace nix { - -struct S3Error : public Error { - Aws::S3::S3Errors err; - S3Error(Aws::S3::S3Errors err, const FormatOrString& fs) - : Error(fs), err(err){}; -}; - -/* Helper: given an Outcome, return R in case of success, or - throw an exception in case of an error. */ -template -R&& checkAws(const FormatOrString& fs, Aws::Utils::Outcome&& outcome) { - if (!outcome.IsSuccess()) - throw S3Error(outcome.GetError().GetErrorType(), - fs.s + ": " + outcome.GetError().GetMessage()); - return outcome.GetResultWithOwnership(); -} - -class AwsLogger : public Aws::Utils::Logging::FormattedLogSystem { - using Aws::Utils::Logging::FormattedLogSystem::FormattedLogSystem; - - void ProcessFormattedStatement(Aws::String&& statement) override { - debug("AWS: %s", absl::StripTrailingAsciiWhitespace(statement)); - } -}; - -static void initAWS() { - static std::once_flag flag; - std::call_once(flag, []() { - Aws::SDKOptions options; - - /* We install our own OpenSSL locking function (see - shared.cc), so don't let aws-sdk-cpp override it. */ - options.cryptoOptions.initAndCleanupOpenSSL = false; - - if (verbosity >= lvlDebug) { - options.loggingOptions.logLevel = - verbosity == lvlDebug ? Aws::Utils::Logging::LogLevel::Debug - : Aws::Utils::Logging::LogLevel::Trace; - options.loggingOptions.logger_create_fn = [options]() { - return std::make_shared(options.loggingOptions.logLevel); - }; - } - - Aws::InitAPI(options); - }); -} - -S3Helper::S3Helper(const std::string& profile, const std::string& region, - const std::string& scheme, const std::string& endpoint) - : config(makeConfig(region, scheme, endpoint)), - client(make_ref( - profile == "" - ? std::dynamic_pointer_cast( - std::make_shared< - Aws::Auth::DefaultAWSCredentialsProviderChain>()) - : std::dynamic_pointer_cast( - std::make_shared< - Aws::Auth::ProfileConfigFileAWSCredentialsProvider>( - profile.c_str())), - *config, -// FIXME: https://github.com/aws/aws-sdk-cpp/issues/759 -#if AWS_VERSION_MAJOR == 1 && AWS_VERSION_MINOR < 3 - false, -#else - Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, -#endif - endpoint.empty())) { -} - -/* Log AWS retries. 
*/ -class RetryStrategy : public Aws::Client::DefaultRetryStrategy { - bool ShouldRetry(const Aws::Client::AWSError& error, - long attemptedRetries) const override { - auto retry = - Aws::Client::DefaultRetryStrategy::ShouldRetry(error, attemptedRetries); - if (retry) - printError("AWS error '%s' (%s), will retry in %d ms", - error.GetExceptionName(), error.GetMessage(), - CalculateDelayBeforeNextRetry(error, attemptedRetries)); - return retry; - } -}; - -ref S3Helper::makeConfig( - const std::string& region, const std::string& scheme, - const std::string& endpoint) { - initAWS(); - auto res = make_ref(); - res->region = region; - if (!scheme.empty()) { - res->scheme = Aws::Http::SchemeMapper::FromString(scheme.c_str()); - } - if (!endpoint.empty()) { - res->endpointOverride = endpoint; - } - res->requestTimeoutMs = 600 * 1000; - res->connectTimeoutMs = 5 * 1000; - res->retryStrategy = std::make_shared(); - res->caFile = settings.caFile; - return res; -} - -S3Helper::DownloadResult S3Helper::getObject(const std::string& bucketName, - const std::string& key) { - debug("fetching 's3://%s/%s'...", bucketName, key); - - auto request = - Aws::S3::Model::GetObjectRequest().WithBucket(bucketName).WithKey(key); - - request.SetResponseStreamFactory( - [&]() { return Aws::New("STRINGSTREAM"); }); - - DownloadResult res; - - auto now1 = std::chrono::steady_clock::now(); - - try { - auto result = checkAws(fmt("AWS error fetching '%s'", key), - client->GetObject(request)); - - res.data = - decompress(result.GetContentEncoding(), - dynamic_cast(result.GetBody()).str()); - - } catch (S3Error& e) { - if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) { - throw; - } - } - - auto now2 = std::chrono::steady_clock::now(); - - res.durationMs = - std::chrono::duration_cast(now2 - now1) - .count(); - - return res; -} - -struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore { - const Setting profile{ - this, "", "profile", "The name of the AWS configuration profile to use."}; - const Setting region{ - this, Aws::Region::US_EAST_1, "region", {"aws-region"}}; - const Setting scheme{ - this, "", "scheme", - "The scheme to use for S3 requests, https by default."}; - const Setting endpoint{ - this, "", "endpoint", - "An optional override of the endpoint to use when talking to S3."}; - const Setting narinfoCompression{ - this, "", "narinfo-compression", "compression method for .narinfo files"}; - const Setting lsCompression{this, "", "ls-compression", - "compression method for .ls files"}; - const Setting logCompression{ - this, "", "log-compression", "compression method for log/* files"}; - const Setting multipartUpload{this, false, "multipart-upload", - "whether to use multi-part uploads"}; - const Setting bufferSize{ - this, 5 * 1024 * 1024, "buffer-size", - "size (in bytes) of each part in multi-part uploads"}; - - std::string bucketName; - - Stats stats; - - S3Helper s3Helper; - - S3BinaryCacheStoreImpl(const Params& params, const std::string& bucketName) - : S3BinaryCacheStore(params), - bucketName(bucketName), - s3Helper(profile, region, scheme, endpoint) { - diskCache = getNarInfoDiskCache(); - } - - std::string getUri() override { return "s3://" + bucketName; } - - void init() override { - if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) { - BinaryCacheStore::init(); - - diskCache->createCache(getUri(), storeDir, wantMassQuery_, priority); - } - } - - const Stats& getS3Stats() override { return stats; } - - /* This is a specialisation of isValidPath() that optimistically - fetches the .narinfo 
file, rather than first checking for its - existence via a HEAD request. Since .narinfos are small, doing - a GET is unlikely to be slower than HEAD. */ - bool isValidPathUncached(const Path& storePath) override { - try { - queryPathInfo(storePath); - return true; - } catch (InvalidPath& e) { - return false; - } - } - - bool fileExists(const std::string& path) override { - stats.head++; - - auto res = s3Helper.client->HeadObject(Aws::S3::Model::HeadObjectRequest() - .WithBucket(bucketName) - .WithKey(path)); - - if (!res.IsSuccess()) { - auto& error = res.GetError(); - if (error.GetErrorType() == Aws::S3::S3Errors::RESOURCE_NOT_FOUND || - error.GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY - // If bucket listing is disabled, 404s turn into 403s - || error.GetErrorType() == Aws::S3::S3Errors::ACCESS_DENIED) - return false; - throw Error(format("AWS error fetching '%s': %s") % path % - error.GetMessage()); - } - - return true; - } - - std::shared_ptr transferManager; - std::once_flag transferManagerCreated; - - void uploadFile(const std::string& path, const std::string& data, - const std::string& mimeType, - const std::string& contentEncoding) { - auto stream = std::make_shared(data); - - auto maxThreads = std::thread::hardware_concurrency(); - - static std::shared_ptr - executor = - std::make_shared( - maxThreads); - - std::call_once(transferManagerCreated, [&]() { - if (multipartUpload) { - TransferManagerConfiguration transferConfig(executor.get()); - - transferConfig.s3Client = s3Helper.client; - transferConfig.bufferSize = bufferSize; - - transferConfig.uploadProgressCallback = - [](const TransferManager* transferManager, - const std::shared_ptr& transferHandle) { - // FIXME: find a way to properly abort the multipart upload. - // checkInterrupt(); - debug("upload progress ('%s'): '%d' of '%d' bytes", - transferHandle->GetKey(), - transferHandle->GetBytesTransferred(), - transferHandle->GetBytesTotalSize()); - }; - - transferManager = TransferManager::Create(transferConfig); - } - }); - - auto now1 = std::chrono::steady_clock::now(); - - if (transferManager) { - if (contentEncoding != "") - throw Error( - "setting a content encoding is not supported with S3 multi-part " - "uploads"); - - std::shared_ptr transferHandle = - transferManager->UploadFile(stream, bucketName, path, mimeType, - Aws::Map(), - nullptr /*, contentEncoding */); - - transferHandle->WaitUntilFinished(); - - if (transferHandle->GetStatus() == TransferStatus::FAILED) - throw Error("AWS error: failed to upload 's3://%s/%s': %s", bucketName, - path, transferHandle->GetLastError().GetMessage()); - - if (transferHandle->GetStatus() != TransferStatus::COMPLETED) - throw Error( - "AWS error: transfer status of 's3://%s/%s' in unexpected state", - bucketName, path); - - } else { - auto request = Aws::S3::Model::PutObjectRequest() - .WithBucket(bucketName) - .WithKey(path); - - request.SetContentType(mimeType); - - if (contentEncoding != "") { - request.SetContentEncoding(contentEncoding); - } - - auto stream = std::make_shared(data); - - request.SetBody(stream); - - auto result = checkAws(fmt("AWS error uploading '%s'", path), - s3Helper.client->PutObject(request)); - } - - auto now2 = std::chrono::steady_clock::now(); - - auto duration = - std::chrono::duration_cast(now2 - now1) - .count(); - - printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") % - bucketName % path % data.size() % duration); - - stats.putTimeMs += duration; - stats.putBytes += data.size(); - stats.put++; - } - - void upsertFile(const 
std::string& path, const std::string& data, - const std::string& mimeType) override { - if (narinfoCompression != "" && absl::EndsWith(path, ".narinfo")) - uploadFile(path, *compress(narinfoCompression, data), mimeType, - narinfoCompression); - else if (lsCompression != "" && absl::EndsWith(path, ".ls")) - uploadFile(path, *compress(lsCompression, data), mimeType, lsCompression); - else if (logCompression != "" && absl::StartsWith(path, "log/")) - uploadFile(path, *compress(logCompression, data), mimeType, - logCompression); - else - uploadFile(path, data, mimeType, ""); - } - - void getFile(const std::string& path, Sink& sink) override { - stats.get++; - - // FIXME: stream output to sink. - auto res = s3Helper.getObject(bucketName, path); - - stats.getBytes += res.data ? res.data->size() : 0; - stats.getTimeMs += res.durationMs; - - if (res.data) { - printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms", bucketName, - path, res.data->size(), res.durationMs); - - sink((unsigned char*)res.data->data(), res.data->size()); - } else - throw NoSuchBinaryCacheFile( - "file '%s' does not exist in binary cache '%s'", path, getUri()); - } - - PathSet queryAllValidPaths() override { - PathSet paths; - std::string marker; - - do { - debug(format("listing bucket 's3://%s' from key '%s'...") % bucketName % - marker); - - auto res = checkAws( - format("AWS error listing bucket '%s'") % bucketName, - s3Helper.client->ListObjects(Aws::S3::Model::ListObjectsRequest() - .WithBucket(bucketName) - .WithDelimiter("/") - .WithMarker(marker))); - - auto& contents = res.GetContents(); - - debug(format("got %d keys, next marker '%s'") % contents.size() % - res.GetNextMarker()); - - for (auto object : contents) { - auto& key = object.GetKey(); - if (key.size() != 40 || !absl::EndsWith(key, ".narinfo")) { - continue; - } - paths.insert(storeDir + "/" + key.substr(0, key.size() - 8)); - } - - marker = res.GetNextMarker(); - } while (!marker.empty()); - - return paths; - } -}; - -static RegisterStoreImplementation regStore( - [](const std::string& uri, - const Store::Params& params) -> std::shared_ptr { - if (std::string(uri, 0, 5) != "s3://") { - return 0; - } - auto store = - std::make_shared(params, std::string(uri, 5)); - store->init(); - return store; - }); - -} // namespace nix - -#endif diff --git a/third_party/nix/src/libstore/s3-binary-cache-store.hh b/third_party/nix/src/libstore/s3-binary-cache-store.hh deleted file mode 100644 index 3d0d0b3c44..0000000000 --- a/third_party/nix/src/libstore/s3-binary-cache-store.hh +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include - -#include "libstore/binary-cache-store.hh" - -namespace nix { - -class S3BinaryCacheStore : public BinaryCacheStore { - protected: - S3BinaryCacheStore(const Params& params) : BinaryCacheStore(params) {} - - public: - struct Stats { - std::atomic put{0}; - std::atomic putBytes{0}; - std::atomic putTimeMs{0}; - std::atomic get{0}; - std::atomic getBytes{0}; - std::atomic getTimeMs{0}; - std::atomic head{0}; - }; - - virtual const Stats& getS3Stats() = 0; -}; - -} // namespace nix diff --git a/third_party/nix/src/libstore/s3.hh b/third_party/nix/src/libstore/s3.hh deleted file mode 100644 index 4f1852dc3d..0000000000 --- a/third_party/nix/src/libstore/s3.hh +++ /dev/null @@ -1,42 +0,0 @@ -#pragma once - -#if ENABLE_S3 - -#include "libutil/ref.hh" - -namespace Aws { -namespace Client { -class ClientConfiguration; -} -} // namespace Aws -namespace Aws { -namespace S3 { -class S3Client; -} -} // namespace Aws - -namespace nix { - 
-struct S3Helper { - ref config; - ref client; - - S3Helper(const std::string& profile, const std::string& region, - const std::string& scheme, const std::string& endpoint); - - ref makeConfig(const std::string& region, - const std::string& scheme, - const std::string& endpoint); - - struct DownloadResult { - std::shared_ptr data; - unsigned int durationMs; - }; - - DownloadResult getObject(const std::string& bucketName, - const std::string& key); -}; - -} // namespace nix - -#endif diff --git a/third_party/nix/src/libstore/sandbox-defaults.sb b/third_party/nix/src/libstore/sandbox-defaults.sb deleted file mode 100644 index 0299d1ee45..0000000000 --- a/third_party/nix/src/libstore/sandbox-defaults.sb +++ /dev/null @@ -1,87 +0,0 @@ -(define TMPDIR (param "_GLOBAL_TMP_DIR")) - -(deny default) - -; Disallow creating setuid/setgid binaries, since that -; would allow breaking build user isolation. -(deny file-write-setugid) - -; Allow forking. -(allow process-fork) - -; Allow reading system information like #CPUs, etc. -(allow sysctl-read) - -; Allow POSIX semaphores and shared memory. -(allow ipc-posix*) - -; Allow socket creation. -(allow system-socket) - -; Allow sending signals within the sandbox. -(allow signal (target same-sandbox)) - -; Allow getpwuid. -(allow mach-lookup (global-name "com.apple.system.opendirectoryd.libinfo")) - -; Access to /tmp. -; The network-outbound/network-inbound ones are for unix domain sockets, which -; we allow access to in TMPDIR (but if we allow them more broadly, you could in -; theory escape the sandbox) -(allow file* process-exec network-outbound network-inbound - (literal "/tmp") (subpath TMPDIR)) - -; Some packages like to read the system version. -(allow file-read* (literal "/System/Library/CoreServices/SystemVersion.plist")) - -; Without this line clang cannot write to /dev/null, breaking some configure tests. -(allow file-read-metadata (literal "/dev")) - -; Many packages like to do local networking in their test suites, but let's only -; allow it if the package explicitly asks for it. -(if (param "_ALLOW_LOCAL_NETWORKING") - (begin - (allow network* (local ip) (local tcp) (local udp)) - - ; Allow access to /etc/resolv.conf (which is a symlink to - ; /private/var/run/resolv.conf). - ; TODO: deduplicate with sandbox-network.sb - (allow file-read-metadata - (literal "/var") - (literal "/etc") - (literal "/etc/resolv.conf") - (literal "/private/etc/resolv.conf")) - - (allow file-read* - (literal "/private/var/run/resolv.conf")) - - ; Allow DNS lookups. This is even needed for localhost, which lots of tests rely on - (allow file-read-metadata (literal "/etc/hosts")) - (allow file-read* (literal "/private/etc/hosts")) - (allow network-outbound (remote unix-socket (path-literal "/private/var/run/mDNSResponder"))))) - -; Standard devices. -(allow file* - (literal "/dev/null") - (literal "/dev/random") - (literal "/dev/stdin") - (literal "/dev/stdout") - (literal "/dev/tty") - (literal "/dev/urandom") - (literal "/dev/zero") - (subpath "/dev/fd")) - -; Does nothing, but reduces build noise. -(allow file* (literal "/dev/dtracehelper")) - -; Allow access to zoneinfo since libSystem needs it. -(allow file-read* (subpath "/usr/share/zoneinfo")) - -(allow file-read* (subpath "/usr/share/locale")) - -; This is mostly to get more specific log messages when builds try to -; access something in /etc or /var. 
-(allow file-read-metadata - (literal "/etc") - (literal "/var") - (literal "/private/var/tmp")) diff --git a/third_party/nix/src/libstore/sandbox-minimal.sb b/third_party/nix/src/libstore/sandbox-minimal.sb deleted file mode 100644 index 65f5108b39..0000000000 --- a/third_party/nix/src/libstore/sandbox-minimal.sb +++ /dev/null @@ -1,5 +0,0 @@ -(allow default) - -; Disallow creating setuid/setgid binaries, since that -; would allow breaking build user isolation. -(deny file-write-setugid) diff --git a/third_party/nix/src/libstore/sandbox-network.sb b/third_party/nix/src/libstore/sandbox-network.sb deleted file mode 100644 index 56beec761f..0000000000 --- a/third_party/nix/src/libstore/sandbox-network.sb +++ /dev/null @@ -1,16 +0,0 @@ -; Allow local and remote network traffic. -(allow network* (local ip) (remote ip)) - -; Allow access to /etc/resolv.conf (which is a symlink to -; /private/var/run/resolv.conf). -(allow file-read-metadata - (literal "/var") - (literal "/etc") - (literal "/etc/resolv.conf") - (literal "/private/etc/resolv.conf")) - -(allow file-read* - (literal "/private/var/run/resolv.conf")) - -; Allow DNS lookups. -(allow network-outbound (remote unix-socket (path-literal "/private/var/run/mDNSResponder"))) diff --git a/third_party/nix/src/libstore/schema.sql b/third_party/nix/src/libstore/schema.sql deleted file mode 100644 index 09c71a2b8d..0000000000 --- a/third_party/nix/src/libstore/schema.sql +++ /dev/null @@ -1,42 +0,0 @@ -create table if not exists ValidPaths ( - id integer primary key autoincrement not null, - path text unique not null, - hash text not null, - registrationTime integer not null, - deriver text, - narSize integer, - ultimate integer, -- null implies "false" - sigs text, -- space-separated - ca text -- if not null, an assertion that the path is content-addressed; see ValidPathInfo -); - -create table if not exists Refs ( - referrer integer not null, - reference integer not null, - primary key (referrer, reference), - foreign key (referrer) references ValidPaths(id) on delete cascade, - foreign key (reference) references ValidPaths(id) on delete restrict -); - -create index if not exists IndexReferrer on Refs(referrer); -create index if not exists IndexReference on Refs(reference); - --- Paths can refer to themselves, causing a tuple (N, N) in the Refs --- table. This causes a deletion of the corresponding row in --- ValidPaths to cause a foreign key constraint violation (due to `on --- delete restrict' on the `reference' column). Therefore, explicitly --- get rid of self-references. 
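For illustration only, not from the original sources: the note above is the reason for the DeleteSelfRefs trigger that follows immediately below. A self-contained sketch against the plain sqlite3 C API, using a trimmed-down copy of the two tables, shows the trigger letting a self-referencing row be deleted:

    // Demonstrates why DeleteSelfRefs is needed: a path that lists
    // itself in Refs could otherwise never be removed from ValidPaths.
    #include <sqlite3.h>

    #include <cstdio>

    int main() {
      sqlite3* db = nullptr;
      if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;

      char* err = nullptr;
      const char* setup =
          "pragma foreign_keys = on;"
          "create table ValidPaths (id integer primary key, path text);"
          "create table Refs ("
          "  referrer integer not null,"
          "  reference integer not null,"
          "  primary key (referrer, reference),"
          "  foreign key (referrer) references ValidPaths(id) on delete cascade,"
          "  foreign key (reference) references ValidPaths(id) on delete restrict);"
          "create trigger DeleteSelfRefs before delete on ValidPaths begin"
          "  delete from Refs where referrer = old.id and reference = old.id; "
          "end;"
          "insert into ValidPaths (id, path) values (1, 'self-referencing path');"
          "insert into Refs values (1, 1);";
      if (sqlite3_exec(db, setup, nullptr, nullptr, &err) != SQLITE_OK) {
        std::printf("setup failed: %s\n", err);
        return 1;
      }

      // The trigger removes the (1, 1) self-reference first, so the
      // 'on delete restrict' constraint no longer blocks the delete.
      int rc = sqlite3_exec(db, "delete from ValidPaths where id = 1;",
                            nullptr, nullptr, &err);
      std::printf("delete: %s\n", rc == SQLITE_OK ? "ok" : err);

      sqlite3_close(db);
      return rc == SQLITE_OK ? 0 : 1;
    }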
-create trigger if not exists DeleteSelfRefs before delete on ValidPaths - begin - delete from Refs where referrer = old.id and reference = old.id; - end; - -create table if not exists DerivationOutputs ( - drv integer not null, - id text not null, -- symbolic output id, usually "out" - path text not null, - primary key (drv, id), - foreign key (drv) references ValidPaths(id) on delete cascade -); - -create index if not exists IndexDerivationOutputs on DerivationOutputs(path); diff --git a/third_party/nix/src/libstore/serve-protocol.hh b/third_party/nix/src/libstore/serve-protocol.hh deleted file mode 100644 index 04c92e63f6..0000000000 --- a/third_party/nix/src/libstore/serve-protocol.hh +++ /dev/null @@ -1,24 +0,0 @@ -#pragma once - -namespace nix { - -#define SERVE_MAGIC_1 0x390c9deb -#define SERVE_MAGIC_2 0x5452eecb - -#define SERVE_PROTOCOL_VERSION 0x205 -#define GET_PROTOCOL_MAJOR(x) ((x)&0xff00) -#define GET_PROTOCOL_MINOR(x) ((x)&0x00ff) - -using ServeCommand = enum { - cmdQueryValidPaths = 1, - cmdQueryPathInfos = 2, - cmdDumpStorePath = 3, - cmdImportPaths = 4, - cmdExportPaths = 5, - cmdBuildPaths = 6, - cmdQueryClosure = 7, - cmdBuildDerivation = 8, - cmdAddToStoreNar = 9, -}; - -} // namespace nix diff --git a/third_party/nix/src/libstore/sqlite.cc b/third_party/nix/src/libstore/sqlite.cc deleted file mode 100644 index 0fb32326f5..0000000000 --- a/third_party/nix/src/libstore/sqlite.cc +++ /dev/null @@ -1,195 +0,0 @@ -#include "libstore/sqlite.hh" - -#include - -#include -#include - -#include "libutil/util.hh" - -namespace nix { - -[[noreturn]] void throwSQLiteError(sqlite3* db, const FormatOrString& fs) { - int err = sqlite3_errcode(db); - int exterr = sqlite3_extended_errcode(db); - - auto path = sqlite3_db_filename(db, nullptr); - if (path == nullptr) { - path = "(in-memory)"; - } - - if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) { - throw SQLiteBusy( - err == SQLITE_PROTOCOL - ? fmt("SQLite database '%s' is busy (SQLITE_PROTOCOL)", path) - : fmt("SQLite database '%s' is busy", path)); - } - throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(exterr), path); -} - -SQLite::SQLite(const Path& path) { - if (sqlite3_open_v2(path.c_str(), &db, - SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, - nullptr) != SQLITE_OK) { - throw Error(format("cannot open SQLite database '%s'") % path); - } -} - -SQLite::~SQLite() { - try { - if ((db != nullptr) && sqlite3_close(db) != SQLITE_OK) { - throwSQLiteError(db, "closing database"); - } - } catch (...) { - ignoreException(); - } -} - -void SQLite::exec(const std::string& stmt) { - retrySQLite([&]() { - if (sqlite3_exec(db, stmt.c_str(), nullptr, nullptr, nullptr) != - SQLITE_OK) { - throwSQLiteError(db, format("executing SQLite statement '%s'") % stmt); - } - }); -} - -void SQLiteStmt::create(sqlite3* db, const std::string& sql) { - checkInterrupt(); - assert(!stmt); - if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - throwSQLiteError(db, fmt("creating statement '%s'", sql)); - } - this->db = db; - this->sql = sql; -} - -SQLiteStmt::~SQLiteStmt() { - try { - if ((stmt != nullptr) && sqlite3_finalize(stmt) != SQLITE_OK) { - throwSQLiteError(db, fmt("finalizing statement '%s'", sql)); - } - } catch (...) { - ignoreException(); - } -} - -SQLiteStmt::Use::Use(SQLiteStmt& stmt) : stmt(stmt) { - assert(stmt.stmt); - /* Note: sqlite3_reset() returns the error code for the most - recent call to sqlite3_step(). So ignore it. 
*/ - sqlite3_reset(stmt); -} - -SQLiteStmt::Use::~Use() { sqlite3_reset(stmt); } - -SQLiteStmt::Use& SQLiteStmt::Use::operator()(const std::string& value, - bool notNull) { - if (notNull) { - if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, - SQLITE_TRANSIENT) != SQLITE_OK) { - throwSQLiteError(stmt.db, "binding argument"); - } - } else { - bind(); - } - return *this; -} - -SQLiteStmt::Use& SQLiteStmt::Use::operator()(int64_t value, bool notNull) { - if (notNull) { - if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK) { - throwSQLiteError(stmt.db, "binding argument"); - } - } else { - bind(); - } - return *this; -} - -SQLiteStmt::Use& SQLiteStmt::Use::bind() { - if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK) { - throwSQLiteError(stmt.db, "binding argument"); - } - return *this; -} - -int SQLiteStmt::Use::step() { return sqlite3_step(stmt); } - -void SQLiteStmt::Use::exec() { - int r = step(); - assert(r != SQLITE_ROW); - if (r != SQLITE_DONE) { - throwSQLiteError(stmt.db, fmt("executing SQLite statement '%s'", stmt.sql)); - } -} - -bool SQLiteStmt::Use::next() { - int r = step(); - if (r != SQLITE_DONE && r != SQLITE_ROW) { - throwSQLiteError(stmt.db, fmt("executing SQLite query '%s'", stmt.sql)); - } - return r == SQLITE_ROW; -} - -std::string SQLiteStmt::Use::getStr(int col) { - auto s = reinterpret_cast(sqlite3_column_text(stmt, col)); - assert(s); - return s; -} - -int64_t SQLiteStmt::Use::getInt(int col) { - // FIXME: detect nulls? - return sqlite3_column_int64(stmt, col); -} - -bool SQLiteStmt::Use::isNull(int col) { - return sqlite3_column_type(stmt, col) == SQLITE_NULL; -} - -SQLiteTxn::SQLiteTxn(sqlite3* db) { - this->db = db; - if (sqlite3_exec(db, "begin;", nullptr, nullptr, nullptr) != SQLITE_OK) { - throwSQLiteError(db, "starting transaction"); - } - active = true; -} - -void SQLiteTxn::commit() { - if (sqlite3_exec(db, "commit;", nullptr, nullptr, nullptr) != SQLITE_OK) { - throwSQLiteError(db, "committing transaction"); - } - active = false; -} - -SQLiteTxn::~SQLiteTxn() { - try { - if (active && - sqlite3_exec(db, "rollback;", nullptr, nullptr, nullptr) != SQLITE_OK) { - throwSQLiteError(db, "aborting transaction"); - } - } catch (...) { - ignoreException(); - } -} - -void handleSQLiteBusy(const SQLiteBusy& e) { - static std::atomic lastWarned{0}; - - time_t now = time(nullptr); - - if (now > lastWarned + 10) { - lastWarned = now; - LOG(ERROR) << e.what(); - } - - /* Sleep for a while since retrying the transaction right away - is likely to fail again. */ - checkInterrupt(); - struct timespec t; - t.tv_sec = 0; - t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */ - nanosleep(&t, nullptr); -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/sqlite.hh b/third_party/nix/src/libstore/sqlite.hh deleted file mode 100644 index cad78aed45..0000000000 --- a/third_party/nix/src/libstore/sqlite.hh +++ /dev/null @@ -1,109 +0,0 @@ -#pragma once - -#include -#include - -#include "libutil/types.hh" - -class sqlite3; -class sqlite3_stmt; - -namespace nix { - -/* RAII wrapper to close a SQLite database automatically. */ -struct SQLite { - sqlite3* db = 0; - SQLite() {} - SQLite(const Path& path); - SQLite(const SQLite& from) = delete; - SQLite& operator=(const SQLite& from) = delete; - SQLite& operator=(SQLite&& from) { - db = from.db; - from.db = 0; - return *this; - } - ~SQLite(); - operator sqlite3*() { return db; } - - void exec(const std::string& stmt); -}; - -/* RAII wrapper to create and destroy SQLite prepared statements. 
*/ -struct SQLiteStmt { - sqlite3* db = 0; - sqlite3_stmt* stmt = 0; - std::string sql; - SQLiteStmt() {} - SQLiteStmt(sqlite3* db, const std::string& sql) { create(db, sql); } - void create(sqlite3* db, const std::string& s); - ~SQLiteStmt(); - operator sqlite3_stmt*() { return stmt; } - - /* Helper for binding / executing statements. */ - class Use { - friend struct SQLiteStmt; - - private: - SQLiteStmt& stmt; - int curArg = 1; - Use(SQLiteStmt& stmt); - - public: - ~Use(); - - /* Bind the next parameter. */ - Use& operator()(const std::string& value, bool notNull = true); - Use& operator()(int64_t value, bool notNull = true); - Use& bind(); // null - - int step(); - - /* Execute a statement that does not return rows. */ - void exec(); - - /* For statements that return 0 or more rows. Returns true iff - a row is available. */ - bool next(); - - std::string getStr(int col); - int64_t getInt(int col); - bool isNull(int col); - }; - - Use use() { return Use(*this); } -}; - -/* RAII helper that ensures transactions are aborted unless explicitly - committed. */ -struct SQLiteTxn { - bool active = false; - sqlite3* db; - - SQLiteTxn(sqlite3* db); - - void commit(); - - ~SQLiteTxn(); -}; - -MakeError(SQLiteError, Error); -MakeError(SQLiteBusy, SQLiteError); - -[[noreturn]] void throwSQLiteError(sqlite3* db, const FormatOrString& fs); - -void handleSQLiteBusy(const SQLiteBusy& e); - -/* Convenience function for retrying a SQLite transaction when the - database is busy. */ -template -T retrySQLite(std::function fun) { - while (true) { - try { - return fun(); - } catch (SQLiteBusy& e) { - handleSQLiteBusy(e); - } - } -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/ssh-store.cc b/third_party/nix/src/libstore/ssh-store.cc deleted file mode 100644 index 96adb3660d..0000000000 --- a/third_party/nix/src/libstore/ssh-store.cc +++ /dev/null @@ -1,89 +0,0 @@ -#include - -#include "libstore/remote-fs-accessor.hh" -#include "libstore/remote-store.hh" -#include "libstore/ssh.hh" -#include "libstore/store-api.hh" -#include "libstore/worker-protocol.hh" -#include "libutil/archive.hh" -#include "libutil/pool.hh" - -namespace nix { - -constexpr std::string_view kUriScheme = "ssh-ng://"; - -class SSHStore : public RemoteStore { - public: - const Setting sshKey{(Store*)this, "", "ssh-key", - "path to an SSH private key"}; - const Setting compress{(Store*)this, false, "compress", - "whether to compress the connection"}; - - SSHStore(const std::string& host, const Params& params) - : Store(params), - RemoteStore(params), - host(host), - master(host, sshKey, - // Use SSH master only if using more than 1 connection. - connections->capacity() > 1, compress) {} - - std::string getUri() override { return absl::StrCat(kUriScheme, host); } - - bool sameMachine() override { return false; } - - void narFromPath(const Path& path, Sink& sink) override; - - ref getFSAccessor() override; - - private: - struct Connection : RemoteStore::Connection { - std::unique_ptr sshConn; - }; - - ref openConnection() override; - - std::string host; - - SSHMaster master; - - void setOptions(RemoteStore::Connection& conn) override{ - /* TODO Add a way to explicitly ask for some options to be - forwarded. One option: A way to query the daemon for its - settings, and then a series of params to SSHStore like - forward-cores or forward-overridden-cores that only - override the requested settings. 
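A usage sketch, not part of the removed code itself, for the SQLite helpers above (libstore/sqlite.hh): a typical call site prepares a SQLiteStmt, binds parameters through Use's call operators, and wraps the whole unit of work in retrySQLite so that SQLITE_BUSY leads to a retry instead of an error. The statement text and bound values below are made up.

    // Usage sketch for the deleted libstore/sqlite.hh wrappers.
    #include <ctime>
    #include <string>

    #include "libstore/sqlite.hh"

    void registerExamplePath(nix::SQLite& db, const std::string& path) {
      nix::retrySQLite<void>([&]() {
        nix::SQLiteTxn txn(db);  // rolls back unless commit() is reached

        nix::SQLiteStmt insert(
            db,
            "insert or replace into ValidPaths (path, hash, registrationTime) "
            "values (?, ?, ?);");

        insert.use()(path)("sha256:placeholder")(
            static_cast<int64_t>(std::time(nullptr)))
            .exec();  // statement returns no rows

        txn.commit();
      });
    }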
- */ - }; -}; - -void SSHStore::narFromPath(const Path& path, Sink& sink) { - auto conn(connections->get()); - conn->to << wopNarFromPath << path; - conn->processStderr(); - copyNAR(conn->from, sink); -} - -ref SSHStore::getFSAccessor() { - return make_ref(ref(shared_from_this())); -} - -ref SSHStore::openConnection() { - auto conn = make_ref(); - conn->sshConn = master.startCommand("nix-daemon --pipe"); - conn->to = FdSink(conn->sshConn->in.get()); - conn->from = FdSource(conn->sshConn->out.get()); - initConnection(*conn); - return conn; -} - -static RegisterStoreImplementation regStore( - [](const std::string& uri, - const Store::Params& params) -> std::shared_ptr { - if (std::string(uri, 0, kUriScheme.size()) != kUriScheme) { - return nullptr; - } - return std::make_shared(std::string(uri, kUriScheme.size()), - params); - }); - -} // namespace nix diff --git a/third_party/nix/src/libstore/ssh.cc b/third_party/nix/src/libstore/ssh.cc deleted file mode 100644 index 6043e584dd..0000000000 --- a/third_party/nix/src/libstore/ssh.cc +++ /dev/null @@ -1,160 +0,0 @@ -#include "libstore/ssh.hh" - -#include - -#include -#include - -namespace nix { - -SSHMaster::SSHMaster(const std::string& host, std::string keyFile, - bool useMaster, bool compress, int logFD) - : host(host), - fakeSSH(host == "localhost"), - keyFile(std::move(keyFile)), - useMaster(useMaster && !fakeSSH), - compress(compress), - logFD(logFD) { - if (host.empty() || absl::StartsWith(host, "-")) { - throw Error("invalid SSH host name '%s'", host); - } -} - -void SSHMaster::addCommonSSHOpts(Strings& args) { - for (auto& i : - absl::StrSplit(getEnv("NIX_SSHOPTS").value_or(""), - absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty())) { - args.push_back(std::string(i)); - } - if (!keyFile.empty()) { - args.insert(args.end(), {"-i", keyFile}); - } - if (compress) { - args.push_back("-C"); - } -} - -std::unique_ptr SSHMaster::startCommand( - const std::string& command) { - Path socketPath = startMaster(); - - Pipe in; - Pipe out; - in.create(); - out.create(); - - auto conn = std::make_unique(); - ProcessOptions options; - options.dieWithParent = false; - - conn->sshPid = startProcess( - [&]() { - restoreSignals(); - - close(in.writeSide.get()); - close(out.readSide.get()); - - if (dup2(in.readSide.get(), STDIN_FILENO) == -1) { - throw SysError("duping over stdin"); - } - if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) { - throw SysError("duping over stdout"); - } - if (logFD != -1 && dup2(logFD, STDERR_FILENO) == -1) { - throw SysError("duping over stderr"); - } - - Strings args; - - if (fakeSSH) { - args = {"bash", "-c"}; - } else { - args = {"ssh", host, "-x", "-a"}; - addCommonSSHOpts(args); - if (!socketPath.empty()) { - args.insert(args.end(), {"-S", socketPath}); - } - // TODO(tazjin): Abseil verbosity flag - /*if (verbosity >= lvlChatty) { - args.push_back("-v"); - }*/ - } - - args.push_back(command); - execvp(args.begin()->c_str(), stringsToCharPtrs(args).data()); - - // could not exec ssh/bash - throw SysError("unable to execute '%s'", args.front()); - }, - options); - - in.readSide = AutoCloseFD(-1); - out.writeSide = AutoCloseFD(-1); - - conn->out = std::move(out.readSide); - conn->in = std::move(in.writeSide); - - return conn; -} - -Path SSHMaster::startMaster() { - if (!useMaster) { - return ""; - } - - auto state(state_.lock()); - - if (state->sshMaster != Pid(-1)) { - return state->socketPath; - } - - state->tmpDir = - std::make_unique(createTempDir("", "nix", true, true, 0700)); - - state->socketPath = Path(*state->tmpDir) 
+ "/ssh.sock"; - - Pipe out; - out.create(); - - ProcessOptions options; - options.dieWithParent = false; - - state->sshMaster = startProcess( - [&]() { - restoreSignals(); - - close(out.readSide.get()); - - if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) { - throw SysError("duping over stdout"); - } - - Strings args = {"ssh", host, - "-M", "-N", - "-S", state->socketPath, - "-o", "LocalCommand=echo started", - "-o", "PermitLocalCommand=yes"}; - // if (verbosity >= lvlChatty) { args.push_back("-v"); } - addCommonSSHOpts(args); - execvp(args.begin()->c_str(), stringsToCharPtrs(args).data()); - - throw SysError("unable to execute '%s'", args.front()); - }, - options); - - out.writeSide = AutoCloseFD(-1); - - std::string reply; - try { - reply = readLine(out.readSide.get()); - } catch (EndOfFile& e) { - } - - if (reply != "started") { - throw Error("failed to start SSH master connection to '%s'", host); - } - - return state->socketPath; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/ssh.hh b/third_party/nix/src/libstore/ssh.hh deleted file mode 100644 index 9844f89d35..0000000000 --- a/third_party/nix/src/libstore/ssh.hh +++ /dev/null @@ -1,41 +0,0 @@ -#pragma once - -#include "libutil/sync.hh" -#include "libutil/util.hh" - -namespace nix { - -class SSHMaster { - private: - const std::string host; - bool fakeSSH; - const std::string keyFile; - const bool useMaster; - const bool compress; - const int logFD; - - struct State { - Pid sshMaster; - std::unique_ptr tmpDir; - Path socketPath; - }; - - Sync state_; - - void addCommonSSHOpts(Strings& args); - - public: - SSHMaster(const std::string& host, std::string keyFile, bool useMaster, - bool compress, int logFD = -1); - - struct Connection { - Pid sshPid; - AutoCloseFD out, in; - }; - - std::unique_ptr startCommand(const std::string& command); - - Path startMaster(); -}; - -} // namespace nix diff --git a/third_party/nix/src/libstore/store-api.cc b/third_party/nix/src/libstore/store-api.cc deleted file mode 100644 index d8dbea18e9..0000000000 --- a/third_party/nix/src/libstore/store-api.cc +++ /dev/null @@ -1,1167 +0,0 @@ -#include "libstore/store-api.hh" - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "libproto/worker.pb.h" -#include "libstore/crypto.hh" -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/nar-info-disk-cache.hh" -#include "libstore/rpc-store.hh" -#include "libutil/json.hh" -#include "libutil/thread-pool.hh" -#include "libutil/util.hh" - -namespace nix { - -namespace { -class NullStream : public std::streambuf { - public: - int overflow(int c) override { return c; } -}; - -static NullStream NULL_STREAM{}; -} // namespace - -std::ostream DiscardLogsSink() { return std::ostream(&NULL_STREAM); } - -std::optional BuildModeFrom(nix::proto::BuildMode mode) { - switch (mode) { - case nix::proto::BuildMode::Normal: - return BuildMode::bmNormal; - case nix::proto::BuildMode::Repair: - return BuildMode::bmRepair; - case nix::proto::BuildMode::Check: - return BuildMode::bmCheck; - default: - return {}; - } -} - -nix::proto::BuildMode BuildModeToProto(BuildMode mode) { - switch (mode) { - case BuildMode::bmNormal: - return nix::proto::BuildMode::Normal; - case BuildMode::bmRepair: - return nix::proto::BuildMode::Repair; - case BuildMode::bmCheck: - return nix::proto::BuildMode::Check; - } -} - -nix::proto::BuildStatus BuildResult::status_to_proto() { - switch (status) { - case BuildResult::Status::Built: 
- return proto::BuildStatus::Built; - case BuildResult::Status::Substituted: - return proto::BuildStatus::Substituted; - case BuildResult::Status::AlreadyValid: - return proto::BuildStatus::AlreadyValid; - case BuildResult::Status::PermanentFailure: - return proto::BuildStatus::PermanentFailure; - case BuildResult::Status::InputRejected: - return proto::BuildStatus::InputRejected; - case BuildResult::Status::OutputRejected: - return proto::BuildStatus::OutputRejected; - case BuildResult::Status::TransientFailure: - return proto::BuildStatus::TransientFailure; - case BuildResult::Status::CachedFailure: - return proto::BuildStatus::CachedFailure; - case BuildResult::Status::TimedOut: - return proto::BuildStatus::TimedOut; - case BuildResult::Status::MiscFailure: - return proto::BuildStatus::MiscFailure; - case BuildResult::Status::DependencyFailed: - return proto::BuildStatus::DependencyFailed; - case BuildResult::Status::LogLimitExceeded: - return proto::BuildStatus::LogLimitExceeded; - case BuildResult::Status::NotDeterministic: - return proto::BuildStatus::NotDeterministic; - } -} - -std::optional BuildResult::FromProto( - const nix::proto::BuildResult& resp) { - BuildResult result; - switch (resp.status()) { - case proto::BuildStatus::Built: - result.status = BuildResult::Status::Built; - break; - case proto::BuildStatus::Substituted: - result.status = BuildResult::Status::Substituted; - break; - case proto::BuildStatus::AlreadyValid: - result.status = BuildResult::Status::AlreadyValid; - break; - case proto::BuildStatus::PermanentFailure: - result.status = BuildResult::Status::PermanentFailure; - break; - case proto::BuildStatus::InputRejected: - result.status = BuildResult::Status::InputRejected; - break; - case proto::BuildStatus::OutputRejected: - result.status = BuildResult::Status::OutputRejected; - break; - case proto::BuildStatus::TransientFailure: - result.status = BuildResult::Status::TransientFailure; - break; - case proto::BuildStatus::CachedFailure: - result.status = BuildResult::Status::CachedFailure; - break; - case proto::BuildStatus::TimedOut: - result.status = BuildResult::Status::TimedOut; - break; - case proto::BuildStatus::MiscFailure: - result.status = BuildResult::Status::MiscFailure; - break; - case proto::BuildStatus::DependencyFailed: - result.status = BuildResult::Status::DependencyFailed; - break; - case proto::BuildStatus::LogLimitExceeded: - result.status = BuildResult::Status::LogLimitExceeded; - break; - case proto::BuildStatus::NotDeterministic: - result.status = BuildResult::Status::NotDeterministic; - break; - default: - return {}; - } - - result.errorMsg = resp.msg(); - return result; -} - -std::optional GCActionFromProto( - nix::proto::GCAction gc_action) { - switch (gc_action) { - case nix::proto::GCAction::ReturnLive: - return GCOptions::GCAction::gcReturnLive; - case nix::proto::GCAction::ReturnDead: - return GCOptions::GCAction::gcReturnDead; - case nix::proto::GCAction::DeleteDead: - return GCOptions::GCAction::gcDeleteDead; - case nix::proto::GCAction::DeleteSpecific: - return GCOptions::GCAction::gcDeleteSpecific; - default: - return {}; - } -} - -[[nodiscard]] const proto::GCAction GCOptions::ActionToProto() const { - switch (action) { - case GCOptions::GCAction::gcReturnLive: - return nix::proto::GCAction::ReturnLive; - case GCOptions::GCAction::gcReturnDead: - return nix::proto::GCAction::ReturnDead; - case GCOptions::GCAction::gcDeleteDead: - return nix::proto::GCAction::DeleteDead; - case GCOptions::GCAction::gcDeleteSpecific: - return 
nix::proto::GCAction::DeleteSpecific; - } -} - -bool Store::isInStore(const Path& path) const { - return isInDir(path, storeDir); -} - -bool Store::isStorePath(const Path& path) const { - return isInStore(path) && - path.size() >= storeDir.size() + 1 + storePathHashLen && - path.find('/', storeDir.size() + 1) == Path::npos; -} - -void Store::assertStorePath(const Path& path) const { - if (!isStorePath(path)) { - throw Error(format("path '%1%' is not in the Nix store") % path); - } -} - -Path Store::toStorePath(const Path& path) const { - if (!isInStore(path)) { - throw Error(format("path '%1%' is not in the Nix store") % path); - } - Path::size_type slash = path.find('/', storeDir.size() + 1); - if (slash == Path::npos) { - return path; - } - return Path(path, 0, slash); -} - -Path Store::followLinksToStore(const Path& _path) const { - Path path = absPath(_path); - while (!isInStore(path)) { - if (!isLink(path)) { - break; - } - std::string target = readLink(path); - path = absPath(target, dirOf(path)); - } - if (!isInStore(path)) { - throw Error(format("path '%1%' is not in the Nix store") % path); - } - return path; -} - -Path Store::followLinksToStorePath(const Path& path) const { - return toStorePath(followLinksToStore(path)); -} - -std::string storePathToName(const Path& path) { - auto base = baseNameOf(path); - - // The base name of the store path must be `storePathHashLen` characters long, - // if it is not `storePathHashLen` long then the next character, following - // the hash part, MUST be a dash (`-`). - const bool hasLengthMismatch = base.size() != storePathHashLen; - const bool hasInvalidSuffix = - base.size() > storePathHashLen && base[storePathHashLen] != '-'; - if (hasLengthMismatch && hasInvalidSuffix) { - throw Error(format("path '%1%' is not a valid store path") % path); - } - - return base.size() == storePathHashLen - ? "" - : std::string(base, storePathHashLen + 1); -} - -std::string storePathToHash(const Path& path) { - auto base = baseNameOf(path); - assert(base.size() >= storePathHashLen); - return std::string(base, 0, storePathHashLen); -} - -void checkStoreName(const std::string& name) { - std::string validChars = "+-._?="; - - auto baseError = - format( - "The path name '%2%' is invalid: %3%. " - "Path names are alphanumeric and can include the symbols %1% " - "and must not begin with a period. " - "Note: If '%2%' is a source file and you cannot rename it on " - "disk, builtins.path { name = ... } can be used to give it an " - "alternative name.") % - validChars % name; - - /* Disallow names starting with a dot for possible security - reasons (e.g., "." and ".."). */ - if (std::string(name, 0, 1) == ".") { - throw Error(baseError % "it is illegal to start the name with a period"); - } - /* Disallow names longer than 211 characters. ext4’s max is 256, - but we need extra space for the hash and .chroot extensions. */ - if (name.length() > 211) { - throw Error(baseError % "name must be less than 212 characters"); - } - for (auto& i : name) { - if (!((i >= 'A' && i <= 'Z') || (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || validChars.find(i) != std::string::npos)) { - throw Error(baseError % (format("the '%1%' character is invalid") % i)); - } - } -} - -/* Store paths have the following form: - - /- - - where - - = the location of the Nix store, usually /nix/store - - = a human readable name for the path, typically obtained - from the name attribute of the derivation, or the name of the - source file from which the store path is created. 
For derivation - outputs other than the default "out" output, the string "-<id>" - is suffixed to <name>. - - <h> = base-32 representation of the first 160 bits of a SHA-256 - hash of <s>; the hash part of the store name - - <s> = the string "<type>:sha256:<h2>:<store>:<name>"; - note that it includes the location of the store as well as the - name to make sure that changes to either of those are reflected - in the hash (e.g. you won't get /nix/store/<h1>-name1 and - /nix/store/<h2>-name2 with equal hash parts). - - <type> = one of: - "text:<r1>:<r2>:...<rN>" - for plain text files written to the store using - addTextToStore(); <r1> ... <rN> are the references of the - path. - "source" - for paths copied to the store using addToStore() when recursive - = true and hashAlgo = "sha256" - "output:<id>" - for either the outputs created by derivations, OR paths copied - to the store using addToStore() with recursive != true or - hashAlgo != "sha256" (in that case "source" is used; it's - silly, but it's done that way for compatibility). <id> is the - name of the output (usually, "out"). - -

= base-16 representation of a SHA-256 hash of: - if = "text:...": - the string written to the resulting store path - if = "source": - the serialisation of the path from which this store path is - copied, as returned by hashPath() - if = "output:": - for non-fixed derivation outputs: - the derivation (see hashDerivationModulo() in - primops.cc) - for paths copied by addToStore() or produced by fixed-output - derivations: - the string "fixed:out:::", where - = "r:" for recursive (path) hashes, or "" for flat - (file) hashes - = "md5", "sha1" or "sha256" - = base-16 representation of the path or flat hash of - the contents of the path (or expected contents of the - path for fixed-output derivations) - - It would have been nicer to handle fixed-output derivations under - "source", e.g. have something like "source:", but we're - stuck with this for now... - - The main reason for this way of computing names is to prevent name - collisions (for security). For instance, it shouldn't be feasible - to come up with a derivation whose output path collides with the - path for a copied source. The former would have a starting with - "output:out:", while the latter would have a starting with - "source:". -*/ - -Path Store::makeStorePath(const std::string& type, const Hash& hash, - const std::string& name) const { - /* e.g., "source:sha256:1abc...:/nix/store:foo.tar.gz" */ - std::string s = - type + ":" + hash.to_string(Base16) + ":" + storeDir + ":" + name; - - checkStoreName(name); - - return absl::StrCat(storeDir, "/", hashString(htSHA256, s).ToStorePathHash(), - "-", name); -} - -Path Store::makeOutputPath(const std::string& id, const Hash& hash, - const std::string& name) const { - return makeStorePath("output:" + id, hash, - name + (id == "out" ? "" : "-" + id)); -} - -Path Store::makeFixedOutputPath(bool recursive, const Hash& hash, - const std::string& name) const { - return hash.type == htSHA256 && recursive - ? makeStorePath("source", hash, name) - : makeStorePath( - "output:out", - hashString( - htSHA256, - absl::StrCat("fixed:out:", (recursive ? "r:" : ""), - hash.to_string(Base16), ":")), - name); -} - -Path Store::makeTextPath(const std::string& name, const Hash& hash, - const PathSet& references) const { - assert(hash.type == htSHA256); - /* Stuff the references (if any) into the type. This is a bit - hacky, but we can't put them in `s' since that would be - ambiguous. */ - std::string type = "text"; - for (auto& i : references) { - type += ":"; - type += i; - } - return makeStorePath(type, hash, name); -} - -std::pair Store::computeStorePathForPath(const std::string& name, - const Path& srcPath, - bool recursive, - HashType hashAlgo, - PathFilter& filter) const { - Hash h = recursive ? 
hashPath(hashAlgo, srcPath, filter).first - : hashFile(hashAlgo, srcPath); - Path dstPath = makeFixedOutputPath(recursive, h, name); - return std::pair(dstPath, h); -} - -Path Store::computeStorePathForText(const std::string& name, - const std::string& s, - const PathSet& references) const { - return makeTextPath(name, hashString(htSHA256, s), references); -} - -Store::Store(const Params& params) - : Config(params), - state(Sync{ - State{LRUCache>( - (size_t)pathInfoCacheSize)}}) {} - -std::string Store::getUri() { return ""; } - -bool Store::isValidPath(const Path& storePath) { - assertStorePath(storePath); - - auto hashPart = storePathToHash(storePath); - - { - auto state_(state.lock()); - auto res = state_->pathInfoCache.get(hashPart); - if (res) { - stats.narInfoReadAverted++; - return *res != nullptr; - } - } - - if (diskCache) { - auto res = diskCache->lookupNarInfo(getUri(), hashPart); - if (res.first != NarInfoDiskCache::oUnknown) { - stats.narInfoReadAverted++; - auto state_(state.lock()); - state_->pathInfoCache.upsert( - hashPart, - res.first == NarInfoDiskCache::oInvalid ? nullptr : res.second); - return res.first == NarInfoDiskCache::oValid; - } - } - - bool valid = isValidPathUncached(storePath); - - if (diskCache && !valid) { - // FIXME: handle valid = true case. - diskCache->upsertNarInfo(getUri(), hashPart, nullptr); - } - - return valid; -} - -/* Default implementation for stores that only implement - queryPathInfoUncached(). */ -bool Store::isValidPathUncached(const Path& path) { - try { - queryPathInfo(path); - return true; - } catch (InvalidPath&) { - return false; - } -} - -ref Store::queryPathInfo(const Path& storePath) { - std::promise> promise; - - queryPathInfo( - storePath, - Callback>([&](std::future> result) { - try { - promise.set_value(result.get()); - } catch (...) { - promise.set_exception(std::current_exception()); - } - })); - - return promise.get_future().get(); -} - -void Store::queryPathInfo(const Path& storePath, - Callback> callback) noexcept { - std::string hashPart; - - try { - assertStorePath(storePath); - - hashPart = storePathToHash(storePath); - - { - auto res = state.lock()->pathInfoCache.get(hashPart); - if (res) { - stats.narInfoReadAverted++; - if (!*res) { - throw InvalidPath(format("path '%s' is not valid") % storePath); - } - return callback(ref(*res)); - } - } - - if (diskCache) { - auto res = diskCache->lookupNarInfo(getUri(), hashPart); - if (res.first != NarInfoDiskCache::oUnknown) { - stats.narInfoReadAverted++; - { - auto state_(state.lock()); - state_->pathInfoCache.upsert( - hashPart, - res.first == NarInfoDiskCache::oInvalid ? nullptr : res.second); - if (res.first == NarInfoDiskCache::oInvalid || - (res.second->path != storePath && - !storePathToName(storePath).empty())) { - throw InvalidPath(format("path '%s' is not valid") % storePath); - } - } - return callback(ref(res.second)); - } - } - - } catch (...) 
{ - return callback.rethrow(); - } - - auto callbackPtr = std::make_shared(std::move(callback)); - - queryPathInfoUncached( - storePath, - Callback>{ - [this, storePath, hashPart, - callbackPtr](std::future> fut) { - try { - auto info = fut.get(); - - if (diskCache) { - diskCache->upsertNarInfo(getUri(), hashPart, info); - } - - { - auto state_(state.lock()); - state_->pathInfoCache.upsert(hashPart, info); - } - - if (!info || (info->path != storePath && - !storePathToName(storePath).empty())) { - stats.narInfoMissing++; - throw InvalidPath("path '%s' is not valid", storePath); - } - - (*callbackPtr)(ref(info)); - } catch (...) { - callbackPtr->rethrow(); - } - }}); -} - -PathSet Store::queryValidPaths(const PathSet& paths, - SubstituteFlag maybeSubstitute) { - struct State { - size_t left; - PathSet valid; - std::exception_ptr exc; - }; - - Sync state_(State{paths.size(), PathSet()}); - - std::condition_variable wakeup; - ThreadPool pool; - - auto doQuery = [&](const Path& path) { - checkInterrupt(); - queryPathInfo(path, Callback>( - [path, &state_, - &wakeup](std::future> fut) { - auto state(state_.lock()); - try { - auto info = fut.get(); - state->valid.insert(path); - } catch (InvalidPath&) { - } catch (...) { - state->exc = std::current_exception(); - } - assert(state->left); - if (--state->left == 0u) { - wakeup.notify_one(); - } - })); - }; - - for (auto& path : paths) { - pool.enqueue(std::bind(doQuery, path)); - } - - pool.process(); - - while (true) { - auto state(state_.lock()); - if (state->left == 0u) { - if (state->exc) { - std::rethrow_exception(state->exc); - } - return state->valid; - } - state.wait(wakeup); - } -} - -/* Return a string accepted by decodeValidPathInfo() that - registers the specified paths as valid. Note: it's the - responsibility of the caller to provide a closure. */ -std::string Store::makeValidityRegistration(const PathSet& paths, - bool showDerivers, bool showHash) { - std::string s; - - for (auto& i : paths) { - s += i + "\n"; - - auto info = queryPathInfo(i); - - if (showHash) { - s += info->narHash.to_string(Base16, false) + "\n"; - s += (format("%1%\n") % info->narSize).str(); - } - - Path deriver = showDerivers ? 
info->deriver : ""; - s += deriver + "\n"; - - s += (format("%1%\n") % info->references.size()).str(); - - for (auto& j : info->references) { - s += j + "\n"; - } - } - - return s; -} - -void Store::pathInfoToJSON(JSONPlaceholder& jsonOut, const PathSet& storePaths, - bool includeImpureInfo, bool showClosureSize, - AllowInvalidFlag allowInvalid) { - auto jsonList = jsonOut.list(); - - for (auto storePath : storePaths) { - auto jsonPath = jsonList.object(); - jsonPath.attr("path", storePath); - - try { - auto info = queryPathInfo(storePath); - storePath = info->path; - - jsonPath.attr("narHash", info->narHash.to_string()) - .attr("narSize", info->narSize); - - { - auto jsonRefs = jsonPath.list("references"); - for (auto& ref : info->references) { - jsonRefs.elem(ref); - } - } - - if (!info->ca.empty()) { - jsonPath.attr("ca", info->ca); - } - - std::pair closureSizes; - - if (showClosureSize) { - closureSizes = getClosureSize(storePath); - jsonPath.attr("closureSize", closureSizes.first); - } - - if (includeImpureInfo) { - if (!info->deriver.empty()) { - jsonPath.attr("deriver", info->deriver); - } - - if (info->registrationTime != 0) { - jsonPath.attr("registrationTime", info->registrationTime); - } - - if (info->ultimate) { - jsonPath.attr("ultimate", info->ultimate); - } - - if (!info->sigs.empty()) { - auto jsonSigs = jsonPath.list("signatures"); - for (auto& sig : info->sigs) { - jsonSigs.elem(sig); - } - } - - auto narInfo = std::dynamic_pointer_cast( - std::shared_ptr(info)); - - if (narInfo) { - if (!narInfo->url.empty()) { - jsonPath.attr("url", narInfo->url); - } - if (narInfo->fileHash) { - jsonPath.attr("downloadHash", narInfo->fileHash.to_string()); - } - if (narInfo->fileSize != 0u) { - jsonPath.attr("downloadSize", narInfo->fileSize); - } - if (showClosureSize) { - jsonPath.attr("closureDownloadSize", closureSizes.second); - } - } - } - - } catch (InvalidPath&) { - jsonPath.attr("valid", false); - } - } -} - -std::pair Store::getClosureSize(const Path& storePath) { - uint64_t totalNarSize = 0; - uint64_t totalDownloadSize = 0; - PathSet closure; - computeFSClosure(storePath, closure, false, false); - for (auto& p : closure) { - auto info = queryPathInfo(p); - totalNarSize += info->narSize; - auto narInfo = std::dynamic_pointer_cast( - std::shared_ptr(info)); - if (narInfo) { - totalDownloadSize += narInfo->fileSize; - } - } - return {totalNarSize, totalDownloadSize}; -} - -const Store::Stats& Store::getStats() { - { - auto state_(state.lock()); - stats.pathInfoCacheSize = state_->pathInfoCache.size(); - } - return stats; -} - -absl::Status Store::buildPaths(std::ostream& /* log_sink */, - const PathSet& paths, BuildMode) { - for (auto& path : paths) { - if (isDerivation(path)) { - return absl::Status(absl::StatusCode::kUnimplemented, - "buildPaths is unsupported"); - } - } - - if (queryValidPaths(paths).size() != paths.size()) { - return absl::Status(absl::StatusCode::kUnimplemented, - "buildPaths is unsupported"); - } - - return absl::OkStatus(); -} - -void copyStorePath(ref srcStore, const ref& dstStore, - const Path& storePath, RepairFlag repair, - CheckSigsFlag checkSigs) { - auto srcUri = srcStore->getUri(); - auto dstUri = dstStore->getUri(); - - if (srcUri == "local" || srcUri == "daemon") { - LOG(INFO) << "copying path '" << storePath << "' to '" << dstUri << "'"; - } else { - if (dstUri == "local" || dstUri == "daemon") { - LOG(INFO) << "copying path '" << storePath << "' from '" << srcUri << "'"; - } else { - LOG(INFO) << "copying path '" << storePath << "' from '" 
<< srcUri - << "' to '" << dstUri << "'"; - } - } - - auto info = srcStore->queryPathInfo(storePath); - - uint64_t total = 0; - - if (!info->narHash) { - StringSink sink; - srcStore->narFromPath({storePath}, sink); - auto info2 = make_ref(*info); - info2->narHash = hashString(htSHA256, *sink.s); - if (info->narSize == 0u) { - info2->narSize = sink.s->size(); - } - if (info->ultimate) { - info2->ultimate = false; - } - info = info2; - - StringSource source(*sink.s); - dstStore->addToStore(*info, source, repair, checkSigs); - return; - } - - if (info->ultimate) { - auto info2 = make_ref(*info); - info2->ultimate = false; - info = info2; - } - - auto source = sinkToSource( - [&](Sink& sink) { - LambdaSink wrapperSink([&](const unsigned char* data, size_t len) { - sink(data, len); - total += len; - }); - srcStore->narFromPath({storePath}, wrapperSink); - }, - [&]() { - throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", - storePath, srcStore->getUri()); - }); - - dstStore->addToStore(*info, *source, repair, checkSigs); -} - -void copyPaths(ref srcStore, ref dstStore, - const PathSet& storePaths, RepairFlag repair, - CheckSigsFlag checkSigs, SubstituteFlag substitute) { - PathSet valid = dstStore->queryValidPaths(storePaths, substitute); - - PathSet missing; - for (auto& path : storePaths) { - if (valid.count(path) == 0u) { - missing.insert(path); - } - } - - if (missing.empty()) { - return; - } - - LOG(INFO) << "copying " << missing.size() << " paths"; - - std::atomic nrDone{0}; - std::atomic nrFailed{0}; - std::atomic bytesExpected{0}; - std::atomic nrRunning{0}; - - ThreadPool pool; - - processGraph( - pool, PathSet(missing.begin(), missing.end()), - - [&](const Path& storePath) { - if (dstStore->isValidPath(storePath)) { - nrDone++; - return PathSet(); - } - - auto info = srcStore->queryPathInfo(storePath); - - bytesExpected += info->narSize; - - return info->references; - }, - - [&](const Path& storePath) { - checkInterrupt(); - - if (!dstStore->isValidPath(storePath)) { - MaintainCount mc(nrRunning); - try { - copyStorePath(srcStore, dstStore, storePath, repair, checkSigs); - } catch (Error& e) { - nrFailed++; - if (!settings.keepGoing) { - throw e; - } - LOG(ERROR) << "could not copy " << storePath << ": " << e.what(); - return; - } - } - - nrDone++; - }); -} - -void copyClosure(const ref& srcStore, const ref& dstStore, - const PathSet& storePaths, RepairFlag repair, - CheckSigsFlag checkSigs, SubstituteFlag substitute) { - PathSet closure; - srcStore->computeFSClosure({storePaths}, closure); - copyPaths(srcStore, dstStore, closure, repair, checkSigs, substitute); -} - -ValidPathInfo decodeValidPathInfo(std::istream& str, bool hashGiven) { - ValidPathInfo info; - getline(str, info.path); - if (str.eof()) { - info.path = ""; - return info; - } - if (hashGiven) { - std::string s; - getline(str, s); - auto hash_ = Hash::deserialize(s, htSHA256); - info.narHash = Hash::unwrap_throw(hash_); - getline(str, s); - if (!absl::SimpleAtoi(s, &info.narSize)) { - throw Error("number expected"); - } - } - getline(str, info.deriver); - std::string s; - int n; - getline(str, s); - if (!absl::SimpleAtoi(s, &n)) { - throw Error("number expected"); - } - while ((n--) != 0) { - getline(str, s); - info.references.insert(s); - } - if (!str || str.eof()) { - throw Error("missing input"); - } - return info; -} - -std::string showPaths(const PathSet& paths) { - std::string s; - for (auto& i : paths) { - if (!s.empty()) { - s += ", "; - } - s += "'" + i + "'"; - } - return s; -} - -std::string 
ValidPathInfo::fingerprint() const { - if (narSize == 0 || !narHash) { - throw Error(format("cannot calculate fingerprint of path '%s' because its " - "size/hash is not known") % - path); - } - return "1;" + path + ";" + narHash.to_string(Base32) + ";" + - std::to_string(narSize) + ";" + concatStringsSep(",", references); -} - -void ValidPathInfo::sign(const SecretKey& secretKey) { - sigs.insert(secretKey.signDetached(fingerprint())); -} - -bool ValidPathInfo::isContentAddressed(const Store& store) const { - auto warn = [&]() { - LOG(ERROR) << "warning: path '" << path - << "' claims to be content-addressed but isn't"; - }; - - if (absl::StartsWith(ca, "text:")) { - auto hash_ = Hash::deserialize(std::string_view(ca).substr(5)); - Hash hash = Hash::unwrap_throw(hash_); - if (store.makeTextPath(storePathToName(path), hash, references) == path) { - return true; - } - warn(); - - } - - else if (absl::StartsWith(ca, "fixed:")) { - bool recursive = ca.compare(6, 2, "r:") == 0; - auto hash_ = - Hash::deserialize(std::string_view(ca).substr(recursive ? 8 : 6)); - Hash hash = Hash::unwrap_throw(hash_); - if (references.empty() && - store.makeFixedOutputPath(recursive, hash, storePathToName(path)) == - path) { - return true; - } - warn(); - } - - return false; -} - -size_t ValidPathInfo::checkSignatures(const Store& store, - const PublicKeys& publicKeys) const { - if (isContentAddressed(store)) { - return maxSigs; - } - - size_t good = 0; - for (auto& sig : sigs) { - if (checkSignature(publicKeys, sig)) { - good++; - } - } - return good; -} - -bool ValidPathInfo::checkSignature(const PublicKeys& publicKeys, - const std::string& sig) const { - return verifyDetached(fingerprint(), sig, publicKeys); -} - -Strings ValidPathInfo::shortRefs() const { - Strings refs; - for (auto& r : references) { - refs.push_back(baseNameOf(r)); - } - return refs; -} - -std::string makeFixedOutputCA(bool recursive, const Hash& hash) { - return "fixed:" + (recursive ? std::string("r:") : "") + hash.to_string(); -} - -void Store::addToStore(const ValidPathInfo& info, Source& narSource, - RepairFlag repair, CheckSigsFlag checkSigs, - std::shared_ptr accessor) { - addToStore(info, make_ref(narSource.drain()), repair, checkSigs, - std::move(accessor)); -} - -void Store::addToStore(const ValidPathInfo& info, const ref& nar, - RepairFlag repair, CheckSigsFlag checkSigs, - std::shared_ptr accessor) { - StringSource source(*nar); - addToStore(info, source, repair, checkSigs, std::move(accessor)); -} - -} // namespace nix - -#include "libstore/local-store.hh" -#include "libstore/remote-store.hh" - -namespace nix { - -RegisterStoreImplementation::Implementations* - RegisterStoreImplementation::implementations = nullptr; - -/* Split URI into protocol+hierarchy part and its parameter set. */ -std::pair splitUriAndParams( - const std::string& uri_) { - auto uri(uri_); - Store::Params params; - auto q = uri.find('?'); - if (q != std::string::npos) { - Strings parts = - absl::StrSplit(uri.substr(q + 1), absl::ByChar('&'), absl::SkipEmpty()); - for (const auto& s : parts) { - auto e = s.find('='); - if (e != std::string::npos) { - auto value = s.substr(e + 1); - std::string decoded; - for (size_t i = 0; i < value.size();) { - if (value[i] == '%') { - if (i + 2 >= value.size()) { - throw Error("invalid URI parameter '%s'", value); - } - try { - decoded += std::stoul(std::string(value, i + 1, 2), nullptr, 16); - i += 3; - } catch (...) 
{ - throw Error("invalid URI parameter '%s'", value); - } - } else { - decoded += value[i++]; - } - } - params[s.substr(0, e)] = decoded; - } - } - uri = uri_.substr(0, q); - } - return {uri, params}; -} - -ref openStore(const std::string& uri_, - const Store::Params& extraParams) { - auto [uri, uriParams] = splitUriAndParams(uri_); - auto params = extraParams; - params.insert(uriParams.begin(), uriParams.end()); - - for (const auto& fun : *RegisterStoreImplementation::implementations) { - auto store = fun(uri, params); - if (store) { - store->warnUnknownSettings(); - return ref(store); - } - } - - throw Error("don't know how to open Nix store '%s'", uri); -} - -StoreType getStoreType(const std::string& uri, const std::string& stateDir) { - if (uri == "daemon") { - return tDaemon; - } - if (uri == "local" || absl::StartsWith(uri, "/")) { - return tLocal; - } else if (uri.empty() || uri == "auto") { - if (access(stateDir.c_str(), R_OK | W_OK) == 0) { - return tLocal; - } - if (pathExists(settings.nixDaemonSocketFile)) { - return tDaemon; - } else { - return tLocal; - } - } else { - return tOther; - } -} - -static RegisterStoreImplementation regStore([](const std::string& uri, - const Store::Params& params) - -> std::shared_ptr { - switch (getStoreType(uri, get(params, "state", settings.nixStateDir))) { - case tDaemon: { - auto daemon_socket_uri = - absl::StrCat("unix://", settings.nixDaemonSocketFile); - auto channel = grpc::CreateChannel(daemon_socket_uri, - grpc::InsecureChannelCredentials()); - return std::shared_ptr(std::make_shared( - daemon_socket_uri, params, proto::WorkerService::NewStub(channel))); - } - case tLocal: { - Store::Params params2 = params; - if (absl::StartsWith(uri, "/")) { - params2["root"] = uri; - } - return std::shared_ptr(std::make_shared(params2)); - } - default: - return nullptr; - } -}); - -std::list> getDefaultSubstituters() { - static auto stores([]() { - std::list> stores; - - StringSet done; - - auto addStore = [&](const std::string& uri) { - if (done.count(uri) != 0u) { - return; - } - done.insert(uri); - try { - stores.push_back(openStore(uri)); - } catch (Error& e) { - LOG(WARNING) << e.what(); - } - }; - - for (const auto& uri : settings.substituters.get()) { - addStore(uri); - } - - for (const auto& uri : settings.extraSubstituters.get()) { - addStore(uri); - } - - stores.sort([](ref& a, ref& b) { - return a->getPriority() < b->getPriority(); - }); - - return stores; - }()); - - return stores; -} - -} // namespace nix diff --git a/third_party/nix/src/libstore/store-api.hh b/third_party/nix/src/libstore/store-api.hh deleted file mode 100644 index eb18511e60..0000000000 --- a/third_party/nix/src/libstore/store-api.hh +++ /dev/null @@ -1,816 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -#include "libproto/worker.pb.h" -#include "libstore/crypto.hh" -#include "libstore/globals.hh" -#include "libutil/config.hh" -#include "libutil/hash.hh" -#include "libutil/lru-cache.hh" -#include "libutil/serialise.hh" -#include "libutil/sync.hh" - -namespace nix { - -// Create a no-op stream buffer used to discard build output in cases -// where we don't have a build log sink to thread through. -// -// TODO(tazjin): Get rid of this and do *something* with those logs. 
-std::ostream DiscardLogsSink(); - -MakeError(SubstError, Error); -MakeError(BuildError, Error); /* denotes a permanent build failure */ -MakeError(InvalidPath, Error); -MakeError(Unsupported, Error); -MakeError(SubstituteGone, Error); -MakeError(SubstituterDisabled, Error); - -struct BasicDerivation; -struct Derivation; -class FSAccessor; -class NarInfoDiskCache; -class Store; -class JSONPlaceholder; - -enum RepairFlag : bool { NoRepair = false, Repair = true }; -enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true }; -enum SubstituteFlag : bool { NoSubstitute = false, Substitute = true }; -enum AllowInvalidFlag : bool { DisallowInvalid = false, AllowInvalid = true }; - -/* Size of the hash part of store paths, in base-32 characters. */ -const size_t storePathHashLen = 32; // i.e. 160 bits - -/* Magic header of exportPath() output (obsolete). */ -const uint32_t exportMagic = 0x4558494e; - -using Roots = std::unordered_map>; - -struct GCOptions { - /* Garbage collector operation: - - - `gcReturnLive': return the set of paths reachable from - (i.e. in the closure of) the roots. - - - `gcReturnDead': return the set of paths not reachable from - the roots. - - - `gcDeleteDead': actually delete the latter set. - - - `gcDeleteSpecific': delete the paths listed in - `pathsToDelete', insofar as they are not reachable. - */ - using GCAction = enum { - gcReturnLive, - gcReturnDead, - gcDeleteDead, - gcDeleteSpecific, - }; - - GCAction action{gcDeleteDead}; - - /* If `ignoreLiveness' is set, then reachability from the roots is - ignored (dangerous!). However, the paths must still be - unreferenced *within* the store (i.e., there can be no other - store paths that depend on them). */ - bool ignoreLiveness{false}; - - /* For `gcDeleteSpecific', the paths to delete. */ - PathSet pathsToDelete; - - /* Stop after at least `maxFreed' bytes have been freed. */ - unsigned long long maxFreed{std::numeric_limits::max()}; - - [[nodiscard]] const proto::GCAction ActionToProto() const; -}; - -std::optional GCActionFromProto( - nix::proto::GCAction gc_action); - -struct GCResults { - /* Depending on the action, the GC roots, or the paths that would - be or have been deleted. */ - PathSet paths; - - /* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the - number of bytes that would be or was freed. */ - unsigned long long bytesFreed = 0; -}; - -struct SubstitutablePathInfo { - Path deriver; - PathSet references; - unsigned long long downloadSize; /* 0 = unknown or inapplicable */ - unsigned long long narSize; /* 0 = unknown */ -}; - -using SubstitutablePathInfos = std::map; - -struct ValidPathInfo { - Path path; - Path deriver; - Hash narHash; - PathSet references; - time_t registrationTime = 0; - uint64_t narSize = 0; // 0 = unknown - uint64_t id; // internal use only - - /* Whether the path is ultimately trusted, that is, it's a - derivation output that was built locally. */ - bool ultimate = false; - - StringSet sigs; // note: not necessarily verified - - /* If non-empty, an assertion that the path is content-addressed, - i.e., that the store path is computed from a cryptographic hash - of the contents of the path, plus some other bits of data like - the "name" part of the path. Such a path doesn't need - signatures, since we don't have to trust anybody's claim that - the path is the output of a particular derivation. (In the - extensional store model, we have to trust that the *contents* - of an output path of a derivation were actually produced by - that derivation. 
In the intensional model, we have to trust - that a particular output path was produced by a derivation; the - path then implies the contents.) - - Ideally, the content-addressability assertion would just be a - Boolean, and the store path would be computed from - ‘storePathToName(path)’, ‘narHash’ and ‘references’. However, - 1) we've accumulated several types of content-addressed paths - over the years; and 2) fixed-output derivations support - multiple hash algorithms and serialisation methods (flat file - vs NAR). Thus, ‘ca’ has one of the following forms: - - * ‘text:sha256:’: For paths - computed by makeTextPath() / addTextToStore(). - - * ‘fixed:::’: For paths computed by - makeFixedOutputPath() / addToStore(). - */ - std::string ca; - - bool operator==(const ValidPathInfo& i) const { - return path == i.path && narHash == i.narHash && references == i.references; - } - - /* Return a fingerprint of the store path to be used in binary - cache signatures. It contains the store path, the base-32 - SHA-256 hash of the NAR serialisation of the path, the size of - the NAR, and the sorted references. The size field is strictly - speaking superfluous, but might prevent endless/excessive data - attacks. */ - std::string fingerprint() const; - - void sign(const SecretKey& secretKey); - - /* Return true iff the path is verifiably content-addressed. */ - bool isContentAddressed(const Store& store) const; - - static const size_t maxSigs = std::numeric_limits::max(); - - /* Return the number of signatures on this .narinfo that were - produced by one of the specified keys, or maxSigs if the path - is content-addressed. */ - size_t checkSignatures(const Store& store, - const PublicKeys& publicKeys) const; - - /* Verify a single signature. */ - bool checkSignature(const PublicKeys& publicKeys, - const std::string& sig) const; - - Strings shortRefs() const; - - virtual ~ValidPathInfo() {} -}; - -using ValidPathInfos = std::list; - -enum BuildMode { bmNormal, bmRepair, bmCheck }; - -// Convert the proto version of a `nix::proto::BuildMode` to its corresponding -// nix `BuildMode` -std::optional BuildModeFrom(nix::proto::BuildMode mode); - -// Convert a `nix::BuildMode` to its corresponding proto representation -nix::proto::BuildMode BuildModeToProto(BuildMode mode); - -struct BuildResult { - /* Note: don't remove status codes, and only add new status codes - at the end of the list, to prevent client/server - incompatibilities in the nix-store --serve protocol. */ - enum Status { - Built = 0, - Substituted, - AlreadyValid, - PermanentFailure, - InputRejected, - OutputRejected, - TransientFailure, // possibly transient - CachedFailure, // no longer used - TimedOut, - MiscFailure, - DependencyFailed, - LogLimitExceeded, - NotDeterministic, - } status = MiscFailure; - std::string errorMsg; - - /* How many times this build was performed. */ - unsigned int timesBuilt = 0; - - /* If timesBuilt > 1, whether some builds did not produce the same - result. (Note that 'isNonDeterministic = false' does not mean - the build is deterministic, just that we don't have evidence of - non-determinism.) */ - bool isNonDeterministic = false; - - /* The start/stop times of the build (or one of the rounds, if it - was repeated). 
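A small sketch, not part of the removed code itself, of how a caller consumes the BuildResult type defined in this stretch of the header: success() covers only the three healthy statuses, everything else carries a message in errorMsg, and isNonDeterministic is only meaningful when the build ran more than once.

    // Sketch; LOG(...) is the glog macro already used throughout libstore.
    #include <glog/logging.h>

    #include "libstore/store-api.hh"

    void reportBuild(const nix::Path& drvPath, nix::BuildResult result) {
      if (!result.success()) {
        LOG(ERROR) << "building '" << drvPath << "' failed: " << result.errorMsg;
        return;
      }

      if (result.timesBuilt > 1 && result.isNonDeterministic) {
        LOG(WARNING) << "'" << drvPath
                     << "' succeeded but was not bit-identical across rounds";
      }
    }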
*/ - time_t startTime = 0, stopTime = 0; - - bool success() { - return status == Built || status == Substituted || status == AlreadyValid; - } - - // Convert the status of this `BuildResult` to its corresponding - // `nix::proto::BuildStatus` - nix::proto::BuildStatus status_to_proto(); - - static std::optional FromProto( - const nix::proto::BuildResult& resp); -}; - -class Store : public std::enable_shared_from_this, public Config { - public: - using Params = std::map; - - const PathSetting storeDir_{this, false, settings.nixStore, "store", - "path to the Nix store"}; - const Path storeDir = storeDir_; - - const Setting pathInfoCacheSize{ - this, 65536, "path-info-cache-size", - "size of the in-memory store path information cache"}; - - const Setting isTrusted{ - this, false, "trusted", - "whether paths from this store can be used as substitutes even when they " - "lack trusted signatures"}; - - protected: - struct State { - LRUCache> pathInfoCache; - }; - - Sync state; - - std::shared_ptr diskCache; - - Store(const Params& params); - - public: - virtual ~Store() {} - - virtual std::string getUri() = 0; - - /* Return true if ‘path’ is in the Nix store (but not the Nix - store itself). */ - bool isInStore(const Path& path) const; - - /* Return true if ‘path’ is a store path, i.e. a direct child of - the Nix store. */ - bool isStorePath(const Path& path) const; - - /* Throw an exception if ‘path’ is not a store path. */ - void assertStorePath(const Path& path) const; - - /* Chop off the parts after the top-level store name, e.g., - /nix/store/abcd-foo/bar => /nix/store/abcd-foo. */ - Path toStorePath(const Path& path) const; - - /* Follow symlinks until we end up with a path in the Nix store. */ - Path followLinksToStore(const Path& path) const; - - /* Same as followLinksToStore(), but apply toStorePath() to the - result. */ - Path followLinksToStorePath(const Path& path) const; - - /* Constructs a unique store path name. */ - Path makeStorePath(const std::string& type, const Hash& hash, - const std::string& name) const; - - Path makeOutputPath(const std::string& id, const Hash& hash, - const std::string& name) const; - - Path makeFixedOutputPath(bool recursive, const Hash& hash, - const std::string& name) const; - - Path makeTextPath(const std::string& name, const Hash& hash, - const PathSet& references) const; - - /* This is the preparatory part of addToStore(); it computes the - store path to which srcPath is to be copied. Returns the store - path and the cryptographic hash of the contents of srcPath. */ - std::pair computeStorePathForPath( - const std::string& name, const Path& srcPath, bool recursive = true, - HashType hashAlgo = htSHA256, - PathFilter& filter = defaultPathFilter) const; - - /* Preparatory part of addTextToStore(). - - !!! Computation of the path should take the references given to - addTextToStore() into account, otherwise we have a (relatively - minor) security hole: a caller can register a source file with - bogus references. If there are too many references, the path may - not be garbage collected when it has to be (not really a problem, - the caller could create a root anyway), or it may be garbage - collected when it shouldn't be (more serious). - - Hashing the references would solve this (bogus references would - simply yield a different store path, so other users wouldn't be - affected), but it has some backwards compatibility issues (the - hashing scheme changes), so I'm not doing that for now. 
*/ - Path computeStorePathForText(const std::string& name, const std::string& s, - const PathSet& references) const; - - /* Check whether a path is valid. */ - bool isValidPath(const Path& path); - - protected: - virtual bool isValidPathUncached(const Path& path); - - public: - /* Query which of the given paths is valid. Optionally, try to - substitute missing paths. */ - virtual PathSet queryValidPaths(const PathSet& paths, - SubstituteFlag maybeSubstitute); - - PathSet queryValidPaths(const PathSet& paths) { - return queryValidPaths(paths, NoSubstitute); - } - - /* Query the set of all valid paths. Note that for some store - backends, the name part of store paths may be omitted - (i.e. you'll get /nix/store/ rather than - /nix/store/-). Use queryPathInfo() to obtain the - full store path. */ - virtual PathSet queryAllValidPaths() { unsupported("queryAllValidPaths"); } - - /* Query information about a valid path. It is permitted to omit - the name part of the store path. */ - ref queryPathInfo(const Path& path); - - /* Asynchronous version of queryPathInfo(). */ - void queryPathInfo(const Path& path, - Callback> callback) noexcept; - - protected: - virtual void queryPathInfoUncached( - const Path& path, - Callback> callback) noexcept = 0; - - public: - /* Queries the set of incoming FS references for a store path. - The result is not cleared. */ - virtual void queryReferrers(const Path& path, PathSet& referrers) { - unsupported("queryReferrers"); - } - - /* Return all currently valid derivations that have `path' as an - output. (Note that the result of `queryDeriver()' is the - derivation that was actually used to produce `path', which may - not exist anymore.) */ - virtual PathSet queryValidDerivers(const Path& path) { return {}; }; - - /* Query the outputs of the derivation denoted by `path'. */ - virtual PathSet queryDerivationOutputs(const Path& path) { - unsupported("queryDerivationOutputs"); - } - - /* Query the output names of the derivation denoted by `path'. */ - virtual StringSet queryDerivationOutputNames(const Path& path) { - unsupported("queryDerivationOutputNames"); - } - - /* Query the full store path given the hash part of a valid store - path, or "" if the path doesn't exist. */ - virtual Path queryPathFromHashPart(const std::string& hashPart) = 0; - - /* Query which of the given paths have substitutes. */ - virtual PathSet querySubstitutablePaths(const PathSet& paths) { return {}; }; - - /* Query substitute info (i.e. references, derivers and download - sizes) of a set of paths. If a path does not have substitute - info, it's omitted from the resulting ‘infos’ map. */ - virtual void querySubstitutablePathInfos(const PathSet& paths, - SubstitutablePathInfos& infos) { - return; - }; - - virtual bool wantMassQuery() { return false; } - - /* Import a path into the store. */ - virtual void addToStore(const ValidPathInfo& info, Source& narSource, - RepairFlag repair = NoRepair, - CheckSigsFlag checkSigs = CheckSigs, - std::shared_ptr accessor = 0); - - // FIXME: remove - virtual void addToStore(const ValidPathInfo& info, - const ref& nar, - RepairFlag repair = NoRepair, - CheckSigsFlag checkSigs = CheckSigs, - std::shared_ptr accessor = 0); - - /* Copy the contents of a path to the store and register the - validity of the resulting path. The resulting path is returned. - The function object `filter' can be used to exclude files (see - libutil/archive.hh). 
If recursive is set to true, the path will be treated - as a directory (eg cp -r vs cp) */ - virtual Path addToStore(const std::string& name, const Path& srcPath, - bool recursive = true, HashType hashAlgo = htSHA256, - PathFilter& filter = defaultPathFilter, - RepairFlag repair = NoRepair) = 0; - - /* Like addToStore, but the contents written to the output path is - a regular file containing the given string. */ - virtual Path addTextToStore(const std::string& name, const std::string& s, - const PathSet& references, - RepairFlag repair = NoRepair) = 0; - - /* Write a NAR dump of a store path. */ - virtual void narFromPath(const Path& path, Sink& sink) = 0; - - /* For each path, if it's a derivation, build it. Building a - derivation means ensuring that the output paths are valid. If - they are already valid, this is a no-op. Otherwise, validity - can be reached in two ways. First, if the output paths is - substitutable, then build the path that way. Second, the - output paths can be created by running the builder, after - recursively building any sub-derivations. For inputs that are - not derivations, substitute them. */ - [[nodiscard]] virtual absl::Status buildPaths(std::ostream& log_sink, - const PathSet& paths, - BuildMode build_mode); - - [[nodiscard]] absl::Status buildPaths(std::ostream& log_sink, - const PathSet& paths) { - return buildPaths(log_sink, paths, bmNormal); - } - - /* Build a single non-materialized derivation (i.e. not from an - on-disk .drv file). Note that ‘drvPath’ is only used for - informational purposes. */ - // TODO(tazjin): Thread std::ostream through here, too. - virtual BuildResult buildDerivation(std::ostream& log_sink, - const Path& drvPath, - const BasicDerivation& drv, - BuildMode buildMode) = 0; - - BuildResult buildDerivation(std::ostream& log_sink, const Path& drvPath, - const BasicDerivation& drv) { - return buildDerivation(log_sink, drvPath, drv, bmNormal); - } - - /* Ensure that a path is valid. If it is not currently valid, it - may be made valid by running a substitute (if defined for the - path). */ - virtual void ensurePath(const Path& path) = 0; - - /* Add a store path as a temporary root of the garbage collector. - The root disappears as soon as we exit. */ - virtual void addTempRoot(const Path& path) { unsupported("addTempRoot"); } - - /* Add an indirect root, which is merely a symlink to `path' from - /nix/var/nix/gcroots/auto/. `path' is supposed - to be a symlink to a store path. The garbage collector will - automatically remove the indirect root when it finds that - `path' has disappeared. */ - virtual void addIndirectRoot(const Path& path) { - unsupported("addIndirectRoot"); - } - - /* Acquire the global GC lock, then immediately release it. This - function must be called after registering a new permanent root, - but before exiting. Otherwise, it is possible that a running - garbage collector doesn't see the new root and deletes the - stuff we've just built. By acquiring the lock briefly, we - ensure that either: - - - The collector is already running, and so we block until the - collector is finished. The collector will know about our - *temporary* locks, which should include whatever it is we - want to register as a permanent lock. - - - The collector isn't running, or it's just started but hasn't - acquired the GC lock yet. In that case we get and release - the lock right away, then exit. The collector scans the - permanent root and sees our's. - - In either case the permanent root is seen by the collector. 
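A rough standalone sketch of the build order described for buildPaths() above (already valid: no-op; substitutable: fetch a substitute; otherwise build the inputs recursively, then run the builder). The Actions struct and its callbacks are invented stand-ins for the example, not the real scheduler.

// Illustrative sketch of the decision order described for buildPaths().
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Actions {
  std::function<bool(const std::string&)> isValid;
  std::function<bool(const std::string&)> canSubstitute;
  std::function<std::vector<std::string>(const std::string&)> inputsOf;
  std::function<void(const std::string&)> substitute;
  std::function<void(const std::string&)> runBuilder;
};

void ensureValid(const std::string& outputPath, const Actions& a) {
  if (a.isValid(outputPath)) return;    // already valid: no-op
  if (a.canSubstitute(outputPath)) {    // first choice: use a substitute
    a.substitute(outputPath);
    return;
  }
  for (const auto& input : a.inputsOf(outputPath))  // recurse into inputs
    ensureValid(input, a);
  a.runBuilder(outputPath);             // finally, run the builder
}

int main() {
  Actions a{
      [](const std::string& p) { return p == "lib"; },
      [](const std::string& p) { return p == "dep"; },
      [](const std::string& p) {
        return p == "app" ? std::vector<std::string>{"lib", "dep"}
                          : std::vector<std::string>{};
      },
      [](const std::string& p) { std::cout << "substitute " << p << "\n"; },
      [](const std::string& p) { std::cout << "build " << p << "\n"; }};
  ensureValid("app", a);
}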
*/ - virtual void syncWithGC(){}; - - /* Find the roots of the garbage collector. Each root is a pair - (link, storepath) where `link' is the path of the symlink - outside of the Nix store that point to `storePath'. If - 'censor' is true, privacy-sensitive information about roots - found in /proc is censored. */ - virtual Roots findRoots(bool censor) { unsupported("findRoots"); } - - /* Perform a garbage collection. */ - virtual void collectGarbage(const GCOptions& options, GCResults& results) { - unsupported("collectGarbage"); - } - - /* Return a string representing information about the path that - can be loaded into the database using `nix-store --load-db' or - `nix-store --register-validity'. */ - std::string makeValidityRegistration(const PathSet& paths, bool showDerivers, - bool showHash); - - /* Write a JSON representation of store path metadata, such as the - hash and the references. If ‘includeImpureInfo’ is true, - variable elements such as the registration time are - included. If ‘showClosureSize’ is true, the closure size of - each path is included. */ - void pathInfoToJSON(JSONPlaceholder& jsonOut, const PathSet& storePaths, - bool includeImpureInfo, bool showClosureSize, - AllowInvalidFlag allowInvalid = DisallowInvalid); - - /* Return the size of the closure of the specified path, that is, - the sum of the size of the NAR serialisation of each path in - the closure. */ - std::pair getClosureSize(const Path& storePath); - - /* Optimise the disk space usage of the Nix store by hard-linking files - with the same contents. */ - virtual void optimiseStore(){}; - - /* Check the integrity of the Nix store. Returns true if errors - remain. */ - virtual bool verifyStore(bool checkContents, RepairFlag repair = NoRepair) { - return false; - }; - - /* Return an object to access files in the Nix store. */ - virtual ref getFSAccessor() { unsupported("getFSAccessor"); } - - /* Add signatures to the specified store path. The signatures are - not verified. */ - virtual void addSignatures(const Path& storePath, const StringSet& sigs) { - unsupported("addSignatures"); - } - - /* Utility functions. */ - - /* Read a derivation, after ensuring its existence through - ensurePath(). */ - Derivation derivationFromPath(const Path& drvPath); - - /* Place in `out' the set of all store paths in the file system - closure of `storePath'; that is, all paths than can be directly - or indirectly reached from it. `out' is not cleared. If - `flipDirection' is true, the set of paths that can reach - `storePath' is returned; that is, the closures under the - `referrers' relation instead of the `references' relation is - returned. */ - virtual void computeFSClosure(const PathSet& paths, PathSet& paths_, - bool flipDirection = false, - bool includeOutputs = false, - bool includeDerivers = false); - - void computeFSClosure(const Path& path, PathSet& paths_, - bool flipDirection = false, bool includeOutputs = false, - bool includeDerivers = false); - - /* Given a set of paths that are to be built, return the set of - derivations that will be built, and the set of output paths - that will be substituted. */ - virtual void queryMissing(const PathSet& targets, PathSet& willBuild, - PathSet& willSubstitute, PathSet& unknown, - unsigned long long& downloadSize, - unsigned long long& narSize); - - /* Sort a set of paths topologically under the references - relation. If p refers to q, then p precedes q in this list. 
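A standalone sketch of the closure computation described for computeFSClosure() above; the references graph is a plain in-memory map here, and flipDirection would simply mean walking a reversed graph.

// Illustrative sketch: collect every path reachable from the start set
// through the references relation.
#include <deque>
#include <iostream>
#include <map>
#include <set>
#include <string>

using Graph = std::map<std::string, std::set<std::string>>;

void computeClosure(const std::set<std::string>& start, const Graph& edges,
                    std::set<std::string>& out) {  // 'out' is not cleared
  std::deque<std::string> todo(start.begin(), start.end());
  while (!todo.empty()) {
    std::string p = todo.front();
    todo.pop_front();
    if (!out.insert(p).second) continue;  // already seen
    auto it = edges.find(p);
    if (it != edges.end())
      for (const auto& q : it->second) todo.push_back(q);
  }
}

int main() {
  // references relation: p -> paths that p refers to
  Graph refs{{"app", {"lib"}}, {"lib", {"glibc"}}, {"glibc", {}}};
  std::set<std::string> closure;
  computeClosure({"app"}, refs, closure);
  for (const auto& p : closure) std::cout << p << "\n";
}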
*/ - Paths topoSortPaths(const PathSet& paths); - - /* Export multiple paths in the format expected by ‘nix-store - --import’. */ - void exportPaths(const Paths& paths, Sink& sink); - - void exportPath(const Path& path, Sink& sink); - - /* Import a sequence of NAR dumps created by exportPaths() into - the Nix store. Optionally, the contents of the NARs are - preloaded into the specified FS accessor to speed up subsequent - access. */ - Paths importPaths(Source& source, const std::shared_ptr& accessor, - CheckSigsFlag checkSigs = CheckSigs); - - struct Stats { - std::atomic narInfoRead{0}; - std::atomic narInfoReadAverted{0}; - std::atomic narInfoMissing{0}; - std::atomic narInfoWrite{0}; - std::atomic pathInfoCacheSize{0}; - std::atomic narRead{0}; - std::atomic narReadBytes{0}; - std::atomic narReadCompressedBytes{0}; - std::atomic narWrite{0}; - std::atomic narWriteAverted{0}; - std::atomic narWriteBytes{0}; - std::atomic narWriteCompressedBytes{0}; - std::atomic narWriteCompressionTimeMs{0}; - }; - - const Stats& getStats(); - - /* Return the build log of the specified store path, if available, - or null otherwise. */ - virtual std::shared_ptr getBuildLog(const Path& path) { - return nullptr; - } - - /* Hack to allow long-running processes like hydra-queue-runner to - occasionally flush their path info cache. */ - void clearPathInfoCache() { state.lock()->pathInfoCache.clear(); } - - /* Establish a connection to the store, for store types that have - a notion of connection. Otherwise this is a no-op. */ - virtual void connect(){}; - - /* Get the protocol version of this store or it's connection. */ - virtual unsigned int getProtocol() { return 0; }; - - /* Get the priority of the store, used to order substituters. In - particular, binary caches can specify a priority field in their - "nix-cache-info" file. Lower value means higher priority. */ - virtual int getPriority() { return 0; } - - virtual Path toRealPath(const Path& storePath) { return storePath; } - - virtual void createUser(const std::string& userName, uid_t userId) {} - - protected: - Stats stats; - - /* Unsupported methods. */ - [[noreturn]] void unsupported(const std::string& op) { - throw Unsupported("operation '%s' is not supported by store '%s'", op, - getUri()); - } -}; - -class LocalFSStore : public virtual Store { - public: - // FIXME: the (Store*) cast works around a bug in gcc that causes - // it to emit the call to the Option constructor. Clang works fine - // either way. - const PathSetting rootDir{(Store*)this, true, "", "root", - "directory prefixed to all other paths"}; - const PathSetting stateDir{ - (Store*)this, false, - rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir, "state", - "directory where Nix will store state"}; - const PathSetting logDir{ - (Store*)this, false, - rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir, "log", - "directory where Nix will store state"}; - - const static std::string drvsLogDir; - - LocalFSStore(const Params& params); - - void narFromPath(const Path& path, Sink& sink) override; - ref getFSAccessor() override; - - /* Register a permanent GC root. 
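A standalone sketch of the ordering contract of topoSortPaths() above (if p refers to q, then p comes before q), using a depth-first search over an in-memory references map.

// Illustrative sketch of the topological sort under the references relation.
#include <algorithm>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

using Graph = std::map<std::string, std::set<std::string>>;  // p -> refs(p)

void visit(const std::string& p, const Graph& refs,
           std::set<std::string>& seen, std::vector<std::string>& out) {
  if (!seen.insert(p).second) return;
  auto it = refs.find(p);
  if (it != refs.end())
    for (const auto& q : it->second) visit(q, refs, seen, out);
  out.push_back(p);  // post-order: all references of p are already in out
}

std::vector<std::string> topoSortPaths(const std::set<std::string>& paths,
                                       const Graph& refs) {
  std::set<std::string> seen;
  std::vector<std::string> out;
  for (const auto& p : paths) visit(p, refs, seen, out);
  std::reverse(out.begin(), out.end());  // referrers before their references
  return out;
}

int main() {
  Graph refs{{"app", {"lib", "glibc"}}, {"lib", {"glibc"}}, {"glibc", {}}};
  for (const auto& p : topoSortPaths({"app", "lib", "glibc"}, refs))
    std::cout << p << "\n";  // app, lib, glibc
}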
*/ - Path addPermRoot(const Path& storePath, const Path& gcRoot, bool indirect, - bool allowOutsideRootsDir = false); - - virtual Path getRealStoreDir() { return storeDir; } - - Path toRealPath(const Path& storePath) override { - assert(isInStore(storePath)); - return getRealStoreDir() + "/" + - std::string(storePath, storeDir.size() + 1); - } - - std::shared_ptr getBuildLog(const Path& path) override; -}; - -/* Extract the name part of the given store path. */ -std::string storePathToName(const Path& path); - -/* Extract the hash part of the given store path. */ -std::string storePathToHash(const Path& path); - -/* Check whether ‘name’ is a valid store path name part, i.e. contains - only the characters [a-zA-Z0-9\+\-\.\_\?\=] and doesn't start with - a dot. */ -void checkStoreName(const std::string& name); - -/* Copy a path from one store to another. */ -void copyStorePath(ref srcStore, const ref& dstStore, - const Path& storePath, RepairFlag repair = NoRepair, - CheckSigsFlag checkSigs = CheckSigs); - -/* Copy store paths from one store to another. The paths may be copied - in parallel. They are copied in a topologically sorted order - (i.e. if A is a reference of B, then A is copied before B), but - the set of store paths is not automatically closed; use - copyClosure() for that. */ -void copyPaths(ref srcStore, ref dstStore, - const PathSet& storePaths, RepairFlag repair = NoRepair, - CheckSigsFlag checkSigs = CheckSigs, - SubstituteFlag substitute = NoSubstitute); - -/* Copy the closure of the specified paths from one store to another. */ -void copyClosure(const ref& srcStore, const ref& dstStore, - const PathSet& storePaths, RepairFlag repair = NoRepair, - CheckSigsFlag checkSigs = CheckSigs, - SubstituteFlag substitute = NoSubstitute); - -/* Remove the temporary roots file for this process. Any temporary - root becomes garbage after this point unless it has been registered - as a (permanent) root. */ -void removeTempRoots(); - -/* Return a Store object to access the Nix store denoted by - ‘uri’ (slight misnomer...). Supported values are: - - * ‘local’: The Nix store in /nix/store and database in - /nix/var/nix/db, accessed directly. - - * ‘daemon’: The Nix store accessed via a Unix domain socket - connection to nix-daemon. - - * ‘unix://’: The Nix store accessed via a Unix domain socket - connection to nix-daemon, with the socket located at . - - * ‘auto’ or ‘’: Equivalent to ‘local’ or ‘daemon’ depending on - whether the user has write access to the local Nix - store/database. - - * ‘file://’: A binary cache stored in . - - * ‘https://’: A binary cache accessed via HTTP. - - * ‘s3://’: A writable binary cache stored on Amazon's Simple - Storage Service. - - * ‘ssh://[user@]’: A remote Nix store accessed by running - ‘nix-store --serve’ via SSH. - - You can pass parameters to the store implementation by appending - ‘?key=value&key=value&...’ to the URI. -*/ -ref openStore(const std::string& uri = settings.storeUri.get(), - const Store::Params& extraParams = Store::Params()); - -enum StoreType { tDaemon, tLocal, tOther }; - -StoreType getStoreType(const std::string& uri = settings.storeUri.get(), - const std::string& stateDir = settings.nixStateDir); - -/* Return the default substituter stores, defined by the - ‘substituters’ option and various legacy options. */ -std::list> getDefaultSubstituters(); - -/* Store implementation registration. 
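A standalone sketch of the '?key=value&key=value&...' parameter syntax described for openStore() above (compare splitUriAndParams() below); error handling is omitted and the example URI is invented.

// Illustrative sketch: split a store URI into the URI proper and its
// parameter map.
#include <iostream>
#include <map>
#include <string>
#include <utility>

using Params = std::map<std::string, std::string>;

std::pair<std::string, Params> splitUriAndParams(const std::string& uri) {
  Params params;
  auto q = uri.find('?');
  if (q == std::string::npos) return {uri, params};
  std::string rest = uri.substr(q + 1);
  size_t pos = 0;
  while (pos < rest.size()) {
    auto amp = rest.find('&', pos);
    std::string kv = rest.substr(
        pos, amp == std::string::npos ? std::string::npos : amp - pos);
    auto eq = kv.find('=');
    if (eq != std::string::npos) params[kv.substr(0, eq)] = kv.substr(eq + 1);
    if (amp == std::string::npos) break;
    pos = amp + 1;
  }
  return {uri.substr(0, q), params};
}

int main() {
  auto [base, params] =
      splitUriAndParams("ssh://builder?compress=true&priority=30");
  std::cout << base << "\n";
  for (const auto& [k, v] : params) std::cout << k << "=" << v << "\n";
}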
*/ -using OpenStore = std::function(const std::string&, - const Store::Params&)>; - -struct RegisterStoreImplementation { - using Implementations = std::vector; - static Implementations* implementations; - - RegisterStoreImplementation(OpenStore fun) { - if (!implementations) { - implementations = new Implementations; - } - implementations->push_back(fun); - } -}; - -/* Display a set of paths in human-readable form (i.e., between quotes - and separated by commas). */ -std::string showPaths(const PathSet& paths); - -ValidPathInfo decodeValidPathInfo(std::istream& str, bool hashGiven = false); - -/* Compute the content-addressability assertion (ValidPathInfo::ca) - for paths created by makeFixedOutputPath() / addToStore(). */ -std::string makeFixedOutputCA(bool recursive, const Hash& hash); - -/* Split URI into protocol+hierarchy part and its parameter set. */ -std::pair splitUriAndParams(const std::string& uri); - -} // namespace nix diff --git a/third_party/nix/src/libstore/worker-protocol.hh b/third_party/nix/src/libstore/worker-protocol.hh deleted file mode 100644 index e2f40a449d..0000000000 --- a/third_party/nix/src/libstore/worker-protocol.hh +++ /dev/null @@ -1,68 +0,0 @@ -#pragma once - -#include "libstore/store-api.hh" -#include "libutil/types.hh" - -namespace nix { - -#define WORKER_MAGIC_1 0x6e697863 -#define WORKER_MAGIC_2 0x6478696f - -#define PROTOCOL_VERSION 0x115 -#define GET_PROTOCOL_MAJOR(x) ((x)&0xff00) -#define GET_PROTOCOL_MINOR(x) ((x)&0x00ff) - -typedef enum { - wopIsValidPath = 1, - wopHasSubstitutes = 3, - wopQueryPathHash = 4, // obsolete - wopQueryReferences = 5, // obsolete - wopQueryReferrers = 6, - wopAddToStore = 7, - wopAddTextToStore = 8, - wopBuildPaths = 9, - wopEnsurePath = 10, - wopAddTempRoot = 11, - wopAddIndirectRoot = 12, - wopSyncWithGC = 13, - wopFindRoots = 14, - wopExportPath = 16, // obsolete - wopQueryDeriver = 18, // obsolete - wopSetOptions = 19, - wopCollectGarbage = 20, - wopQuerySubstitutablePathInfo = 21, - wopQueryDerivationOutputs = 22, - wopQueryAllValidPaths = 23, - wopQueryFailedPaths = 24, // obsolete - wopClearFailedPaths = 25, // obsolete - wopQueryPathInfo = 26, - wopImportPaths = 27, // obsolete - wopQueryDerivationOutputNames = 28, - wopQueryPathFromHashPart = 29, - wopQuerySubstitutablePathInfos = 30, - wopQueryValidPaths = 31, - wopQuerySubstitutablePaths = 32, - wopQueryValidDerivers = 33, - wopOptimiseStore = 34, - wopVerifyStore = 35, - wopBuildDerivation = 36, - wopAddSignatures = 37, - wopNarFromPath = 38, - wopAddToStoreNar = 39, - wopQueryMissing = 40, -} WorkerOp; - -#define STDERR_NEXT 0x6f6c6d67 -#define STDERR_READ 0x64617461 // data needed from source -#define STDERR_WRITE 0x64617416 // data for sink -#define STDERR_LAST 0x616c7473 -#define STDERR_ERROR 0x63787470 -#define STDERR_START_ACTIVITY 0x53545254 -#define STDERR_STOP_ACTIVITY 0x53544f50 -#define STDERR_RESULT 0x52534c54 - -Path readStorePath(Store& store, Source& from); -template -T readStorePaths(Store& store, Source& from); - -} // namespace nix diff --git a/third_party/nix/src/libutil/CMakeLists.txt b/third_party/nix/src/libutil/CMakeLists.txt deleted file mode 100644 index 0b36929218..0000000000 --- a/third_party/nix/src/libutil/CMakeLists.txt +++ /dev/null @@ -1,68 +0,0 @@ -# -*- mode: cmake; -*- -add_library(nixutil SHARED) -set_property(TARGET nixutil PROPERTY CXX_STANDARD 17) -include_directories(${PROJECT_BINARY_DIR}) # for config.h -target_compile_features(nixutil PUBLIC cxx_std_17) - -set(HEADER_FILES - affinity.hh - archive.hh - args.hh - 
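A quick standalone check of the worker protocol version macros from worker-protocol.hh above: 0x115 decomposes into major 0x100 and minor 0x15. How the two peers negotiate a common version is not shown here and is only an assumption in the comment.

// Illustrative check of the protocol version macros.
#include <cstdio>

#define PROTOCOL_VERSION 0x115
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

int main() {
  std::printf("major=0x%x minor=0x%x\n",
              (unsigned)GET_PROTOCOL_MAJOR(PROTOCOL_VERSION),
              (unsigned)GET_PROTOCOL_MINOR(PROTOCOL_VERSION));
  // A daemon and client would typically proceed with the lower of their
  // two versions; that negotiation is assumed, not shown here.
}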
compression.hh - config.hh - finally.hh - hash.hh - istringstream_nocopy.hh - json.hh - lazy.hh - lru-cache.hh - monitor-fd.hh - pool.hh - proto.hh - ref.hh - serialise.hh - status.hh - sync.hh - thread-pool.hh - types.hh - util.hh - visitor.hh - xml-writer.hh -) - -target_sources(nixutil - PUBLIC - ${HEADER_FILES} - - PRIVATE - affinity.cc - archive.cc - args.cc - compression.cc - config.cc - hash.cc - json.cc - serialise.cc - thread-pool.cc - util.cc - xml-writer.cc -) - -target_link_libraries(nixutil - nixproto - absl::strings - absl::statusor - glog - BZip2::BZip2 - LibLZMA::LibLZMA - Boost::context - brotlienc - brotlidec - ssl -) - -# Install header files to include/libutil and mark them for automatic -# inclusion in targets that link to this one. -target_include_directories(nixutil PUBLIC "${nix_SOURCE_DIR}/src") -INSTALL(FILES ${HEADER_FILES} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/nix/libutil) -INSTALL(TARGETS nixutil DESTINATION ${CMAKE_INSTALL_LIBDIR}) diff --git a/third_party/nix/src/libutil/affinity.cc b/third_party/nix/src/libutil/affinity.cc deleted file mode 100644 index 03fbe12439..0000000000 --- a/third_party/nix/src/libutil/affinity.cc +++ /dev/null @@ -1,60 +0,0 @@ -#include "libutil/affinity.hh" - -#include - -#include "libutil/types.hh" -#include "libutil/util.hh" - -#if __linux__ -#include -#endif - -namespace nix { - -#if __linux__ -static bool didSaveAffinity = false; -static cpu_set_t savedAffinity; -#endif - -void setAffinityTo(int cpu) { -#if __linux__ - if (sched_getaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1) { - return; - } - - didSaveAffinity = true; - DLOG(INFO) << "locking this thread to CPU " << cpu; - cpu_set_t newAffinity; - CPU_ZERO(&newAffinity); - CPU_SET(cpu, &newAffinity); - if (sched_setaffinity(0, sizeof(cpu_set_t), &newAffinity) == -1) { - LOG(ERROR) << "failed to lock thread to CPU " << cpu; - } -#endif -} - -int lockToCurrentCPU() { -#if __linux__ - int cpu = sched_getcpu(); - if (cpu != -1) { - setAffinityTo(cpu); - } - return cpu; -#else - return -1; -#endif -} - -void restoreAffinity() { -#if __linux__ - if (!didSaveAffinity) { - return; - } - - if (sched_setaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1) { - LOG(ERROR) << "failed to restore affinity"; - } -#endif -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/affinity.hh b/third_party/nix/src/libutil/affinity.hh deleted file mode 100644 index 5e5ef9b0de..0000000000 --- a/third_party/nix/src/libutil/affinity.hh +++ /dev/null @@ -1,9 +0,0 @@ -#pragma once - -namespace nix { - -void setAffinityTo(int cpu); -int lockToCurrentCPU(); -void restoreAffinity(); - -} // namespace nix diff --git a/third_party/nix/src/libutil/archive.cc b/third_party/nix/src/libutil/archive.cc deleted file mode 100644 index e470ad7be6..0000000000 --- a/third_party/nix/src/libutil/archive.cc +++ /dev/null @@ -1,398 +0,0 @@ -#include "libutil/archive.hh" - -#include -#include -#include -#include - -#include -#include -#include -#include // for strcasecmp -#include -#include -#include - -#include "libutil/config.hh" -#include "libutil/util.hh" - -namespace nix { - -struct ArchiveSettings : Config { - Setting useCaseHack { - this, -#if __APPLE__ - true, -#else - false, -#endif - "use-case-hack", - "Whether to enable a Darwin-specific hack for dealing with file name " - "collisions." 
- }; -}; - -static ArchiveSettings archiveSettings; - -static GlobalConfig::Register r1(&archiveSettings); - -constexpr std::string_view kCaseHackSuffix = "~nix~case~hack~"; - -PathFilter defaultPathFilter = [](const Path& /*unused*/) { return true; }; - -static void dumpContents(const Path& path, size_t size, Sink& sink) { - sink << "contents" << size; - - AutoCloseFD fd(open(path.c_str(), O_RDONLY | O_CLOEXEC)); - if (!fd) { - throw SysError(format("opening file '%1%'") % path); - } - - std::vector buf(65536); - size_t left = size; - - while (left > 0) { - auto n = std::min(left, buf.size()); - readFull(fd.get(), buf.data(), n); - left -= n; - sink(buf.data(), n); - } - - writePadding(size, sink); -} - -static void dump(const Path& path, Sink& sink, PathFilter& filter) { - checkInterrupt(); - - struct stat st; - if (lstat(path.c_str(), &st) != 0) { - throw SysError(format("getting attributes of path '%1%'") % path); - } - - sink << "("; - - if (S_ISREG(st.st_mode)) { - sink << "type" - << "regular"; - if ((st.st_mode & S_IXUSR) != 0u) { - sink << "executable" - << ""; - } - dumpContents(path, static_cast(st.st_size), sink); - } - - else if (S_ISDIR(st.st_mode)) { - sink << "type" - << "directory"; - - /* If we're on a case-insensitive system like macOS, undo - the case hack applied by restorePath(). */ - std::map unhacked; - for (auto& i : readDirectory(path)) { - if (archiveSettings.useCaseHack) { - std::string name(i.name); - size_t pos = i.name.find(kCaseHackSuffix); - if (pos != std::string::npos) { - DLOG(INFO) << "removing case hack suffix from " << path << "/" - << i.name; - - name.erase(pos); - } - if (unhacked.find(name) != unhacked.end()) { - throw Error(format("file name collision in between '%1%' and '%2%'") % - (path + "/" + unhacked[name]) % (path + "/" + i.name)); - } - unhacked[name] = i.name; - } else { - unhacked[i.name] = i.name; - } - } - - for (auto& i : unhacked) { - if (filter(path + "/" + i.first)) { - sink << "entry" - << "(" - << "name" << i.first << "node"; - dump(path + "/" + i.second, sink, filter); - sink << ")"; - } - } - } - - else if (S_ISLNK(st.st_mode)) { - sink << "type" - << "symlink" - << "target" << readLink(path); - - } else { - throw Error(format("file '%1%' has an unsupported type") % path); - } - - sink << ")"; -} - -void dumpPath(const Path& path, Sink& sink, PathFilter& filter) { - sink << std::string(kNarVersionMagic1); - dump(path, sink, filter); -} - -void dumpString(const std::string& s, Sink& sink) { - sink << std::string(kNarVersionMagic1) << "(" - << "type" - << "regular" - << "contents" << s << ")"; -} - -static SerialisationError badArchive(const std::string& s) { - return SerialisationError("bad archive: " + s); -} - -#if 0 -static void skipGeneric(Source & source) -{ - if (readString(source) == "(") { - while (readString(source) != ")") - skipGeneric(source); - } -} -#endif - -static void parseContents(ParseSink& sink, Source& source, const Path& path) { - unsigned long long size = readLongLong(source); - - sink.preallocateContents(size); - - unsigned long long left = size; - std::vector buf(65536); - - while (left != 0u) { - checkInterrupt(); - auto n = buf.size(); - if (static_cast(n) > left) { - n = left; - } - source(buf.data(), n); - sink.receiveContents(buf.data(), n); - left -= n; - } - - readPadding(size, source); -} - -struct CaseInsensitiveCompare { - bool operator()(const std::string& a, const std::string& b) const { - return strcasecmp(a.c_str(), b.c_str()) < 0; - } -}; - -static void parse(ParseSink& sink, Source& 
source, const Path& path) { - std::string s; - - s = readString(source); - if (s != "(") { - throw badArchive("expected open tag"); - } - - enum { tpUnknown, tpRegular, tpDirectory, tpSymlink } type = tpUnknown; - - std::map names; - - while (true) { - checkInterrupt(); - - s = readString(source); - - if (s == ")") { - break; - } - - if (s == "type") { - if (type != tpUnknown) { - throw badArchive("multiple type fields"); - } - std::string t = readString(source); - - if (t == "regular") { - type = tpRegular; - sink.createRegularFile(path); - } - - else if (t == "directory") { - sink.createDirectory(path); - type = tpDirectory; - } - - else if (t == "symlink") { - type = tpSymlink; - } - - else { - throw badArchive("unknown file type " + t); - } - - } - - else if (s == "contents" && type == tpRegular) { - parseContents(sink, source, path); - } - - else if (s == "executable" && type == tpRegular) { - auto s = readString(source); - if (!s.empty()) { - throw badArchive("executable marker has non-empty value"); - } - sink.isExecutable(); - } - - else if (s == "entry" && type == tpDirectory) { - std::string name; - std::string prevName; - - s = readString(source); - if (s != "(") { - throw badArchive("expected open tag"); - } - - while (true) { - checkInterrupt(); - - s = readString(source); - - if (s == ")") { - break; - } - if (s == "name") { - name = readString(source); - if (name.empty() || name == "." || name == ".." || - name.find('/') != std::string::npos || - name.find(static_cast(0)) != std::string::npos) { - throw Error(format("NAR contains invalid file name '%1%'") % name); - } - if (name <= prevName) { - throw Error("NAR directory is not sorted"); - } - prevName = name; - if (archiveSettings.useCaseHack) { - auto i = names.find(name); - if (i != names.end()) { - DLOG(INFO) << "case collision between '" << i->first << "' and '" - << name << "'"; - name += kCaseHackSuffix; - name += std::to_string(++i->second); - } else { - names[name] = 0; - } - } - } else if (s == "node") { - if (s.empty()) { - throw badArchive("entry name missing"); - } - parse(sink, source, path + "/" + name); - } else { - throw badArchive("unknown field " + s); - } - } - } - - else if (s == "target" && type == tpSymlink) { - std::string target = readString(source); - sink.createSymlink(path, target); - } - - else { - throw badArchive("unknown field " + s); - } - } -} - -void parseDump(ParseSink& sink, Source& source) { - std::string version; - try { - version = readString(source, kNarVersionMagic1.size()); - } catch (SerialisationError& e) { - /* This generally means the integer at the start couldn't be - decoded. Ignore and throw the exception below. 
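A standalone sketch of the case-hack renaming performed in parse() above when two entry names collide case-insensitively; the example file names are invented.

// Illustrative sketch: on a case-insensitive collision, the later entry
// gets the suffix plus a per-name counter appended.
#include <iostream>
#include <map>
#include <string>
#include <strings.h>  // strcasecmp
#include <vector>

constexpr const char* kCaseHackSuffix = "~nix~case~hack~";

struct CaseInsensitiveCompare {
  bool operator()(const std::string& a, const std::string& b) const {
    return strcasecmp(a.c_str(), b.c_str()) < 0;
  }
};

int main() {
  std::vector<std::string> entries{"Makefile", "makefile", "MAKEFILE"};
  std::map<std::string, int, CaseInsensitiveCompare> names;
  for (std::string name : entries) {
    auto i = names.find(name);
    if (i != names.end()) {
      name += kCaseHackSuffix;
      name += std::to_string(++i->second);  // makefile~nix~case~hack~1, ...
    } else {
      names[name] = 0;
    }
    std::cout << name << "\n";
  }
}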
*/ - } - if (version != kNarVersionMagic1) { - throw badArchive("input doesn't look like a Nix archive"); - } - parse(sink, source, ""); -} - -struct RestoreSink : ParseSink { - Path dstPath; - AutoCloseFD fd; - - void createDirectory(const Path& path) override { - Path p = dstPath + path; - if (mkdir(p.c_str(), 0777) == -1) { - throw SysError(format("creating directory '%1%'") % p); - } - }; - - void createRegularFile(const Path& path) override { - Path p = dstPath + path; - fd = AutoCloseFD( - open(p.c_str(), O_CREAT | O_EXCL | O_WRONLY | O_CLOEXEC, 0666)); - if (!fd) { - throw SysError(format("creating file '%1%'") % p); - } - } - - void isExecutable() override { - struct stat st; - if (fstat(fd.get(), &st) == -1) { - throw SysError("fstat"); - } - if (fchmod(fd.get(), st.st_mode | (S_IXUSR | S_IXGRP | S_IXOTH)) == -1) { - throw SysError("fchmod"); - } - } - - void preallocateContents(unsigned long long len) override { -#if HAVE_POSIX_FALLOCATE - if (len != 0u) { - errno = posix_fallocate(fd.get(), 0, len); - /* Note that EINVAL may indicate that the underlying - filesystem doesn't support preallocation (e.g. on - OpenSolaris). Since preallocation is just an - optimisation, ignore it. */ - if (errno && errno != EINVAL && errno != EOPNOTSUPP && errno != ENOSYS) { - throw SysError(format("preallocating file of %1% bytes") % len); - } - } -#endif - } - - void receiveContents(unsigned char* data, unsigned int len) override { - writeFull(fd.get(), data, len); - } - - void createSymlink(const Path& path, const std::string& target) override { - Path p = dstPath + path; - nix::createSymlink(target, p); - } -}; - -void restorePath(const Path& path, Source& source) { - RestoreSink sink; - sink.dstPath = path; - parseDump(sink, source); -} - -void copyNAR(Source& source, Sink& sink) { - // FIXME: if 'source' is the output of dumpPath() followed by EOF, - // we should just forward all data directly without parsing. - - ParseSink parseSink; /* null sink; just parse the NAR */ - - LambdaSource wrapper([&](unsigned char* data, size_t len) { - auto n = source.read(data, len); - sink(data, n); - return n; - }); - - parseDump(parseSink, wrapper); -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/archive.hh b/third_party/nix/src/libutil/archive.hh deleted file mode 100644 index 3966278785..0000000000 --- a/third_party/nix/src/libutil/archive.hh +++ /dev/null @@ -1,77 +0,0 @@ -#pragma once - -#include "libutil/serialise.hh" -#include "libutil/types.hh" - -namespace nix { - -/* dumpPath creates a Nix archive of the specified path. The format - is as follows: - - IF path points to a REGULAR FILE: - dump(path) = attrs( - [ ("type", "regular") - , ("contents", contents(path)) - ]) - - IF path points to a DIRECTORY: - dump(path) = attrs( - [ ("type", "directory") - , ("entries", concat(map(f, sort(entries(path))))) - ]) - where f(fn) = attrs( - [ ("name", fn) - , ("file", dump(path + "/" + fn)) - ]) - - where: - - attrs(as) = concat(map(attr, as)) + encN(0) - attrs((a, b)) = encS(a) + encS(b) - - encS(s) = encN(len(s)) + s + (padding until next 64-bit boundary) - - encN(n) = 64-bit little-endian encoding of n. - - contents(path) = the contents of a regular file. - - sort(strings) = lexicographic sort by 8-bit value (strcmp). - - entries(path) = the entries of a directory, without `.' and - `..'. - - `+' denotes string concatenation. 
*/ - -void dumpPath(const Path& path, Sink& sink, - PathFilter& filter = defaultPathFilter); - -void dumpString(const std::string& s, Sink& sink); - -/* FIXME: fix this API, it sucks. */ -struct ParseSink { - virtual void createDirectory(const Path& path){}; - - virtual void createRegularFile(const Path& path){}; - virtual void isExecutable(){}; - virtual void preallocateContents(unsigned long long size){}; - virtual void receiveContents(unsigned char* data, unsigned int len){}; - - virtual void createSymlink(const Path& path, const std::string& target){}; -}; - -struct TeeSink : ParseSink { - TeeSource source; - - explicit TeeSink(Source& source) : source(source) {} -}; - -void parseDump(ParseSink& sink, Source& source); - -void restorePath(const Path& path, Source& source); - -/* Read a NAR from 'source' and write it to 'sink'. */ -void copyNAR(Source& source, Sink& sink); - -constexpr std::string_view kNarVersionMagic1 = "nix-archive-1"; - -} // namespace nix diff --git a/third_party/nix/src/libutil/args.cc b/third_party/nix/src/libutil/args.cc deleted file mode 100644 index 2be8a1b0ce..0000000000 --- a/third_party/nix/src/libutil/args.cc +++ /dev/null @@ -1,219 +0,0 @@ -#include "libutil/args.hh" - -#include "libutil/hash.hh" - -namespace nix { - -Args::FlagMaker Args::mkFlag() { return FlagMaker(*this); } - -Args::FlagMaker::~FlagMaker() { - assert(!flag->longName.empty()); - args.longFlags[flag->longName] = flag; - if (flag->shortName != 0) { - args.shortFlags[flag->shortName] = flag; - } -} - -void Args::parseCmdline(const Strings& _cmdline) { - Strings pendingArgs; - bool dashDash = false; - - Strings cmdline(_cmdline); - - for (auto pos = cmdline.begin(); pos != cmdline.end();) { - auto arg = *pos; - - /* Expand compound dash options (i.e., `-qlf' -> `-q -l -f', - `-j3` -> `-j 3`). */ - if (!dashDash && arg.length() > 2 && arg[0] == '-' && arg[1] != '-' && - (isalpha(arg[1]) != 0)) { - *pos = std::string("-") + arg[1]; - auto next = pos; - ++next; - for (unsigned int j = 2; j < arg.length(); j++) { - if (isalpha(arg[j]) != 0) { - cmdline.insert(next, std::string("-") + arg[j]); - } else { - cmdline.insert(next, std::string(arg, j)); - break; - } - } - arg = *pos; - } - - if (!dashDash && arg == "--") { - dashDash = true; - ++pos; - } else if (!dashDash && std::string(arg, 0, 1) == "-") { - if (!processFlag(pos, cmdline.end())) { - throw UsageError(format("unrecognised flag '%1%'") % arg); - } - } else { - pendingArgs.push_back(*pos++); - if (processArgs(pendingArgs, false)) { - pendingArgs.clear(); - } - } - } - - processArgs(pendingArgs, true); -} - -void Args::printHelp(const std::string& programName, std::ostream& out) { - std::cout << "Usage: " << programName << " ..."; - for (auto& exp : expectedArgs) { - std::cout << renderLabels({exp.label}); - // FIXME: handle arity > 1 - if (exp.arity == 0) { - std::cout << "..."; - } - if (exp.optional) { - std::cout << "?"; - } - } - std::cout << "\n"; - - auto s = description(); - if (!s.empty()) { - std::cout << "\nSummary: " << s << ".\n"; - } - - if (!longFlags.empty() != 0u) { - std::cout << "\n"; - std::cout << "Flags:\n"; - printFlags(out); - } -} - -void Args::printFlags(std::ostream& out) { - Table2 table; - for (auto& flag : longFlags) { - if (hiddenCategories.count(flag.second->category) != 0u) { - continue; - } - table.push_back(std::make_pair( - (flag.second->shortName != 0 - ? 
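A standalone sketch of the encN/encS primitives described in the archive.hh format comment above: encN(n) is a 64-bit little-endian integer, and encS(s) is encN(len(s)) followed by the bytes of s, zero-padded to the next 64-bit boundary.

// Illustrative sketch of the low-level NAR string encoding.
#include <cstdint>
#include <iostream>
#include <string>

void encN(uint64_t n, std::string& out) {
  for (int i = 0; i < 8; ++i) out.push_back(char((n >> (8 * i)) & 0xff));
}

void encS(const std::string& s, std::string& out) {
  encN(s.size(), out);
  out += s;
  while (out.size() % 8 != 0) out.push_back('\0');  // pad to 64-bit boundary
}

int main() {
  std::string nar;
  encS("nix-archive-1", nar);  // the version magic written by dumpPath()
  encS("(", nar);
  encS("type", nar);
  encS("regular", nar);
  std::cout << "encoded " << nar.size() << " bytes\n";  // always a multiple of 8
}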
std::string("-") + flag.second->shortName + ", " - : " ") + - "--" + flag.first + renderLabels(flag.second->labels), - flag.second->description)); - } - printTable(out, table); -} - -bool Args::processFlag(Strings::iterator& pos, Strings::iterator end) { - assert(pos != end); - - auto process = [&](const std::string& name, const Flag& flag) -> bool { - ++pos; - std::vector args; - for (size_t n = 0; n < flag.arity; ++n) { - if (pos == end) { - if (flag.arity == ArityAny) { - break; - } - throw UsageError(format("flag '%1%' requires %2% argument(s)") % name % - flag.arity); - } - args.push_back(*pos++); - } - flag.handler(std::move(args)); - return true; - }; - - if (std::string(*pos, 0, 2) == "--") { - auto i = longFlags.find(std::string(*pos, 2)); - if (i == longFlags.end()) { - return false; - } - return process("--" + i->first, *i->second); - } - - if (std::string(*pos, 0, 1) == "-" && pos->size() == 2) { - auto c = (*pos)[1]; - auto i = shortFlags.find(c); - if (i == shortFlags.end()) { - return false; - } - return process(std::string("-") + c, *i->second); - } - - return false; -} - -bool Args::processArgs(const Strings& args, bool finish) { - if (expectedArgs.empty()) { - if (!args.empty()) { - throw UsageError(format("unexpected argument '%1%'") % args.front()); - } - return true; - } - - auto& exp = expectedArgs.front(); - - bool res = false; - - if ((exp.arity == 0 && finish) || - (exp.arity > 0 && args.size() == exp.arity)) { - std::vector ss; - for (auto& s : args) { - ss.push_back(s); - } - exp.handler(std::move(ss)); - expectedArgs.pop_front(); - res = true; - } - - if (finish && !expectedArgs.empty() && !expectedArgs.front().optional) { - throw UsageError("more arguments are required"); - } - - return res; -} - -Args::FlagMaker& Args::FlagMaker::mkHashTypeFlag(HashType* ht) { - arity(1); - label("type"); - description("hash algorithm ('md5', 'sha1', 'sha256', or 'sha512')"); - handler([ht](const std::string& s) { - *ht = parseHashType(s); - if (*ht == htUnknown) { - throw UsageError("unknown hash type '%1%'", s); - } - }); - return *this; -} - -Strings argvToStrings(int argc, char** argv) { - Strings args; - argc--; - argv++; - while ((argc--) != 0) { - args.push_back(*argv++); - } - return args; -} - -std::string renderLabels(const Strings& labels) { - std::string res; - for (auto label : labels) { - for (auto& c : label) { - c = std::toupper(c); - } - res += " <" + label + ">"; - } - return res; -} - -void printTable(std::ostream& out, const Table2& table) { - size_t max = 0; - for (auto& row : table) { - max = std::max(max, row.first.size()); - } - for (auto& row : table) { - out << " " << row.first << std::string(max - row.first.size() + 2, ' ') - << row.second << "\n"; - } -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/args.hh b/third_party/nix/src/libutil/args.hh deleted file mode 100644 index bb1ef43912..0000000000 --- a/third_party/nix/src/libutil/args.hh +++ /dev/null @@ -1,221 +0,0 @@ -#pragma once - -#include -#include -#include - -#include - -#include "libutil/util.hh" - -namespace nix { - -MakeError(UsageError, Error); - -enum HashType : char; - -class Args { - public: - /* Parse the command line, throwing a UsageError if something goes - wrong. */ - void parseCmdline(const Strings& cmdline); - - virtual void printHelp(const std::string& programName, std::ostream& out); - - virtual std::string description() { return ""; } - - protected: - static const size_t ArityAny = std::numeric_limits::max(); - - /* Flags. 
*/ - struct Flag { - typedef std::shared_ptr ptr; - std::string longName; - char shortName = 0; - std::string description; - Strings labels; - size_t arity = 0; - std::function)> handler; - std::string category; - }; - - std::map longFlags; - std::map shortFlags; - - virtual bool processFlag(Strings::iterator& pos, Strings::iterator end); - - virtual void printFlags(std::ostream& out); - - /* Positional arguments. */ - struct ExpectedArg { - std::string label; - size_t arity; // 0 = any - bool optional; - std::function)> handler; - }; - - std::list expectedArgs; - - virtual bool processArgs(const Strings& args, bool finish); - - std::set hiddenCategories; - - public: - class FlagMaker { - Args& args; - Flag::ptr flag; - friend class Args; - explicit FlagMaker(Args& args) - : args(args), flag(std::make_shared()){}; - - public: - ~FlagMaker(); - FlagMaker& longName(const std::string& s) { - flag->longName = s; - return *this; - }; - FlagMaker& shortName(char s) { - flag->shortName = s; - return *this; - }; - FlagMaker& description(const std::string& s) { - flag->description = s; - return *this; - }; - FlagMaker& label(const std::string& l) { - flag->arity = 1; - flag->labels = {l}; - return *this; - }; - FlagMaker& labels(const Strings& ls) { - flag->arity = ls.size(); - flag->labels = ls; - return *this; - }; - FlagMaker& arity(size_t arity) { - flag->arity = arity; - return *this; - }; - FlagMaker& handler(std::function)> handler) { - flag->handler = handler; - return *this; - }; - FlagMaker& handler(std::function handler) { - flag->handler = [handler](std::vector) { handler(); }; - return *this; - }; - FlagMaker& handler(std::function handler) { - flag->arity = 1; - flag->handler = [handler](std::vector ss) { - handler(std::move(ss[0])); - }; - return *this; - }; - FlagMaker& category(const std::string& s) { - flag->category = s; - return *this; - }; - - template - FlagMaker& dest(T* dest) { - flag->arity = 1; - flag->handler = [=](std::vector ss) { *dest = ss[0]; }; - return *this; - } - - template - FlagMaker& set(T* dest, const T& val) { - flag->arity = 0; - flag->handler = [=](std::vector ss) { *dest = val; }; - return *this; - } - - FlagMaker& mkHashTypeFlag(HashType* ht); - }; - - FlagMaker mkFlag(); - - /* Helper functions for constructing flags / positional - arguments. 
*/ - - void mkFlag1(char shortName, const std::string& longName, - const std::string& label, const std::string& description, - std::function fun) { - mkFlag() - .shortName(shortName) - .longName(longName) - .labels({label}) - .description(description) - .arity(1) - .handler([=](std::vector ss) { fun(ss[0]); }); - } - - void mkFlag(char shortName, const std::string& name, - const std::string& description, bool* dest) { - mkFlag(shortName, name, description, dest, true); - } - - template - void mkFlag(char shortName, const std::string& longName, - const std::string& description, T* dest, const T& value) { - mkFlag() - .shortName(shortName) - .longName(longName) - .description(description) - .handler([=](std::vector ss) { *dest = value; }); - } - - template - void mkIntFlag(char shortName, const std::string& longName, - const std::string& description, I* dest) { - mkFlag(shortName, longName, description, [=](I n) { *dest = n; }); - } - - template - void mkFlag(char shortName, const std::string& longName, - const std::string& description, std::function fun) { - mkFlag() - .shortName(shortName) - .longName(longName) - .labels({"N"}) - .description(description) - .arity(1) - .handler([=](std::vector ss) { - I n; - if (!absl::SimpleAtoi(ss[0], &n)) { - throw UsageError("flag '--%s' requires a integer argument", - longName); - } - fun(n); - }); - } - - /* Expect a string argument. */ - void expectArg(const std::string& label, std::string* dest, - bool optional = false) { - expectedArgs.push_back( - ExpectedArg{label, 1, optional, - [=](std::vector ss) { *dest = ss[0]; }}); - } - - /* Expect 0 or more arguments. */ - void expectArgs(const std::string& label, std::vector* dest) { - expectedArgs.push_back(ExpectedArg{ - label, 0, false, - [=](std::vector ss) { *dest = std::move(ss); }}); - } - - friend class MultiCommand; -}; - -Strings argvToStrings(int argc, char** argv); - -/* Helper function for rendering argument labels. */ -std::string renderLabels(const Strings& labels); - -/* Helper function for printing 2-column tables. */ -using Table2 = std::vector >; - -void printTable(std::ostream& out, const Table2& table); - -} // namespace nix diff --git a/third_party/nix/src/libutil/compression.cc b/third_party/nix/src/libutil/compression.cc deleted file mode 100644 index d0895ca5fd..0000000000 --- a/third_party/nix/src/libutil/compression.cc +++ /dev/null @@ -1,400 +0,0 @@ -#include "libutil/compression.hh" - -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "libutil/finally.hh" -#include "libutil/util.hh" - -namespace nix { - -// Don't feed brotli too much at once. 
-struct ChunkedCompressionSink : CompressionSink { - uint8_t outbuf[32 * 1024]; - - void write(const unsigned char* data, size_t len) override { - const size_t CHUNK_SIZE = sizeof(outbuf) << 2; - while (len != 0u) { - size_t n = std::min(CHUNK_SIZE, len); - writeInternal(data, n); - data += n; - len -= n; - } - } - - virtual void writeInternal(const unsigned char* data, size_t len) = 0; -}; - -struct NoneSink : CompressionSink { - Sink& nextSink; - explicit NoneSink(Sink& nextSink) : nextSink(nextSink) {} - void finish() override { flush(); } - void write(const unsigned char* data, size_t len) override { - nextSink(data, len); - } -}; - -struct XzDecompressionSink : CompressionSink { - Sink& nextSink; - uint8_t outbuf[BUFSIZ]; - lzma_stream strm = LZMA_STREAM_INIT; - bool finished = false; - - explicit XzDecompressionSink(Sink& nextSink) : nextSink(nextSink) { - lzma_ret ret = lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED); - if (ret != LZMA_OK) { - throw CompressionError("unable to initialise lzma decoder"); - } - - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); - } - - ~XzDecompressionSink() override { lzma_end(&strm); } - - void finish() override { - CompressionSink::flush(); - write(nullptr, 0); - } - - void write(const unsigned char* data, size_t len) override { - strm.next_in = data; - strm.avail_in = len; - - while (!finished && ((data == nullptr) || (strm.avail_in != 0u))) { - checkInterrupt(); - - lzma_ret ret = lzma_code(&strm, data != nullptr ? LZMA_RUN : LZMA_FINISH); - if (ret != LZMA_OK && ret != LZMA_STREAM_END) { - throw CompressionError("error %d while decompressing xz file", ret); - } - - finished = ret == LZMA_STREAM_END; - - if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { - nextSink(outbuf, sizeof(outbuf) - strm.avail_out); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); - } - } - } -}; - -struct BzipDecompressionSink : ChunkedCompressionSink { - Sink& nextSink; - bz_stream strm; - bool finished = false; - - explicit BzipDecompressionSink(Sink& nextSink) : nextSink(nextSink) { - memset(&strm, 0, sizeof(strm)); - int ret = BZ2_bzDecompressInit(&strm, 0, 0); - if (ret != BZ_OK) { - throw CompressionError("unable to initialise bzip2 decoder"); - } - - strm.next_out = reinterpret_cast(outbuf); - strm.avail_out = sizeof(outbuf); - } - - ~BzipDecompressionSink() override { BZ2_bzDecompressEnd(&strm); } - - void finish() override { - flush(); - write(nullptr, 0); - } - - void writeInternal(const unsigned char* data, size_t len) override { - assert(len <= std::numeric_limits::max()); - - strm.next_in = (char*)data; - strm.avail_in = len; - - while (strm.avail_in != 0u) { - checkInterrupt(); - - int ret = BZ2_bzDecompress(&strm); - if (ret != BZ_OK && ret != BZ_STREAM_END) { - throw CompressionError("error while decompressing bzip2 file"); - } - - finished = ret == BZ_STREAM_END; - - if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { - nextSink(outbuf, sizeof(outbuf) - strm.avail_out); - strm.next_out = reinterpret_cast(outbuf); - strm.avail_out = sizeof(outbuf); - } - } - } -}; - -struct BrotliDecompressionSink : ChunkedCompressionSink { - Sink& nextSink; - BrotliDecoderState* state; - bool finished = false; - - explicit BrotliDecompressionSink(Sink& nextSink) : nextSink(nextSink) { - state = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr); - if (state == nullptr) { - throw CompressionError("unable to initialize brotli decoder"); - } - } - - ~BrotliDecompressionSink() override { 
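A standalone sketch of the chunking pattern used by ChunkedCompressionSink::write() above: large inputs are handed to the underlying encoder in bounded slices rather than all at once. The 128 KiB chunk size mirrors sizeof(outbuf) << 2 but is otherwise arbitrary in this example.

// Illustrative sketch of feeding an encoder in bounded slices.
#include <algorithm>
#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

void writeChunked(const std::string& data, size_t chunkSize,
                  const std::function<void(const char*, size_t)>& writeInternal) {
  const char* p = data.data();
  size_t left = data.size();
  while (left != 0) {
    size_t n = std::min(chunkSize, left);
    writeInternal(p, n);  // hand one bounded slice to the encoder
    p += n;
    left -= n;
  }
}

int main() {
  std::string input(300000, 'x');  // ~300 KB of data
  size_t calls = 0;
  writeChunked(input, 128 * 1024, [&](const char*, size_t n) {
    ++calls;
    std::cout << "chunk of " << n << " bytes\n";
  });
  std::cout << calls << " calls\n";  // 128 KiB, 128 KiB, remainder
}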
BrotliDecoderDestroyInstance(state); } - - void finish() override { - flush(); - writeInternal(nullptr, 0); - } - - void writeInternal(const unsigned char* data, size_t len) override { - const uint8_t* next_in = data; - size_t avail_in = len; - uint8_t* next_out = outbuf; - size_t avail_out = sizeof(outbuf); - - while (!finished && ((data == nullptr) || (avail_in != 0u))) { - checkInterrupt(); - - if (BrotliDecoderDecompressStream(state, &avail_in, &next_in, &avail_out, - &next_out, nullptr) == 0u) { - throw CompressionError("error while decompressing brotli file"); - } - - if (avail_out < sizeof(outbuf) || avail_in == 0) { - nextSink(outbuf, sizeof(outbuf) - avail_out); - next_out = outbuf; - avail_out = sizeof(outbuf); - } - - finished = (BrotliDecoderIsFinished(state) != 0); - } - } -}; - -ref decompress(const std::string& method, const std::string& in) { - StringSink ssink; - auto sink = makeDecompressionSink(method, ssink); - (*sink)(in); - sink->finish(); - return ssink.s; -} - -ref makeDecompressionSink(const std::string& method, - Sink& nextSink) { - if (method == "none" || method.empty()) { - return make_ref(nextSink); - } - if (method == "xz") { - return make_ref(nextSink); - } else if (method == "bzip2") { - return make_ref(nextSink); - } else if (method == "br") { - return make_ref(nextSink); - } else { - throw UnknownCompressionMethod("unknown compression method '%s'", method); - } -} - -struct XzCompressionSink : CompressionSink { - Sink& nextSink; - uint8_t outbuf[BUFSIZ]; - lzma_stream strm = LZMA_STREAM_INIT; - bool finished = false; - - XzCompressionSink(Sink& nextSink, bool parallel) : nextSink(nextSink) { - lzma_ret ret; - bool done = false; - - if (parallel) { - lzma_mt mt_options = {}; - mt_options.flags = 0; - mt_options.timeout = 300; // Using the same setting as the xz cmd line - mt_options.preset = LZMA_PRESET_DEFAULT; - mt_options.filters = NULL; - mt_options.check = LZMA_CHECK_CRC64; - mt_options.threads = lzma_cputhreads(); - mt_options.block_size = 0; - if (mt_options.threads == 0) { - mt_options.threads = 1; - } - // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the - // number of threads. - ret = lzma_stream_encoder_mt(&strm, &mt_options); - done = true; - } - - if (!done) { - ret = lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64); - } - - if (ret != LZMA_OK) { - throw CompressionError("unable to initialise lzma encoder"); - } - - // FIXME: apply the x86 BCJ filter? - - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); - } - - ~XzCompressionSink() override { lzma_end(&strm); } - - void finish() override { - CompressionSink::flush(); - write(nullptr, 0); - } - - void write(const unsigned char* data, size_t len) override { - strm.next_in = data; - strm.avail_in = len; - - while (!finished && ((data == nullptr) || (strm.avail_in != 0u))) { - checkInterrupt(); - - lzma_ret ret = lzma_code(&strm, data != nullptr ? 
LZMA_RUN : LZMA_FINISH); - if (ret != LZMA_OK && ret != LZMA_STREAM_END) { - throw CompressionError("error %d while compressing xz file", ret); - } - - finished = ret == LZMA_STREAM_END; - - if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { - nextSink(outbuf, sizeof(outbuf) - strm.avail_out); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); - } - } - } -}; - -struct BzipCompressionSink : ChunkedCompressionSink { - Sink& nextSink; - bz_stream strm; - bool finished = false; - - explicit BzipCompressionSink(Sink& nextSink) : nextSink(nextSink) { - memset(&strm, 0, sizeof(strm)); - int ret = BZ2_bzCompressInit(&strm, 9, 0, 30); - if (ret != BZ_OK) { - throw CompressionError("unable to initialise bzip2 encoder"); - } - - strm.next_out = reinterpret_cast(outbuf); - strm.avail_out = sizeof(outbuf); - } - - ~BzipCompressionSink() override { BZ2_bzCompressEnd(&strm); } - - void finish() override { - flush(); - writeInternal(nullptr, 0); - } - - void writeInternal(const unsigned char* data, size_t len) override { - assert(len <= std::numeric_limits::max()); - - strm.next_in = (char*)data; - strm.avail_in = len; - - while (!finished && ((data == nullptr) || (strm.avail_in != 0u))) { - checkInterrupt(); - - int ret = BZ2_bzCompress(&strm, data != nullptr ? BZ_RUN : BZ_FINISH); - if (ret != BZ_RUN_OK && ret != BZ_FINISH_OK && ret != BZ_STREAM_END) { - throw CompressionError("error %d while compressing bzip2 file", ret); - } - - finished = ret == BZ_STREAM_END; - - if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { - nextSink(outbuf, sizeof(outbuf) - strm.avail_out); - strm.next_out = reinterpret_cast(outbuf); - strm.avail_out = sizeof(outbuf); - } - } - } -}; - -struct BrotliCompressionSink : ChunkedCompressionSink { - Sink& nextSink; - uint8_t outbuf[BUFSIZ]; - BrotliEncoderState* state; - bool finished = false; - - explicit BrotliCompressionSink(Sink& nextSink) : nextSink(nextSink) { - state = BrotliEncoderCreateInstance(nullptr, nullptr, nullptr); - if (state == nullptr) { - throw CompressionError("unable to initialise brotli encoder"); - } - } - - ~BrotliCompressionSink() override { BrotliEncoderDestroyInstance(state); } - - void finish() override { - flush(); - writeInternal(nullptr, 0); - } - - void writeInternal(const unsigned char* data, size_t len) override { - const uint8_t* next_in = data; - size_t avail_in = len; - uint8_t* next_out = outbuf; - size_t avail_out = sizeof(outbuf); - - while (!finished && ((data == nullptr) || (avail_in != 0u))) { - checkInterrupt(); - - if (BrotliEncoderCompressStream(state, - data != nullptr ? 
BROTLI_OPERATION_PROCESS - : BROTLI_OPERATION_FINISH, - &avail_in, &next_in, &avail_out, - &next_out, nullptr) == 0) { - throw CompressionError("error while compressing brotli compression"); - } - - if (avail_out < sizeof(outbuf) || avail_in == 0) { - nextSink(outbuf, sizeof(outbuf) - avail_out); - next_out = outbuf; - avail_out = sizeof(outbuf); - } - - finished = (BrotliEncoderIsFinished(state) != 0); - } - } -}; - -ref makeCompressionSink(const std::string& method, - Sink& nextSink, const bool parallel) { - if (method == "none") { - return make_ref(nextSink); - } - if (method == "xz") { - return make_ref(nextSink, parallel); - } else if (method == "bzip2") { - return make_ref(nextSink); - } else if (method == "br") { - return make_ref(nextSink); - } else { - throw UnknownCompressionMethod(format("unknown compression method '%s'") % - method); - } -} - -ref compress(const std::string& method, const std::string& in, - const bool parallel) { - StringSink ssink; - auto sink = makeCompressionSink(method, ssink, parallel); - (*sink)(in); - sink->finish(); - return ssink.s; -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/compression.hh b/third_party/nix/src/libutil/compression.hh deleted file mode 100644 index 8ec340ab74..0000000000 --- a/third_party/nix/src/libutil/compression.hh +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#include - -#include "libutil/ref.hh" -#include "libutil/serialise.hh" -#include "libutil/types.hh" - -namespace nix { - -struct CompressionSink : BufferedSink { - virtual void finish() = 0; -}; - -ref decompress(const std::string& method, const std::string& in); - -ref makeDecompressionSink(const std::string& method, - Sink& nextSink); - -ref compress(const std::string& method, const std::string& in, - const bool parallel = false); - -ref makeCompressionSink(const std::string& method, - Sink& nextSink, - const bool parallel = false); - -MakeError(UnknownCompressionMethod, Error); - -MakeError(CompressionError, Error); - -} // namespace nix diff --git a/third_party/nix/src/libutil/config.cc b/third_party/nix/src/libutil/config.cc deleted file mode 100644 index 7c6e7af487..0000000000 --- a/third_party/nix/src/libutil/config.cc +++ /dev/null @@ -1,370 +0,0 @@ -#include "libutil/config.hh" - -#include -#include -#include - -#include -#include -#include -#include - -#include "libutil/args.hh" -#include "libutil/json.hh" - -namespace nix { - -bool Config::set(const std::string& name, const std::string& value) { - auto i = _settings.find(name); - if (i == _settings.end()) { - return false; - } - i->second.setting->set(value); - i->second.setting->overriden = true; - return true; -} - -void Config::addSetting(AbstractSetting* setting) { - _settings.emplace(setting->name, Config::SettingData(false, setting)); - for (auto& alias : setting->aliases) { - _settings.emplace(alias, Config::SettingData(true, setting)); - } - - bool set = false; - - auto i = unknownSettings.find(setting->name); - if (i != unknownSettings.end()) { - setting->set(i->second); - setting->overriden = true; - unknownSettings.erase(i); - set = true; - } - - for (auto& alias : setting->aliases) { - auto i = unknownSettings.find(alias); - if (i != unknownSettings.end()) { - if (set) { - LOG(WARNING) << "setting '" << alias - << "' is set, but it's an alias of '" << setting->name - << "', which is also set"; - } - - else { - setting->set(i->second); - setting->overriden = true; - unknownSettings.erase(i); - set = true; - } - } - } -} - -void AbstractConfig::warnUnknownSettings() { - for (auto& 
s : unknownSettings) { - LOG(WARNING) << "unknown setting: " << s.first; - } -} - -void AbstractConfig::reapplyUnknownSettings() { - auto unknownSettings2 = std::move(unknownSettings); - for (auto& s : unknownSettings2) { - set(s.first, s.second); - } -} - -void Config::getSettings(std::map& res, - bool overridenOnly) { - for (auto& opt : _settings) { - if (!opt.second.isAlias && - (!overridenOnly || opt.second.setting->overriden)) { - res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), - opt.second.setting->description}); - } - } -} - -void AbstractConfig::applyConfigFile(const Path& path) { - try { - std::string contents = readFile(path); - - unsigned int pos = 0; - - while (pos < contents.size()) { - std::string line; - while (pos < contents.size() && contents[pos] != '\n') { - line += contents[pos++]; - } - pos++; - - std::string::size_type hash = line.find('#'); - if (hash != std::string::npos) { - line = std::string(line, 0, hash); - } - - // TODO(tazjin): absl::string_view after path functions are fixed. - std::vector tokens = absl::StrSplit( - line, absl::ByAnyChar(" \t\n\r"), absl::SkipWhitespace()); - if (tokens.empty()) { - continue; - } - - if (tokens.size() < 2) { - throw UsageError("illegal configuration line '%1%' in '%2%'", line, - path); - } - - auto include = false; - auto ignoreMissing = false; - if (tokens[0] == "include") { - include = true; - } else if (tokens[0] == "!include") { - include = true; - ignoreMissing = true; - } - - if (include) { - if (tokens.size() != 2) { - throw UsageError("illegal configuration line '%1%' in '%2%'", line, - path); - } - auto p = absPath(tokens[1], dirOf(path)); - if (pathExists(p)) { - applyConfigFile(p); - } else if (!ignoreMissing) { - throw Error("file '%1%' included from '%2%' not found", p, path); - } - continue; - } - - if (tokens[1] != "=") { - throw UsageError("illegal configuration line '%1%' in '%2%'", line, - path); - } - - std::string name = tokens[0]; - - auto i = tokens.begin(); - advance(i, 2); - - set(name, - concatStringsSep(" ", Strings(i, tokens.end()))); // FIXME: slow - }; - } catch (SysError&) { - } -} - -void Config::resetOverriden() { - for (auto& s : _settings) { - s.second.setting->overriden = false; - } -} - -void Config::toJSON(JSONObject& out) { - for (auto& s : _settings) { - if (!s.second.isAlias) { - JSONObject out2(out.object(s.first)); - out2.attr("description", s.second.setting->description); - JSONPlaceholder out3(out2.placeholder("value")); - s.second.setting->toJSON(out3); - } - } -} - -void Config::convertToArgs(Args& args, const std::string& category) { - for (auto& s : _settings) { - if (!s.second.isAlias) { - s.second.setting->convertToArg(args, category); - } - } -} - -AbstractSetting::AbstractSetting(std::string name, std::string description, - std::set aliases) - : name(std::move(name)), - description(std::move(description)), - aliases(std::move(aliases)) {} - -void AbstractSetting::toJSON(JSONPlaceholder& out) { out.write(to_string()); } - -void AbstractSetting::convertToArg(Args& args, const std::string& category) {} - -template -void BaseSetting::toJSON(JSONPlaceholder& out) { - out.write(value); -} - -template -void BaseSetting::convertToArg(Args& args, const std::string& category) { - args.mkFlag() - .longName(name) - .description(description) - .arity(1) - .handler([=](std::vector ss) { - overriden = true; - set(ss[0]); - }) - .category(category); -} - -template <> -void BaseSetting::set(const std::string& str) { - value = str; -} - -template <> -std::string 
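The deleted applyConfigFile above reads a nix.conf-style file: strip '#' comments, skip blank lines, recognise include/!include, and otherwise require "name = value" with any remaining tokens re-joined into the value. A rough standalone sketch of that line format, assuming plain whitespace tokenisation; parseConfigLine and ConfigLine are illustrative names and the include handling is only noted in a comment.

    #include <optional>
    #include <sstream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct ConfigLine {
      std::string name;
      std::string value;  // tokens after '=' re-joined with single spaces
    };

    // Parses one "name = value" line in the style of the deleted applyConfigFile.
    // Returns std::nullopt for blank/comment-only lines; throws on malformed input.
    // (The real parser also recognised "include"/"!include" before the '=' check.)
    std::optional<ConfigLine> parseConfigLine(std::string line) {
      if (auto hash = line.find('#'); hash != std::string::npos)
        line.erase(hash);  // strip trailing comment

      std::istringstream in(line);
      std::vector<std::string> tokens;
      for (std::string tok; in >> tok;) tokens.push_back(tok);

      if (tokens.empty()) return std::nullopt;
      if (tokens.size() < 2 || tokens[1] != "=")
        throw std::runtime_error("illegal configuration line '" + line + "'");

      ConfigLine result{tokens[0], ""};
      for (size_t i = 2; i < tokens.size(); ++i) {
        if (i > 2) result.value += ' ';
        result.value += tokens[i];
      }
      return result;
    }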
BaseSetting::to_string() { - return value; -} - -template -void BaseSetting::set(const std::string& str) { - static_assert(std::is_integral::value, "Integer required."); - if (!absl::SimpleAtoi(str, &value)) { - throw UsageError("setting '%s' has invalid value '%s'", name, str); - } -} - -template -std::string BaseSetting::to_string() { - static_assert(std::is_integral::value, "Integer required."); - return std::to_string(value); -} - -template <> -void BaseSetting::set(const std::string& str) { - if (str == "true" || str == "yes" || str == "1") { - value = true; - } else if (str == "false" || str == "no" || str == "0") { - value = false; - } else { - throw UsageError("Boolean setting '%s' has invalid value '%s'", name, str); - } -} - -template <> -std::string BaseSetting::to_string() { - return value ? "true" : "false"; -} - -template <> -void BaseSetting::convertToArg(Args& args, const std::string& category) { - args.mkFlag() - .longName(name) - .description(description) - .handler([=](const std::vector& ss) { override(true); }) - .category(category); - args.mkFlag() - .longName("no-" + name) - .description(description) - .handler([=](const std::vector& ss) { override(false); }) - .category(category); -} - -template <> -void BaseSetting::set(const std::string& str) { - value = absl::StrSplit(str, absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty()); -} - -template <> -std::string BaseSetting::to_string() { - return concatStringsSep(" ", value); -} - -template <> -void BaseSetting::toJSON(JSONPlaceholder& out) { - JSONList list(out.list()); - for (auto& s : value) { - list.elem(s); - } -} - -template <> -void BaseSetting::set(const std::string& str) { - value = absl::StrSplit(str, absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty()); -} - -template <> -std::string BaseSetting::to_string() { - return concatStringsSep(" ", value); -} - -template <> -void BaseSetting::toJSON(JSONPlaceholder& out) { - JSONList list(out.list()); - for (auto& s : value) { - list.elem(s); - } -} - -template class BaseSetting; -template class BaseSetting; -template class BaseSetting; -template class BaseSetting; -template class BaseSetting; -template class BaseSetting; -template class BaseSetting; -template class BaseSetting; -template class BaseSetting; -template class BaseSetting; - -void PathSetting::set(const std::string& str) { - if (str.empty()) { - if (allowEmpty) { - value = ""; - } else { - throw UsageError("setting '%s' cannot be empty", name); - } - } else { - value = canonPath(str); - } -} - -bool GlobalConfig::set(const std::string& name, const std::string& value) { - for (auto& config : *configRegistrations) { - if (config->set(name, value)) { - return true; - } - } - - unknownSettings.emplace(name, value); - - return false; -} - -void GlobalConfig::getSettings(std::map& res, - bool overridenOnly) { - for (auto& config : *configRegistrations) { - config->getSettings(res, overridenOnly); - } -} - -void GlobalConfig::resetOverriden() { - for (auto& config : *configRegistrations) { - config->resetOverriden(); - } -} - -void GlobalConfig::toJSON(JSONObject& out) { - for (auto& config : *configRegistrations) { - config->toJSON(out); - } -} - -void GlobalConfig::convertToArgs(Args& args, const std::string& category) { - for (auto& config : *configRegistrations) { - config->convertToArgs(args, category); - } -} - -GlobalConfig globalConfig; - -GlobalConfig::ConfigRegistrations* GlobalConfig::configRegistrations; - -GlobalConfig::Register::Register(Config* config) { - if (configRegistrations == nullptr) { - 
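The globalConfig machinery above routes a setting name to whichever registered Config instance owns it, with registration done by static Register objects so that components in different translation units all become reachable. A minimal standalone sketch of that self-registration pattern; SettingsGroup and GlobalSettings are illustrative stand-ins, not the removed classes.

    #include <map>
    #include <string>
    #include <vector>

    // A registrable group of settings; stands in for the deleted Config class.
    struct SettingsGroup {
      std::map<std::string, std::string> values;
      bool set(const std::string& name, const std::string& value) {
        auto it = values.find(name);
        if (it == values.end()) return false;
        it->second = value;
        return true;
      }
    };

    // Global registry populated from static initialisers via Register.
    struct GlobalSettings {
      static std::vector<SettingsGroup*>* registrations;

      struct Register {
        explicit Register(SettingsGroup* g) {
          if (registrations == nullptr)
            registrations = new std::vector<SettingsGroup*>;
          registrations->push_back(g);
        }
      };

      // Try every registered group; report whether anyone claimed the setting.
      static bool set(const std::string& name, const std::string& value) {
        if (registrations == nullptr) return false;
        for (auto* g : *registrations)
          if (g->set(name, value)) return true;
        return false;
      }
    };

    std::vector<SettingsGroup*>* GlobalSettings::registrations = nullptr;

    // Typical use in some other translation unit:
    SettingsGroup buildSettings{{{"max-jobs", "1"}}};
    static GlobalSettings::Register r(&buildSettings);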
configRegistrations = new ConfigRegistrations; - } - configRegistrations->emplace_back(config); -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/config.hh b/third_party/nix/src/libutil/config.hh deleted file mode 100644 index 81b1c80e0e..0000000000 --- a/third_party/nix/src/libutil/config.hh +++ /dev/null @@ -1,228 +0,0 @@ -#include -#include - -#include "libutil/types.hh" - -#pragma once - -namespace nix { - -class Args; -class AbstractSetting; -class JSONPlaceholder; -class JSONObject; - -class AbstractConfig { - protected: - StringMap unknownSettings; - - explicit AbstractConfig(const StringMap& initials = {}) - : unknownSettings(initials) {} - - public: - virtual bool set(const std::string& name, const std::string& value) = 0; - - struct SettingInfo { - std::string value; - std::string description; - }; - - virtual void getSettings(std::map& res, - bool overridenOnly = false) = 0; - - void applyConfigFile(const Path& path); - - virtual void resetOverriden() = 0; - - virtual void toJSON(JSONObject& out) = 0; - - virtual void convertToArgs(Args& args, const std::string& category) = 0; - - void warnUnknownSettings(); - - void reapplyUnknownSettings(); -}; - -/* A class to simplify providing configuration settings. The typical - use is to inherit Config and add Setting members: - - class MyClass : private Config - { - Setting foo{this, 123, "foo", "the number of foos to use"}; - Setting bar{this, "blabla", "bar", "the name of the bar"}; - - MyClass() : Config(readConfigFile("/etc/my-app.conf")) - { - std::cout << foo << "\n"; // will print 123 unless overriden - } - }; -*/ - -class Config : public AbstractConfig { - friend class AbstractSetting; - - public: - struct SettingData { - bool isAlias; - AbstractSetting* setting; - SettingData(bool isAlias, AbstractSetting* setting) - : isAlias(isAlias), setting(setting) {} - }; - - typedef std::map Settings; - - private: - Settings _settings; - - public: - explicit Config(const StringMap& initials = {}) : AbstractConfig(initials) {} - - bool set(const std::string& name, const std::string& value) override; - - void addSetting(AbstractSetting* setting); - - void getSettings(std::map& res, - bool overridenOnly = false) override; - - void resetOverriden() override; - - void toJSON(JSONObject& out) override; - - void convertToArgs(Args& args, const std::string& category) override; -}; - -class AbstractSetting { - friend class Config; - - public: - const std::string name; - const std::string description; - const std::set aliases; - - int created = 123; - - bool overriden = false; - - protected: - AbstractSetting(std::string name, std::string description, - std::set aliases); - - virtual ~AbstractSetting() { - // Check against a gcc miscompilation causing our constructor - // not to run (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80431). - assert(created == 123); - } - - virtual void set(const std::string& value) = 0; - - virtual std::string to_string() = 0; - - virtual void toJSON(JSONPlaceholder& out); - - virtual void convertToArg(Args& args, const std::string& category); - - bool isOverriden() { return overriden; } -}; - -/* A setting of type T. 
*/ -template -class BaseSetting : public AbstractSetting { - protected: - T value; - - public: - BaseSetting(const T& def, const std::string& name, - const std::string& description, - const std::set& aliases = {}) - : AbstractSetting(name, description, aliases), value(def) {} - - operator const T&() const { return value; } - operator T&() { return value; } - const T& get() const { return value; } - bool operator==(const T& v2) const { return value == v2; } - bool operator!=(const T& v2) const { return value != v2; } - void operator=(const T& v) { assign(v); } - virtual void assign(const T& v) { value = v; } - - void set(const std::string& str) override; - - virtual void override(const T& v) { - overriden = true; - value = v; - } - - std::string to_string() override; - - void convertToArg(Args& args, const std::string& category) override; - - void toJSON(JSONPlaceholder& out) override; -}; - -template -std::ostream& operator<<(std::ostream& str, const BaseSetting& opt) { - str << (const T&)opt; - return str; -} - -template -bool operator==(const T& v1, const BaseSetting& v2) { - return v1 == (const T&)v2; -} - -template -class Setting : public BaseSetting { - public: - Setting(Config* options, const T& def, const std::string& name, - const std::string& description, - const std::set& aliases = {}) - : BaseSetting(def, name, description, aliases) { - options->addSetting(this); - } - - void operator=(const T& v) { this->assign(v); } -}; - -/* A special setting for Paths. These are automatically canonicalised - (e.g. "/foo//bar/" becomes "/foo/bar"). */ -class PathSetting : public BaseSetting { - bool allowEmpty; - - public: - PathSetting(Config* options, bool allowEmpty, const Path& def, - const std::string& name, const std::string& description, - const std::set& aliases = {}) - : BaseSetting(def, name, description, aliases), - allowEmpty(allowEmpty) { - options->addSetting(this); - } - - void set(const std::string& str) override; - - Path operator+(const char* p) const { return value + p; } - - void operator=(const Path& v) { this->assign(v); } -}; - -struct GlobalConfig : public AbstractConfig { - using ConfigRegistrations = std::vector; - static ConfigRegistrations* configRegistrations; - - bool set(const std::string& name, const std::string& value) override; - - void getSettings(std::map& res, - bool overridenOnly = false) override; - - void resetOverriden() override; - - void toJSON(JSONObject& out) override; - - void convertToArgs(Args& args, const std::string& category) override; - - struct Register { - explicit Register(Config* config); - }; -}; - -extern GlobalConfig globalConfig; - -} // namespace nix diff --git a/third_party/nix/src/libutil/finally.hh b/third_party/nix/src/libutil/finally.hh deleted file mode 100644 index 2ead8661a6..0000000000 --- a/third_party/nix/src/libutil/finally.hh +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once - -#include - -/* A trivial class to run a function at the end of a scope. 
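The Finally helper described here runs a callback when the enclosing scope is left, on every exit path. A short usage sketch with a self-contained stand-in for the class; copyFirstLine and the FILE* handling are illustrative, not taken from the removed code.

    #include <cstdio>
    #include <functional>

    // Minimal stand-in for the Finally helper: runs the callback in its destructor.
    class Finally {
      std::function<void()> fun;
     public:
      explicit Finally(std::function<void()> fun) : fun(std::move(fun)) {}
      ~Finally() { fun(); }
    };

    void copyFirstLine(const char* path) {
      FILE* f = std::fopen(path, "r");
      if (f == nullptr) return;
      Finally closeFile([&] { std::fclose(f); });  // runs on every exit path

      char buf[256];
      if (std::fgets(buf, sizeof(buf), f) != nullptr) {
        std::puts(buf);
      }
    }  // closeFile's destructor closes f here, even if an exception is thrown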
*/ -class Finally { - private: - std::function fun; - - public: - explicit Finally(std::function fun) : fun(fun) {} - ~Finally() { fun(); } -}; diff --git a/third_party/nix/src/libutil/hash.cc b/third_party/nix/src/libutil/hash.cc deleted file mode 100644 index ba61254392..0000000000 --- a/third_party/nix/src/libutil/hash.cc +++ /dev/null @@ -1,484 +0,0 @@ -#include "libutil/hash.hh" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libutil/archive.hh" -#include "libutil/istringstream_nocopy.hh" -#include "libutil/util.hh" - -namespace nix { - -std::optional hash_type_from(nix::proto::HashType hash_type) { - switch (hash_type) { - case nix::proto::HashType::UNKNOWN: - return HashType::htUnknown; - case nix::proto::HashType::MD5: - return HashType::htMD5; - case nix::proto::HashType::SHA1: - return HashType::htSHA1; - case nix::proto::HashType::SHA256: - return HashType::htSHA256; - case nix::proto::HashType::SHA512: - return HashType::htSHA512; - default: - return {}; - } -} - -nix::proto::HashType HashTypeToProto(HashType hash_type) { - switch (hash_type) { - case HashType::htMD5: - return nix::proto::HashType::MD5; - case HashType::htSHA1: - return nix::proto::HashType::SHA1; - case HashType::htSHA256: - return nix::proto::HashType::SHA256; - case HashType::htSHA512: - return nix::proto::HashType::SHA512; - default: - return nix::proto::HashType::UNKNOWN; - } -} - -void Hash::init() { - if (type == htMD5) { - hashSize = md5HashSize; - } else if (type == htSHA1) { - hashSize = sha1HashSize; - } else if (type == htSHA256) { - hashSize = sha256HashSize; - } else if (type == htSHA512) { - hashSize = sha512HashSize; - } else { - abort(); - } - assert(hashSize <= maxHashSize); - memset(hash, 0, maxHashSize); -} - -bool Hash::operator==(const Hash& h2) const { - if (hashSize != h2.hashSize) { - return false; - } - for (unsigned int i = 0; i < hashSize; i++) { - if (hash[i] != h2.hash[i]) { - return false; - } - } - return true; -} - -bool Hash::operator!=(const Hash& h2) const { return !(*this == h2); } - -bool Hash::operator<(const Hash& h) const { - if (hashSize < h.hashSize) { - return true; - } - if (hashSize > h.hashSize) { - return false; - } - for (unsigned int i = 0; i < hashSize; i++) { - if (hash[i] < h.hash[i]) { - return true; - } - if (hash[i] > h.hash[i]) { - return false; - } - } - return false; -} - -const std::string base16Chars = "0123456789abcdef"; - -static std::string printHash16(const Hash& hash) { - char buf[hash.hashSize * 2]; - for (unsigned int i = 0; i < hash.hashSize; i++) { - buf[i * 2] = base16Chars[hash.hash[i] >> 4]; - buf[i * 2 + 1] = base16Chars[hash.hash[i] & 0x0f]; - } - return std::string(buf, hash.hashSize * 2); -} - -bool Hash::IsValidBase16(absl::string_view s) { - for (char c : s) { - if ('0' <= c && c <= '9') { - continue; - } - if ('a' <= c && c <= 'f') { - continue; - } - if ('A' <= c && c <= 'F') { - continue; - } - return false; - } - return true; -} - -constexpr signed char kUnBase32[] = { - -1, -1, -1, -1, -1, -1, -1, -1, /* unprintables */ - -1, -1, -1, -1, -1, -1, -1, -1, /* unprintables */ - -1, -1, -1, -1, -1, -1, -1, -1, /* unprintables */ - -1, -1, -1, -1, -1, -1, -1, -1, /* unprintables */ - -1, -1, -1, -1, -1, -1, -1, -1, /* SP..' */ - -1, -1, -1, -1, -1, -1, -1, -1, /* (../ */ - 0, 1, 2, 3, 4, 5, 6, 7, /* 0..7 */ - 8, 9, -1, -1, -1, -1, -1, -1, /* 8..? 
*/ - -1, -1, -1, -1, -1, -1, -1, -1, /* @..G */ - -1, -1, -1, -1, -1, -1, -1, -1, /* H..O */ - -1, -1, -1, -1, -1, -1, -1, -1, /* P..W */ - -1, -1, -1, -1, -1, -1, -1, -1, /* X.._ */ - -1, 10, 11, 12, 13, -1, 14, 15, /* `..g */ - 16, 17, 18, 19, 20, 21, 22, -1, /* h..o */ - 23, 24, 25, 26, -1, -1, 27, 28, /* p..w */ - 29, 30, 31, -1, -1, -1, -1, -1, /* x..DEL */ - - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* high */ - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* high */ - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* high */ - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* high */ - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* high */ - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* high */ - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* high */ - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* high */ -}; - -bool Hash::IsValidBase32(absl::string_view s) { - static_assert(sizeof(kUnBase32) == 256); - - for (char c : s) { - if (kUnBase32[static_cast(c)] == -1) { - return false; - } - } - return true; -} - -std::string Hash::ToStorePathHash() const { - return compressHash(*this, kStorePathHashSize).to_string(Base32, false); -} - -static std::string printHash32(const Hash& hash) { - assert(hash.hashSize); - size_t len = hash.base32Len(); - assert(len); - - std::string s; - s.reserve(len); - - for (int n = static_cast(len) - 1; n >= 0; n--) { - unsigned int b = n * 5; - unsigned int i = b / 8; - unsigned int j = b % 8; - unsigned char c = - (hash.hash[i] >> j) | - (i >= hash.hashSize - 1 ? 0 : hash.hash[i + 1] << (8 - j)); - s.push_back(base32Chars[c & 0x1f]); - } - - return s; -} - -std::string printHash16or32(const Hash& hash) { - return hash.to_string(hash.type == htMD5 ? Base16 : Base32, false); -} - -std::string Hash::to_string(Base base, bool includeType) const { - std::string s; - if (base == SRI || includeType) { - s += printHashType(type); - s += base == SRI ? 
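The printHash32 routine above renders a digest in Nix's base-32 variant: a 32-character alphabet of digits and lowercase letters omitting e, o, u and t, emitted five bits at a time from the highest bit offset downwards. A standalone sketch of the same loop over a byte vector; encodeBase32 and kBase32Chars are illustrative names.

    #include <cstdint>
    #include <string>
    #include <vector>

    // Nix's base-32 alphabet: 0-9 plus lowercase letters, omitting e, o, u, t.
    constexpr char kBase32Chars[] = "0123456789abcdfghijklmnpqrsvwxyz";

    // Encode a byte string the way the deleted printHash32 did: 5 bits per
    // character, starting from the highest bit offset.
    std::string encodeBase32(const std::vector<uint8_t>& bytes) {
      if (bytes.empty()) return "";
      size_t len = (bytes.size() * 8 - 1) / 5 + 1;

      std::string s;
      s.reserve(len);
      for (size_t n = len; n-- > 0;) {
        unsigned int b = n * 5;  // bit offset of this 5-bit group
        unsigned int i = b / 8;  // byte holding its low bits
        unsigned int j = b % 8;  // position of the group within that byte
        unsigned char c =
            (bytes[i] >> j) |
            (i >= bytes.size() - 1 ? 0 : bytes[i + 1] << (8 - j));
        s.push_back(kBase32Chars[c & 0x1f]);
      }
      return s;
    }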
'-' : ':'; - } - switch (base) { - case Base16: - s += printHash16(*this); - break; - case Base32: - s += printHash32(*this); - break; - case Base64: - case SRI: - std::string b64; - absl::Base64Escape( - std::string(reinterpret_cast(hash), hashSize), &b64); - s += b64; - break; - } - return s; -} - -Hash::Hash(std::string_view s, HashType type) : type(type) { - absl::StatusOr result = deserialize(s, type); - *this = unwrap_throw(result); -} - -// TODO(riking): change ht to an optional -absl::StatusOr Hash::deserialize(std::string_view s, HashType type) { - size_t pos = 0; - bool isSRI = false; - - auto sep = s.find(':'); - if (sep == std::string::npos) { - sep = s.find('-'); - if (sep != std::string::npos) { - isSRI = true; - } else if (type == htUnknown) { - return absl::InvalidArgumentError( - absl::StrCat("hash string '", s, " does not include a type")); - } - } - - HashType parsedType = type; - if (sep != std::string::npos) { - std::string hts = std::string(s, 0, sep); - parsedType = parseHashType(hts); - if (type != htUnknown && parsedType != type) { - return absl::InvalidArgumentError( - absl::StrCat("hash '", s, "' should have type '", printHashType(type), - "', found '", printHashType(parsedType), "'")); - } - pos = sep + 1; - } - - Hash dest(parsedType); - - size_t size = s.size() - pos; - absl::string_view sv(s.data() + pos, size); - - if (!isSRI && size == dest.base16Len()) { - std::string bytes; - if (!IsValidBase16(sv)) { - return absl::InvalidArgumentError( - absl::StrCat("invalid base-16 hash: bad character in '", s, "'")); - } - bytes = absl::HexStringToBytes(sv); - if (bytes.size() != dest.hashSize) { - return absl::InvalidArgumentError( - absl::StrCat("hash '", s, "' has wrong length for base16 ", - printHashType(dest.type))); - } - memcpy(dest.hash, bytes.data(), dest.hashSize); - } - - else if (!isSRI && size == dest.base32Len()) { - for (unsigned int n = 0; n < size; ++n) { - char c = sv[size - n - 1]; - // range: -1, 0..31 - signed char digit = kUnBase32[static_cast(c)]; - if (digit < 0) { - return absl::InvalidArgumentError( - absl::StrCat("invalid base-32 hash: bad character ", - absl::CEscape(absl::string_view(&c, 1)))); - } - unsigned int b = n * 5; - unsigned int i = b / 8; - unsigned int j = b % 8; - dest.hash[i] |= digit << j; - - if (i < dest.hashSize - 1) { - dest.hash[i + 1] |= digit >> (8 - j); - } else { - if ((digit >> (8 - j)) != 0) { - return absl::InvalidArgumentError( - absl::StrCat("invalid base-32 hash '", s, "'")); - } - } - } - } - - else if (isSRI || size == dest.base64Len()) { - std::string decoded; - if (!absl::Base64Unescape(sv, &decoded)) { - return absl::InvalidArgumentError("invalid base-64 hash"); - } - if (decoded.size() != dest.hashSize) { - return absl::InvalidArgumentError( - absl::StrCat("hash '", s, "' has wrong length for base64 ", - printHashType(dest.type))); - } - memcpy(dest.hash, decoded.data(), dest.hashSize); - } - - else { - return absl::InvalidArgumentError(absl::StrCat( - "hash '", s, "' has wrong length for ", printHashType(dest.type))); - } - - return dest; -} - -Hash Hash::unwrap_throw(absl::StatusOr hash) { - if (hash.ok()) { - return *hash; - } else { - throw BadHash(hash.status().message()); - } -} - -namespace hash { - -union Ctx { - MD5_CTX md5; - SHA_CTX sha1; - SHA256_CTX sha256; - SHA512_CTX sha512; -}; - -static void start(HashType ht, Ctx& ctx) { - if (ht == htMD5) { - MD5_Init(&ctx.md5); - } else if (ht == htSHA1) { - SHA1_Init(&ctx.sha1); - } else if (ht == htSHA256) { - SHA256_Init(&ctx.sha256); - } else 
if (ht == htSHA512) { - SHA512_Init(&ctx.sha512); - } -} - -static void update(HashType ht, Ctx& ctx, const unsigned char* bytes, - size_t len) { - if (ht == htMD5) { - MD5_Update(&ctx.md5, bytes, len); - } else if (ht == htSHA1) { - SHA1_Update(&ctx.sha1, bytes, len); - } else if (ht == htSHA256) { - SHA256_Update(&ctx.sha256, bytes, len); - } else if (ht == htSHA512) { - SHA512_Update(&ctx.sha512, bytes, len); - } -} - -static void finish(HashType ht, Ctx& ctx, unsigned char* hash) { - if (ht == htMD5) { - MD5_Final(hash, &ctx.md5); - } else if (ht == htSHA1) { - SHA1_Final(hash, &ctx.sha1); - } else if (ht == htSHA256) { - SHA256_Final(hash, &ctx.sha256); - } else if (ht == htSHA512) { - SHA512_Final(hash, &ctx.sha512); - } -} - -} // namespace hash - -Hash hashString(HashType ht, const std::string& s) { - hash::Ctx ctx{}; - Hash hash(ht); - start(ht, ctx); - update(ht, ctx, reinterpret_cast(s.data()), s.length()); - finish(ht, ctx, hash.hash); - return hash; -} - -Hash hashFile(HashType ht, const Path& path) { - hash::Ctx ctx{}; - Hash hash(ht); - start(ht, ctx); - - AutoCloseFD fd(open(path.c_str(), O_RDONLY | O_CLOEXEC)); - if (!fd) { - throw SysError(format("opening file '%1%'") % path); - } - - std::vector buf(8192); - ssize_t n; - while ((n = read(fd.get(), buf.data(), buf.size())) != 0) { - checkInterrupt(); - if (n == -1) { - throw SysError(format("reading file '%1%'") % path); - } - update(ht, ctx, buf.data(), n); - } - - finish(ht, ctx, hash.hash); - return hash; -} - -HashSink::HashSink(HashType ht) - : ht(ht), ctx(std::make_unique()), bytes(0) { - start(ht, *ctx); -} - -HashSink::~HashSink() { bufPos = 0; } - -void HashSink::write(const unsigned char* data, size_t len) { - bytes += len; - nix::hash::update(ht, *ctx, data, len); -} - -HashResult HashSink::finish() { - flush(); - Hash hash(ht); - nix::hash::finish(ht, *ctx, hash.hash); - return HashResult(hash, bytes); -} - -HashResult HashSink::currentHash() { - flush(); - nix::hash::Ctx ctx2 = *ctx; - Hash hash(ht); - nix::hash::finish(ht, ctx2, hash.hash); - return HashResult(hash, bytes); -} - -HashResult hashPath(HashType ht, const Path& path, PathFilter& filter) { - HashSink sink(ht); - dumpPath(path, sink, filter); - return sink.finish(); -} - -Hash compressHash(const Hash& hash, unsigned int newSize) { - Hash h; - h.hashSize = newSize; - for (unsigned int i = 0; i < hash.hashSize; ++i) { - h.hash[i % newSize] ^= hash.hash[i]; - } - return h; -} - -HashType parseHashType(const std::string& s) { - if (s == "md5") { - return htMD5; - } - if (s == "sha1") { - return htSHA1; - } else if (s == "sha256") { - return htSHA256; - } else if (s == "sha512") { - return htSHA512; - } else { - return htUnknown; - } -} - -std::string printHashType(HashType ht) { - if (ht == htMD5) { - return "md5"; - } - if (ht == htSHA1) { - return "sha1"; - } else if (ht == htSHA256) { - return "sha256"; - } else if (ht == htSHA512) { - return "sha512"; - } else if (ht == htUnknown) { - return ""; - } else { - LOG(FATAL) << "Unrecognized hash type: " << static_cast(ht); - abort(); - } -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/hash.hh b/third_party/nix/src/libutil/hash.hh deleted file mode 100644 index 8b52ac657e..0000000000 --- a/third_party/nix/src/libutil/hash.hh +++ /dev/null @@ -1,147 +0,0 @@ -#pragma once - -#include - -#include "libproto/worker.grpc.pb.h" -#include "libutil/serialise.hh" -#include "libutil/types.hh" - -namespace nix { - -// Size of the hashes rendered in store paths, in bytes -constexpr unsigned int 
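compressHash above folds a full digest down to a shorter one by cyclically XORing bytes, which is how the 20-byte store-path hash is derived from a longer hash. A small standalone sketch of the same folding over byte vectors; the vector-based signature is illustrative.

    #include <cstdint>
    #include <vector>

    // Fold an arbitrary-length digest down to newSize bytes by cyclically XORing
    // bytes together, as the deleted compressHash did for store-path hashes.
    std::vector<uint8_t> foldHash(const std::vector<uint8_t>& hash, size_t newSize) {
      std::vector<uint8_t> out(newSize, 0);
      for (size_t i = 0; i < hash.size(); ++i) {
        out[i % newSize] ^= hash[i];
      }
      return out;
    }

    // e.g. a 32-byte SHA-256 digest folded to the 20 bytes used in store paths:
    //   auto short20 = foldHash(sha256Digest, 20);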
kStorePathHashSize = 20; - -MakeError(BadHash, Error); - -// TODO(grfn): Replace this with the hash type enum from the daemon proto so we -// don't have to juggle two different types -enum HashType : char { htUnknown, htMD5, htSHA1, htSHA256, htSHA512 }; - -std::optional hash_type_from(nix::proto::HashType hash_type); - -nix::proto::HashType HashTypeToProto(HashType hash_type); - -const int md5HashSize = 16; -const int sha1HashSize = 20; -const int sha256HashSize = 32; -const int sha512HashSize = 64; - -// omitted: E O U T -constexpr char base32Chars[] = "0123456789abcdfghijklmnpqrsvwxyz"; - -enum Base : int { Base64, Base32, Base16, SRI }; - -struct Hash { - static const unsigned int maxHashSize = 64; - unsigned int hashSize = 0; - unsigned char hash[maxHashSize] = {}; - - HashType type = htUnknown; - - /* Create an unset hash object. */ - Hash(){}; - - /* Create a zero-filled hash object. */ - explicit Hash(HashType type) : type(type) { init(); }; - - /* Initialize the hash from a string representation, in the format - "[:]" or "-" (a - Subresource Integrity hash expression). If the 'type' argument - is htUnknown, then the hash type must be specified in the - string. */ - explicit Hash(std::string_view s, HashType type = htUnknown); - - /* Status-returning version of above constructor */ - static absl::StatusOr deserialize(std::string_view s, - HashType type = htUnknown); - - // Legacy unwrapper for StatusOr. Throws BadHash. - static Hash unwrap_throw(absl::StatusOr hash) noexcept(false); - - void init(); - - /* Check whether a hash is set. */ - explicit operator bool() const { return type != htUnknown; } - - /* Check whether two hash are equal. */ - bool operator==(const Hash& h2) const; - - /* Check whether two hash are not equal. */ - bool operator!=(const Hash& h2) const; - - /* For sorting. */ - bool operator<(const Hash& h) const; - - /* Returns the length of a base-16 representation of this hash. */ - size_t base16Len() const { return hashSize * 2; } - - /* Returns the length of a base-32 representation of this hash. */ - size_t base32Len() const { return (hashSize * 8 - 1) / 5 + 1; } - - /* Returns the length of a base-64 representation of this hash. */ - size_t base64Len() const { return ((4 * hashSize / 3) + 3) & ~3; } - - /* Return a string representation of the hash, in base-16, base-32 - or base-64. By default, this is prefixed by the hash type - (e.g. "sha256:"). */ - std::string to_string(Base base = Base32, bool includeType = true) const; - - /* Returns whether the passed string contains entirely valid base16 - characters. */ - static bool IsValidBase16(absl::string_view s); - - /* Returns whether the passed string contains entirely valid base32 - characters. */ - static bool IsValidBase32(absl::string_view s); - - // Convert this Hash to the format expected in store paths - [[nodiscard]] std::string ToStorePathHash() const; -}; - -/* Print a hash in base-16 if it's MD5, or base-32 otherwise. */ -std::string printHash16or32(const Hash& hash); - -/* Compute the hash of the given string. */ -Hash hashString(HashType ht, const std::string& s); - -/* Compute the hash of the given file. */ -Hash hashFile(HashType ht, const Path& path); - -/* A pair of the Hash, and the number of bytes consumed. */ -typedef std::pair HashResult; - -/* Compute the hash of the given path. The hash is defined as - (essentially) hashString(ht, dumpPath(path)). 
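The base16Len/base32Len/base64Len helpers above determine how long each rendering of a digest is. A worked check of those formulas for common sizes, as a compile-time sketch; the free-function form is illustrative.

    #include <cstddef>

    // Rendered lengths for a digest of hashSize bytes, per the deleted Hash helpers.
    constexpr size_t base16Len(size_t hashSize) { return hashSize * 2; }
    constexpr size_t base32Len(size_t hashSize) { return (hashSize * 8 - 1) / 5 + 1; }
    constexpr size_t base64Len(size_t hashSize) {
      return ((4 * hashSize / 3) + 3) & ~size_t{3};
    }

    // Worked example for SHA-256 (32-byte digests):
    static_assert(base16Len(32) == 64);
    static_assert(base32Len(32) == 52);
    static_assert(base64Len(32) == 44);
    // and the 20-byte compressed store-path hash gives the 32-character prefix:
    static_assert(base32Len(20) == 32);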
*/ -HashResult hashPath(HashType ht, const Path& path, - PathFilter& filter = defaultPathFilter); - -/* Compress a hash to the specified number of bytes by cyclically - XORing bytes together. */ -Hash compressHash(const Hash& hash, unsigned int newSize); - -/* Parse a string representing a hash type. */ -HashType parseHashType(const std::string& s); - -/* And the reverse. */ -std::string printHashType(HashType ht); - -namespace hash { -union Ctx; -} - -class HashSink : public BufferedSink { - private: - HashType ht; - std::unique_ptr ctx; - unsigned long long bytes; - - public: - explicit HashSink(HashType ht); - HashSink(const HashSink& h); - ~HashSink(); - void write(const unsigned char* data, size_t len); - HashResult finish(); - HashResult currentHash(); -}; - -} // namespace nix diff --git a/third_party/nix/src/libutil/istringstream_nocopy.hh b/third_party/nix/src/libutil/istringstream_nocopy.hh deleted file mode 100644 index 31683d37c9..0000000000 --- a/third_party/nix/src/libutil/istringstream_nocopy.hh +++ /dev/null @@ -1,85 +0,0 @@ -/* This file provides a variant of std::istringstream that doesn't - copy its string argument. This is useful for large strings. The - caller must ensure that the string object is not destroyed while - it's referenced by this object. */ - -#pragma once - -#include -#include - -template , - class Allocator = std::allocator> -class basic_istringbuf_nocopy : public std::basic_streambuf { - public: - using string_type = std::basic_string; - - using off_type = typename std::basic_streambuf::off_type; - - using pos_type = typename std::basic_streambuf::pos_type; - - using int_type = typename std::basic_streambuf::int_type; - - using traits_type = typename std::basic_streambuf::traits_type; - - private: - const string_type& s; - - off_type off; - - public: - explicit basic_istringbuf_nocopy(const string_type& s) : s{s}, off{0} {} - - private: - pos_type seekoff(off_type off, std::ios_base::seekdir dir, - std::ios_base::openmode which) { - if (which & std::ios_base::in) { - this->off = - dir == std::ios_base::beg - ? off - : (dir == std::ios_base::end ? 
s.size() + off : this->off + off); - } - return pos_type(this->off); - } - - pos_type seekpos(pos_type pos, std::ios_base::openmode which) { - return seekoff(pos, std::ios_base::beg, which); - } - - std::streamsize showmanyc() { return s.size() - off; } - - int_type underflow() { - if (typename string_type::size_type(off) == s.size()) { - return traits_type::eof(); - } - return traits_type::to_int_type(s[off]); - } - - int_type uflow() { - if (typename string_type::size_type(off) == s.size()) { - return traits_type::eof(); - } - return traits_type::to_int_type(s[off++]); - } - - int_type pbackfail(int_type ch) { - if (off == 0 || (ch != traits_type::eof() && ch != s[off - 1])) { - return traits_type::eof(); - } - - return traits_type::to_int_type(s[--off]); - } -}; - -template , - class Allocator = std::allocator> -class basic_istringstream_nocopy : public std::basic_iostream { - using buf_type = basic_istringbuf_nocopy; - buf_type buf; - - public: - explicit basic_istringstream_nocopy(const typename buf_type::string_type& s) - : std::basic_iostream(&buf), buf(s){}; -}; - -using istringstream_nocopy = basic_istringstream_nocopy; diff --git a/third_party/nix/src/libutil/json.cc b/third_party/nix/src/libutil/json.cc deleted file mode 100644 index 59ff74f579..0000000000 --- a/third_party/nix/src/libutil/json.cc +++ /dev/null @@ -1,198 +0,0 @@ -#include "libutil/json.hh" - -#include -#include - -namespace nix { - -void toJSON(std::ostream& str, const char* start, const char* end) { - str << '"'; - for (auto i = start; i != end; i++) { - if (*i == '\"' || *i == '\\') { - str << '\\' << *i; - } else if (*i == '\n') { - str << "\\n"; - } else if (*i == '\r') { - str << "\\r"; - } else if (*i == '\t') { - str << "\\t"; - } else if (*i >= 0 && *i < 32) { - str << "\\u" << std::setfill('0') << std::setw(4) << std::hex - << static_cast(*i) << std::dec; - } else { - str << *i; - } - } - str << '"'; -} - -void toJSON(std::ostream& str, const char* s) { - if (s == nullptr) { - str << "null"; - } else { - toJSON(str, s, s + strlen(s)); - } -} - -template <> -void toJSON(std::ostream& str, const int& n) { - str << n; -} -template <> -void toJSON(std::ostream& str, const unsigned int& n) { - str << n; -} -template <> -void toJSON(std::ostream& str, const long& n) { - str << n; -} -template <> -void toJSON(std::ostream& str, const unsigned long& n) { - str << n; -} -template <> -void toJSON(std::ostream& str, const long long& n) { - str << n; -} -template <> -void toJSON(std::ostream& str, - const unsigned long long& n) { - str << n; -} -template <> -void toJSON(std::ostream& str, const float& n) { - str << n; -} -template <> -void toJSON(std::ostream& str, const double& n) { - str << n; -} - -template <> -void toJSON(std::ostream& str, const std::string& s) { - toJSON(str, s.c_str(), s.c_str() + s.size()); -} - -template <> -void toJSON(std::ostream& str, const bool& b) { - str << (b ? 
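The toJSON(const char*, const char*) routine above escapes a string for JSON output: quotes and backslashes get a backslash, newline/return/tab use the short escapes, and other control characters are rendered as \u00XX. A self-contained sketch of the same rules; escapeJson is an illustrative name.

    #include <cstdio>
    #include <string>

    // Escape a string for emission inside a JSON document, following the same
    // rules as the deleted toJSON(): backslash-escape quotes and backslashes,
    // use \n/\r/\t, and render other control characters as \u00XX.
    std::string escapeJson(const std::string& in) {
      std::string out = "\"";
      for (unsigned char c : in) {
        if (c == '"' || c == '\\') {
          out += '\\';
          out += static_cast<char>(c);
        } else if (c == '\n') {
          out += "\\n";
        } else if (c == '\r') {
          out += "\\r";
        } else if (c == '\t') {
          out += "\\t";
        } else if (c < 32) {
          char buf[8];
          std::snprintf(buf, sizeof(buf), "\\u%04x", c);
          out += buf;
        } else {
          out += static_cast<char>(c);
        }
      }
      out += '"';
      return out;
    }

    // e.g. an embedded tab becomes \t and an embedded quote becomes \" inside
    // the returned, already-quoted string.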
"true" : "false"); -} - -template <> -void toJSON(std::ostream& str, const std::nullptr_t& b) { - str << "null"; -} - -JSONWriter::JSONWriter(std::ostream& str, bool indent) - : state(new JSONState(str, indent)) { - state->stack++; -} - -JSONWriter::JSONWriter(JSONState* state) : state(state) { state->stack++; } - -JSONWriter::~JSONWriter() { - if (state != nullptr) { - assertActive(); - state->stack--; - if (state->stack == 0) { - delete state; - } - } -} - -void JSONWriter::comma() { - assertActive(); - if (first) { - first = false; - } else { - state->str << ','; - } - if (state->indent) { - indent(); - } -} - -void JSONWriter::indent() { - state->str << '\n' << std::string(state->depth * 2, ' '); -} - -void JSONList::open() { - state->depth++; - state->str << '['; -} - -JSONList::~JSONList() { - state->depth--; - if (state->indent && !first) { - indent(); - } - state->str << "]"; -} - -JSONList JSONList::list() { - comma(); - return JSONList(state); -} - -JSONObject JSONList::object() { - comma(); - return JSONObject(state); -} - -JSONPlaceholder JSONList::placeholder() { - comma(); - return JSONPlaceholder(state); -} - -void JSONObject::open() { - state->depth++; - state->str << '{'; -} - -JSONObject::~JSONObject() { - if (state != nullptr) { - state->depth--; - if (state->indent && !first) { - indent(); - } - state->str << "}"; - } -} - -void JSONObject::attr(const std::string& s) { - comma(); - toJSON(state->str, s); - state->str << ':'; - if (state->indent) { - state->str << ' '; - } -} - -JSONList JSONObject::list(const std::string& name) { - attr(name); - return JSONList(state); -} - -JSONObject JSONObject::object(const std::string& name) { - attr(name); - return JSONObject(state); -} - -JSONPlaceholder JSONObject::placeholder(const std::string& name) { - attr(name); - return JSONPlaceholder(state); -} - -JSONList JSONPlaceholder::list() { - assertValid(); - first = false; - return JSONList(state); -} - -JSONObject JSONPlaceholder::object() { - assertValid(); - first = false; - return JSONObject(state); -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/json.hh b/third_party/nix/src/libutil/json.hh deleted file mode 100644 index 14d61d8a57..0000000000 --- a/third_party/nix/src/libutil/json.hh +++ /dev/null @@ -1,144 +0,0 @@ -#pragma once - -#include -#include -#include - -namespace nix { - -void toJSON(std::ostream& str, const char* start, const char* end); -void toJSON(std::ostream& str, const char* s); - -template -void toJSON(std::ostream& str, const T& n); - -class JSONWriter { - protected: - struct JSONState { - std::ostream& str; - bool indent; - size_t depth = 0; - size_t stack = 0; - JSONState(std::ostream& str, bool indent) : str(str), indent(indent) {} - ~JSONState() { assert(stack == 0); } - }; - - JSONState* state; - - bool first = true; - - JSONWriter(std::ostream& str, bool indent); - - explicit JSONWriter(JSONState* state); - - ~JSONWriter(); - - void assertActive() { assert(state->stack != 0); } - - void comma(); - - void indent(); -}; - -class JSONObject; -class JSONPlaceholder; - -class JSONList : JSONWriter { - private: - friend class JSONObject; - friend class JSONPlaceholder; - - void open(); - - explicit JSONList(JSONState* state) : JSONWriter(state) { open(); } - - public: - explicit JSONList(std::ostream& str, bool indent = false) - : JSONWriter(str, indent) { - open(); - } - - ~JSONList(); - - template - JSONList& elem(const T& v) { - comma(); - toJSON(state->str, v); - return *this; - } - - JSONList list(); - - JSONObject object(); - - 
JSONPlaceholder placeholder(); -}; - -class JSONObject : JSONWriter { - private: - friend class JSONList; - friend class JSONPlaceholder; - - void open(); - - explicit JSONObject(JSONState* state) : JSONWriter(state) { open(); } - - void attr(const std::string& s); - - public: - explicit JSONObject(std::ostream& str, bool indent = false) - : JSONWriter(str, indent) { - open(); - } - - JSONObject(const JSONObject& obj) = delete; - - JSONObject(JSONObject&& obj) : JSONWriter(obj.state) { obj.state = 0; } - - ~JSONObject(); - - template - JSONObject& attr(const std::string& name, const T& v) { - attr(name); - toJSON(state->str, v); - return *this; - } - - JSONList list(const std::string& name); - - JSONObject object(const std::string& name); - - JSONPlaceholder placeholder(const std::string& name); -}; - -class JSONPlaceholder : JSONWriter { - private: - friend class JSONList; - friend class JSONObject; - - explicit JSONPlaceholder(JSONState* state) : JSONWriter(state) {} - - void assertValid() { - assertActive(); - assert(first); - } - - public: - explicit JSONPlaceholder(std::ostream& str, bool indent = false) - : JSONWriter(str, indent) {} - - ~JSONPlaceholder() { assert(!first || std::uncaught_exception()); } - - template - void write(const T& v) { - assertValid(); - first = false; - toJSON(state->str, v); - } - - JSONList list(); - - JSONObject object(); -}; - -} // namespace nix diff --git a/third_party/nix/src/libutil/lazy.hh b/third_party/nix/src/libutil/lazy.hh deleted file mode 100644 index 5c6ff5d8df..0000000000 --- a/third_party/nix/src/libutil/lazy.hh +++ /dev/null @@ -1,45 +0,0 @@ -#include -#include -#include - -namespace nix { - -/* A helper class for lazily-initialized variables. - - Lazy var([]() { return value; }); - - declares a variable of type T that is initialized to 'value' (in a - thread-safe way) on first use, that is, when var() is first - called. If the initialiser code throws an exception, then all - subsequent calls to var() will rethrow that exception. */ -template -class Lazy { - typedef std::function Init; - - Init init; - - std::once_flag done; - - T value; - - std::exception_ptr ex; - - public: - explicit Lazy(Init init) : init(init) {} - - const T& operator()() { - std::call_once(done, [&]() { - try { - value = init(); - } catch (...) { - ex = std::current_exception(); - } - }); - if (ex) { - std::rethrow_exception(ex); - } - return value; - } -}; - -} // namespace nix diff --git a/third_party/nix/src/libutil/lru-cache.hh b/third_party/nix/src/libutil/lru-cache.hh deleted file mode 100644 index 1832c54244..0000000000 --- a/third_party/nix/src/libutil/lru-cache.hh +++ /dev/null @@ -1,90 +0,0 @@ -#pragma once - -#include -#include -#include - -namespace nix { - -/* A simple least-recently used cache. Not thread-safe. */ -template -class LRUCache { - private: - size_t capacity; - - // Stupid wrapper to get around circular dependency between Data - // and LRU. - struct LRUIterator; - - using Data = std::map>; - using LRU = std::list; - - struct LRUIterator { - typename LRU::iterator it; - }; - - Data data; - LRU lru; - - public: - explicit LRUCache(size_t capacity) : capacity(capacity) {} - - /* Insert or upsert an item in the cache. */ - void upsert(const Key& key, const Value& value) { - if (capacity == 0) { - return; - } - - erase(key); - - if (data.size() >= capacity) { - /* Retire the oldest item. 
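The Lazy helper above initialises a value at most once, in a thread-safe way, and replays any exception thrown by the initialiser on later accesses. A self-contained sketch of the same std::call_once pattern; the template here is a stand-in, not the removed header.

    #include <exception>
    #include <functional>
    #include <mutex>
    #include <string>

    // Thread-safe lazily-initialised value in the style of the deleted Lazy<T>:
    // the initialiser runs at most once, and an exception it throws is rethrown
    // on every subsequent access.
    template <typename T>
    class LazyValue {
      std::function<T()> init;
      std::once_flag done;
      T value{};
      std::exception_ptr ex;

     public:
      explicit LazyValue(std::function<T()> init) : init(std::move(init)) {}

      const T& operator()() {
        std::call_once(done, [&] {
          try {
            value = init();
          } catch (...) {
            ex = std::current_exception();
          }
        });
        if (ex) std::rethrow_exception(ex);
        return value;
      }
    };

    // e.g. a value that is only computed when first needed:
    //   LazyValue<std::string> home([] { return std::string("/home/user"); });
    //   ... home() ...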
*/ - auto oldest = lru.begin(); - data.erase(*oldest); - lru.erase(oldest); - } - - auto res = data.emplace(key, std::make_pair(LRUIterator(), value)); - assert(res.second); - auto& i(res.first); - - auto j = lru.insert(lru.end(), i); - - i->second.first.it = j; - } - - bool erase(const Key& key) { - auto i = data.find(key); - if (i == data.end()) { - return false; - } - lru.erase(i->second.first.it); - data.erase(i); - return true; - } - - /* Look up an item in the cache. If it exists, it becomes the most - recently used item. */ - std::optional get(const Key& key) { - auto i = data.find(key); - if (i == data.end()) { - return {}; - } - - /* Move this item to the back of the LRU list. */ - lru.erase(i->second.first.it); - auto j = lru.insert(lru.end(), i); - i->second.first.it = j; - - return i->second.second; - } - - size_t size() { return data.size(); } - - void clear() { - data.clear(); - lru.clear(); - } -}; - -} // namespace nix diff --git a/third_party/nix/src/libutil/monitor-fd.hh b/third_party/nix/src/libutil/monitor-fd.hh deleted file mode 100644 index c818c58261..0000000000 --- a/third_party/nix/src/libutil/monitor-fd.hh +++ /dev/null @@ -1,57 +0,0 @@ -#pragma once - -#include -#include -#include - -#include -#include -#include -#include - -namespace nix { - -class MonitorFdHup { - private: - std::thread thread; - - public: - MonitorFdHup(int fd) { - thread = std::thread([fd]() { - while (true) { - /* Wait indefinitely until a POLLHUP occurs. */ - struct pollfd fds[1]; - fds[0].fd = fd; - /* This shouldn't be necessary, but macOS doesn't seem to - like a zeroed out events field. - See rdar://37537852. - */ - fds[0].events = POLLHUP; - auto count = poll(fds, 1, -1); - if (count == -1) { - abort(); - } // can't happen - /* This shouldn't happen, but can on macOS due to a bug. - See rdar://37550628. - - This may eventually need a delay or further - coordination with the main thread if spinning proves - too harmful. - */ - if (count == 0) { - continue; - } - assert(fds[0].revents & POLLHUP); - triggerInterrupt(); - break; - } - }); - }; - - ~MonitorFdHup() { - pthread_cancel(thread.native_handle()); - thread.join(); - } -}; - -} // namespace nix diff --git a/third_party/nix/src/libutil/pool.hh b/third_party/nix/src/libutil/pool.hh deleted file mode 100644 index b5c3c4b5c4..0000000000 --- a/third_party/nix/src/libutil/pool.hh +++ /dev/null @@ -1,176 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libutil/ref.hh" -#include "libutil/sync.hh" - -namespace nix { - -/* This template class implements a simple pool manager of resources - of some type R, such as database connections. It is used as - follows: - - class Connection { ... }; - - Pool pool; - - { - auto conn(pool.get()); - conn->exec("select ..."); - } - - Here, the Connection object referenced by ‘conn’ is automatically - returned to the pool when ‘conn’ goes out of scope. -*/ - -template -class Pool { - public: - /* A function that produces new instances of R on demand. */ - typedef std::function()> Factory; - - /* A function that checks whether an instance of R is still - usable. Unusable instances are removed from the pool. 
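The LRUCache above keeps a recency list alongside the key map: inserts land at the most-recently-used end, hits are moved there, and the least-recently-used entry is retired when the capacity is reached. A compact standalone sketch of the same bookkeeping with std::list and std::unordered_map; LruCache is an illustrative stand-in, not the removed template.

    #include <list>
    #include <optional>
    #include <string>
    #include <unordered_map>
    #include <utility>

    // A minimal least-recently-used cache in the spirit of the deleted LRUCache:
    // the list orders keys from least to most recently used, and the map points
    // each key at its position in that list. Not thread-safe.
    template <typename K, typename V>
    class LruCache {
      size_t capacity;
      std::list<K> lru;  // front = least recently used
      std::unordered_map<K, std::pair<typename std::list<K>::iterator, V>> data;

     public:
      explicit LruCache(size_t capacity) : capacity(capacity) {}

      void upsert(const K& key, const V& value) {
        if (capacity == 0) return;
        erase(key);
        if (data.size() >= capacity) {  // retire the oldest entry
          data.erase(lru.front());
          lru.pop_front();
        }
        auto it = lru.insert(lru.end(), key);
        data.emplace(key, std::make_pair(it, value));
      }

      bool erase(const K& key) {
        auto i = data.find(key);
        if (i == data.end()) return false;
        lru.erase(i->second.first);
        data.erase(i);
        return true;
      }

      // A hit moves the key to the most-recently-used end of the list.
      std::optional<V> get(const K& key) {
        auto i = data.find(key);
        if (i == data.end()) return std::nullopt;
        lru.splice(lru.end(), lru, i->second.first);
        return i->second.second;
      }
    };

    // LruCache<std::string, int> c(2);
    // c.upsert("a", 1); c.upsert("b", 2); c.get("a"); c.upsert("c", 3);  // evicts "b"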
*/ - using Validator = std::function&)>; - - private: - Factory factory; - Validator validator; - - struct State { - size_t inUse = 0; - size_t max; - std::vector> idle; - }; - - Sync state; - - std::condition_variable wakeup; - - public: - explicit Pool( - size_t max = std::numeric_limits::max(), - const Factory& factory = []() { return make_ref(); }, - const Validator& validator = [](ref r) { return true; }) - : factory(factory), validator(validator) { - auto state_(state.lock()); - state_->max = max; - } - - void incCapacity() { - auto state_(state.lock()); - state_->max++; - /* we could wakeup here, but this is only used when we're - * about to nest Pool usages, and we want to save the slot for - * the nested use if we can - */ - } - - void decCapacity() { - auto state_(state.lock()); - state_->max--; - } - - ~Pool() { - auto state_(state.lock()); - assert(!state_->inUse); - state_->max = 0; - state_->idle.clear(); - } - - class Handle { - private: - Pool& pool; - std::shared_ptr r; - bool bad = false; - - friend Pool; - - Handle(Pool& pool, std::shared_ptr r) : pool(pool), r(r) {} - - public: - Handle(Handle&& h) : pool(h.pool), r(h.r) { h.r.reset(); } - - Handle(const Handle& l) = delete; - - ~Handle() { - if (!r) { - return; - } - { - auto state_(pool.state.lock()); - if (!bad) { - state_->idle.push_back(ref(r)); - } - assert(state_->inUse); - state_->inUse--; - } - pool.wakeup.notify_one(); - } - - R* operator->() { return &*r; } - R& operator*() { return *r; } - - void markBad() { bad = true; } - }; - - Handle get() { - { - auto state_(state.lock()); - - /* If we're over the maximum number of instance, we need - to wait until a slot becomes available. */ - while (state_->idle.empty() && state_->inUse >= state_->max) { - state_.wait(wakeup); - } - - while (!state_->idle.empty()) { - auto p = state_->idle.back(); - state_->idle.pop_back(); - if (validator(p)) { - state_->inUse++; - return Handle(*this, p); - } - } - - state_->inUse++; - } - - /* We need to create a new instance. Because that might take a - while, we don't hold the lock in the meantime. */ - try { - Handle h(*this, factory()); - return h; - } catch (...) 
{ - auto state_(state.lock()); - state_->inUse--; - wakeup.notify_one(); - throw; - } - } - - size_t count() { - auto state_(state.lock()); - return state_->idle.size() + state_->inUse; - } - - size_t capacity() { return state.lock()->max; } - - void flushBad() { - auto state_(state.lock()); - std::vector> left; - for (auto& p : state_->idle) { - if (validator(p)) { - left.push_back(p); - } - } - std::swap(state_->idle, left); - } -}; - -} // namespace nix diff --git a/third_party/nix/src/libutil/proto.hh b/third_party/nix/src/libutil/proto.hh deleted file mode 100644 index 058cb7b7b4..0000000000 --- a/third_party/nix/src/libutil/proto.hh +++ /dev/null @@ -1,174 +0,0 @@ -#pragma once - -#include -#include -#include - -#include "libproto/worker.pb.h" -#include "libutil/types.hh" - -namespace nix::util::proto { - -inline ::nix::proto::StorePath StorePath(const Path& path) { - ::nix::proto::StorePath store_path; - store_path.set_path(path); - return store_path; -} - -inline ::nix::proto::StorePaths StorePaths(const PathSet& paths) { - ::nix::proto::StorePaths result; - for (const auto& path : paths) { - result.add_paths(path); - } - return result; -} - -template -T FillFrom(const U& src) { - T result; - result.insert(src.begin(), src.end()); - return result; -} - -constexpr absl::StatusCode GRPCStatusCodeToAbsl(grpc::StatusCode code) { - switch (code) { - case grpc::StatusCode::OK: - return absl::StatusCode::kOk; - case grpc::StatusCode::CANCELLED: - return absl::StatusCode::kCancelled; - case grpc::StatusCode::UNKNOWN: - return absl::StatusCode::kUnknown; - case grpc::StatusCode::INVALID_ARGUMENT: - return absl::StatusCode::kInvalidArgument; - case grpc::StatusCode::DEADLINE_EXCEEDED: - return absl::StatusCode::kDeadlineExceeded; - case grpc::StatusCode::NOT_FOUND: - return absl::StatusCode::kNotFound; - case grpc::StatusCode::ALREADY_EXISTS: - return absl::StatusCode::kAlreadyExists; - case grpc::StatusCode::PERMISSION_DENIED: - return absl::StatusCode::kPermissionDenied; - case grpc::StatusCode::UNAUTHENTICATED: - return absl::StatusCode::kUnauthenticated; - case grpc::StatusCode::RESOURCE_EXHAUSTED: - return absl::StatusCode::kResourceExhausted; - case grpc::StatusCode::FAILED_PRECONDITION: - return absl::StatusCode::kFailedPrecondition; - case grpc::StatusCode::ABORTED: - return absl::StatusCode::kAborted; - case grpc::StatusCode::OUT_OF_RANGE: - return absl::StatusCode::kOutOfRange; - case grpc::StatusCode::UNIMPLEMENTED: - return absl::StatusCode::kUnimplemented; - case grpc::StatusCode::INTERNAL: - return absl::StatusCode::kInternal; - case grpc::StatusCode::UNAVAILABLE: - return absl::StatusCode::kUnavailable; - case grpc::StatusCode::DATA_LOSS: - return absl::StatusCode::kDataLoss; - default: - return absl::StatusCode::kInternal; - } -} - -constexpr grpc::StatusCode AbslStatusCodeToGRPC(absl::StatusCode code) { - switch (code) { - case absl::StatusCode::kOk: - return grpc::StatusCode::OK; - case absl::StatusCode::kCancelled: - return grpc::StatusCode::CANCELLED; - case absl::StatusCode::kUnknown: - return grpc::StatusCode::UNKNOWN; - case absl::StatusCode::kInvalidArgument: - return grpc::StatusCode::INVALID_ARGUMENT; - case absl::StatusCode::kDeadlineExceeded: - return grpc::StatusCode::DEADLINE_EXCEEDED; - case absl::StatusCode::kNotFound: - return grpc::StatusCode::NOT_FOUND; - case absl::StatusCode::kAlreadyExists: - return grpc::StatusCode::ALREADY_EXISTS; - case absl::StatusCode::kPermissionDenied: - return grpc::StatusCode::PERMISSION_DENIED; - case 
absl::StatusCode::kUnauthenticated: - return grpc::StatusCode::UNAUTHENTICATED; - case absl::StatusCode::kResourceExhausted: - return grpc::StatusCode::RESOURCE_EXHAUSTED; - case absl::StatusCode::kFailedPrecondition: - return grpc::StatusCode::FAILED_PRECONDITION; - case absl::StatusCode::kAborted: - return grpc::StatusCode::ABORTED; - case absl::StatusCode::kOutOfRange: - return grpc::StatusCode::OUT_OF_RANGE; - case absl::StatusCode::kUnimplemented: - return grpc::StatusCode::UNIMPLEMENTED; - case absl::StatusCode::kInternal: - return grpc::StatusCode::INTERNAL; - case absl::StatusCode::kUnavailable: - return grpc::StatusCode::UNAVAILABLE; - case absl::StatusCode::kDataLoss: - return grpc::StatusCode::DATA_LOSS; - default: - return grpc::StatusCode::INTERNAL; - } -} - -constexpr absl::string_view GRPCStatusCodeDescription(grpc::StatusCode code) { - switch (code) { - case grpc::StatusCode::OK: - return "OK"; - case grpc::StatusCode::CANCELLED: - return "CANCELLED"; - case grpc::StatusCode::UNKNOWN: - return "UNKNOWN"; - case grpc::StatusCode::INVALID_ARGUMENT: - return "INVALID_ARGUMENT"; - case grpc::StatusCode::DEADLINE_EXCEEDED: - return "DEADLINE_EXCEEDED"; - case grpc::StatusCode::NOT_FOUND: - return "NOT_FOUND"; - case grpc::StatusCode::ALREADY_EXISTS: - return "ALREADY_EXISTS"; - case grpc::StatusCode::PERMISSION_DENIED: - return "PERMISSION_DENIED"; - case grpc::StatusCode::UNAUTHENTICATED: - return "UNAUTHENTICATED"; - case grpc::StatusCode::RESOURCE_EXHAUSTED: - return "RESOURCE_EXHAUSTED"; - case grpc::StatusCode::FAILED_PRECONDITION: - return "FAILED_PRECONDITION"; - case grpc::StatusCode::ABORTED: - return "ABORTED"; - case grpc::StatusCode::OUT_OF_RANGE: - return "OUT_OF_RANGE"; - case grpc::StatusCode::UNIMPLEMENTED: - return "UNIMPLEMENTED"; - case grpc::StatusCode::INTERNAL: - return "INTERNAL"; - case grpc::StatusCode::UNAVAILABLE: - return "UNAVAILABLE"; - case grpc::StatusCode::DATA_LOSS: - return "DATA_LOSS"; - default: - return ""; - }; -} - -inline absl::Status GRPCStatusToAbsl(grpc::Status status) { - if (status.ok()) { - return absl::OkStatus(); - } - - return absl::Status(GRPCStatusCodeToAbsl(status.error_code()), - status.error_message()); -} - -inline grpc::Status AbslToGRPCStatus(absl::Status status) { - if (status.ok()) { - return grpc::Status::OK; - } - - return grpc::Status(AbslStatusCodeToGRPC(status.code()), - std::string(status.message())); -} - -} // namespace nix::util::proto diff --git a/third_party/nix/src/libutil/ref.hh b/third_party/nix/src/libutil/ref.hh deleted file mode 100644 index 3c375491fd..0000000000 --- a/third_party/nix/src/libutil/ref.hh +++ /dev/null @@ -1,65 +0,0 @@ -#pragma once - -#include -#include -#include - -namespace nix { - -/* A simple non-nullable reference-counted pointer. Actually a wrapper - around std::shared_ptr that prevents non-null constructions. 
*/ -template -class ref { // TODO(tazjin): rename to brainworm_ref or something - private: - std::shared_ptr p; - - public: - ref(const ref& r) : p(r.p) {} - - explicit ref(const std::shared_ptr& p) : p(p) { - if (!p) { - throw std::invalid_argument("null pointer cast to ref"); - } - } - - explicit ref(T* p) : p(p) { - if (!p) { - throw std::invalid_argument("null pointer cast to ref"); - } - } - - T* operator->() const { return &*p; } - - T& operator*() const { return *p; } - - operator std::shared_ptr() const { return p; } - - std::shared_ptr get_ptr() const { return p; } - - template - ref cast() const { - return ref(std::dynamic_pointer_cast(p)); - } - - template - std::shared_ptr dynamic_pointer_cast() const { - return std::dynamic_pointer_cast(p); - } - - template - operator ref() const { - return ref((std::shared_ptr)p); - } - - private: - template - friend ref make_ref(Args&&... args); -}; - -template -inline ref make_ref(Args&&... args) { - auto p = std::make_shared(std::forward(args)...); - return ref(p); -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/serialise.cc b/third_party/nix/src/libutil/serialise.cc deleted file mode 100644 index 288255089b..0000000000 --- a/third_party/nix/src/libutil/serialise.cc +++ /dev/null @@ -1,311 +0,0 @@ -#include "libutil/serialise.hh" - -#include -#include -#include -#include -#include - -#include - -#include "libutil/util.hh" - -namespace nix { - -void BufferedSink::operator()(const unsigned char* data, size_t len) { - if (!buffer) { - buffer = decltype(buffer)(new unsigned char[bufSize]); - } - - while (len != 0u) { - /* Optimisation: bypass the buffer if the data exceeds the - buffer size. */ - if (bufPos + len >= bufSize) { - flush(); - write(data, len); - break; - } - /* Otherwise, copy the bytes to the buffer. Flush the buffer - when it's full. */ - size_t n = bufPos + len > bufSize ? bufSize - bufPos : len; - memcpy(buffer.get() + bufPos, data, n); - data += n; - bufPos += n; - len -= n; - if (bufPos == bufSize) { - flush(); - } - } -} - -void BufferedSink::flush() { - if (bufPos == 0) { - return; - } - size_t n = bufPos; - bufPos = 0; // don't trigger the assert() in ~BufferedSink() - write(buffer.get(), n); -} - -FdSink::~FdSink() { - try { - flush(); - } catch (...) { - ignoreException(); - } -} - -size_t threshold = 256 * 1024 * 1024; - -static void warnLargeDump() { - LOG(WARNING) - << "dumping very large path (> 256 MiB); this may run out of memory"; -} - -void FdSink::write(const unsigned char* data, size_t len) { - written += len; - static bool warned = false; - if (warn && !warned) { - if (written > threshold) { - warnLargeDump(); - warned = true; - } - } - try { - writeFull(fd, data, len); - } catch (SysError& e) { - _good = false; - throw; - } -} - -bool FdSink::good() { return _good; } - -void Source::operator()(unsigned char* data, size_t len) { - while (len != 0u) { - size_t n = read(data, len); - data += n; - len -= n; - } -} - -std::string Source::drain() { - std::string s; - std::vector buf(8192); - while (true) { - size_t n; - try { - n = read(buf.data(), buf.size()); - s.append(reinterpret_cast(buf.data()), n); - } catch (EndOfFile&) { - break; - } - } - return s; -} - -size_t BufferedSource::read(unsigned char* data, size_t len) { - if (!buffer) { - buffer = decltype(buffer)(new unsigned char[bufSize]); - } - - if (bufPosIn == 0u) { - bufPosIn = readUnbuffered(buffer.get(), bufSize); - } - - /* Copy out the data in the buffer. */ - size_t n = len > bufPosIn - bufPosOut ? 
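The ref<T> wrapper deleted above gives a non-nullable handle: constructing it from a null shared_ptr throws, so code holding a ref never has to check for null before dereferencing. A short standalone sketch of that idea; NonNullRef and make_non_null are illustrative names.

    #include <memory>
    #include <stdexcept>
    #include <string>
    #include <utility>

    // Non-nullable shared pointer in the spirit of the deleted ref<T>:
    // construction from a null std::shared_ptr throws instead of producing an
    // empty handle.
    template <typename T>
    class NonNullRef {
      std::shared_ptr<T> p;

     public:
      explicit NonNullRef(std::shared_ptr<T> p) : p(std::move(p)) {
        if (!this->p) throw std::invalid_argument("null pointer cast to ref");
      }

      T* operator->() const { return p.get(); }
      T& operator*() const { return *p; }
      std::shared_ptr<T> get_ptr() const { return p; }
    };

    template <typename T, typename... Args>
    NonNullRef<T> make_non_null(Args&&... args) {
      return NonNullRef<T>(std::make_shared<T>(std::forward<Args>(args)...));
    }

    // auto s = make_non_null<std::string>("hello");  // *s is always safe to use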
bufPosIn - bufPosOut : len; - memcpy(data, buffer.get() + bufPosOut, n); - bufPosOut += n; - if (bufPosIn == bufPosOut) { - bufPosIn = bufPosOut = 0; - } - return n; -} - -bool BufferedSource::hasData() { return bufPosOut < bufPosIn; } - -size_t FdSource::readUnbuffered(unsigned char* data, size_t len) { - ssize_t n; - do { - checkInterrupt(); - n = ::read(fd, reinterpret_cast(data), len); - } while (n == -1 && errno == EINTR); - if (n == -1) { - _good = false; - throw SysError("reading from file"); - } - if (n == 0) { - _good = false; - throw EndOfFile("unexpected end-of-file"); - } - read += n; - return n; -} - -bool FdSource::good() { return _good; } - -size_t StringSource::read(unsigned char* data, size_t len) { - if (pos == s.size()) { - throw EndOfFile("end of string reached"); - } - size_t n = s.copy(reinterpret_cast(data), len, pos); - pos += n; - return n; -} - -#if BOOST_VERSION >= 106300 && BOOST_VERSION < 106600 -#error Coroutines are broken in this version of Boost! -#endif - -std::unique_ptr sinkToSource(const std::function& fun, - const std::function& eof) { - struct SinkToSource : Source { - using coro_t = boost::coroutines2::coroutine; - - std::function fun; - std::function eof; - std::optional coro; - bool started = false; - - SinkToSource(std::function fun, std::function eof) - : fun(std::move(fun)), eof(std::move(eof)) {} - - std::string cur; - size_t pos = 0; - - size_t read(unsigned char* data, size_t len) override { - if (!coro) { - coro = coro_t::pull_type([&](coro_t::push_type& yield) { - LambdaSink sink([&](const unsigned char* data, size_t len) { - if (len != 0u) { - yield(std::string(reinterpret_cast(data), len)); - } - }); - fun(sink); - }); - } - - if (!*coro) { - eof(); - abort(); - } - - if (pos == cur.size()) { - if (!cur.empty()) { - (*coro)(); - } - cur = coro->get(); - pos = 0; - } - - auto n = std::min(cur.size() - pos, len); - memcpy(data, reinterpret_cast(cur.data()) + pos, n); - pos += n; - - return n; - } - }; - - return std::make_unique(fun, eof); -} - -void writePadding(size_t len, Sink& sink) { - if ((len % 8) != 0u) { - unsigned char zero[8]; - memset(zero, 0, sizeof(zero)); - sink(zero, 8 - (len % 8)); - } -} - -void writeString(const unsigned char* buf, size_t len, Sink& sink) { - sink << len; - sink(buf, len); - writePadding(len, sink); -} - -Sink& operator<<(Sink& sink, const std::string& s) { - writeString(reinterpret_cast(s.data()), s.size(), sink); - return sink; -} - -template -void writeStrings(const T& ss, Sink& sink) { - sink << ss.size(); - for (auto& i : ss) { - sink << i; - } -} - -Sink& operator<<(Sink& sink, const Strings& s) { - writeStrings(s, sink); - return sink; -} - -Sink& operator<<(Sink& sink, const StringSet& s) { - writeStrings(s, sink); - return sink; -} - -void readPadding(size_t len, Source& source) { - if ((len % 8) != 0u) { - unsigned char zero[8]; - size_t n = 8 - (len % 8); - source(zero, n); - for (unsigned int i = 0; i < n; i++) { - if (zero[i] != 0u) { - throw SerialisationError("non-zero padding"); - } - } - } -} - -size_t readString(unsigned char* buf, size_t max, Source& source) { - auto len = readNum(source); - if (len > max) { - throw SerialisationError("string is too long"); - } - source(buf, len); - readPadding(len, source); - return len; -} - -std::string readString(Source& source, size_t max) { - auto len = readNum(source); - if (len > max) { - throw SerialisationError("string is too long"); - } - std::string res(len, 0); - source(reinterpret_cast(res.data()), len); - readPadding(len, source); - 
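writeString/readString and the padding helpers above frame each string as a length, the raw bytes, then zero padding up to the next 8-byte boundary, with non-zero padding rejected on read. A standalone sketch of that framing over a byte vector; the 64-bit little-endian length encoding is an assumption here, since the number serialiser itself is not part of the code shown, and all function names are illustrative.

    #include <cstdint>
    #include <stdexcept>
    #include <string>
    #include <vector>

    void writeUint64Le(std::vector<uint8_t>& out, uint64_t n) {
      for (int i = 0; i < 8; ++i) out.push_back(static_cast<uint8_t>(n >> (8 * i)));
    }

    // Length, payload, then zeros up to the next multiple of 8 bytes.
    void writeFramedString(std::vector<uint8_t>& out, const std::string& s) {
      writeUint64Le(out, s.size());
      out.insert(out.end(), s.begin(), s.end());
      if (s.size() % 8 != 0) out.insert(out.end(), 8 - s.size() % 8, 0);
    }

    std::string readFramedString(const std::vector<uint8_t>& in, size_t& pos) {
      if (pos + 8 > in.size()) throw std::runtime_error("truncated length");
      uint64_t len = 0;
      for (int i = 0; i < 8; ++i) len |= static_cast<uint64_t>(in[pos + i]) << (8 * i);
      pos += 8;
      if (pos + len > in.size()) throw std::runtime_error("truncated string");
      std::string s(in.begin() + pos, in.begin() + pos + len);
      pos += len;
      size_t pad = (len % 8 != 0) ? 8 - len % 8 : 0;
      if (pos + pad > in.size()) throw std::runtime_error("truncated padding");
      for (size_t i = 0; i < pad; ++i) {
        if (in[pos + i] != 0) throw std::runtime_error("non-zero padding");
      }
      pos += pad;
      return s;
    }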
return res; -} - -Source& operator>>(Source& in, std::string& s) { - s = readString(in); - return in; -} - -template -T readStrings(Source& source) { - auto count = readNum(source); - T ss; - while (count--) { - ss.insert(ss.end(), readString(source)); - } - return ss; -} - -template Paths readStrings(Source& source); -template PathSet readStrings(Source& source); - -void StringSink::operator()(const unsigned char* data, size_t len) { - static bool warned = false; - if (!warned && s->size() > threshold) { - warnLargeDump(); - warned = true; - } - s->append(reinterpret_cast(data), len); -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/serialise.hh b/third_party/nix/src/libutil/serialise.hh deleted file mode 100644 index c6d1d814db..0000000000 --- a/third_party/nix/src/libutil/serialise.hh +++ /dev/null @@ -1,289 +0,0 @@ -#pragma once - -#include - -#include "libutil/types.hh" -#include "libutil/util.hh" - -namespace nix { - -/* Abstract destination of binary data. */ -struct Sink { - virtual ~Sink() {} - virtual void operator()(const unsigned char* data, size_t len) = 0; - virtual bool good() { return true; } - - void operator()(const std::string& s) { - (*this)((const unsigned char*)s.data(), s.size()); - } -}; - -/* A buffered abstract sink. */ -struct BufferedSink : Sink { - size_t bufSize, bufPos; - std::unique_ptr buffer; - - explicit BufferedSink(size_t bufSize = 32 * 1024) - : bufSize(bufSize), bufPos(0), buffer(nullptr) {} - - void operator()(const unsigned char* data, size_t len) override; - - void operator()(const std::string& s) { Sink::operator()(s); } - - void flush(); - - virtual void write(const unsigned char* data, size_t len) = 0; -}; - -/* Abstract source of binary data. */ -struct Source { - virtual ~Source() {} - - /* Store exactly ‘len’ bytes in the buffer pointed to by ‘data’. - It blocks until all the requested data is available, or throws - an error if it is not going to be available. */ - void operator()(unsigned char* data, size_t len); - - /* Store up to ‘len’ in the buffer pointed to by ‘data’, and - return the number of bytes stored. It blocks until at least - one byte is available. */ - virtual size_t read(unsigned char* data, size_t len) = 0; - - virtual bool good() { return true; } - - std::string drain(); -}; - -/* A buffered abstract source. */ -struct BufferedSource : Source { - size_t bufSize, bufPosIn, bufPosOut; - std::unique_ptr buffer; - - explicit BufferedSource(size_t bufSize = 32 * 1024) - : bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(nullptr) {} - - size_t read(unsigned char* data, size_t len) override; - - bool hasData(); - - protected: - /* Underlying read call, to be overridden. */ - virtual size_t readUnbuffered(unsigned char* data, size_t len) = 0; -}; - -/* A sink that writes data to a file descriptor. */ -struct FdSink : BufferedSink { - int fd; - bool warn = false; - size_t written = 0; - - FdSink() : fd(-1) {} - explicit FdSink(int fd) : fd(fd) {} - FdSink(FdSink&&) = default; - - FdSink& operator=(FdSink&& s) { - flush(); - fd = s.fd; - s.fd = -1; - warn = s.warn; - written = s.written; - return *this; - } - - ~FdSink(); - - void write(const unsigned char* data, size_t len) override; - - bool good() override; - - private: - bool _good = true; -}; - -/* A source that reads data from a file descriptor. 
*/ -struct FdSource : BufferedSource { - int fd; - size_t read = 0; - - FdSource() : fd(-1) {} - explicit FdSource(int fd) : fd(fd) {} - FdSource(FdSource&&) = default; - - FdSource& operator=(FdSource&& s) { - fd = s.fd; - s.fd = -1; - read = s.read; - return *this; - } - - bool good() override; - - protected: - size_t readUnbuffered(unsigned char* data, size_t len) override; - - private: - bool _good = true; -}; - -/* A sink that writes data to a string. */ -struct StringSink : Sink { - ref s; - StringSink() : s(make_ref()){}; - explicit StringSink(ref s) : s(s){}; - void operator()(const unsigned char* data, size_t len) override; -}; - -/* A source that reads data from a string. */ -struct StringSource : Source { - const std::string& s; - size_t pos; - explicit StringSource(const std::string& _s) : s(_s), pos(0) {} - size_t read(unsigned char* data, size_t len) override; -}; - -/* Adapter class of a Source that saves all data read to `s'. */ -struct TeeSource : Source { - Source& orig; - ref data; - explicit TeeSource(Source& orig) - : orig(orig), data(make_ref()) {} - size_t read(unsigned char* data, size_t len) { - size_t n = orig.read(data, len); - this->data->append((const char*)data, n); - return n; - } -}; - -/* A reader that consumes the original Source until 'size'. */ -struct SizedSource : Source { - Source& orig; - size_t remain; - SizedSource(Source& orig, size_t size) : orig(orig), remain(size) {} - size_t read(unsigned char* data, size_t len) { - if (this->remain <= 0) { - throw EndOfFile("sized: unexpected end-of-file"); - } - len = std::min(len, this->remain); - size_t n = this->orig.read(data, len); - this->remain -= n; - return n; - } - - /* Consume the original source until no remain data is left to consume. */ - size_t drainAll() { - std::vector buf(8192); - size_t sum = 0; - while (this->remain > 0) { - size_t n = read(buf.data(), buf.size()); - sum += n; - } - return sum; - } -}; - -/* Convert a function into a sink. */ -struct LambdaSink : Sink { - typedef std::function lambda_t; - - lambda_t lambda; - - explicit LambdaSink(const lambda_t& lambda) : lambda(lambda) {} - - virtual void operator()(const unsigned char* data, size_t len) { - lambda(data, len); - } -}; - -/* Convert a function into a source. */ -struct LambdaSource : Source { - using lambda_t = std::function; - - lambda_t lambda; - - explicit LambdaSource(const lambda_t& lambda) : lambda(lambda) {} - - size_t read(unsigned char* data, size_t len) override { - return lambda(data, len); - } -}; - -/* Convert a function that feeds data into a Sink into a Source. The - Source executes the function as a coroutine. 
*/ -std::unique_ptr sinkToSource( - const std::function& fun, - const std::function& eof = []() { - throw EndOfFile("coroutine has finished"); - }); - -void writePadding(size_t len, Sink& sink); -void writeString(const unsigned char* buf, size_t len, Sink& sink); - -inline Sink& operator<<(Sink& sink, uint64_t n) { - unsigned char buf[8]; - buf[0] = n & 0xff; - buf[1] = (n >> 8) & 0xff; - buf[2] = (n >> 16) & 0xff; - buf[3] = (n >> 24) & 0xff; - buf[4] = (n >> 32) & 0xff; - buf[5] = (n >> 40) & 0xff; - buf[6] = (n >> 48) & 0xff; - buf[7] = (unsigned char)(n >> 56) & 0xff; - sink(buf, sizeof(buf)); - return sink; -} - -Sink& operator<<(Sink& sink, const std::string& s); -Sink& operator<<(Sink& sink, const Strings& s); -Sink& operator<<(Sink& sink, const StringSet& s); - -MakeError(SerialisationError, Error); - -template -T readNum(Source& source) { - unsigned char buf[8]; - source(buf, sizeof(buf)); - - uint64_t n = - ((unsigned long long)buf[0]) | ((unsigned long long)buf[1] << 8) | - ((unsigned long long)buf[2] << 16) | ((unsigned long long)buf[3] << 24) | - ((unsigned long long)buf[4] << 32) | ((unsigned long long)buf[5] << 40) | - ((unsigned long long)buf[6] << 48) | ((unsigned long long)buf[7] << 56); - - if (n > std::numeric_limits::max()) { - throw SerialisationError("serialised integer %d is too large for type '%s'", - n, typeid(T).name()); - } - - return (T)n; -} - -inline unsigned int readInt(Source& source) { - return readNum(source); -} - -inline uint64_t readLongLong(Source& source) { - return readNum(source); -} - -void readPadding(size_t len, Source& source); -size_t readString(unsigned char* buf, size_t max, Source& source); -std::string readString(Source& source, - size_t max = std::numeric_limits::max()); -template -T readStrings(Source& source); - -Source& operator>>(Source& in, std::string& s); - -template -Source& operator>>(Source& in, T& n) { - n = readNum(in); - return in; -} - -template -Source& operator>>(Source& in, bool& b) { - b = readNum(in); - return in; -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/status.hh b/third_party/nix/src/libutil/status.hh deleted file mode 100644 index aeee0f5022..0000000000 --- a/third_party/nix/src/libutil/status.hh +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once - -#include -#include -#include - -#include "libutil/types.hh" - -namespace nix::util { - -inline void OkOrThrow(absl::Status status) { - if (!status.ok()) { - throw Error(absl::StrFormat("Operation failed: %s", status.ToString())); - } -} - -} // namespace nix::util diff --git a/third_party/nix/src/libutil/sync.hh b/third_party/nix/src/libutil/sync.hh deleted file mode 100644 index ef640d5b56..0000000000 --- a/third_party/nix/src/libutil/sync.hh +++ /dev/null @@ -1,84 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -namespace nix { - -/* This template class ensures synchronized access to a value of type - T. It is used as follows: - - struct Data { int x; ... }; - - Sync data; - - { - auto data_(data.lock()); - data_->x = 123; - } - - Here, "data" is automatically unlocked when "data_" goes out of - scope. 
-*/ - -template -class Sync { - private: - M mutex; - T data; - - public: - Sync() {} - explicit Sync(const T& data) : data(data) {} - explicit Sync(T&& data) noexcept : data(std::move(data)) {} - - class Lock { - private: - Sync* s; - std::unique_lock lk; - friend Sync; - explicit Lock(Sync* s) : s(s), lk(s->mutex) {} - - public: - Lock(Lock&& l) : s(l.s) { abort(); } - Lock(const Lock& l) = delete; - ~Lock() {} - T* operator->() { return &s->data; } - T& operator*() { return s->data; } - - void wait(std::condition_variable& cv) { - assert(s); - cv.wait(lk); - } - - template - std::cv_status wait_for( - std::condition_variable& cv, - const std::chrono::duration& duration) { - assert(s); - return cv.wait_for(lk, duration); - } - - template - bool wait_for(std::condition_variable& cv, - const std::chrono::duration& duration, - Predicate pred) { - assert(s); - return cv.wait_for(lk, duration, pred); - } - - template - std::cv_status wait_until( - std::condition_variable& cv, - const std::chrono::time_point& duration) { - assert(s); - return cv.wait_until(lk, duration); - } - }; - - Lock lock() { return Lock(this); } -}; - -} // namespace nix diff --git a/third_party/nix/src/libutil/thread-pool.cc b/third_party/nix/src/libutil/thread-pool.cc deleted file mode 100644 index 7c6b0a1b46..0000000000 --- a/third_party/nix/src/libutil/thread-pool.cc +++ /dev/null @@ -1,163 +0,0 @@ -#include "libutil/thread-pool.hh" - -#include - -#include "libutil/affinity.hh" - -namespace nix { - -ThreadPool::ThreadPool(size_t _maxThreads) : maxThreads(_maxThreads) { - restoreAffinity(); // FIXME - - if (maxThreads == 0u) { - maxThreads = std::thread::hardware_concurrency(); - if (maxThreads == 0u) { - maxThreads = 1; - } - } - - DLOG(INFO) << "starting pool of " << maxThreads - 1 << " threads"; -} - -ThreadPool::~ThreadPool() { shutdown(); } - -void ThreadPool::shutdown() { - std::vector workers; - { - auto state(state_.lock()); - quit = true; - std::swap(workers, state->workers); - } - - if (workers.empty()) { - return; - } - - DLOG(INFO) << "reaping " << workers.size() << " worker threads"; - - work.notify_all(); - - for (auto& thr : workers) { - thr.join(); - } -} - -void ThreadPool::enqueue(const work_t& t) { - auto state(state_.lock()); - if (quit) { - throw ThreadPoolShutDown( - "cannot enqueue a work item while the thread pool is shutting down"); - } - state->pending.push(t); - /* Note: process() also executes items, so count it as a worker. */ - if (state->pending.size() > state->workers.size() + 1 && - state->workers.size() + 1 < maxThreads) { - state->workers.emplace_back(&ThreadPool::doWork, this, false); - } - work.notify_one(); -} - -void ThreadPool::process() { - state_.lock()->draining = true; - - /* Do work until no more work is pending or active. */ - try { - doWork(true); - - auto state(state_.lock()); - - assert(quit); - - if (state->exception) { - std::rethrow_exception(state->exception); - } - - } catch (...) { - /* In the exceptional case, some workers may still be - active. They may be referencing the stack frame of the - caller. So wait for them to finish. (~ThreadPool also does - this, but it might be destroyed after objects referenced by - the work item lambdas.) 
*/ - shutdown(); - throw; - } -} - -void ThreadPool::doWork(bool mainThread) { - if (!mainThread) { - interruptCheck = [&]() { return (bool)quit; }; - } - - bool didWork = false; - std::exception_ptr exc; - - while (true) { - work_t w; - { - auto state(state_.lock()); - - if (didWork) { - assert(state->active); - state->active--; - - if (exc) { - if (!state->exception) { - state->exception = exc; - // Tell the other workers to quit. - quit = true; - work.notify_all(); - } else { - /* Print the exception, since we can't - propagate it. */ - try { - std::rethrow_exception(exc); - } catch (std::exception& e) { - if ((dynamic_cast(&e) == nullptr) && - (dynamic_cast(&e) == nullptr)) { - ignoreException(); - } - } catch (...) { - } - } - } - } - - /* Wait until a work item is available or we're asked to - quit. */ - while (true) { - if (quit) { - return; - } - - if (!state->pending.empty()) { - break; - } - - /* If there are no active or pending items, and the - main thread is running process(), then no new items - can be added. So exit. */ - if ((state->active == 0u) && state->draining) { - quit = true; - work.notify_all(); - return; - } - - state.wait(work); - } - - w = std::move(state->pending.front()); - state->pending.pop(); - state->active++; - } - - try { - w(); - } catch (...) { - exc = std::current_exception(); - } - - didWork = true; - } -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/thread-pool.hh b/third_party/nix/src/libutil/thread-pool.hh deleted file mode 100644 index 0efc4c1bfc..0000000000 --- a/third_party/nix/src/libutil/thread-pool.hh +++ /dev/null @@ -1,140 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libutil/sync.hh" -#include "libutil/util.hh" - -namespace nix { - -MakeError(ThreadPoolShutDown, Error); - -/* A simple thread pool that executes a queue of work items - (lambdas). */ -class ThreadPool { - public: - explicit ThreadPool(size_t maxThreads = 0); - - ~ThreadPool(); - - // FIXME: use std::packaged_task? - typedef std::function work_t; - - /* Enqueue a function to be executed by the thread pool. */ - void enqueue(const work_t& t); - - /* Execute work items until the queue is empty. Note that work - items are allowed to add new items to the queue; this is - handled correctly. Queue processing stops prematurely if any - work item throws an exception. This exception is propagated to - the calling thread. If multiple work items throw an exception - concurrently, only one item is propagated; the others are - printed on stderr and otherwise ignored. */ - void process(); - - private: - size_t maxThreads; - - struct State { - std::queue pending; - size_t active = 0; - std::exception_ptr exception; - std::vector workers; - bool draining = false; - }; - - std::atomic_bool quit{false}; - - Sync state_; - - std::condition_variable work; - - void doWork(bool mainThread); - - void shutdown(); -}; - -/* Process in parallel a set of items of type T that have a partial - ordering between them. Thus, any item is only processed after all - its dependencies have been processed. 
*/ -template -void processGraph(ThreadPool& pool, const std::set& nodes, - std::function(const T&)> getEdges, - std::function processNode) { - struct Graph { - std::set left; - std::map> refs, rrefs; - }; - - Sync graph_(Graph{nodes, {}, {}}); - - std::function worker; - - worker = [&](const T& node) { - { - auto graph(graph_.lock()); - auto i = graph->refs.find(node); - if (i == graph->refs.end()) { - goto getRefs; - } - goto doWork; - } - - getRefs : { - auto refs = getEdges(node); - refs.erase(node); - - { - auto graph(graph_.lock()); - for (auto& ref : refs) { - if (graph->left.count(ref)) { - graph->refs[node].insert(ref); - graph->rrefs[ref].insert(node); - } - } - if (graph->refs[node].empty()) { - goto doWork; - } - } - } - - return; - - doWork: - processNode(node); - - /* Enqueue work for all nodes that were waiting on this one - and have no unprocessed dependencies. */ - { - auto graph(graph_.lock()); - for (auto& rref : graph->rrefs[node]) { - auto& refs(graph->refs[rref]); - auto i = refs.find(node); - assert(i != refs.end()); - refs.erase(i); - if (refs.empty()) { - pool.enqueue(std::bind(worker, rref)); - } - } - graph->left.erase(node); - graph->refs.erase(node); - graph->rrefs.erase(node); - } - }; - - for (auto& node : nodes) { - pool.enqueue(std::bind(worker, std::ref(node))); - } - - pool.process(); - - if (!graph_.lock()->left.empty()) { - throw Error("graph processing incomplete (cyclic reference?)"); - } -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/types.hh b/third_party/nix/src/libutil/types.hh deleted file mode 100644 index bf95206d08..0000000000 --- a/third_party/nix/src/libutil/types.hh +++ /dev/null @@ -1,118 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "libutil/ref.hh" - -/* Before 4.7, gcc's std::exception uses empty throw() specifiers for - * its (virtual) destructor and what() in c++11 mode, in violation of spec - */ -#ifdef __GNUC__ -#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7) -#define EXCEPTION_NEEDS_THROW_SPEC -#endif -#endif - -namespace nix { - -/* Inherit some names from other namespaces for convenience. */ -using boost::format; - -/* A variadic template that does nothing. Useful to call a function - for all variadic arguments but ignoring the result. */ -struct nop { - template - explicit nop(T...) {} -}; - -struct FormatOrString { - std::string s; - FormatOrString(const std::string& s) : s(s){}; - FormatOrString(const format& f) : s(f.str()){}; - FormatOrString(const char* s) : s(s){}; -}; - -/* A helper for formatting strings. ‘fmt(format, a_0, ..., a_n)’ is - equivalent to ‘boost::format(format) % a_0 % ... % - ... a_n’. However, ‘fmt(s)’ is equivalent to ‘s’ (so no %-expansion - takes place). */ - -inline std::string fmt(const std::string& s) { return s; } - -inline std::string fmt(std::string_view s) { return std::string(s); } - -inline std::string fmt(const char* s) { return s; } - -inline std::string fmt(const FormatOrString& fs) { return fs.s; } - -template -inline std::string fmt(const std::string& fs, Args... args) { - boost::format f(fs); - f.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit); - nop{boost::io::detail::feed(f, args)...}; - return f.str(); -} - -/* BaseError should generally not be caught, as it has Interrupted as - a subclass. Catch Error instead. */ -class BaseError : public std::exception { - protected: - std::string prefix_; // used for location traces etc. 
- std::string err; - - public: - unsigned int status = 1; // exit status - - template - explicit BaseError(unsigned int status, Args... args) - : err(fmt(args...)), status(status) {} - - template - explicit BaseError(Args... args) : err(fmt(args...)) {} - -#ifdef EXCEPTION_NEEDS_THROW_SPEC - ~BaseError() noexcept {}; - const char* what() const noexcept { return err.c_str(); } -#else - const char* what() const noexcept { return err.c_str(); } -#endif - - const std::string& msg() const { return err; } - const std::string& prefix() const { return prefix_; } - BaseError& addPrefix(const FormatOrString& fs); -}; - -#define MakeError(newClass, superClass) \ - class newClass : public superClass { \ - public: \ - using superClass::superClass; \ - }; - -MakeError(Error, BaseError); - -class SysError : public Error { - public: - int errNo; - - template - explicit SysError(Args... args) : Error(addErrno(fmt(args...))) {} - - private: - std::string addErrno(const std::string& s); -}; - -typedef std::list Strings; -using StringSet = std::set; -using StringMap = std::map; - -/* Paths are just strings. */ -using Path = std::string; -using Paths = std::list; -using PathSet = std::set; - -} // namespace nix diff --git a/third_party/nix/src/libutil/util.cc b/third_party/nix/src/libutil/util.cc deleted file mode 100644 index aea1e68e3c..0000000000 --- a/third_party/nix/src/libutil/util.cc +++ /dev/null @@ -1,1426 +0,0 @@ -#include "libutil/util.hh" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libutil/affinity.hh" -#include "libutil/finally.hh" -#include "libutil/lazy.hh" -#include "libutil/serialise.hh" -#include "libutil/sync.hh" -#include "nix_config.h" - -namespace nix { - -const std::string nativeSystem = SYSTEM; - -BaseError& BaseError::addPrefix(const FormatOrString& fs) { - prefix_ = fs.s + prefix_; - return *this; -} - -std::string SysError::addErrno(const std::string& s) { - errNo = errno; - return s + ": " + strerror(errNo); -} - -std::optional getEnv(const std::string& key) { - char* value = getenv(key.c_str()); - if (value == nullptr) return {}; - return std::string(value); -} - -std::map getEnv() { - std::map env; - for (size_t i = 0; environ[i] != nullptr; ++i) { - auto s = environ[i]; - auto eq = strchr(s, '='); - if (eq == nullptr) { - // invalid env, just keep going - continue; - } - env.emplace(std::string(s, eq), std::string(eq + 1)); - } - return env; -} - -void clearEnv() { - for (auto& name : getEnv()) { - unsetenv(name.first.c_str()); - } -} - -void replaceEnv(const std::map& newEnv) { - clearEnv(); - for (const auto& newEnvVar : newEnv) { - setenv(newEnvVar.first.c_str(), newEnvVar.second.c_str(), 1); - } -} - -Path absPath(Path path, Path dir) { - if (path[0] != '/') { - if (dir.empty()) { -#ifdef __GNU__ - /* GNU (aka. GNU/Hurd) doesn't have any limitation on path - lengths and doesn't define `PATH_MAX'. 
*/ - char* buf = getcwd(NULL, 0); - if (buf == NULL) -#else - char buf[PATH_MAX]; - if (getcwd(buf, sizeof(buf)) == nullptr) { -#endif - throw SysError("cannot get cwd"); - } - dir = buf; -#ifdef __GNU__ - free(buf); -#endif - } - path = dir + "/" + path; -} -return canonPath(path); -} // namespace nix - -Path canonPath(const Path& path, bool resolveSymlinks) { - assert(!path.empty()); - - std::string s; - - if (path[0] != '/') { - throw Error(format("not an absolute path: '%1%'") % path); - } - - std::string::const_iterator i = path.begin(); - std::string::const_iterator end = path.end(); - std::string temp; - - /* Count the number of times we follow a symlink and stop at some - arbitrary (but high) limit to prevent infinite loops. */ - unsigned int followCount = 0; - unsigned int maxFollow = 1024; - - while (true) { - /* Skip slashes. */ - while (i != end && *i == '/') { - i++; - } - if (i == end) { - break; - } - - /* Ignore `.'. */ - if (*i == '.' && (i + 1 == end || i[1] == '/')) { - i++; - } - - /* If `..', delete the last component. */ - else if (*i == '.' && i + 1 < end && i[1] == '.' && - (i + 2 == end || i[2] == '/')) { - if (!s.empty()) { - s.erase(s.rfind('/')); - } - i += 2; - } - - /* Normal component; copy it. */ - else { - s += '/'; - while (i != end && *i != '/') { - s += *i++; - } - - /* If s points to a symlink, resolve it and restart (since - the symlink target might contain new symlinks). */ - if (resolveSymlinks && isLink(s)) { - if (++followCount >= maxFollow) { - throw Error(format("infinite symlink recursion in path '%1%'") % - path); - } - temp = absPath(readLink(s), dirOf(s)) + std::string(i, end); - i = temp.begin(); /* restart */ - end = temp.end(); - s = ""; - } - } - } - - return s.empty() ? "/" : s; -} - -// TODO(grfn) remove in favor of std::filesystem::path::parent_path() -Path dirOf(absl::string_view path) { - Path::size_type pos = path.rfind('/'); - if (pos == std::string::npos) { - return "."; - } - return pos == 0 ? 
"/" : Path(path, 0, pos); -} - -// TODO(grfn) remove in favor of std::filesystem::path::root_name() -std::string baseNameOf(const Path& path) { - if (path.empty()) { - return ""; - } - - Path::size_type last = path.length() - 1; - if (path[last] == '/' && last > 0) { - last -= 1; - } - - Path::size_type pos = path.rfind('/', last); - if (pos == std::string::npos) { - pos = 0; - } else { - pos += 1; - } - - return std::string(path, pos, last - pos + 1); -} - -bool isInDir(const Path& path, const Path& dir) { - return path[0] == '/' && std::string(path, 0, dir.size()) == dir && - path.size() >= dir.size() + 2 && path[dir.size()] == '/'; -} - -bool isDirOrInDir(const Path& path, const Path& dir) { - return path == dir || isInDir(path, dir); -} - -struct stat lstat(const Path& path) { - struct stat st; - if (lstat(path.c_str(), &st) != 0) { - throw SysError(format("getting status of '%1%'") % path); - } - return st; -} - -bool pathExists(const Path& path) { - int res; - struct stat st; - res = lstat(path.c_str(), &st); - if (res == 0) { - return true; - } - if (errno != ENOENT && errno != ENOTDIR) { - throw SysError(format("getting status of %1%") % path); - } - return false; -} - -Path readLink(const Path& path) { - checkInterrupt(); - std::vector buf; - for (ssize_t bufSize = PATH_MAX / 4; true; bufSize += bufSize / 2) { - buf.resize(bufSize); - ssize_t rlSize = readlink(path.c_str(), buf.data(), bufSize); - if (rlSize == -1) { - if (errno == EINVAL) { - throw Error("'%1%' is not a symlink", path); - } - throw SysError("reading symbolic link '%1%'", path); - - } else if (rlSize < bufSize) { - return std::string(buf.data(), rlSize); - } - } -} - -bool isLink(const Path& path) { - struct stat st = lstat(path); - return S_ISLNK(st.st_mode); -} - -DirEntries readDirectory(DIR* dir, const Path& path) { - DirEntries entries; - entries.reserve(64); - - struct dirent* dirent; - while (errno = 0, dirent = readdir(dir)) { /* sic */ - checkInterrupt(); - std::string name = dirent->d_name; - if (name == "." || name == "..") { - continue; - } - entries.emplace_back(name, dirent->d_ino, -#ifdef HAVE_STRUCT_DIRENT_D_TYPE - dirent->d_type -#else - DT_UNKNOWN -#endif - ); - } - if (errno) { - throw SysError(format("reading directory '%1%'") % path); - } - - return entries; -} - -DirEntries readDirectory(const Path& path) { - AutoCloseDir dir(opendir(path.c_str())); - if (!dir) { - throw SysError(format("opening directory '%1%'") % path); - } - - return readDirectory(dir.get(), path); -} - -unsigned char getFileType(const Path& path) { - struct stat st = lstat(path); - if (S_ISDIR(st.st_mode)) { - return DT_DIR; - } - if (S_ISLNK(st.st_mode)) { - return DT_LNK; - } - if (S_ISREG(st.st_mode)) { - return DT_REG; - } - return DT_UNKNOWN; -} - -std::string readFile(int fd) { - struct stat st; - if (fstat(fd, &st) == -1) { - throw SysError("statting file"); - } - - std::vector buf(st.st_size); - readFull(fd, buf.data(), st.st_size); - - return std::string(reinterpret_cast(buf.data()), st.st_size); -} - -std::string readFile(absl::string_view path, bool drain) { - AutoCloseFD fd(open(std::string(path).c_str(), O_RDONLY | O_CLOEXEC)); - if (!fd) { - throw SysError(format("opening file '%1%'") % path); - } - return drain ? 
drainFD(fd.get()) : readFile(fd.get()); -} - -void readFile(absl::string_view path, Sink& sink) { - // TODO(tazjin): use stdlib functions for this stuff - AutoCloseFD fd(open(std::string(path).c_str(), O_RDONLY | O_CLOEXEC)); - if (!fd) { - throw SysError("opening file '%s'", path); - } - drainFD(fd.get(), sink); -} - -void writeFile(const Path& path, const std::string& s, mode_t mode) { - AutoCloseFD fd( - open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode)); - if (!fd) { - throw SysError(format("opening file '%1%'") % path); - } - writeFull(fd.get(), s); -} - -void writeFile(const Path& path, Source& source, mode_t mode) { - AutoCloseFD fd( - open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode)); - if (!fd) { - throw SysError(format("opening file '%1%'") % path); - } - - std::vector buf(64 * 1024); - - while (true) { - try { - auto n = source.read(buf.data(), buf.size()); - writeFull(fd.get(), static_cast(buf.data()), n); - } catch (EndOfFile&) { - break; - } - } -} - -std::string readLine(int fd) { - std::string s; - while (true) { - checkInterrupt(); - char ch; - // FIXME: inefficient - ssize_t rd = read(fd, &ch, 1); - if (rd == -1) { - if (errno != EINTR) { - throw SysError("reading a line"); - } - } else if (rd == 0) { - throw EndOfFile("unexpected EOF reading a line"); - } else { - if (ch == '\n') { - return s; - } - s += ch; - } - } -} - -void writeLine(int fd, std::string s) { - s += '\n'; - writeFull(fd, s); -} - -static void _deletePath(int parentfd, const Path& path, - unsigned long long& bytesFreed) { - checkInterrupt(); - - std::string name(baseNameOf(path)); - - struct stat st; - if (fstatat(parentfd, name.c_str(), &st, AT_SYMLINK_NOFOLLOW) == -1) { - if (errno == ENOENT) { - return; - } - throw SysError(format("getting status of '%1%'") % path); - } - - if (!S_ISDIR(st.st_mode) && st.st_nlink == 1) { - bytesFreed += st.st_size; - } - - if (S_ISDIR(st.st_mode)) { - /* Make the directory accessible. */ - const auto PERM_MASK = S_IRUSR | S_IWUSR | S_IXUSR; - if ((st.st_mode & PERM_MASK) != PERM_MASK) { - if (fchmodat(parentfd, name.c_str(), st.st_mode | PERM_MASK, 0) == -1) { - throw SysError(format("chmod '%1%'") % path); - } - } - - int fd = openat(parentfd, path.c_str(), O_RDONLY); - if (!fd) { - throw SysError(format("opening directory '%1%'") % path); - } - AutoCloseDir dir(fdopendir(fd)); - if (!dir) { - throw SysError(format("opening directory '%1%'") % path); - } - for (auto& i : readDirectory(dir.get(), path)) { - _deletePath(dirfd(dir.get()), path + "/" + i.name, bytesFreed); - } - } - - int flags = S_ISDIR(st.st_mode) ? AT_REMOVEDIR : 0; - if (unlinkat(parentfd, name.c_str(), flags) == -1) { - if (errno == ENOENT) { - return; - } - throw SysError(format("cannot unlink '%1%'") % path); - } -} - -static void _deletePath(const Path& path, unsigned long long& bytesFreed) { - Path dir = dirOf(path); - if (dir == "") { - dir = "/"; - } - - AutoCloseFD dirfd(open(dir.c_str(), O_RDONLY)); - if (!dirfd) { - // This really shouldn't fail silently, but it's left this way - // for backwards compatibility. 
- if (errno == ENOENT) { - return; - } - - throw SysError(format("opening directory '%1%'") % path); - } - - _deletePath(dirfd.get(), path, bytesFreed); -} - -void deletePath(const Path& path) { - unsigned long long dummy; - deletePath(path, dummy); -} - -void deletePath(const Path& path, unsigned long long& bytesFreed) { - // Activity act(*logger, lvlDebug, format("recursively deleting path '%1%'") % - // path); - bytesFreed = 0; - _deletePath(path, bytesFreed); -} - -static Path tempName(Path tmpRoot, const Path& prefix, bool includePid, - int& counter) { - tmpRoot = canonPath( - tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true); - - if (includePid) { - return (format("%1%/%2%-%3%-%4%") % tmpRoot % prefix % getpid() % counter++) - .str(); - } - return (format("%1%/%2%-%3%") % tmpRoot % prefix % counter++).str(); -} - -Path createTempDir(const Path& tmpRoot, const Path& prefix, bool includePid, - bool useGlobalCounter, mode_t mode) { - static int globalCounter = 0; - int localCounter = 0; - int& counter(useGlobalCounter ? globalCounter : localCounter); - - while (true) { - checkInterrupt(); - Path tmpDir = tempName(tmpRoot, prefix, includePid, counter); - if (mkdir(tmpDir.c_str(), mode) == 0) { -#if __FreeBSD__ - /* Explicitly set the group of the directory. This is to - work around around problems caused by BSD's group - ownership semantics (directories inherit the group of - the parent). For instance, the group of /tmp on - FreeBSD is "wheel", so all directories created in /tmp - will be owned by "wheel"; but if the user is not in - "wheel", then "tar" will fail to unpack archives that - have the setgid bit set on directories. */ - if (chown(tmpDir.c_str(), (uid_t)-1, getegid()) != 0) - throw SysError(format("setting group of directory '%1%'") % tmpDir); -#endif - return tmpDir; - } - if (errno != EEXIST) { - throw SysError(format("creating directory '%1%'") % tmpDir); - } - } -} - -std::string getUserName() { - auto pw = getpwuid(geteuid()); - std::optional name = - pw != nullptr ? 
pw->pw_name : getEnv("USER"); - if (!name.has_value()) { - throw Error("cannot figure out user name"); - } - return *name; -} - -static Lazy getHome2([]() { - std::optional homeDir = getEnv("HOME"); - if (!homeDir) { - std::vector buf(16384); - struct passwd pwbuf; - struct passwd* pw; - if (getpwuid_r(geteuid(), &pwbuf, buf.data(), buf.size(), &pw) != 0 || - (pw == nullptr) || (pw->pw_dir == nullptr) || (pw->pw_dir[0] == 0)) { - throw Error("cannot determine user's home directory"); - } - return std::string(pw->pw_dir); - } - return homeDir.value(); -}); - -Path getHome() { return getHome2(); } - -Path getCacheDir() { - return getEnv("XDG_CACHE_HOME").value_or(getHome() + "/.cache"); -} - -Path getConfigDir() { - return getEnv("XDG_CONFIG_HOME").value_or(getHome() + "/.config"); -} - -std::vector getConfigDirs() { - Path configHome = getConfigDir(); - std::string configDirs = getEnv("XDG_CONFIG_DIRS").value_or(""); - std::vector result = - absl::StrSplit(configDirs, absl::ByChar(':'), absl::SkipEmpty()); - result.insert(result.begin(), configHome); - return result; -} - -Path getDataDir() { - return getEnv("XDG_DATA_HOME").value_or(getHome() + "/.local/share"); -} - -// TODO(grfn): Remove in favor of std::filesystem::create_directories -Paths createDirs(const Path& path) { - Paths created; - if (path == "/") { - return created; - } - - struct stat st; - if (lstat(path.c_str(), &st) == -1) { - created = createDirs(dirOf(path)); - if (mkdir(path.c_str(), 0777) == -1 && errno != EEXIST) { - throw SysError(format("creating directory '%1%'") % path); - } - st = lstat(path); - created.push_back(path); - } - - if (S_ISLNK(st.st_mode) && stat(path.c_str(), &st) == -1) { - throw SysError(format("statting symlink '%1%'") % path); - } - - if (!S_ISDIR(st.st_mode)) { - throw Error(format("'%1%' is not a directory") % path); - } - - return created; -} - -void createSymlink(const Path& target, const Path& link) { - if (symlink(target.c_str(), link.c_str()) != 0) { - throw SysError(format("creating symlink from '%1%' to '%2%'") % link % - target); - } -} - -void replaceSymlink(const Path& target, const Path& link) { - for (unsigned int n = 0; true; n++) { - Path tmp = canonPath(fmt("%s/.%d_%s", dirOf(link), n, baseNameOf(link))); - - try { - createSymlink(target, tmp); - } catch (SysError& e) { - if (e.errNo == EEXIST) { - continue; - } - throw; - } - - if (rename(tmp.c_str(), link.c_str()) != 0) { - throw SysError(format("renaming '%1%' to '%2%'") % tmp % link); - } - - break; - } -} - -void readFull(int fd, unsigned char* buf, size_t count) { - while (count != 0u) { - checkInterrupt(); - ssize_t res = read(fd, reinterpret_cast(buf), count); - if (res == -1) { - if (errno == EINTR) { - continue; - } - throw SysError("reading from file"); - } - if (res == 0) { - throw EndOfFile("unexpected end-of-file"); - } - count -= res; - buf += res; - } -} - -void writeFull(int fd, const unsigned char* buf, size_t count, - bool allowInterrupts) { - while (count != 0u) { - if (allowInterrupts) { - checkInterrupt(); - } - ssize_t res = write(fd, (char*)buf, count); - if (res == -1 && errno != EINTR) { - throw SysError("writing to file"); - } - if (res > 0) { - count -= res; - buf += res; - } - } -} - -void writeFull(int fd, const std::string& s, bool allowInterrupts) { - writeFull(fd, reinterpret_cast(s.data()), s.size(), - allowInterrupts); -} - -std::string drainFD(int fd, bool block) { - StringSink sink; - drainFD(fd, sink, block); - return std::move(*sink.s); -} - -void drainFD(int fd, Sink& sink, bool block) { - 
int saved; - - Finally finally([&]() { - if (!block) { - if (fcntl(fd, F_SETFL, saved) == -1) { - throw SysError("making file descriptor blocking"); - } - } - }); - - if (!block) { - saved = fcntl(fd, F_GETFL); - if (fcntl(fd, F_SETFL, saved | O_NONBLOCK) == -1) { - throw SysError("making file descriptor non-blocking"); - } - } - - std::vector buf(64 * 1024); - while (true) { - checkInterrupt(); - ssize_t rd = read(fd, buf.data(), buf.size()); - if (rd == -1) { - if (!block && (errno == EAGAIN || errno == EWOULDBLOCK)) { - break; - } - if (errno != EINTR) { - throw SysError("reading from file"); - } - } else if (rd == 0) { - break; - } else { - sink(buf.data(), rd); - } - } -} - -////////////////////////////////////////////////////////////////////// - -AutoDelete::AutoDelete() : del{false} {} - -AutoDelete::AutoDelete(std::string p, bool recursive) : path(std::move(p)) { - del = true; - this->recursive = recursive; -} - -AutoDelete::~AutoDelete() { - try { - if (del) { - if (recursive) { - deletePath(path); - } else { - if (remove(path.c_str()) == -1) { - throw SysError(format("cannot unlink '%1%'") % path); - } - } - } - } catch (...) { - ignoreException(); - } -} - -void AutoDelete::cancel() { del = false; } - -void AutoDelete::reset(const Path& p, bool recursive) { - path = p; - this->recursive = recursive; - del = true; -} - -////////////////////////////////////////////////////////////////////// - -AutoCloseFD::AutoCloseFD() : fd{-1} {} - -AutoCloseFD::AutoCloseFD(int fd) : fd{fd} {} - -AutoCloseFD::AutoCloseFD(AutoCloseFD&& that) : fd{that.fd} { that.fd = -1; } - -AutoCloseFD& AutoCloseFD::operator=(AutoCloseFD&& that) { - close(); - fd = that.fd; - that.fd = -1; - return *this; -} - -AutoCloseFD::~AutoCloseFD() { - try { - close(); - } catch (...) { - ignoreException(); - } -} - -int AutoCloseFD::get() const { return fd; } - -void AutoCloseFD::close() { - if (fd != -1) { - if (::close(fd) == -1) { /* This should never happen. */ - throw SysError(format("closing file descriptor %1%") % fd); - } - } -} - -AutoCloseFD::operator bool() const { return fd != -1; } - -int AutoCloseFD::release() { - int oldFD = fd; - fd = -1; - return oldFD; -} - -void Pipe::create() { - int fds[2]; -#if HAVE_PIPE2 - if (pipe2(fds, O_CLOEXEC) != 0) { - throw SysError("creating pipe"); - } -#else - if (pipe(fds) != 0) { - throw SysError("creating pipe"); - } - closeOnExec(fds[0]); - closeOnExec(fds[1]); -#endif - readSide = AutoCloseFD(fds[0]); - writeSide = AutoCloseFD(fds[1]); -} - -////////////////////////////////////////////////////////////////////// - -Pid::Pid() = default; - -Pid::Pid(pid_t pid) : pid(pid) {} - -Pid::~Pid() { - if (pid != -1) { - kill(); - } -} - -void Pid::operator=(pid_t pid) { - if (this->pid != -1 && this->pid != pid) { - kill(); - } - this->pid = pid; - killSignal = SIGKILL; // reset signal to default -} - -Pid::operator pid_t() { return pid; } - -int Pid::kill() { - assert(pid != -1); - - DLOG(INFO) << "killing process " << pid; - - /* Send the requested signal to the child. If it has its own - process group, send the signal to every process in the child - process group (which hopefully includes *all* its children). */ - if (::kill(separatePG ? 
-pid : pid, killSignal) != 0) { - LOG(ERROR) << SysError("killing process %d", pid).msg(); - } - - return wait(); -} - -int Pid::wait() { - assert(pid != -1); - while (true) { - int status; - int res = waitpid(pid, &status, 0); - if (res == pid) { - pid = -1; - return status; - } - if (errno != EINTR) { - throw SysError("cannot get child exit status"); - } - checkInterrupt(); - } -} - -void Pid::setSeparatePG(bool separatePG) { this->separatePG = separatePG; } - -void Pid::setKillSignal(int signal) { this->killSignal = signal; } - -pid_t Pid::release() { - pid_t p = pid; - pid = -1; - return p; -} - -void killUser(uid_t uid) { - DLOG(INFO) << "killing all processes running under UID " << uid; - - assert(uid != 0); /* just to be safe... */ - - /* The system call kill(-1, sig) sends the signal `sig' to all - users to which the current process can send signals. So we - fork a process, switch to uid, and send a mass kill. */ - - ProcessOptions options; - - Pid pid(startProcess( - [&]() { - if (setuid(uid) == -1) { - throw SysError("setting uid"); - } - - while (true) { - if (kill(-1, SIGKILL) == 0) { - break; - } - if (errno == ESRCH) { - break; - } /* no more processes */ - if (errno != EINTR) { - throw SysError(format("cannot kill processes for uid '%1%'") % uid); - } - } - - _exit(0); - }, - options)); - - int status = pid.wait(); - if (status != 0) { - throw Error(format("cannot kill processes for uid '%1%': %2%") % uid % - statusToString(status)); - } - - /* !!! We should really do some check to make sure that there are - no processes left running under `uid', but there is no portable - way to do so (I think). The most reliable way may be `ps -eo - uid | grep -q $uid'. */ -} - -////////////////////////////////////////////////////////////////////// - -/* - * Please note that it is not legal for this function to call vfork(). If the - * process created by vfork() returns from the function in which vfork() was - * called, or calls any other function before successfully calling _exit() or - * one of the exec*() family of functions, the behavior is undefined. - */ -static pid_t doFork(const std::function& fun) __attribute__((noinline)); -static pid_t doFork(const std::function& fun) { -#ifdef __linux__ - // TODO(kanepyork): call clone() instead for faster forking -#endif - - pid_t pid = fork(); - if (pid != 0) { - return pid; - } - fun(); - abort(); -} - -pid_t startProcess(std::function fun, const ProcessOptions& options) { - auto wrapper = [&]() { - try { -#if __linux__ - if (options.dieWithParent && prctl(PR_SET_PDEATHSIG, SIGKILL) == -1) { - throw SysError("setting death signal"); - } -#endif - restoreAffinity(); - fun(); - } catch (std::exception& e) { - try { - LOG(ERROR) << options.errorPrefix << e.what(); - } catch (...) { - } - } catch (...) 
{ - } - if (options.runExitHandlers) { - exit(1); - } else { - _exit(1); - } - }; - - pid_t pid = doFork(wrapper); - if (pid == -1) { - throw SysError("unable to fork"); - } - - return pid; -} - -std::vector stringsToCharPtrs(const Strings& ss) { - std::vector res; - for (auto& s : ss) { - res.push_back(const_cast(s.c_str())); - } - res.push_back(nullptr); - return res; -} - -std::string runProgram(const Path& program, bool searchPath, - const Strings& args, - const std::optional& input) { - RunOptions opts(program, args); - opts.searchPath = searchPath; - opts.input = input; - - auto res = runProgram(opts); - - if (!statusOk(res.first)) { - throw ExecError(res.first, fmt("program '%1%' %2%", program, - statusToString(res.first))); - } - - return res.second; -} - -std::pair runProgram(const RunOptions& options_) { - RunOptions options(options_); - StringSink sink; - options.standardOut = &sink; - - int status = 0; - - try { - runProgram2(options); - } catch (ExecError& e) { - status = e.status; - } - - return {status, std::move(*sink.s)}; -} - -void runProgram2(const RunOptions& options) { - checkInterrupt(); - - assert(!(options.standardIn && options.input)); - - std::unique_ptr source_; - Source* source = options.standardIn; - - if (options.input) { - source_ = std::make_unique(*options.input); - source = source_.get(); - } - - /* Create a pipe. */ - Pipe out; - Pipe in; - if (options.standardOut != nullptr) { - out.create(); - } - if (source != nullptr) { - in.create(); - } - - ProcessOptions processOptions; - - /* Fork. */ - Pid pid(startProcess( - [&]() { - if (options.environment) { - replaceEnv(*options.environment); - } - if ((options.standardOut != nullptr) && - dup2(out.writeSide.get(), STDOUT_FILENO) == -1) { - throw SysError("dupping stdout"); - } - if (options.mergeStderrToStdout) { - if (dup2(STDOUT_FILENO, STDERR_FILENO) == -1) { - throw SysError("cannot dup stdout into stderr"); - } - } - if ((source != nullptr) && - dup2(in.readSide.get(), STDIN_FILENO) == -1) { - throw SysError("dupping stdin"); - } - - if (options.chdir && chdir((*options.chdir).c_str()) == -1) { - throw SysError("chdir failed"); - } - if (options.gid && setgid(*options.gid) == -1) { - throw SysError("setgid failed"); - } - /* Drop all other groups if we're setgid. */ - if (options.gid && setgroups(0, nullptr) == -1) { - throw SysError("setgroups failed"); - } - if (options.uid && setuid(*options.uid) == -1) { - throw SysError("setuid failed"); - } - - Strings args_(options.args); - args_.push_front(options.program); - - restoreSignals(); - - if (options.searchPath) { - execvp(options.program.c_str(), stringsToCharPtrs(args_).data()); - } else { - execv(options.program.c_str(), stringsToCharPtrs(args_).data()); - } - - throw SysError("executing '%1%'", options.program); - }, - processOptions)); - - out.writeSide = AutoCloseFD(-1); - - std::thread writerThread; - - std::promise promise; - - Finally doJoin([&]() { - if (writerThread.joinable()) { - writerThread.join(); - } - }); - - if (source != nullptr) { - in.readSide = AutoCloseFD(-1); - writerThread = std::thread([&]() { - try { - std::vector buf(8 * 1024); - while (true) { - size_t n; - try { - n = source->read(buf.data(), buf.size()); - } catch (EndOfFile&) { - break; - } - writeFull(in.writeSide.get(), buf.data(), n); - } - promise.set_value(); - } catch (...) 
{ - promise.set_exception(std::current_exception()); - } - in.writeSide = AutoCloseFD(-1); - }); - } - - if (options.standardOut != nullptr) { - drainFD(out.readSide.get(), *options.standardOut); - } - - /* Wait for the child to finish. */ - int status = pid.wait(); - - /* Wait for the writer thread to finish. */ - if (source != nullptr) { - promise.get_future().get(); - } - - if (status != 0) { - throw ExecError(status, fmt("program '%1%' %2%", options.program, - statusToString(status))); - } -} - -void closeMostFDs(const std::set& exceptions) { -#if __linux__ - try { - for (auto& s : readDirectory("/proc/self/fd")) { - auto fd = std::stoi(s.name); - if (exceptions.count(fd) == 0u) { - DLOG(INFO) << "closing leaked FD " << fd; - close(fd); - } - } - return; - } catch (SysError&) { - } -#endif - - int maxFD = 0; - maxFD = sysconf(_SC_OPEN_MAX); - for (int fd = 0; fd < maxFD; ++fd) { - if (exceptions.count(fd) == 0u) { - close(fd); - } /* ignore result */ - } -} - -void closeOnExec(int fd) { - int prev; - if ((prev = fcntl(fd, F_GETFD, 0)) == -1 || - fcntl(fd, F_SETFD, prev | FD_CLOEXEC) == -1) { - throw SysError("setting close-on-exec flag"); - } -} - -////////////////////////////////////////////////////////////////////// - -bool _isInterrupted = false; - -static thread_local bool interruptThrown = false; -thread_local std::function interruptCheck; - -void setInterruptThrown() { interruptThrown = true; } - -void _interrupted() { - /* Block user interrupts while an exception is being handled. - Throwing an exception while another exception is being handled - kills the program! */ - if (!interruptThrown && (std::uncaught_exceptions() == 0)) { - interruptThrown = true; - throw Interrupted("interrupted by the user"); - } -} - -////////////////////////////////////////////////////////////////////// - -std::string concatStringsSep(const std::string& sep, const Strings& ss) { - std::string s; - for (auto& i : ss) { - if (!s.empty()) { - s += sep; - } - s += i; - } - return s; -} - -std::string concatStringsSep(const std::string& sep, const StringSet& ss) { - std::string s; - for (auto& i : ss) { - if (!s.empty()) { - s += sep; - } - s += i; - } - return s; -} - -std::string replaceStrings(const std::string& s, const std::string& from, - const std::string& to) { - if (from.empty()) { - return s; - } - std::string res = s; - size_t pos = 0; - while ((pos = res.find(from, pos)) != std::string::npos) { - res.replace(pos, from.size(), to); - pos += to.size(); - } - return res; -} - -std::string statusToString(int status) { - if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { - if (WIFEXITED(status)) { - return (format("failed with exit code %1%") % WEXITSTATUS(status)).str(); - } - if (WIFSIGNALED(status)) { - int sig = WTERMSIG(status); -#if HAVE_STRSIGNAL - const char* description = strsignal(sig); - return (format("failed due to signal %1% (%2%)") % sig % description) - .str(); -#else - return (format("failed due to signal %1%") % sig).str(); -#endif - } else { - return "died abnormally"; - } - } else { - return "succeeded"; - } -} - -bool statusOk(int status) { - return WIFEXITED(status) && WEXITSTATUS(status) == 0; -} - -std::string toLower(const std::string& s) { - std::string r(s); - for (auto& c : r) { - c = std::tolower(c); - } - return r; -} - -std::string shellEscape(const std::string& s) { - std::string r = "'"; - for (auto& i : s) { - if (i == '\'') { - r += "'\\''"; - } else { - r += i; - } - } - r += '\''; - return r; -} - -void ignoreException() { - try { - throw; - } catch 
(std::exception& e) { - LOG(ERROR) << "error (ignored): " << e.what(); - } -} - -std::string filterANSIEscapes(const std::string& s, bool filterAll, - unsigned int width) { - std::string t; - std::string e; - size_t w = 0; - auto i = s.begin(); - - while (w < static_cast(width) && i != s.end()) { - if (*i == '\e') { - std::string e; - e += *i++; - char last = 0; - - if (i != s.end() && *i == '[') { - e += *i++; - // eat parameter bytes - while (i != s.end() && *i >= 0x30 && *i <= 0x3f) { - e += *i++; - } - // eat intermediate bytes - while (i != s.end() && *i >= 0x20 && *i <= 0x2f) { - e += *i++; - } - // eat final byte - if (i != s.end() && *i >= 0x40 && *i <= 0x7e) { - e += last = *i++; - } - } else { - if (i != s.end() && *i >= 0x40 && *i <= 0x5f) { - e += *i++; - } - } - - if (!filterAll && last == 'm') { - t += e; - } - } - - else if (*i == '\t') { - i++; - t += ' '; - w++; - while (w < static_cast(width) && ((w % 8) != 0u)) { - t += ' '; - w++; - } - } - - else if (*i == '\r') { - // do nothing for now - i++; - - } else { - t += *i++; - w++; - } - } - - return t; -} - -void callFailure(const std::function& failure, - const std::exception_ptr& exc) { - try { - failure(exc); - } catch (std::exception& e) { - LOG(ERROR) << "uncaught exception: " << e.what(); - abort(); - } -} - -static Sync> windowSize{{0, 0}}; - -static void updateWindowSize() { - struct winsize ws; - if (ioctl(2, TIOCGWINSZ, &ws) == 0) { - auto windowSize_(windowSize.lock()); - windowSize_->first = ws.ws_row; - windowSize_->second = ws.ws_col; - } -} - -std::pair getWindowSize() { - return *windowSize.lock(); -} - -static Sync>> _interruptCallbacks; - -static void signalHandlerThread(sigset_t set) { - while (true) { - int signal = 0; - sigwait(&set, &signal); - - if (signal == SIGINT || signal == SIGTERM || signal == SIGHUP) { - triggerInterrupt(); - - } else if (signal == SIGWINCH) { - updateWindowSize(); - } - } -} - -void triggerInterrupt() { - _isInterrupted = true; - - { - auto interruptCallbacks(_interruptCallbacks.lock()); - for (auto& callback : *interruptCallbacks) { - try { - callback(); - } catch (...) { - ignoreException(); - } - } - } -} - -static sigset_t savedSignalMask; - -void startSignalHandlerThread() { - updateWindowSize(); - - if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask) != 0) { - throw SysError("quering signal mask"); - } - - sigset_t set; - sigemptyset(&set); - sigaddset(&set, SIGINT); - sigaddset(&set, SIGTERM); - sigaddset(&set, SIGHUP); - sigaddset(&set, SIGPIPE); - sigaddset(&set, SIGWINCH); - if (pthread_sigmask(SIG_BLOCK, &set, nullptr) != 0) { - throw SysError("blocking signals"); - } - - std::thread(signalHandlerThread, set).detach(); -} - -void restoreSignals() { - if (sigprocmask(SIG_SETMASK, &savedSignalMask, nullptr) != 0) { - throw SysError("restoring signals"); - } -} - -/* RAII helper to automatically deregister a callback. 
*/ -struct InterruptCallbackImpl : InterruptCallback { - std::list>::iterator it; - ~InterruptCallbackImpl() override { _interruptCallbacks.lock()->erase(it); } -}; - -std::unique_ptr createInterruptCallback( - const std::function& callback) { - auto interruptCallbacks(_interruptCallbacks.lock()); - interruptCallbacks->push_back(callback); - - auto res = std::make_unique(); - res->it = interruptCallbacks->end(); - res->it--; - - return std::unique_ptr(res.release()); -} - -} // namespace nix diff --git a/third_party/nix/src/libutil/util.hh b/third_party/nix/src/libutil/util.hh deleted file mode 100644 index b3349c4f39..0000000000 --- a/third_party/nix/src/libutil/util.hh +++ /dev/null @@ -1,476 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "libutil/types.hh" - -namespace nix { - -struct Sink; -struct Source; - -/* The system for which Nix is compiled. */ -extern const std::string nativeSystem; - -/* Return an environment variable. */ -std::optional getEnv(const std::string& key); - -/* Get the entire environment. */ -std::map getEnv(); - -/* Clear the environment. */ -void clearEnv(); - -/* Return an absolutized path, resolving paths relative to the - specified directory, or the current directory otherwise. The path - is also canonicalised. */ -Path absPath(Path path, Path dir = ""); - -/* Canonicalise a path by removing all `.' or `..' components and - double or trailing slashes. Optionally resolves all symlink - components such that each component of the resulting path is *not* - a symbolic link. */ -Path canonPath(const Path& path, bool resolveSymlinks = false); - -/* Return the directory part of the given canonical path, i.e., - everything before the final `/'. If the path is the root or an - immediate child thereof (e.g., `/foo'), this means an empty string - is returned. */ -Path dirOf(absl::string_view path); - -/* Return the base name of the given canonical path, i.e., everything - following the final `/'. */ -std::string baseNameOf(const Path& path); - -/* Check whether 'path' is a descendant of 'dir'. */ -bool isInDir(const Path& path, const Path& dir); - -/* Check whether 'path' is equal to 'dir' or a descendant of 'dir'. */ -bool isDirOrInDir(const Path& path, const Path& dir); - -/* Get status of `path'. */ -struct stat lstat(const Path& path); - -/* Return true iff the given path exists. */ -bool pathExists(const Path& path); - -/* Read the contents (target) of a symbolic link. The result is not - in any way canonicalised. */ -Path readLink(const Path& path); - -bool isLink(const Path& path); - -/* Read the contents of a directory. The entries `.' and `..' are - removed. */ -struct DirEntry { - std::string name; - ino_t ino; - unsigned char type; // one of DT_* - DirEntry(const std::string& name, ino_t ino, unsigned char type) - : name(name), ino(ino), type(type) {} -}; - -typedef std::vector DirEntries; - -DirEntries readDirectory(const Path& path); - -unsigned char getFileType(const Path& path); - -/* Read the contents of a file into a string. */ -std::string readFile(int fd); -std::string readFile(absl::string_view path, bool drain = false); -void readFile(absl::string_view path, Sink& sink); - -/* Write a string to a file. */ -void writeFile(const Path& path, const std::string& s, mode_t mode = 0666); - -void writeFile(const Path& path, Source& source, mode_t mode = 0666); - -/* Read a line from a file descriptor. 
*/ -std::string readLine(int fd); - -/* Write a line to a file descriptor. */ -void writeLine(int fd, std::string s); - -/* Delete a path; i.e., in the case of a directory, it is deleted - recursively. It's not an error if the path does not exist. The - second variant returns the number of bytes and blocks freed. */ -void deletePath(const Path& path); - -void deletePath(const Path& path, unsigned long long& bytesFreed); - -/* Create a temporary directory. */ -Path createTempDir(const Path& tmpRoot = "", const Path& prefix = "nix", - bool includePid = true, bool useGlobalCounter = true, - mode_t mode = 0755); - -std::string getUserName(); - -/* Return $HOME or the user's home directory from /etc/passwd. */ -Path getHome(); - -/* Return $XDG_CACHE_HOME or $HOME/.cache. */ -Path getCacheDir(); - -/* Return $XDG_CONFIG_HOME or $HOME/.config. */ -Path getConfigDir(); - -/* Return the directories to search for user configuration files */ -std::vector getConfigDirs(); - -/* Return $XDG_DATA_HOME or $HOME/.local/share. */ -Path getDataDir(); - -/* Create a directory and all its parents, if necessary. Returns the - list of created directories, in order of creation. */ -Paths createDirs(const Path& path); - -/* Create a symlink. */ -void createSymlink(const Path& target, const Path& link); - -/* Atomically create or replace a symlink. */ -void replaceSymlink(const Path& target, const Path& link); - -/* Wrappers arount read()/write() that read/write exactly the - requested number of bytes. */ -void readFull(int fd, unsigned char* buf, size_t count); -void writeFull(int fd, const unsigned char* buf, size_t count, - bool allowInterrupts = true); -void writeFull(int fd, const std::string& s, bool allowInterrupts = true); - -MakeError(EndOfFile, Error); - -/* Read a file descriptor until EOF occurs. */ -std::string drainFD(int fd, bool block = true); - -void drainFD(int fd, Sink& sink, bool block = true); - -/* Automatic cleanup of resources. */ - -class AutoDelete { - Path path; - bool del; - bool recursive; - - public: - AutoDelete(); - explicit AutoDelete(Path p, bool recursive = true); - ~AutoDelete(); - void cancel(); - void reset(const Path& p, bool recursive = true); - explicit operator Path() const { return path; } -}; - -class AutoCloseFD { - int fd; - void close(); - - public: - AutoCloseFD(); - explicit AutoCloseFD(int fd); - AutoCloseFD(const AutoCloseFD& fd) = delete; - AutoCloseFD(AutoCloseFD&& that); - ~AutoCloseFD(); - AutoCloseFD& operator=(const AutoCloseFD& fd) = delete; - AutoCloseFD& operator=(AutoCloseFD&& that); - int get() const; - explicit operator bool() const; - int release(); -}; - -class Pipe { - public: - AutoCloseFD readSide, writeSide; - void create(); -}; - -struct DIRDeleter { - void operator()(DIR* dir) const { closedir(dir); } -}; - -using AutoCloseDir = std::unique_ptr; - -class Pid { - pid_t pid = -1; - bool separatePG = false; - int killSignal = SIGKILL; - - public: - Pid(); - explicit Pid(pid_t pid); - ~Pid(); - void operator=(pid_t pid); - explicit operator pid_t(); - int kill(); - int wait(); - - void setSeparatePG(bool separatePG); - void setKillSignal(int signal); - pid_t release(); - - friend bool operator==(const Pid& lhs, const Pid& rhs) { - return lhs.pid == rhs.pid; - } - - friend bool operator!=(const Pid& lhs, const Pid& rhs) { - return !(lhs == rhs); - } -}; - -/* Kill all processes running under the specified uid by sending them - a SIGKILL. 
*/ -void killUser(uid_t uid); - -/* Fork a process that runs the given function, and return the child - pid to the caller. */ -struct ProcessOptions { - std::string errorPrefix = "error: "; - bool dieWithParent = true; - bool runExitHandlers = false; -}; - -pid_t startProcess(std::function fun, - const ProcessOptions& options = ProcessOptions()); - -/* Run a program and return its stdout in a string (i.e., like the - shell backtick operator). */ -std::string runProgram(const Path& program, bool searchPath = false, - const Strings& args = Strings(), - const std::optional& input = {}); - -struct RunOptions { - std::optional uid; - std::optional gid; - std::optional chdir; - std::optional> environment; - Path program; - bool searchPath = true; - Strings args; - std::optional input; - Source* standardIn = nullptr; - Sink* standardOut = nullptr; - bool mergeStderrToStdout = false; - bool _killStderr = false; - - RunOptions(const Path& program, const Strings& args) - : program(program), args(args){}; - - RunOptions& killStderr(bool v) { - _killStderr = true; - return *this; - } -}; - -std::pair runProgram(const RunOptions& options); - -void runProgram2(const RunOptions& options); - -class ExecError : public Error { - public: - int status; - - template - explicit ExecError(int status, Args... args) - : Error(args...), status(status) {} -}; - -/* Convert a list of strings to a null-terminated vector of char - *'s. The result must not be accessed beyond the lifetime of the - list of strings. */ -std::vector stringsToCharPtrs(const Strings& ss); - -/* Close all file descriptors except those listed in the given set. - Good practice in child processes. */ -void closeMostFDs(const std::set& exceptions); - -/* Set the close-on-exec flag for the given file descriptor. */ -void closeOnExec(int fd); - -/* User interruption. */ - -extern bool _isInterrupted; - -extern thread_local std::function interruptCheck; - -void setInterruptThrown(); - -void _interrupted(); - -void inline checkInterrupt() { - if (_isInterrupted || (interruptCheck && interruptCheck())) { - _interrupted(); - } -} - -MakeError(Interrupted, BaseError); - -MakeError(FormatError, Error); - -/* Concatenate the given strings with a separator between the - elements. */ -std::string concatStringsSep(const std::string& sep, const Strings& ss); -std::string concatStringsSep(const std::string& sep, const StringSet& ss); - -/* Replace all occurrences of a string inside another string. */ -std::string replaceStrings(const std::string& s, const std::string& from, - const std::string& to); - -/* Convert the exit status of a child as returned by wait() into an - error string. */ -std::string statusToString(int status); - -bool statusOk(int status); - -/* Parse a string into a float. */ -template -bool string2Float(const std::string& s, N& n) { - std::istringstream str(s); - str >> n; - return str && str.get() == EOF; -} - -/* Convert a string to lower case. */ -std::string toLower(const std::string& s); - -/* Escape a string as a shell word. */ -std::string shellEscape(const std::string& s); - -/* Exception handling in destructors: print an error message, then - ignore the exception. */ -void ignoreException(); - -/* Some ANSI escape sequences. */ -#define ANSI_NORMAL "\e[0m" -#define ANSI_BOLD "\e[1m" -#define ANSI_FAINT "\e[2m" -#define ANSI_RED "\e[31;1m" -#define ANSI_GREEN "\e[32;1m" -#define ANSI_BLUE "\e[34;1m" - -/* Truncate a string to 'width' printable characters. If 'filterAll' - is true, all ANSI escape sequences are filtered out. 
Otherwise, - some escape sequences (such as colour setting) are copied but not - included in the character count. Also, tabs are expanded to - spaces. */ -std::string filterANSIEscapes( - const std::string& s, bool filterAll = false, - unsigned int width = std::numeric_limits::max()); - -/* Get a value for the specified key from an associate container, or a - default value if the key doesn't exist. */ -template -std::string get(const T& map, const std::string& key, - const std::string& def = "") { - auto i = map.find(key); - return i == map.end() ? def : i->second; -} - -/* A callback is a wrapper around a lambda that accepts a valid of - type T or an exception. (We abuse std::future to pass the value or - exception.) */ -template -class Callback { - std::function)> fun; - std::atomic_flag done = ATOMIC_FLAG_INIT; - - public: - explicit Callback(std::function)> fun) : fun(fun) {} - - Callback(Callback&& callback) : fun(std::move(callback.fun)) { - auto prev = callback.done.test_and_set(); - if (prev) { - done.test_and_set(); - } - } - -// The unused-variable assert is disabled in this block because the -// `prev` variables are only used in debug mode (in the asserts). -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunused-variable" - - void operator()(T&& t) noexcept { - auto prev = done.test_and_set(); - assert(!prev); - std::promise promise; - promise.set_value(std::move(t)); - fun(promise.get_future()); - } - - void rethrow( - const std::exception_ptr& exc = std::current_exception()) noexcept { - auto prev = done.test_and_set(); - assert(!prev); - std::promise promise; - promise.set_exception(exc); - fun(promise.get_future()); - } - -#pragma clang diagnostic pop -}; - -/* Start a thread that handles various signals. Also block those signals - on the current thread (and thus any threads created by it). */ -void startSignalHandlerThread(); - -/* Restore default signal handling. */ -void restoreSignals(); - -struct InterruptCallback { - virtual ~InterruptCallback(){}; -}; - -/* Register a function that gets called on SIGINT (in a non-signal - context). */ -std::unique_ptr createInterruptCallback( - const std::function& callback); - -void triggerInterrupt(); - -/* A RAII class that causes the current thread to receive SIGUSR1 when - the signal handler thread receives SIGINT. That is, this allows - SIGINT to be multiplexed to multiple threads. */ -struct ReceiveInterrupts { - pthread_t target; - std::unique_ptr callback; - - ReceiveInterrupts() - : target(pthread_self()), callback(createInterruptCallback([&]() { - pthread_kill(target, SIGUSR1); - })) {} -}; - -/* A RAII helper that increments a counter on construction and - decrements it on destruction. */ -template -struct MaintainCount { - T& counter; - long delta; - explicit MaintainCount(T& counter, long delta = 1) - : counter(counter), delta(delta) { - counter += delta; - } - ~MaintainCount() { counter -= delta; } -}; - -/* Return the number of rows and columns of the terminal. */ -std::pair getWindowSize(); - -/* Used in various places. 
*/
-using PathFilter = std::function<bool(const Path& path)>;
-
-extern PathFilter defaultPathFilter;
-
-}  // namespace nix
diff --git a/third_party/nix/src/libutil/visitor.hh b/third_party/nix/src/libutil/visitor.hh
deleted file mode 100644
index bf1d665af7..0000000000
--- a/third_party/nix/src/libutil/visitor.hh
+++ /dev/null
@@ -1,19 +0,0 @@
-#pragma once
-
-namespace nix::util {
-
-// Helper class used for visiting std::variants by creating a variadic
-// list of lambda expressions that delegates calls to each of the
-// callables.
-//
-// See e.g.
-// https://dev.to/tmr232/that-overloaded-trick-overloading-lambdas-in-c17
-template <class... Ts>
-struct overloaded : Ts... {
-  using Ts::operator()...;
-};
-
-template <class... Ts>
-overloaded(Ts...) -> overloaded<Ts...>;
-
-}  // namespace nix::util
diff --git a/third_party/nix/src/libutil/xml-writer.cc b/third_party/nix/src/libutil/xml-writer.cc
deleted file mode 100644
index 8274ed769e..0000000000
--- a/third_party/nix/src/libutil/xml-writer.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-#include "libutil/xml-writer.hh"
-
-#include
-
-namespace nix {
-
-XMLWriter::XMLWriter(bool indent, std::ostream& output)
-    : output(output), indent(indent) {
-  output << "<?xml version='1.0' encoding='utf-8'?>" << std::endl;
-  closed = false;
-}
-
-XMLWriter::~XMLWriter() { close(); }
-
-void XMLWriter::close() {
-  if (closed) {
-    return;
-  }
-  while (!pendingElems.empty()) {
-    closeElement();
-  }
-  closed = true;
-}
-
-void XMLWriter::indent_(size_t depth) {
-  if (!indent) {
-    return;
-  }
-  output << std::string(depth * 2, ' ');
-}
-
-void XMLWriter::openElement(const std::string& name, const XMLAttrs& attrs) {
-  assert(!closed);
-  indent_(pendingElems.size());
-  output << "<" << name;
-  writeAttrs(attrs);
-  output << ">";
-  if (indent) {
-    output << std::endl;
-  }
-  pendingElems.push_back(name);
-}
-
-void XMLWriter::closeElement() {
-  assert(!pendingElems.empty());
-  indent_(pendingElems.size() - 1);
-  output << "</" << pendingElems.back() << ">";
-  if (indent) {
-    output << std::endl;
-  }
-  pendingElems.pop_back();
-  if (pendingElems.empty()) {
-    closed = true;
-  }
-}
-
-void XMLWriter::writeEmptyElement(const std::string& name,
-                                  const XMLAttrs& attrs) {
-  assert(!closed);
-  indent_(pendingElems.size());
-  output << "<" << name;
-  writeAttrs(attrs);
-  output << " />";
-  if (indent) {
-    output << std::endl;
-  }
-}
-
-void XMLWriter::writeAttrs(const XMLAttrs& attrs) {
-  for (auto& i : attrs) {
-    output << " " << i.first << "=\"";
-    for (char c : i.second) {
-      if (c == '"') {
-        output << "&quot;";
-      } else if (c == '<') {
-        output << "&lt;";
-      } else if (c == '>') {
-        output << "&gt;";
-      } else if (c == '&') {
-        output << "&amp;";
-        /* Escape newlines to prevent attribute normalisation (see
-           XML spec, section 3.3.3. */
-      } else if (c == '\n') {
-        output << "&#xA;";
-      } else {
-        output << c;
-      }
-    }
-    output << "\"";
-  }
-}
-
-}  // namespace nix
diff --git a/third_party/nix/src/libutil/xml-writer.hh b/third_party/nix/src/libutil/xml-writer.hh
deleted file mode 100644
index d6f7cddb35..0000000000
--- a/third_party/nix/src/libutil/xml-writer.hh
+++ /dev/null
@@ -1,52 +0,0 @@
-#pragma once
-
-#include
-#include
-#include
-#include
-
-namespace nix {
-
-typedef std::map<std::string, std::string> XMLAttrs;
-
-class XMLWriter {
- private:
-  std::ostream& output;
-
-  bool indent;
-  bool closed;
-
-  std::list<std::string> pendingElems;
-
- public:
-  XMLWriter(bool indent, std::ostream& output);
-  ~XMLWriter();
-
-  void close();
-
-  void openElement(const std::string& name, const XMLAttrs& attrs = XMLAttrs());
-  void closeElement();
-
-  void writeEmptyElement(const std::string& name,
-                         const XMLAttrs& attrs = XMLAttrs());
-
- private:
-  void writeAttrs(const XMLAttrs& attrs);
-
-  void indent_(size_t depth);
-};
-
-class XMLOpenElement {
- private:
-  XMLWriter& writer;
-
- public:
-  XMLOpenElement(XMLWriter& writer, const std::string& name,
-                 const XMLAttrs& attrs = XMLAttrs())
-      : writer(writer) {
-    writer.openElement(name, attrs);
-  }
-  ~XMLOpenElement() { writer.closeElement(); }
-};
-
-}  // namespace nix
diff --git a/third_party/nix/src/nix-build/nix-build.cc b/third_party/nix/src/nix-build/nix-build.cc
deleted file mode 100644
index 26c3089677..0000000000
--- a/third_party/nix/src/nix-build/nix-build.cc
+++ /dev/null
@@ -1,581 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-
-#include "libexpr/attr-path.hh"
-#include "libexpr/common-eval-args.hh"
-#include "libexpr/eval-inline.hh"
-#include "libexpr/eval.hh"
-#include "libexpr/get-drvs.hh"
-#include "libmain/shared.hh"
-#include "libstore/derivations.hh"
-#include "libstore/globals.hh"
-#include "libstore/store-api.hh"
-#include "libutil/affinity.hh"
-#include "libutil/status.hh"
-#include "libutil/util.hh"
-#include "nix/legacy.hh"
-
-using namespace nix;
-using namespace std::string_literals;
-
-/* Recreate the effect of the perl shellwords function, breaking up a
- * string into arguments like a shell word, including escapes
- */
-std::vector<std::string> shellwords(const std::string& s) {
-  std::regex whitespace("^(\\s+).*");
-  auto begin = s.cbegin();
-  std::vector<std::string> res;
-  std::string cur;
-  enum state { sBegin, sQuote };
-  state st = sBegin;
-  auto it = begin;
-  for (; it != s.cend(); ++it) {
-    if (st == sBegin) {
-      std::smatch match;
-      if (regex_search(it, s.cend(), match, whitespace)) {
-        cur.append(begin, it);
-        res.push_back(cur);
-        cur.clear();
-        it = match[1].second;
-        begin = it;
-      }
-    }
-    switch (*it) {
-      case '"':
-        cur.append(begin, it);
-        begin = it + 1;
-        st = st == sBegin ?
sQuote : sBegin; - break; - case '\\': - /* perl shellwords mostly just treats the next char as part of the - * string with no special processing */ - cur.append(begin, it); - begin = ++it; - break; - } - } - cur.append(begin, it); - if (!cur.empty()) { - res.push_back(cur); - } - return res; -} - -static void _main(int argc, char** argv) { - auto dryRun = false; - auto runEnv = std::regex_search(argv[0], std::regex("nix-shell$")); - auto pure = false; - auto fromArgs = false; - auto packages = false; - // Same condition as bash uses for interactive shells - auto interactive = - (isatty(STDIN_FILENO) != 0) && (isatty(STDERR_FILENO) != 0); - Strings attrPaths; - Strings left; - RepairFlag repair = NoRepair; - Path gcRoot; - BuildMode buildMode = bmNormal; - bool readStdin = false; - - std::string envCommand; // interactive shell - Strings envExclude; - - auto myName = runEnv ? "nix-shell" : "nix-build"; - - auto inShebang = false; - std::string script; - std::vector savedArgs; - - AutoDelete tmpDir(createTempDir("", myName)); - - std::string outLink = "./result"; - - // List of environment variables kept for --pure - std::set keepVars{ - "HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", - "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL"}; - - Strings args; - for (int i = 1; i < argc; ++i) { - args.push_back(argv[i]); - } - - // Heuristic to see if we're invoked as a shebang script, namely, - // if we have at least one argument, it's the name of an - // executable file, and it starts with "#!". - if (runEnv && argc > 1 && - !std::regex_search(argv[1], std::regex("nix-shell"))) { - script = argv[1]; - try { - Strings lines = absl::StrSplit(readFile(script), absl::ByChar('\n'), - absl::SkipEmpty()); - if (std::regex_search(lines.front(), std::regex("^#!"))) { - lines.pop_front(); - inShebang = true; - for (int i = 2; i < argc; ++i) { - savedArgs.emplace_back(argv[i]); - } - args.clear(); - for (auto line : lines) { - line = absl::StripTrailingAsciiWhitespace(line); - std::smatch match; - if (std::regex_match(line, match, - std::regex("^#!\\s*nix-shell (.*)$"))) { - for (const auto& word : shellwords(match[1].str())) { - args.push_back(word); - } - } - } - } - } catch (SysError&) { - } - } - - struct MyArgs : LegacyArgs, MixEvalArgs { - using LegacyArgs::LegacyArgs; - }; - - MyArgs myArgs( - myName, [&](Strings::iterator& arg, const Strings::iterator& end) { - if (*arg == "--help") { - deletePath(Path(tmpDir)); - showManPage(myName); - } - - else if (*arg == "--version") { - printVersion(myName); - - } else if (*arg == "--add-drv-link" || *arg == "--indirect") { - ; // obsolete - - } else if (*arg == "--no-out-link" || *arg == "--no-link") { - outLink = Path(tmpDir) + "/result"; - - } else if (*arg == "--attr" || *arg == "-A") { - attrPaths.push_back(getArg(*arg, arg, end)); - - } else if (*arg == "--drv-link") { - getArg(*arg, arg, end); // obsolete - - } else if (*arg == "--out-link" || *arg == "-o") { - outLink = getArg(*arg, arg, end); - - } else if (*arg == "--add-root") { - gcRoot = getArg(*arg, arg, end); - - } else if (*arg == "--dry-run") { - dryRun = true; - - } else if (*arg == "--repair") { - repair = Repair; - buildMode = bmRepair; - } - - else if (*arg == "--run-env") { // obsolete - runEnv = true; - - } else if (*arg == "--command" || *arg == "--run") { - if (*arg == "--run") { - interactive = false; - } - envCommand = getArg(*arg, arg, end) + "\nexit"; - } - - else if (*arg == "--check") { - buildMode = bmCheck; - - } else if (*arg == "--exclude") { - 
envExclude.push_back(getArg(*arg, arg, end)); - - } else if (*arg == "--expr" || *arg == "-E") { - fromArgs = true; - - } else if (runEnv && *arg == "--pure") { - pure = true; - } else if (runEnv && *arg == "--impure") { - pure = false; - - } else if (*arg == "--packages" || *arg == "-p") { - packages = true; - - } else if (inShebang && *arg == "-i") { - auto interpreter = getArg(*arg, arg, end); - interactive = false; - auto execArgs = ""; - - // Überhack to support Perl. Perl examines the shebang and - // executes it unless it contains the string "perl" or "indir", - // or (undocumented) argv[0] does not contain "perl". Exploit - // the latter by doing "exec -a". - if (std::regex_search(interpreter, std::regex("perl"))) { - execArgs = "-a PERL"; - } - - std::ostringstream joined; - for (const auto& i : savedArgs) { - joined << shellEscape(i) << ' '; - } - - if (std::regex_search(interpreter, std::regex("ruby"))) { - // Hack for Ruby. Ruby also examines the shebang. It tries to - // read the shebang to understand which packages to read from. Since - // this is handled via nix-shell -p, we wrap our ruby script - // execution in ruby -e 'load' which ignores the shebangs. - envCommand = (format("exec %1% %2% -e 'load(\"%3%\")' -- %4%") % - execArgs % interpreter % script % joined.str()) - .str(); - } else { - envCommand = (format("exec %1% %2% %3% %4%") % execArgs % - interpreter % script % joined.str()) - .str(); - } - } - - else if (*arg == "--keep") { - keepVars.insert(getArg(*arg, arg, end)); - - } else if (*arg == "-") { - readStdin = true; - - } else if (*arg != "" && arg->at(0) == '-') { - return false; - - } else { - left.push_back(*arg); - } - - return true; - }); - - myArgs.parseCmdline(args); - - if (packages && fromArgs) { - throw UsageError("'-p' and '-E' are mutually exclusive"); - } - - auto store = openStore(); - - auto state = std::make_unique(myArgs.searchPath, store); - state->repair = repair; - - std::unique_ptr autoArgs = myArgs.getAutoArgs(*state); - - if (packages) { - std::ostringstream joined; - // TODO(grfn): Generate a syntax tree here, not a string - joined << "with import { }; (pkgs.runCommandCC or " - "pkgs.runCommand) \"shell\" { buildInputs = [ "; - for (const auto& i : left) { - joined << '(' << i << ") "; - } - joined << "]; } \"\""; - fromArgs = true; - left = {joined.str()}; - } else if (!fromArgs) { - if (left.empty() && runEnv && pathExists("shell.nix")) { - left = {"shell.nix"}; - } - if (left.empty()) { - left = {"default.nix"}; - } - } - - if (runEnv) { - setenv("IN_NIX_SHELL", pure ? "pure" : "impure", 1); - } - - DrvInfos drvs; - - /* Parse the expressions. */ - std::vector exprs; - - if (readStdin) { - exprs = {state->parseStdin()}; - } else { - for (const auto& i : left) { - if (fromArgs) { - exprs.push_back(state->parseExprFromString(i, absPath("."))); - } else { - auto absolute = i; - try { - absolute = canonPath(absPath(i), true); - } catch (Error& e) { - }; - if (store->isStorePath(absolute) && - std::regex_match(absolute, std::regex(".*\\.drv(!.*)?"))) { - drvs.push_back(DrvInfo(*state, store, absolute)); - } else { - /* If we're in a #! script, interpret filenames - relative to the script. */ - exprs.push_back( - state->parseExprFromFile(resolveExprPath(state->checkSourcePath( - lookupFileArg(*state, inShebang && !packages - ? absPath(i, absPath(dirOf(script))) - : i))))); - } - } - } - } - - /* Evaluate them into derivations. 
*/ - if (attrPaths.empty()) { - attrPaths = {""}; - } - - for (auto e : exprs) { - Value vRoot; - state->eval(e, vRoot); - - for (auto& i : attrPaths) { - Value& v(*findAlongAttrPath(*state, i, autoArgs.get(), vRoot)); - state->forceValue(v); - getDerivations(*state, v, "", autoArgs.get(), drvs, false); - } - } - - state->printStats(); - - auto buildPaths = [&](const PathSet& paths) { - /* Note: we do this even when !printMissing to efficiently - fetch binary cache data. */ - unsigned long long downloadSize; - unsigned long long narSize; - PathSet willBuild; - PathSet willSubstitute; - PathSet unknown; - store->queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, - narSize); - - if (settings.printMissing) { - printMissing(ref(store), willBuild, willSubstitute, unknown, - downloadSize, narSize); - } - - if (!dryRun) { - util::OkOrThrow(store->buildPaths(std::cerr, paths, buildMode)); - } - }; - - if (runEnv) { - if (drvs.size() != 1) { - throw UsageError("nix-shell requires a single derivation"); - } - - auto& drvInfo = drvs.front(); - auto drv = store->derivationFromPath(drvInfo.queryDrvPath()); - - PathSet pathsToBuild; - - /* Figure out what bash shell to use. If $NIX_BUILD_SHELL - is not set, then build bashInteractive from - . */ - auto opt_shell = getEnv("NIX_BUILD_SHELL"); - std::string shell; - - if (opt_shell.has_value()) { - shell = opt_shell.value(); - } else { - try { - auto expr = state->parseExprFromString( - "(import {}).bashInteractive", absPath(".")); - - Value v; - state->eval(expr, v); - - auto drv = getDerivation(*state, v, false); - if (!drv) { - throw Error( - "the 'bashInteractive' attribute in did not evaluate " - "to a derivation"); - } - - pathsToBuild.insert(drv->queryDrvPath()); - - shell = drv->queryOutPath() + "/bin/bash"; - - } catch (Error& e) { - LOG(WARNING) << e.what() << "; will use bash from your environment"; - shell = "bash"; - } - } - - // Build or fetch all dependencies of the derivation. - for (const auto& input : drv.inputDrvs) { - if (std::all_of(envExclude.cbegin(), envExclude.cend(), - [&](const std::string& exclude) { - return !std::regex_search(input.first, - std::regex(exclude)); - })) { - pathsToBuild.insert(makeDrvPathWithOutputs(input.first, input.second)); - } - } - for (const auto& src : drv.inputSrcs) { - pathsToBuild.insert(src); - } - - buildPaths(pathsToBuild); - - if (dryRun) { - return; - } - - // Set the environment. - auto env = getEnv(); - - auto tmp = - getEnv("TMPDIR").value_or(getEnv("XDG_RUNTIME_DIR").value_or("/tmp")); - - if (pure) { - decltype(env) newEnv; - for (auto& i : env) { - if (keepVars.count(i.first) != 0u) { - newEnv.emplace(i); - } - } - env = newEnv; - // NixOS hack: prevent /etc/bashrc from sourcing /etc/profile. - env["__ETC_PROFILE_SOURCED"] = "1"; - } - - env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = - env["TEMP"] = tmp; - env["NIX_STORE"] = store->storeDir; - env["NIX_BUILD_CORES"] = std::to_string(settings.buildCores); - - StringSet passAsFile = - absl::StrSplit(get(drv.env, "passAsFile", ""), - absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty()); - - bool keepTmp = false; - int fileNr = 0; - - for (auto& var : drv.env) { - if (passAsFile.count(var.first) != 0u) { - keepTmp = true; - std::string fn = ".attr-" + std::to_string(fileNr++); - Path p = Path(tmpDir) + "/" + fn; - writeFile(p, var.second); - env[var.first + "Path"] = p; - } else { - env[var.first] = var.second; - } - } - - restoreAffinity(); - - /* Run a shell using the derivation's environment. 
For - convenience, source $stdenv/setup to setup additional - environment variables and shell functions. Also don't - lose the current $PATH directories. */ - auto rcfile = Path(tmpDir) + "/rc"; - writeFile( - rcfile, - fmt((keepTmp ? "" : "rm -rf '%1%'; "s) + - "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc; " - "%2%" - "dontAddDisableDepTrack=1; " - "[ -e $stdenv/setup ] && source $stdenv/setup; " - "%3%" - "PATH=\"%4%:$PATH\"; " - "SHELL=%5%; " - "set +e; " - R"s([ -n "$PS1" ] && PS1='\n\[\033[1;32m\][nix-shell:\w]\$\[\033[0m\] '; )s" - "if [ \"$(type -t runHook)\" = function ]; then runHook " - "shellHook; fi; " - "unset NIX_ENFORCE_PURITY; " - "shopt -u nullglob; " - "unset TZ; %6%" - "%7%", - Path(tmpDir), (pure ? "" : "p=$PATH; "), - (pure ? "" : "PATH=$PATH:$p; unset p; "), dirOf(shell), shell, - (getenv("TZ") != nullptr - ? (std::string("export TZ='") + getenv("TZ") + "'; ") - : ""), - envCommand)); - - Strings envStrs; - for (auto& i : env) { - envStrs.push_back(i.first + "=" + i.second); - } - - auto args = interactive ? Strings{"bash", "--rcfile", rcfile} - : Strings{"bash", rcfile}; - - auto envPtrs = stringsToCharPtrs(envStrs); - - environ = envPtrs.data(); - - auto argPtrs = stringsToCharPtrs(args); - - restoreSignals(); - - execvp(shell.c_str(), argPtrs.data()); - - throw SysError("executing shell '%s'", shell); - } - - PathSet pathsToBuild; - - std::map drvPrefixes; - std::map resultSymlinks; - std::vector outPaths; - - for (auto& drvInfo : drvs) { - auto drvPath = drvInfo.queryDrvPath(); - auto outPath = drvInfo.queryOutPath(); - - auto outputName = drvInfo.queryOutputName(); - if (outputName.empty()) { - throw Error("derivation '%s' lacks an 'outputName' attribute", drvPath); - } - - pathsToBuild.insert(drvPath + "!" + outputName); - - std::string drvPrefix; - auto i = drvPrefixes.find(drvPath); - if (i != drvPrefixes.end()) { - drvPrefix = i->second; - } else { - drvPrefix = outLink; - if (!drvPrefixes.empty() != 0u) { - drvPrefix += fmt("-%d", drvPrefixes.size() + 1); - } - drvPrefixes[drvPath] = drvPrefix; - } - - std::string symlink = drvPrefix; - if (outputName != "out") { - symlink += "-" + outputName; - } - - resultSymlinks[symlink] = outPath; - outPaths.push_back(outPath); - } - - buildPaths(pathsToBuild); - - if (dryRun) { - return; - } - - for (auto& symlink : resultSymlinks) { - if (auto store2 = store.dynamic_pointer_cast()) { - store2->addPermRoot(symlink.second, absPath(symlink.first), true); - } - } - - for (auto& path : outPaths) { - std::cout << path << '\n'; - } -} - -static RegisterLegacyCommand s1("nix-build", _main); -static RegisterLegacyCommand s2("nix-shell", _main); diff --git a/third_party/nix/src/nix-channel/nix-channel.cc b/third_party/nix/src/nix-channel/nix-channel.cc deleted file mode 100644 index 5cc16d1b4b..0000000000 --- a/third_party/nix/src/nix-channel/nix-channel.cc +++ /dev/null @@ -1,275 +0,0 @@ -#include - -#include -#include -#include -#include - -#include "libmain/shared.hh" -#include "libstore/download.hh" -#include "libstore/globals.hh" -#include "libstore/store-api.hh" -#include "nix/legacy.hh" - -using namespace nix; - -typedef std::map Channels; - -static Channels channels; -static Path channelsList; - -// Reads the list of channels. 
-static void readChannels() { - if (!pathExists(channelsList)) { - return; - } - auto channelsFile = readFile(channelsList); - - std::vector lines = - absl::StrSplit(channelsFile, absl::ByChar('\n'), absl::SkipEmpty()); - - for (auto& line : lines) { - line = absl::StripTrailingAsciiWhitespace(line); - if (std::regex_search(line, std::regex("^\\s*\\#"))) { - continue; - } - std::vector split = - absl::StrSplit(line, absl::ByChar(' '), absl::SkipEmpty()); - auto url = std::regex_replace(split[0], std::regex("/*$"), ""); - auto name = split.size() > 1 ? split[1] : baseNameOf(url); - channels[name] = url; - } -} - -// Writes the list of channels. -static void writeChannels() { - auto channelsFD = AutoCloseFD{open( - channelsList.c_str(), O_WRONLY | O_CLOEXEC | O_CREAT | O_TRUNC, 0644)}; - if (!channelsFD) { - throw SysError(format("opening '%1%' for writing") % channelsList); - } - for (const auto& channel : channels) { - writeFull(channelsFD.get(), channel.second + " " + channel.first + "\n"); - } -} - -// Adds a channel. -static void addChannel(const std::string& url, const std::string& name) { - if (!regex_search(url, std::regex("^(file|http|https)://"))) { - throw Error(format("invalid channel URL '%1%'") % url); - } - if (!regex_search(name, std::regex("^[a-zA-Z0-9_][a-zA-Z0-9_\\.-]*$"))) { - throw Error(format("invalid channel identifier '%1%'") % name); - } - readChannels(); - channels[name] = url; - writeChannels(); -} - -static Path profile; - -// Remove a channel. -static void removeChannel(const std::string& name) { - readChannels(); - channels.erase(name); - writeChannels(); - - runProgram(settings.nixBinDir + "/nix-env", true, - {"--profile", profile, "--uninstall", name}); -} - -static Path nixDefExpr; - -// Fetch Nix expressions and binary cache URLs from the subscribed channels. -static void update(const StringSet& channelNames) { - readChannels(); - - auto store = openStore(); - - // Download each channel. - Strings exprs; - for (const auto& channel : channels) { - auto name = channel.first; - auto url = channel.second; - if (!(channelNames.empty() || (channelNames.count(name) != 0u))) { - continue; - } - - // We want to download the url to a file to see if it's a tarball while also - // checking if we got redirected in the process, so that we can grab the - // various parts of a nix channel definition from a consistent location if - // the redirect changes mid-download. - CachedDownloadRequest request(url); - request.ttl = 0; - auto dl = getDownloader(); - auto result = dl->downloadCached(store, request); - auto filename = result.path; - url = absl::StripTrailingAsciiWhitespace(result.effectiveUri); - - // If the URL contains a version number, append it to the name - // attribute (so that "nix-env -q" on the channels profile - // shows something useful). - auto cname = name; - std::smatch match; - auto urlBase = baseNameOf(url); - if (std::regex_search(urlBase, match, std::regex("(-\\d.*)$"))) { - cname = cname + std::string(match[1]); - } - - std::string extraAttrs; - - bool unpacked = false; - if (std::regex_search(filename, std::regex("\\.tar\\.(gz|bz2|xz)$"))) { - runProgram(settings.nixBinDir + "/nix-build", false, - {"--no-out-link", "--expr", - "import " - "{ name = \"" + - cname + "\"; channelName = \"" + name + - "\"; src = builtins.storePath \"" + filename + "\"; }"}); - unpacked = true; - } - - if (!unpacked) { - // Download the channel tarball. 
- try { - filename = dl->downloadCached( - store, CachedDownloadRequest(url + "/nixexprs.tar.xz")) - .path; - } catch (DownloadError& e) { - filename = - dl->downloadCached(store, - CachedDownloadRequest(url + "/nixexprs.tar.bz2")) - .path; - } - filename = absl::StripTrailingAsciiWhitespace(filename); - } - - // Regardless of where it came from, add the expression representing this - // channel to accumulated expression - exprs.push_back("f: f { name = \"" + cname + "\"; channelName = \"" + name + - "\"; src = builtins.storePath \"" + filename + "\"; " + - extraAttrs + " }"); - } - - // Unpack the channel tarballs into the Nix store and install them - // into the channels profile. - std::cerr << "unpacking channels...\n"; - Strings envArgs{"--profile", profile, - "--file", "", - "--install", "--from-expression"}; - for (auto& expr : exprs) { - envArgs.push_back(std::move(expr)); - } - envArgs.push_back("--quiet"); - runProgram(settings.nixBinDir + "/nix-env", false, envArgs); - - // Make the channels appear in nix-env. - struct stat st; - if (lstat(nixDefExpr.c_str(), &st) == 0) { - if (S_ISLNK(st.st_mode)) { - // old-skool ~/.nix-defexpr - if (unlink(nixDefExpr.c_str()) == -1) { - throw SysError(format("unlinking %1%") % nixDefExpr); - } - } - } else if (errno != ENOENT) { - throw SysError(format("getting status of %1%") % nixDefExpr); - } - createDirs(nixDefExpr); - auto channelLink = nixDefExpr + "/channels"; - replaceSymlink(profile, channelLink); -} - -static int _main(int argc, char** argv) { - { - // Figure out the name of the `.nix-channels' file to use - auto home = getHome(); - channelsList = home + "/.nix-channels"; - nixDefExpr = home + "/.nix-defexpr"; - - // Figure out the name of the channels profile. - profile = fmt("%s/profiles/per-user/%s/channels", settings.nixStateDir, - getUserName()); - - enum { cNone, cAdd, cRemove, cList, cUpdate, cRollback } cmd = cNone; - std::vector args; - parseCmdLine(argc, argv, - [&](Strings::iterator& arg, const Strings::iterator& end) { - if (*arg == "--help") { - showManPage("nix-channel"); - } else if (*arg == "--version") { - printVersion("nix-channel"); - } else if (*arg == "--add") { - cmd = cAdd; - } else if (*arg == "--remove") { - cmd = cRemove; - } else if (*arg == "--list") { - cmd = cList; - } else if (*arg == "--update") { - cmd = cUpdate; - } else if (*arg == "--rollback") { - cmd = cRollback; - } else { - args.push_back(std::move(*arg)); - } - return true; - }); - - switch (cmd) { - case cNone: - throw UsageError("no command specified"); - case cAdd: - if (args.empty() || args.size() > 2) { - throw UsageError("'--add' requires one or two arguments"); - } - { - auto url = args[0]; - std::string name; - if (args.size() == 2) { - name = args[1]; - } else { - name = baseNameOf(url); - name = std::regex_replace(name, std::regex("-unstable$"), ""); - name = std::regex_replace(name, std::regex("-stable$"), ""); - } - addChannel(url, name); - } - break; - case cRemove: - if (args.size() != 1) { - throw UsageError("'--remove' requires one argument"); - } - removeChannel(args[0]); - break; - case cList: - if (!args.empty()) { - throw UsageError("'--list' expects no arguments"); - } - readChannels(); - for (const auto& channel : channels) { - std::cout << channel.first << ' ' << channel.second << '\n'; - } - break; - case cUpdate: - update(StringSet(args.begin(), args.end())); - break; - case cRollback: - if (args.size() > 1) { - throw UsageError("'--rollback' has at most one argument"); - } - Strings envArgs{"--profile", profile}; - if 
(args.size() == 1) { - envArgs.push_back("--switch-generation"); - envArgs.push_back(args[0]); - } else { - envArgs.push_back("--rollback"); - } - runProgram(settings.nixBinDir + "/nix-env", false, envArgs); - break; - } - - return 0; - } -} - -static RegisterLegacyCommand s1("nix-channel", _main); diff --git a/third_party/nix/src/nix-collect-garbage/nix-collect-garbage.cc b/third_party/nix/src/nix-collect-garbage/nix-collect-garbage.cc deleted file mode 100644 index ac8c7d9399..0000000000 --- a/third_party/nix/src/nix-collect-garbage/nix-collect-garbage.cc +++ /dev/null @@ -1,103 +0,0 @@ -#include -#include - -#include - -#include "libmain/shared.hh" -#include "libstore/globals.hh" -#include "libstore/profiles.hh" -#include "libstore/store-api.hh" -#include "nix/legacy.hh" - -using namespace nix; - -std::string deleteOlderThan; -bool dryRun = false; - -/* If `-d' was specified, remove all old generations of all profiles. - * Of course, this makes rollbacks to before this point in time - * impossible. */ - -void removeOldGenerations(const std::string& dir) { - if (access(dir.c_str(), R_OK) != 0) { - return; - } - - bool canWrite = access(dir.c_str(), W_OK) == 0; - - for (auto& i : readDirectory(dir)) { - checkInterrupt(); - - auto path = dir + "/" + i.name; - auto type = i.type == DT_UNKNOWN ? getFileType(path) : i.type; - - if (type == DT_LNK && canWrite) { - std::string link; - try { - link = readLink(path); - } catch (SysError& e) { - if (e.errNo == ENOENT) { - continue; - } - } - if (link.find("link") != std::string::npos) { - LOG(INFO) << "removing old generations of profile " << path; - if (!deleteOlderThan.empty()) { - deleteGenerationsOlderThan(path, deleteOlderThan, dryRun); - } else { - deleteOldGenerations(path, dryRun); - } - } - } else if (type == DT_DIR) { - removeOldGenerations(path); - } - } -} - -static int _main(int argc, char** argv) { - { - bool removeOld = false; - - GCOptions options; - - parseCmdLine(argc, argv, - [&](Strings::iterator& arg, const Strings::iterator& end) { - if (*arg == "--help") { - showManPage("nix-collect-garbage"); - } else if (*arg == "--version") { - printVersion("nix-collect-garbage"); - } else if (*arg == "--delete-old" || *arg == "-d") { - removeOld = true; - } else if (*arg == "--delete-older-than") { - removeOld = true; - deleteOlderThan = getArg(*arg, arg, end); - } else if (*arg == "--dry-run") { - dryRun = true; - } else if (*arg == "--max-freed") { - auto maxFreed = getIntArg(*arg, arg, end, true); - options.maxFreed = maxFreed >= 0 ? maxFreed : 0; - } else { - return false; - } - return true; - }); - - auto profilesDir = settings.nixStateDir + "/profiles"; - if (removeOld) { - removeOldGenerations(profilesDir); - } - - // Run the actual garbage collector. 
- if (!dryRun) { - auto store = openStore(); - options.action = GCOptions::gcDeleteDead; - GCResults results; - PrintFreed freed(true, results); - store->collectGarbage(options, results); - } - - return 0; - } -} - -static RegisterLegacyCommand s1("nix-collect-garbage", _main); diff --git a/third_party/nix/src/nix-copy-closure/nix-copy-closure.cc b/third_party/nix/src/nix-copy-closure/nix-copy-closure.cc deleted file mode 100644 index 3dbe29f224..0000000000 --- a/third_party/nix/src/nix-copy-closure/nix-copy-closure.cc +++ /dev/null @@ -1,73 +0,0 @@ -#include - -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "nix/legacy.hh" - -using namespace nix; - -static int _main(int argc, char** argv) { - { - auto gzip = false; - auto toMode = true; - auto includeOutputs = false; - auto dryRun = false; - auto useSubstitutes = NoSubstitute; - std::string sshHost; - PathSet storePaths; - - parseCmdLine( - argc, argv, [&](Strings::iterator& arg, const Strings::iterator& end) { - if (*arg == "--help") { - showManPage("nix-copy-closure"); - } else if (*arg == "--version") { - printVersion("nix-copy-closure"); - } else if (*arg == "--gzip" || *arg == "--bzip2" || *arg == "--xz") { - if (*arg != "--gzip") { - LOG(WARNING) << "'" << *arg - << "' is not implemented, falling back to gzip"; - } - gzip = true; - } else if (*arg == "--from") { - toMode = false; - } else if (*arg == "--to") { - toMode = true; - } else if (*arg == "--include-outputs") { - includeOutputs = true; - } else if (*arg == "--show-progress") { - LOG(WARNING) << "'--show-progress' is not implemented"; - } else if (*arg == "--dry-run") { - dryRun = true; - } else if (*arg == "--use-substitutes" || *arg == "-s") { - useSubstitutes = Substitute; - } else if (sshHost.empty()) { - sshHost = *arg; - } else { - storePaths.insert(*arg); - } - return true; - }); - - if (sshHost.empty()) { - throw UsageError("no host name specified"); - } - - auto remoteUri = "ssh://" + sshHost + (gzip ? "?compress=true" : ""); - auto to = toMode ? openStore(remoteUri) : openStore(); - auto from = toMode ? openStore() : openStore(remoteUri); - - PathSet storePaths2; - for (auto& path : storePaths) { - storePaths2.insert(from->followLinksToStorePath(path)); - } - - PathSet closure; - from->computeFSClosure(storePaths2, closure, false, includeOutputs); - - copyPaths(from, to, closure, NoRepair, NoCheckSigs, useSubstitutes); - - return 0; - } -} - -static RegisterLegacyCommand s1("nix-copy-closure", _main); diff --git a/third_party/nix/src/nix-daemon/CMakeLists.txt b/third_party/nix/src/nix-daemon/CMakeLists.txt deleted file mode 100644 index 63125a9b26..0000000000 --- a/third_party/nix/src/nix-daemon/CMakeLists.txt +++ /dev/null @@ -1,29 +0,0 @@ -# -*- mode: cmake; -*- - -# The nix-daemon is the binary running the gRPC server component to -# which other components of Nix talk to perform store and builder -# operations. 
- -add_executable(nix-daemon) -include_directories(${PROJECT_BINARY_DIR}) # for config.h -set_property(TARGET nix-daemon PROPERTY CXX_STANDARD 17) - -pkg_check_modules(systemd REQUIRED) - -target_sources(nix-daemon - PRIVATE - nix-daemon-proto.hh - nix-daemon-proto.cc - nix-daemon.cc -) - -target_link_libraries(nix-daemon - nixutil - nixstore - nixmain - absl::flags - absl::flags_parse - systemd -) - -install(TARGETS nix-daemon DESTINATION bin) diff --git a/third_party/nix/src/nix-daemon/nix-daemon-legacy.cc b/third_party/nix/src/nix-daemon/nix-daemon-legacy.cc deleted file mode 100644 index 97cf5195d3..0000000000 --- a/third_party/nix/src/nix-daemon/nix-daemon-legacy.cc +++ /dev/null @@ -1,1185 +0,0 @@ -/* - NOTE: You are looking at the *previous* implementation of the Nix - daemon. This file is not in use, is only left in here for reference - and will be deleted from the codebase eventually. - */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libmain/shared.hh" -#include "libproto/worker.pb.h" -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/local-store.hh" -#include "libstore/worker-protocol.hh" -#include "libutil/affinity.hh" -#include "libutil/archive.hh" -#include "libutil/finally.hh" -#include "libutil/monitor-fd.hh" -#include "libutil/serialise.hh" -#include "libutil/util.hh" -#include "nix/legacy.hh" - -using namespace nix; - -#ifndef __linux__ -#define SPLICE_F_MOVE 0 -static ssize_t splice(int fd_in, void* off_in, int fd_out, void* off_out, - size_t len, unsigned int flags) { - /* We ignore most parameters, we just have them for conformance with the linux - * syscall */ - std::vector buf(8192); - auto read_count = read(fd_in, buf.data(), buf.size()); - if (read_count == -1) { - return read_count; - } - auto write_count = decltype(read_count)(0); - while (write_count < read_count) { - auto res = - write(fd_out, buf.data() + write_count, read_count - write_count); - if (res == -1) { - return res; - } - write_count += res; - } - return read_count; -} -#endif - -static FdSource from(STDIN_FILENO); -static FdSink to(STDOUT_FILENO); - -/* Logger that forwards log messages to the client, *if* we're in a - state where the protocol allows it (i.e., when canSendStderr is - true). */ -struct TunnelLogger { - struct State { - bool canSendStderr = false; - std::vector pendingMsgs; - }; - - Sync state_; - - unsigned int clientVersion; - - explicit TunnelLogger(unsigned int clientVersion) - : clientVersion(clientVersion) {} - - void enqueueMsg(const std::string& s) { - auto state(state_.lock()); - - if (state->canSendStderr) { - assert(state->pendingMsgs.empty()); - try { - to(s); - to.flush(); - } catch (...) { - /* Write failed; that means that the other side is - gone. */ - state->canSendStderr = false; - throw; - } - } else { - state->pendingMsgs.push_back(s); - } - } - - void log(const FormatOrString& fs) { - StringSink buf; - buf << STDERR_NEXT << (fs.s + "\n"); - enqueueMsg(*buf.s); - } - - /* startWork() means that we're starting an operation for which we - want to send out stderr to the client. */ - void startWork() { - auto state(state_.lock()); - state->canSendStderr = true; - - for (auto& msg : state->pendingMsgs) { - to(msg); - } - - state->pendingMsgs.clear(); - - to.flush(); - } - - /* stopWork() means that we're done; stop sending stderr to the - client. 
*/ - void stopWork(bool success = true, const std::string& msg = "", - unsigned int status = 0) { - auto state(state_.lock()); - - state->canSendStderr = false; - - if (success) { - to << STDERR_LAST; - } else { - to << STDERR_ERROR << msg; - if (status != 0) { - to << status; - } - } - } - - void startActivity(const std::string& s) { - DLOG(INFO) << "startActivity(" << s << ")"; - if (GET_PROTOCOL_MINOR(clientVersion) < 20) { - if (!s.empty()) { - LOG(INFO) << s; - } - return; - } - - StringSink buf; - buf << STDERR_START_ACTIVITY << s; - enqueueMsg(*buf.s); - } -}; - -struct TunnelSink : Sink { - Sink& to; - explicit TunnelSink(Sink& to) : to(to) {} - void operator()(const unsigned char* data, size_t len) override { - to << STDERR_WRITE; - writeString(data, len, to); - } -}; - -struct TunnelSource : BufferedSource { - Source& from; - explicit TunnelSource(Source& from) : from(from) {} - - protected: - size_t readUnbuffered(unsigned char* data, size_t len) override { - to << STDERR_READ << len; - to.flush(); - size_t n = readString(data, len, from); - if (n == 0) { - throw EndOfFile("unexpected end-of-file"); - } - return n; - } -}; - -/* If the NAR archive contains a single file at top-level, then save - the contents of the file to `s'. Otherwise barf. */ -struct RetrieveRegularNARSink : ParseSink { - bool regular{true}; - std::string s; - - RetrieveRegularNARSink() {} - - void createDirectory(const Path& path) override { regular = false; } - - void receiveContents(unsigned char* data, unsigned int len) override { - s.append((const char*)data, len); - } - - void createSymlink(const Path& path, const std::string& target) override { - regular = false; - } -}; - -static void performOp(TunnelLogger* logger, const ref& store, - bool trusted, unsigned int clientVersion, Source& from, - Sink& to, unsigned int op) { - switch (op) { - case wopIsValidPath: { - /* 'readStorePath' could raise an error leading to the connection - being closed. To be able to recover from an invalid path error, - call 'startWork' early, and do 'assertStorePath' afterwards so - that the 'Error' exception handler doesn't close the - connection. 
*/ - Path path = readString(from); - logger->startWork(); - store->assertStorePath(path); - bool result = store->isValidPath(path); - logger->stopWork(); - to << static_cast(result); - break; - } - - case wopQueryValidPaths: { - auto paths = readStorePaths(*store, from); - logger->startWork(); - PathSet res = store->queryValidPaths(paths); - logger->stopWork(); - to << res; - break; - } - - case wopHasSubstitutes: { - Path path = readStorePath(*store, from); - logger->startWork(); - PathSet res = store->querySubstitutablePaths({path}); - logger->stopWork(); - to << static_cast(res.find(path) != res.end()); - break; - } - - case wopQuerySubstitutablePaths: { - auto paths = readStorePaths(*store, from); - logger->startWork(); - PathSet res = store->querySubstitutablePaths(paths); - logger->stopWork(); - to << res; - break; - } - - case wopQueryPathHash: { - Path path = readStorePath(*store, from); - logger->startWork(); - auto hash = store->queryPathInfo(path)->narHash; - logger->stopWork(); - to << hash.to_string(Base16, false); - break; - } - - case wopQueryReferences: - case wopQueryReferrers: - case wopQueryValidDerivers: - case wopQueryDerivationOutputs: { - Path path = readStorePath(*store, from); - logger->startWork(); - PathSet paths; - if (op == wopQueryReferences) { - paths = store->queryPathInfo(path)->references; - } else if (op == wopQueryReferrers) { - store->queryReferrers(path, paths); - } else if (op == wopQueryValidDerivers) { - paths = store->queryValidDerivers(path); - } else { - paths = store->queryDerivationOutputs(path); - } - logger->stopWork(); - to << paths; - break; - } - - case wopQueryDerivationOutputNames: { - Path path = readStorePath(*store, from); - logger->startWork(); - StringSet names; - names = store->queryDerivationOutputNames(path); - logger->stopWork(); - to << names; - break; - } - - case wopQueryDeriver: { - Path path = readStorePath(*store, from); - logger->startWork(); - auto deriver = store->queryPathInfo(path)->deriver; - logger->stopWork(); - to << deriver; - break; - } - - case wopQueryPathFromHashPart: { - std::string hashPart = readString(from); - logger->startWork(); - Path path = store->queryPathFromHashPart(hashPart); - logger->stopWork(); - to << path; - break; - } - - case wopAddToStore: { - bool fixed = false; - bool recursive = false; - std::string hashType; - std::string baseName; - from >> baseName >> fixed /* obsolete */ >> recursive >> hashType; - /* Compatibility hack. */ - if (!fixed) { - hashType = "sha256"; - recursive = true; - } - HashType hashAlgo = parseHashType(hashType); - - TeeSource savedNAR(from); - RetrieveRegularNARSink savedRegular; - - if (recursive) { - /* Get the entire NAR dump from the client and save it to - a string so that we can pass it to - addToStoreFromDump(). */ - ParseSink sink; /* null sink; just parse the NAR */ - parseDump(sink, savedNAR); - } else { - parseDump(savedRegular, from); - } - - logger->startWork(); - if (!savedRegular.regular) { - throw Error("regular file expected"); - } - - auto store2 = store.dynamic_pointer_cast(); - if (!store2) { - throw Error("operation is only supported by LocalStore"); - } - - Path path = store2->addToStoreFromDump( - recursive ? 
*savedNAR.data : savedRegular.s, baseName, recursive, - hashAlgo); - logger->stopWork(); - - to << path; - break; - } - - case wopAddTextToStore: { - std::string suffix = readString(from); - std::string s = readString(from); - auto refs = readStorePaths(*store, from); - logger->startWork(); - Path path = store->addTextToStore(suffix, s, refs, NoRepair); - logger->stopWork(); - to << path; - break; - } - - case wopExportPath: { - Path path = readStorePath(*store, from); - readInt(from); // obsolete - logger->startWork(); - TunnelSink sink(to); - store->exportPath(path, sink); - logger->stopWork(); - to << 1; - break; - } - - case wopImportPaths: { - logger->startWork(); - TunnelSource source(from); - Paths paths = store->importPaths(source, nullptr, - trusted ? NoCheckSigs : CheckSigs); - logger->stopWork(); - to << paths; - break; - } - - case wopBuildPaths: { - auto drvs = readStorePaths(*store, from); - BuildMode mode = bmNormal; - if (GET_PROTOCOL_MINOR(clientVersion) >= 15) { - mode = (BuildMode)readInt(from); - - /* Repairing is not atomic, so disallowed for "untrusted" - clients. */ - if (mode == bmRepair && !trusted) { - throw Error( - "repairing is not allowed because you are not in " - "'trusted-users'"); - } - } - logger->startWork(); - store->buildPaths(drvs, mode); - logger->stopWork(); - to << 1; - break; - } - - case wopBuildDerivation: { - Path drvPath = readStorePath(*store, from); - BasicDerivation drv; - readDerivation(from, *store, drv); - auto buildMode = (BuildMode)readInt(from); - logger->startWork(); - if (!trusted) { - throw Error("you are not privileged to build derivations"); - } - auto res = store->buildDerivation(drvPath, drv, buildMode); - logger->stopWork(); - to << res.status << res.errorMsg; - break; - } - - case wopEnsurePath: { - Path path = readStorePath(*store, from); - logger->startWork(); - store->ensurePath(path); - logger->stopWork(); - to << 1; - break; - } - - case wopAddTempRoot: { - Path path = readStorePath(*store, from); - logger->startWork(); - store->addTempRoot(path); - logger->stopWork(); - to << 1; - break; - } - - case wopAddIndirectRoot: { - Path path = absPath(readString(from)); - logger->startWork(); - store->addIndirectRoot(path); - logger->stopWork(); - to << 1; - break; - } - - case wopSyncWithGC: { - logger->startWork(); - store->syncWithGC(); - logger->stopWork(); - to << 1; - break; - } - - case wopFindRoots: { - logger->startWork(); - Roots roots = store->findRoots(!trusted); - logger->stopWork(); - - size_t size = 0; - for (auto& i : roots) { - size += i.second.size(); - } - - to << size; - - for (auto& [target, links] : roots) { - for (auto& link : links) { - to << link << target; - } - } - - break; - } - - case wopCollectGarbage: { - GCOptions options; - options.action = (GCOptions::GCAction)readInt(from); - options.pathsToDelete = readStorePaths(*store, from); - from >> options.ignoreLiveness >> options.maxFreed; - // obsolete fields - readInt(from); - readInt(from); - readInt(from); - - GCResults results; - - logger->startWork(); - if (options.ignoreLiveness) { - throw Error("you are not allowed to ignore liveness"); - } - store->collectGarbage(options, results); - logger->stopWork(); - - to << results.paths << results.bytesFreed << 0 /* obsolete */; - - break; - } - - case wopSetOptions: { - settings.keepFailed = readInt(from) != 0u; - settings.keepGoing = readInt(from) != 0u; - settings.tryFallback = readInt(from) != 0u; - readInt(from); // obsolete verbosity - settings.maxBuildJobs.assign(readInt(from)); - 
settings.maxSilentTime = readInt(from); - readInt(from); // obsolete useBuildHook - settings.verboseBuild = 0 == readInt(from); - readInt(from); // obsolete logType - readInt(from); // obsolete printBuildTrace - settings.buildCores = readInt(from); - settings.useSubstitutes = readInt(from) != 0u; - - StringMap overrides; - if (GET_PROTOCOL_MINOR(clientVersion) >= 12) { - unsigned int n = readInt(from); - for (unsigned int i = 0; i < n; i++) { - std::string name = readString(from); - std::string value = readString(from); - overrides.emplace(name, value); - } - } - - logger->startWork(); - - for (auto& i : overrides) { - auto& name(i.first); - auto& value(i.second); - - auto setSubstituters = [&](Setting& res) { - if (name != res.name && res.aliases.count(name) == 0) { - return false; - } - StringSet trusted = settings.trustedSubstituters; - for (auto& s : settings.substituters.get()) { - trusted.insert(s); - } - Strings subs; - Strings ss = absl::StrSplit(value, absl::ByAnyChar(" \t\n\r"), - absl::SkipEmpty()); - for (auto& s : ss) { - if (trusted.count(s) != 0u) { - subs.push_back(s); - } else { - LOG(WARNING) << "ignoring untrusted substituter '" << s << "'"; - } - } - res = subs; - return true; - }; - - try { - if (name == "ssh-auth-sock") { // obsolete - ; - } else if (trusted || name == settings.buildTimeout.name || - name == "connect-timeout" || - (name == "builders" && value.empty())) { - settings.set(name, value); - } else if (setSubstituters(settings.substituters)) { - ; - } else if (setSubstituters(settings.extraSubstituters)) { - ; - } else { - LOG(WARNING) << "ignoring the user-specified setting '" << name - << "', because it is a " - << "restricted setting and you are not a trusted user"; - } - } catch (UsageError& e) { - LOG(WARNING) << e.what(); - } - } - - logger->stopWork(); - break; - } - - case wopQuerySubstitutablePathInfo: { - Path path = absPath(readString(from)); - logger->startWork(); - SubstitutablePathInfos infos; - store->querySubstitutablePathInfos({path}, infos); - logger->stopWork(); - auto i = infos.find(path); - if (i == infos.end()) { - to << 0; - } else { - to << 1 << i->second.deriver << i->second.references - << i->second.downloadSize << i->second.narSize; - } - break; - } - - case wopQuerySubstitutablePathInfos: { - auto paths = readStorePaths(*store, from); - logger->startWork(); - SubstitutablePathInfos infos; - store->querySubstitutablePathInfos(paths, infos); - logger->stopWork(); - to << infos.size(); - for (auto& i : infos) { - to << i.first << i.second.deriver << i.second.references - << i.second.downloadSize << i.second.narSize; - } - break; - } - - case wopQueryAllValidPaths: { - logger->startWork(); - PathSet paths = store->queryAllValidPaths(); - logger->stopWork(); - to << paths; - break; - } - - case wopQueryPathInfo: { - Path path = readStorePath(*store, from); - std::shared_ptr info; - logger->startWork(); - try { - info = store->queryPathInfo(path); - } catch (InvalidPath&) { - if (GET_PROTOCOL_MINOR(clientVersion) < 17) { - throw; - } - } - logger->stopWork(); - if (info) { - if (GET_PROTOCOL_MINOR(clientVersion) >= 17) { - to << 1; - } - to << info->deriver << info->narHash.to_string(Base16, false) - << info->references << info->registrationTime << info->narSize; - if (GET_PROTOCOL_MINOR(clientVersion) >= 16) { - to << static_cast(info->ultimate) << info->sigs << info->ca; - } - } else { - assert(GET_PROTOCOL_MINOR(clientVersion) >= 17); - to << 0; - } - break; - } - - case wopOptimiseStore: { - logger->startWork(); - 
store->optimiseStore(); - logger->stopWork(); - to << 1; - break; - } - - case wopVerifyStore: { - bool checkContents; - bool repair; - from >> checkContents >> repair; - logger->startWork(); - if (repair && !trusted) { - throw Error("you are not privileged to repair paths"); - } - bool errors = store->verifyStore(checkContents, (RepairFlag)repair); - logger->stopWork(); - to << static_cast(errors); - break; - } - - case wopAddSignatures: { - Path path = readStorePath(*store, from); - auto sigs = readStrings(from); - logger->startWork(); - if (!trusted) { - throw Error("you are not privileged to add signatures"); - } - store->addSignatures(path, sigs); - logger->stopWork(); - to << 1; - break; - } - - case wopNarFromPath: { - auto path = readStorePath(*store, from); - logger->startWork(); - logger->stopWork(); - dumpPath(path, to); - break; - } - - case wopAddToStoreNar: { - bool repair; - bool dontCheckSigs; - ValidPathInfo info; - info.path = readStorePath(*store, from); - from >> info.deriver; - if (!info.deriver.empty()) { - store->assertStorePath(info.deriver); - } - info.narHash = Hash(readString(from), htSHA256); - info.references = readStorePaths(*store, from); - from >> info.registrationTime >> info.narSize >> info.ultimate; - info.sigs = readStrings(from); - from >> info.ca >> repair >> dontCheckSigs; - if (!trusted && dontCheckSigs) { - dontCheckSigs = false; - } - if (!trusted) { - info.ultimate = false; - } - - std::string saved; - std::unique_ptr source; - if (GET_PROTOCOL_MINOR(clientVersion) >= 21) { - source = std::make_unique(from); - } else { - TeeSink tee(from); - parseDump(tee, tee.source); - saved = std::move(*tee.source.data); - source = std::make_unique(saved); - } - - logger->startWork(); - - // FIXME: race if addToStore doesn't read source? - store->addToStore(info, *source, (RepairFlag)repair, - dontCheckSigs ? NoCheckSigs : CheckSigs, nullptr); - - logger->stopWork(); - break; - } - - case wopQueryMissing: { - auto targets = readStorePaths(*store, from); - logger->startWork(); - PathSet willBuild; - PathSet willSubstitute; - PathSet unknown; - unsigned long long downloadSize; - unsigned long long narSize; - store->queryMissing(targets, willBuild, willSubstitute, unknown, - downloadSize, narSize); - logger->stopWork(); - to << willBuild << willSubstitute << unknown << downloadSize << narSize; - break; - } - - default: - throw Error(format("invalid operation %1%") % op); - } -} - -static void processConnection(bool trusted, const std::string& userName, - uid_t userId) { - MonitorFdHup monitor(from.fd); - - /* Exchange the greeting. */ - unsigned int magic = readInt(from); - if (magic != WORKER_MAGIC_1) { - throw Error("protocol mismatch"); - } - to << WORKER_MAGIC_2 << PROTOCOL_VERSION; - to.flush(); - unsigned int clientVersion = readInt(from); - - if (clientVersion < 0x10a) { - throw Error("the Nix client version is too old"); - } - - auto tunnelLogger = new TunnelLogger(clientVersion); - // logger = tunnelLogger; - - unsigned int opCount = 0; - - Finally finally([&]() { - _isInterrupted = false; - DLOG(INFO) << opCount << " operations"; - }); - - if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && (readInt(from) != 0u)) { - setAffinityTo(readInt(from)); - } - - readInt(from); // obsolete reserveSpace - - /* Send startup error messages to the client. */ - tunnelLogger->startWork(); - - try { - /* If we can't accept clientVersion, then throw an error - *here* (not above). */ - -#if 0 - /* Prevent users from doing something very dangerous. 
*/ - if (geteuid() == 0 && - querySetting("build-users-group", "") == "") - throw Error("if you run 'nix-daemon' as root, then you MUST set 'build-users-group'!"); -#endif - - /* Open the store. */ - Store::Params params; // FIXME: get params from somewhere - // Disable caching since the client already does that. - params["path-info-cache-size"] = "0"; - auto store = openStore(settings.storeUri, params); - - store->createUser(userName, userId); - - tunnelLogger->stopWork(); - to.flush(); - - /* Process client requests. */ - while (true) { - WorkerOp op; - try { - op = (WorkerOp)readInt(from); - } catch (Interrupted& e) { - break; - } catch (EndOfFile& e) { - break; - } - - opCount++; - - try { - performOp(tunnelLogger, store, trusted, clientVersion, from, to, op); - } catch (Error& e) { - /* If we're not in a state where we can send replies, then - something went wrong processing the input of the - client. This can happen especially if I/O errors occur - during addTextToStore() / importPath(). If that - happens, just send the error message and exit. */ - bool errorAllowed = tunnelLogger->state_.lock()->canSendStderr; - tunnelLogger->stopWork(false, e.msg(), e.status); - if (!errorAllowed) { - throw; - } - } catch (std::bad_alloc& e) { - tunnelLogger->stopWork(false, "Nix daemon out of memory", 1); - throw; - } - - to.flush(); - - assert(!tunnelLogger->state_.lock()->canSendStderr); - }; - - } catch (std::exception& e) { - tunnelLogger->stopWork(false, e.what(), 1); - to.flush(); - return; - } -} - -static void sigChldHandler(int sigNo) { - // Ensure we don't modify errno of whatever we've interrupted - auto saved_errno = errno; - /* Reap all dead children. */ - while (waitpid(-1, nullptr, WNOHANG) > 0) { - ; - } - errno = saved_errno; -} - -static void setSigChldAction(bool autoReap) { - struct sigaction act; - struct sigaction oact; - act.sa_handler = autoReap ? sigChldHandler : SIG_DFL; - sigfillset(&act.sa_mask); - act.sa_flags = 0; - if (sigaction(SIGCHLD, &act, &oact) != 0) { - throw SysError("setting SIGCHLD handler"); - } -} - -bool matchUser(const std::string& user, const std::string& group, - const Strings& users) { - if (find(users.begin(), users.end(), "*") != users.end()) { - return true; - } - - if (find(users.begin(), users.end(), user) != users.end()) { - return true; - } - - for (auto& i : users) { - if (std::string(i, 0, 1) == "@") { - if (group == std::string(i, 1)) { - return true; - } - struct group* gr = getgrnam(i.c_str() + 1); - if (gr == nullptr) { - continue; - } - for (char** mem = gr->gr_mem; *mem != nullptr; mem++) { - if (user == std::string(*mem)) { - return true; - } - } - } - } - - return false; -} - -struct PeerInfo { - bool pidKnown; - pid_t pid; - bool uidKnown; - uid_t uid; - bool gidKnown; - gid_t gid; -}; - -/* Get the identity of the caller, if possible. 
*/ -static PeerInfo getPeerInfo(int remote) { - PeerInfo peer = {false, 0, false, 0, false, 0}; - -#if defined(SO_PEERCRED) - - ucred cred; - socklen_t credLen = sizeof(cred); - if (getsockopt(remote, SOL_SOCKET, SO_PEERCRED, &cred, &credLen) == -1) { - throw SysError("getting peer credentials"); - } - peer = {true, cred.pid, true, cred.uid, true, cred.gid}; - -#elif defined(LOCAL_PEERCRED) - -#if !defined(SOL_LOCAL) -#define SOL_LOCAL 0 -#endif - - xucred cred; - socklen_t credLen = sizeof(cred); - if (getsockopt(remote, SOL_LOCAL, LOCAL_PEERCRED, &cred, &credLen) == -1) - throw SysError("getting peer credentials"); - peer = {false, 0, true, cred.cr_uid, false, 0}; - -#endif - - return peer; -} - -#define SD_LISTEN_FDS_START 3 - -static void daemonLoop(char** argv) { - if (chdir("/") == -1) { - throw SysError("cannot change current directory"); - } - - /* Get rid of children automatically; don't let them become - zombies. */ - setSigChldAction(true); - - AutoCloseFD fdSocket; - - /* Handle socket-based activation by systemd. */ - if (!getEnv("LISTEN_FDS").empty()) { - if (getEnv("LISTEN_PID") != std::to_string(getpid()) || - getEnv("LISTEN_FDS") != "1") { - throw Error("unexpected systemd environment variables"); - } - fdSocket = SD_LISTEN_FDS_START; - } - - /* Otherwise, create and bind to a Unix domain socket. */ - else { - /* Create and bind to a Unix domain socket. */ - fdSocket = socket(PF_UNIX, SOCK_STREAM, 0); - if (!fdSocket) { - throw SysError("cannot create Unix domain socket"); - } - - std::string socketPath = settings.nixDaemonSocketFile; - - createDirs(dirOf(socketPath)); - - /* Urgh, sockaddr_un allows path names of only 108 characters. - So chdir to the socket directory so that we can pass a - relative path name. */ - if (chdir(dirOf(socketPath).c_str()) == -1) { - throw SysError("cannot change current directory"); - } - Path socketPathRel = "./" + baseNameOf(socketPath); - - struct sockaddr_un addr; - addr.sun_family = AF_UNIX; - strncpy(addr.sun_path, socketPathRel.c_str(), sizeof(addr.sun_path)); - if (addr.sun_path[sizeof(addr.sun_path) - 1] != '\0') { - throw Error(format("socket path '%1%' is too long") % socketPathRel); - } - - unlink(socketPath.c_str()); - - /* Make sure that the socket is created with 0666 permission - (everybody can connect --- provided they have access to the - directory containing the socket). */ - mode_t oldMode = umask(0111); - int res = bind(fdSocket.get(), (struct sockaddr*)&addr, sizeof(addr)); - umask(oldMode); - if (res == -1) { - throw SysError(format("cannot bind to socket '%1%'") % socketPath); - } - - if (chdir("/") == -1) { /* back to the root */ - throw SysError("cannot change current directory"); - } - - if (listen(fdSocket.get(), 5) == -1) { - throw SysError(format("cannot listen on socket '%1%'") % socketPath); - } - } - - closeOnExec(fdSocket.get()); - - /* Loop accepting connections. */ - while (true) { - try { - /* Accept a connection. */ - struct sockaddr_un remoteAddr; - socklen_t remoteAddrLen = sizeof(remoteAddr); - - AutoCloseFD remote = - accept(fdSocket.get(), (struct sockaddr*)&remoteAddr, &remoteAddrLen); - checkInterrupt(); - if (!remote) { - if (errno == EINTR) { - continue; - } - throw SysError("accepting connection"); - } - - closeOnExec(remote.get()); - - bool trusted = false; - PeerInfo peer = getPeerInfo(remote.get()); - - struct passwd* pw = peer.uidKnown ? getpwuid(peer.uid) : nullptr; - std::string user = pw != nullptr ? pw->pw_name : std::to_string(peer.uid); - - struct group* gr = peer.gidKnown ? 
getgrgid(peer.gid) : nullptr; - std::string group = - gr != nullptr ? gr->gr_name : std::to_string(peer.gid); - - Strings trustedUsers = settings.trustedUsers; - Strings allowedUsers = settings.allowedUsers; - - if (matchUser(user, group, trustedUsers)) { - trusted = true; - } - - if ((!trusted && !matchUser(user, group, allowedUsers)) || - group == settings.buildUsersGroup) { - throw Error( - format("user '%1%' is not allowed to connect to the Nix daemon") % - user); - } - - LOG(INFO) << "accepted connection from pid " - << (peer.pidKnown ? std::to_string(peer.pid) : "") - << ", user " << (peer.uidKnown ? user : "") - << (trusted ? " (trusted)" : ""); - - /* Fork a child to handle the connection. */ - ProcessOptions options; - options.errorPrefix = "unexpected Nix daemon error: "; - options.dieWithParent = false; - options.runExitHandlers = true; - startProcess( - [&]() { - fdSocket = -1; - - /* Background the daemon. */ - if (setsid() == -1) { - throw SysError(format("creating a new session")); - } - - /* Restore normal handling of SIGCHLD. */ - setSigChldAction(false); - - /* For debugging, stuff the pid into argv[1]. */ - if (peer.pidKnown && (argv[1] != nullptr)) { - std::string processName = std::to_string(peer.pid); - strncpy(argv[1], processName.c_str(), strlen(argv[1])); - } - - /* Handle the connection. */ - from.fd = remote.get(); - to.fd = remote.get(); - processConnection(trusted, user, peer.uid); - - exit(0); - }, - options); - - } catch (Interrupted& e) { - return; - } catch (Error& e) { - LOG(ERROR) << "error processing connection: " << e.msg(); - } - } -} - -static int _main(int argc, char** argv) { - { - auto stdio = false; - - parseCmdLine(argc, argv, - [&](Strings::iterator& arg, const Strings::iterator& end) { - if (*arg == "--daemon") { - ; /* ignored for backwards compatibility */ - } else if (*arg == "--help") { - showManPage("nix-daemon"); - } else if (*arg == "--version") { - printVersion("nix-daemon"); - } else if (*arg == "--stdio") { - stdio = true; - } else { - return false; - } - return true; - }); - - if (stdio) { - if (getStoreType() == tDaemon) { - /* Forward on this connection to the real daemon */ - auto socketPath = settings.nixDaemonSocketFile; - auto s = socket(PF_UNIX, SOCK_STREAM, 0); - if (s == -1) { - throw SysError("creating Unix domain socket"); - } - - auto socketDir = dirOf(socketPath); - if (chdir(socketDir.c_str()) == -1) { - throw SysError(format("changing to socket directory '%1%'") % - socketDir); - } - - auto socketName = baseNameOf(socketPath); - auto addr = sockaddr_un{}; - addr.sun_family = AF_UNIX; - strncpy(addr.sun_path, socketName.c_str(), sizeof(addr.sun_path)); - if (addr.sun_path[sizeof(addr.sun_path) - 1] != '\0') { - throw Error(format("socket name %1% is too long") % socketName); - } - - if (connect(s, (struct sockaddr*)&addr, sizeof(addr)) == -1) { - throw SysError(format("cannot connect to daemon at %1%") % - socketPath); - } - - auto nfds = (s > STDIN_FILENO ? 
s : STDIN_FILENO) + 1; - while (true) { - fd_set fds; - FD_ZERO(&fds); - FD_SET(s, &fds); - FD_SET(STDIN_FILENO, &fds); - if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1) { - throw SysError("waiting for data from client or server"); - } - if (FD_ISSET(s, &fds)) { - auto res = splice(s, nullptr, STDOUT_FILENO, nullptr, SSIZE_MAX, - SPLICE_F_MOVE); - if (res == -1) { - throw SysError("splicing data from daemon socket to stdout"); - } - if (res == 0) { - throw EndOfFile("unexpected EOF from daemon socket"); - } - } - if (FD_ISSET(STDIN_FILENO, &fds)) { - auto res = splice(STDIN_FILENO, nullptr, s, nullptr, SSIZE_MAX, - SPLICE_F_MOVE); - if (res == -1) { - throw SysError("splicing data from stdin to daemon socket"); - } - if (res == 0) { - return 0; - } - } - } - } else { - processConnection(true, "root", 0); - } - } else { - daemonLoop(argv); - } - - return 0; - } -} - -static RegisterLegacyCommand s1("nix-daemon", _main); diff --git a/third_party/nix/src/nix-daemon/nix-daemon-proto.cc b/third_party/nix/src/nix-daemon/nix-daemon-proto.cc deleted file mode 100644 index d6498e77c2..0000000000 --- a/third_party/nix/src/nix-daemon/nix-daemon-proto.cc +++ /dev/null @@ -1,799 +0,0 @@ -#include "nix-daemon-proto.hh" - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libmain/shared.hh" -#include "libproto/worker.grpc.pb.h" -#include "libproto/worker.pb.h" -#include "libstore/derivations.hh" -#include "libstore/local-store.hh" -#include "libstore/store-api.hh" -#include "libutil/archive.hh" -#include "libutil/hash.hh" -#include "libutil/proto.hh" -#include "libutil/serialise.hh" -#include "libutil/types.hh" - -namespace nix::daemon { - -using ::google::protobuf::util::TimeUtil; -using ::grpc::Status; -using ::nix::proto::PathInfo; -using ::nix::proto::StorePath; -using ::nix::proto::StorePaths; -using ::nix::proto::WorkerService; - -template -class RPCSource final : public Source { - public: - using Reader = grpc::ServerReader; - explicit RPCSource(Reader* reader) : reader_(reader) {} - - size_t read(unsigned char* data, size_t len) override { - auto got = buffer_.sgetn(reinterpret_cast(data), len); - if (got < len) { - Request msg; - if (!reader_->Read(&msg)) { - return got; - } - if (msg.add_oneof_case() != Request::kData) { - // TODO(grfn): Make Source::read return a StatusOr and get rid of this - // throw - throw Error( - "Invalid AddToStoreRequest: all messages except the first must " - "contain data"); - } - buffer_.sputn(msg.data().data(), msg.data().length()); - return got + read(data + got, len - got); - } - return got; - }; - - private: - std::stringbuf buffer_; - Reader* reader_; -}; - -// TODO(grfn): Make this some sort of pipe so we don't have to store data in -// memory -/* If the NAR archive contains a single file at top-level, then save - the contents of the file to `s'. Otherwise barf. 
*/ -struct RetrieveRegularNARSink : ParseSink { - bool regular{true}; - std::string s; - - RetrieveRegularNARSink() {} - - void createDirectory(const Path& path) override { regular = false; } - - void receiveContents(unsigned char* data, unsigned int len) override { - s.append(reinterpret_cast(data), len); - } - - void createSymlink(const Path& path, const std::string& target) override { - regular = false; - } -}; - -#define ASSERT_INPUT_STORE_PATH(path) \ - if (!store_->isStorePath(path)) { \ - return Status(grpc::StatusCode::INVALID_ARGUMENT, \ - absl::StrFormat("path '%s' is not in the Nix store", path)); \ - } - -class BuildLogStreambuf final : public std::streambuf { - public: - using Writer = grpc::ServerWriter; - explicit BuildLogStreambuf(Writer* writer) : writer_(writer) {} - - // TODO(grfn): buffer with a timeout so we don't have too many messages - std::streamsize xsputn(const char_type* s, std::streamsize n) override { - nix::proto::BuildEvent event; - event.mutable_build_log()->set_line(s, n); - writer_->Write(event); - return n; - } - - int_type overflow(int_type ch) override { - if (ch != traits_type::eof()) { - nix::proto::BuildEvent event; - event.mutable_build_log()->set_line(std::string(1, ch)); - writer_->Write(event); - } - return ch; - } - - private: - Writer* writer_{}; -}; - -class WorkerServiceImpl final : public WorkerService::Service { - public: - WorkerServiceImpl(nix::Store& store) : store_(&store) {} - - Status IsValidPath(grpc::ServerContext* context, const StorePath* request, - nix::proto::IsValidPathResponse* response) override { - return HandleExceptions( - [&]() -> Status { - const auto& path = request->path(); - response->set_is_valid(store_->isValidPath(path)); - - return Status::OK; - }, - __FUNCTION__); - } - - Status HasSubstitutes(grpc::ServerContext* context, const StorePath* request, - nix::proto::HasSubstitutesResponse* response) override { - return HandleExceptions( - [&]() -> Status { - const auto& path = request->path(); - ASSERT_INPUT_STORE_PATH(path); - PathSet res = store_->querySubstitutablePaths({path}); - response->set_has_substitutes(res.find(path) != res.end()); - - return Status::OK; - }, - __FUNCTION__); - } - - Status QueryReferrers(grpc::ServerContext* context, const StorePath* request, - StorePaths* response) override { - return HandleExceptions( - [&]() -> Status { - const auto& path = request->path(); - ASSERT_INPUT_STORE_PATH(path); - - PathSet paths; - store_->queryReferrers(path, paths); - - for (const auto& path : paths) { - response->add_paths(path); - } - - return Status::OK; - }, - __FUNCTION__); - } - - Status AddToStore(grpc::ServerContext* context, - grpc::ServerReader* reader, - nix::proto::StorePath* response) override { - return HandleExceptions( - [&]() -> Status { - proto::AddToStoreRequest metadata_request; - auto has_metadata = reader->Read(&metadata_request); - - if (!has_metadata || !metadata_request.has_meta()) { - return Status(grpc::StatusCode::INVALID_ARGUMENT, - "Metadata must be set before sending file content"); - } - - auto meta = metadata_request.meta(); - RPCSource source(reader); - auto opt_hash_type = hash_type_from(meta.hash_type()); - if (!opt_hash_type) { - return Status(grpc::StatusCode::INVALID_ARGUMENT, - "Invalid hash type"); - } - - std::string* data; - RetrieveRegularNARSink nar; - TeeSource saved_nar(source); - - if (meta.recursive()) { - // TODO(grfn): Don't store the full data in memory, instead just - // make addToStoreFromDump take a Source - ParseSink sink; - parseDump(sink, 
saved_nar); - data = &(*saved_nar.data); - } else { - parseDump(nar, source); - if (!nar.regular) { - return Status(grpc::StatusCode::INVALID_ARGUMENT, - "Regular file expected"); - } - data = &nar.s; - } - - auto local_store = store_.dynamic_pointer_cast(); - if (!local_store) { - return Status(grpc::StatusCode::FAILED_PRECONDITION, - "operation is only supported by LocalStore"); - } - - auto path = local_store->addToStoreFromDump( - *data, meta.base_name(), meta.recursive(), opt_hash_type.value()); - - response->set_path(path); - - return Status::OK; - }, - __FUNCTION__); - } - - Status AddToStoreNar( - grpc::ServerContext* context, - grpc::ServerReader* reader, - google::protobuf::Empty*) override { - return HandleExceptions( - [&]() -> Status { - proto::AddToStoreNarRequest path_info_request; - auto has_path_info = reader->Read(&path_info_request); - if (!has_path_info || !path_info_request.has_path_info()) { - return Status(grpc::StatusCode::INVALID_ARGUMENT, - "Path info must be set before sending nar content"); - } - - auto path_info = path_info_request.path_info(); - - ValidPathInfo info; - info.path = path_info.path().path(); - info.deriver = path_info.deriver().path(); - - if (!info.deriver.empty()) { - ASSERT_INPUT_STORE_PATH(info.deriver); - } - - auto nar_hash = Hash::deserialize(path_info.nar_hash(), htSHA256); - - if (!nar_hash.ok()) { - return Status(grpc::StatusCode::INVALID_ARGUMENT, - std::string(nar_hash.status().message())); - } - - info.narHash = *nar_hash; - for (const auto& ref : path_info.references()) { - info.references.insert(ref); - } - info.registrationTime = - TimeUtil::TimestampToTimeT(path_info.registration_time()); - info.narSize = path_info.nar_size(); - info.ultimate = path_info.ultimate(); - for (const auto& sig : path_info.sigs()) { - info.sigs.insert(sig); - } - info.ca = path_info.ca(); - - auto repair = path_info.repair(); - auto check_sigs = path_info.check_sigs(); - - std::string saved; - RPCSource source(reader); - store_->addToStore(info, source, repair ? Repair : NoRepair, - check_sigs ? 
CheckSigs : NoCheckSigs, nullptr); - - return Status::OK; - }, - __FUNCTION__); - } - - Status AddTextToStore( - grpc::ServerContext*, - grpc::ServerReader* reader, - nix::proto::StorePath* response) override { - return HandleExceptions( - [&]() -> Status { - proto::AddTextToStoreRequest request; - auto has_metadata = reader->Read(&request); - if (!has_metadata || !request.has_meta()) { - return Status(grpc::StatusCode::INVALID_ARGUMENT, - "Metadata must be set before sending content"); - } - - proto::AddTextToStoreRequest_Metadata meta = request.meta(); - - PathSet references; - for (const auto& ref : meta.references()) { - references.insert(ref); - } - - std::string content; - content.reserve(meta.size()); - while (reader->Read(&request)) { - if (request.add_oneof_case() != request.kData) { - return Status(grpc::StatusCode::INVALID_ARGUMENT, - "All requests except the first must contain data"); - } - - content.append(request.data()); - } - - auto path = store_->addTextToStore(meta.name(), content, references); - response->set_path(path); - return Status::OK; - }, - __FUNCTION__); - } - - Status BuildPaths( - grpc::ServerContext*, const nix::proto::BuildPathsRequest* request, - grpc::ServerWriter* writer) override { - return HandleExceptions( - [&]() -> Status { - PathSet drvs; - for (const auto& drv : request->drvs()) { - drvs.insert(drv); - } - auto mode = BuildModeFrom(request->mode()); - - if (!mode.has_value()) { - return Status(grpc::StatusCode::INTERNAL, "Invalid build mode"); - } - - BuildLogStreambuf log_buffer(writer); - std::ostream log_sink(&log_buffer); - - // TODO(grfn): If mode is repair and not trusted, we need to return an - // error here (but we can't yet because we don't know anything about - // trusted users) - return nix::util::proto::AbslToGRPCStatus( - store_->buildPaths(log_sink, drvs, mode.value())); - }, - __FUNCTION__); - } - - Status EnsurePath(grpc::ServerContext* context, - const nix::proto::StorePath* request, - google::protobuf::Empty*) override { - auto path = request->path(); - ASSERT_INPUT_STORE_PATH(path); - return HandleExceptions( - [&]() -> Status { - store_->ensurePath(path); - return Status::OK; - }, - __FUNCTION__); - } - - Status AddTempRoot(grpc::ServerContext*, const nix::proto::StorePath* request, - google::protobuf::Empty*) override { - auto path = request->path(); - ASSERT_INPUT_STORE_PATH(path); - - return HandleExceptions( - [&]() -> Status { - store_->addTempRoot(path); - return Status::OK; - }, - __FUNCTION__); - } - - Status AddIndirectRoot(grpc::ServerContext*, - const nix::proto::StorePath* request, - google::protobuf::Empty*) override { - auto path = std::filesystem::canonical(request->path()); - ASSERT_INPUT_STORE_PATH(path); - - return HandleExceptions( - [&]() -> Status { - store_->addIndirectRoot(path); - return Status::OK; - }, - __FUNCTION__); - } - - Status SyncWithGC(grpc::ServerContext*, const google::protobuf::Empty*, - google::protobuf::Empty*) override { - return HandleExceptions( - [&]() -> Status { - store_->syncWithGC(); - return Status::OK; - }, - __FUNCTION__); - } - - Status FindRoots(grpc::ServerContext*, const google::protobuf::Empty*, - nix::proto::FindRootsResponse* response) override { - return HandleExceptions( - [&]() -> Status { - auto roots = store_->findRoots(false); - for (const auto& [target, links] : roots) { - StorePaths link_paths; - for (const auto& link : links) { - link_paths.add_paths(link); - } - response->mutable_roots()->insert({target, link_paths}); - } - - return Status::OK; - }, - 
__FUNCTION__); - } - - Status CollectGarbage(grpc::ServerContext*, - const proto::CollectGarbageRequest* request, - proto::CollectGarbageResponse* response) override { - return HandleExceptions( - [&]() -> Status { - GCOptions options; - auto action = GCActionFromProto(request->action()); - if (!action.has_value()) { - return Status(grpc::StatusCode::INVALID_ARGUMENT, - "Invalid GC action"); - } - - options.action = action.value(); - for (const auto& path : request->paths_to_delete()) { - options.pathsToDelete.insert(path); - } - options.ignoreLiveness = request->ignore_liveness(); - options.maxFreed = request->max_freed(); - - if (options.ignoreLiveness) { - return Status(grpc::StatusCode::INVALID_ARGUMENT, - "you are not allowed to ignore liveness"); - } - - GCResults results; - store_->collectGarbage(options, results); - - for (const auto& path : results.paths) { - response->add_deleted_paths(path); - } - response->set_bytes_freed(results.bytesFreed); - - return Status::OK; - }, - __FUNCTION__); - } - - Status QuerySubstitutablePathInfos( - grpc::ServerContext*, const StorePaths* request, - nix::proto::SubstitutablePathInfos* response) override { - return HandleExceptions( - [&]() -> Status { - SubstitutablePathInfos infos; - PathSet paths; - for (const auto& path : request->paths()) { - paths.insert(path); - } - store_->querySubstitutablePathInfos(paths, infos); - for (const auto& [path, path_info] : infos) { - auto proto_path_info = response->add_path_infos(); - proto_path_info->mutable_path()->set_path(path); - proto_path_info->mutable_deriver()->set_path(path_info.deriver); - for (const auto& ref : path_info.references) { - proto_path_info->add_references(ref); - } - proto_path_info->set_download_size(path_info.downloadSize); - proto_path_info->set_nar_size(path_info.narSize); - } - - return Status::OK; - }, - __FUNCTION__); - } - - Status QueryValidDerivers(grpc::ServerContext* context, - const StorePath* request, - StorePaths* response) override { - return HandleExceptions( - [&]() -> Status { - const auto& path = request->path(); - ASSERT_INPUT_STORE_PATH(path); - - PathSet paths = store_->queryValidDerivers(path); - - for (const auto& path : paths) { - response->add_paths(path); - } - - return Status::OK; - }, - __FUNCTION__); - } - - Status QueryDerivationOutputs(grpc::ServerContext* context, - const StorePath* request, - StorePaths* response) override { - return HandleExceptions( - [&]() -> Status { - const auto& path = request->path(); - ASSERT_INPUT_STORE_PATH(path); - - PathSet paths = store_->queryDerivationOutputs(path); - - for (const auto& path : paths) { - response->add_paths(path); - } - - return Status::OK; - }, - __FUNCTION__); - } - - Status QueryAllValidPaths(grpc::ServerContext* context, - const google::protobuf::Empty* request, - StorePaths* response) override { - return HandleExceptions( - [&]() -> Status { - const auto paths = store_->queryAllValidPaths(); - for (const auto& path : paths) { - response->add_paths(path); - } - - return Status::OK; - }, - __FUNCTION__); - } - - Status QueryPathInfo(grpc::ServerContext* context, const StorePath* request, - PathInfo* response) override { - return HandleExceptions( - [&]() -> Status { - auto path = request->path(); - ASSERT_INPUT_STORE_PATH(path); - - response->mutable_path()->set_path(path); - try { - auto info = store_->queryPathInfo(path); - response->mutable_deriver()->set_path(info->deriver); - response->set_nar_hash( - reinterpret_cast(&info->narHash.hash[0]), - info->narHash.hashSize); - - for (const auto& 
reference : info->references) { - response->add_references(reference); - } - - *response->mutable_registration_time() = - google::protobuf::util::TimeUtil::TimeTToTimestamp( - info->registrationTime); - - response->set_nar_size(info->narSize); - response->set_ultimate(info->ultimate); - - for (const auto& sig : info->sigs) { - response->add_sigs(sig); - } - - response->set_ca(info->ca); - - return Status::OK; - } catch (InvalidPath& e) { - return Status(grpc::StatusCode::INVALID_ARGUMENT, e.msg()); - } - }, - __FUNCTION__); - } - - Status QueryDerivationOutputNames( - grpc::ServerContext* context, const StorePath* request, - nix::proto::DerivationOutputNames* response) override { - return HandleExceptions( - [&]() -> Status { - auto path = request->path(); - ASSERT_INPUT_STORE_PATH(path); - auto names = store_->queryDerivationOutputNames(path); - for (const auto& name : names) { - response->add_names(name); - } - - return Status::OK; - }, - __FUNCTION__); - } - - Status QueryPathFromHashPart(grpc::ServerContext* context, - const nix::proto::HashPart* request, - StorePath* response) override { - return HandleExceptions( - [&]() -> Status { - auto hash_part = request->hash_part(); - auto path = store_->queryPathFromHashPart(hash_part); - ASSERT_INPUT_STORE_PATH(path); - response->set_path(path); - return Status::OK; - }, - __FUNCTION__); - } - - Status QueryValidPaths(grpc::ServerContext* context, - const StorePaths* request, - StorePaths* response) override { - return HandleExceptions( - [&]() -> Status { - std::set paths; - for (const auto& path : request->paths()) { - ASSERT_INPUT_STORE_PATH(path); - paths.insert(path); - } - - auto res = store_->queryValidPaths(paths); - - for (const auto& path : res) { - response->add_paths(path); - } - - return Status::OK; - }, - __FUNCTION__); - } - - Status QuerySubstitutablePaths(grpc::ServerContext* context, - const StorePaths* request, - StorePaths* response) override { - return HandleExceptions( - [&]() -> Status { - std::set paths; - for (const auto& path : request->paths()) { - ASSERT_INPUT_STORE_PATH(path); - paths.insert(path); - } - - auto res = store_->querySubstitutablePaths(paths); - - for (const auto& path : res) { - response->add_paths(path); - } - - return Status::OK; - }, - __FUNCTION__); - } - - Status OptimiseStore(grpc::ServerContext* context, - const google::protobuf::Empty* request, - google::protobuf::Empty* response) override { - return HandleExceptions( - [&]() -> Status { - store_->optimiseStore(); - return Status::OK; - }, - __FUNCTION__); - } - - Status VerifyStore(grpc::ServerContext* context, - const nix::proto::VerifyStoreRequest* request, - nix::proto::VerifyStoreResponse* response) override { - return HandleExceptions( - [&]() -> Status { - auto errors = - store_->verifyStore(request->check_contents(), - static_cast(request->repair())); - - response->set_errors(errors); - - return Status::OK; - }, - __FUNCTION__); - } - - Status BuildDerivation( - grpc::ServerContext*, const nix::proto::BuildDerivationRequest* request, - grpc::ServerWriter* writer) override { - return HandleExceptions( - [&]() -> Status { - auto drv_path = request->drv_path().path(); - ASSERT_INPUT_STORE_PATH(drv_path); - auto drv = BasicDerivation::from_proto(&request->derivation()); - - auto build_mode = nix::BuildModeFrom(request->build_mode()); - if (!build_mode) { - return Status(grpc::StatusCode::INTERNAL, "Invalid build mode"); - } - - BuildLogStreambuf log_buffer(writer); - std::ostream log_sink(&log_buffer); - BuildResult res = - 
store_->buildDerivation(log_sink, drv_path, drv, *build_mode); - - proto::BuildResult proto_res{}; - proto_res.set_status(res.status_to_proto()); - - if (!res.errorMsg.empty()) { - proto_res.set_msg(res.errorMsg); - } - - proto::BuildEvent event{}; - *event.mutable_result() = proto_res; - - writer->Write(event); - return Status::OK; - }, - __FUNCTION__); - } - - Status AddSignatures(grpc::ServerContext* context, - const nix::proto::AddSignaturesRequest* request, - google::protobuf::Empty* response) override { - return HandleExceptions( - [&]() -> Status { - auto path = request->path().path(); - ASSERT_INPUT_STORE_PATH(path); - - StringSet sigs; - sigs.insert(request->sigs().sigs().begin(), - request->sigs().sigs().end()); - - store_->addSignatures(path, sigs); - - return Status::OK; - }, - __FUNCTION__); - } - - Status QueryMissing(grpc::ServerContext* context, const StorePaths* request, - nix::proto::QueryMissingResponse* response) override { - return HandleExceptions( - [&]() -> Status { - std::set targets; - for (auto& path : request->paths()) { - ASSERT_INPUT_STORE_PATH(path); - targets.insert(path); - } - PathSet will_build; - PathSet will_substitute; - PathSet unknown; - // TODO(grfn): Switch to concrete size type - unsigned long long download_size; - unsigned long long nar_size; - - store_->queryMissing(targets, will_build, will_substitute, unknown, - download_size, nar_size); - for (auto& path : will_build) { - response->add_will_build(path); - } - for (auto& path : will_substitute) { - response->add_will_substitute(path); - } - for (auto& path : unknown) { - response->add_unknown(path); - } - response->set_download_size(download_size); - response->set_nar_size(nar_size); - - return Status::OK; - }, - __FUNCTION__); - }; - - Status GetBuildLog(grpc::ServerContext* context, const StorePath* request, - proto::BuildLog* response) override { - return HandleExceptions( - [&]() -> Status { - const auto log = store_->getBuildLog(request->path()); - if (log) { - response->set_build_log(*log); - } - return Status::OK; - }, - __FUNCTION__); - } - - private: - Status HandleExceptions(std::function fn, - absl::string_view methodName) { - try { - return fn(); - } catch (Unsupported& e) { - return Status(grpc::StatusCode::UNIMPLEMENTED, - absl::StrCat(methodName, " is not supported: ", e.what())); - } catch (Error& e) { - return Status(grpc::StatusCode::INTERNAL, e.what()); - } - // add more specific Error-Status mappings above - } - - ref store_; -}; - -WorkerService::Service* NewWorkerService(nix::Store& store) { - return new WorkerServiceImpl(store); -} - -} // namespace nix::daemon diff --git a/third_party/nix/src/nix-daemon/nix-daemon-proto.hh b/third_party/nix/src/nix-daemon/nix-daemon-proto.hh deleted file mode 100644 index ca871213eb..0000000000 --- a/third_party/nix/src/nix-daemon/nix-daemon-proto.hh +++ /dev/null @@ -1,12 +0,0 @@ -#pragma once - -#include - -#include "libproto/worker.grpc.pb.h" -#include "libstore/store-api.hh" - -namespace nix::daemon { - -nix::proto::WorkerService::Service* NewWorkerService(nix::Store&); - -} // namespace nix::daemon diff --git a/third_party/nix/src/nix-daemon/nix-daemon.cc b/third_party/nix/src/nix-daemon/nix-daemon.cc deleted file mode 100644 index 0551625a3e..0000000000 --- a/third_party/nix/src/nix-daemon/nix-daemon.cc +++ /dev/null @@ -1,201 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libmain/shared.hh" // TODO(tazjin): can this 
be removed? -#include "libstore/globals.hh" -#include "libstore/store-api.hh" -#include "libutil/util.hh" -#include "nix-daemon-proto.hh" -#include "nix-daemon/nix-daemon-proto.hh" -#include "nix/legacy.hh" - -ABSL_FLAG(bool, pipe, false, "Use pipes for daemon communication"); - -namespace nix::daemon { - -using grpc::Server; -using grpc::ServerBuilder; - -namespace { - -// TODO(grfn): There has to be a better way to do this - this was ported -// verbatim from the old daemon implementation without much critical evaluation. -static int ForwardToSocket(nix::Path socket_path) { - // Forward on this connection to the real daemon - int sockfd = socket(PF_UNIX, SOCK_STREAM, 0); - if (sockfd == -1) { - throw SysError("creating Unix domain socket"); - } - - auto socketDir = dirOf(socket_path); - if (chdir(socketDir.c_str()) == -1) { - throw SysError(format("changing to socket directory '%1%'") % socketDir); - } - - auto socketName = baseNameOf(socket_path); - auto addr = sockaddr_un{}; - addr.sun_family = AF_UNIX; - if (socketName.size() + 1 >= sizeof(addr.sun_path)) { - throw Error(format("socket name %1% is too long") % socketName); - } - strncpy(addr.sun_path, socketName.c_str(), sizeof(addr.sun_family)); - - if (connect(sockfd, reinterpret_cast(&addr), - sizeof(addr)) == -1) { - throw SysError(format("cannot connect to daemon at %1%") % socket_path); - } - - auto nfds = (sockfd > STDIN_FILENO ? sockfd : STDIN_FILENO) + 1; - while (true) { - fd_set fds; - FD_ZERO(&fds); - FD_SET(sockfd, &fds); - FD_SET(STDIN_FILENO, &fds); - if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1) { - throw SysError("waiting for data from client or server"); - } - if (FD_ISSET(sockfd, &fds)) { - auto res = splice(sockfd, nullptr, STDOUT_FILENO, nullptr, SSIZE_MAX, - SPLICE_F_MOVE); - if (res == -1) { - throw SysError("splicing data from daemon socket to stdout"); - } - if (res == 0) { - throw EndOfFile("unexpected EOF from daemon socket"); - } - } - if (FD_ISSET(STDIN_FILENO, &fds)) { - auto res = splice(STDIN_FILENO, nullptr, sockfd, nullptr, SSIZE_MAX, - SPLICE_F_MOVE); - if (res == -1) { - throw SysError("splicing data from stdin to daemon socket"); - } - if (res == 0) { - return 0; - } - } - } -} - -void SetNonBlocking(int fd) { - int flags = fcntl(fd, F_GETFL); // NOLINT - PCHECK(flags != 0) << "Error getting socket flags"; - PCHECK(fcntl( // NOLINT - fd, F_SETFL, flags | O_NONBLOCK) == 0) - << "Could not set socket flags"; -} - -} // namespace - -int RunServer() { - Store::Params params; - params["path-info-cache-size"] = "0"; - auto store = openStore(settings.storeUri, params); - auto worker = NewWorkerService(*store); - ServerBuilder builder; - builder.RegisterService(worker); - - auto n_fds = sd_listen_fds(0); - - if (n_fds > 1) { - LOG(FATAL) << "Too many file descriptors (" << n_fds - << ") received from systemd socket activation"; - } - - std::filesystem::path socket_path; - - if (n_fds == 0) { - socket_path = settings.nixDaemonSocketFile; - std::filesystem::create_directories(socket_path.parent_path()); - auto socket_addr = absl::StrFormat("unix://%s", socket_path); - builder.AddListeningPort(socket_addr, grpc::InsecureServerCredentials()); - } - - std::unique_ptr server(builder.BuildAndStart()); - - if (!server) { - LOG(FATAL) << "Error building server"; - return 1; - } - - // We have been systemd socket-activated - instead of asking grpc to make the - // socket path for us, start our own accept loop and pass file descriptors to - // grpc. 
- // - // This approach was *somewhat* adapted from - // https://gist.github.com/yorickvP/8d523a4df2b10c5812fa7789e82b7c1b - at some - // point we'd like gRPC to do it for us, though - see - // https://github.com/grpc/grpc/issues/19133 - if (n_fds == 1) { - int socket_fd = SD_LISTEN_FDS_START; - // Only used for logging - socket_path = readLink(absl::StrFormat("/proc/self/fd/%d", socket_fd)); - - PCHECK(sd_notify(0, "READY=1") == 0) << "Error notifying systemd"; - for (;;) { - try { - struct sockaddr_un remote_addr {}; - socklen_t remote_addr_len = sizeof(remote_addr); - int remote_fd = - accept(socket_fd, - reinterpret_cast(&remote_addr), // NOLINT - &remote_addr_len); - checkInterrupt(); - if (!remote_fd) { - if (errno == EINTR) { - continue; - } - PCHECK(false) << "error accepting connection"; - } - - LOG(INFO) << "Accepted remote connection on fd " << remote_fd; - SetNonBlocking(remote_fd); - grpc::AddInsecureChannelFromFd(server.get(), remote_fd); - } catch (Interrupted& e) { - return -1; - } catch (Error& e) { - LOG(ERROR) << "error processing connection: " << e.msg(); - } - } - } - - LOG(INFO) << "Nix daemon listening at " << socket_path; - server->Wait(); - return 0; -} - -} // namespace nix::daemon - -int main(int argc, char** argv) { // NOLINT - FLAGS_logtostderr = true; - google::InitGoogleLogging(argv[0]); // NOLINT - - absl::SetFlagsUsageConfig({.version_string = [] { return nix::nixVersion; }}); - absl::ParseCommandLine(argc, argv); - - if (absl::GetFlag(FLAGS_pipe)) { - if (nix::getStoreType() == nix::tDaemon) { - return nix::daemon::ForwardToSocket(nix::settings.nixDaemonSocketFile); - } else { - // TODO(grfn): Need to launch a server on stdin here - upstream calls - // processConnection(true, "root", 0); - LOG(ERROR) << "not implemented"; - return 1; - } - } - - return nix::daemon::RunServer(); -} diff --git a/third_party/nix/src/nix-env/nix-env.cc b/third_party/nix/src/nix-env/nix-env.cc deleted file mode 100644 index 15f12abd97..0000000000 --- a/third_party/nix/src/nix-env/nix-env.cc +++ /dev/null @@ -1,1543 +0,0 @@ -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "libexpr/attr-path.hh" -#include "libexpr/common-eval-args.hh" -#include "libexpr/eval.hh" -#include "libexpr/get-drvs.hh" -#include "libexpr/names.hh" -#include "libexpr/value-to-json.hh" -#include "libmain/shared.hh" -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/profiles.hh" -#include "libstore/store-api.hh" -#include "libutil/json.hh" -#include "libutil/status.hh" -#include "libutil/util.hh" -#include "libutil/xml-writer.hh" -#include "nix-env/user-env.hh" -#include "nix/legacy.hh" - -using namespace nix; -using std::cout; - -using InstallSourceType = enum { - srcNixExprDrvs, - srcNixExprs, - srcStorePaths, - srcProfile, - srcAttrPath, - srcUnknown -}; - -struct InstallSourceInfo { - InstallSourceType type; - Path nixExprPath; /* for srcNixExprDrvs, srcNixExprs */ - Path profile; /* for srcProfile */ - std::string systemFilter; /* for srcNixExprDrvs */ - std::unique_ptr autoArgs; -}; - -struct Globals { - InstallSourceInfo instSource; - Path profile; - std::shared_ptr state; - bool dryRun; - bool preserveInstalled; - bool removeAll; - std::string forceName; - bool prebuiltOnly; -}; - -using Operation = void (*)(Globals&, Strings, Strings); - -static std::string needArg(Strings::iterator& i, Strings& args, - const std::string& arg) { - if (i == args.end()) { - throw UsageError(format("'%1%' 
requires an argument") % arg); - } - return *i++; -} - -static bool parseInstallSourceOptions(Globals& globals, Strings::iterator& i, - Strings& args, const std::string& arg) { - if (arg == "--from-expression" || arg == "-E") { - globals.instSource.type = srcNixExprs; - } else if (arg == "--from-profile") { - globals.instSource.type = srcProfile; - globals.instSource.profile = needArg(i, args, arg); - } else if (arg == "--attr" || arg == "-A") { - globals.instSource.type = srcAttrPath; - } else { - return false; - } - return true; -} - -static bool isNixExpr(const Path& path, struct stat& st) { - return S_ISREG(st.st_mode) || - (S_ISDIR(st.st_mode) && pathExists(path + "/default.nix")); -} - -static void getAllExprs(EvalState& state, const Path& path, StringSet& attrs, - Value& v) { - StringSet namesSorted; - for (auto& i : readDirectory(path)) { - namesSorted.insert(i.name); - } - - for (auto& i : namesSorted) { - /* Ignore the manifest.nix used by profiles. This is - necessary to prevent it from showing up in channels (which - are implemented using profiles). */ - if (i == "manifest.nix") { - continue; - } - - Path path2 = path + "/" + i; - - struct stat st; - if (stat(path2.c_str(), &st) == -1) { - continue; // ignore dangling symlinks in ~/.nix-defexpr - } - - if (isNixExpr(path2, st) && - (!S_ISREG(st.st_mode) || absl::EndsWith(path2, ".nix"))) { - /* Strip off the `.nix' filename suffix (if applicable), - otherwise the attribute cannot be selected with the - `-A' option. Useful if you want to stick a Nix - expression directly in ~/.nix-defexpr. */ - std::string attrName = i; - if (absl::EndsWith(attrName, ".nix")) { - attrName = std::string(attrName, 0, attrName.size() - 4); - } - if (attrs.find(attrName) != attrs.end()) { - LOG(WARNING) << "name collision in input Nix expressions, skipping '" - << path2 << "'"; - continue; - } - attrs.insert(attrName); - /* Load the expression on demand. */ - Value& vFun = state.getBuiltin("import"); - Value& vArg(*state.allocValue()); - mkString(vArg, path2); - mkApp(*state.allocAttr(v, state.symbols.Create(attrName)), vFun, vArg); - } else if (S_ISDIR(st.st_mode)) { - /* `path2' is a directory (with no default.nix in it); - recurse into it. */ - getAllExprs(state, path2, attrs, v); - } - } -} - -static void loadSourceExpr(EvalState& state, const Path& path, Value& v) { - struct stat st; - if (stat(path.c_str(), &st) == -1) { - throw SysError(format("getting information about '%1%'") % path); - } - - if (isNixExpr(path, st)) { - state.evalFile(path, v); - } - - /* The path is a directory. Put the Nix expressions in the - directory in a set, with the file name of each expression as - the attribute name. Recurse into subdirectories (but keep the - set flat, not nested, to make it easier for a user to have a - ~/.nix-defexpr directory that includes some system-wide - directory). 
*/ - else if (S_ISDIR(st.st_mode)) { - state.mkAttrs(v, 1024); - state.mkList(*state.allocAttr(v, state.symbols.Create("_combineChannels"))); - StringSet attrs; - getAllExprs(state, path, attrs, v); - } - - else { - throw Error("path '%s' is not a directory or a Nix expression", path); - } -} - -static void loadDerivations(EvalState& state, const Path& nixExprPath, - const std::string& systemFilter, Bindings* autoArgs, - const std::string& pathPrefix, DrvInfos& elems) { - Value vRoot; - loadSourceExpr(state, nixExprPath, vRoot); - - Value& v(*findAlongAttrPath(state, pathPrefix, autoArgs, vRoot)); - - getDerivations(state, v, pathPrefix, autoArgs, elems, true); - - /* Filter out all derivations not applicable to the current - system. */ - for (DrvInfos::iterator i = elems.begin(), j; i != elems.end(); i = j) { - j = i; - j++; - if (systemFilter != "*" && i->querySystem() != systemFilter) { - elems.erase(i); - } - } -} - -static long getPriority(EvalState& state, DrvInfo& drv) { - return drv.queryMetaInt("priority", 0); -} - -static long comparePriorities(EvalState& state, DrvInfo& drv1, DrvInfo& drv2) { - return getPriority(state, drv2) - getPriority(state, drv1); -} - -// FIXME: this function is rather slow since it checks a single path -// at a time. -static bool isPrebuilt(EvalState& state, DrvInfo& elem) { - Path path = elem.queryOutPath(); - if (state.store->isValidPath(path)) { - return true; - } - PathSet ps = state.store->querySubstitutablePaths({path}); - return ps.find(path) != ps.end(); -} - -static void checkSelectorUse(DrvNames& selectors) { - /* Check that all selectors have been used. */ - for (auto& i : selectors) { - if (i.hits == 0 && i.fullName != "*") { - throw Error(format("selector '%1%' matches no derivations") % i.fullName); - } - } -} - -static DrvInfos filterBySelector(EvalState& state, const DrvInfos& allElems, - const Strings& args, bool newestOnly) { - DrvNames selectors = drvNamesFromArgs(args); - if (selectors.empty()) { - selectors.push_back(DrvName("*")); - } - - DrvInfos elems; - std::set done; - - for (auto& i : selectors) { - using Matches = std::list >; - Matches matches; - unsigned int n = 0; - for (auto j = allElems.begin(); j != allElems.end(); ++j, ++n) { - DrvName drvName(j->queryName()); - if (i.matches(drvName)) { - i.hits++; - matches.push_back(std::pair(*j, n)); - } - } - - /* If `newestOnly', if a selector matches multiple derivations - with the same name, pick the one matching the current - system. If there are still multiple derivations, pick the - one with the highest priority. If there are still multiple - derivations, pick the one with the highest version. - Finally, if there are still multiple derivations, - arbitrarily pick the first one. */ - if (newestOnly) { - /* Map from package names to derivations. */ - using Newest = std::map >; - Newest newest; - StringSet multiple; - - for (auto& j : matches) { - DrvName drvName(j.first.queryName()); - long d = 1; - - auto k = newest.find(drvName.name); - - if (k != newest.end()) { - d = j.first.querySystem() == k->second.first.querySystem() ? 0 - : j.first.querySystem() == settings.thisSystem ? 1 - : k->second.first.querySystem() == settings.thisSystem ? 
-1 - : 0; - if (d == 0) { - d = comparePriorities(state, j.first, k->second.first); - } - if (d == 0) { - d = compareVersions(drvName.version, - DrvName(k->second.first.queryName()).version); - } - } - - if (d > 0) { - newest.erase(drvName.name); - newest.insert(Newest::value_type(drvName.name, j)); - multiple.erase(j.first.queryName()); - } else if (d == 0) { - multiple.insert(j.first.queryName()); - } - } - - matches.clear(); - for (auto& j : newest) { - if (multiple.find(j.second.first.queryName()) != multiple.end()) { - LOG(WARNING) << "warning: there are multiple derivations named '" - << j.second.first.queryName() - << "'; using the first one"; - } - matches.push_back(j.second); - } - } - - /* Insert only those elements in the final list that we - haven't inserted before. */ - for (auto& j : matches) { - if (done.find(j.second) == done.end()) { - done.insert(j.second); - elems.push_back(j.first); - } - } - } - - checkSelectorUse(selectors); - - return elems; -} - -static bool isPath(const std::string& s) { - return s.find('/') != std::string::npos; -} - -static void queryInstSources(EvalState& state, InstallSourceInfo& instSource, - const Strings& args, DrvInfos& elems, - bool newestOnly) { - InstallSourceType type = instSource.type; - if (type == srcUnknown && !args.empty() && isPath(args.front())) { - type = srcStorePaths; - } - - switch (type) { - /* Get the available user environment elements from the - derivations specified in a Nix expression, including only - those with names matching any of the names in `args'. */ - case srcUnknown: - case srcNixExprDrvs: { - /* Load the derivations from the (default or specified) - Nix expression. */ - DrvInfos allElems; - loadDerivations(state, instSource.nixExprPath, instSource.systemFilter, - instSource.autoArgs.get(), "", allElems); - - elems = filterBySelector(state, allElems, args, newestOnly); - - break; - } - - /* Get the available user environment elements from the Nix - expressions specified on the command line; these should be - functions that take the default Nix expression file as - argument, e.g., if the file is `./foo.nix', then the - argument `x: x.bar' is equivalent to `(x: x.bar) - (import ./foo.nix)' = `(import ./foo.nix).bar'. */ - case srcNixExprs: { - Value vArg; - loadSourceExpr(state, instSource.nixExprPath, vArg); - - for (auto& i : args) { - Expr* eFun = state.parseExprFromString(i, absPath(".")); - Value vFun; - Value vTmp; - state.eval(eFun, vFun); - mkApp(vTmp, vFun, vArg); - getDerivations(state, vTmp, "", instSource.autoArgs.get(), elems, true); - } - - break; - } - - /* The available user environment elements are specified as a - list of store paths (which may or may not be - derivations). */ - case srcStorePaths: { - for (auto& i : args) { - Path path = state.store->followLinksToStorePath(i); - - std::string name = baseNameOf(path); - std::string::size_type dash = name.find('-'); - if (dash != std::string::npos) { - name = std::string(name, dash + 1); - } - - DrvInfo elem(state, "", nullptr); - elem.setName(name); - - if (isDerivation(path)) { - elem.setDrvPath(path); - elem.setOutPath( - state.store->derivationFromPath(path).findOutput("out")); - if (name.size() >= drvExtension.size() && - std::string(name, name.size() - drvExtension.size()) == - drvExtension) { - name = std::string(name, 0, name.size() - drvExtension.size()); - } - } else { - elem.setOutPath(path); - } - - elems.push_back(elem); - } - - break; - } - - /* Get the available user environment elements from another - user environment. 
These are then filtered as in the - `srcNixExprDrvs' case. */ - case srcProfile: { - elems = filterBySelector(state, queryInstalled(state, instSource.profile), - args, newestOnly); - break; - } - - case srcAttrPath: { - Value vRoot; - loadSourceExpr(state, instSource.nixExprPath, vRoot); - for (auto& i : args) { - Value& v( - *findAlongAttrPath(state, i, instSource.autoArgs.get(), vRoot)); - getDerivations(state, v, "", instSource.autoArgs.get(), elems, true); - } - break; - } - } -} - -static void printMissing(EvalState& state, DrvInfos& elems) { - PathSet targets; - for (auto& i : elems) { - Path drvPath = i.queryDrvPath(); - if (!drvPath.empty()) { - targets.insert(drvPath); - } else { - targets.insert(i.queryOutPath()); - } - } - - printMissing(state.store, targets); -} - -static bool keep(DrvInfo& drv) { return drv.queryMetaBool("keep", false); } - -static void installDerivations(Globals& globals, const Strings& args, - const Path& profile) { - DLOG(INFO) << "installing derivations"; - - /* Get the set of user environment elements to be installed. */ - DrvInfos newElems; - DrvInfos newElemsTmp; - queryInstSources(*globals.state, globals.instSource, args, newElemsTmp, true); - - /* If --prebuilt-only is given, filter out source-only packages. */ - for (auto& i : newElemsTmp) { - if (!globals.prebuiltOnly || isPrebuilt(*globals.state, i)) { - newElems.push_back(i); - } - } - - StringSet newNames; - for (auto& i : newElems) { - /* `forceName' is a hack to get package names right in some - one-click installs, namely those where the name used in the - path is not the one we want (e.g., `java-front' versus - `java-front-0.9pre15899'). */ - if (!globals.forceName.empty()) { - i.setName(globals.forceName); - } - newNames.insert(DrvName(i.queryName()).name); - } - - while (true) { - std::string lockToken = optimisticLockProfile(profile); - - DrvInfos allElems(newElems); - - /* Add in the already installed derivations, unless they have - the same name as a to-be-installed element. 
*/ - if (!globals.removeAll) { - DrvInfos installedElems = queryInstalled(*globals.state, profile); - - for (auto& i : installedElems) { - DrvName drvName(i.queryName()); - if (!globals.preserveInstalled && - newNames.find(drvName.name) != newNames.end() && !keep(i)) { - LOG(INFO) << "replacing old '" << i.queryName() << "'"; - } else { - allElems.push_back(i); - } - } - - for (auto& i : newElems) { - LOG(INFO) << "installing " << i.queryName(); - } - } - - printMissing(*globals.state, newElems); - - if (globals.dryRun) { - return; - } - - if (createUserEnv(*globals.state, allElems, profile, - settings.envKeepDerivations, lockToken)) { - break; - } - } -} - -static void opInstall(Globals& globals, Strings opFlags, Strings opArgs) { - for (auto i = opFlags.begin(); i != opFlags.end();) { - std::string arg = *i++; - if (parseInstallSourceOptions(globals, i, opFlags, arg)) { - ; - } else if (arg == "--preserve-installed" || arg == "-P") { - globals.preserveInstalled = true; - } else if (arg == "--remove-all" || arg == "-r") { - globals.removeAll = true; - } else { - throw UsageError(format("unknown flag '%1%'") % arg); - } - } - - installDerivations(globals, opArgs, globals.profile); -} - -typedef enum { utLt, utLeq, utEq, utAlways } UpgradeType; - -static void upgradeDerivations(Globals& globals, const Strings& args, - UpgradeType upgradeType) { - DLOG(INFO) << "upgrading derivations"; - - /* Upgrade works as follows: we take all currently installed - derivations, and for any derivation matching any selector, look - for a derivation in the input Nix expression that has the same - name and a higher version number. */ - - while (true) { - std::string lockToken = optimisticLockProfile(globals.profile); - - DrvInfos installedElems = queryInstalled(*globals.state, globals.profile); - - /* Fetch all derivations from the input file. */ - DrvInfos availElems; - queryInstSources(*globals.state, globals.instSource, args, availElems, - false); - - /* Go through all installed derivations. */ - DrvInfos newElems; - for (auto& i : installedElems) { - DrvName drvName(i.queryName()); - - try { - if (keep(i)) { - newElems.push_back(i); - continue; - } - - /* Find the derivation in the input Nix expression - with the same name that satisfies the version - constraints specified by upgradeType. If there are - multiple matches, take the one with the highest - priority. If there are still multiple matches, - take the one with the highest version. - Do not upgrade if it would decrease the priority. */ - auto bestElem = availElems.end(); - std::string bestVersion; - for (auto j = availElems.begin(); j != availElems.end(); ++j) { - if (comparePriorities(*globals.state, i, *j) > 0) { - continue; - } - DrvName newName(j->queryName()); - if (newName.name == drvName.name) { - int d = compareVersions(drvName.version, newName.version); - if ((upgradeType == utLt && d < 0) || - (upgradeType == utLeq && d <= 0) || - (upgradeType == utEq && d == 0) || upgradeType == utAlways) { - long d2 = -1; - if (bestElem != availElems.end()) { - d2 = comparePriorities(*globals.state, *bestElem, *j); - if (d2 == 0) { - d2 = compareVersions(bestVersion, newName.version); - } - } - if (d2 < 0 && - (!globals.prebuiltOnly || isPrebuilt(*globals.state, *j))) { - bestElem = j; - bestVersion = newName.version; - } - } - } - } - - if (bestElem != availElems.end() && - i.queryOutPath() != bestElem->queryOutPath()) { - const char* action = - compareVersions(drvName.version, bestVersion) <= 0 - ? 
"upgrading" - : "downgrading"; - LOG(INFO) << action << " '" << i.queryName() << "' to '" - << bestElem->queryName() << "'"; - newElems.push_back(*bestElem); - } else { - newElems.push_back(i); - } - - } catch (Error& e) { - e.addPrefix( - fmt("while trying to find an upgrade for '%s':\n", i.queryName())); - throw; - } - } - - printMissing(*globals.state, newElems); - - if (globals.dryRun) { - return; - } - - if (createUserEnv(*globals.state, newElems, globals.profile, - settings.envKeepDerivations, lockToken)) { - break; - } - } -} - -static void opUpgrade(Globals& globals, Strings opFlags, Strings opArgs) { - UpgradeType upgradeType = utLt; - for (auto i = opFlags.begin(); i != opFlags.end();) { - std::string arg = *i++; - if (parseInstallSourceOptions(globals, i, opFlags, arg)) { - ; - } else if (arg == "--lt") { - upgradeType = utLt; - } else if (arg == "--leq") { - upgradeType = utLeq; - } else if (arg == "--eq") { - upgradeType = utEq; - } else if (arg == "--always") { - upgradeType = utAlways; - } else { - throw UsageError(format("unknown flag '%1%'") % arg); - } - } - - upgradeDerivations(globals, opArgs, upgradeType); -} - -static void setMetaFlag(EvalState& state, DrvInfo& drv, const std::string& name, - const std::string& value) { - Value* v = state.allocValue(); - mkString(*v, value.c_str()); - drv.setMeta(name, v); -} - -static void opSetFlag(Globals& globals, Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError(format("unknown flag '%1%'") % opFlags.front()); - } - if (opArgs.size() < 2) { - throw UsageError("not enough arguments to '--set-flag'"); - } - - auto arg = opArgs.begin(); - std::string flagName = *arg++; - std::string flagValue = *arg++; - DrvNames selectors = drvNamesFromArgs(Strings(arg, opArgs.end())); - - while (true) { - std::string lockToken = optimisticLockProfile(globals.profile); - - DrvInfos installedElems = queryInstalled(*globals.state, globals.profile); - - /* Update all matching derivations. */ - for (auto& i : installedElems) { - DrvName drvName(i.queryName()); - for (auto& j : selectors) { - if (j.matches(drvName)) { - LOG(INFO) << "setting flag on '" << i.queryName() << "'"; - j.hits++; - setMetaFlag(*globals.state, i, flagName, flagValue); - break; - } - } - } - - checkSelectorUse(selectors); - - /* Write the new user environment. */ - if (createUserEnv(*globals.state, installedElems, globals.profile, - settings.envKeepDerivations, lockToken)) { - break; - } - } -} - -static void opSet(Globals& globals, Strings opFlags, Strings opArgs) { - auto store2 = globals.state->store.dynamic_pointer_cast(); - if (!store2) { - throw Error("--set is not supported for this Nix store"); - } - - for (auto i = opFlags.begin(); i != opFlags.end();) { - std::string arg = *i++; - if (parseInstallSourceOptions(globals, i, opFlags, arg)) { - ; - } else { - throw UsageError(format("unknown flag '%1%'") % arg); - } - } - - DrvInfos elems; - queryInstSources(*globals.state, globals.instSource, opArgs, elems, true); - - if (elems.size() != 1) { - throw Error("--set requires exactly one derivation"); - } - - DrvInfo& drv(elems.front()); - - if (!globals.forceName.empty()) { - drv.setName(globals.forceName); - } - - if (!drv.queryDrvPath().empty()) { - PathSet paths = {drv.queryDrvPath()}; - printMissing(globals.state->store, paths); - if (globals.dryRun) { - return; - } - nix::util::OkOrThrow(globals.state->store->buildPaths( - std::cerr, paths, globals.state->repair != 0u ? 
bmRepair : bmNormal)); - } else { - printMissing(globals.state->store, {drv.queryOutPath()}); - if (globals.dryRun) { - return; - } - globals.state->store->ensurePath(drv.queryOutPath()); - } - - DLOG(INFO) << "switching to new user environment"; - Path generation = createGeneration(ref(store2), globals.profile, - drv.queryOutPath()); - switchLink(globals.profile, generation); -} - -static void uninstallDerivations(Globals& globals, Strings& selectors, - Path& profile) { - while (true) { - std::string lockToken = optimisticLockProfile(profile); - - DrvInfos installedElems = queryInstalled(*globals.state, profile); - DrvInfos newElems; - - for (auto& i : installedElems) { - DrvName drvName(i.queryName()); - bool found = false; - for (auto& j : selectors) { - /* !!! the repeated calls to followLinksToStorePath() - are expensive, should pre-compute them. */ - if ((isPath(j) && - i.queryOutPath() == - globals.state->store->followLinksToStorePath(j)) || - DrvName(j).matches(drvName)) { - LOG(INFO) << "uninstalling '" << i.queryName() << "'"; - found = true; - break; - } - } - if (!found) { - newElems.push_back(i); - } - } - - if (globals.dryRun) { - return; - } - - if (createUserEnv(*globals.state, newElems, profile, - settings.envKeepDerivations, lockToken)) { - break; - } - } -} - -static void opUninstall(Globals& globals, Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError(format("unknown flag '%1%'") % opFlags.front()); - } - uninstallDerivations(globals, opArgs, globals.profile); -} - -static bool cmpChars(char a, char b) { return toupper(a) < toupper(b); } - -static bool cmpElemByName(const DrvInfo& a, const DrvInfo& b) { - auto a_name = a.queryName(); - auto b_name = b.queryName(); - return lexicographical_compare(a_name.begin(), a_name.end(), b_name.begin(), - b_name.end(), cmpChars); -} - -using Table = std::list; - -void printTable(Table& table) { - auto nrColumns = !table.empty() ? table.front().size() : 0; - - std::vector widths; - widths.resize(nrColumns); - - for (auto& i : table) { - assert(i.size() == nrColumns); - Strings::iterator j; - size_t column; - for (j = i.begin(), column = 0; j != i.end(); ++j, ++column) { - if (j->size() > widths[column]) { - widths[column] = j->size(); - } - } - } - - for (auto& i : table) { - Strings::iterator j; - size_t column; - for (j = i.begin(), column = 0; j != i.end(); ++j, ++column) { - std::string s = *j; - replace(s.begin(), s.end(), '\n', ' '); - cout << s; - if (column < nrColumns - 1) { - cout << std::string(widths[column] - s.size() + 2, ' '); - } - } - cout << std::endl; - } -} - -/* This function compares the version of an element against the - versions in the given set of elements. `cvLess' means that only - lower versions are in the set, `cvEqual' means that at most an - equal version is in the set, and `cvGreater' means that there is at - least one element with a higher version in the set. `cvUnavail' - means that there are no elements with the same name in the set. 
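A minimal standalone sketch of the classification described in the comment above, using plain strings and a naive comparator in place of DrvInfo and Nix's compareVersions; every name below is illustrative and not part of the removed code:

#include <iostream>
#include <string>
#include <vector>

enum VersionDiff { cvLess, cvEqual, cvGreater, cvUnavail };

struct Pkg { std::string name, version; };

// Naive stand-in for Nix's compareVersions(); good enough for the demo input.
static int cmpVer(const std::string& a, const std::string& b) { return a.compare(b); }

// Classify `elem` against every same-named package in `others`, with the same
// precedence as the removed code: a newer match wins over an equal one, which
// wins over an older one; no same-named package at all means cvUnavail.
static VersionDiff versionAgainstSet(const Pkg& elem, const std::vector<Pkg>& others,
                                     std::string& shown) {
  VersionDiff diff = cvUnavail;
  shown = "?";
  for (const auto& o : others) {
    if (o.name != elem.name) continue;
    int d = cmpVer(elem.version, o.version);
    if (d < 0) { diff = cvGreater; shown = o.version; }
    else if (diff != cvGreater && d == 0) { diff = cvEqual; shown = o.version; }
    else if (diff != cvGreater && diff != cvEqual && d > 0) { diff = cvLess; shown = o.version; }
  }
  return diff;
}

int main() {
  std::vector<Pkg> avail = {{"hello", "1.1"}, {"hello", "1.0"}, {"cowsay", "3.0"}};
  std::string v;
  if (versionAgainstSet({"hello", "1.0"}, avail, v) == cvGreater)
    std::cout << "a newer 'hello' is available: " << v << "\n";
}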
*/ - -using VersionDiff = enum { cvLess, cvEqual, cvGreater, cvUnavail }; - -static VersionDiff compareVersionAgainstSet(const DrvInfo& elem, - const DrvInfos& elems, - std::string& version) { - DrvName name(elem.queryName()); - - VersionDiff diff = cvUnavail; - version = "?"; - - for (auto& i : elems) { - DrvName name2(i.queryName()); - if (name.name == name2.name) { - int d = compareVersions(name.version, name2.version); - if (d < 0) { - diff = cvGreater; - version = name2.version; - } else if (diff != cvGreater && d == 0) { - diff = cvEqual; - version = name2.version; - } else if (diff != cvGreater && diff != cvEqual && d > 0) { - diff = cvLess; - if (version.empty() || compareVersions(version, name2.version) < 0) { - version = name2.version; - } - } - } - } - - return diff; -} - -static void queryJSON(Globals& globals, std::vector& elems) { - JSONObject topObj(cout, true); - for (auto& i : elems) { - JSONObject pkgObj = topObj.object(i.attrPath); - - auto drvName = DrvName(i.queryName()); - pkgObj.attr("name", drvName.fullName); - pkgObj.attr("pname", drvName.name); - pkgObj.attr("version", drvName.version); - pkgObj.attr("system", i.querySystem()); - - JSONObject metaObj = pkgObj.object("meta"); - StringSet metaNames = i.queryMetaNames(); - for (auto& j : metaNames) { - auto placeholder = metaObj.placeholder(j); - Value* v = i.queryMeta(j); - if (v == nullptr) { - LOG(ERROR) << "derivation '" << i.queryName() - << "' has invalid meta attribute '" << j << "'"; - placeholder.write(nullptr); - } else { - PathSet context; - printValueAsJSON(*globals.state, true, *v, placeholder, context); - } - } - } -} - -static void opQuery(Globals& globals, Strings opFlags, Strings opArgs) { - Strings remaining; - std::string attrPath; - - bool printStatus = false; - bool printName = true; - bool printAttrPath = false; - bool printSystem = false; - bool printDrvPath = false; - bool printOutPath = false; - bool printDescription = false; - bool printMeta = false; - bool compareVersions = false; - bool xmlOutput = false; - bool jsonOutput = false; - - enum { sInstalled, sAvailable } source = sInstalled; - - settings.readOnlyMode = true; /* makes evaluation a bit faster */ - - for (auto i = opFlags.begin(); i != opFlags.end();) { - std::string arg = *i++; - if (arg == "--status" || arg == "-s") { - printStatus = true; - } else if (arg == "--no-name") { - printName = false; - } else if (arg == "--system") { - printSystem = true; - } else if (arg == "--description") { - printDescription = true; - } else if (arg == "--compare-versions" || arg == "-c") { - compareVersions = true; - } else if (arg == "--drv-path") { - printDrvPath = true; - } else if (arg == "--out-path") { - printOutPath = true; - } else if (arg == "--meta") { - printMeta = true; - } else if (arg == "--installed") { - source = sInstalled; - } else if (arg == "--available" || arg == "-a") { - source = sAvailable; - } else if (arg == "--xml") { - xmlOutput = true; - } else if (arg == "--json") { - jsonOutput = true; - } else if (arg == "--attr-path" || arg == "-P") { - printAttrPath = true; - } else if (arg == "--attr" || arg == "-A") { - attrPath = needArg(i, opFlags, arg); - } else { - throw UsageError(format("unknown flag '%1%'") % arg); - } - } - - /* Obtain derivation information from the specified source. 
*/ - DrvInfos availElems; - DrvInfos installedElems; - - if (source == sInstalled || compareVersions || printStatus) { - installedElems = queryInstalled(*globals.state, globals.profile); - } - - if (source == sAvailable || compareVersions) { - loadDerivations(*globals.state, globals.instSource.nixExprPath, - globals.instSource.systemFilter, - globals.instSource.autoArgs.get(), attrPath, availElems); - } - - DrvInfos elems_ = filterBySelector( - *globals.state, source == sInstalled ? installedElems : availElems, - opArgs, false); - - DrvInfos& otherElems(source == sInstalled ? availElems : installedElems); - - /* Sort them by name. */ - /* !!! */ - std::vector elems; - for (auto& i : elems_) { - elems.push_back(i); - } - sort(elems.begin(), elems.end(), cmpElemByName); - - /* We only need to know the installed paths when we are querying - the status of the derivation. */ - PathSet installed; /* installed paths */ - - if (printStatus) { - for (auto& i : installedElems) { - installed.insert(i.queryOutPath()); - } - } - - /* Query which paths have substitutes. */ - PathSet validPaths; - PathSet substitutablePaths; - if (printStatus || globals.prebuiltOnly) { - PathSet paths; - for (auto& i : elems) { - try { - paths.insert(i.queryOutPath()); - } catch (AssertionError& e) { - DLOG(WARNING) << "skipping derivation named '" << i.queryName() - << "' which gives an assertion failure"; - i.setFailed(); - } - } - validPaths = globals.state->store->queryValidPaths(paths); - substitutablePaths = globals.state->store->querySubstitutablePaths(paths); - } - - /* Print the desired columns, or XML output. */ - if (jsonOutput) { - queryJSON(globals, elems); - return; - } - - bool tty = isatty(STDOUT_FILENO) != 0; - RunPager pager; - - Table table; - std::ostringstream dummy; - XMLWriter xml(true, *(xmlOutput ? &cout : &dummy)); - XMLOpenElement xmlRoot(xml, "items"); - - for (auto& i : elems) { - try { - if (i.hasFailed()) { - continue; - } - - // Activity act(*logger, lvlDebug, format("outputting query result '%1%'") - // % i.attrPath); - - if (globals.prebuiltOnly && - validPaths.find(i.queryOutPath()) == validPaths.end() && - substitutablePaths.find(i.queryOutPath()) == - substitutablePaths.end()) { - continue; - } - - /* For table output. */ - Strings columns; - - /* For XML output. */ - XMLAttrs attrs; - - if (printStatus) { - Path outPath = i.queryOutPath(); - bool hasSubs = - substitutablePaths.find(outPath) != substitutablePaths.end(); - bool isInstalled = installed.find(outPath) != installed.end(); - bool isValid = validPaths.find(outPath) != validPaths.end(); - if (xmlOutput) { - attrs["installed"] = isInstalled ? "1" : "0"; - attrs["valid"] = isValid ? "1" : "0"; - attrs["substitutable"] = hasSubs ? "1" : "0"; - } else { - columns.push_back(absl::StrCat((isInstalled ? "I" : "-"), - (isValid ? "P" : "-"), - (hasSubs ? "S" : "-"))); - } - } - - if (xmlOutput) { - attrs["attrPath"] = i.attrPath; - } else if (printAttrPath) { - columns.push_back(i.attrPath); - } - - if (xmlOutput) { - auto drvName = DrvName(i.queryName()); - attrs["name"] = drvName.fullName; - attrs["pname"] = drvName.name; - attrs["version"] = drvName.version; - } else if (printName) { - columns.push_back(i.queryName()); - } - - if (compareVersions) { - /* Compare this element against the versions of the - same named packages in either the set of available - elements, or the set of installed elements. !!! - This is O(N * M), should be O(N * lg M). 
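The "!!! This is O(N * M)" note above hints at indexing the other set by package name. One possible shape for that, sketched with a hash map (a sorted structure would give the O(N * lg M) the note mentions); names are illustrative only, and the removed code itself does not do this:

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

struct Pkg { std::string name, version; };

int main() {
  std::vector<Pkg> others = {{"hello", "2.12"}, {"hello", "2.10"}, {"gnutar", "1.34"}};

  // Build the index once: O(M).
  std::unordered_map<std::string, std::vector<std::string>> byName;
  for (const auto& p : others) byName[p.name].push_back(p.version);

  // Each of the N queried packages now costs one probe instead of a scan over M.
  std::vector<Pkg> queried = {{"hello", "2.10"}, {"cowsay", "3.04"}};
  for (const auto& q : queried) {
    auto it = byName.find(q.name);
    if (it == byName.end()) {
      std::cout << q.name << ": no counterpart\n";
      continue;
    }
    std::cout << q.name << ": " << it->second.size() << " candidate version(s)\n";
  }
}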
*/ - std::string version; - VersionDiff diff = compareVersionAgainstSet(i, otherElems, version); - - char ch; - switch (diff) { - case cvLess: - ch = '>'; - break; - case cvEqual: - ch = '='; - break; - case cvGreater: - ch = '<'; - break; - case cvUnavail: - ch = '-'; - break; - default: - abort(); - } - - if (xmlOutput) { - if (diff != cvUnavail) { - attrs["versionDiff"] = ch; - attrs["maxComparedVersion"] = version; - } - } else { - std::string column = std::to_string(ch) + " " + version; - if (diff == cvGreater && tty) { - column = ANSI_RED + column + ANSI_NORMAL; - } - columns.push_back(column); - } - } - - if (xmlOutput) { - if (!i.querySystem().empty()) { - attrs["system"] = i.querySystem(); - } - } else if (printSystem) { - columns.push_back(i.querySystem()); - } - - if (printDrvPath) { - std::string drvPath = i.queryDrvPath(); - if (xmlOutput) { - if (!drvPath.empty()) { - attrs["drvPath"] = drvPath; - } - } else { - columns.push_back(drvPath.empty() ? "-" : drvPath); - } - } - - if (printOutPath && !xmlOutput) { - DrvInfo::Outputs outputs = i.queryOutputs(); - std::string s; - for (auto& j : outputs) { - if (!s.empty()) { - s += ';'; - } - if (j.first != "out") { - s += j.first; - s += "="; - } - s += j.second; - } - columns.push_back(s); - } - - if (printDescription) { - std::string descr = i.queryMetaString("description"); - if (xmlOutput) { - if (!descr.empty()) { - attrs["description"] = descr; - } - } else { - columns.push_back(descr); - } - } - - if (xmlOutput) { - if (printOutPath || printMeta) { - XMLOpenElement item(xml, "item", attrs); - if (printOutPath) { - DrvInfo::Outputs outputs = i.queryOutputs(); - for (auto& j : outputs) { - XMLAttrs attrs2; - attrs2["name"] = j.first; - attrs2["path"] = j.second; - xml.writeEmptyElement("output", attrs2); - } - } - if (printMeta) { - StringSet metaNames = i.queryMetaNames(); - for (auto& j : metaNames) { - XMLAttrs attrs2; - attrs2["name"] = j; - Value* v = i.queryMeta(j); - if (v == nullptr) { - LOG(ERROR) << "derivation '" << i.queryName() - << "' has invalid meta attribute '" << j << "'"; - } else { - if (v->type == tString) { - attrs2["type"] = "string"; - attrs2["value"] = v->string.s; - xml.writeEmptyElement("meta", attrs2); - } else if (v->type == tInt) { - attrs2["type"] = "int"; - attrs2["value"] = (format("%1%") % v->integer).str(); - xml.writeEmptyElement("meta", attrs2); - } else if (v->type == tFloat) { - attrs2["type"] = "float"; - attrs2["value"] = (format("%1%") % v->fpoint).str(); - xml.writeEmptyElement("meta", attrs2); - } else if (v->type == tBool) { - attrs2["type"] = "bool"; - attrs2["value"] = v->boolean ? 
"true" : "false"; - xml.writeEmptyElement("meta", attrs2); - } else if (v->isList()) { - attrs2["type"] = "strings"; - XMLOpenElement m(xml, "meta", attrs2); - for (unsigned int j = 0; j < v->listSize(); ++j) { - if ((*v->list)[j]->type != tString) { - continue; - } - XMLAttrs attrs3; - attrs3["value"] = (*v->list)[j]->string.s; - xml.writeEmptyElement("string", attrs3); - } - } else if (v->type == tAttrs) { - attrs2["type"] = "strings"; - XMLOpenElement m(xml, "meta", attrs2); - Bindings& attrs = *v->attrs; - for (auto& [name, a] : attrs) { - if (a.value->type != tString) { - continue; - } - XMLAttrs attrs3; - attrs3["type"] = name; - attrs3["value"] = a.value->string.s; - xml.writeEmptyElement("string", attrs3); - } - } - } - } - } - } else { - xml.writeEmptyElement("item", attrs); - } - } else { - table.push_back(columns); - } - - cout.flush(); - - } catch (AssertionError& e) { - DLOG(WARNING) << "skipping derivation named '" << i.queryName() - << "' which gives an assertion failure"; - } catch (Error& e) { - e.addPrefix( - fmt("while querying the derivation named '%1%':\n", i.queryName())); - throw; - } - } - - if (!xmlOutput) { - printTable(table); - } -} - -static void opSwitchProfile(Globals& globals, Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError(format("unknown flag '%1%'") % opFlags.front()); - } - if (opArgs.size() != 1) { - throw UsageError(format("exactly one argument expected")); - } - - Path profile = absPath(opArgs.front()); - Path profileLink = getHome() + "/.nix-profile"; - - switchLink(profileLink, profile); -} - -static const int prevGen = -2; - -static void switchGeneration(Globals& globals, int dstGen) { - PathLocks lock; - lockProfile(lock, globals.profile); - - int curGen; - Generations gens = findGenerations(globals.profile, curGen); - - Generation dst; - for (auto& i : gens) { - if ((dstGen == prevGen && i.number < curGen) || - (dstGen >= 0 && i.number == dstGen)) { - dst = i; - } - } - - if (!dst) { - if (dstGen == prevGen) { - throw Error(format("no generation older than the current (%1%) exists") % - curGen); - } - throw Error(format("generation %1% does not exist") % dstGen); - } - - LOG(INFO) << "switching from generation " << curGen << " to " << dst.number; - - if (globals.dryRun) { - return; - } - - switchLink(globals.profile, dst.path); -} - -static void opSwitchGeneration(Globals& globals, Strings opFlags, - Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError(format("unknown flag '%1%'") % opFlags.front()); - } - if (opArgs.size() != 1) { - throw UsageError(format("exactly one argument expected")); - } - - int dstGen; - if (!absl::SimpleAtoi(opArgs.front(), &dstGen)) { - throw UsageError(format("expected a generation number")); - } - - switchGeneration(globals, dstGen); -} - -static void opRollback(Globals& globals, Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError(format("unknown flag '%1%'") % opFlags.front()); - } - if (!opArgs.empty()) { - throw UsageError(format("no arguments expected")); - } - - switchGeneration(globals, prevGen); -} - -static void opListGenerations(Globals& globals, Strings opFlags, - Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError(format("unknown flag '%1%'") % opFlags.front()); - } - if (!opArgs.empty()) { - throw UsageError(format("no arguments expected")); - } - - PathLocks lock; - lockProfile(lock, globals.profile); - - int curGen; - Generations gens = findGenerations(globals.profile, curGen); - - RunPager pager; - - for (auto& i : gens) { 
- tm t; - if (localtime_r(&i.creationTime, &t) == nullptr) { - throw Error("cannot convert time"); - } - cout << format("%|4| %|4|-%|02|-%|02| %|02|:%|02|:%|02| %||\n") % - i.number % (t.tm_year + 1900) % (t.tm_mon + 1) % t.tm_mday % - t.tm_hour % t.tm_min % t.tm_sec % - (i.number == curGen ? "(current)" : ""); - } -} - -static void opDeleteGenerations(Globals& globals, Strings opFlags, - Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError(format("unknown flag '%1%'") % opFlags.front()); - } - - if (opArgs.size() == 1 && opArgs.front() == "old") { - deleteOldGenerations(globals.profile, globals.dryRun); - } else if (opArgs.size() == 1 && - opArgs.front().find('d') != std::string::npos) { - deleteGenerationsOlderThan(globals.profile, opArgs.front(), globals.dryRun); - } else if (opArgs.size() == 1 && - opArgs.front().find('+') != std::string::npos) { - if (opArgs.front().size() < 2) { - throw Error(format("invalid number of generations ‘%1%’") % - opArgs.front()); - } - std::string str_max = std::string(opArgs.front(), 1, opArgs.front().size()); - int max; - if (!absl::SimpleAtoi(str_max, &max) || max == 0) { - throw Error(format("invalid number of generations to keep ‘%1%’") % - opArgs.front()); - } - deleteGenerationsGreaterThan(globals.profile, max, globals.dryRun); - } else { - std::set gens; - for (auto& i : opArgs) { - unsigned int n; - if (!absl::SimpleAtoi(i, &n)) { - throw UsageError(format("invalid generation number '%1%'") % i); - } - gens.insert(n); - } - deleteGenerations(globals.profile, gens, globals.dryRun); - } -} - -static void opVersion(Globals& globals, Strings opFlags, Strings opArgs) { - printVersion("nix-env"); -} - -static int _main(int argc, char** argv) { - { - Strings opFlags; - Strings opArgs; - Operation op = nullptr; - RepairFlag repair = NoRepair; - std::string file; - - Globals globals; - - globals.instSource.type = srcUnknown; - globals.instSource.nixExprPath = getHome() + "/.nix-defexpr"; - globals.instSource.systemFilter = "*"; - - if (!pathExists(globals.instSource.nixExprPath)) { - try { - createDirs(globals.instSource.nixExprPath); - replaceSymlink(fmt("%s/profiles/per-user/%s/channels", - settings.nixStateDir, getUserName()), - globals.instSource.nixExprPath + "/channels"); - if (getuid() != 0) { - replaceSymlink( - fmt("%s/profiles/per-user/root/channels", settings.nixStateDir), - globals.instSource.nixExprPath + "/channels_root"); - } - } catch (Error&) { - } - } - - globals.dryRun = false; - globals.preserveInstalled = false; - globals.removeAll = false; - globals.prebuiltOnly = false; - - struct MyArgs : LegacyArgs, MixEvalArgs { - using LegacyArgs::LegacyArgs; - }; - - MyArgs myArgs(baseNameOf(argv[0]), [&](Strings::iterator& arg, - const Strings::iterator& end) { - Operation oldOp = op; - - if (*arg == "--help") { - showManPage("nix-env"); - } else if (*arg == "--version") { - op = opVersion; - } else if (*arg == "--install" || *arg == "-i") { - op = opInstall; - } else if (*arg == - "--force-name") { // undocumented flag for nix-install-package - globals.forceName = getArg(*arg, arg, end); - } else if (*arg == "--uninstall" || *arg == "-e") { - op = opUninstall; - } else if (*arg == "--upgrade" || *arg == "-u") { - op = opUpgrade; - } else if (*arg == "--set-flag") { - op = opSetFlag; - } else if (*arg == "--set") { - op = opSet; - } else if (*arg == "--query" || *arg == "-q") { - op = opQuery; - } else if (*arg == "--profile" || *arg == "-p") { - globals.profile = absPath(getArg(*arg, arg, end)); - } else if (*arg == "--file" || *arg 
== "-f") { - file = getArg(*arg, arg, end); - } else if (*arg == "--switch-profile" || *arg == "-S") { - op = opSwitchProfile; - } else if (*arg == "--switch-generation" || *arg == "-G") { - op = opSwitchGeneration; - } else if (*arg == "--rollback") { - op = opRollback; - } else if (*arg == "--list-generations") { - op = opListGenerations; - } else if (*arg == "--delete-generations") { - op = opDeleteGenerations; - } else if (*arg == "--dry-run") { - LOG(INFO) << "(dry run; not doing anything)"; - globals.dryRun = true; - } else if (*arg == "--system-filter") { - globals.instSource.systemFilter = getArg(*arg, arg, end); - } else if (*arg == "--prebuilt-only" || *arg == "-b") { - globals.prebuiltOnly = true; - } else if (*arg == "--repair") { - repair = Repair; - } else if (*arg != "" && arg->at(0) == '-') { - opFlags.push_back(*arg); - /* FIXME: hacky */ - if (*arg == "--from-profile" || - (op == opQuery && (*arg == "--attr" || *arg == "-A"))) { - opFlags.push_back(getArg(*arg, arg, end)); - } - } else { - opArgs.push_back(*arg); - } - - if ((oldOp != nullptr) && oldOp != op) { - throw UsageError("only one operation may be specified"); - } - - return true; - }); - - myArgs.parseCmdline(argvToStrings(argc, argv)); - - if (op == nullptr) { - throw UsageError("no operation specified"); - } - - auto store = openStore(); - - globals.state = - std::shared_ptr(new EvalState(myArgs.searchPath, store)); - globals.state->repair = repair; - - if (!file.empty()) { - globals.instSource.nixExprPath = lookupFileArg(*globals.state, file); - } - - globals.instSource.autoArgs = myArgs.getAutoArgs(*globals.state); - - if (globals.profile.empty()) { - globals.profile = getEnv("NIX_PROFILE").value_or(""); - } - - if (globals.profile.empty()) { - Path profileLink = getHome() + "/.nix-profile"; - try { - if (!pathExists(profileLink)) { - replaceSymlink(getuid() == 0 - ? settings.nixStateDir + "/profiles/default" - : fmt("%s/profiles/per-user/%s/profile", - settings.nixStateDir, getUserName()), - profileLink); - } - globals.profile = absPath(readLink(profileLink), dirOf(profileLink)); - } catch (Error&) { - globals.profile = profileLink; - } - } - - op(globals, opFlags, opArgs); - - globals.state->printStats(); - - return 0; - } -} - -static RegisterLegacyCommand s1("nix-env", _main); diff --git a/third_party/nix/src/nix-env/user-env.cc b/third_party/nix/src/nix-env/user-env.cc deleted file mode 100644 index bce5c44f95..0000000000 --- a/third_party/nix/src/nix-env/user-env.cc +++ /dev/null @@ -1,169 +0,0 @@ -#include "nix-env/user-env.hh" - -#include - -#include - -#include "libexpr/eval-inline.hh" -#include "libexpr/eval.hh" -#include "libmain/shared.hh" -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/profiles.hh" -#include "libstore/store-api.hh" -#include "libutil/status.hh" -#include "libutil/util.hh" - -namespace nix { - -DrvInfos queryInstalled(EvalState& state, const Path& userEnv) { - DrvInfos elems; - Path manifestFile = userEnv + "/manifest.nix"; - if (pathExists(manifestFile)) { - Value v; - state.evalFile(manifestFile, v); - std::unique_ptr bindings(Bindings::New()); - getDerivations(state, v, "", bindings.get(), elems, false); - } - return elems; -} - -bool createUserEnv(EvalState& state, DrvInfos& elems, const Path& profile, - bool keepDerivations, const std::string& lockToken) { - /* Build the components in the user environment, if they don't - exist already. 
*/ - PathSet drvsToBuild; - for (auto& i : elems) { - if (!i.queryDrvPath().empty()) { - drvsToBuild.insert(i.queryDrvPath()); - } - } - - DLOG(INFO) << "building user environment dependencies"; - util::OkOrThrow(state.store->buildPaths( - std::cerr, drvsToBuild, state.repair != 0u ? bmRepair : bmNormal)); - - /* Construct the whole top level derivation. */ - PathSet references; - Value manifest; - state.mkList(manifest, elems.size()); - unsigned int n = 0; - for (auto& i : elems) { - /* Create a pseudo-derivation containing the name, system, - output paths, and optionally the derivation path, as well - as the meta attributes. */ - Path drvPath = keepDerivations ? i.queryDrvPath() : ""; - - Value* v = state.allocValue(); - (*manifest.list)[n++] = v; - state.mkAttrs(*v, 16); - - mkString(*state.allocAttr(*v, state.sType), "derivation"); - mkString(*state.allocAttr(*v, state.sName), i.queryName()); - auto system = i.querySystem(); - if (!system.empty()) { - mkString(*state.allocAttr(*v, state.sSystem), system); - } - mkString(*state.allocAttr(*v, state.sOutPath), i.queryOutPath()); - if (!drvPath.empty()) { - mkString(*state.allocAttr(*v, state.sDrvPath), i.queryDrvPath()); - } - - // Copy each output meant for installation. - DrvInfo::Outputs outputs = i.queryOutputs(true); - Value& vOutputs = *state.allocAttr(*v, state.sOutputs); - state.mkList(vOutputs, outputs.size()); - unsigned int m = 0; - for (auto& j : outputs) { - mkString(*((*vOutputs.list)[m++] = state.allocValue()), j.first); - Value& vOutputs = *state.allocAttr(*v, state.symbols.Create(j.first)); - state.mkAttrs(vOutputs, 2); - mkString(*state.allocAttr(vOutputs, state.sOutPath), j.second); - - /* This is only necessary when installing store paths, e.g., - `nix-env -i /nix/store/abcd...-foo'. */ - state.store->addTempRoot(j.second); - state.store->ensurePath(j.second); - - references.insert(j.second); - } - - // Copy the meta attributes. - Value& vMeta = *state.allocAttr(*v, state.sMeta); - state.mkAttrs(vMeta, 16); - StringSet metaNames = i.queryMetaNames(); - for (auto& j : metaNames) { - Value* v = i.queryMeta(j); - if (v == nullptr) { - continue; - } - vMeta.attrs->push_back(Attr(state.symbols.Create(j), v)); - } - - if (!drvPath.empty()) { - references.insert(drvPath); - } - } - - /* Also write a copy of the list of user environment elements to - the store; we need it for future modifications of the - environment. */ - Path manifestFile = state.store->addTextToStore( - "env-manifest.nix", (format("%1%") % manifest).str(), references); - - /* Get the environment builder expression. */ - Value envBuilder; - state.evalFile(state.findFile("nix/buildenv.nix"), envBuilder); - - /* Construct a Nix expression that calls the user environment - builder with the manifest as argument. */ - Value args; - Value topLevel; - state.mkAttrs(args, 3); - mkString(*state.allocAttr(args, state.symbols.Create("manifest")), - manifestFile, {manifestFile}); - args.attrs->push_back(Attr(state.symbols.Create("derivations"), &manifest)); - mkApp(topLevel, envBuilder, args); - - /* Evaluate it. */ - DLOG(INFO) << "evaluating user environment builder"; - state.forceValue(topLevel); - PathSet context; - Attr& aDrvPath(topLevel.attrs->find(state.sDrvPath)->second); - Path topLevelDrv = - state.coerceToPath(aDrvPath.pos != nullptr ? *(aDrvPath.pos) : noPos, - *(aDrvPath.value), context); - Attr& aOutPath(topLevel.attrs->find(state.sOutPath)->second); - Path topLevelOut = - state.coerceToPath(aOutPath.pos != nullptr ? 
*(aOutPath.pos) : noPos, - *(aOutPath.value), context); - - /* Realise the resulting store expression. */ - DLOG(INFO) << "building user environment"; - util::OkOrThrow(state.store->buildPaths( - std::cerr, {topLevelDrv}, state.repair != 0u ? bmRepair : bmNormal)); - - /* Switch the current user environment to the output path. */ - auto store2 = state.store.dynamic_pointer_cast(); - - if (store2) { - PathLocks lock; - lockProfile(lock, profile); - - Path lockTokenCur = optimisticLockProfile(profile); - if (lockToken != lockTokenCur) { - LOG(WARNING) << "profile '" << profile - << "' changed while we were busy; restarting"; - return false; - } - - DLOG(INFO) << "switching to new user environment"; - Path generation = - createGeneration(ref(store2), profile, topLevelOut); - switchLink(profile, generation); - } - - return true; -} - -} // namespace nix diff --git a/third_party/nix/src/nix-env/user-env.hh b/third_party/nix/src/nix-env/user-env.hh deleted file mode 100644 index 95919a6c87..0000000000 --- a/third_party/nix/src/nix-env/user-env.hh +++ /dev/null @@ -1,12 +0,0 @@ -#pragma once - -#include "libexpr/get-drvs.hh" - -namespace nix { - -DrvInfos queryInstalled(EvalState& state, const Path& userEnv); - -bool createUserEnv(EvalState& state, DrvInfos& elems, const Path& profile, - bool keepDerivations, const std::string& lockToken); - -} // namespace nix diff --git a/third_party/nix/src/nix-instantiate/nix-instantiate.cc b/third_party/nix/src/nix-instantiate/nix-instantiate.cc deleted file mode 100644 index 236037299d..0000000000 --- a/third_party/nix/src/nix-instantiate/nix-instantiate.cc +++ /dev/null @@ -1,219 +0,0 @@ -#include -#include - -#include "libexpr/attr-path.hh" -#include "libexpr/common-eval-args.hh" -#include "libexpr/eval-inline.hh" -#include "libexpr/eval.hh" -#include "libexpr/get-drvs.hh" -#include "libexpr/value-to-json.hh" -#include "libexpr/value-to-xml.hh" -#include "libmain/shared.hh" -#include "libstore/globals.hh" -#include "libstore/store-api.hh" -#include "libutil/util.hh" -#include "nix/legacy.hh" - -using namespace nix; - -static Path gcRoot; -static int rootNr = 0; -static bool indirectRoot = false; - -enum OutputKind { okPlain, okXML, okJSON }; - -void processExpr(EvalState& state, const Strings& attrPaths, bool parseOnly, - bool strict, Bindings* autoArgs, bool evalOnly, - OutputKind output, bool location, Expr* e) { - if (parseOnly) { - std::cout << format("%1%\n") % *e; - return; - } - - Value vRoot; - state.eval(e, vRoot); - - for (auto& i : attrPaths) { - Value& v(*findAlongAttrPath(state, i, autoArgs, vRoot)); - state.forceValue(v); - - PathSet context; - if (evalOnly) { - Value vRes; - if (autoArgs->empty()) { - vRes = v; - } else { - state.autoCallFunction(autoArgs, v, vRes); - } - if (output == okXML) { - printValueAsXML(state, strict, location, vRes, std::cout, context); - } else if (output == okJSON) { - printValueAsJSON(state, strict, vRes, std::cout, context); - } else { - if (strict) { - state.forceValueDeep(vRes); - } - std::cout << vRes << std::endl; - } - } else { - DrvInfos drvs; - getDerivations(state, v, "", autoArgs, drvs, false); - for (auto& i : drvs) { - Path drvPath = i.queryDrvPath(); - - /* What output do we want? */ - std::string outputName = i.queryOutputName(); - if (outputName.empty()) { - throw Error( - format("derivation '%1%' lacks an 'outputName' attribute ") % - drvPath); - } - - if (gcRoot.empty()) { - printGCWarning(); - } else { - Path rootName = indirectRoot ? 
absPath(gcRoot) : gcRoot; - if (++rootNr > 1) { - rootName += "-" + std::to_string(rootNr); - } - auto store2 = state.store.dynamic_pointer_cast(); - if (store2) { - drvPath = store2->addPermRoot(drvPath, rootName, indirectRoot); - } - } - std::cout << format("%1%%2%\n") % drvPath % - (outputName != "out" ? "!" + outputName : ""); - } - } - } -} - -static int _main(int argc, char** argv) { - { - Strings files; - bool readStdin = false; - bool fromArgs = false; - bool findFile = false; - bool evalOnly = false; - bool parseOnly = false; - bool traceFileAccess = false; - OutputKind outputKind = okPlain; - bool xmlOutputSourceLocation = true; - bool strict = false; - Strings attrPaths; - bool wantsReadWrite = false; - RepairFlag repair = NoRepair; - - struct MyArgs : LegacyArgs, MixEvalArgs { - using LegacyArgs::LegacyArgs; - }; - - MyArgs myArgs(baseNameOf(argv[0]), - [&](Strings::iterator& arg, const Strings::iterator& end) { - if (*arg == "--help") { - showManPage("nix-instantiate"); - } else if (*arg == "--version") { - printVersion("nix-instantiate"); - } else if (*arg == "-") { - readStdin = true; - } else if (*arg == "--expr" || *arg == "-E") { - fromArgs = true; - } else if (*arg == "--eval" || *arg == "--eval-only") { - evalOnly = true; - } else if (*arg == "--read-write-mode") { - wantsReadWrite = true; - } else if (*arg == "--parse" || *arg == "--parse-only") { - parseOnly = evalOnly = true; - } else if (*arg == "--find-file") { - findFile = true; - } else if (*arg == "--attr" || *arg == "-A") { - attrPaths.push_back(getArg(*arg, arg, end)); - } else if (*arg == "--add-root") { - gcRoot = getArg(*arg, arg, end); - } else if (*arg == "--indirect") { - indirectRoot = true; - } else if (*arg == "--xml") { - outputKind = okXML; - } else if (*arg == "--json") { - outputKind = okJSON; - } else if (*arg == "--no-location") { - xmlOutputSourceLocation = false; - } else if (*arg == "--strict") { - strict = true; - } else if (*arg == "--repair") { - repair = Repair; - } else if (*arg == "--dry-run") { - settings.readOnlyMode = true; - } else if (*arg == "--trace-file-access") { - traceFileAccess = true; - } else if (*arg == "--trace-file-access=true") { - traceFileAccess = true; - } else if (*arg == "--trace-file-access=false") { - traceFileAccess = false; - } else if (*arg == "--notrace-file-access") { - traceFileAccess = false; - } else if (*arg != "" && arg->at(0) == '-') { - return false; - } else { - files.push_back(*arg); - } - return true; - }); - - myArgs.parseCmdline(argvToStrings(argc, argv)); - - if (evalOnly && !wantsReadWrite) { - settings.readOnlyMode = true; - } - - auto store = openStore(); - - auto state = std::make_unique(myArgs.searchPath, store); - state->repair = repair; - if (traceFileAccess) { - state->EnableFileAccessTracing([](const Path& path) { - std::cerr << "trace: depot-scan: " << path << "\n"; - }); - } - - std::unique_ptr autoArgs = myArgs.getAutoArgs(*state); - - if (attrPaths.empty()) { - attrPaths = {""}; - } - - if (findFile) { - for (auto& i : files) { - Path p = state->findFile(i); - if (p.empty()) { - throw Error(format("unable to find '%1%'") % i); - } - std::cout << p << std::endl; - } - return 0; - } - - if (readStdin) { - Expr* e = state->parseStdin(); - processExpr(*state, attrPaths, parseOnly, strict, autoArgs.get(), - evalOnly, outputKind, xmlOutputSourceLocation, e); - } else if (files.empty() && !fromArgs) { - files.push_back("./default.nix"); - } - - for (auto& i : files) { - Expr* e = fromArgs - ? 
state->parseExprFromString(i, absPath(".")) - : state->parseExprFromFile(resolveExprPath( - state->checkSourcePath(lookupFileArg(*state, i)))); - processExpr(*state, attrPaths, parseOnly, strict, autoArgs.get(), - evalOnly, outputKind, xmlOutputSourceLocation, e); - } - - state->printStats(); - - return 0; - } -} - -static RegisterLegacyCommand s1("nix-instantiate", _main); diff --git a/third_party/nix/src/nix-prefetch-url/nix-prefetch-url.cc b/third_party/nix/src/nix-prefetch-url/nix-prefetch-url.cc deleted file mode 100644 index b61a38a7f1..0000000000 --- a/third_party/nix/src/nix-prefetch-url/nix-prefetch-url.cc +++ /dev/null @@ -1,253 +0,0 @@ -#include - -#include -#include -#include -#include -#include - -#include "libexpr/attr-path.hh" -#include "libexpr/common-eval-args.hh" -#include "libexpr/eval-inline.hh" -#include "libexpr/eval.hh" -#include "libmain/shared.hh" -#include "libstore/download.hh" -#include "libstore/store-api.hh" -#include "libutil/finally.hh" -#include "libutil/hash.hh" -#include "nix/legacy.hh" - -using namespace nix; - -/* If ‘uri’ starts with ‘mirror://’, then resolve it using the list of - mirrors defined in Nixpkgs. */ -std::string resolveMirrorUri(EvalState& state, std::string uri) { - if (std::string(uri, 0, 9) != "mirror://") { - return uri; - } - - std::string s(uri, 9); - auto p = s.find('/'); - if (p == std::string::npos) { - throw Error("invalid mirror URI"); - } - std::string mirrorName(s, 0, p); - - Value vMirrors; - state.eval( - state.parseExprFromString( - "import ", "."), - vMirrors); - state.forceAttrs(vMirrors); - - auto mirrorList = vMirrors.attrs->find(state.symbols.Create(mirrorName)); - if (mirrorList == vMirrors.attrs->end()) { - throw Error(format("unknown mirror name '%1%'") % mirrorName); - } - state.forceList(*mirrorList->second.value); - - if (mirrorList->second.value->listSize() < 1) { - throw Error(format("mirror URI '%1%' did not expand to anything") % uri); - } - - std::string mirror = state.forceString(*(*mirrorList->second.value->list)[0]); - return mirror + (absl::EndsWith(mirror, "/") ? 
"" : "/") + - std::string(s, p + 1); -} - -static int _main(int argc, char** argv) { - { - HashType ht = htSHA256; - std::vector args; - bool printPath = getEnv("PRINT_PATH").has_value(); - bool fromExpr = false; - std::string attrPath; - bool unpack = false; - std::string name; - - struct MyArgs : LegacyArgs, MixEvalArgs { - using LegacyArgs::LegacyArgs; - }; - - MyArgs myArgs(baseNameOf(argv[0]), - [&](Strings::iterator& arg, const Strings::iterator& end) { - if (*arg == "--help") { - showManPage("nix-prefetch-url"); - } else if (*arg == "--version") { - printVersion("nix-prefetch-url"); - } else if (*arg == "--type") { - std::string s = getArg(*arg, arg, end); - ht = parseHashType(s); - if (ht == htUnknown) { - throw UsageError(format("unknown hash type '%1%'") % s); - } - } else if (*arg == "--print-path") { - printPath = true; - } else if (*arg == "--attr" || *arg == "-A") { - fromExpr = true; - attrPath = getArg(*arg, arg, end); - } else if (*arg == "--unpack") { - unpack = true; - } else if (*arg == "--name") { - name = getArg(*arg, arg, end); - } else if (*arg != "" && arg->at(0) == '-') { - return false; - } else { - args.push_back(*arg); - } - return true; - }); - - myArgs.parseCmdline(argvToStrings(argc, argv)); - - if (args.size() > 2) { - throw UsageError("too many arguments"); - } - - auto store = openStore(); - auto state = std::make_unique(myArgs.searchPath, store); - - std::unique_ptr autoArgs = myArgs.getAutoArgs(*state); - - /* If -A is given, get the URI from the specified Nix - expression. */ - std::string uri; - if (!fromExpr) { - if (args.empty()) { - throw UsageError("you must specify a URI"); - } - uri = args[0]; - } else { - Path path = - resolveExprPath(lookupFileArg(*state, args.empty() ? "." : args[0])); - Value vRoot; - state->evalFile(path, vRoot); - Value& v(*findAlongAttrPath(*state, attrPath, autoArgs.get(), vRoot)); - state->forceAttrs(v); - - /* Extract the URI. */ - auto attr = v.attrs->find(state->symbols.Create("urls")); - if (attr == v.attrs->end()) { - throw Error("attribute set does not contain a 'urls' attribute"); - } - state->forceList(*attr->second.value); - if (attr->second.value->listSize() < 1) { - throw Error("'urls' list is empty"); - } - uri = state->forceString(*(*attr->second.value->list)[0]); - - /* Extract the hash mode. */ - attr = v.attrs->find(state->symbols.Create("outputHashMode")); - if (attr == v.attrs->end()) { - LOG(WARNING) << "this does not look like a fetchurl call"; - } else { - unpack = state->forceString(*attr->second.value) == "recursive"; - } - - /* Extract the name. */ - if (name.empty()) { - attr = v.attrs->find(state->symbols.Create("name")); - if (attr != v.attrs->end()) { - name = state->forceString(*attr->second.value); - } - } - } - - /* Figure out a name in the Nix store. */ - if (name.empty()) { - name = baseNameOf(uri); - } - if (name.empty()) { - throw Error(format("cannot figure out file name for '%1%'") % uri); - } - - /* If an expected hash is given, the file may already exist in - the store. 
*/ - Hash hash; - Hash expectedHash(ht); - Path storePath; - if (args.size() == 2) { - auto expectedHash_ = Hash::deserialize(args[1], ht); - expectedHash = Hash::unwrap_throw(expectedHash); - storePath = store->makeFixedOutputPath(unpack, expectedHash, name); - if (store->isValidPath(storePath)) { - hash = expectedHash; - } else { - storePath.clear(); - } - } - - if (storePath.empty()) { - auto actualUri = resolveMirrorUri(*state, uri); - - AutoDelete tmpDir(createTempDir(), true); - Path tmpFile = Path(tmpDir) + "/tmp"; - - /* Download the file. */ - { - AutoCloseFD fd( - open(tmpFile.c_str(), O_WRONLY | O_CREAT | O_EXCL, 0600)); - if (!fd) { - throw SysError("creating temporary file '%s'", tmpFile); - } - - FdSink sink(fd.get()); - - DownloadRequest req(actualUri); - req.decompress = false; - getDownloader()->download(std::move(req), sink); - } - - /* Optionally unpack the file. */ - if (unpack) { - LOG(INFO) << "unpacking..."; - Path unpacked = Path(tmpDir) + "/unpacked"; - createDirs(unpacked); - if (absl::EndsWith(baseNameOf(uri), ".zip")) { - runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked}); - } else { - // FIXME: this requires GNU tar for decompression. - runProgram("tar", true, {"xf", tmpFile, "-C", unpacked}); - } - - /* If the archive unpacks to a single file/directory, then use - that as the top-level. */ - auto entries = readDirectory(unpacked); - if (entries.size() == 1) { - tmpFile = unpacked + "/" + entries[0].name; - } else { - tmpFile = unpacked; - } - } - - /* FIXME: inefficient; addToStore() will also hash - this. */ - hash = unpack ? hashPath(ht, tmpFile).first : hashFile(ht, tmpFile); - - if (expectedHash != Hash(ht) && expectedHash != hash) { - throw Error(format("hash mismatch for '%1%'") % uri); - } - - /* Copy the file to the Nix store. FIXME: if RemoteStore - implemented addToStoreFromDump() and downloadFile() - supported a sink, we could stream the download directly - into the Nix store. 
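The unpacking step above promotes a single unpacked entry to the top level. A small standalone version of that convention using std::filesystem instead of Nix's readDirectory; the helper name is made up for the example:

#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// If an unpacked archive contains exactly one entry, treat that entry as the
// top level; otherwise use the unpack directory itself.
static fs::path topLevelOf(const fs::path& unpacked) {
  fs::path only;
  int count = 0;
  for (const auto& entry : fs::directory_iterator(unpacked)) {
    only = entry.path();
    if (++count > 1) break;
  }
  return count == 1 ? only : unpacked;
}

int main(int argc, char** argv) {
  if (argc != 2) {
    std::cerr << "usage: toplevel <unpack-dir>\n";
    return 1;
  }
  std::cout << topLevelOf(argv[1]).string() << "\n";
}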
*/ - storePath = store->addToStore(name, tmpFile, unpack, ht); - - assert(storePath == store->makeFixedOutputPath(unpack, hash, name)); - } - - if (!printPath) { - LOG(INFO) << "path is '" << storePath << "'"; - } - - std::cout << printHash16or32(hash) << std::endl; - if (printPath) { - std::cout << storePath << std::endl; - } - - return 0; - } -} - -static RegisterLegacyCommand s1("nix-prefetch-url", _main); diff --git a/third_party/nix/src/nix-store/dotgraph.cc b/third_party/nix/src/nix-store/dotgraph.cc deleted file mode 100644 index 2500b8f4b0..0000000000 --- a/third_party/nix/src/nix-store/dotgraph.cc +++ /dev/null @@ -1,141 +0,0 @@ -#include "nix-store/dotgraph.hh" - -#include - -#include "libstore/store-api.hh" -#include "libutil/util.hh" - -using std::cout; - -namespace nix { - -static std::string dotQuote(const std::string& s) { return "\"" + s + "\""; } - -static std::string nextColour() { - static int n = 0; - static std::string colours[] = {"black", "red", "green", - "blue", "magenta", "burlywood"}; - return colours[n++ % (sizeof(colours) / sizeof(std::string))]; -} - -static std::string makeEdge(const std::string& src, const std::string& dst) { - format f = format("%1% -> %2% [color = %3%];\n") % dotQuote(src) % - dotQuote(dst) % dotQuote(nextColour()); - return f.str(); -} - -static std::string makeNode(const std::string& id, const std::string& label, - const std::string& colour) { - format f = format( - "%1% [label = %2%, shape = box, " - "style = filled, fillcolor = %3%];\n") % - dotQuote(id) % dotQuote(label) % dotQuote(colour); - return f.str(); -} - -static std::string symbolicName(const std::string& path) { - std::string p = baseNameOf(path); - return std::string(p, p.find('-') + 1); -} - -#if 0 -std::string pathLabel(const Path & nePath, const std::string & elemPath) -{ - return (std::string) nePath + "-" + elemPath; -} - - -void printClosure(const Path & nePath, const StoreExpr & fs) -{ - PathSet workList(fs.closure.roots); - PathSet doneSet; - - for (PathSet::iterator i = workList.begin(); i != workList.end(); ++i) { - cout << makeEdge(pathLabel(nePath, *i), nePath); - } - - while (!workList.empty()) { - Path path = *(workList.begin()); - workList.erase(path); - - if (doneSet.find(path) == doneSet.end()) { - doneSet.insert(path); - - ClosureElems::const_iterator elem = fs.closure.elems.find(path); - if (elem == fs.closure.elems.end()) - throw Error(format("bad closure, missing path '%1%'") % path); - - for (StringSet::const_iterator i = elem->second.refs.begin(); - i != elem->second.refs.end(); ++i) - { - workList.insert(*i); - cout << makeEdge(pathLabel(nePath, *i), pathLabel(nePath, path)); - } - - cout << makeNode(pathLabel(nePath, path), - symbolicName(path), "#ff0000"); - } - } -} -#endif - -void printDotGraph(const ref& store, const PathSet& roots) { - PathSet workList(roots); - PathSet doneSet; - - cout << "digraph G {\n"; - - while (!workList.empty()) { - Path path = *(workList.begin()); - workList.erase(path); - - if (doneSet.find(path) != doneSet.end()) { - continue; - } - doneSet.insert(path); - - cout << makeNode(path, symbolicName(path), "#ff0000"); - - for (auto& p : store->queryPathInfo(path)->references) { - if (p != path) { - workList.insert(p); - cout << makeEdge(p, path); - } - } - -#if 0 - StoreExpr ne = storeExprFromPath(path); - - string label, colour; - - if (ne.type == StoreExpr::neDerivation) { - for (PathSet::iterator i = ne.derivation.inputs.begin(); - i != ne.derivation.inputs.end(); ++i) - { - workList.insert(*i); - cout << makeEdge(*i, 
path); - } - - label = "derivation"; - colour = "#00ff00"; - for (StringPairs::iterator i = ne.derivation.env.begin(); - i != ne.derivation.env.end(); ++i) - if (i->first == "name") { label = i->second; } - } - - else if (ne.type == StoreExpr::neClosure) { - label = ""; - colour = "#00ffff"; - printClosure(path, ne); - } - - else abort(); - - cout << makeNode(path, label, colour); -#endif - } - - cout << "}\n"; -} - -} // namespace nix diff --git a/third_party/nix/src/nix-store/dotgraph.hh b/third_party/nix/src/nix-store/dotgraph.hh deleted file mode 100644 index 40c2686854..0000000000 --- a/third_party/nix/src/nix-store/dotgraph.hh +++ /dev/null @@ -1,11 +0,0 @@ -#pragma once - -#include "libutil/types.hh" - -namespace nix { - -class Store; - -void printDotGraph(const ref& store, const PathSet& roots); - -} // namespace nix diff --git a/third_party/nix/src/nix-store/graphml.cc b/third_party/nix/src/nix-store/graphml.cc deleted file mode 100644 index ada4aaf6d0..0000000000 --- a/third_party/nix/src/nix-store/graphml.cc +++ /dev/null @@ -1,80 +0,0 @@ -#include "nix-store/graphml.hh" - -#include - -#include "libstore/derivations.hh" -#include "libstore/store-api.hh" -#include "libutil/util.hh" - -using std::cout; - -namespace nix { - -static inline const std::string& xmlQuote(const std::string& s) { - // Luckily, store paths shouldn't contain any character that needs to be - // quoted. - return s; -} - -static std::string symbolicName(const std::string& path) { - std::string p = baseNameOf(path); - return std::string(p, p.find('-') + 1); -} - -static std::string makeEdge(const std::string& src, const std::string& dst) { - return fmt(" \n", xmlQuote(src), - xmlQuote(dst)); -} - -static std::string makeNode(const ValidPathInfo& info) { - return fmt( - " \n" - " %2%\n" - " %3%\n" - " %4%\n" - " \n", - info.path, info.narSize, symbolicName(info.path), - (isDerivation(info.path) ? 
"derivation" : "output-path")); -} - -void printGraphML(const ref& store, const PathSet& roots) { - PathSet workList(roots); - PathSet doneSet; - std::pair ret; - - cout << "\n" - << "\n" - << "" - << "" - << "" - << "\n"; - - while (!workList.empty()) { - Path path = *(workList.begin()); - workList.erase(path); - - ret = doneSet.insert(path); - if (!ret.second) { - continue; - } - - ValidPathInfo info = *(store->queryPathInfo(path)); - cout << makeNode(info); - - for (auto& p : store->queryPathInfo(path)->references) { - if (p != path) { - workList.insert(p); - cout << makeEdge(path, p); - } - } - } - - cout << "\n"; - cout << "\n"; -} - -} // namespace nix diff --git a/third_party/nix/src/nix-store/graphml.hh b/third_party/nix/src/nix-store/graphml.hh deleted file mode 100644 index be07904d0f..0000000000 --- a/third_party/nix/src/nix-store/graphml.hh +++ /dev/null @@ -1,11 +0,0 @@ -#pragma once - -#include "libutil/types.hh" - -namespace nix { - -class Store; - -void printGraphML(const ref& store, const PathSet& roots); - -} // namespace nix diff --git a/third_party/nix/src/nix-store/nix-store.cc b/third_party/nix/src/nix-store/nix-store.cc deleted file mode 100644 index 532f60b7b7..0000000000 --- a/third_party/nix/src/nix-store/nix-store.cc +++ /dev/null @@ -1,1302 +0,0 @@ -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "libmain/shared.hh" -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/local-store.hh" -#include "libstore/serve-protocol.hh" -#include "libstore/worker-protocol.hh" -#include "libutil/archive.hh" -#include "libutil/monitor-fd.hh" -#include "libutil/status.hh" -#include "libutil/util.hh" -#include "nix-store/dotgraph.hh" -#include "nix-store/graphml.hh" -#include "nix/legacy.hh" - -#if HAVE_SODIUM -#include -#endif - -using namespace nix; -using std::cin; -using std::cout; - -// TODO(tazjin): clang-tidy's performance lints don't like this, but -// the automatic fixes fail (it seems that some of the ops want to own -// the args for whatever reason) -using Operation = void (*)(Strings, Strings); - -static Path gcRoot; -static int rootNr = 0; -static bool indirectRoot = false; -static bool noOutput = false; -static std::shared_ptr store; - -ref ensureLocalStore() { - auto store2 = std::dynamic_pointer_cast(store); - if (!store2) { - throw Error("you don't have sufficient rights to use this command"); - } - return ref(store2); -} - -static Path useDeriver(Path path) { - if (isDerivation(path)) { - return path; - } - Path drvPath = store->queryPathInfo(path)->deriver; - if (drvPath.empty()) { - throw Error(format("deriver of path '%1%' is not known") % path); - } - return drvPath; -} - -/* Realise the given path. For a derivation that means build it; for - other paths it means ensure their validity. 
*/ -static PathSet realisePath(Path path, bool build = true) { - DrvPathWithOutputs p = parseDrvPathWithOutputs(path); - - auto store2 = std::dynamic_pointer_cast(store); - - if (isDerivation(p.first)) { - if (build) { - util::OkOrThrow(store->buildPaths(std::cerr, {path})); - } - Derivation drv = store->derivationFromPath(p.first); - rootNr++; - - if (p.second.empty()) { - for (auto& i : drv.outputs) { - p.second.insert(i.first); - } - } - - PathSet outputs; - for (auto& j : p.second) { - auto i = drv.outputs.find(j); - if (i == drv.outputs.end()) { - throw Error( - format("derivation '%1%' does not have an output named '%2%'") % - p.first % j); - } - Path outPath = i->second.path; - if (store2) { - if (gcRoot.empty()) { - printGCWarning(); - } else { - Path rootName = gcRoot; - if (rootNr > 1) { - rootName += "-" + std::to_string(rootNr); - } - if (i->first != "out") { - rootName += "-" + i->first; - } - outPath = store2->addPermRoot(outPath, rootName, indirectRoot); - } - } - outputs.insert(outPath); - } - return outputs; - } - - if (build) { - store->ensurePath(path); - } else if (!store->isValidPath(path)) { - throw Error(format("path '%1%' does not exist and cannot be created") % - path); - } - if (store2) { - if (gcRoot.empty()) { - printGCWarning(); - } else { - Path rootName = gcRoot; - rootNr++; - if (rootNr > 1) { - rootName += "-" + std::to_string(rootNr); - } - path = store2->addPermRoot(path, rootName, indirectRoot); - } - } - return {path}; -} - -/* Realise the given paths. */ -static void opRealise(Strings opFlags, Strings opArgs) { - bool dryRun = false; - BuildMode buildMode = bmNormal; - bool ignoreUnknown = false; - - for (auto& i : opFlags) { - if (i == "--dry-run") { - dryRun = true; - } else if (i == "--repair") { - buildMode = bmRepair; - } else if (i == "--check") { - buildMode = bmCheck; - } else if (i == "--ignore-unknown") { - ignoreUnknown = true; - } else { - throw UsageError(format("unknown flag '%1%'") % i); - } - } - - Paths paths; - for (auto& i : opArgs) { - DrvPathWithOutputs p = parseDrvPathWithOutputs(i); - paths.push_back(makeDrvPathWithOutputs( - store->followLinksToStorePath(p.first), p.second)); - } - - unsigned long long downloadSize; - unsigned long long narSize; - PathSet willBuild; - PathSet willSubstitute; - PathSet unknown; - store->queryMissing(PathSet(paths.begin(), paths.end()), willBuild, - willSubstitute, unknown, downloadSize, narSize); - - if (ignoreUnknown) { - Paths paths2; - for (auto& i : paths) { - if (unknown.find(i) == unknown.end()) { - paths2.push_back(i); - } - } - paths = paths2; - unknown = PathSet(); - } - - if (settings.printMissing) { - printMissing(ref(store), willBuild, willSubstitute, unknown, - downloadSize, narSize); - } - - if (dryRun) { - return; - } - - /* Build all paths at the same time to exploit parallelism. */ - util::OkOrThrow(store->buildPaths( - std::cerr, PathSet(paths.begin(), paths.end()), buildMode)); - - if (!ignoreUnknown) { - for (auto& i : paths) { - PathSet paths = realisePath(i, false); - if (!noOutput) { - for (auto& j : paths) { - cout << format("%1%\n") % j; - } - } - } - } -} - -/* Add files to the Nix store and print the resulting paths. */ -static void opAdd(Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError("unknown flag"); - } - - for (auto& i : opArgs) { - cout << format("%1%\n") % store->addToStore(baseNameOf(i), i); - } -} - -/* Preload the output of a fixed-output derivation into the Nix - store. 
*/ -static void opAddFixed(Strings opFlags, Strings opArgs) { - bool recursive = false; - - for (auto& i : opFlags) { - if (i == "--recursive") { - recursive = true; - } else { - throw UsageError(format("unknown flag '%1%'") % i); - } - } - - if (opArgs.empty()) { - throw UsageError("first argument must be hash algorithm"); - } - - HashType hashAlgo = parseHashType(opArgs.front()); - opArgs.pop_front(); - - for (auto& i : opArgs) { - cout << format("%1%\n") % - store->addToStore(baseNameOf(i), i, recursive, hashAlgo); - } -} - -/* Hack to support caching in `nix-prefetch-url'. */ -static void opPrintFixedPath(Strings opFlags, Strings opArgs) { - bool recursive = false; - - for (auto i : opFlags) { - if (i == "--recursive") { - recursive = true; - } else { - throw UsageError(format("unknown flag '%1%'") % i); - } - } - - if (opArgs.size() != 3) { - throw UsageError(format("'--print-fixed-path' requires three arguments")); - } - - auto i = opArgs.begin(); - HashType hashAlgo = parseHashType(*i++); - std::string hash = *i++; - std::string name = *i++; - - auto hash_ = Hash::deserialize(hash, hashAlgo); - Hash::unwrap_throw(hash_); - - cout << absl::StrCat(store->makeFixedOutputPath(recursive, *hash_, name), - "\n"); -} - -static PathSet maybeUseOutputs(const Path& storePath, bool useOutput, - bool forceRealise) { - if (forceRealise) { - realisePath(storePath); - } - if (useOutput && isDerivation(storePath)) { - Derivation drv = store->derivationFromPath(storePath); - PathSet outputs; - for (auto& i : drv.outputs) { - outputs.insert(i.second.path); - } - return outputs; - } - return {storePath}; -} - -/* Some code to print a tree representation of a derivation dependency - graph. Topological sorting is used to keep the tree relatively - flat. */ - -const std::string treeConn = "+---"; -const std::string treeLine = "| "; -const std::string treeNull = " "; - -static void printTree(const Path& path, const std::string& firstPad, - const std::string& tailPad, PathSet& done) { - if (done.find(path) != done.end()) { - cout << format("%1%%2% [...]\n") % firstPad % path; - return; - } - done.insert(path); - - cout << format("%1%%2%\n") % firstPad % path; - - auto references = store->queryPathInfo(path)->references; - - /* Topologically sort under the relation A < B iff A \in - closure(B). That is, if derivation A is an (possibly indirect) - input of B, then A is printed first. This has the effect of - flattening the tree, preventing deeply nested structures. */ - Paths sorted = store->topoSortPaths(references); - reverse(sorted.begin(), sorted.end()); - - for (auto i = sorted.begin(); i != sorted.end(); ++i) { - auto j = i; - ++j; - printTree(*i, tailPad + treeConn, - j == sorted.end() ? tailPad + treeNull : tailPad + treeLine, - done); - } -} - -/* Perform various sorts of queries. 
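printTree above keeps its output relatively flat by topologically sorting each node's references before recursing, so referrers are printed before the things they depend on. A toy illustration of that ordering on a made-up dependency graph (this is not the store API):

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy dependency graph: edges point from a path to its references.
static const std::map<std::string, std::vector<std::string>> refs = {
    {"app", {"libfoo", "libbar"}},
    {"libfoo", {"libc"}},
    {"libbar", {"libc"}},
    {"libc", {}},
};

// Post-order DFS yields a topological order (dependencies first).
static void visit(const std::string& node, std::map<std::string, bool>& seen,
                  std::vector<std::string>& order) {
  if (seen[node]) return;
  seen[node] = true;
  for (const auto& dep : refs.at(node)) visit(dep, seen, order);
  order.push_back(node);
}

int main() {
  std::map<std::string, bool> seen;
  std::vector<std::string> order;
  visit("app", seen, order);

  // Reversing the order prints referrers before their references, which is
  // what keeps the printed tree relatively flat.
  for (auto it = order.rbegin(); it != order.rend(); ++it) std::cout << *it << "\n";
}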
*/ -static void opQuery(Strings opFlags, Strings opArgs) { - enum QueryType { - qDefault, - qOutputs, - qRequisites, - qReferences, - qReferrers, - qReferrersClosure, - qDeriver, - qBinding, - qHash, - qSize, - qTree, - qGraph, - qGraphML, - qResolve, - qRoots - }; - QueryType query = qDefault; - bool useOutput = false; - bool includeOutputs = false; - bool forceRealise = false; - std::string bindingName; - - for (auto& i : opFlags) { - QueryType prev = query; - if (i == "--outputs") { - query = qOutputs; - } else if (i == "--requisites" || i == "-R") { - query = qRequisites; - } else if (i == "--references") { - query = qReferences; - } else if (i == "--referrers" || i == "--referers") { - query = qReferrers; - } else if (i == "--referrers-closure" || i == "--referers-closure") { - query = qReferrersClosure; - } else if (i == "--deriver" || i == "-d") { - query = qDeriver; - } else if (i == "--binding" || i == "-b") { - if (opArgs.empty()) { - throw UsageError("expected binding name"); - } - bindingName = opArgs.front(); - opArgs.pop_front(); - query = qBinding; - } else if (i == "--hash") { - query = qHash; - } else if (i == "--size") { - query = qSize; - } else if (i == "--tree") { - query = qTree; - } else if (i == "--graph") { - query = qGraph; - } else if (i == "--graphml") { - query = qGraphML; - } else if (i == "--resolve") { - query = qResolve; - } else if (i == "--roots") { - query = qRoots; - } else if (i == "--use-output" || i == "-u") { - useOutput = true; - } else if (i == "--force-realise" || i == "--force-realize" || i == "-f") { - forceRealise = true; - } else if (i == "--include-outputs") { - includeOutputs = true; - } else { - throw UsageError(format("unknown flag '%1%'") % i); - } - if (prev != qDefault && prev != query) { - throw UsageError(format("query type '%1%' conflicts with earlier flag") % - i); - } - } - - if (query == qDefault) { - query = qOutputs; - } - - RunPager pager; - - switch (query) { - case qOutputs: { - for (auto& i : opArgs) { - i = store->followLinksToStorePath(i); - if (forceRealise) { - realisePath(i); - } - Derivation drv = store->derivationFromPath(i); - for (auto& j : drv.outputs) { - cout << format("%1%\n") % j.second.path; - } - } - break; - } - - case qRequisites: - case qReferences: - case qReferrers: - case qReferrersClosure: { - PathSet paths; - for (auto& i : opArgs) { - PathSet ps = maybeUseOutputs(store->followLinksToStorePath(i), - useOutput, forceRealise); - for (auto& j : ps) { - if (query == qRequisites) { - store->computeFSClosure(j, paths, false, includeOutputs); - } else if (query == qReferences) { - for (auto& p : store->queryPathInfo(j)->references) { - paths.insert(p); - } - } else if (query == qReferrers) { - store->queryReferrers(j, paths); - } else if (query == qReferrersClosure) { - store->computeFSClosure(j, paths, true); - } - } - } - Paths sorted = store->topoSortPaths(paths); - for (auto i = sorted.rbegin(); i != sorted.rend(); ++i) { - cout << format("%s\n") % *i; - } - break; - } - - case qDeriver: - for (auto& i : opArgs) { - Path deriver = - store->queryPathInfo(store->followLinksToStorePath(i))->deriver; - cout << format("%1%\n") % - (deriver.empty() ? 
"unknown-deriver" : deriver); - } - break; - - case qBinding: - for (auto& i : opArgs) { - Path path = useDeriver(store->followLinksToStorePath(i)); - Derivation drv = store->derivationFromPath(path); - auto j = drv.env.find(bindingName); - if (j == drv.env.end()) { - throw Error( - format( - "derivation '%1%' has no environment binding named '%2%'") % - path % bindingName); - } - cout << format("%1%\n") % j->second; - } - break; - - case qHash: - case qSize: - for (auto& i : opArgs) { - PathSet paths = maybeUseOutputs(store->followLinksToStorePath(i), - useOutput, forceRealise); - for (auto& j : paths) { - auto info = store->queryPathInfo(j); - if (query == qHash) { - assert(info->narHash.type == htSHA256); - cout << fmt("%s\n", info->narHash.to_string(Base32)); - } else if (query == qSize) { - cout << fmt("%d\n", info->narSize); - } - } - } - break; - - case qTree: { - PathSet done; - for (auto& i : opArgs) { - printTree(store->followLinksToStorePath(i), "", "", done); - } - break; - } - - case qGraph: { - PathSet roots; - for (auto& i : opArgs) { - PathSet paths = maybeUseOutputs(store->followLinksToStorePath(i), - useOutput, forceRealise); - roots.insert(paths.begin(), paths.end()); - } - printDotGraph(ref(store), roots); - break; - } - - case qGraphML: { - PathSet roots; - for (auto& i : opArgs) { - PathSet paths = maybeUseOutputs(store->followLinksToStorePath(i), - useOutput, forceRealise); - roots.insert(paths.begin(), paths.end()); - } - printGraphML(ref(store), roots); - break; - } - - case qResolve: { - for (auto& i : opArgs) { - cout << format("%1%\n") % store->followLinksToStorePath(i); - } - break; - } - - case qRoots: { - PathSet referrers; - for (auto& i : opArgs) { - store->computeFSClosure( - maybeUseOutputs(store->followLinksToStorePath(i), useOutput, - forceRealise), - referrers, true, settings.gcKeepOutputs, - settings.gcKeepDerivations); - } - Roots roots = store->findRoots(false); - for (auto& [target, links] : roots) { - if (referrers.find(target) != referrers.end()) { - for (auto& link : links) { - cout << format("%1% -> %2%\n") % link % target; - } - } - } - break; - } - - default: - abort(); - } -} - -static void opPrintEnv(Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError("unknown flag"); - } - if (opArgs.size() != 1) { - throw UsageError("'--print-env' requires one derivation store path"); - } - - Path drvPath = opArgs.front(); - Derivation drv = store->derivationFromPath(drvPath); - - /* Print each environment variable in the derivation in a format - that can be sourced by the shell. */ - for (auto& i : drv.env) { - cout << format("export %1%; %1%=%2%\n") % i.first % shellEscape(i.second); - } - - /* Also output the arguments. This doesn't preserve whitespace in - arguments. 
*/ - cout << "export _args; _args='"; - bool first = true; - for (auto& i : drv.args) { - if (!first) { - cout << ' '; - } - first = false; - cout << shellEscape(i); - } - cout << "'\n"; -} - -static void opReadLog(Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError("unknown flag"); - } - - RunPager pager; - - for (auto& i : opArgs) { - auto path = store->followLinksToStorePath(i); - auto log = store->getBuildLog(path); - if (!log) { - throw Error("build log of derivation '%s' is not available", path); - } - std::cout << *log; - } -} - -static void opDumpDB(Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError("unknown flag"); - } - if (!opArgs.empty()) { - for (auto& i : opArgs) { - i = store->followLinksToStorePath(i); - } - for (auto& i : opArgs) { - cout << store->makeValidityRegistration({i}, true, true); - } - } else { - PathSet validPaths = store->queryAllValidPaths(); - for (auto& i : validPaths) { - cout << store->makeValidityRegistration({i}, true, true); - } - } -} - -static void registerValidity(bool reregister, bool hashGiven, - bool canonicalise) { - ValidPathInfos infos; - - while (true) { - ValidPathInfo info = decodeValidPathInfo(cin, hashGiven); - if (info.path.empty()) { - break; - } - if (!store->isValidPath(info.path) || reregister) { - /* !!! races */ - if (canonicalise) { - canonicalisePathMetaData(info.path, -1); - } - if (!hashGiven) { - HashResult hash = hashPath(htSHA256, info.path); - info.narHash = hash.first; - info.narSize = hash.second; - } - infos.push_back(info); - } - } - - ensureLocalStore()->registerValidPaths(infos); -} - -static void opLoadDB(Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError("unknown flag"); - } - if (!opArgs.empty()) { - throw UsageError("no arguments expected"); - } - registerValidity(true, true, false); -} - -static void opRegisterValidity(Strings opFlags, Strings opArgs) { - bool reregister = false; // !!! maybe this should be the default - bool hashGiven = false; - - for (auto& i : opFlags) { - if (i == "--reregister") { - reregister = true; - } else if (i == "--hash-given") { - hashGiven = true; - } else { - throw UsageError(format("unknown flag '%1%'") % i); - } - } - - if (!opArgs.empty()) { - throw UsageError("no arguments expected"); - } - - registerValidity(reregister, hashGiven, true); -} - -static void opCheckValidity(Strings opFlags, Strings opArgs) { - bool printInvalid = false; - - for (auto& i : opFlags) { - if (i == "--print-invalid") { - printInvalid = true; - } else { - throw UsageError(format("unknown flag '%1%'") % i); - } - } - - for (auto& i : opArgs) { - Path path = store->followLinksToStorePath(i); - if (!store->isValidPath(path)) { - if (printInvalid) { - cout << format("%1%\n") % path; - } else { - throw Error(format("path '%1%' is not valid") % path); - } - } - } -} - -static void opGC(Strings opFlags, Strings opArgs) { - bool printRoots = false; - GCOptions options; - options.action = GCOptions::gcDeleteDead; - - GCResults results; - - /* Do what? */ - for (auto i = opFlags.begin(); i != opFlags.end(); ++i) { - if (*i == "--print-roots") { - printRoots = true; - } else if (*i == "--print-live") { - options.action = GCOptions::gcReturnLive; - } else if (*i == "--print-dead") { - options.action = GCOptions::gcReturnDead; - } else if (*i == "--delete") { - options.action = GCOptions::gcDeleteDead; - } else if (*i == "--max-freed") { - auto maxFreed = getIntArg(*i, i, opFlags.end(), true); - options.maxFreed = maxFreed >= 0 ? 
maxFreed : 0; - } else { - throw UsageError(format("bad sub-operation '%1%' in GC") % *i); - } - } - - if (!opArgs.empty()) { - throw UsageError("no arguments expected"); - } - - if (printRoots) { - Roots roots = store->findRoots(false); - std::set> roots2; - // Transpose and sort the roots. - for (auto& [target, links] : roots) { - for (auto& link : links) { - roots2.emplace(link, target); - } - } - for (auto& [link, target] : roots2) { - std::cout << link << " -> " << target << "\n"; - } - } - - else { - PrintFreed freed(options.action == GCOptions::gcDeleteDead, results); - store->collectGarbage(options, results); - - if (options.action != GCOptions::gcDeleteDead) { - for (auto& i : results.paths) { - cout << i << std::endl; - } - } - } -} - -/* Remove paths from the Nix store if possible (i.e., if they do not - have any remaining referrers and are not reachable from any GC - roots). */ -static void opDelete(Strings opFlags, Strings opArgs) { - GCOptions options; - options.action = GCOptions::gcDeleteSpecific; - - for (auto& i : opFlags) { - if (i == "--ignore-liveness") { - options.ignoreLiveness = true; - } else { - throw UsageError(format("unknown flag '%1%'") % i); - } - } - - for (auto& i : opArgs) { - options.pathsToDelete.insert(store->followLinksToStorePath(i)); - } - - GCResults results; - PrintFreed freed(true, results); - store->collectGarbage(options, results); -} - -/* Dump a path as a Nix archive. The archive is written to standard - output. */ -static void opDump(Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError("unknown flag"); - } - if (opArgs.size() != 1) { - throw UsageError("only one argument allowed"); - } - - FdSink sink(STDOUT_FILENO); - std::string path = *opArgs.begin(); - dumpPath(path, sink); - sink.flush(); -} - -/* Restore a value from a Nix archive. The archive is read from - standard input. */ -static void opRestore(Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError("unknown flag"); - } - if (opArgs.size() != 1) { - throw UsageError("only one argument allowed"); - } - - FdSource source(STDIN_FILENO); - restorePath(*opArgs.begin(), source); -} - -static void opExport(Strings opFlags, Strings opArgs) { - for (auto& i : opFlags) { - throw UsageError(format("unknown flag '%1%'") % i); - } - - for (auto& i : opArgs) { - i = store->followLinksToStorePath(i); - } - - FdSink sink(STDOUT_FILENO); - store->exportPaths(opArgs, sink); - sink.flush(); -} - -static void opImport(Strings opFlags, Strings opArgs) { - for (auto& i : opFlags) { - throw UsageError(format("unknown flag '%1%'") % i); - } - - if (!opArgs.empty()) { - throw UsageError("no arguments expected"); - } - - FdSource source(STDIN_FILENO); - Paths paths = store->importPaths(source, nullptr, NoCheckSigs); - - for (auto& i : paths) { - cout << format("%1%\n") % i << std::flush; - } -} - -/* Initialise the Nix databases. */ -static void opInit(Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError("unknown flag"); - } - if (!opArgs.empty()) { - throw UsageError("no arguments expected"); - } - /* Doesn't do anything right now; database tables are initialised - automatically. */ -} - -/* Verify the consistency of the Nix environment. 
*/ -static void opVerify(Strings opFlags, Strings opArgs) { - if (!opArgs.empty()) { - throw UsageError("no arguments expected"); - } - - bool checkContents = false; - RepairFlag repair = NoRepair; - - for (auto& i : opFlags) { - if (i == "--check-contents") { - checkContents = true; - } else if (i == "--repair") { - repair = Repair; - } else { - throw UsageError(format("unknown flag '%1%'") % i); - } - } - - if (store->verifyStore(checkContents, repair)) { - LOG(WARNING) << "not all errors were fixed"; - throw Exit(1); - } -} - -/* Verify whether the contents of the given store path have not changed. */ -static void opVerifyPath(Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError("no flags expected"); - } - - int status = 0; - - for (auto& i : opArgs) { - Path path = store->followLinksToStorePath(i); - LOG(INFO) << "checking path '" << path << "'..."; - auto info = store->queryPathInfo(path); - HashSink sink(info->narHash.type); - store->narFromPath(path, sink); - auto current = sink.finish(); - if (current.first != info->narHash) { - LOG(ERROR) << "path '" << path << "' was modified! expected hash '" - << info->narHash.to_string() << "', got '" - << current.first.to_string() << "'"; - status = 1; - } - } - - throw Exit(status); -} - -/* Repair the contents of the given path by redownloading it using a - substituter (if available). */ -static void opRepairPath(Strings opFlags, Strings opArgs) { - if (!opFlags.empty()) { - throw UsageError("no flags expected"); - } - - for (auto& i : opArgs) { - Path path = store->followLinksToStorePath(i); - ensureLocalStore()->repairPath(path); - } -} - -/* Optimise the disk space usage of the Nix store by hard-linking - files with the same contents. */ -static void opOptimise(Strings opFlags, Strings opArgs) { - if (!opArgs.empty() || !opFlags.empty()) { - throw UsageError("no arguments expected"); - } - - store->optimiseStore(); -} - -/* Serve the nix store in a way usable by a restricted ssh user. */ -static void opServe(Strings opFlags, Strings opArgs) { - bool writeAllowed = false; - for (auto& i : opFlags) { - if (i == "--write") { - writeAllowed = true; - } else { - throw UsageError(format("unknown flag '%1%'") % i); - } - } - - if (!opArgs.empty()) { - throw UsageError("no arguments expected"); - } - - FdSource in(STDIN_FILENO); - FdSink out(STDOUT_FILENO); - - /* Exchange the greeting. */ - unsigned int magic = readInt(in); - if (magic != SERVE_MAGIC_1) { - throw Error("protocol mismatch"); - } - out << SERVE_MAGIC_2 << SERVE_PROTOCOL_VERSION; - out.flush(); - unsigned int clientVersion = readInt(in); - - auto getBuildSettings = [&]() { - // FIXME: changing options here doesn't work if we're - // building through the daemon. 
- settings.keepLog = false; - settings.useSubstitutes = false; - settings.maxSilentTime = readInt(in); - settings.buildTimeout = readInt(in); - if (GET_PROTOCOL_MINOR(clientVersion) >= 2) { - settings.maxLogSize = readNum<unsigned long long>(in); - } - if (GET_PROTOCOL_MINOR(clientVersion) >= 3) { - settings.buildRepeat = readInt(in); - settings.enforceDeterminism = readInt(in) != 0u; - settings.runDiffHook = true; - } - settings.printRepeatedBuilds = false; - }; - - while (true) { - ServeCommand cmd; - try { - cmd = static_cast<ServeCommand>(readInt(in)); - } catch (EndOfFile& e) { - break; - } - - switch (cmd) { - case cmdQueryValidPaths: { - bool lock = readInt(in) != 0u; - bool substitute = readInt(in) != 0u; - auto paths = readStorePaths<PathSet>(*store, in); - if (lock && writeAllowed) { - for (auto& path : paths) { - store->addTempRoot(path); - } - } - - /* If requested, substitute missing paths. This - implements nix-copy-closure's --use-substitutes - flag. */ - if (substitute && writeAllowed) { - /* Filter out .drv files (we don't want to build anything). */ - PathSet paths2; - for (auto& path : paths) { - if (!isDerivation(path)) { - paths2.insert(path); - } - } - unsigned long long downloadSize; - unsigned long long narSize; - PathSet willBuild; - PathSet willSubstitute; - PathSet unknown; - store->queryMissing(PathSet(paths2.begin(), paths2.end()), willBuild, - willSubstitute, unknown, downloadSize, narSize); - /* FIXME: should use ensurePath(), but it only - does one path at a time. */ - if (!willSubstitute.empty()) { - try { - util::OkOrThrow(store->buildPaths(std::cerr, willSubstitute)); - } catch (Error& e) { - LOG(WARNING) << e.msg(); - } - } - } - - out << store->queryValidPaths(paths); - break; - } - - case cmdQueryPathInfos: { - auto paths = readStorePaths<PathSet>(*store, in); - // !!! Maybe we want a queryPathInfos? - for (auto& i : paths) { - try { - auto info = store->queryPathInfo(i); - out << info->path << info->deriver << info->references; - // !!! Maybe we want compression? - out << info->narSize // downloadSize - << info->narSize; - if (GET_PROTOCOL_MINOR(clientVersion) >= 4) { - out << (info->narHash ? info->narHash.to_string() : "") - << info->ca << info->sigs; - } - } catch (InvalidPath&) { - } - } - out << ""; - break; - } - - case cmdDumpStorePath: - store->narFromPath(readStorePath(*store, in), out); - break; - - case cmdImportPaths: { - if (!writeAllowed) { - throw Error("importing paths is not allowed"); - } - store->importPaths(in, nullptr, - NoCheckSigs); // FIXME: should we skip sig checking? - out << 1; // indicate success - break; - } - - case cmdExportPaths: { - readInt(in); // obsolete - store->exportPaths(readStorePaths<Paths>(*store, in), out); - break; - } - - case cmdBuildPaths: { - if (!writeAllowed) { - throw Error("building paths is not allowed"); - } - auto paths = readStorePaths<PathSet>(*store, in); - - getBuildSettings(); - - try { - MonitorFdHup monitor(in.fd); - util::OkOrThrow(store->buildPaths(std::cerr, paths)); - out << 0; - } catch (Error& e) { - assert(e.status); - out << e.status << e.msg(); - } - break; - } - - case cmdBuildDerivation: { /* Used by hydra-queue-runner. 
*/ - - if (!writeAllowed) { - throw Error("building paths is not allowed"); - } - - Path drvPath = readStorePath(*store, in); // informational only - BasicDerivation drv; - readDerivation(in, *store, drv); - - getBuildSettings(); - - MonitorFdHup monitor(in.fd); - auto status = store->buildDerivation(std::cerr, drvPath, drv); - - out << status.status << status.errorMsg; - - if (GET_PROTOCOL_MINOR(clientVersion) >= 3) { - out << status.timesBuilt - << static_cast(status.isNonDeterministic) - << status.startTime << status.stopTime; - } - - break; - } - - case cmdQueryClosure: { - bool includeOutputs = readInt(in) != 0u; - PathSet closure; - store->computeFSClosure(readStorePaths(*store, in), closure, - false, includeOutputs); - out << closure; - break; - } - - case cmdAddToStoreNar: { - if (!writeAllowed) { - throw Error("importing paths is not allowed"); - } - - ValidPathInfo info; - info.path = readStorePath(*store, in); - in >> info.deriver; - if (!info.deriver.empty()) { - store->assertStorePath(info.deriver); - } - auto hash_ = Hash::deserialize(readString(in), htSHA256); - info.narHash = Hash::unwrap_throw(hash_); - info.references = readStorePaths(*store, in); - in >> info.registrationTime >> info.narSize >> info.ultimate; - info.sigs = readStrings(in); - in >> info.ca; - - if (info.narSize == 0) { - throw Error("narInfo is too old and missing the narSize field"); - } - - SizedSource sizedSource(in, info.narSize); - - store->addToStore(info, sizedSource, NoRepair, NoCheckSigs); - - // consume all the data that has been sent before continuing. - sizedSource.drainAll(); - - out << 1; // indicate success - - break; - } - - default: - throw Error(format("unknown serve command %1%") % cmd); - } - - out.flush(); - } -} - -static void opGenerateBinaryCacheKey(Strings opFlags, Strings opArgs) { - for (auto& i : opFlags) { - throw UsageError(format("unknown flag '%1%'") % i); - } - - if (opArgs.size() != 3) { - throw UsageError("three arguments expected"); - } - auto i = opArgs.begin(); - std::string keyName = *i++; - std::string secretKeyFile = *i++; - std::string publicKeyFile = *i++; - -#if HAVE_SODIUM - if (sodium_init() == -1) { - throw Error("could not initialise libsodium"); - } - - unsigned char pk[crypto_sign_PUBLICKEYBYTES]; - unsigned char sk[crypto_sign_SECRETKEYBYTES]; - if (crypto_sign_keypair(pk, sk) != 0) { - throw Error("key generation failed"); - } - - writeFile(publicKeyFile, - keyName + ":" + - absl::Base64Escape(std::string(reinterpret_cast(pk), - crypto_sign_PUBLICKEYBYTES))); - umask(0077); - writeFile(secretKeyFile, - keyName + ":" + - absl::Base64Escape(std::string(reinterpret_cast(sk), - crypto_sign_SECRETKEYBYTES))); -#else - throw Error( - "Nix was not compiled with libsodium, required for signed binary cache " - "support"); -#endif -} - -static void opVersion(Strings opFlags, Strings opArgs) { - printVersion("nix-store"); -} - -/* Scan the arguments; find the operation, set global flags, put all - other flags in a list, and put all other arguments in another - list. 
*/ -static int _main(int argc, char** argv) { - { - Strings opFlags; - Strings opArgs; - Operation op = nullptr; - - parseCmdLine(argc, argv, - [&](Strings::iterator& arg, const Strings::iterator& end) { - Operation oldOp = op; - - if (*arg == "--help") { - showManPage("nix-store"); - } else if (*arg == "--version") { - op = opVersion; - } else if (*arg == "--realise" || *arg == "--realize" || - *arg == "-r") { - op = opRealise; - } else if (*arg == "--add" || *arg == "-A") { - op = opAdd; - } else if (*arg == "--add-fixed") { - op = opAddFixed; - } else if (*arg == "--print-fixed-path") { - op = opPrintFixedPath; - } else if (*arg == "--delete") { - op = opDelete; - } else if (*arg == "--query" || *arg == "-q") { - op = opQuery; - } else if (*arg == "--print-env") { - op = opPrintEnv; - } else if (*arg == "--read-log" || *arg == "-l") { - op = opReadLog; - } else if (*arg == "--dump-db") { - op = opDumpDB; - } else if (*arg == "--load-db") { - op = opLoadDB; - } else if (*arg == "--register-validity") { - op = opRegisterValidity; - } else if (*arg == "--check-validity") { - op = opCheckValidity; - } else if (*arg == "--gc") { - op = opGC; - } else if (*arg == "--dump") { - op = opDump; - } else if (*arg == "--restore") { - op = opRestore; - } else if (*arg == "--export") { - op = opExport; - } else if (*arg == "--import") { - op = opImport; - } else if (*arg == "--init") { - op = opInit; - } else if (*arg == "--verify") { - op = opVerify; - } else if (*arg == "--verify-path") { - op = opVerifyPath; - } else if (*arg == "--repair-path") { - op = opRepairPath; - } else if (*arg == "--optimise" || *arg == "--optimize") { - op = opOptimise; - } else if (*arg == "--serve") { - op = opServe; - } else if (*arg == "--generate-binary-cache-key") { - op = opGenerateBinaryCacheKey; - } else if (*arg == "--add-root") { - gcRoot = absPath(getArg(*arg, arg, end)); - } else if (*arg == "--indirect") { - indirectRoot = true; - } else if (*arg == "--no-output") { - noOutput = true; - } else if (*arg != "" && arg->at(0) == '-') { - opFlags.push_back(*arg); - if (*arg == "--max-freed" || *arg == "--max-links" || - *arg == "--max-atime") { /* !!! hack */ - opFlags.push_back(getArg(*arg, arg, end)); - } - } else { - opArgs.push_back(*arg); - } - - if ((oldOp != nullptr) && oldOp != op) { - throw UsageError("only one operation may be specified"); - } - - return true; - }); - - if (op == nullptr) { - throw UsageError("no operation specified"); - } - - if (op != opDump && op != opRestore) { /* !!! 
hack */ - store = openStore(); - } - - op(opFlags, opArgs); - - return 0; - } -} - -static RegisterLegacyCommand s1("nix-store", _main); diff --git a/third_party/nix/src/nix/add-to-store.cc b/third_party/nix/src/nix/add-to-store.cc deleted file mode 100644 index 53641f120e..0000000000 --- a/third_party/nix/src/nix/add-to-store.cc +++ /dev/null @@ -1,51 +0,0 @@ -#include "libmain/common-args.hh" -#include "libstore/store-api.hh" -#include "libutil/archive.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdAddToStore final : MixDryRun, StoreCommand { - Path path; - std::optional namePart; - - CmdAddToStore() { - expectArg("path", &path); - - mkFlag() - .longName("name") - .shortName('n') - .description("name component of the store path") - .labels({"name"}) - .dest(&namePart); - } - - std::string name() override { return "add-to-store"; } - - std::string description() override { return "add a path to the Nix store"; } - - Examples examples() override { return {}; } - - void run(ref store) override { - if (!namePart) { - namePart = baseNameOf(path); - } - - StringSink sink; - dumpPath(path, sink); - - ValidPathInfo info; - info.narHash = hashString(htSHA256, *sink.s); - info.narSize = sink.s->size(); - info.path = store->makeFixedOutputPath(true, info.narHash, *namePart); - info.ca = makeFixedOutputCA(true, info.narHash); - - if (!dryRun) { - store->addToStore(info, sink.s); - } - - std::cout << fmt("%s\n", info.path); - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/build.cc b/third_party/nix/src/nix/build.cc deleted file mode 100644 index 3fe74b7ffd..0000000000 --- a/third_party/nix/src/nix/build.cc +++ /dev/null @@ -1,68 +0,0 @@ -#include "libmain/common-args.hh" -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdBuild final : MixDryRun, InstallablesCommand { - Path outLink = "result"; - - CmdBuild() { - mkFlag() - .longName("out-link") - .shortName('o') - .description("path of the symlink to the build result") - .labels({"path"}) - .dest(&outLink); - - mkFlag() - .longName("no-link") - .description("do not create a symlink to the build result") - .set(&outLink, Path("")); - } - - std::string name() override { return "build"; } - - std::string description() override { - return "build a derivation or fetch a store path"; - } - - Examples examples() override { - return { - Example{"To build and run GNU Hello from NixOS 17.03:", - "nix build -f channel:nixos-17.03 hello; ./result/bin/hello"}, - Example{"To build the build.x86_64-linux attribute from release.nix:", - "nix build -f release.nix build.x86_64-linux"}, - }; - } - - void run(ref store) override { - auto buildables = build(store, dryRun ? 
DryRun : Build, installables); - - if (dryRun) { - return; - } - - for (size_t i = 0; i < buildables.size(); ++i) { - auto& b(buildables[i]); - - if (!outLink.empty()) { - for (auto& output : b.outputs) { - if (auto store2 = store.dynamic_pointer_cast()) { - std::string symlink = outLink; - if (i != 0u) { - symlink += fmt("-%d", i); - } - if (output.first != "out") { - symlink += fmt("-%s", output.first); - } - store2->addPermRoot(output.second, absPath(symlink), true); - } - } - } - } - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/cat.cc b/third_party/nix/src/nix/cat.cc deleted file mode 100644 index 7788707eae..0000000000 --- a/third_party/nix/src/nix/cat.cc +++ /dev/null @@ -1,56 +0,0 @@ -#include "libstore/fs-accessor.hh" -#include "libstore/nar-accessor.hh" -#include "libstore/store-api.hh" -#include "nix/command.hh" - -namespace nix { -struct MixCat : virtual Args { - std::string path; - - void cat(const ref& accessor) { - auto st = accessor->stat(path); - if (st.type == FSAccessor::Type::tMissing) { - throw Error(format("path '%1%' does not exist") % path); - } - if (st.type != FSAccessor::Type::tRegular) { - throw Error(format("path '%1%' is not a regular file") % path); - } - - std::cout << accessor->readFile(path); - } -}; - -struct CmdCatStore final : StoreCommand, MixCat { - CmdCatStore() { expectArg("path", &path); } - - std::string name() override { return "cat-store"; } - - std::string description() override { - return "print the contents of a store file on stdout"; - } - - void run(ref store) override { cat(store->getFSAccessor()); } -}; - -struct CmdCatNar final : StoreCommand, MixCat { - Path narPath; - - CmdCatNar() { - expectArg("nar", &narPath); - expectArg("path", &path); - } - - std::string name() override { return "cat-nar"; } - - std::string description() override { - return "print the contents of a file inside a NAR file"; - } - - void run(ref store) override { - cat(makeNarAccessor(make_ref(readFile(narPath)))); - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); -static nix::RegisterCommand r2(nix::make_ref()); diff --git a/third_party/nix/src/nix/command.cc b/third_party/nix/src/nix/command.cc deleted file mode 100644 index f7f183ab0a..0000000000 --- a/third_party/nix/src/nix/command.cc +++ /dev/null @@ -1,156 +0,0 @@ -#include "nix/command.hh" - -#include - -#include "libstore/derivations.hh" -#include "libstore/store-api.hh" - -namespace nix { - -Commands* RegisterCommand::commands = nullptr; - -void Command::printHelp(const std::string& programName, std::ostream& out) { - Args::printHelp(programName, out); - - auto exs = examples(); - if (!exs.empty()) { - out << "\n"; - out << "Examples:\n"; - for (auto& ex : exs) { - out << "\n" - << " " << ex.description << "\n" // FIXME: wrap - << " $ " << ex.command << "\n"; - } - } -} - -MultiCommand::MultiCommand(Commands _commands) - : commands(std::move(_commands)) { - expectedArgs.push_back(ExpectedArg{ - "command", 1, true, [=](std::vector ss) { - assert(!command); - auto i = commands.find(ss[0]); - if (i == commands.end()) { - throw UsageError("'%s' is not a recognised command", ss[0]); - } - command = i->second; - }}); -} - -void MultiCommand::printHelp(const std::string& programName, - std::ostream& out) { - if (command) { - command->printHelp(programName + " " + command->name(), out); - return; - } - - out << "Usage: " << programName << " ... 
...\n"; - - out << "\n"; - out << "Common flags:\n"; - printFlags(out); - - out << "\n"; - out << "Available commands:\n"; - - Table2 table; - for (auto& command : commands) { - auto descr = command.second->description(); - if (!descr.empty()) { - table.push_back(std::make_pair(command.second->name(), descr)); - } - } - printTable(out, table); - -#if 0 - out << "\n"; - out << "For full documentation, run 'man " << programName << "' or 'man " << programName << "-'.\n"; -#endif -} - -bool MultiCommand::processFlag(Strings::iterator& pos, Strings::iterator end) { - if (Args::processFlag(pos, end)) { - return true; - } - if (command && command->processFlag(pos, end)) { - return true; - } - return false; -} - -bool MultiCommand::processArgs(const Strings& args, bool finish) { - if (command) { - return command->processArgs(args, finish); - } - return Args::processArgs(args, finish); -} - -StoreCommand::StoreCommand() = default; - -ref StoreCommand::getStore() { - if (!_store) { - _store = createStore(); - } - return ref(_store); -} - -ref StoreCommand::createStore() { return openStore(); } - -void StoreCommand::run() { run(getStore()); } - -StorePathsCommand::StorePathsCommand(bool recursive) : recursive(recursive) { - if (recursive) { - mkFlag() - .longName("no-recursive") - .description("apply operation to specified paths only") - .set(&this->recursive, false); - } else { - mkFlag() - .longName("recursive") - .shortName('r') - .description("apply operation to closure of the specified paths") - .set(&this->recursive, true); - } - - mkFlag(0, "all", "apply operation to the entire store", &all); -} - -void StorePathsCommand::run(ref store) { - Paths storePaths; - - if (all) { - if (!installables.empty() != 0u) { - throw UsageError("'--all' does not expect arguments"); - } - for (auto& p : store->queryAllValidPaths()) { - storePaths.push_back(p); - } - } - - else { - for (auto& p : toStorePaths(store, NoBuild, installables)) { - storePaths.push_back(p); - } - - if (recursive) { - PathSet closure; - store->computeFSClosure(PathSet(storePaths.begin(), storePaths.end()), - closure, false, false); - storePaths = Paths(closure.begin(), closure.end()); - } - } - - run(store, storePaths); -} - -void StorePathCommand::run(ref store) { - auto storePaths = toStorePaths(store, NoBuild, installables); - - if (storePaths.size() != 1) { - throw UsageError("this command requires exactly one store path"); - } - - run(store, *storePaths.begin()); -} - -} // namespace nix diff --git a/third_party/nix/src/nix/command.hh b/third_party/nix/src/nix/command.hh deleted file mode 100644 index 87e2fbe9d2..0000000000 --- a/third_party/nix/src/nix/command.hh +++ /dev/null @@ -1,194 +0,0 @@ -#pragma once - -#include - -#include "libexpr/common-eval-args.hh" -#include "libutil/args.hh" - -namespace nix { - -extern std::string programPath; - -struct Value; -class Bindings; -class EvalState; - -/* A command is an argument parser that can be executed by calling its - run() method. */ -struct Command : virtual Args { - virtual std::string name() = 0; - virtual void prepare(){}; - virtual void run() = 0; - - struct Example { - std::string description; - std::string command; - }; - - typedef std::list Examples; - - virtual Examples examples() { return Examples(); } - - void printHelp(const std::string& programName, std::ostream& out) override; -}; - -class Store; - -/* A command that require a Nix store. 
*/ -struct StoreCommand : virtual Command { - StoreCommand(); - void run() override; - ref<Store> getStore(); - virtual ref<Store> createStore(); - virtual void run(ref<Store>) = 0; - - private: - std::shared_ptr<Store> _store; -}; - -struct Buildable { - Path drvPath; // may be empty - std::map<std::string, Path> outputs; -}; - -using Buildables = std::vector<Buildable>; - -struct Installable { - virtual std::string what() = 0; - - virtual Buildables toBuildables() { - throw Error("argument '%s' cannot be built", what()); - } - - Buildable toBuildable(); - - virtual Value* toValue(EvalState& state) { - throw Error("argument '%s' cannot be evaluated", what()); - } -}; - -struct SourceExprCommand : virtual Args, StoreCommand, MixEvalArgs { - Path file; - - SourceExprCommand(); - - /* Return a value representing the Nix expression from which we - are installing. This is either the file specified by ‘--file’, - or an attribute set constructed from $NIX_PATH, e.g. ‘{ nixpkgs - = import ...; bla = import ...; }’. */ - Value* getSourceExpr(EvalState& state); - - ref<EvalState> getEvalState(); - - private: - std::shared_ptr<EvalState> evalState; - std::shared_ptr<Value*> vSourceExpr; -}; - -enum RealiseMode { Build, NoBuild, DryRun }; - -/* A command that operates on a list of "installables", which can be - store paths, attribute paths, Nix expressions, etc. */ -struct InstallablesCommand : virtual Args, SourceExprCommand { - std::vector<std::shared_ptr<Installable>> installables; - - InstallablesCommand() { expectArgs("installables", &_installables); } - - void prepare() override; - - virtual bool useDefaultInstallables() { return true; } - - private: - std::vector<std::string> _installables; -}; - -struct InstallableCommand : virtual Args, SourceExprCommand { - std::shared_ptr<Installable> installable; - - InstallableCommand() { expectArg("installable", &_installable); } - - void prepare() override; - - private: - std::string _installable; -}; - -/* A command that operates on zero or more store paths. */ -struct StorePathsCommand : public InstallablesCommand { - private: - bool recursive = false; - bool all = false; - - public: - StorePathsCommand(bool recursive = false); - - using StoreCommand::run; - - virtual void run(ref<Store> store, Paths storePaths) = 0; - - void run(ref<Store> store) override; - - bool useDefaultInstallables() override { return !all; } -}; - -/* A command that operates on exactly one store path. */ -struct StorePathCommand : public InstallablesCommand { - using StoreCommand::run; - - virtual void run(ref<Store> store, const Path& storePath) = 0; - - void run(ref<Store> store) override; -}; - -using Commands = std::map<std::string, ref<Command>>; - -/* An argument parser that supports multiple subcommands, - i.e. ‘ ’. */ -class MultiCommand : virtual Args { - public: - Commands commands; - - std::shared_ptr<Command> command; - - MultiCommand(Commands commands); - - void printHelp(const std::string& programName, std::ostream& out) override; - - bool processFlag(Strings::iterator& pos, Strings::iterator end) override; - - bool processArgs(const Strings& args, bool finish) override; -}; - -/* A helper class for registering commands globally. 
*/ -struct RegisterCommand { - static Commands* commands; - - RegisterCommand(ref command) { - if (!commands) { - commands = new Commands; - } - commands->emplace(command->name(), command); - } -}; - -std::shared_ptr parseInstallable(SourceExprCommand& cmd, - const ref& store, - const std::string& installable, - bool useDefaultInstallables); - -Buildables build(const ref& store, RealiseMode mode, - const std::vector>& installables); - -PathSet toStorePaths( - const ref& store, RealiseMode mode, - const std::vector>& installables); - -Path toStorePath(const ref& store, RealiseMode mode, - const std::shared_ptr& installable); - -PathSet toDerivations( - const ref& store, - const std::vector>& installables, - bool useDeriver = false); - -} // namespace nix diff --git a/third_party/nix/src/nix/copy.cc b/third_party/nix/src/nix/copy.cc deleted file mode 100644 index 75c85698d1..0000000000 --- a/third_party/nix/src/nix/copy.cc +++ /dev/null @@ -1,86 +0,0 @@ -#include - -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "libutil/sync.hh" -#include "libutil/thread-pool.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdCopy final : StorePathsCommand { - std::string srcUri, dstUri; - - CheckSigsFlag checkSigs = CheckSigs; - - SubstituteFlag substitute = NoSubstitute; - - CmdCopy() : StorePathsCommand(true) { - mkFlag() - .longName("from") - .labels({"store-uri"}) - .description("URI of the source Nix store") - .dest(&srcUri); - mkFlag() - .longName("to") - .labels({"store-uri"}) - .description("URI of the destination Nix store") - .dest(&dstUri); - - mkFlag() - .longName("no-check-sigs") - .description("do not require that paths are signed by trusted keys") - .set(&checkSigs, NoCheckSigs); - - mkFlag() - .longName("substitute-on-destination") - .shortName('s') - .description( - "whether to try substitutes on the destination store (only " - "supported by SSH)") - .set(&substitute, Substitute); - } - - std::string name() override { return "copy"; } - - std::string description() override { return "copy paths between Nix stores"; } - - Examples examples() override { - return { - Example{"To copy Firefox from the local store to a binary cache in " - "file:///tmp/cache:", - "nix copy --to file:///tmp/cache $(type -p firefox)"}, - Example{"To copy the entire current NixOS system closure to another " - "machine via SSH:", - "nix copy --to ssh://server /run/current-system"}, - Example{"To copy a closure from another machine via SSH:", - "nix copy --from ssh://server " - "/nix/store/a6cnl93nk1wxnq84brbbwr6hxw9gp2w9-blender-2.79-rc2"}, -#ifdef ENABLE_S3 - Example{"To copy Hello to an S3 binary cache:", - "nix copy --to s3://my-bucket?region=eu-west-1 nixpkgs.hello"}, - Example{"To copy Hello to an S3-compatible binary cache:", - "nix copy --to " - "s3://my-bucket?region=eu-west-1&endpoint=example.com " - "nixpkgs.hello"}, -#endif - }; - } - - ref createStore() override { - return srcUri.empty() ? StoreCommand::createStore() : openStore(srcUri); - } - - void run(ref srcStore, Paths storePaths) override { - if (srcUri.empty() && dstUri.empty()) { - throw UsageError("you must pass '--from' and/or '--to'"); - } - - ref dstStore = dstUri.empty() ? 
openStore() : openStore(dstUri); - - copyPaths(srcStore, dstStore, PathSet(storePaths.begin(), storePaths.end()), - NoRepair, checkSigs, substitute); - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/doctor.cc b/third_party/nix/src/nix/doctor.cc deleted file mode 100644 index d0b4c2b588..0000000000 --- a/third_party/nix/src/nix/doctor.cc +++ /dev/null @@ -1,142 +0,0 @@ -#include -#include -#include - -#include "libmain/shared.hh" -#include "libstore/serve-protocol.hh" -#include "libstore/store-api.hh" -#include "libstore/worker-protocol.hh" -#include "nix/command.hh" - -namespace nix { -static std::string formatProtocol(unsigned int proto) { - if (proto != 0u) { - auto major = GET_PROTOCOL_MAJOR(proto) >> 8; - auto minor = GET_PROTOCOL_MINOR(proto); - return (format("%1%.%2%") % major % minor).str(); - } - return "unknown"; -} - -struct CmdDoctor final : StoreCommand { - bool success = true; - - std::string name() override { return "doctor"; } - - std::string description() override { - return "check your system for potential problems"; - } - - void run(ref store) override { - std::cout << "Store uri: " << store->getUri() << std::endl; - std::cout << std::endl; - - auto type = getStoreType(); - - if (type < tOther) { - success &= checkNixInPath(); - success &= checkProfileRoots(store); - } - success &= checkStoreProtocol(store->getProtocol()); - - if (!success) { - throw Exit(2); - } - } - - static bool checkNixInPath() { - PathSet dirs; - - for (auto& dir : absl::StrSplit(getEnv("PATH").value_or(""), - absl::ByChar(':'), absl::SkipEmpty())) { - if (pathExists(absl::StrCat(dir, "/nix-env"))) { - dirs.insert(dirOf(canonPath(absl::StrCat(dir, "/nix-env"), true))); - } - } - - if (dirs.size() != 1) { - std::cout << "Warning: multiple versions of nix found in PATH." - << std::endl; - std::cout << std::endl; - for (auto& dir : dirs) { - std::cout << " " << dir << std::endl; - } - std::cout << std::endl; - return false; - } - - return true; - } - - static bool checkProfileRoots(const ref& store) { - PathSet dirs; - - for (auto dir : absl::StrSplit(getEnv("PATH").value_or(""), - absl::ByChar(':'), absl::SkipEmpty())) { - Path profileDir = dirOf(dir); - try { - Path userEnv = canonPath(profileDir, true); - - if (store->isStorePath(userEnv) && - absl::EndsWith(userEnv, "user-environment")) { - while (profileDir.find("/profiles/") == std::string::npos && - isLink(profileDir)) { - profileDir = absPath(readLink(profileDir), dirOf(profileDir)); - } - - if (profileDir.find("/profiles/") == std::string::npos) { - dirs.insert(std::string(dir)); - } - } - } catch (SysError&) { - } - } - - if (!dirs.empty()) { - std::cout << "Warning: found profiles outside of " << settings.nixStateDir - << "/profiles." << std::endl; - std::cout << "The generation this profile points to might not have a " - "gcroot and could be" - << std::endl; - std::cout << "garbage collected, resulting in broken symlinks." - << std::endl; - std::cout << std::endl; - for (auto& dir : dirs) { - std::cout << " " << dir << std::endl; - } - std::cout << std::endl; - return false; - } - - return true; - } - - static bool checkStoreProtocol(unsigned int storeProto) { - unsigned int clientProto = GET_PROTOCOL_MAJOR(SERVE_PROTOCOL_VERSION) == - GET_PROTOCOL_MAJOR(storeProto) - ? SERVE_PROTOCOL_VERSION - : PROTOCOL_VERSION; - - if (clientProto != storeProto) { - std::cout << "Warning: protocol version of this client does not match " - "the store." 
- << std::endl; - std::cout << "While this is not necessarily a problem it's recommended " - "to keep the client in" - << std::endl; - std::cout << "sync with the daemon." << std::endl; - std::cout << std::endl; - std::cout << "Client protocol: " << formatProtocol(clientProto) - << std::endl; - std::cout << "Store protocol: " << formatProtocol(storeProto) - << std::endl; - std::cout << std::endl; - return false; - } - - return true; - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/dump-path.cc b/third_party/nix/src/nix/dump-path.cc deleted file mode 100644 index 1d0a996e56..0000000000 --- a/third_party/nix/src/nix/dump-path.cc +++ /dev/null @@ -1,28 +0,0 @@ -#include "libstore/store-api.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdDumpPath final : StorePathCommand { - std::string name() override { return "dump-path"; } - - std::string description() override { - return "dump a store path to stdout (in NAR format)"; - } - - Examples examples() override { - return { - Example{"To get a NAR from the binary cache https://cache.nixos.org/:", - "nix dump-path --store https://cache.nixos.org/ " - "/nix/store/7crrmih8c52r8fbnqb933dxrsp44md93-glibc-2.25"}, - }; - } - - void run(ref store, const Path& storePath) override { - FdSink sink(STDOUT_FILENO); - store->narFromPath(storePath, sink); - sink.flush(); - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/edit.cc b/third_party/nix/src/nix/edit.cc deleted file mode 100644 index 04c67acb94..0000000000 --- a/third_party/nix/src/nix/edit.cc +++ /dev/null @@ -1,75 +0,0 @@ -#include -#include -#include - -#include "libexpr/attr-path.hh" -#include "libexpr/eval.hh" -#include "libmain/shared.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdEdit final : InstallableCommand { - std::string name() override { return "edit"; } - - std::string description() override { - return "open the Nix expression of a Nix package in $EDITOR"; - } - - Examples examples() override { - return { - Example{"To open the Nix expression of the GNU Hello package:", - "nix edit nixpkgs.hello"}, - }; - } - - void run(ref store) override { - auto state = getEvalState(); - - auto v = installable->toValue(*state); - - Value* v2; - try { - auto dummyArgs = Bindings::New(); - v2 = findAlongAttrPath(*state, "meta.position", dummyArgs.get(), *v); - } catch (Error&) { - throw Error("package '%s' has no source location information", - installable->what()); - } - - auto pos = state->forceString(*v2); - DLOG(INFO) << "position is " << pos; - - auto colon = pos.rfind(':'); - if (colon == std::string::npos) { - throw Error("cannot parse meta.position attribute '%s'", pos); - } - - std::string filename(pos, 0, colon); - int lineno; - try { - lineno = std::stoi(std::string(pos, colon + 1)); - } catch (std::invalid_argument& e) { - throw Error("cannot parse line number '%s'", pos); - } - - auto editor = getEnv("EDITOR").value_or("cat"); - - Strings args = - absl::StrSplit(editor, absl::ByAnyChar(" \t\n\r"), absl::SkipEmpty()); - - if (editor.find("emacs") != std::string::npos || - editor.find("nano") != std::string::npos || - editor.find("vim") != std::string::npos) { - args.push_back(fmt("+%d", lineno)); - } - - args.push_back(filename); - - execvp(args.front().c_str(), stringsToCharPtrs(args).data()); - - throw SysError("cannot run editor '%s'", editor); - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff 
--git a/third_party/nix/src/nix/eval.cc b/third_party/nix/src/nix/eval.cc deleted file mode 100644 index 72fcbd8271..0000000000 --- a/third_party/nix/src/nix/eval.cc +++ /dev/null @@ -1,56 +0,0 @@ -#include "libexpr/eval.hh" - -#include "libexpr/value-to-json.hh" -#include "libmain/common-args.hh" -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "libutil/json.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdEval final : MixJSON, InstallableCommand { - bool raw = false; - - CmdEval() { mkFlag(0, "raw", "print strings unquoted", &raw); } - - std::string name() override { return "eval"; } - - std::string description() override { return "evaluate a Nix expression"; } - - Examples examples() override { - return { - Example{"To evaluate a Nix expression given on the command line:", - "nix eval '(1 + 2)'"}, - Example{"To evaluate a Nix expression from a file or URI:", - "nix eval -f channel:nixos-17.09 hello.name"}, - Example{"To get the current version of Nixpkgs:", - "nix eval --raw nixpkgs.lib.nixpkgsVersion"}, - Example{"To print the store path of the Hello package:", - "nix eval --raw nixpkgs.hello"}, - }; - } - - void run(ref store) override { - if (raw && json) { - throw UsageError("--raw and --json are mutually exclusive"); - } - - auto state = getEvalState(); - - auto v = installable->toValue(*state); - PathSet context; - - if (raw) { - std::cout << state->coerceToString(noPos, *v, context); - } else if (json) { - JSONPlaceholder jsonOut(std::cout); - printValueAsJSON(*state, true, *v, jsonOut, context); - } else { - state->forceValueDeep(*v); - std::cout << *v << "\n"; - } - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/hash.cc b/third_party/nix/src/nix/hash.cc deleted file mode 100644 index 4fb262f1a8..0000000000 --- a/third_party/nix/src/nix/hash.cc +++ /dev/null @@ -1,152 +0,0 @@ -#include "libutil/hash.hh" - -#include "libmain/shared.hh" -#include "nix/command.hh" -#include "nix/legacy.hh" - -namespace nix { -struct CmdHash final : Command { - enum Mode { mFile, mPath }; - Mode mode; - Base base = SRI; - bool truncate = false; - HashType ht = htSHA256; - std::vector paths; - - explicit CmdHash(Mode mode) : mode(mode) { - mkFlag(0, "sri", "print hash in SRI format", &base, SRI); - mkFlag(0, "base64", "print hash in base-64", &base, Base64); - mkFlag(0, "base32", "print hash in base-32 (Nix-specific)", &base, Base32); - mkFlag(0, "base16", "print hash in base-16", &base, Base16); - mkFlag().longName("type").mkHashTypeFlag(&ht); - expectArgs("paths", &paths); - } - - std::string name() override { - return mode == mFile ? "hash-file" : "hash-path"; - } - - std::string description() override { - return mode == mFile - ? "print cryptographic hash of a regular file" - : "print cryptographic hash of the NAR serialisation of a path"; - } - - void run() override { - for (const auto& path : paths) { - Hash h = mode == mFile ? 
hashFile(ht, path) : hashPath(ht, path).first; - if (truncate && h.hashSize > nix::kStorePathHashSize) { - h = compressHash(h, nix::kStorePathHashSize); - } - std::cout << format("%1%\n") % h.to_string(base, base == SRI); - } - } -}; - -static RegisterCommand r1(make_ref(CmdHash::mFile)); -static RegisterCommand r2(make_ref(CmdHash::mPath)); - -struct CmdToBase final : Command { - Base base; - HashType ht = htUnknown; - std::vector args; - - explicit CmdToBase(Base base) : base(base) { - mkFlag().longName("type").mkHashTypeFlag(&ht); - expectArgs("strings", &args); - } - - std::string name() override { - return base == Base16 ? "to-base16" - : base == Base32 ? "to-base32" - : base == Base64 ? "to-base64" - : "to-sri"; - } - - std::string description() override { - return fmt("convert a hash to %s representation", - base == Base16 ? "base-16" - : base == Base32 ? "base-32" - : base == Base64 ? "base-64" - : "SRI"); - } - - void run() override { - for (const auto& s : args) { - auto hash_ = Hash::deserialize(s, ht); - if (hash_.ok()) { - std::cout << hash_->to_string(base, base == SRI) << "\n"; - } else { - std::cerr << "failed to parse: " << hash_.status().ToString() << "\n"; - // create a matching blank line, for scripting - std::cout << "\n"; - } - } - } -}; - -static RegisterCommand r3(make_ref(Base16)); -static RegisterCommand r4(make_ref(Base32)); -static RegisterCommand r5(make_ref(Base64)); -static RegisterCommand r6(make_ref(SRI)); - -/* Legacy nix-hash command. */ -static int compatNixHash(int argc, char** argv) { - HashType ht = htMD5; - bool flat = false; - bool base32 = false; - bool truncate = false; - enum { opHash, opTo32, opTo16 } op = opHash; - std::vector ss; - - parseCmdLine(argc, argv, - [&](Strings::iterator& arg, const Strings::iterator& end) { - if (*arg == "--help") { - showManPage("nix-hash"); - } else if (*arg == "--version") { - printVersion("nix-hash"); - } else if (*arg == "--flat") { - flat = true; - } else if (*arg == "--base32") { - base32 = true; - } else if (*arg == "--truncate") { - truncate = true; - } else if (*arg == "--type") { - std::string s = getArg(*arg, arg, end); - ht = parseHashType(s); - if (ht == htUnknown) { - throw UsageError(format("unknown hash type '%1%'") % s); - } - } else if (*arg == "--to-base16") { - op = opTo16; - } else if (*arg == "--to-base32") { - op = opTo32; - } else if (*arg != "" && arg->at(0) == '-') { - return false; - } else { - ss.push_back(*arg); - } - return true; - }); - - if (op == opHash) { - CmdHash cmd(flat ? CmdHash::mFile : CmdHash::mPath); - cmd.ht = ht; - cmd.base = base32 ? Base32 : Base16; - cmd.truncate = truncate; - cmd.paths = ss; - cmd.run(); - } - - else { - CmdToBase cmd(op == opTo32 ? 
Base32 : Base16); - cmd.args = ss; - cmd.ht = ht; - cmd.run(); - } - - return 0; -} - -static RegisterLegacyCommand s1("nix-hash", compatNixHash); -} // namespace nix diff --git a/third_party/nix/src/nix/installables.cc b/third_party/nix/src/nix/installables.cc deleted file mode 100644 index 7aa26b0dee..0000000000 --- a/third_party/nix/src/nix/installables.cc +++ /dev/null @@ -1,349 +0,0 @@ -#include -#include -#include - -#include "libexpr/attr-path.hh" -#include "libexpr/common-eval-args.hh" -#include "libexpr/eval-inline.hh" -#include "libexpr/eval.hh" -#include "libexpr/get-drvs.hh" -#include "libmain/shared.hh" -#include "libstore/derivations.hh" -#include "libstore/store-api.hh" -#include "libutil/status.hh" -#include "nix/command.hh" - -namespace nix { - -SourceExprCommand::SourceExprCommand() { - mkFlag() - .shortName('f') - .longName("file") - .label("file") - .description("evaluate FILE rather than the default") - .dest(&file); -} - -Value* SourceExprCommand::getSourceExpr(EvalState& state) { - if (vSourceExpr != nullptr) { - return *vSourceExpr; - } - - auto sToplevel = state.symbols.Create("_toplevel"); - - // Allocate the vSourceExpr Value as uncollectable. Boehm GC doesn't - // consider the member variable "alive" during execution causing it to be - // GC'ed in the middle of evaluation. - vSourceExpr = allocRootValue(state.allocValue()); - - if (!file.empty()) { - state.evalFile(lookupFileArg(state, file), **vSourceExpr); - } else { - /* Construct the installation source from $NIX_PATH. */ - - auto searchPath = state.getSearchPath(); - - state.mkAttrs(**vSourceExpr, 1024); - - mkBool(*state.allocAttr(**vSourceExpr, sToplevel), true); - - std::unordered_set seen; - - auto addEntry = [&](const std::string& name) { - if (name.empty()) { - return; - } - if (!seen.insert(name).second) { - return; - } - Value* v1 = state.allocValue(); - mkPrimOpApp(*v1, state.getBuiltin("findFile"), - state.getBuiltin("nixPath")); - Value* v2 = state.allocValue(); - mkApp(*v2, *v1, mkString(*state.allocValue(), name)); - mkApp(*state.allocAttr(**vSourceExpr, state.symbols.Create(name)), - state.getBuiltin("import"), *v2); - }; - - for (auto& i : searchPath) { /* Hack to handle channels. */ - if (i.first.empty() && pathExists(i.second + "/manifest.nix")) { - for (auto& j : readDirectory(i.second)) { - if (j.name != "manifest.nix" && - pathExists(fmt("%s/%s/default.nix", i.second, j.name))) { - addEntry(j.name); - } - } - } else { - addEntry(i.first); - } - } - } - - return *vSourceExpr; -} - -ref SourceExprCommand::getEvalState() { - if (!evalState) { - evalState = std::make_shared(searchPath, getStore()); - } - return ref(evalState); -} - -Buildable Installable::toBuildable() { - auto buildables = toBuildables(); - if (buildables.size() != 1) { - throw Error( - "installable '%s' evaluates to %d derivations, where only one is " - "expected", - what(), buildables.size()); - } - return std::move(buildables[0]); -} - -struct InstallableStorePath final : Installable { - Path storePath; - - explicit InstallableStorePath(Path storePath) - : storePath(std::move(storePath)) {} - - std::string what() override { return storePath; } - - Buildables toBuildables() override { - return {{isDerivation(storePath) ? 
storePath : "", {{"out", storePath}}}}; - } -}; - -struct InstallableValue : Installable { - SourceExprCommand& cmd; - - explicit InstallableValue(SourceExprCommand& cmd) : cmd(cmd) {} - - Buildables toBuildables() override { - auto state = cmd.getEvalState(); - - auto v = toValue(*state); - - std::unique_ptr autoArgs = cmd.getAutoArgs(*state); - - DrvInfos drvs; - getDerivations(*state, *v, "", autoArgs.get(), drvs, false); - - Buildables res; - - PathSet drvPaths; - - for (auto& drv : drvs) { - Buildable b{drv.queryDrvPath()}; - drvPaths.insert(b.drvPath); - - auto outputName = drv.queryOutputName(); - if (outputName.empty()) { - throw Error("derivation '%s' lacks an 'outputName' attribute", - b.drvPath); - } - - b.outputs.emplace(outputName, drv.queryOutPath()); - - res.push_back(std::move(b)); - } - - // Hack to recognize .all: if all drvs have the same drvPath, - // merge the buildables. - if (drvPaths.size() == 1) { - Buildable b{*drvPaths.begin()}; - for (auto& b2 : res) { - b.outputs.insert(b2.outputs.begin(), b2.outputs.end()); - } - return {b}; - } - return res; - } -}; - -struct InstallableExpr final : InstallableValue { - std::string text; - - InstallableExpr(SourceExprCommand& cmd, std::string text) - : InstallableValue(cmd), text(std::move(text)) {} - - std::string what() override { return text; } - - Value* toValue(EvalState& state) override { - auto v = state.allocValue(); - state.eval(state.parseExprFromString(text, absPath(".")), *v); - return v; - } -}; - -struct InstallableAttrPath final : InstallableValue { - std::string attrPath; - - InstallableAttrPath(SourceExprCommand& cmd, std::string attrPath) - : InstallableValue(cmd), attrPath(std::move(attrPath)) {} - - std::string what() override { return attrPath; } - - Value* toValue(EvalState& state) override { - auto source = cmd.getSourceExpr(state); - - std::unique_ptr autoArgs = cmd.getAutoArgs(state); - - Value* v = findAlongAttrPath(state, attrPath, autoArgs.get(), *source); - state.forceValue(*v); - - return v; - } -}; - -// FIXME: extend -std::string attrRegex = R"([A-Za-z_][A-Za-z0-9-_+]*)"; -static std::regex attrPathRegex(fmt(R"(%1%(\.%1%)*)", attrRegex)); - -static std::vector> parseInstallables( - SourceExprCommand& cmd, const ref& store, - std::vector ss, bool useDefaultInstallables) { - std::vector> result; - - if (ss.empty() && useDefaultInstallables) { - if (cmd.file.empty()) { - cmd.file = "."; - } - ss = {""}; - } - - for (auto& s : ss) { - if (s.compare(0, 1, "(") == 0) { - result.push_back(std::make_shared(cmd, s)); - - } else if (s.find('/') != std::string::npos) { - auto path = store->toStorePath(store->followLinksToStore(s)); - - if (store->isStorePath(path)) { - result.push_back(std::make_shared(path)); - } - } - - else if (s.empty() || std::regex_match(s, attrPathRegex)) { - result.push_back(std::make_shared(cmd, s)); - - } else { - throw UsageError("don't know what to do with argument '%s'", s); - } - } - - return result; -} - -std::shared_ptr parseInstallable(SourceExprCommand& cmd, - const ref& store, - const std::string& installable, - bool useDefaultInstallables) { - auto installables = parseInstallables(cmd, store, {installable}, false); - assert(installables.size() == 1); - return installables.front(); -} - -Buildables build( - const ref& store, RealiseMode mode, - const std::vector>& installables) { - if (mode != Build) { - settings.readOnlyMode = true; - } - - Buildables buildables; - - PathSet pathsToBuild; - - for (auto& i : installables) { - for (auto& b : i->toBuildables()) { - if 
(!b.drvPath.empty()) { - StringSet outputNames; - for (auto& output : b.outputs) { - outputNames.insert(output.first); - } - pathsToBuild.insert(b.drvPath + "!" + - concatStringsSep(",", outputNames)); - } else { - for (auto& output : b.outputs) { - pathsToBuild.insert(output.second); - } - } - buildables.push_back(std::move(b)); - } - } - - if (mode == DryRun) { - printMissing(store, pathsToBuild); - } else if (mode == Build) { - util::OkOrThrow(store->buildPaths(std::cerr, pathsToBuild)); - } - - return buildables; -} - -PathSet toStorePaths( - const ref& store, RealiseMode mode, - const std::vector>& installables) { - PathSet outPaths; - - for (auto& b : build(store, mode, installables)) { - for (auto& output : b.outputs) { - outPaths.insert(output.second); - } - } - - return outPaths; -} - -Path toStorePath(const ref& store, RealiseMode mode, - const std::shared_ptr& installable) { - auto paths = toStorePaths(store, mode, {installable}); - - if (paths.size() != 1) { - throw Error("argument '%s' should evaluate to one store path", - installable->what()); - } - - return *paths.begin(); -} - -PathSet toDerivations( - const ref& store, - const std::vector>& installables, - bool useDeriver) { - PathSet drvPaths; - - for (auto& i : installables) { - for (auto& b : i->toBuildables()) { - if (b.drvPath.empty()) { - if (!useDeriver) { - throw Error("argument '%s' did not evaluate to a derivation", - i->what()); - } - for (auto& output : b.outputs) { - auto derivers = store->queryValidDerivers(output.second); - if (derivers.empty()) { - throw Error("'%s' does not have a known deriver", i->what()); - } - // FIXME: use all derivers? - drvPaths.insert(*derivers.begin()); - } - } else { - drvPaths.insert(b.drvPath); - } - } - } - - return drvPaths; -} - -void InstallablesCommand::prepare() { - installables = parseInstallables(*this, getStore(), _installables, - useDefaultInstallables()); -} - -void InstallableCommand::prepare() { - installable = parseInstallable(*this, getStore(), _installable, false); -} - -} // namespace nix diff --git a/third_party/nix/src/nix/legacy.cc b/third_party/nix/src/nix/legacy.cc deleted file mode 100644 index a0f9fc65b3..0000000000 --- a/third_party/nix/src/nix/legacy.cc +++ /dev/null @@ -1,7 +0,0 @@ -#include "nix/legacy.hh" - -namespace nix { - -RegisterLegacyCommand::Commands* RegisterLegacyCommand::commands = nullptr; - -} diff --git a/third_party/nix/src/nix/legacy.hh b/third_party/nix/src/nix/legacy.hh deleted file mode 100644 index a0fc88da24..0000000000 --- a/third_party/nix/src/nix/legacy.hh +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once - -#include -#include -#include - -namespace nix { - -typedef std::function MainFunction; - -struct RegisterLegacyCommand { - using Commands = std::map; - static Commands* commands; - - RegisterLegacyCommand(const std::string& name, MainFunction fun) { - if (!commands) { - commands = new Commands; - } - (*commands)[name] = fun; - } -}; - -} // namespace nix diff --git a/third_party/nix/src/nix/log.cc b/third_party/nix/src/nix/log.cc deleted file mode 100644 index 84207d8576..0000000000 --- a/third_party/nix/src/nix/log.cc +++ /dev/null @@ -1,63 +0,0 @@ -#include - -#include "libmain/common-args.hh" -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdLog final : InstallableCommand { - CmdLog() = default; - - std::string name() override { return "log"; } - - std::string description() override { - return "show the build log of the specified packages or paths, if " 
- "available"; - } - - Examples examples() override { - return { - Example{"To get the build log of GNU Hello:", "nix log nixpkgs.hello"}, - Example{ - "To get the build log of a specific path:", - "nix log " - "/nix/store/lmngj4wcm9rkv3w4dfhzhcyij3195hiq-thunderbird-52.2.1"}, - Example{"To get a build log from a specific binary cache:", - "nix log --store https://cache.nixos.org nixpkgs.hello"}, - }; - } - - void run(ref store) override { - settings.readOnlyMode = true; - - auto subs = getDefaultSubstituters(); - - subs.push_front(store); - - auto b = installable->toBuildable(); - - RunPager pager; - for (auto& sub : subs) { - auto log = !b.drvPath.empty() ? sub->getBuildLog(b.drvPath) : nullptr; - for (auto& output : b.outputs) { - if (log) { - break; - } - log = sub->getBuildLog(output.second); - } - if (!log) { - continue; - } - LOG(INFO) << "got build log for '" << installable->what() << "' from '" - << sub->getUri() << "'"; - std::cout << *log; - return; - } - - throw Error("build log of '%s' is not available", installable->what()); - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/ls.cc b/third_party/nix/src/nix/ls.cc deleted file mode 100644 index 1da722babb..0000000000 --- a/third_party/nix/src/nix/ls.cc +++ /dev/null @@ -1,137 +0,0 @@ -#include "libmain/common-args.hh" -#include "libstore/fs-accessor.hh" -#include "libstore/nar-accessor.hh" -#include "libstore/store-api.hh" -#include "libutil/json.hh" -#include "nix/command.hh" - -namespace nix { -struct MixLs : virtual Args, MixJSON { - std::string path; - - bool recursive = false; - bool verbose = false; - bool showDirectory = false; - - MixLs() { - mkFlag('R', "recursive", "list subdirectories recursively", &recursive); - mkFlag('l', "long", "show more file information", &verbose); - mkFlag('d', "directory", "show directories rather than their contents", - &showDirectory); - } - - void listText(ref accessor) { - std::function - doPath; - - auto showFile = [&](const Path& curPath, const std::string& relPath) { - if (verbose) { - auto st = accessor->stat(curPath); - std::string tp = st.type == FSAccessor::Type::tRegular - ? (st.isExecutable ? "-r-xr-xr-x" : "-r--r--r--") - : st.type == FSAccessor::Type::tSymlink ? "lrwxrwxrwx" - : "dr-xr-xr-x"; - std::cout << (format("%s %20d %s") % tp % st.fileSize % relPath); - if (st.type == FSAccessor::Type::tSymlink) { - std::cout << " -> " << accessor->readLink(curPath); - } - std::cout << "\n"; - if (recursive && st.type == FSAccessor::Type::tDirectory) { - doPath(st, curPath, relPath, false); - } - } else { - std::cout << relPath << "\n"; - if (recursive) { - auto st = accessor->stat(curPath); - if (st.type == FSAccessor::Type::tDirectory) { - doPath(st, curPath, relPath, false); - } - } - } - }; - - doPath = [&](const FSAccessor::Stat& st, const Path& curPath, - const std::string& relPath, bool showDirectory) { - if (st.type == FSAccessor::Type::tDirectory && !showDirectory) { - auto names = accessor->readDirectory(curPath); - for (auto& name : names) { - showFile(curPath + "/" + name, relPath + "/" + name); - } - } else { - showFile(curPath, relPath); - } - }; - - auto st = accessor->stat(path); - if (st.type == FSAccessor::Type::tMissing) { - throw Error(format("path '%1%' does not exist") % path); - } - doPath(st, path, - st.type == FSAccessor::Type::tDirectory ? "." 
: baseNameOf(path), - showDirectory); - } - - void list(const ref& accessor) { - if (path == "/") { - path = ""; - } - - if (json) { - JSONPlaceholder jsonRoot(std::cout); - listNar(jsonRoot, accessor, path, recursive); - } else { - listText(accessor); - } - } -}; - -struct CmdLsStore final : StoreCommand, MixLs { - CmdLsStore() { expectArg("path", &path); } - - Examples examples() override { - return { - Example{"To list the contents of a store path in a binary cache:", - "nix ls-store --store https://cache.nixos.org/ -lR " - "/nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10"}, - }; - } - - std::string name() override { return "ls-store"; } - - std::string description() override { - return "show information about a store path"; - } - - void run(ref store) override { list(store->getFSAccessor()); } -}; - -struct CmdLsNar final : Command, MixLs { - Path narPath; - - CmdLsNar() { - expectArg("nar", &narPath); - expectArg("path", &path); - } - - Examples examples() override { - return { - Example{"To list a specific file in a NAR:", - "nix ls-nar -l hello.nar /bin/hello"}, - }; - } - - std::string name() override { return "ls-nar"; } - - std::string description() override { - return "show information about the contents of a NAR file"; - } - - void run() override { - list(makeNarAccessor(make_ref(readFile(narPath, true)))); - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); -static nix::RegisterCommand r2(nix::make_ref()); diff --git a/third_party/nix/src/nix/main.cc b/third_party/nix/src/nix/main.cc deleted file mode 100644 index 08390fd24b..0000000000 --- a/third_party/nix/src/nix/main.cc +++ /dev/null @@ -1,185 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include - -#include "libexpr/eval.hh" -#include "libmain/common-args.hh" -#include "libmain/shared.hh" -#include "libstore/download.hh" -#include "libstore/globals.hh" -#include "libstore/store-api.hh" -#include "libutil/finally.hh" -#include "nix/command.hh" -#include "nix/legacy.hh" - -extern std::string chrootHelperName; - -void chrootHelper(int argc, char** argv); - -namespace nix { - -/* Check if we have a non-loopback/link-local network interface. 
*/ -static bool haveInternet() { - struct ifaddrs* addrs; - - if (getifaddrs(&addrs) != 0) { - return true; - } - - Finally free([&]() { freeifaddrs(addrs); }); - - for (auto i = addrs; i != nullptr; i = i->ifa_next) { - if (i->ifa_addr == nullptr) { - continue; - } - if (i->ifa_addr->sa_family == AF_INET) { - if (ntohl( - (reinterpret_cast(i->ifa_addr))->sin_addr.s_addr) != - INADDR_LOOPBACK) { - return true; - } - } else if (i->ifa_addr->sa_family == AF_INET6) { - if (!IN6_IS_ADDR_LOOPBACK(&((sockaddr_in6*)i->ifa_addr)->sin6_addr) && - !IN6_IS_ADDR_LINKLOCAL(&((sockaddr_in6*)i->ifa_addr)->sin6_addr)) { - return true; - } - } - } - - return false; -} - -std::string programPath; - -struct NixArgs : virtual MultiCommand, virtual MixCommonArgs { - bool printBuildLogs = false; - bool useNet = true; - - NixArgs() : MultiCommand(*RegisterCommand::commands), MixCommonArgs("nix") { - mkFlag() - .longName("help") - .description("show usage information") - .handler([&]() { showHelpAndExit(); }); - - mkFlag() - .longName("help-config") - .description("show configuration options") - .handler([&]() { - std::cout << "The following configuration options are available:\n\n"; - Table2 tbl; - std::map settings; - globalConfig.getSettings(settings); - for (const auto& s : settings) { - tbl.emplace_back(s.first, s.second.description); - } - printTable(std::cout, tbl); - throw Exit(); - }); - - mkFlag() - .longName("print-build-logs") - .shortName('L') - .description("print full build logs on stderr") - .set(&printBuildLogs, true); - - mkFlag() - .longName("version") - .description("show version information") - .handler([&]() { printVersion(programName); }); - - mkFlag() - .longName("no-net") - .description( - "disable substituters and consider all previously downloaded files " - "up-to-date") - .handler([&]() { useNet = false; }); - } - - void printFlags(std::ostream& out) override { - Args::printFlags(out); - std::cout << "\n" - "In addition, most configuration settings can be overriden " - "using '-- '.\n" - "Boolean settings can be overriden using '--' or " - "'--no-'. See 'nix\n" - "--help-config' for a list of configuration settings.\n"; - } - - void showHelpAndExit() { - printHelp(programName, std::cout); - std::cout - << "\nNote: this program is EXPERIMENTAL and subject to change.\n"; - throw Exit(); - } -}; - -void mainWrapped(int argc, char** argv) { - /* The chroot helper needs to be run before any threads have been - started. */ - if (argc > 0 && argv[0] == chrootHelperName) { - chrootHelper(argc, argv); - return; - } - - initNix(); - - programPath = argv[0]; - std::string programName = baseNameOf(programPath); - - { - auto legacy = (*RegisterLegacyCommand::commands)[programName]; - if (legacy) { - return legacy(argc, argv); - } - } - - settings.verboseBuild = false; - - NixArgs args; - - args.parseCmdline(argvToStrings(argc, argv)); - - if (!args.command) { - args.showHelpAndExit(); - } - - if (args.useNet && !haveInternet()) { - LOG(WARNING) << "you don't have Internet access; " - << "disabling some network-dependent features"; - args.useNet = false; - } - - if (!args.useNet) { - // FIXME: should check for command line overrides only. 
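// ---------------------------------------------------------------------------
// [Editorial sketch; not part of the removed sources.] The removed
// haveInternet() above decides whether to disable network-dependent features
// by looking for any interface address that is not IPv4 loopback and not
// IPv6 loopback/link-local. A minimal standalone version of that probe, with
// illustrative names, might look like this:
#include <arpa/inet.h>
#include <ifaddrs.h>
#include <netinet/in.h>
#include <cstdio>

static bool probablyOnline() {
  struct ifaddrs* addrs = nullptr;
  if (getifaddrs(&addrs) != 0) {
    return true;  // if the probe itself fails, assume we are online
  }
  bool online = false;
  for (auto* i = addrs; i != nullptr && !online; i = i->ifa_next) {
    if (i->ifa_addr == nullptr) {
      continue;
    }
    if (i->ifa_addr->sa_family == AF_INET) {
      auto* sin = reinterpret_cast<sockaddr_in*>(i->ifa_addr);
      online = ntohl(sin->sin_addr.s_addr) != INADDR_LOOPBACK;
    } else if (i->ifa_addr->sa_family == AF_INET6) {
      auto* sin6 = reinterpret_cast<sockaddr_in6*>(i->ifa_addr);
      online = !IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
               !IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr);
    }
  }
  freeifaddrs(addrs);
  return online;
}

int main() { std::printf("online: %d\n", probablyOnline() ? 1 : 0); }
// ---------------------------------------------------------------------------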
- if (!settings.useSubstitutes.overriden) { - settings.useSubstitutes = false; - } - if (!settings.tarballTtl.overriden) { - settings.tarballTtl = std::numeric_limits::max(); - } - if (!downloadSettings.tries.overriden) { - downloadSettings.tries = 0; - } - if (!downloadSettings.connectTimeout.overriden) { - downloadSettings.connectTimeout = 1; - } - } - - args.command->prepare(); - args.command->run(); -} - -} // namespace nix - -int main(int argc, char* argv[]) { - FLAGS_logtostderr = true; - google::InitGoogleLogging(argv[0]); - - return nix::handleExceptions(argv[0], - [&]() { nix::mainWrapped(argc, argv); }); -} diff --git a/third_party/nix/src/nix/optimise-store.cc b/third_party/nix/src/nix/optimise-store.cc deleted file mode 100644 index ceb53aa77b..0000000000 --- a/third_party/nix/src/nix/optimise-store.cc +++ /dev/null @@ -1,27 +0,0 @@ -#include - -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdOptimiseStore final : StoreCommand { - CmdOptimiseStore() = default; - - std::string name() override { return "optimise-store"; } - - std::string description() override { - return "replace identical files in the store by hard links"; - } - - Examples examples() override { - return { - Example{"To optimise the Nix store:", "nix optimise-store"}, - }; - } - - void run(ref store) override { store->optimiseStore(); } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/path-info.cc b/third_party/nix/src/nix/path-info.cc deleted file mode 100644 index fcf060d50d..0000000000 --- a/third_party/nix/src/nix/path-info.cc +++ /dev/null @@ -1,133 +0,0 @@ -#include -#include - -#include "libmain/common-args.hh" -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "libutil/json.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdPathInfo final : StorePathsCommand, MixJSON { - bool showSize = false; - bool showClosureSize = false; - bool humanReadable = false; - bool showSigs = false; - - CmdPathInfo() { - mkFlag('s', "size", "print size of the NAR dump of each path", &showSize); - mkFlag('S', "closure-size", - "print sum size of the NAR dumps of the closure of each path", - &showClosureSize); - mkFlag('h', "human-readable", - "with -s and -S, print sizes like 1K 234M 5.67G etc.", - &humanReadable); - mkFlag(0, "sigs", "show signatures", &showSigs); - } - - std::string name() override { return "path-info"; } - - std::string description() override { - return "query information about store paths"; - } - - Examples examples() override { - return { - Example{"To show the closure sizes of every path in the current NixOS " - "system closure, sorted by size:", - "nix path-info -rS /run/current-system | sort -nk2"}, - Example{"To show a package's closure size and all its dependencies " - "with human readable sizes:", - "nix path-info -rsSh nixpkgs.rust"}, - Example{"To check the existence of a path in a binary cache:", - "nix path-info -r /nix/store/7qvk5c91...-geeqie-1.1 --store " - "https://cache.nixos.org/"}, - Example{"To print the 10 most recently added paths (using --json and " - "the jq(1) command):", - "nix path-info --json --all | jq -r " - "'sort_by(.registrationTime)[-11:-1][].path'"}, - Example{"To show the size of the entire Nix store:", - "nix path-info --json --all | jq 'map(.narSize) | add'"}, - Example{"To show every path whose closure is bigger than 1 GB, sorted " - "by closure size:", - "nix path-info --json --all -S | jq 
'map(select(.closureSize > " - "1e9)) | sort_by(.closureSize) | map([.path, .closureSize])'"}, - }; - } - - void printSize(unsigned long long value) { - if (!humanReadable) { - std::cout << fmt("\t%11d", value); - return; - } - - static const std::array idents{ - {' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'}}; - size_t power = 0; - double res = value; - while (res > 1024 && power < idents.size()) { - ++power; - res /= 1024; - } - std::cout << fmt("\t%6.1f%c", res, idents.at(power)); - } - - void run(ref store, Paths storePaths) override { - size_t pathLen = 0; - for (auto& storePath : storePaths) { - pathLen = std::max(pathLen, storePath.size()); - } - - if (json) { - JSONPlaceholder jsonRoot(std::cout); - store->pathInfoToJSON(jsonRoot, - // FIXME: preserve order? - PathSet(storePaths.begin(), storePaths.end()), true, - showClosureSize, AllowInvalid); - } - - else { - for (auto storePath : storePaths) { - auto info = store->queryPathInfo(storePath); - storePath = info->path; // FIXME: screws up padding - - std::cout << storePath; - - if (showSize || showClosureSize || showSigs) { - std::cout << std::string( - std::max(0, static_cast(pathLen) - - static_cast(storePath.size())), - ' '); - } - - if (showSize) { - printSize(info->narSize); - } - - if (showClosureSize) { - printSize(store->getClosureSize(storePath).first); - } - - if (showSigs) { - std::cout << '\t'; - Strings ss; - if (info->ultimate) { - ss.push_back("ultimate"); - } - if (!info->ca.empty()) { - ss.push_back("ca:" + info->ca); - } - for (auto& sig : info->sigs) { - ss.push_back(sig); - } - std::cout << concatStringsSep(" ", ss); - } - - std::cout << std::endl; - } - } - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/ping-store.cc b/third_party/nix/src/nix/ping-store.cc deleted file mode 100644 index 4a33486bf8..0000000000 --- a/third_party/nix/src/nix/ping-store.cc +++ /dev/null @@ -1,25 +0,0 @@ -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdPingStore final : StoreCommand { - std::string name() override { return "ping-store"; } - - std::string description() override { - return "test whether a store can be opened"; - } - - Examples examples() override { - return { - Example{ - "To test whether connecting to a remote Nix store via SSH works:", - "nix ping-store --store ssh://mac1"}, - }; - } - - void run(ref store) override { store->connect(); } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/repl.cc b/third_party/nix/src/nix/repl.cc deleted file mode 100644 index b926d195ae..0000000000 --- a/third_party/nix/src/nix/repl.cc +++ /dev/null @@ -1,819 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "libexpr/common-eval-args.hh" -#include "libexpr/eval-inline.hh" -#include "libexpr/eval.hh" -#include "libexpr/get-drvs.hh" -#include "libmain/shared.hh" -#include "libstore/derivations.hh" -#include "libstore/globals.hh" -#include "libstore/store-api.hh" -#include "libutil/affinity.hh" -#include "libutil/finally.hh" -#include "nix/command.hh" - -namespace nix { - -#define ESC_RED "\033[31m" -#define ESC_GRE "\033[32m" -#define ESC_YEL "\033[33m" -#define ESC_BLU "\033[34;1m" -#define ESC_MAG "\033[35m" -#define ESC_CYA "\033[36m" -#define ESC_END "\033[0m" - -struct NixRepl { - std::string curDir; - EvalState state; - std::unique_ptr autoArgs; - - Strings 
loadedFiles; - - const static int envSize = 32768; - StaticEnv staticEnv; - Env* env; - int displ; - StringSet varNames; - - const Path historyFile; - - NixRepl(const Strings& searchPath, const nix::ref& store); - ~NixRepl(); - void mainLoop(const std::vector& files); - StringSet completePrefix(const std::string& prefix); - static bool getLine(std::string& input, const std::string& prompt); - Path getDerivationPath(Value& v); - bool processLine(std::string line); - void loadFile(const Path& path); - void initEnv(); - void reloadFiles(); - void addAttrsToScope(Value& attrs); - void addVarToScope(const Symbol& name, Value& v); - Expr* parseString(const std::string& s); - void evalString(std::string s, Value& v); - - using ValuesSeen = std::set; - std::ostream& printValue(std::ostream& str, Value& v, unsigned int maxDepth); - std::ostream& printValue(std::ostream& str, Value& v, unsigned int maxDepth, - ValuesSeen& seen); -}; - -void printHelp() { - std::cout << "Usage: nix-repl [--help] [--version] [-I path] paths...\n" - << "\n" - << "nix-repl is a simple read-eval-print loop (REPL) for the Nix " - "package manager.\n" - << "\n" - << "Options:\n" - << " --help\n" - << " Prints out a summary of the command syntax and exits.\n" - << "\n" - << " --version\n" - << " Prints out the Nix version number on standard output " - "and exits.\n" - << "\n" - << " -I path\n" - << " Add a path to the Nix expression search path. This " - "option may be given\n" - << " multiple times. See the NIX_PATH environment variable " - "for information on\n" - << " the semantics of the Nix search path. Paths added " - "through -I take\n" - << " precedence over NIX_PATH.\n" - << "\n" - << " paths...\n" - << " A list of paths to files containing Nix expressions " - "which nix-repl will\n" - << " load and add to its scope.\n" - << "\n" - << " A path surrounded in < and > will be looked up in the " - "Nix expression search\n" - << " path, as in the Nix language itself.\n" - << "\n" - << " If an element of paths starts with http:// or " - "https://, it is interpreted\n" - << " as the URL of a tarball that will be downloaded and " - "unpacked to a temporary\n" - << " location. 
The tarball must include a single top-level " - "directory containing\n" - << " at least a file named default.nix.\n"; -} - -std::string removeWhitespace(std::string s) { - s = absl::StripTrailingAsciiWhitespace(s); - size_t n = s.find_first_not_of(" \n\r\t"); - if (n != std::string::npos) { - s = std::string(s, n); - } - return s; -} - -NixRepl::NixRepl(const Strings& searchPath, const nix::ref& store) - : state(searchPath, store), - staticEnv(false, &state.staticBaseEnv), - historyFile(getDataDir() + "/nix/repl-history") { - curDir = absPath("."); -} - -NixRepl::~NixRepl() { write_history(historyFile.c_str()); } - -static NixRepl* curRepl; // ugly - -static char* completionCallback(char* s, int* match) { - auto possible = curRepl->completePrefix(s); - if (possible.size() == 1) { - *match = 1; - auto* res = strdup(possible.begin()->c_str() + strlen(s)); - if (res == nullptr) { - throw Error("allocation failure"); - } - return res; - } - if (possible.size() > 1) { - auto checkAllHaveSameAt = [&](size_t pos) { - auto& first = *possible.begin(); - for (auto& p : possible) { - if (p.size() <= pos || p[pos] != first[pos]) { - return false; - } - } - return true; - }; - size_t start = strlen(s); - size_t len = 0; - while (checkAllHaveSameAt(start + len)) { - ++len; - } - if (len > 0) { - *match = 1; - auto* res = strdup(std::string(*possible.begin(), start, len).c_str()); - if (res == nullptr) { - throw Error("allocation failure"); - } - return res; - } - } - - *match = 0; - return nullptr; -} - -static int listPossibleCallback(char* s, char*** avp) { - auto possible = curRepl->completePrefix(s); - - if (possible.size() > (INT_MAX / sizeof(char*))) { - throw Error("too many completions"); - } - - int ac = 0; - char** vp = nullptr; - - auto check = [&](auto* p) { - if (!p) { - if (vp) { - while (--ac >= 0) { - free(vp[ac]); - } - free(vp); - } - throw Error("allocation failure"); - } - return p; - }; - - vp = check(static_cast(malloc(possible.size() * sizeof(char*)))); - - for (auto& p : possible) { - vp[ac++] = check(strdup(p.c_str())); - } - - *avp = vp; - - return ac; -} - -namespace { -// Used to communicate to NixRepl::getLine whether a signal occurred in -// ::readline. -volatile sig_atomic_t g_signal_received = 0; - -void sigintHandler(int signo) { g_signal_received = signo; } -} // namespace - -void NixRepl::mainLoop(const std::vector& files) { - std::string error = ANSI_RED "error:" ANSI_NORMAL " "; - std::cout << "Welcome to Nix version " << nixVersion << ". Type :? for help." - << std::endl - << std::endl; - - for (auto& i : files) { - loadedFiles.push_back(i); - } - - reloadFiles(); - if (!loadedFiles.empty()) { - std::cout << std::endl; - } - - // Allow nix-repl specific settings in .inputrc - rl_readline_name = "nix-repl"; - createDirs(dirOf(historyFile)); - el_hist_size = 1000; - read_history(historyFile.c_str()); - curRepl = this; - rl_set_complete_func(completionCallback); - rl_set_list_possib_func(listPossibleCallback); - - std::string input; - - while (true) { - // When continuing input from previous lines, don't print a prompt, just - // align to the same number of chars as the prompt. - if (!getLine(input, input.empty() ? "nix-repl> " : " ")) { - break; - } - - try { - if (!removeWhitespace(input).empty() && !processLine(input)) { - return; - } - } catch (ParseError& e) { - if (e.msg().find("unexpected $end") != std::string::npos) { - // For parse errors on incomplete input, we continue waiting for the - // next line of input without clearing the input so far. 
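// ---------------------------------------------------------------------------
// [Editorial sketch; not part of the removed sources.] The removed
// completionCallback() above first fills in the longest extension shared by
// every completion candidate before falling back to listing them. The core of
// that behaviour, with illustrative names, is a character-by-character scan:
#include <iostream>
#include <set>
#include <string>

// Assumes every candidate starts with `typed` (they are completions of it).
static std::string commonExtension(const std::string& typed,
                                   const std::set<std::string>& candidates) {
  if (candidates.empty()) {
    return "";
  }
  const std::string& first = *candidates.begin();
  size_t len = typed.size();
  while (len < first.size()) {
    char c = first[len];
    bool allAgree = true;
    for (const auto& cand : candidates) {
      if (cand.size() <= len || cand[len] != c) {
        allAgree = false;
        break;
      }
    }
    if (!allAgree) {
      break;
    }
    ++len;
  }
  return first.substr(typed.size(), len - typed.size());
}

int main() {
  std::set<std::string> cands{"builtins.fetchGit", "builtins.fetchTarball",
                              "builtins.fetchurl"};
  // Prints "etch": typing "builtins.f" completes up to "builtins.fetch".
  std::cout << commonExtension("builtins.f", cands) << "\n";
}
// ---------------------------------------------------------------------------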
- continue; - } - LOG(ERROR) << error << (settings.showTrace ? e.prefix() : "") << e.msg(); - - } catch (Error& e) { - LOG(ERROR) << error << (settings.showTrace ? e.prefix() : "") << e.msg(); - } catch (Interrupted& e) { - LOG(ERROR) << error << (settings.showTrace ? e.prefix() : "") << e.msg(); - } - - // We handled the current input fully, so we should clear it - // and read brand new input. - input.clear(); - std::cout << std::endl; - } -} - -bool NixRepl::getLine(std::string& input, const std::string& prompt) { - struct sigaction act; - struct sigaction old; - sigset_t savedSignalMask; - sigset_t set; - - auto setupSignals = [&]() { - act.sa_handler = sigintHandler; - sigfillset(&act.sa_mask); - act.sa_flags = 0; - if (sigaction(SIGINT, &act, &old) != 0) { - throw SysError("installing handler for SIGINT"); - } - - sigemptyset(&set); - sigaddset(&set, SIGINT); - if (sigprocmask(SIG_UNBLOCK, &set, &savedSignalMask) != 0) { - throw SysError("unblocking SIGINT"); - } - }; - auto restoreSignals = [&]() { - if (sigprocmask(SIG_SETMASK, &savedSignalMask, nullptr) != 0) { - throw SysError("restoring signals"); - } - - if (sigaction(SIGINT, &old, nullptr) != 0) { - throw SysError("restoring handler for SIGINT"); - } - }; - - setupSignals(); - char* s = readline(prompt.c_str()); - Finally doFree([&]() { free(s); }); - restoreSignals(); - - if (g_signal_received != 0) { - g_signal_received = 0; - input.clear(); - return true; - } - - if (s == nullptr) { - return false; - } - input += s; - input += '\n'; - return true; -} - -StringSet NixRepl::completePrefix(const std::string& prefix) { - StringSet completions; - - size_t start = prefix.find_last_of(" \n\r\t(){}[]"); - std::string prev; - std::string cur; - if (start == std::string::npos) { - prev = ""; - cur = prefix; - } else { - prev = std::string(prefix, 0, start + 1); - cur = std::string(prefix, start + 1); - } - - size_t slash; - size_t dot; - - if ((slash = cur.rfind('/')) != std::string::npos) { - try { - auto dir = std::string(cur, 0, slash); - auto prefix2 = std::string(cur, slash + 1); - for (auto& entry : readDirectory(dir.empty() ? "/" : dir)) { - if (entry.name[0] != '.' && absl::StartsWith(entry.name, prefix2)) { - completions.insert(prev + dir + "/" + entry.name); - } - } - } catch (Error&) { - } - } else if ((dot = cur.rfind('.')) == std::string::npos) { - /* This is a variable name; look it up in the current scope. */ - auto i = varNames.lower_bound(cur); - while (i != varNames.end()) { - if (std::string(*i, 0, cur.size()) != cur) { - break; - } - completions.insert(prev + *i); - i++; - } - } else { - try { - /* This is an expression that should evaluate to an - attribute set. Evaluate it to get the names of the - attributes. */ - std::string expr(cur, 0, dot); - std::string cur2 = std::string(cur, dot + 1); - - Expr* e = parseString(expr); - Value v; - e->eval(state, *env, v); - state.forceAttrs(v); - - for (auto& i : *v.attrs) { - std::string name = i.second.name; - if (std::string(name, 0, cur2.size()) != cur2) { - continue; - } - completions.insert(prev + expr + "." + name); - } - - } catch (ParseError& e) { - // Quietly ignore parse errors. - } catch (EvalError& e) { - // Quietly ignore evaluation errors. - } catch (UndefinedVarError& e) { - // Quietly ignore undefined variable errors. 
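// ---------------------------------------------------------------------------
// [Editorial sketch; not part of the removed sources.] The removed getLine()
// above installs a SIGINT handler, unblocks the signal around the blocking
// readline() call, restores the previous state, and then inspects a volatile
// flag. The same pattern around a plain read() loop, with illustrative names:
#include <csignal>
#include <string>
#include <unistd.h>

namespace {
volatile sig_atomic_t g_interrupted = 0;
void onSigint(int) { g_interrupted = 1; }
}  // namespace

// Returns false on clean EOF. A SIGINT while blocked abandons the current
// input and returns true with an empty line, so the caller can re-prompt.
static bool readLineInterruptible(std::string* line) {
  struct sigaction act {}, old {};
  act.sa_handler = onSigint;
  sigfillset(&act.sa_mask);
  // sa_flags stays 0 (no SA_RESTART), so read() returns EINTR on SIGINT.
  sigaction(SIGINT, &act, &old);

  sigset_t unblock, saved;
  sigemptyset(&unblock);
  sigaddset(&unblock, SIGINT);
  sigprocmask(SIG_UNBLOCK, &unblock, &saved);

  line->clear();
  char c = 0;
  ssize_t n;
  while ((n = read(STDIN_FILENO, &c, 1)) == 1 && c != '\n') {
    line->push_back(c);
  }

  sigprocmask(SIG_SETMASK, &saved, nullptr);
  sigaction(SIGINT, &old, nullptr);

  if (g_interrupted != 0) {
    g_interrupted = 0;
    line->clear();
    return true;
  }
  return n > 0 || !line->empty();
}

int main() {
  std::string line;
  while (readLineInterruptible(&line)) {
    write(STDOUT_FILENO, line.data(), line.size());
    write(STDOUT_FILENO, "\n", 1);
  }
}
// ---------------------------------------------------------------------------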
- } - } - - return completions; -} - -static int runProgram(const std::string& program, const Strings& args) { - Strings args2(args); - args2.push_front(program); - - Pid pid; - pid = fork(); - if (pid == Pid(-1)) { - throw SysError("forking"); - } - if (pid == Pid(0)) { - restoreAffinity(); - execvp(program.c_str(), stringsToCharPtrs(args2).data()); - _exit(1); - } - - return pid.wait(); -} - -bool isVarName(const std::string& s) { - if (s.empty()) { - return false; - } - char c = s[0]; - if ((c >= '0' && c <= '9') || c == '-' || c == '\'') { - return false; - } - for (auto& i : s) { - if (!((i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z') || - (i >= '0' && i <= '9') || i == '_' || i == '-' || i == '\'')) { - return false; - } - } - return true; -} - -Path NixRepl::getDerivationPath(Value& v) { - auto drvInfo = getDerivation(state, v, false); - if (!drvInfo) { - throw Error( - "expression does not evaluate to a derivation, so I can't build it"); - } - Path drvPath = drvInfo->queryDrvPath(); - if (drvPath.empty() || !state.store->isValidPath(drvPath)) { - throw Error("expression did not evaluate to a valid derivation"); - } - return drvPath; -} - -bool NixRepl::processLine(std::string line) { - if (line.empty()) { - return true; - } - - std::string command; - std::string arg; - - if (line[0] == ':') { - size_t p = line.find_first_of(" \n\r\t"); - command = std::string(line, 0, p); - if (p != std::string::npos) { - arg = removeWhitespace(std::string(line, p)); - } - } else { - arg = line; - } - - if (command == ":?" || command == ":help") { - std::cout << "The following commands are available:\n" - << "\n" - << " Evaluate and print expression\n" - << " = Bind expression to variable\n" - << " :a Add attributes from resulting set to scope\n" - << " :b Build derivation\n" - << " :i Build derivation, then install result into " - "current profile\n" - << " :l Load Nix expression and add it to scope\n" - << " :p Evaluate and print expression recursively\n" - << " :q Exit nix-repl\n" - << " :r Reload all files\n" - << " :s Build dependencies of derivation, then start " - "nix-shell\n" - << " :t Describe result of evaluation\n" - << " :u Build derivation, then start nix-shell\n"; - } - - else if (command == ":a" || command == ":add") { - Value v; - evalString(arg, v); - addAttrsToScope(v); - } - - else if (command == ":l" || command == ":load") { - state.resetFileCache(); - loadFile(arg); - } - - else if (command == ":r" || command == ":reload") { - state.resetFileCache(); - reloadFiles(); - } - - else if (command == ":t") { - Value v; - evalString(arg, v); - std::cout << showType(v) << std::endl; - - } else if (command == ":u") { - Value v; - Value f; - Value result; - evalString(arg, v); - evalString( - "drv: (import {}).runCommand \"shell\" { buildInputs = [ drv " - "]; } \"\"", - f); - state.callFunction(f, v, result, Pos()); - - Path drvPath = getDerivationPath(result); - runProgram(settings.nixBinDir + "/nix-shell", Strings{drvPath}); - } - - else if (command == ":b" || command == ":i" || command == ":s") { - Value v; - evalString(arg, v); - Path drvPath = getDerivationPath(v); - - if (command == ":b") { - /* We could do the build in this process using buildPaths(), - but doing it in a child makes it easier to recover from - problems / SIGINT. 
*/ - if (runProgram(settings.nixBinDir + "/nix", - Strings{"build", "--no-link", drvPath}) == 0) { - Derivation drv = readDerivation(drvPath); - std::cout << std::endl - << "this derivation produced the following outputs:" - << std::endl; - for (auto& i : drv.outputs) { - std::cout << format(" %1% -> %2%") % i.first % i.second.path - << std::endl; - } - } - } else if (command == ":i") { - runProgram(settings.nixBinDir + "/nix-env", Strings{"-i", drvPath}); - } else { - runProgram(settings.nixBinDir + "/nix-shell", Strings{drvPath}); - } - } - - else if (command == ":p" || command == ":print") { - Value v; - evalString(arg, v); - printValue(std::cout, v, 1000000000) << std::endl; - } - - else if (command == ":q" || command == ":quit") { - return false; - - } else if (!command.empty()) { - throw Error(format("unknown command '%1%'") % command); - - } else { - size_t p = line.find('='); - std::string name; - if (p != std::string::npos && p < line.size() && line[p + 1] != '=' && - isVarName(name = removeWhitespace(std::string(line, 0, p)))) { - Expr* e = parseString(std::string(line, p + 1)); - Value& v(*state.allocValue()); - v.type = tThunk; - v.thunk.env = env; - v.thunk.expr = e; - addVarToScope(state.symbols.Create(name), v); - } else { - Value v; - evalString(line, v); - printValue(std::cout, v, 1) << std::endl; - } - } - - return true; -} - -void NixRepl::loadFile(const Path& path) { - loadedFiles.remove(path); - loadedFiles.push_back(path); - Value v; - Value v2; - state.evalFile(lookupFileArg(state, path), v); - state.autoCallFunction(autoArgs.get(), v, v2); - addAttrsToScope(v2); -} - -void NixRepl::initEnv() { - env = &state.allocEnv(envSize); - env->up = &state.baseEnv; - displ = 0; - staticEnv.vars.clear(); - - varNames.clear(); - for (auto& i : state.staticBaseEnv.vars) { - varNames.insert(i.first); - } -} - -void NixRepl::reloadFiles() { - initEnv(); - - Strings old = loadedFiles; - loadedFiles.clear(); - - bool first = true; - for (auto& i : old) { - if (!first) { - std::cout << std::endl; - } - first = false; - std::cout << format("Loading '%1%'...") % i << std::endl; - loadFile(i); - } -} - -void NixRepl::addAttrsToScope(Value& attrs) { - state.forceAttrs(attrs); - for (auto& i : *attrs.attrs) { - addVarToScope(i.second.name, *i.second.value); - } - std::cout << format("Added %1% variables.") % attrs.attrs->size() - << std::endl; -} - -void NixRepl::addVarToScope(const Symbol& name, Value& v) { - if (displ >= envSize) { - throw Error("environment full; cannot add more variables"); - } - staticEnv.vars[name] = displ; - env->values[displ++] = &v; - varNames.insert(std::string(name)); -} - -Expr* NixRepl::parseString(const std::string& s) { - Expr* e = state.parseExprFromString(s, curDir, staticEnv); - return e; -} - -void NixRepl::evalString(std::string s, Value& v) { - Expr* e = parseString(std::move(s)); - e->eval(state, *env, v); - state.forceValue(v); -} - -std::ostream& NixRepl::printValue(std::ostream& str, Value& v, - unsigned int maxDepth) { - ValuesSeen seen; - return printValue(str, v, maxDepth, seen); -} - -std::ostream& printStringValue(std::ostream& str, const char* string) { - str << "\""; - for (const char* i = string; *i != 0; i++) { - if (*i == '\"' || *i == '\\') { - str << "\\" << *i; - } else if (*i == '\n') { - str << "\\n"; - } else if (*i == '\r') { - str << "\\r"; - } else if (*i == '\t') { - str << "\\t"; - } else { - str << *i; - } - } - str << "\""; - return str; -} - -// FIXME: lot of cut&paste from Nix's eval.cc. 
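// ---------------------------------------------------------------------------
// [Editorial sketch; not part of the removed sources.] As the comment above
// notes, the removed REPL runs builds through a child process (runProgram) so
// that failures and Ctrl-C are easier to recover from. The underlying
// fork/execvp/waitpid pattern, reduced to a standalone helper with
// illustrative names:
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>
#include <string>
#include <vector>

// Runs `program` with `args` in a child and returns its exit code, or -1 if
// forking/waiting failed or the child died from a signal.
static int runChild(const std::string& program,
                    const std::vector<std::string>& args) {
  std::vector<char*> argv;
  argv.push_back(const_cast<char*>(program.c_str()));
  for (const auto& a : args) {
    argv.push_back(const_cast<char*>(a.c_str()));
  }
  argv.push_back(nullptr);

  pid_t pid = fork();
  if (pid == -1) {
    perror("fork");
    return -1;
  }
  if (pid == 0) {
    execvp(program.c_str(), argv.data());
    _exit(127);  // only reached if exec failed
  }

  int status = 0;
  if (waitpid(pid, &status, 0) == -1) {
    perror("waitpid");
    return -1;
  }
  return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}

int main() { return runChild("echo", {"hello", "from", "the", "child"}); }
// ---------------------------------------------------------------------------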
-std::ostream& NixRepl::printValue(std::ostream& str, Value& v, - unsigned int maxDepth, ValuesSeen& seen) { - str.flush(); - checkInterrupt(); - - state.forceValue(v); - - switch (v.type) { - case tInt: - str << ESC_CYA << v.integer << ESC_END; - break; - - case tBool: - str << ESC_CYA << (v.boolean ? "true" : "false") << ESC_END; - break; - - case tString: - str << ESC_YEL; - printStringValue(str, v.string.s); - str << ESC_END; - break; - - case tPath: - str << ESC_GRE << v.path << ESC_END; // !!! escaping? - break; - - case tNull: - str << ESC_CYA "null" ESC_END; - break; - - case tAttrs: { - seen.insert(&v); - - bool isDrv = state.isDerivation(v); - - if (isDrv) { - str << "«derivation "; - Bindings::iterator i = v.attrs->find(state.sDrvPath); - PathSet context; - Path drvPath = - i != v.attrs->end() - ? state.coerceToPath(*i->second.pos, *i->second.value, context) - : "???"; - str << drvPath << "»"; - } - - else if (maxDepth > 0) { - str << "{ "; - - typedef std::map Sorted; - Sorted sorted; - for (auto& i : *v.attrs) { - sorted[i.second.name] = i.second.value; - } - - for (auto& i : sorted) { - if (isVarName(i.first)) { - str << i.first; - } else { - printStringValue(str, i.first.c_str()); - } - str << " = "; - if (seen.find(i.second) != seen.end()) { - str << "«repeated»"; - } else { - try { - printValue(str, *i.second, maxDepth - 1, seen); - } catch (AssertionError& e) { - str << ESC_RED "«error: " << e.msg() << "»" ESC_END; - } - } - str << "; "; - } - - str << "}"; - } else { - str << "{ ... }"; - } - - break; - } - - case tList: - seen.insert(&v); - - str << "[ "; - if (maxDepth > 0) { - for (unsigned int n = 0; n < v.listSize(); ++n) { - if (seen.find((*v.list)[n]) != seen.end()) { - str << "«repeated»"; - } else { - try { - printValue(str, *(*v.list)[n], maxDepth - 1, seen); - } catch (AssertionError& e) { - str << ESC_RED "«error: " << e.msg() << "»" ESC_END; - } - } - str << " "; - } - } else { - str << "... 
"; - } - - str << "]"; - break; - - case tLambda: { - std::ostringstream s; - s << v.lambda.fun->pos; - str << ESC_BLU "«lambda @ " << filterANSIEscapes(s.str()) << "»" ESC_END; - break; - } - - case tPrimOp: - str << ESC_MAG "«primop»" ESC_END; - break; - - case tPrimOpApp: - str << ESC_BLU "«primop-app»" ESC_END; - break; - - case tFloat: - str << v.fpoint; - break; - - default: - str << ESC_RED "«unknown»" ESC_END; - break; - } - - return str; -} - -struct CmdRepl final : StoreCommand, MixEvalArgs { - std::vector files; - - CmdRepl() { expectArgs("files", &files); } - - std::string name() override { return "repl"; } - - std::string description() override { - return "start an interactive environment for evaluating Nix expressions"; - } - - void run(ref store) override { - auto repl = std::make_unique(searchPath, openStore()); - repl->autoArgs = getAutoArgs(repl->state); - repl->mainLoop(files); - } -}; - -static RegisterCommand r1(make_ref()); - -} // namespace nix diff --git a/third_party/nix/src/nix/run.cc b/third_party/nix/src/nix/run.cc deleted file mode 100644 index b3b54f300b..0000000000 --- a/third_party/nix/src/nix/run.cc +++ /dev/null @@ -1,283 +0,0 @@ -#include - -#include -#include - -#include "libmain/common-args.hh" -#include "libmain/shared.hh" -#include "libstore/derivations.hh" -#include "libstore/fs-accessor.hh" -#include "libstore/local-store.hh" -#include "libstore/store-api.hh" -#include "libutil/affinity.hh" -#include "libutil/finally.hh" -#include "nix/command.hh" - -// note: exported in header file -std::string chrootHelperName = "__run_in_chroot"; - -namespace nix { -struct CmdRun final : InstallablesCommand { - std::vector command = {"bash"}; - StringSet keep, unset; - bool ignoreEnvironment = false; - - CmdRun() { - mkFlag() - .longName("command") - .shortName('c') - .description("command and arguments to be executed; defaults to 'bash'") - .labels({"command", "args"}) - .arity(ArityAny) - .handler([&](const std::vector& ss) { - if (ss.empty()) { - throw UsageError("--command requires at least one argument"); - } - command = ss; - }); - - mkFlag() - .longName("ignore-environment") - .shortName('i') - .description( - "clear the entire environment (except those specified with --keep)") - .set(&ignoreEnvironment, true); - - mkFlag() - .longName("keep") - .shortName('k') - .description("keep specified environment variable") - .arity(1) - .labels({"name"}) - .handler([&](std::vector ss) { keep.insert(ss.front()); }); - - mkFlag() - .longName("unset") - .shortName('u') - .description("unset specified environment variable") - .arity(1) - .labels({"name"}) - .handler( - [&](std::vector ss) { unset.insert(ss.front()); }); - } - - std::string name() override { return "run"; } - - std::string description() override { - return "run a shell in which the specified packages are available"; - } - - Examples examples() override { - return { - Example{"To start a shell providing GNU Hello from NixOS 17.03:", - "nix run -f channel:nixos-17.03 hello"}, - Example{"To start a shell providing youtube-dl from your 'nixpkgs' " - "channel:", - "nix run nixpkgs.youtube-dl"}, - Example{"To run GNU Hello:", - "nix run nixpkgs.hello -c hello --greeting 'Hi everybody!'"}, - Example{"To run GNU Hello in a chroot store:", - "nix run --store ~/my-nix nixpkgs.hello -c hello"}, - }; - } - - void run(ref store) override { - auto outPaths = toStorePaths(store, Build, installables); - - auto accessor = store->getFSAccessor(); - - if (ignoreEnvironment) { - if (!unset.empty()) { - throw UsageError( - 
"--unset does not make sense with --ignore-environment"); - } - - std::map kept; - for (auto& var : keep) { - auto s = getenv(var.c_str()); - if (s != nullptr) { - kept[var] = s; - } - } - - clearEnv(); - - for (auto& var : kept) { - setenv(var.first.c_str(), var.second.c_str(), 1); - } - - } else { - if (!keep.empty()) { - throw UsageError( - "--keep does not make sense without --ignore-environment"); - } - - for (auto& var : unset) { - unsetenv(var.c_str()); - } - } - - std::unordered_set done; - std::queue todo; - for (auto& path : outPaths) { - todo.push(path); - } - - Strings unixPath = absl::StrSplit(getEnv("PATH").value_or(""), - absl::ByChar(':'), absl::SkipEmpty()); - - while (!todo.empty()) { - Path path = todo.front(); - todo.pop(); - if (!done.insert(path).second) { - continue; - } - - { unixPath.push_front(path + "/bin"); } - - auto propPath = path + "/nix-support/propagated-user-env-packages"; - if (accessor->stat(propPath).type == FSAccessor::tRegular) { - for (auto p : - absl::StrSplit(readFile(propPath), absl::ByAnyChar(" \t\n\r"), - absl::SkipEmpty())) { - todo.push(std::string(p)); - } - } - } - - setenv("PATH", concatStringsSep(":", unixPath).c_str(), 1); - - std::string cmd = *command.begin(); - Strings args; - for (auto& arg : command) { - args.push_back(arg); - } - - restoreSignals(); - - restoreAffinity(); - - /* If this is a diverted store (i.e. its "logical" location - (typically /nix/store) differs from its "physical" location - (e.g. /home/eelco/nix/store), then run the command in a - chroot. For non-root users, this requires running it in new - mount and user namespaces. Unfortunately, - unshare(CLONE_NEWUSER) doesn't work in a multithreaded - program (which "nix" is), so we exec() a single-threaded - helper program (chrootHelper() below) to do the work. */ - auto store2 = store.dynamic_pointer_cast(); - - if (store2 && store->storeDir != store2->realStoreDir) { - Strings helperArgs = {chrootHelperName, store->storeDir, - store2->realStoreDir, cmd}; - for (auto& arg : args) { - helperArgs.push_back(arg); - } - - execv(readLink("/proc/self/exe").c_str(), - stringsToCharPtrs(helperArgs).data()); - - throw SysError("could not execute chroot helper"); - } - - execvp(cmd.c_str(), stringsToCharPtrs(args).data()); - - throw SysError("unable to exec '%s'", cmd); - } -}; - -static RegisterCommand r1(make_ref()); -} // namespace nix - -void chrootHelper(int argc, char** argv) { - int p = 1; - std::string storeDir = argv[p++]; - std::string realStoreDir = argv[p++]; - std::string cmd = argv[p++]; - nix::Strings args; - while (p < argc) { - args.push_back(argv[p++]); - } - -#if __linux__ - uid_t uid = getuid(); - uid_t gid = getgid(); - - if (unshare(CLONE_NEWUSER | CLONE_NEWNS) == -1) { - /* Try with just CLONE_NEWNS in case user namespaces are - specifically disabled. */ - if (unshare(CLONE_NEWNS) == -1) { - throw nix::SysError("setting up a private mount namespace"); - } - } - - /* Bind-mount realStoreDir on /nix/store. If the latter mount - point doesn't already exists, we have to create a chroot - environment containing the mount point and bind mounts for the - children of /. Would be nice if we could use overlayfs here, - but that doesn't work in a user namespace yet (Ubuntu has a - patch for this: - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1478578). */ - if (!nix::pathExists(storeDir)) { - // FIXME: Use overlayfs? 
- - nix::Path tmpDir = nix::createTempDir(); - - nix::createDirs(tmpDir + storeDir); - - if (mount(realStoreDir.c_str(), (tmpDir + storeDir).c_str(), "", MS_BIND, - nullptr) == -1) { - throw nix::SysError("mounting '%s' on '%s'", realStoreDir, storeDir); - } - - for (const auto& entry : nix::readDirectory("/")) { - auto src = "/" + entry.name; - auto st = nix::lstat(src); - if (!S_ISDIR(st.st_mode)) { - continue; - } - nix::Path dst = tmpDir + "/" + entry.name; - if (nix::pathExists(dst)) { - continue; - } - if (mkdir(dst.c_str(), 0700) == -1) { - throw nix::SysError("creating directory '%s'", dst); - } - if (mount(src.c_str(), dst.c_str(), "", MS_BIND | MS_REC, nullptr) == - -1) { - throw nix::SysError("mounting '%s' on '%s'", src, dst); - } - } - - char* cwd = getcwd(nullptr, 0); - if (cwd == nullptr) { - throw nix::SysError("getting current directory"); - } - ::Finally freeCwd([&]() { free(cwd); }); - - if (chroot(tmpDir.c_str()) == -1) { - throw nix::SysError(nix::format("chrooting into '%s'") % tmpDir); - } - - if (chdir(cwd) == -1) { - throw nix::SysError(nix::format("chdir to '%s' in chroot") % cwd); - } - } else if (mount(realStoreDir.c_str(), storeDir.c_str(), "", MS_BIND, - nullptr) == -1) { - throw nix::SysError("mounting '%s' on '%s'", realStoreDir, storeDir); - } - - nix::writeFile("/proc/self/setgroups", "deny"); - nix::writeFile("/proc/self/uid_map", nix::fmt("%d %d %d", uid, uid, 1)); - nix::writeFile("/proc/self/gid_map", nix::fmt("%d %d %d", gid, gid, 1)); - - execvp(cmd.c_str(), nix::stringsToCharPtrs(args).data()); - - throw nix::SysError("unable to exec '%s'", cmd); - -#else - throw nix::Error( - "mounting the Nix store on '%s' is not supported on this platform", - storeDir); -#endif -} diff --git a/third_party/nix/src/nix/search.cc b/third_party/nix/src/nix/search.cc deleted file mode 100644 index 5a6bae6a11..0000000000 --- a/third_party/nix/src/nix/search.cc +++ /dev/null @@ -1,276 +0,0 @@ -#include -#include - -#include - -#include "libexpr/eval-inline.hh" -#include "libexpr/eval.hh" -#include "libexpr/get-drvs.hh" -#include "libexpr/json-to-value.hh" -#include "libexpr/names.hh" -#include "libmain/common-args.hh" -#include "libmain/shared.hh" -#include "libstore/globals.hh" -#include "libutil/json.hh" -#include "nix/command.hh" - -namespace { -std::string wrap(const std::string& prefix, const std::string& s) { - return prefix + s + ANSI_NORMAL; -} - -std::string hilite(const std::string& s, const std::smatch& m, - const std::string& postfix) { - return m.empty() ? 
s - : std::string(m.prefix()) + ANSI_RED + std::string(m.str()) + - postfix + std::string(m.suffix()); -} -} // namespace - -namespace nix { -struct CmdSearch final : SourceExprCommand, MixJSON { - std::vector res; - - bool writeCache = true; - bool useCache = true; - - CmdSearch() { - expectArgs("regex", &res); - - mkFlag() - .longName("update-cache") - .shortName('u') - .description("update the package search cache") - .handler([&]() { - writeCache = true; - useCache = false; - }); - - mkFlag() - .longName("no-cache") - .description("do not use or update the package search cache") - .handler([&]() { - writeCache = false; - useCache = false; - }); - } - - std::string name() override { return "search"; } - - std::string description() override { return "query available packages"; } - - Examples examples() override { - return {Example{"To show all available packages:", "nix search"}, - Example{"To show any packages containing 'blender' in its name or " - "description:", - "nix search blender"}, - Example{"To search for Firefox or Chromium:", - "nix search 'firefox|chromium'"}, - Example{"To search for git and frontend or gui:", - "nix search git 'frontend|gui'"}}; - } - - void run(ref store) override { - settings.readOnlyMode = true; - - // Empty search string should match all packages - // Use "^" here instead of ".*" due to differences in resulting highlighting - // (see #1893 -- libc++ claims empty search string is not in POSIX grammar) - if (res.empty()) { - res.emplace_back("^"); - } - - std::vector regexes; - regexes.reserve(res.size()); - - for (auto& re : res) { - regexes.emplace_back(re, std::regex::extended | std::regex::icase); - } - - auto state = getEvalState(); - - auto jsonOut = json ? std::make_unique(std::cout) : nullptr; - - auto sToplevel = state->symbols.Create("_toplevel"); - auto sRecurse = state->symbols.Create("recurseForDerivations"); - - bool fromCache = false; - - std::map results; - - std::function doExpr; - - doExpr = [&](Value* v, const std::string& attrPath, bool toplevel, - JSONObject* cache) { - DLOG(INFO) << "at attribute '" << attrPath << "'"; - - try { - uint found = 0; - - state->forceValue(*v); - - if (v->type == tLambda && toplevel) { - Value* v2 = state->allocValue(); - auto dummyArgs = Bindings::New(); - state->autoCallFunction(dummyArgs.get(), *v, *v2); - v = v2; - state->forceValue(*v); - } - - if (state->isDerivation(*v)) { - DrvInfo drv(*state, attrPath, v->attrs); - std::string description; - std::smatch attrPathMatch; - std::smatch descriptionMatch; - std::smatch nameMatch; - std::string name; - - DrvName parsed(drv.queryName()); - - for (auto& regex : regexes) { - std::regex_search(attrPath, attrPathMatch, regex); - - name = parsed.name; - std::regex_search(name, nameMatch, regex); - - description = drv.queryMetaString("description"); - std::replace(description.begin(), description.end(), '\n', ' '); - std::regex_search(description, descriptionMatch, regex); - - if (!attrPathMatch.empty() || !nameMatch.empty() || - !descriptionMatch.empty()) { - found++; - } - } - - if (found == res.size()) { - if (json) { - auto jsonElem = jsonOut->object(attrPath); - - jsonElem.attr("pkgName", parsed.name); - jsonElem.attr("version", parsed.version); - jsonElem.attr("description", description); - - } else { - auto name = hilite(parsed.name, nameMatch, "\e[0;2m") + - std::string(parsed.fullName, parsed.name.length()); - results[attrPath] = fmt( - "* %s (%s)\n %s\n", - wrap("\e[0;1m", hilite(attrPath, attrPathMatch, "\e[0;1m")), - wrap("\e[0;2m", hilite(name, 
nameMatch, "\e[0;2m")), - hilite(description, descriptionMatch, ANSI_NORMAL)); - } - } - - if (cache != nullptr) { - cache->attr("type", "derivation"); - cache->attr("name", drv.queryName()); - cache->attr("system", drv.querySystem()); - if (!description.empty()) { - auto meta(cache->object("meta")); - meta.attr("description", description); - } - } - } - - else if (v->type == tAttrs) { - if (!toplevel) { - auto attrs = v->attrs; - Bindings::iterator j = attrs->find(sRecurse); - if (j == attrs->end() || - !state->forceBool(*j->second.value, *j->second.pos)) { - DLOG(INFO) << "skip attribute '" << attrPath << "'"; - return; - } - } - - bool toplevel2 = false; - if (!fromCache) { - Bindings::iterator j = v->attrs->find(sToplevel); - toplevel2 = j != v->attrs->end() && - state->forceBool(*j->second.value, *j->second.pos); - } - - for (auto& i : *v->attrs) { - auto cache2 = - cache != nullptr - ? std::make_unique(cache->object(i.second.name)) - : nullptr; - doExpr(i.second.value, - attrPath.empty() - ? std::string(i.second.name) - : attrPath + "." + std::string(i.second.name), - toplevel2 || fromCache, cache2 ? cache2.get() : nullptr); - } - } - - } catch (AssertionError& e) { - } catch (Error& e) { - if (!toplevel) { - e.addPrefix(fmt("While evaluating the attribute '%s':\n", attrPath)); - throw; - } - } - }; - - Path jsonCacheFileName = getCacheDir() + "/nix/package-search.json"; - - if (useCache && pathExists(jsonCacheFileName)) { - LOG(WARNING) << "using cached results; pass '-u' to update the cache"; - - Value vRoot; - parseJSON(*state, readFile(jsonCacheFileName), vRoot); - - fromCache = true; - - doExpr(&vRoot, "", true, nullptr); - } - - else { - createDirs(dirOf(jsonCacheFileName)); - - Path tmpFile = fmt("%s.tmp.%d", jsonCacheFileName, getpid()); - - std::ofstream jsonCacheFile; - - try { - // iostream considered harmful - jsonCacheFile.exceptions(std::ofstream::failbit); - jsonCacheFile.open(tmpFile); - - auto cache = writeCache - ? std::make_unique(jsonCacheFile, false) - : nullptr; - - doExpr(getSourceExpr(*state), "", true, cache.get()); - - } catch (std::exception&) { - /* Fun fact: catching std::ios::failure does not work - due to C++11 ABI shenanigans. - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66145 */ - if (!jsonCacheFile) { - throw Error("error writing to %s", tmpFile); - } - throw; - } - - if (writeCache && - rename(tmpFile.c_str(), jsonCacheFileName.c_str()) == -1) { - throw SysError("cannot rename '%s' to '%s'", tmpFile, - jsonCacheFileName); - } - } - - if (results.empty()) { - throw Error("no results for the given search term(s)!"); - } - - RunPager pager; - for (const auto& el : results) { - std::cout << el.second << "\n"; - } - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/show-config.cc b/third_party/nix/src/nix/show-config.cc deleted file mode 100644 index fd92e481e8..0000000000 --- a/third_party/nix/src/nix/show-config.cc +++ /dev/null @@ -1,31 +0,0 @@ -#include "libmain/common-args.hh" -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "libutil/json.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdShowConfig final : Command, MixJSON { - CmdShowConfig() = default; - - std::string name() override { return "show-config"; } - - std::string description() override { return "show the Nix configuration"; } - - void run() override { - if (json) { - // FIXME: use appropriate JSON types (bool, ints, etc). 
- JSONObject jsonObj(std::cout); - globalConfig.toJSON(jsonObj); - } else { - std::map settings; - globalConfig.getSettings(settings); - for (auto& s : settings) { - std::cout << s.first + " = " + s.second.value + "\n"; - } - } - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/show-derivation.cc b/third_party/nix/src/nix/show-derivation.cc deleted file mode 100644 index efe554710f..0000000000 --- a/third_party/nix/src/nix/show-derivation.cc +++ /dev/null @@ -1,113 +0,0 @@ -// FIXME: integrate this with nix path-info? - -#include "libmain/common-args.hh" -#include "libstore/derivations.hh" -#include "libstore/store-api.hh" -#include "libutil/archive.hh" -#include "libutil/json.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdShowDerivation final : InstallablesCommand { - bool recursive = false; - - CmdShowDerivation() { - mkFlag() - .longName("recursive") - .shortName('r') - .description("include the dependencies of the specified derivations") - .set(&recursive, true); - } - - std::string name() override { return "show-derivation"; } - - std::string description() override { - return "show the contents of a store derivation"; - } - - Examples examples() override { - return { - Example{"To show the store derivation that results from evaluating the " - "Hello package:", - "nix show-derivation nixpkgs.hello"}, - Example{"To show the full derivation graph (if available) that " - "produced your NixOS system:", - "nix show-derivation -r /run/current-system"}, - }; - } - - void run(ref store) override { - auto drvPaths = toDerivations(store, installables, true); - - if (recursive) { - PathSet closure; - store->computeFSClosure(drvPaths, closure); - drvPaths = closure; - } - - { - JSONObject jsonRoot(std::cout, true); - - for (auto& drvPath : drvPaths) { - if (!isDerivation(drvPath)) { - continue; - } - - auto drvObj(jsonRoot.object(drvPath)); - - auto drv = readDerivation(drvPath); - - { - auto outputsObj(drvObj.object("outputs")); - for (auto& output : drv.outputs) { - auto outputObj(outputsObj.object(output.first)); - outputObj.attr("path", output.second.path); - if (!output.second.hash.empty()) { - outputObj.attr("hashAlgo", output.second.hashAlgo); - outputObj.attr("hash", output.second.hash); - } - } - } - - { - auto inputsList(drvObj.list("inputSrcs")); - for (auto& input : drv.inputSrcs) { - inputsList.elem(input); - } - } - - { - auto inputDrvsObj(drvObj.object("inputDrvs")); - for (auto& input : drv.inputDrvs) { - auto inputList(inputDrvsObj.list(input.first)); - for (auto& outputId : input.second) { - inputList.elem(outputId); - } - } - } - - drvObj.attr("platform", drv.platform); - drvObj.attr("builder", drv.builder); - - { - auto argsList(drvObj.list("args")); - for (auto& arg : drv.args) { - argsList.elem(arg); - } - } - - { - auto envObj(drvObj.object("env")); - for (auto& var : drv.env) { - envObj.attr(var.first, var.second); - } - } - } - } - - std::cout << "\n"; - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/sigs.cc b/third_party/nix/src/nix/sigs.cc deleted file mode 100644 index cc42613d07..0000000000 --- a/third_party/nix/src/nix/sigs.cc +++ /dev/null @@ -1,146 +0,0 @@ -#include - -#include - -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "libutil/thread-pool.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdCopySigs final : StorePathsCommand { - Strings substituterUris; - - CmdCopySigs() { - 
mkFlag() - .longName("substituter") - .shortName('s') - .labels({"store-uri"}) - .description("use signatures from specified store") - .arity(1) - .handler([&](std::vector ss) { - substituterUris.push_back(ss[0]); - }); - } - - std::string name() override { return "copy-sigs"; } - - std::string description() override { - return "copy path signatures from substituters (like binary caches)"; - } - - void run(ref store, Paths storePaths) override { - if (substituterUris.empty()) { - throw UsageError("you must specify at least one substituter using '-s'"); - } - - // FIXME: factor out commonality with MixVerify. - std::vector> substituters; - for (auto& s : substituterUris) { - substituters.push_back(openStore(s)); - } - - ThreadPool pool; - - std::string doneLabel = "done"; - std::atomic added{0}; - - // logger->setExpected(doneLabel, storePaths.size()); - - auto doPath = [&](const Path& storePath) { - // Activity act(*logger, lvlInfo, format("getting signatures for '%s'") % - // storePath); - - checkInterrupt(); - - auto info = store->queryPathInfo(storePath); - - StringSet newSigs; - - for (auto& store2 : substituters) { - try { - auto info2 = store2->queryPathInfo(storePath); - - /* Don't import signatures that don't match this - binary. */ - if (info->narHash != info2->narHash || - info->narSize != info2->narSize || - info->references != info2->references) { - continue; - } - - for (auto& sig : info2->sigs) { - if (info->sigs.count(sig) == 0u) { - newSigs.insert(sig); - } - } - } catch (InvalidPath&) { - } - } - - if (!newSigs.empty()) { - store->addSignatures(storePath, newSigs); - added += newSigs.size(); - } - - // logger->incProgress(doneLabel); - }; - - for (auto& storePath : storePaths) { - pool.enqueue(std::bind(doPath, storePath)); - } - - pool.process(); - - LOG(INFO) << "imported " << added << " signatures"; - } -}; - -static nix::RegisterCommand r1(make_ref()); - -struct CmdSignPaths final : StorePathsCommand { - Path secretKeyFile; - - CmdSignPaths() { - mkFlag() - .shortName('k') - .longName("key-file") - .label("file") - .description("file containing the secret signing key") - .dest(&secretKeyFile); - } - - std::string name() override { return "sign-paths"; } - - std::string description() override { return "sign the specified paths"; } - - void run(ref store, Paths storePaths) override { - if (secretKeyFile.empty()) { - throw UsageError("you must specify a secret key file using '-k'"); - } - - SecretKey secretKey(readFile(secretKeyFile)); - - size_t added{0}; - - for (auto& storePath : storePaths) { - auto info = store->queryPathInfo(storePath); - - auto info2(*info); - info2.sigs.clear(); - info2.sign(secretKey); - assert(!info2.sigs.empty()); - - if (info->sigs.count(*info2.sigs.begin()) == 0u) { - store->addSignatures(storePath, info2.sigs); - added++; - } - } - - LOG(INFO) << "added " << added << " signatures"; - } -}; - -static RegisterCommand r3(make_ref()); - -} // namespace nix diff --git a/third_party/nix/src/nix/upgrade-nix.cc b/third_party/nix/src/nix/upgrade-nix.cc deleted file mode 100644 index c7f654d648..0000000000 --- a/third_party/nix/src/nix/upgrade-nix.cc +++ /dev/null @@ -1,167 +0,0 @@ -#include -#include -#include -#include - -#include "libexpr/attr-path.hh" -#include "libexpr/eval.hh" -#include "libexpr/names.hh" -#include "libmain/common-args.hh" -#include "libstore/download.hh" -#include "libstore/store-api.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdUpgradeNix final : MixDryRun, StoreCommand { - Path profileDir; - std::string 
storePathsUrl = - "https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/" - "tools/nix-fallback-paths.nix"; - - CmdUpgradeNix() { - mkFlag() - .longName("profile") - .shortName('p') - .labels({"profile-dir"}) - .description("the Nix profile to upgrade") - .dest(&profileDir); - - mkFlag() - .longName("nix-store-paths-url") - .labels({"url"}) - .description( - "URL of the file that contains the store paths of the latest Nix " - "release") - .dest(&storePathsUrl); - } - - std::string name() override { return "upgrade-nix"; } - - std::string description() override { - return "upgrade Nix to the latest stable version"; - } - - Examples examples() override { - return { - Example{"To upgrade Nix to the latest stable version:", - "nix upgrade-nix"}, - Example{ - "To upgrade Nix in a specific profile:", - "nix upgrade-nix -p /nix/var/nix/profiles/per-user/alice/profile"}, - }; - } - - void run(ref store) override { - evalSettings.pureEval = true; - - if (profileDir.empty()) { - profileDir = getProfileDir(store); - } - - LOG(INFO) << "upgrading Nix in profile '" << profileDir << "'"; - - Path storePath; - { - LOG(INFO) << "querying latest Nix version"; - storePath = getLatestNix(store); - } - - auto version = DrvName(storePathToName(storePath)).version; - - if (dryRun) { - LOG(ERROR) << "would upgrade to version " << version; - return; - } - - { - LOG(INFO) << "downloading '" << storePath << "'..."; - store->ensurePath(storePath); - } - - { - LOG(INFO) << "verifying that '" << storePath << "' works..."; - auto program = storePath + "/bin/nix-env"; - auto s = runProgram(program, false, {"--version"}); - if (s.find("Nix") == std::string::npos) { - throw Error("could not verify that '%s' works", program); - } - } - - { - LOG(INFO) << "installing '" << storePath << "' into profile '" - << profileDir << "'..."; - runProgram(settings.nixBinDir + "/nix-env", false, - {"--profile", profileDir, "-i", storePath, "--no-sandbox"}); - } - - LOG(INFO) << ANSI_GREEN << "upgrade to version " << version << " done" - << ANSI_NORMAL; - } - - /* Return the profile in which Nix is installed. */ - static Path getProfileDir(const ref& store) { - Path where; - - for (auto& dir : absl::StrSplit(getEnv("PATH").value_or(""), - absl::ByChar(':'), absl::SkipEmpty())) { - if (pathExists(absl::StrCat(dir, "/nix-env"))) { - where = dir; - break; - } - } - - if (where.empty()) { - throw Error( - "couldn't figure out how Nix is installed, so I can't upgrade it"); - } - - LOG(INFO) << "found Nix in '" << where << "'"; - - if (absl::StartsWith(where, "/run/current-system")) { - throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'"); - } - - Path profileDir = dirOf(where); - - // Resolve profile to /nix/var/nix/profiles/ link. - while (canonPath(profileDir).find("/profiles/") == std::string::npos && - isLink(profileDir)) { - profileDir = readLink(profileDir); - } - - LOG(INFO) << "found profile '" << profileDir << "'"; - - Path userEnv = canonPath(profileDir, true); - - if (baseNameOf(where) != "bin" || - !absl::EndsWith(userEnv, "user-environment")) { - throw Error("directory '%s' does not appear to be part of a Nix profile", - where); - } - - if (!store->isValidPath(userEnv)) { - throw Error("directory '%s' is not in the Nix store", userEnv); - } - - return profileDir; - } - - /* Return the store path of the latest stable Nix. */ - Path getLatestNix(const ref& store) { - // FIXME: use nixos.org? 
- auto req = DownloadRequest(storePathsUrl); - auto res = getDownloader()->download(req); - - auto state = std::make_unique(Strings(), store); - auto v = state->allocValue(); - state->eval(state->parseExprFromString(*res.data, "/no-such-path"), *v); - std::unique_ptr bindings(Bindings::New()); - auto v2 = - findAlongAttrPath(*state, settings.thisSystem, bindings.get(), *v); - - return state->forceString(*v2); - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/verify.cc b/third_party/nix/src/nix/verify.cc deleted file mode 100644 index 7de46f2a9c..0000000000 --- a/third_party/nix/src/nix/verify.cc +++ /dev/null @@ -1,171 +0,0 @@ -#include - -#include - -#include "libmain/shared.hh" -#include "libstore/store-api.hh" -#include "libutil/sync.hh" -#include "libutil/thread-pool.hh" -#include "nix/command.hh" - -namespace nix { -struct CmdVerify final : StorePathsCommand { - bool noContents = false; - bool noTrust = false; - Strings substituterUris; - size_t sigsNeeded = 0; - - CmdVerify() { - mkFlag(0, "no-contents", "do not verify the contents of each store path", - &noContents); - mkFlag(0, "no-trust", "do not verify whether each store path is trusted", - &noTrust); - mkFlag() - .longName("substituter") - .shortName('s') - .labels({"store-uri"}) - .description("use signatures from specified store") - .arity(1) - .handler([&](std::vector ss) { - substituterUris.push_back(ss[0]); - }); - mkIntFlag('n', "sigs-needed", - "require that each path has at least N valid signatures", - &sigsNeeded); - } - - std::string name() override { return "verify"; } - - std::string description() override { - return "verify the integrity of store paths"; - } - - Examples examples() override { - return { - Example{"To verify the entire Nix store:", "nix verify --all"}, - Example{"To check whether each path in the closure of Firefox has at " - "least 2 signatures:", - "nix verify -r -n2 --no-contents $(type -p firefox)"}, - }; - } - - void run(ref store, Paths storePaths) override { - std::vector> substituters; - for (auto& s : substituterUris) { - substituters.push_back(openStore(s)); - } - - auto publicKeys = getDefaultPublicKeys(); - - std::atomic done{0}; - std::atomic untrusted{0}; - std::atomic corrupted{0}; - std::atomic failed{0}; - std::atomic active{0}; - - ThreadPool pool; - - auto doPath = [&](const Path& storePath) { - try { - checkInterrupt(); - - LOG(INFO) << "checking '" << storePath << "'"; - - MaintainCount> mcActive(active); - - auto info = store->queryPathInfo(storePath); - - if (!noContents) { - HashSink sink(info->narHash.type); - store->narFromPath(info->path, sink); - - auto hash = sink.finish(); - - if (hash.first != info->narHash) { - corrupted++; - LOG(WARNING) << "path '" << info->path - << "' was modified! 
expected hash '" - << info->narHash.to_string() << "', got '" - << hash.first.to_string() << "'"; - } - } - - if (!noTrust) { - bool good = false; - - if (info->ultimate && (sigsNeeded == 0u)) { - good = true; - - } else { - StringSet sigsSeen; - size_t actualSigsNeeded = - std::max(sigsNeeded, static_cast(1)); - size_t validSigs = 0; - - auto doSigs = [&](const StringSet& sigs) { - for (const auto& sig : sigs) { - if (sigsSeen.count(sig) != 0u) { - continue; - } - sigsSeen.insert(sig); - if (validSigs < ValidPathInfo::maxSigs && - info->checkSignature(publicKeys, sig)) { - validSigs++; - } - } - }; - - if (info->isContentAddressed(*store)) { - validSigs = ValidPathInfo::maxSigs; - } - - doSigs(info->sigs); - - for (auto& store2 : substituters) { - if (validSigs >= actualSigsNeeded) { - break; - } - try { - auto info2 = store2->queryPathInfo(info->path); - if (info2->isContentAddressed(*store)) { - validSigs = ValidPathInfo::maxSigs; - } - doSigs(info2->sigs); - } catch (InvalidPath&) { - } catch (Error& e) { - LOG(ERROR) << e.what(); - } - } - - if (validSigs >= actualSigsNeeded) { - good = true; - } - } - - if (!good) { - untrusted++; - LOG(WARNING) << "path '" << info->path << "' is untrusted"; - } - } - - done++; - - } catch (Error& e) { - LOG(ERROR) << e.what(); - failed++; - } - }; - - for (auto& storePath : storePaths) { - pool.enqueue(std::bind(doPath, storePath)); - } - - pool.process(); - - throw Exit((corrupted != 0u ? 1 : 0) | (untrusted != 0u ? 2 : 0) | - (failed != 0u ? 4 : 0)); - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nix/why-depends.cc b/third_party/nix/src/nix/why-depends.cc deleted file mode 100644 index 954d619ef3..0000000000 --- a/third_party/nix/src/nix/why-depends.cc +++ /dev/null @@ -1,269 +0,0 @@ -#include - -#include - -#include "libmain/shared.hh" -#include "libstore/fs-accessor.hh" -#include "libstore/store-api.hh" -#include "nix/command.hh" - -namespace { -static std::string hilite(const std::string& s, size_t pos, size_t len, - const std::string& colour = ANSI_RED) { - return std::string(s, 0, pos) + colour + std::string(s, pos, len) + - ANSI_NORMAL + std::string(s, pos + len); -} - -static std::string filterPrintable(const std::string& s) { - std::string res; - for (char c : s) { - res += isprint(c) != 0 ? 
c : '.'; - } - return res; -} -} // namespace - -namespace nix { -struct CmdWhyDepends final : SourceExprCommand { - std::string _package, _dependency; - bool all = false; - - CmdWhyDepends() { - expectArg("package", &_package); - expectArg("dependency", &_dependency); - - mkFlag() - .longName("all") - .shortName('a') - .description( - "show all edges in the dependency graph leading from 'package' to " - "'dependency', rather than just a shortest path") - .set(&all, true); - } - - std::string name() override { return "why-depends"; } - - std::string description() override { - return "show why a package has another package in its closure"; - } - - Examples examples() override { - return { - Example{"To show one path through the dependency graph leading from " - "Hello to Glibc:", - "nix why-depends nixpkgs.hello nixpkgs.glibc"}, - Example{ - "To show all files and paths in the dependency graph leading from " - "Thunderbird to libX11:", - "nix why-depends --all nixpkgs.thunderbird nixpkgs.xorg.libX11"}, - Example{"To show why Glibc depends on itself:", - "nix why-depends nixpkgs.glibc nixpkgs.glibc"}, - }; - } - - void run(ref store) override { - auto package = parseInstallable(*this, store, _package, false); - auto packagePath = toStorePath(store, Build, package); - auto dependency = parseInstallable(*this, store, _dependency, false); - auto dependencyPath = toStorePath(store, NoBuild, dependency); - auto dependencyPathHash = storePathToHash(dependencyPath); - - PathSet closure; - store->computeFSClosure({packagePath}, closure, false, false); - - if (closure.count(dependencyPath) == 0u) { - LOG(WARNING) << "'" << package->what() << "' does not depend on '" - << dependency->what() << "'"; - return; - } - - auto accessor = store->getFSAccessor(); - - auto const inf = std::numeric_limits::max(); - - struct Node { - Path path; - PathSet refs; - PathSet rrefs; - size_t dist = inf; - Node* prev = nullptr; - bool queued = false; - bool visited = false; - }; - - std::map graph; - - for (auto& path : closure) { - graph.emplace(path, Node{path, store->queryPathInfo(path)->references}); - } - - // Transpose the graph. - for (auto& node : graph) { - for (auto& ref : node.second.refs) { - graph[ref].rrefs.insert(node.first); - } - } - - /* Run Dijkstra's shortest path algorithm to get the distance - of every path in the closure to 'dependency'. */ - graph[dependencyPath].dist = 0; - - std::priority_queue queue; - - queue.push(&graph.at(dependencyPath)); - - while (!queue.empty()) { - auto& node = *queue.top(); - queue.pop(); - - for (auto& rref : node.rrefs) { - auto& node2 = graph.at(rref); - auto dist = node.dist + 1; - if (dist < node2.dist) { - node2.dist = dist; - node2.prev = &node; - if (!node2.queued) { - node2.queued = true; - queue.push(&node2); - } - } - } - } - - /* Print the subgraph of nodes that have 'dependency' in their - closure (i.e., that have a non-infinite distance to - 'dependency'). Print every edge on a path between `package` - and `dependency`. */ - std::function - printNode; - - const std::string treeConn = "╠═══"; - const std::string treeLast = "╚═══"; - const std::string treeLine = "║ "; - const std::string treeNull = " "; - - struct BailOut {}; - - printNode = [&](Node& node, const std::string& firstPad, - const std::string& tailPad) { - assert(node.dist != inf); - std::cout << fmt("%s%s%s%s" ANSI_NORMAL "\n", firstPad, - node.visited ? "\e[38;5;244m" : "", - !firstPad.empty() ? 
"=> " : "", node.path); - - if (node.path == dependencyPath && !all && - packagePath != dependencyPath) { - throw BailOut(); - } - - if (node.visited) { - return; - } - node.visited = true; - - /* Sort the references by distance to `dependency` to - ensure that the shortest path is printed first. */ - std::multimap refs; - std::set hashes; - - for (auto& ref : node.refs) { - if (ref == node.path && packagePath != dependencyPath) { - continue; - } - auto& node2 = graph.at(ref); - if (node2.dist == inf) { - continue; - } - refs.emplace(node2.dist, &node2); - hashes.insert(storePathToHash(node2.path)); - } - - /* For each reference, find the files and symlinks that - contain the reference. */ - std::map hits; - - std::function visitPath; - - visitPath = [&](const Path& p) { - auto st = accessor->stat(p); - - auto p2 = p == node.path ? "/" : std::string(p, node.path.size() + 1); - - auto getColour = [&](const std::string& hash) { - return hash == dependencyPathHash ? ANSI_GREEN : ANSI_BLUE; - }; - - if (st.type == FSAccessor::Type::tDirectory) { - auto names = accessor->readDirectory(p); - for (auto& name : names) { - visitPath(p + "/" + name); - } - } - - else if (st.type == FSAccessor::Type::tRegular) { - auto contents = accessor->readFile(p); - - for (auto& hash : hashes) { - auto pos = contents.find(hash); - if (pos != std::string::npos) { - size_t margin = 32; - auto pos2 = pos >= margin ? pos - margin : 0; - hits[hash].emplace_back(fmt( - "%s: …%s…\n", p2, - hilite( - filterPrintable(std::string( - contents, pos2, pos - pos2 + hash.size() + margin)), - pos - pos2, storePathHashLen, getColour(hash)))); - } - } - } - - else if (st.type == FSAccessor::Type::tSymlink) { - auto target = accessor->readLink(p); - - for (auto& hash : hashes) { - auto pos = target.find(hash); - if (pos != std::string::npos) { - hits[hash].emplace_back( - fmt("%s -> %s\n", p2, - hilite(target, pos, storePathHashLen, getColour(hash)))); - } - } - } - }; - - // FIXME: should use scanForReferences(). - - visitPath(node.path); - - RunPager pager; - for (auto& ref : refs) { - auto hash = storePathToHash(ref.second->path); - - bool last = all ? ref == *refs.rbegin() : true; - - for (auto& hit : hits[hash]) { - bool first = hit == *hits[hash].begin(); - std::cout << tailPad - << (first ? (last ? treeLast : treeConn) - : (last ? treeNull : treeLine)) - << hit; - if (!all) { - break; - } - } - - printNode(*ref.second, tailPad + (last ? treeNull : treeLine), - tailPad + (last ? treeNull : treeLine)); - } - }; - - try { - printNode(graph.at(packagePath), "", ""); - } catch (BailOut&) { - } - } -}; -} // namespace nix - -static nix::RegisterCommand r1(nix::make_ref()); diff --git a/third_party/nix/src/nlohmann/json.hpp b/third_party/nix/src/nlohmann/json.hpp deleted file mode 100644 index c9af0bed36..0000000000 --- a/third_party/nix/src/nlohmann/json.hpp +++ /dev/null @@ -1,20406 +0,0 @@ -/* - __ _____ _____ _____ - __| | __| | | | JSON for Modern C++ -| | |__ | | | | | | version 3.5.0 -|_____|_____|_____|_|___| https://github.com/nlohmann/json - -Licensed under the MIT License . -SPDX-License-Identifier: MIT -Copyright (c) 2013-2018 Niels Lohmann . 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -#ifndef NLOHMANN_JSON_HPP -#define NLOHMANN_JSON_HPP - -#define NLOHMANN_JSON_VERSION_MAJOR 3 -#define NLOHMANN_JSON_VERSION_MINOR 5 -#define NLOHMANN_JSON_VERSION_PATCH 0 - -#include // all_of, find, for_each -#include // assert -#include // and, not, or -#include // nullptr_t, ptrdiff_t, size_t -#include // hash, less -#include // initializer_list -#include // istream, ostream -#include // random_access_iterator_tag -#include // accumulate -#include // string, stoi, to_string -#include // declval, forward, move, pair, swap - -// #include -#ifndef NLOHMANN_JSON_FWD_HPP -#define NLOHMANN_JSON_FWD_HPP - -#include // int64_t, uint64_t -#include // map -#include // allocator -#include // string -#include // vector - -/*! -@brief namespace for Niels Lohmann -@see https://github.com/nlohmann -@since version 1.0.0 -*/ -namespace nlohmann -{ -/*! -@brief default JSONSerializer template argument - -This serializer ignores the template arguments and uses ADL -([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) -for serialization. -*/ -template -struct adl_serializer; - -template class ObjectType = - std::map, - template class ArrayType = std::vector, - class StringType = std::string, class BooleanType = bool, - class NumberIntegerType = std::int64_t, - class NumberUnsignedType = std::uint64_t, - class NumberFloatType = double, - template class AllocatorType = std::allocator, - template class JSONSerializer = - adl_serializer> -class basic_json; - -/*! -@brief JSON Pointer - -A JSON pointer defines a string syntax for identifying a specific value -within a JSON document. It can be used with functions `at` and -`operator[]`. Furthermore, JSON pointers are the base for JSON patches. - -@sa [RFC 6901](https://tools.ietf.org/html/rfc6901) - -@since version 2.0.0 -*/ -template -class json_pointer; - -/*! -@brief default JSON class - -This type is the default specialization of the @ref basic_json class which -uses the standard template types. 
- -@since version 1.0.0 -*/ -using json = basic_json<>; -} // namespace nlohmann - -#endif - -// #include - - -// This file contains all internal macro definitions -// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them - -// exclude unsupported compilers -#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK) - #if defined(__clang__) - #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 - #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" - #endif - #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) - #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800 - #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" - #endif - #endif -#endif - -// disable float-equal warnings on GCC/clang -#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wfloat-equal" -#endif - -// disable documentation warnings on clang -#if defined(__clang__) - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wdocumentation" -#endif - -// allow for portable deprecation warnings -#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) - #define JSON_DEPRECATED __attribute__((deprecated)) -#elif defined(_MSC_VER) - #define JSON_DEPRECATED __declspec(deprecated) -#else - #define JSON_DEPRECATED -#endif - -// allow to disable exceptions -#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION) - #define JSON_THROW(exception) throw exception - #define JSON_TRY try - #define JSON_CATCH(exception) catch(exception) - #define JSON_INTERNAL_CATCH(exception) catch(exception) -#else - #define JSON_THROW(exception) std::abort() - #define JSON_TRY if(true) - #define JSON_CATCH(exception) if(false) - #define JSON_INTERNAL_CATCH(exception) if(false) -#endif - -// override exception macros -#if defined(JSON_THROW_USER) - #undef JSON_THROW - #define JSON_THROW JSON_THROW_USER -#endif -#if defined(JSON_TRY_USER) - #undef JSON_TRY - #define JSON_TRY JSON_TRY_USER -#endif -#if defined(JSON_CATCH_USER) - #undef JSON_CATCH - #define JSON_CATCH JSON_CATCH_USER - #undef JSON_INTERNAL_CATCH - #define JSON_INTERNAL_CATCH JSON_CATCH_USER -#endif -#if defined(JSON_INTERNAL_CATCH_USER) - #undef JSON_INTERNAL_CATCH - #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER -#endif - -// manual branch prediction -#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) - #define JSON_LIKELY(x) __builtin_expect(!!(x), 1) - #define JSON_UNLIKELY(x) __builtin_expect(!!(x), 0) -#else - #define JSON_LIKELY(x) x - #define JSON_UNLIKELY(x) x -#endif - -// C++ language standard detection -#if (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 - #define JSON_HAS_CPP_17 - #define JSON_HAS_CPP_14 -#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) - #define JSON_HAS_CPP_14 -#endif - -/*! -@brief macro to briefly define a mapping between an enum and JSON -@def NLOHMANN_JSON_SERIALIZE_ENUM -@since version 3.4.0 -*/ -#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) 
\ - template \ - inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ - { \ - static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ - static const std::pair m[] = __VA_ARGS__; \ - auto it = std::find_if(std::begin(m), std::end(m), \ - [e](const std::pair& ej_pair) -> bool \ - { \ - return ej_pair.first == e; \ - }); \ - j = ((it != std::end(m)) ? it : std::begin(m))->second; \ - } \ - template \ - inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ - { \ - static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ - static const std::pair m[] = __VA_ARGS__; \ - auto it = std::find_if(std::begin(m), std::end(m), \ - [j](const std::pair& ej_pair) -> bool \ - { \ - return ej_pair.second == j; \ - }); \ - e = ((it != std::end(m)) ? it : std::begin(m))->first; \ - } - -// Ugly macros to avoid uglier copy-paste when specializing basic_json. They -// may be removed in the future once the class is split. - -#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \ - template class ObjectType, \ - template class ArrayType, \ - class StringType, class BooleanType, class NumberIntegerType, \ - class NumberUnsignedType, class NumberFloatType, \ - template class AllocatorType, \ - template class JSONSerializer> - -#define NLOHMANN_BASIC_JSON_TPL \ - basic_json - -// #include - - -#include // not -#include // size_t -#include // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type - -namespace nlohmann -{ -namespace detail -{ -// alias templates to reduce boilerplate -template -using enable_if_t = typename std::enable_if::type; - -template -using uncvref_t = typename std::remove_cv::type>::type; - -// implementation of C++14 index_sequence and affiliates -// source: https://stackoverflow.com/a/32223343 -template -struct index_sequence -{ - using type = index_sequence; - using value_type = std::size_t; - static constexpr std::size_t size() noexcept - { - return sizeof...(Ints); - } -}; - -template -struct merge_and_renumber; - -template -struct merge_and_renumber, index_sequence> - : index_sequence < I1..., (sizeof...(I1) + I2)... 
> {}; - -template -struct make_index_sequence - : merge_and_renumber < typename make_index_sequence < N / 2 >::type, - typename make_index_sequence < N - N / 2 >::type > {}; - -template<> struct make_index_sequence<0> : index_sequence<> {}; -template<> struct make_index_sequence<1> : index_sequence<0> {}; - -template -using index_sequence_for = make_index_sequence; - -// dispatch utility (taken from ranges-v3) -template struct priority_tag : priority_tag < N - 1 > {}; -template<> struct priority_tag<0> {}; - -// taken from ranges-v3 -template -struct static_const -{ - static constexpr T value{}; -}; - -template -constexpr T static_const::value; -} // namespace detail -} // namespace nlohmann - -// #include - - -#include // not -#include // numeric_limits -#include // false_type, is_constructible, is_integral, is_same, true_type -#include // declval - -// #include - -// #include - - -#include // random_access_iterator_tag - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template struct make_void -{ - using type = void; -}; -template using void_t = typename make_void::type; -} // namespace detail -} // namespace nlohmann - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template -struct iterator_types {}; - -template -struct iterator_types < - It, - void_t> -{ - using difference_type = typename It::difference_type; - using value_type = typename It::value_type; - using pointer = typename It::pointer; - using reference = typename It::reference; - using iterator_category = typename It::iterator_category; -}; - -// This is required as some compilers implement std::iterator_traits in a way that -// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341. -template -struct iterator_traits -{ -}; - -template -struct iterator_traits < T, enable_if_t < !std::is_pointer::value >> - : iterator_types -{ -}; - -template -struct iterator_traits::value>> -{ - using iterator_category = std::random_access_iterator_tag; - using value_type = T; - using difference_type = ptrdiff_t; - using pointer = T*; - using reference = T&; -}; -} -} - -// #include - -// #include - - -#include - -// #include - - -// http://en.cppreference.com/w/cpp/experimental/is_detected -namespace nlohmann -{ -namespace detail -{ -struct nonesuch -{ - nonesuch() = delete; - ~nonesuch() = delete; - nonesuch(nonesuch const&) = delete; - void operator=(nonesuch const&) = delete; -}; - -template class Op, - class... Args> -struct detector -{ - using value_t = std::false_type; - using type = Default; -}; - -template class Op, class... Args> -struct detector>, Op, Args...> -{ - using value_t = std::true_type; - using type = Op; -}; - -template