148 files changed, 4441 insertions, 2390 deletions
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000000..3372b1f03f7d --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,27 @@ +<!-- + +# Filing a Nix issue + +*WAIT* Are you sure you're filing your issue in the right repository? + +We appreciate you taking the time to tell us about issues you encounter, but routing the issue to the right place will get you help sooner and save everyone time. + +This is the Nix repository, and issues here should be about Nix the build and package management *_tool_*. + +If you have a problem with a specific package on NixOS or when using Nix, you probably want to file an issue with _nixpkgs_, whose issue tracker is over at https://github.com/NixOS/nixpkgs/issues. + +Examples of _Nix_ issues: + +- Nix segfaults when I run `nix-build -A blahblah` +- The Nix language needs a new builtin: `builtins.foobar` +- Regression in the behavior of `nix-env` in Nix 2.0 + +Examples of _nixpkgs_ issues: + +- glibc is b0rked on aarch64 +- chromium in NixOS doesn't support U2F but google-chrome does! +- The OpenJDK package on macOS is missing a key component + +Chances are if you're a newcomer to the Nix world, you'll probably want the [nixpkgs tracker](https://github.com/NixOS/nixpkgs/issues). It also gets a lot more eyeball traffic so you'll probably get a response a lot more quickly. + +--> diff --git a/.gitignore b/.gitignore index ce22fa007dc7..0a9599378567 100644 --- a/.gitignore +++ b/.gitignore @@ -38,6 +38,7 @@ perl/Makefile.config /scripts/nix-copy-closure /scripts/nix-reduce-build /scripts/nix-http-export.cgi +/scripts/nix-profile-daemon.sh # /src/libexpr/ /src/libexpr/lexer-tab.cc diff --git a/Makefile b/Makefile index 5d8e990cc5c0..c867823fc485 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,8 @@ makefiles = \ misc/launchd/local.mk \ misc/upstart/local.mk \ doc/manual/local.mk \ - tests/local.mk + tests/local.mk \ + tests/plugins/local.mk GLOBAL_CXXFLAGS += -std=c++14 -g -Wall -include config.h diff --git a/Makefile.config.in b/Makefile.config.in index 45a70cd6dd1a..a9785dc73955 100644 --- a/Makefile.config.in +++ b/Makefile.config.in @@ -6,6 +6,8 @@ CXXFLAGS = @CXXFLAGS@ ENABLE_S3 = @ENABLE_S3@ HAVE_SODIUM = @HAVE_SODIUM@ HAVE_READLINE = @HAVE_READLINE@ +HAVE_BROTLI = @HAVE_BROTLI@ +HAVE_SECCOMP = @HAVE_SECCOMP@ LIBCURL_LIBS = @LIBCURL_LIBS@ OPENSSL_LIBS = @OPENSSL_LIBS@ PACKAGE_NAME = @PACKAGE_NAME@ @@ -13,9 +15,10 @@ PACKAGE_VERSION = @PACKAGE_VERSION@ SODIUM_LIBS = @SODIUM_LIBS@ LIBLZMA_LIBS = @LIBLZMA_LIBS@ SQLITE3_LIBS = @SQLITE3_LIBS@ +LIBBROTLI_LIBS = @LIBBROTLI_LIBS@ bash = @bash@ bindir = @bindir@ -bro = @bro@ +brotli = @brotli@ lsof = @lsof@ datadir = @datadir@ datarootdir = @datarootdir@ diff --git a/configure.ac b/configure.ac index 9d8a81d0427b..54322d463ae7 100644 --- a/configure.ac +++ b/configure.ac @@ -61,6 +61,7 @@ CFLAGS= CXXFLAGS= AC_PROG_CC AC_PROG_CXX +AC_PROG_CPP AX_CXX_COMPILE_STDCXX_11 @@ -127,7 +128,7 @@ NEED_PROG(gzip, gzip) NEED_PROG(xz, xz) AC_PATH_PROG(dot, dot) AC_PATH_PROG(pv, pv, pv) -AC_PATH_PROG(bro, bro, bro) +AC_PATH_PROGS(brotli, brotli bro, bro) AC_PATH_PROG(lsof, lsof, lsof) @@ -174,23 +175,51 @@ AC_SUBST(HAVE_SODIUM, [$have_sodium]) # Look for liblzma, a required dependency. 
PKG_CHECK_MODULES([LIBLZMA], [liblzma], [CXXFLAGS="$LIBLZMA_CFLAGS $CXXFLAGS"]) +AC_CHECK_LIB([lzma], [lzma_stream_encoder_mt], + [AC_DEFINE([HAVE_LZMA_MT], [1], [xz multithreaded compression support])]) +# Look for libbrotli{enc,dec}, optional dependencies +PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], + [AC_DEFINE([HAVE_BROTLI], [1], [Whether to use libbrotli.]) + CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"] + have_brotli=1], [have_brotli=]) +AC_SUBST(HAVE_BROTLI, [$have_brotli]) + # Look for libseccomp, required for Linux sandboxing. if test "$sys_name" = linux; then - PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp], - [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"]) + AC_ARG_ENABLE([seccomp-sandboxing], + AC_HELP_STRING([--disable-seccomp-sandboxing], + [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)] + )) + if test "x$enable_seccomp_sandboxing" != "xno"; then + PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp], + [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"]) + have_seccomp=1 + AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.]) + else + have_seccomp= + fi +else + have_seccomp= fi +AC_SUBST(HAVE_SECCOMP, [$have_seccomp]) # Look for aws-cpp-sdk-s3. AC_LANG_PUSH(C++) AC_CHECK_HEADERS([aws/s3/S3Client.h], - [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-cpp-sdk-s3.]) + [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1], [enable_s3=]) AC_SUBST(ENABLE_S3, [$enable_s3]) AC_LANG_POP(C++) +if test -n "$enable_s3"; then + declare -a aws_version_tokens=($(printf '#include <aws/core/VersionConfig.h>\nAWS_SDK_VERSION_STRING' | $CPP - | grep -v '^#.*' | sed 's/"//g' | tr '.' ' ')) + AC_DEFINE_UNQUOTED([AWS_VERSION_MAJOR], ${aws_version_tokens@<:@0@:>@}, [Major version of aws-sdk-cpp.]) + AC_DEFINE_UNQUOTED([AWS_VERSION_MINOR], ${aws_version_tokens@<:@1@:>@}, [Minor version of aws-sdk-cpp.]) +fi + # Whether to use the Boehm garbage collector. AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc], diff --git a/corepkgs/fetchurl.nix b/corepkgs/fetchurl.nix index e135b947fdbb..0ce1bab112f3 100644 --- a/corepkgs/fetchurl.nix +++ b/corepkgs/fetchurl.nix @@ -1,4 +1,4 @@ -{ system ? builtins.currentSystem +{ system ? "" # obsolete , url , md5 ? "", sha1 ? "", sha256 ? "", sha512 ? "" , outputHash ? @@ -17,7 +17,9 @@ derivation { inherit outputHashAlgo outputHash; outputHashMode = if unpack || executable then "recursive" else "flat"; - inherit name system url executable unpack; + inherit name url executable unpack; + + system = "builtin"; # No need to double the amount of network traffic preferLocalBuild = true; diff --git a/doc/manual/advanced-topics/distributed-builds.xml b/doc/manual/advanced-topics/distributed-builds.xml index 1957e1105e68..20fd6a0cfb0d 100644 --- a/doc/manual/advanced-topics/distributed-builds.xml +++ b/doc/manual/advanced-topics/distributed-builds.xml @@ -4,71 +4,109 @@ version="5.0" xml:id='chap-distributed-builds'> -<title>Distributed Builds</title> - -<para>Nix supports distributed builds, where a local Nix installation can -forward Nix builds to other machines over the network. This allows -multiple builds to be performed in parallel (thus improving -performance) and allows Nix to perform multi-platform builds in a -semi-transparent way. 
For instance, if you perform a build for a -<literal>x86_64-darwin</literal> on an <literal>i686-linux</literal> -machine, Nix can automatically forward the build to a -<literal>x86_64-darwin</literal> machine, if available.</para> - -<para>You can enable distributed builds by setting the environment -variable <envar>NIX_BUILD_HOOK</envar> to point to a program that Nix -will call whenever it wants to build a derivation. The build hook -(typically a shell or Perl script) can decline the build, in which Nix -will perform it in the usual way if possible, or it can accept it, in -which case it is responsible for somehow getting the inputs of the -build to another machine, doing the build there, and getting the -results back.</para> - -<example xml:id='ex-remote-systems'><title>Remote machine configuration: -<filename>remote-systems.conf</filename></title> -<programlisting> -nix@mcflurry.labs.cs.uu.nl x86_64-darwin /home/nix/.ssh/id_quarterpounder_auto 2 -nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm -nix@itchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 2 -nix@poochie.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 2 kvm perf -</programlisting> -</example> - -<para>Nix ships with a build hook that should be suitable for most -purposes. It uses <command>ssh</command> and -<command>nix-copy-closure</command> to copy the build inputs and -outputs and perform the remote build. To use it, you should set -<envar>NIX_BUILD_HOOK</envar> to -<filename><replaceable>prefix</replaceable>/libexec/nix/build-remote</filename>. -You should also define a list of available build machines and point -the environment variable <envar>NIX_REMOTE_SYSTEMS</envar> to -it. <envar>NIX_REMOTE_SYSTEMS</envar> must be an absolute path. An -example configuration is shown in <xref linkend='ex-remote-systems' -/>. Each line in the file specifies a machine, with the following -bits of information: +<title>Remote Builds</title> + +<para>Nix supports remote builds, where a local Nix installation can +forward Nix builds to other machines. This allows multiple builds to +be performed in parallel and allows Nix to perform multi-platform +builds in a semi-transparent way. For instance, if you perform a +build for a <literal>x86_64-darwin</literal> on an +<literal>i686-linux</literal> machine, Nix can automatically forward +the build to a <literal>x86_64-darwin</literal> machine, if +available.</para> + +<para>To forward a build to a remote machine, it’s required that the +remote machine is accessible via SSH and that it has Nix +installed. You can test whether connecting to the remote Nix instance +works, e.g. + +<screen> +$ nix ping-store --store ssh://mac +</screen> + +will try to connect to the machine named <literal>mac</literal>. It is +possible to specify an SSH identity file as part of the remote store +URI, e.g. + +<screen> +$ nix ping-store --store ssh://mac?ssh-key=/home/alice/my-key +</screen> + +Since builds should be non-interactive, the key should not have a +passphrase. 
Alternatively, you can load identities ahead of time into +<command>ssh-agent</command> or <command>gpg-agent</command>.</para> + +<para>If you get the error + +<screen> +bash: nix-store: command not found +error: cannot connect to 'mac' +</screen> + +then you need to ensure that the <envar>PATH</envar> of +non-interactive login shells contains Nix.</para> + +<warning><para>If you are building via the Nix daemon, it is the Nix +daemon user account (that is, <literal>root</literal>) that should +have SSH access to the remote machine. If you can’t or don’t want to +configure <literal>root</literal> to be able to access to remote +machine, you can use a private Nix store instead by passing +e.g. <literal>--store ~/my-nix</literal>.</para></warning> + +<para>The list of remote machines can be specified on the command line +or in the Nix configuration file. The former is convenient for +testing. For example, the following command allows you to build a +derivation for <literal>x86_64-darwin</literal> on a Linux machine: + +<screen> +$ uname +Linux + +$ nix build \ + '(with import <nixpkgs> { system = "x86_64-darwin"; }; runCommand "foo" {} "uname > $out")' \ + --builders 'ssh://mac x86_64-darwin' +[1/0/1 built, 0.0 MiB DL] building foo on ssh://mac + +$ cat ./result +Darwin +</screen> + +It is possible to specify multiple builders separated by a semicolon +or a newline, e.g. + +<screen> + --builders 'ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd' +</screen> +</para> + +<para>Each machine specification consists of the following elements, +separated by spaces. Only the first element is required. <orderedlist> - <listitem><para>The name of the remote machine, with optionally the - user under which the remote build should be performed. This is - actually passed as an argument to <command>ssh</command>, so it can - be an alias defined in your + <listitem><para>The URI of the remote store in the format + <literal>ssh://[<replaceable>username</replaceable>@]<replaceable>hostname</replaceable></literal>, + e.g. <literal>ssh://nix@mac</literal> or + <literal>ssh://mac</literal>. For backward compatibility, + <literal>ssh://</literal> may be omitted. The hostname may be an + alias defined in your <filename>~/.ssh/config</filename>.</para></listitem> <listitem><para>A comma-separated list of Nix platform type identifiers, such as <literal>x86_64-darwin</literal>. It is possible for a machine to support multiple platform types, e.g., - <literal>i686-linux,x86_64-linux</literal>.</para></listitem> + <literal>i686-linux,x86_64-linux</literal>. If omitted, this + defaults to the local platform type.</para></listitem> - <listitem><para>The SSH private key to be used to log in to the - remote machine. Since builds should be non-interactive, this key - should not have a passphrase!</para></listitem> + <listitem><para>The SSH identity file to be used to log in to the + remote machine. If omitted, SSH will use its regular + identities.</para></listitem> - <listitem><para>The maximum number of builds that - <filename>build-remote</filename> will execute in parallel on the - machine. Typically this should be equal to the number of CPU cores. - For instance, the machine <literal>itchy</literal> in the example - will execute up to 8 builds in parallel.</para></listitem> + <listitem><para>The maximum number of builds that Nix will execute + in parallel on the machine. Typically this should be equal to the + number of CPU cores. 
For instance, the machine + <literal>itchy</literal> in the example will execute up to 8 builds + in parallel.</para></listitem> <listitem><para>The “speed factor”, indicating the relative speed of the machine. If there are multiple machines of the right type, Nix @@ -76,30 +114,69 @@ bits of information: <listitem><para>A comma-separated list of <emphasis>supported features</emphasis>. If a derivation has the - <varname>requiredSystemFeatures</varname> attribute, then - <filename>build-remote</filename> will only perform the - derivation on a machine that has the specified features. For - instance, the attribute + <varname>requiredSystemFeatures</varname> attribute, then Nix will + only perform the derivation on a machine that has the specified + features. For instance, the attribute <programlisting> requiredSystemFeatures = [ "kvm" ]; </programlisting> will cause the build to be performed on a machine that has the - <literal>kvm</literal> feature (i.e., <literal>scratchy</literal> in - the example above).</para></listitem> + <literal>kvm</literal> feature.</para></listitem> <listitem><para>A comma-separated list of <emphasis>mandatory features</emphasis>. A machine will only be used to build a derivation if all of the machine’s mandatory features appear in the - derivation’s <varname>requiredSystemFeatures</varname> attribute. - Thus, in the example, the machine <literal>poochie</literal> will - only do derivations that have - <varname>requiredSystemFeatures</varname> set to <literal>["kvm" - "perf"]</literal> or <literal>["perf"]</literal>.</para></listitem> + derivation’s <varname>requiredSystemFeatures</varname> + attribute..</para></listitem> </orderedlist> -</para> +For example, the machine specification + +<programlisting> +nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm +nix@itchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 2 +nix@poochie.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 1 2 kvm benchmark +</programlisting> + +specifies several machines that can perform +<literal>i686-linux</literal> builds. However, +<literal>poochie</literal> will only do builds that have the attribute + +<programlisting> +requiredSystemFeatures = [ "benchmark" ]; +</programlisting> + +or + +<programlisting> +requiredSystemFeatures = [ "benchmark" "kvm" ]; +</programlisting> + +<literal>itchy</literal> cannot do builds that require +<literal>kvm</literal>, but <literal>scratchy</literal> does support +such builds. For regular builds, <literal>itchy</literal> will be +preferred over <literal>scratchy</literal> because it has a higher +speed factor.</para> + +<para>Remote builders can also be configured in +<filename>nix.conf</filename>, e.g. + +<programlisting> +builders = ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd +</programlisting> + +Finally, remote builders can be configured in a separate configuration +file included in <option>builders</option> via the syntax +<literal>@<replaceable>file</replaceable></literal>. For example, + +<programlisting> +builders = @/etc/nix/machines +</programlisting> + +causes the list of machines in <filename>/etc/nix/machines</filename> +to be included. 
(This is the default.)</para> </chapter> diff --git a/doc/manual/command-ref/conf-file.xml b/doc/manual/command-ref/conf-file.xml index e52cbcd535e3..c76640c97e7e 100644 --- a/doc/manual/command-ref/conf-file.xml +++ b/doc/manual/command-ref/conf-file.xml @@ -40,7 +40,12 @@ <para>The configuration files consist of <literal><replaceable>name</replaceable> = -<replaceable>value</replaceable></literal> pairs, one per line. +<replaceable>value</replaceable></literal> pairs, one per line. Other +files can be included with a line like <literal>include +<replaceable>path</replaceable></literal>, where +<replaceable>path</replaceable> is interpreted relative to the current +conf file and a missing file is an error unless +<literal>!include</literal> is used instead. Comments start with a <literal>#</literal> character. Here is an example configuration file:</para> @@ -58,147 +63,99 @@ false</literal>.</para> <variablelist> - <varlistentry xml:id="conf-keep-outputs"><term><literal>keep-outputs</literal></term> - - <listitem><para>If <literal>true</literal>, the garbage collector - will keep the outputs of non-garbage derivations. If - <literal>false</literal> (default), outputs will be deleted unless - they are GC roots themselves (or reachable from other roots).</para> - - <para>In general, outputs must be registered as roots separately. - However, even if the output of a derivation is registered as a - root, the collector will still delete store paths that are used - only at build time (e.g., the C compiler, or source tarballs - downloaded from the network). To prevent it from doing so, set - this option to <literal>true</literal>.</para></listitem> - - </varlistentry> - - - <varlistentry xml:id="conf-keep-derivations"><term><literal>keep-derivations</literal></term> - - <listitem><para>If <literal>true</literal> (default), the garbage - collector will keep the derivations from which non-garbage store - paths were built. If <literal>false</literal>, they will be - deleted unless explicitly registered as a root (or reachable from - other roots).</para> - - <para>Keeping derivation around is useful for querying and - traceability (e.g., it allows you to ask with what dependencies or - options a store path was built), so by default this option is on. - Turn it off to save a bit of disk space (or a lot if - <literal>keep-outputs</literal> is also turned on).</para></listitem> - - </varlistentry> - - - <varlistentry><term><literal>keep-env-derivations</literal></term> + <varlistentry xml:id="conf-allowed-uris"><term><literal>allowed-uris</literal></term> - <listitem><para>If <literal>false</literal> (default), derivations - are not stored in Nix user environments. That is, the derivation - any build-time-only dependencies may be garbage-collected.</para> + <listitem> - <para>If <literal>true</literal>, when you add a Nix derivation to - a user environment, the path of the derivation is stored in the - user environment. Thus, the derivation will not be - garbage-collected until the user environment generation is deleted - (<command>nix-env --delete-generations</command>). To prevent - build-time-only dependencies from being collected, you should also - turn on <literal>keep-outputs</literal>.</para> + <para>A list of URI prefixes to which access is allowed in + restricted evaluation mode. 
For example, when set to + <literal>https://github.com/NixOS</literal>, builtin functions + such as <function>fetchGit</function> are allowed to access + <literal>https://github.com/NixOS/patchelf.git</literal>.</para> - <para>The difference between this option and - <literal>keep-derivations</literal> is that this one is - “sticky”: it applies to any user environment created while this - option was enabled, while <literal>keep-derivations</literal> - only applies at the moment the garbage collector is - run.</para></listitem> + </listitem> </varlistentry> - <varlistentry xml:id="conf-max-jobs"><term><literal>max-jobs</literal></term> + <varlistentry xml:id="conf-allow-import-from-derivation"><term><literal>allow-import-from-derivation</literal></term> - <listitem><para>This option defines the maximum number of jobs - that Nix will try to build in parallel. The default is - <literal>1</literal>. The special value <literal>auto</literal> - causes Nix to use the number of CPUs in your system. It can be - overridden using the <option - linkend='opt-max-jobs'>--max-jobs</option> (<option>-j</option>) - command line switch.</para></listitem> + <listitem><para>By default, Nix allows you to <function>import</function> from a derivation, + allowing building at evaluation time. With this option set to false, Nix will throw an error + when evaluating an expression that uses this feature, allowing users to ensure their evaluation + will not require any builds to take place.</para></listitem> </varlistentry> - <varlistentry xml:id="conf-cores"><term><literal>cores</literal></term> + <varlistentry xml:id="conf-allow-new-privileges"><term><literal>allow-new-privileges</literal></term> - <listitem><para>Sets the value of the - <envar>NIX_BUILD_CORES</envar> environment variable in the - invocation of builders. Builders can use this variable at their - discretion to control the maximum amount of parallelism. For - instance, in Nixpkgs, if the derivation attribute - <varname>enableParallelBuilding</varname> is set to - <literal>true</literal>, the builder passes the - <option>-j<replaceable>N</replaceable></option> flag to GNU Make. - It can be overridden using the <option - linkend='opt-cores'>--cores</option> command line switch and - defaults to <literal>1</literal>. The value <literal>0</literal> - means that the builder should use all available CPU cores in the - system.</para></listitem> + <listitem><para>(Linux-specific.) By default, builders on Linux + cannot acquire new privileges by calling setuid/setgid programs or + programs that have file capabilities. For example, programs such + as <command>sudo</command> or <command>ping</command> will + fail. (Note that in sandbox builds, no such programs are available + unless you bind-mount them into the sandbox via the + <option>sandbox-paths</option> option.) You can allow the + use of such programs by enabling this option. This is impure and + usually undesirable, but may be useful in certain scenarios + (e.g. to spin up containers or set up userspace network interfaces + in tests).</para></listitem> </varlistentry> - <varlistentry xml:id="conf-max-silent-time"><term><literal>max-silent-time</literal></term> + <varlistentry xml:id="conf-allowed-users"><term><literal>allowed-users</literal></term> <listitem> - <para>This option defines the maximum number of seconds that a - builder can go without producing any data on standard output or - standard error. 
This is useful (for instance in an automated - build system) to catch builds that are stuck in an infinite - loop, or to catch remote builds that are hanging due to network - problems. It can be overridden using the <option - linkend="opt-max-silent-time">--max-silent-time</option> command - line switch.</para> + <para>A list of names of users (separated by whitespace) that + are allowed to connect to the Nix daemon. As with the + <option>trusted-users</option> option, you can specify groups by + prefixing them with <literal>@</literal>. Also, you can allow + all users by specifying <literal>*</literal>. The default is + <literal>*</literal>.</para> - <para>The value <literal>0</literal> means that there is no - timeout. This is also the default.</para> + <para>Note that trusted users are always allowed to connect.</para> </listitem> </varlistentry> - <varlistentry xml:id="conf-timeout"><term><literal>timeout</literal></term> + <varlistentry><term><literal>auto-optimise-store</literal></term> - <listitem> + <listitem><para>If set to <literal>true</literal>, Nix + automatically detects files in the store that have identical + contents, and replaces them with hard links to a single copy. + This saves disk space. If set to <literal>false</literal> (the + default), you can still run <command>nix-store + --optimise</command> to get rid of duplicate + files.</para></listitem> - <para>This option defines the maximum number of seconds that a - builder can run. This is useful (for instance in an automated - build system) to catch builds that are stuck in an infinite loop - but keep writing to their standard output or standard error. It - can be overridden using the <option - linkend="opt-timeout">--timeout</option> command line - switch.</para> + </varlistentry> - <para>The value <literal>0</literal> means that there is no - timeout. This is also the default.</para> + <varlistentry xml:id="conf-builders"> + <term><literal>builders</literal></term> + <listitem> + <para>A list of machines on which to perform builds. <phrase + condition="manual">See <xref linkend="chap-distributed-builds" + /> for details.</phrase></para> </listitem> - </varlistentry> - <varlistentry xml:id="conf-max-build-log-size"><term><literal>max-build-log-size</literal></term> + <varlistentry><term><literal>builders-use-substitutes</literal></term> - <listitem> - - <para>This option defines the maximum number of bytes that a - builder can write to its stdout/stderr. If the builder exceeds - this limit, it’s killed. A value of <literal>0</literal> (the - default) means that there is no limit.</para> - - </listitem> + <listitem><para>If set to <literal>true</literal>, Nix will instruct + remote build machines to use their own binary substitutes if available. In + practical terms, this means that remote hosts will fetch as many build + dependencies as possible from their own substitutes (e.g, from + <literal>cache.nixos.org</literal>), instead of waiting for this host to + upload them all. This can drastically reduce build times if the network + connection between this computer and the remote build host is slow. 
Defaults + to <literal>false</literal>.</para></listitem> </varlistentry> @@ -244,66 +201,51 @@ false</literal>.</para> </varlistentry> - <varlistentry><term><literal>sandbox</literal></term> + <varlistentry><term><literal>compress-build-log</literal></term> - <listitem><para>If set to <literal>true</literal>, builds will be - performed in a <emphasis>sandboxed environment</emphasis>, i.e., - they’re isolated from the normal file system hierarchy and will - only see their dependencies in the Nix store, the temporary build - directory, private versions of <filename>/proc</filename>, - <filename>/dev</filename>, <filename>/dev/shm</filename> and - <filename>/dev/pts</filename> (on Linux), and the paths configured with the - <link linkend='conf-sandbox-paths'><literal>sandbox-paths</literal> - option</link>. This is useful to prevent undeclared dependencies - on files in directories such as <filename>/usr/bin</filename>. In - addition, on Linux, builds run in private PID, mount, network, IPC - and UTS namespaces to isolate them from other processes in the - system (except that fixed-output derivations do not run in private - network namespace to ensure they can access the network).</para> + <listitem><para>If set to <literal>true</literal> (the default), + build logs written to <filename>/nix/var/log/nix/drvs</filename> + will be compressed on the fly using bzip2. Otherwise, they will + not be compressed.</para></listitem> - <para>Currently, sandboxing only work on Linux and macOS. The use - of a sandbox requires that Nix is run as root (so you should use - the <link linkend='conf-build-users-group'>“build users” - feature</link> to perform the actual builds under different users - than root).</para> + </varlistentry> - <para>If this option is set to <literal>relaxed</literal>, then - fixed-output derivations and derivations that have the - <varname>__noChroot</varname> attribute set to - <literal>true</literal> do not run in sandboxes.</para> - <para>The default is <literal>false</literal>.</para> + <varlistentry xml:id="conf-connect-timeout"><term><literal>connect-timeout</literal></term> + + <listitem> + + <para>The timeout (in seconds) for establishing connections in + the binary cache substituter. It corresponds to + <command>curl</command>’s <option>--connect-timeout</option> + option.</para> </listitem> </varlistentry> - <varlistentry xml:id="conf-sandbox-paths"> - <term><literal>sandbox-paths</literal></term> - - <listitem><para>A list of paths bind-mounted into Nix sandbox - environments. You can use the syntax - <literal><replaceable>target</replaceable>=<replaceable>source</replaceable></literal> - to mount a path in a different location in the sandbox; for - instance, <literal>/bin=/nix-bin</literal> will mount the path - <literal>/nix-bin</literal> as <literal>/bin</literal> inside the - sandbox. 
If <replaceable>source</replaceable> is followed by - <literal>?</literal>, then it is not an error if - <replaceable>source</replaceable> does not exist; for example, - <literal>/dev/nvidiactl?</literal> specifies that - <filename>/dev/nvidiactl</filename> will only be mounted in the - sandbox if it exists in the host filesystem.</para> + <varlistentry xml:id="conf-cores"><term><literal>cores</literal></term> - <para>Depending on how Nix was built, the default value for this option - may be empty or provide <filename>/bin/sh</filename> as a - bind-mount of <command>bash</command>.</para></listitem> + <listitem><para>Sets the value of the + <envar>NIX_BUILD_CORES</envar> environment variable in the + invocation of builders. Builders can use this variable at their + discretion to control the maximum amount of parallelism. For + instance, in Nixpkgs, if the derivation attribute + <varname>enableParallelBuilding</varname> is set to + <literal>true</literal>, the builder passes the + <option>-j<replaceable>N</replaceable></option> flag to GNU Make. + It can be overridden using the <option + linkend='opt-cores'>--cores</option> command line switch and + defaults to <literal>1</literal>. The value <literal>0</literal> + means that the builder should use all available CPU cores in the + system.</para></listitem> </varlistentry> <varlistentry xml:id="conf-extra-sandbox-paths"> - <term><literal>build-extra-sandbox-paths</literal></term> + <term><literal>extra-sandbox-paths</literal></term> <listitem><para>A list of additional paths appended to <option>sandbox-paths</option>. Useful if you want to extend @@ -312,11 +254,13 @@ false</literal>.</para> </varlistentry> - <varlistentry><term><literal>use-substitutes</literal></term> + <varlistentry><term><literal>extra-substituters</literal></term> - <listitem><para>If set to <literal>true</literal> (default), Nix - will use binary substitutes if available. This option can be - disabled to force building from source.</para></listitem> + <listitem><para>Additional binary caches appended to those + specified in <option>substituters</option>. When used by + unprivileged users, untrusted substituters (i.e. those not listed + in <option>trusted-substituters</option>) are silently + ignored.</para></listitem> </varlistentry> @@ -331,119 +275,168 @@ false</literal>.</para> </varlistentry> - <varlistentry><term><literal>keep-build-log</literal></term> + <varlistentry><term><literal>fsync-metadata</literal></term> - <listitem><para>If set to <literal>true</literal> (the default), - Nix will write the build log of a derivation (i.e. the standard - output and error of its builder) to the directory - <filename>/nix/var/log/nix/drvs</filename>. The build log can be - retrieved using the command <command>nix-store -l - <replaceable>path</replaceable></command>.</para></listitem> + <listitem><para>If set to <literal>true</literal>, changes to the + Nix store metadata (in <filename>/nix/var/nix/db</filename>) are + synchronously flushed to disk. This improves robustness in case + of system crashes, but reduces performance. The default is + <literal>true</literal>.</para></listitem> </varlistentry> - <varlistentry><term><literal>compress-build-log</literal></term> + <varlistentry xml:id="conf-hashed-mirrors"><term><literal>hashed-mirrors</literal></term> - <listitem><para>If set to <literal>true</literal> (the default), - build logs written to <filename>/nix/var/log/nix/drvs</filename> - will be compressed on the fly using bzip2. 
Otherwise, they will - not be compressed.</para></listitem> + <listitem><para>A list of web servers used by + <function>builtins.fetchurl</function> to obtain files by + hash. The default is + <literal>http://tarballs.nixos.org/</literal>. Given a hash type + <replaceable>ht</replaceable> and a base-16 hash + <replaceable>h</replaceable>, Nix will try to download the file + from + <literal>hashed-mirror/<replaceable>ht</replaceable>/<replaceable>h</replaceable></literal>. + This allows files to be downloaded even if they have disappeared + from their original URI. For example, given the default mirror + <literal>http://tarballs.nixos.org/</literal>, when building the derivation + +<programlisting> +builtins.fetchurl { + url = https://example.org/foo-1.2.3.tar.xz; + sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"; +} +</programlisting> + + Nix will attempt to download this file from + <literal>http://tarballs.nixos.org/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae</literal> + first. If it is not available there, if will try the original URI.</para></listitem> </varlistentry> - <varlistentry><term><literal>substituters</literal></term> + <varlistentry><term><literal>http-connections</literal></term> - <listitem><para>A list of URLs of substituters, separated by - whitespace. The default is - <literal>https://cache.nixos.org</literal>.</para></listitem> + <listitem><para>The maximum number of parallel TCP connections + used to fetch files from binary caches and by other downloads. It + defaults to 25. 0 means no limit.</para></listitem> </varlistentry> - <!-- - <varlistentry><term><literal>binary-caches-files</literal></term> + <varlistentry><term><literal>keep-build-log</literal></term> - <listitem><para>A list of names of files that will be read to - obtain additional binary cache URLs. The default is - <literal>/nix/var/nix/profiles/per-user/<replaceable>username</replaceable>/channels/binary-caches/*</literal>. - Note that when you’re using the Nix daemon, - <replaceable>username</replaceable> is always equal to - <literal>root</literal>, so Nix will only use the binary caches - provided by the channels installed by root. Do not set this - option to read files created by untrusted users!</para></listitem> + <listitem><para>If set to <literal>true</literal> (the default), + Nix will write the build log of a derivation (i.e. the standard + output and error of its builder) to the directory + <filename>/nix/var/log/nix/drvs</filename>. The build log can be + retrieved using the command <command>nix-store -l + <replaceable>path</replaceable></command>.</para></listitem> </varlistentry> - --> - <varlistentry><term><literal>trusted-substituters</literal></term> + <varlistentry xml:id="conf-keep-derivations"><term><literal>keep-derivations</literal></term> - <listitem><para>A list of URLs of substituters, separated by - whitespace. These are not used by default, but can be enabled by - users of the Nix daemon by specifying <literal>--option - substituters <replaceable>urls</replaceable></literal> on the - command line. Unprivileged users are only allowed to pass a - subset of the URLs listed in <literal>substituters</literal> and - <literal>trusted-substituters</literal>.</para></listitem> + <listitem><para>If <literal>true</literal> (default), the garbage + collector will keep the derivations from which non-garbage store + paths were built. 
If <literal>false</literal>, they will be + deleted unless explicitly registered as a root (or reachable from + other roots).</para> + + <para>Keeping derivation around is useful for querying and + traceability (e.g., it allows you to ask with what dependencies or + options a store path was built), so by default this option is on. + Turn it off to save a bit of disk space (or a lot if + <literal>keep-outputs</literal> is also turned on).</para></listitem> </varlistentry> - <varlistentry><term><literal>extra-substituters</literal></term> + <varlistentry><term><literal>keep-env-derivations</literal></term> - <listitem><para>Additional binary caches appended to those - specified in <option>substituters</option>. When used by - unprivileged users, untrusted substituters (i.e. those not listed - in <option>trusted-substituters</option>) are silently - ignored.</para></listitem> + <listitem><para>If <literal>false</literal> (default), derivations + are not stored in Nix user environments. That is, the derivation + any build-time-only dependencies may be garbage-collected.</para> + + <para>If <literal>true</literal>, when you add a Nix derivation to + a user environment, the path of the derivation is stored in the + user environment. Thus, the derivation will not be + garbage-collected until the user environment generation is deleted + (<command>nix-env --delete-generations</command>). To prevent + build-time-only dependencies from being collected, you should also + turn on <literal>keep-outputs</literal>.</para> + + <para>The difference between this option and + <literal>keep-derivations</literal> is that this one is + “sticky”: it applies to any user environment created while this + option was enabled, while <literal>keep-derivations</literal> + only applies at the moment the garbage collector is + run.</para></listitem> </varlistentry> - <varlistentry><term><literal>require-sigs</literal></term> + <varlistentry xml:id="conf-keep-outputs"><term><literal>keep-outputs</literal></term> - <listitem><para>If set to <literal>true</literal> (the default), - any non-content-addressed path added or copied to the Nix store - (e.g. when substituting from a binary cache) must have a valid - signature, that is, be signed using one of the keys listed in - <option>trusted-public-keys</option> or - <option>secret-key-files</option>. Set to <literal>false</literal> - to disable signature checking.</para></listitem> + <listitem><para>If <literal>true</literal>, the garbage collector + will keep the outputs of non-garbage derivations. If + <literal>false</literal> (default), outputs will be deleted unless + they are GC roots themselves (or reachable from other roots).</para> + + <para>In general, outputs must be registered as roots separately. + However, even if the output of a derivation is registered as a + root, the collector will still delete store paths that are used + only at build time (e.g., the C compiler, or source tarballs + downloaded from the network). To prevent it from doing so, set + this option to <literal>true</literal>.</para></listitem> </varlistentry> - <varlistentry><term><literal>trusted-public-keys</literal></term> + <varlistentry xml:id="conf-max-build-log-size"><term><literal>max-build-log-size</literal></term> - <listitem><para>A whitespace-separated list of public keys. When - paths are copied from another Nix store (such as a binary cache), - they must be signed with one of these keys. 
For example: - <literal>cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= - hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=</literal>.</para></listitem> + <listitem> + + <para>This option defines the maximum number of bytes that a + builder can write to its stdout/stderr. If the builder exceeds + this limit, it’s killed. A value of <literal>0</literal> (the + default) means that there is no limit.</para> + + </listitem> </varlistentry> - <varlistentry><term><literal>secret-key-files</literal></term> + <varlistentry xml:id="conf-max-jobs"><term><literal>max-jobs</literal></term> - <listitem><para>A whitespace-separated list of files containing - secret (private) keys. These are used to sign locally-built - paths. They can be generated using <command>nix-store - --generate-binary-cache-key</command>. The corresponding public - key can be distributed to other users, who can add it to - <option>trusted-public-keys</option> in their - <filename>nix.conf</filename>.</para></listitem> + <listitem><para>This option defines the maximum number of jobs + that Nix will try to build in parallel. The default is + <literal>1</literal>. The special value <literal>auto</literal> + causes Nix to use the number of CPUs in your system. It can be + overridden using the <option + linkend='opt-max-jobs'>--max-jobs</option> (<option>-j</option>) + command line switch.</para></listitem> </varlistentry> - <varlistentry><term><literal>http-connections</literal></term> + <varlistentry xml:id="conf-max-silent-time"><term><literal>max-silent-time</literal></term> - <listitem><para>The maximum number of parallel TCP connections - used to fetch files from binary caches and by other downloads. It - defaults to 25. 0 means no limit.</para></listitem> + <listitem> + + <para>This option defines the maximum number of seconds that a + builder can go without producing any data on standard output or + standard error. This is useful (for instance in an automated + build system) to catch builds that are stuck in an infinite + loop, or to catch remote builds that are hanging due to network + problems. It can be overridden using the <option + linkend="opt-max-silent-time">--max-silent-time</option> command + line switch.</para> + + <para>The value <literal>0</literal> means that there is no + timeout. This is also the default.</para> + + </listitem> </varlistentry> @@ -471,104 +464,95 @@ password <replaceable>my-password</replaceable> </varlistentry> - <varlistentry><term><literal>system</literal></term> - - <listitem><para>This option specifies the canonical Nix system - name of the current installation, such as - <literal>i686-linux</literal> or - <literal>x86_64-darwin</literal>. Nix can only build derivations - whose <literal>system</literal> attribute equals the value - specified here. In general, it never makes sense to modify this - value from its default, since you can use it to ‘lie’ about the - platform you are building on (e.g., perform a Mac OS build on a - Linux machine; the result would obviously be wrong). It only - makes sense if the Nix binaries can run on multiple platforms, - e.g., ‘universal binaries’ that run on <literal>x86_64-linux</literal> and - <literal>i686-linux</literal>.</para> - - <para>It defaults to the canonical Nix system name detected by - <filename>configure</filename> at build time.</para></listitem> + <varlistentry xml:id="conf-plugin-files"> + <term><literal>plugin-files</literal></term> + <listitem> + <para> + A list of plugin files to be loaded by Nix. 
Each of these + files will be dlopened by Nix, allowing them to affect + execution through static initialization. In particular, these + plugins may construct static instances of RegisterPrimOp to + add new primops or constants to the expression language, + RegisterStoreImplementation to add new store implementations, + RegisterCommand to add new subcommands to the + <literal>nix</literal> command, and RegisterSetting to add new + nix config settings. See the constructors for those types for + more details. + </para> + <para> + Since these files are loaded into the same address space as + Nix itself, they must be DSOs compatible with the instance of + Nix running at the time (i.e. compiled against the same + headers, not linked to any incompatible libraries). They + should not be linked to any Nix libs directly, as those will + be available already at load time. + </para> + <para> + If an entry in the list is a directory, all files in the + directory are loaded as plugins (non-recursively). + </para> + </listitem> </varlistentry> - <varlistentry><term><literal>fsync-metadata</literal></term> - - <listitem><para>If set to <literal>true</literal>, changes to the - Nix store metadata (in <filename>/nix/var/nix/db</filename>) are - synchronously flushed to disk. This improves robustness in case - of system crashes, but reduces performance. The default is - <literal>true</literal>.</para></listitem> - - </varlistentry> + <varlistentry xml:id="conf-pre-build-hook"><term><literal>pre-build-hook</literal></term> + <listitem> - <varlistentry><term><literal>auto-optimise-store</literal></term> - <listitem><para>If set to <literal>true</literal>, Nix - automatically detects files in the store that have identical - contents, and replaces them with hard links to a single copy. - This saves disk space. If set to <literal>false</literal> (the - default), you can still run <command>nix-store - --optimise</command> to get rid of duplicate - files.</para></listitem> + <para>If set, the path to a program that can set extra + derivation-specific settings for this system. This is used for settings + that can't be captured by the derivation model itself and are too variable + between different versions of the same system to be hard-coded into nix. + </para> - </varlistentry> + <para>The hook is passed the derivation path and, if sandboxes are enabled, + the sandbox directory. It can then modify the sandbox and send a series of + commands to modify various settings to stdout. The currently recognized + commands are:</para> + <variablelist> + <varlistentry xml:id="extra-sandbox-paths"> + <term><literal>extra-sandbox-paths</literal></term> - <varlistentry xml:id="conf-connect-timeout"><term><literal>connect-timeout</literal></term> + <listitem> - <listitem> + <para>Pass a list of files and directories to be included in the + sandbox for this build. One entry per line, terminated by an empty + line. Entries have the same format as + <literal>sandbox-paths</literal>.</para> - <para>The timeout (in seconds) for establishing connections in - the binary cache substituter. 
It corresponds to - <command>curl</command>’s <option>--connect-timeout</option> - option.</para> + </listitem> + </varlistentry> + </variablelist> </listitem> </varlistentry> - <varlistentry xml:id="conf-trusted-users"><term><literal>trusted-users</literal></term> - - <listitem> - - <para>A list of names of users (separated by whitespace) that - have additional rights when connecting to the Nix daemon, such - as the ability to specify additional binary caches, or to import - unsigned NARs. You can also specify groups by prefixing them - with <literal>@</literal>; for instance, - <literal>@wheel</literal> means all users in the - <literal>wheel</literal> group. The default is - <literal>root</literal>.</para> - - <warning><para>Adding a user to <option>trusted-users</option> - is essentially equivalent to giving that user root access to the - system. For example, the user can set - <option>sandbox-paths</option> and thereby obtain read access to - directories that are otherwise inacessible to - them.</para></warning> + <varlistentry xml:id="conf-repeat"><term><literal>repeat</literal></term> - </listitem> + <listitem><para>How many times to repeat builds to check whether + they are deterministic. The default value is 0. If the value is + non-zero, every build is repeated the specified number of + times. If the contents of any of the runs differs from the + previous ones, the build is rejected and the resulting store paths + are not registered as “valid” in Nix’s database.</para></listitem> </varlistentry> - <varlistentry xml:id="conf-allowed-users"><term><literal>allowed-users</literal></term> - - <listitem> - - <para>A list of names of users (separated by whitespace) that - are allowed to connect to the Nix daemon. As with the - <option>trusted-users</option> option, you can specify groups by - prefixing them with <literal>@</literal>. Also, you can allow - all users by specifying <literal>*</literal>. The default is - <literal>*</literal>.</para> - - <para>Note that trusted users are always allowed to connect.</para> + <varlistentry><term><literal>require-sigs</literal></term> - </listitem> + <listitem><para>If set to <literal>true</literal> (the default), + any non-content-addressed path added or copied to the Nix store + (e.g. when substituting from a binary cache) must have a valid + signature, that is, be signed using one of the keys listed in + <option>trusted-public-keys</option> or + <option>secret-key-files</option>. Set to <literal>false</literal> + to disable signature checking.</para></listitem> </varlistentry> @@ -589,133 +573,202 @@ password <replaceable>my-password</replaceable> </varlistentry> - <varlistentry xml:id="conf-allowed-uris"><term><literal>allowed-uris</literal></term> + <varlistentry><term><literal>sandbox</literal></term> - <listitem> + <listitem><para>If set to <literal>true</literal>, builds will be + performed in a <emphasis>sandboxed environment</emphasis>, i.e., + they’re isolated from the normal file system hierarchy and will + only see their dependencies in the Nix store, the temporary build + directory, private versions of <filename>/proc</filename>, + <filename>/dev</filename>, <filename>/dev/shm</filename> and + <filename>/dev/pts</filename> (on Linux), and the paths configured with the + <link linkend='conf-sandbox-paths'><literal>sandbox-paths</literal> + option</link>. This is useful to prevent undeclared dependencies + on files in directories such as <filename>/usr/bin</filename>. 
In + addition, on Linux, builds run in private PID, mount, network, IPC + and UTS namespaces to isolate them from other processes in the + system (except that fixed-output derivations do not run in private + network namespace to ensure they can access the network).</para> - <para>A list of URI prefixes to which access is allowed in - restricted evaluation mode. For example, when set to - <literal>https://github.com/NixOS</literal>, builtin functions - such as <function>fetchGit</function> are allowed to access - <literal>https://github.com/NixOS/patchelf.git</literal>.</para> + <para>Currently, sandboxing only work on Linux and macOS. The use + of a sandbox requires that Nix is run as root (so you should use + the <link linkend='conf-build-users-group'>“build users” + feature</link> to perform the actual builds under different users + than root).</para> + + <para>If this option is set to <literal>relaxed</literal>, then + fixed-output derivations and derivations that have the + <varname>__noChroot</varname> attribute set to + <literal>true</literal> do not run in sandboxes.</para> + + <para>The default is <literal>false</literal>.</para> </listitem> </varlistentry> - <varlistentry xml:id="conf-pre-build-hook"><term><literal>pre-build-hook</literal></term> + <varlistentry xml:id="conf-sandbox-dev-shm-size"><term><literal>sandbox-dev-shm-size</literal></term> - <listitem> + <listitem><para>This option determines the maximum size of the + <literal>tmpfs</literal> filesystem mounted on + <filename>/dev/shm</filename> in Linux sandboxes. For the format, + see the description of the <option>size</option> option of + <literal>tmpfs</literal> in + <citerefentry><refentrytitle>mount</refentrytitle><manvolnum>8</manvolnum></citerefentry>. The + default is <literal>50%</literal>.</para></listitem> + </varlistentry> - <para>If set, the path to a program that can set extra - derivation-specific settings for this system. This is used for settings - that can't be captured by the derivation model itself and are too variable - between different versions of the same system to be hard-coded into nix. - </para> - <para>The hook is passed the derivation path and, if sandboxes are enabled, - the sandbox directory. It can then modify the sandbox and send a series of - commands to modify various settings to stdout. The currently recognized - commands are:</para> + <varlistentry xml:id="conf-sandbox-paths"> + <term><literal>sandbox-paths</literal></term> - <variablelist> - <varlistentry xml:id="extra-sandbox-paths"> - <term><literal>extra-sandbox-paths</literal></term> + <listitem><para>A list of paths bind-mounted into Nix sandbox + environments. You can use the syntax + <literal><replaceable>target</replaceable>=<replaceable>source</replaceable></literal> + to mount a path in a different location in the sandbox; for + instance, <literal>/bin=/nix-bin</literal> will mount the path + <literal>/nix-bin</literal> as <literal>/bin</literal> inside the + sandbox. 
If <replaceable>source</replaceable> is followed by + <literal>?</literal>, then it is not an error if + <replaceable>source</replaceable> does not exist; for example, + <literal>/dev/nvidiactl?</literal> specifies that + <filename>/dev/nvidiactl</filename> will only be mounted in the + sandbox if it exists in the host filesystem.</para> - <listitem> + <para>Depending on how Nix was built, the default value for this option + may be empty or provide <filename>/bin/sh</filename> as a + bind-mount of <command>bash</command>.</para></listitem> - <para>Pass a list of files and directories to be included in the - sandbox for this build. One entry per line, terminated by an empty - line. Entries have the same format as - <literal>sandbox-paths</literal>.</para> + </varlistentry> - </listitem> - </varlistentry> - </variablelist> - </listitem> + <varlistentry><term><literal>secret-key-files</literal></term> + + <listitem><para>A whitespace-separated list of files containing + secret (private) keys. These are used to sign locally-built + paths. They can be generated using <command>nix-store + --generate-binary-cache-key</command>. The corresponding public + key can be distributed to other users, who can add it to + <option>trusted-public-keys</option> in their + <filename>nix.conf</filename>.</para></listitem> </varlistentry> - <varlistentry xml:id="conf-repeat"><term><literal>repeat</literal></term> + <varlistentry xml:id="conf-show-trace"><term><literal>show-trace</literal></term> - <listitem><para>How many times to repeat builds to check whether - they are deterministic. The default value is 0. If the value is - non-zero, every build is repeated the specified number of - times. If the contents of any of the runs differs from the - previous ones, the build is rejected and the resulting store paths - are not registered as “valid” in Nix’s database.</para></listitem> + <listitem><para>Causes Nix to print out a stack trace in case of Nix + expression evaluation errors.</para></listitem> </varlistentry> - <varlistentry xml:id="conf-sandbox-dev-shm-size"><term><literal>sandbox-dev-shm-size</literal></term> + <varlistentry><term><literal>substitute</literal></term> - <listitem><para>This option determines the maximum size of the - <literal>tmpfs</literal> filesystem mounted on - <filename>/dev/shm</filename> in Linux sandboxes. For the format, - see the description of the <option>size</option> option of - <literal>tmpfs</literal> in - <citerefentry><refentrytitle>mount</refentrytitle><manvolnum>8</manvolnum></citerefentry>. The - default is <literal>50%</literal>.</para></listitem> + <listitem><para>If set to <literal>true</literal> (default), Nix + will use binary substitutes if available. This option can be + disabled to force building from source.</para></listitem> </varlistentry> - <varlistentry xml:id="conf-allow-import-from-derivation"><term><literal>allow-import-from-derivation</literal></term> + <varlistentry><term><literal>substituters</literal></term> - <listitem><para>By default, Nix allows you to <function>import</function> from a derivation, - allowing building at evaluation time. With this option set to false, Nix will throw an error - when evaluating an expression that uses this feature, allowing users to ensure their evaluation - will not require any builds to take place.</para></listitem> + <listitem><para>A list of URLs of substituters, separated by + whitespace. 
The default is + <literal>https://cache.nixos.org</literal>.</para></listitem> </varlistentry> - <varlistentry xml:id="conf-allow-new-privileges"><term><literal>allow-new-privileges</literal></term> + <varlistentry><term><literal>system</literal></term> - <listitem><para>(Linux-specific.) By default, builders on Linux - cannot acquire new privileges by calling setuid/setgid programs or - programs that have file capabilities. For example, programs such - as <command>sudo</command> or <command>ping</command> will - fail. (Note that in sandbox builds, no such programs are available - unless you bind-mount them into the sandbox via the - <option>sandbox-paths</option> option.) You can allow the - use of such programs by enabling this option. This is impure and - usually undesirable, but may be useful in certain scenarios - (e.g. to spin up containers or set up userspace network interfaces - in tests).</para></listitem> + <listitem><para>This option specifies the canonical Nix system + name of the current installation, such as + <literal>i686-linux</literal> or + <literal>x86_64-darwin</literal>. Nix can only build derivations + whose <literal>system</literal> attribute equals the value + specified here. In general, it never makes sense to modify this + value from its default, since you can use it to ‘lie’ about the + platform you are building on (e.g., perform a Mac OS build on a + Linux machine; the result would obviously be wrong). It only + makes sense if the Nix binaries can run on multiple platforms, + e.g., ‘universal binaries’ that run on <literal>x86_64-linux</literal> and + <literal>i686-linux</literal>.</para> + + <para>It defaults to the canonical Nix system name detected by + <filename>configure</filename> at build time.</para></listitem> </varlistentry> - <varlistentry xml:id="conf-hashed-mirrors"><term><literal>hashed-mirrors</literal></term> + <varlistentry xml:id="conf-timeout"><term><literal>timeout</literal></term> - <listitem><para>A list of web servers used by - <function>builtins.fetchurl</function> to obtain files by - hash. The default is - <literal>http://tarballs.nixos.org/</literal>. Given a hash type - <replaceable>ht</replaceable> and a base-16 hash - <replaceable>h</replaceable>, Nix will try to download the file - from - <literal>hashed-mirror/<replaceable>ht</replaceable>/<replaceable>h</replaceable></literal>. - This allows files to be downloaded even if they have disappeared - from their original URI. For example, given the default mirror - <literal>http://tarballs.nixos.org/</literal>, when building the derivation + <listitem> -<programlisting> -builtins.fetchurl { - url = https://example.org/foo-1.2.3.tar.xz; - sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"; -} -</programlisting> + <para>This option defines the maximum number of seconds that a + builder can run. This is useful (for instance in an automated + build system) to catch builds that are stuck in an infinite loop + but keep writing to their standard output or standard error. It + can be overridden using the <option + linkend="opt-timeout">--timeout</option> command line + switch.</para> - Nix will attempt to download this file from - <literal>http://tarballs.nixos.org/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae</literal> - first. If it is not available there, if will try the original URI.</para></listitem> + <para>The value <literal>0</literal> means that there is no + timeout. 
This is also the default.</para> + + </listitem> + + </varlistentry> + + + <varlistentry><term><literal>trusted-public-keys</literal></term> + + <listitem><para>A whitespace-separated list of public keys. When + paths are copied from another Nix store (such as a binary cache), + they must be signed with one of these keys. For example: + <literal>cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=</literal>.</para></listitem> + + </varlistentry> + + + <varlistentry><term><literal>trusted-substituters</literal></term> + + <listitem><para>A list of URLs of substituters, separated by + whitespace. These are not used by default, but can be enabled by + users of the Nix daemon by specifying <literal>--option + substituters <replaceable>urls</replaceable></literal> on the + command line. Unprivileged users are only allowed to pass a + subset of the URLs listed in <literal>substituters</literal> and + <literal>trusted-substituters</literal>.</para></listitem> + + </varlistentry> + + + <varlistentry xml:id="conf-trusted-users"><term><literal>trusted-users</literal></term> + + <listitem> + + <para>A list of names of users (separated by whitespace) that + have additional rights when connecting to the Nix daemon, such + as the ability to specify additional binary caches, or to import + unsigned NARs. You can also specify groups by prefixing them + with <literal>@</literal>; for instance, + <literal>@wheel</literal> means all users in the + <literal>wheel</literal> group. The default is + <literal>root</literal>.</para> + + <warning><para>Adding a user to <option>trusted-users</option> + is essentially equivalent to giving that user root access to the + system. For example, the user can set + <option>sandbox-paths</option> and thereby obtain read access to + directories that are otherwise inacessible to + them.</para></warning> + + </listitem> </varlistentry> diff --git a/doc/manual/command-ref/env-common.xml b/doc/manual/command-ref/env-common.xml index a83aeaf2e575..361d3e2b0330 100644 --- a/doc/manual/command-ref/env-common.xml +++ b/doc/manual/command-ref/env-common.xml @@ -154,6 +154,8 @@ $ mount -o bind /mnt/otherdisk/nix /nix</screen> <literal>daemon</literal> if you want to use the Nix daemon to execute Nix operations. This is necessary in <link linkend="ssec-multi-user">multi-user Nix installations</link>. + If the Nix daemon's Unix socket is at some non-standard path, + this variable should be set to <literal>unix://path/to/socket</literal>. Otherwise, it should be left unset.</para></listitem> </varlistentry> diff --git a/doc/manual/command-ref/nix-build.xml b/doc/manual/command-ref/nix-build.xml index d6b2e5e5adb7..40fe7a43f10c 100644 --- a/doc/manual/command-ref/nix-build.xml +++ b/doc/manual/command-ref/nix-build.xml @@ -29,8 +29,6 @@ </group> <replaceable>attrPath</replaceable> </arg> - <arg><option>--drv-link</option> <replaceable>drvlink</replaceable></arg> - <arg><option>--add-drv-link</option></arg> <arg><option>--no-out-link</option></arg> <arg> <group choice='req'> @@ -91,25 +89,6 @@ also <xref linkend="sec-common-options" />.</phrase></para> <variablelist> - <varlistentry><term><option>--drv-link</option> <replaceable>drvlink</replaceable></term> - - <listitem><para>Add a symlink named - <replaceable>drvlink</replaceable> to the store derivation - produced by <command>nix-instantiate</command>. The derivation is - a root of the garbage collector until the symlink is deleted or - renamed. 
If there are multiple derivations, numbers are suffixed - to <replaceable>drvlink</replaceable> to distinguish between - them.</para></listitem> - - </varlistentry> - - <varlistentry><term><option>--add-drv-link</option></term> - - <listitem><para>Shorthand for <option>--drv-link</option> - <filename>./derivation</filename>.</para></listitem> - - </varlistentry> - <varlistentry><term><option>--no-out-link</option></term> <listitem><para>Do not create a symlink to the output path. Note diff --git a/doc/manual/command-ref/nix-channel.xml b/doc/manual/command-ref/nix-channel.xml index 9acf44e52984..ff4021a765e0 100644 --- a/doc/manual/command-ref/nix-channel.xml +++ b/doc/manual/command-ref/nix-channel.xml @@ -31,7 +31,7 @@ <refsection><title>Description</title> -<para>A Nix channel is mechanism that allows you to automatically stay +<para>A Nix channel is a mechanism that allows you to automatically stay up-to-date with a set of pre-built Nix expressions. A Nix channel is just a URL that points to a place containing both a set of Nix expressions and a pointer to a binary cache. <phrase @@ -165,8 +165,8 @@ following files:</para> <varlistentry><term><filename>nixexprs.tar.xz</filename></term> <listitem><para>A tarball containing Nix expressions and files - referenced by them (such as build scripts and patches). At - top-level, the tarball should contain a single directory. That + referenced by them (such as build scripts and patches). At the + top level, the tarball should contain a single directory. That directory must contain a file <filename>default.nix</filename> that serves as the channel’s “entry point”.</para></listitem> @@ -175,7 +175,7 @@ following files:</para> <varlistentry><term><filename>binary-cache-url</filename></term> <listitem><para>A file containing the URL to a binary cache (such - as <uri>https://cache.nixos.org</uri>. Nix will automatically + as <uri>https://cache.nixos.org</uri>). Nix will automatically check this cache for pre-built binaries, if the user has sufficient rights to add binary caches. For instance, in a multi-user Nix setup, the binary caches provided by the channels diff --git a/doc/manual/command-ref/opt-common-syn.xml b/doc/manual/command-ref/opt-common-syn.xml index 3aff4e1b6357..168bef080f4f 100644 --- a/doc/manual/command-ref/opt-common-syn.xml +++ b/doc/manual/command-ref/opt-common-syn.xml @@ -47,7 +47,6 @@ </arg> <arg><option>--fallback</option></arg> <arg><option>--readonly-mode</option></arg> -<arg><option>--show-trace</option></arg> <arg> <option>-I</option> <replaceable>path</replaceable> diff --git a/doc/manual/command-ref/opt-common.xml b/doc/manual/command-ref/opt-common.xml index 32d53c753a22..bcb60b30125c 100644 --- a/doc/manual/command-ref/opt-common.xml +++ b/doc/manual/command-ref/opt-common.xml @@ -301,13 +301,6 @@ </varlistentry> -<varlistentry><term><option>--show-trace</option></term> - - <listitem><para>Causes Nix to print out a stack trace in case of Nix - expression evaluation errors.</para></listitem> - -</varlistentry> - <varlistentry xml:id="opt-I"><term><option>-I</option> <replaceable>path</replaceable></term> diff --git a/doc/manual/expressions/builtins.xml b/doc/manual/expressions/builtins.xml index 5a3a8645c1d9..8a32ed8b5c99 100644 --- a/doc/manual/expressions/builtins.xml +++ b/doc/manual/expressions/builtins.xml @@ -126,6 +126,17 @@ if builtins ? 
getEnv then builtins.getEnv "PATH" else ""</programlisting> </varlistentry> + <varlistentry><term><function>builtins.splitVersion</function> + <replaceable>s</replaceable></term> + + <listitem><para>Split a string representing a version into its + components, by the same version splitting logic underlying the + version comparison in <link linkend="ssec-version-comparisons"> + <command>nix-env -u</command></link>.</para></listitem> + + </varlistentry> + + <varlistentry><term><function>builtins.concatLists</function> <replaceable>lists</replaceable></term> @@ -308,8 +319,9 @@ stdenv.mkDerivation { … } </varlistentry> - <varlistentry><term><function>builtins.filterSource</function> - <replaceable>e1</replaceable> <replaceable>e2</replaceable></term> + <varlistentry xml:id='builtin-filterSource'> + <term><function>builtins.filterSource</function> + <replaceable>e1</replaceable> <replaceable>e2</replaceable></term> <listitem> @@ -768,6 +780,75 @@ Evaluates to <literal>[ "foo" ]</literal>. </varlistentry> + <varlistentry> + <term> + <function>builtins.path</function> + <replaceable>args</replaceable> + </term> + + <listitem> + <para> + An enrichment of the built-in path type, based on the attributes + present in <replaceable>args</replaceable>. All are optional + except <varname>path</varname>: + </para> + + <variablelist> + <varlistentry> + <term>path</term> + <listitem> + <para>The underlying path.</para> + </listitem> + </varlistentry> + <varlistentry> + <term>name</term> + <listitem> + <para> + The name of the path when added to the store. This can + used to reference paths that have nix-illegal characters + in their names, like <literal>@</literal>. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term>filter</term> + <listitem> + <para> + A function of the type expected by + <link linkend="builtin-filterSource">builtins.filterSource</link>, + with the same semantics. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term>recursive</term> + <listitem> + <para> + When <literal>false</literal>, when + <varname>path</varname> is added to the store it is with a + flat hash, rather than a hash of the NAR serialization of + the file. Thus, <varname>path</varname> must refer to a + regular file, not a directory. This allows similar + behavior to <literal>fetchurl</literal>. Defaults to + <literal>true</literal>. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term>sha256</term> + <listitem> + <para> + When provided, this is the expected hash of the file at + the path. Evaluation will fail if the hash is incorrect, + and providing a hash allows + <literal>builtins.path</literal> to be used even when the + <literal>pure-eval</literal> nix config option is on. 
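As a rough sketch (the path, name and filter shown here are made up
        for illustration), a call might look like:

<programlisting>
builtins.path {
  path = ./data;
  name = "source-data";                       # store name instead of "data"
  filter = path: type: baseNameOf path != ".git";
  sha256 = "<replaceable>hash</replaceable>"; # optional expected hash; enables use under pure-eval
}
</programlisting>
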
+ </para> + </listitem> + </varlistentry> + </variablelist> + </listitem> + </varlistentry> <varlistentry><term><function>builtins.pathExists</function> <replaceable>path</replaceable></term> diff --git a/doc/manual/expressions/debug-build.xml b/doc/manual/expressions/debug-build.xml deleted file mode 100644 index 0c1f4e6719b2..000000000000 --- a/doc/manual/expressions/debug-build.xml +++ /dev/null @@ -1,34 +0,0 @@ -<section xmlns="http://docbook.org/ns/docbook" - xmlns:xlink="http://www.w3.org/1999/xlink" - xmlns:xi="http://www.w3.org/2001/XInclude" - version="5.0" - xml:id="sec-debug-build"> - -<title>Debugging Build Failures</title> - -<para>At the beginning of each phase of the build (such as unpacking, -building or installing), the set of all shell variables is written to -the file <filename>env-vars</filename> at the top-level build -directory. This is useful for debugging: it allows you to recreate -the environment in which a build was performed. For instance, if a -build fails, then assuming you used the <option>-K</option> flag, you -can go to the output directory and <quote>switch</quote> to the -environment of the builder: - -<screen> -$ nix-build -K ./foo.nix -... fails, keeping build directory `/tmp/nix-1234-0' - -$ cd /tmp/nix-1234-0 - -$ source env-vars - -<lineannotation>(edit some files...)</lineannotation> - -$ make - -<lineannotation>(execution continues with the same GCC, make, etc.)</lineannotation></screen> - -</para> - -</section> diff --git a/doc/manual/expressions/language-constructs.xml b/doc/manual/expressions/language-constructs.xml index 2f0027d479cd..47d95f8a13e3 100644 --- a/doc/manual/expressions/language-constructs.xml +++ b/doc/manual/expressions/language-constructs.xml @@ -61,7 +61,7 @@ evaluates to <literal>"foobar"</literal>. <simplesect><title>Inheriting attributes</title> -<para>When defining a set it is often convenient to copy variables +<para>When defining a set or in a let-expression it is often convenient to copy variables from the surrounding lexical scope (e.g., when you want to propagate attributes). This can be shortened using the <literal>inherit</literal> keyword. For instance, @@ -72,7 +72,15 @@ let x = 123; in y = 456; }</programlisting> -evaluates to <literal>{ x = 123; y = 456; }</literal>. (Note that +is equivalent to + +<programlisting> +let x = 123; in +{ x = x; + y = 456; +}</programlisting> + +and both evaluate to <literal>{ x = 123; y = 456; }</literal>. (Note that this works because <varname>x</varname> is added to the lexical scope by the <literal>let</literal> construct.) It is also possible to inherit attributes from another set. For instance, in this fragment @@ -101,6 +109,26 @@ variables from the surrounding scope (<varname>fetchurl</varname> <varname>libXaw</varname> (the X Athena Widgets) from the <varname>xlibs</varname> (X11 client-side libraries) set.</para> +<para> +Summarizing the fragment + +<programlisting> +... +inherit x y z; +inherit (src-set) a b c; +...</programlisting> + +is equivalent to + +<programlisting> +... 
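# each name is bound to the variable of the same name, or to the corresponding attribute of src-set: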
+x = x; y = y; z = z; +a = src-set.a; b = src-set.b; c = src-set.c; +...</programlisting> + +when used while defining local variables in a let-expression or +while defining a set.</para> + </simplesect> diff --git a/doc/manual/expressions/simple-building-testing.xml b/doc/manual/expressions/simple-building-testing.xml index bd3901a13351..0348c082b205 100644 --- a/doc/manual/expressions/simple-building-testing.xml +++ b/doc/manual/expressions/simple-building-testing.xml @@ -81,6 +81,4 @@ Just pass the option <link linkend='opt-max-jobs'><option>-j in parallel, or set. Typically this should be the number of CPUs.</para> -<xi:include href="debug-build.xml" /> - </section> diff --git a/doc/manual/installation/installing-binary.xml b/doc/manual/installation/installing-binary.xml index 24e76eafeb18..7e8dfb0db3d4 100644 --- a/doc/manual/installation/installing-binary.xml +++ b/doc/manual/installation/installing-binary.xml @@ -79,16 +79,6 @@ alice$ ./install </para> -<para>Nix can be uninstalled using <command>rpm -e nix</command> or -<command>dpkg -r nix</command> on RPM- and Dpkg-based systems, -respectively. After this you should manually remove the Nix store and -other auxiliary data, if desired: - -<screen> -$ rm -rf /nix</screen> - -</para> - <para>You can uninstall Nix simply by running: <screen> diff --git a/doc/manual/installation/multi-user.xml b/doc/manual/installation/multi-user.xml index a13e3c89be78..69ae1ef27041 100644 --- a/doc/manual/installation/multi-user.xml +++ b/doc/manual/installation/multi-user.xml @@ -52,34 +52,6 @@ This creates 10 build users. There can never be more concurrent builds than the number of build users, so you may want to increase this if you expect to do many builds at the same time.</para> -<para>On macOS, you can create the required group and users by -running the following script: - -<programlisting> -#! /bin/bash -e - -dseditgroup -o create nixbld -q - -gid=$(dscl . -read /Groups/nixbld | awk '($1 == "PrimaryGroupID:") {print $2 }') - -echo "created nixbld group with gid $gid" - -for i in $(seq 1 10); do - user=/Users/nixbld$i - uid="$((30000 + $i))" - dscl . create $user - dscl . create $user RealName "Nix build user $i" - dscl . create $user PrimaryGroupID "$gid" - dscl . create $user UserShell /usr/bin/false - dscl . create $user NFSHomeDirectory /var/empty - dscl . create $user UniqueID "$uid" - dseditgroup -o edit -a nixbld$i -t user nixbld - echo "created nixbld$i user with uid $uid" -done -</programlisting> - -</para> - </simplesect> diff --git a/doc/manual/installation/supported-platforms.xml b/doc/manual/installation/supported-platforms.xml index a468a5640637..6858573ff407 100644 --- a/doc/manual/installation/supported-platforms.xml +++ b/doc/manual/installation/supported-platforms.xml @@ -33,7 +33,4 @@ </para> -<para>Nix is fairly portable, so it should work on most platforms that -support POSIX threads and have a C++11 compiler.</para> - </chapter> diff --git a/doc/manual/introduction/quick-start.xml b/doc/manual/introduction/quick-start.xml index aa239b7538b4..1ce6c8d50a1b 100644 --- a/doc/manual/introduction/quick-start.xml +++ b/doc/manual/introduction/quick-start.xml @@ -15,7 +15,7 @@ to subsequent chapters.</para> <step><para>Install single-user Nix by running the following: <screen> -$ curl https://nixos.org/nix/install | sh +$ bash <(curl https://nixos.org/nix/install) </screen> This will install Nix in <filename>/nix</filename>. 
The install script diff --git a/doc/manual/manual.xml b/doc/manual/manual.xml index 61205d916993..b408b6817727 100644 --- a/doc/manual/manual.xml +++ b/doc/manual/manual.xml @@ -12,19 +12,14 @@ <firstname>Eelco</firstname> <surname>Dolstra</surname> </personname> - <affiliation> - <orgname>LogicBlox</orgname> - </affiliation> <contrib>Author</contrib> </author> <copyright> - <year>2004-2014</year> + <year>2004-2018</year> <holder>Eelco Dolstra</holder> </copyright> - <date>November 2014</date> - </info> <!-- @@ -41,7 +36,6 @@ <xi:include href="expressions/writing-nix-expressions.xml" /> <xi:include href="advanced-topics/advanced-topics.xml" /> <xi:include href="command-ref/command-ref.xml" /> - <xi:include href="troubleshooting/troubleshooting.xml" /> <xi:include href="glossary/glossary.xml" /> <xi:include href="hacking.xml" /> <xi:include href="release-notes/release-notes.xml" /> diff --git a/doc/manual/release-notes/release-notes.xml b/doc/manual/release-notes/release-notes.xml index c4b14bc5499e..b8392a647af9 100644 --- a/doc/manual/release-notes/release-notes.xml +++ b/doc/manual/release-notes/release-notes.xml @@ -12,7 +12,7 @@ </partintro> --> -<xi:include href="rl-1.12.xml" /> +<xi:include href="rl-2.0.xml" /> <xi:include href="rl-1.11.10.xml" /> <xi:include href="rl-1.11.xml" /> <xi:include href="rl-1.10.xml" /> diff --git a/doc/manual/release-notes/rl-1.12.xml b/doc/manual/release-notes/rl-1.12.xml deleted file mode 100644 index 29943e3e6e97..000000000000 --- a/doc/manual/release-notes/rl-1.12.xml +++ /dev/null @@ -1,426 +0,0 @@ -<section xmlns="http://docbook.org/ns/docbook" - xmlns:xlink="http://www.w3.org/1999/xlink" - xmlns:xi="http://www.w3.org/2001/XInclude" - version="5.0" - xml:id="ssec-relnotes-1.12"> - -<title>Release 1.12 (TBA)</title> - -<para>This release has the following new features:</para> - -<itemizedlist> - - <listitem> - <para>Start of new <command>nix</command> command line - interface. 
This is a work in progress and the interface is subject - to change.</para> - - <itemizedlist> - - <listitem><para>Self-documenting: <option>--help</option> shows - all available command-line arguments.</para></listitem> - - <listitem><para><option>--help-config</option> shows all - configuration options.</para></listitem> - - <listitem><para><command>nix build</command>: Replacement for - <command>nix-build</command>.</para></listitem> - - <listitem><para><command>nix ls-store</command> and <command>nix - ls-nar</command> allow listing the contents of a store path or - NAR file.</para></listitem> - - <listitem><para><command>nix cat-store</command> and - <command>nix cat-nar</command> allow extracting a file from a - store path or NAR file.</para></listitem> - - <listitem><para><command>nix verify</command> checks whether a - store path is unmodified and/or is trusted.</para></listitem> - - <listitem><para><command>nix copy-sigs</command> copies - signatures from one store to another.</para></listitem> - - <listitem><para><command>nix sign-paths</command> signs store - paths.</para></listitem> - - <listitem><para><command>nix copy</command> copies paths between - arbitrary Nix stores, generalising - <command>nix-copy-closure</command> and - <command>nix-push</command>.</para></listitem> - - <listitem><para><command>nix path-info</command> shows - information about store paths.</para></listitem> - - <listitem><para><command>nix run</command> starts a shell in - which the specified packages are available.</para></listitem> - - <listitem><para><command>nix log</command> shows the build log - of a package or path. If the build log is not available locally, - it will try to obtain it from a binary cache.</para></listitem> - - <listitem><para><command>nix eval</command> replaces - <command>nix-instantiate --eval</command>.</para></listitem> - - <listitem><para><command>nix dump-path</command> to get a NAR - from a store path.</para></listitem> - - <listitem><para><command>nix edit</command> opens the source - code of a package in an editor.</para></listitem> - - <listitem><para><command>nix search</command> replaces - <command>nix-env -qa</command>. It searches the available - packages for occurences of a search string in the attribute - name, package name or description. It caches available packages - to speed up searches.</para></listitem> - - <listitem><para><command>nix why-depends</command> (d41c5eb13f4f3a37d80dbc6d3888644170c3b44a).</para></listitem> - - <listitem><para><command>nix show-derivation</command> (e8d6ee7c1b90a2fe6d824f1a875acc56799ae6e2).</para></listitem> - - <listitem><para><command>nix add-to-store</command> (970366266b8df712f5f9cedb45af183ef5a8357f).</para></listitem> - - <listitem><para>Progress indicator.</para></listitem> - - <listitem><para>All options are available as flags now - (b8283773bd64d7da6859ed520ee19867742a03ba).</para></listitem> - - </itemizedlist> - - </listitem> - - <listitem> - <para>The external program <command>nix-repl</command> has been - integrated into Nix as <command>nix repl</command>.</para> - </listitem> - - <listitem> - <para>New build mode <command>nix-build --hash</command> that - builds a derivation, computes the hash of the output, and moves - the output to the store path corresponding to what a fixed-output - derivation with that hash would produce. 
- (Add docs and examples; see d367b8e7875161e655deaa96bf8a5dd0bcf8229e)</para> - </listitem> - - <listitem> - <para>It is no longer necessary to set the - <envar>NIX_REMOTE</envar> environment variable if you need to use - the Nix daemon. Nix will use the daemon automatically if you don’t - have write access to the Nix database.</para> - </listitem> - - <listitem> - <para>The Nix language now supports floating point numbers. They are - based on regular C++ <literal>float</literal> and compatible with - existing integers and number-related operations. Export and import to and - from JSON and XML works, too.</para> - </listitem> - - <listitem> - <para><command>nix-shell</command> now sets the - <varname>IN_NIX_SHELL</varname> environment variable during - evaluation and in the shell itself. This can be used to perform - different actions depending on whether you’re in a Nix shell or in - a regular build. Nixpkgs provides - <varname>lib.inNixShell</varname> to check this variable during - evaluation. (bb36a1a3cf3fbe6bc9d0afcc5fa0f928bed03170)</para> - </listitem> - - <listitem> - <para>Internal: all <classname>Store</classname> classes are now - thread-safe. <classname>RemoteStore</classname> supports multiple - concurrent connections to the daemon. This is primarily useful in - multi-threaded programs such as - <command>hydra-queue-runner</command>.</para> - </listitem> - - <listitem> - <para>The dependency on Perl has been removed. As a result, some - (obsolete) programs have been removed: <command>nix-push</command> - (replaced by <command>nix copy</command>), - <command>nix-pull</command> (obsoleted by binary caches), - <command>nix-generate-patches</command>, - <command>bsdiff</command>, <command>bspatch</command>.</para> - </listitem> - - <listitem> - <para>Improved store abstraction. Substituters - eliminated. BinaryCacheStore, LocalBinaryCacheStore, - HttpBinaryCacheStore, S3BinaryCacheStore (compile-time - optional), SSHStore. Add docs + examples? - </para> - </listitem> - - <listitem> - <para>Nix now stores signatures for local store - paths. Locally-built paths are now signed automatically using the - secret keys specified by the <option>secret-key-files</option> - store option.</para> - - <para>In addition, store paths that have been built locally are - marked as “ultimately trusted”, and content-addressable store - paths carry a “content-addressability assertion” that allow them - to be trusted without any signatures.</para> - </listitem> - - <listitem> - <para><envar>NIX_PATH</envar> is now lazy, so URIs in the path are - only downloaded if they are needed for evaluation.</para> - </listitem> - - <listitem> - <para>You can now use - <uri>channel:<replaceable>channel-name</replaceable></uri> as a - short-hand for - <uri>https://nixos.org/channels/<replaceable>channel-name</replaceable>/nixexprs.tar.xz</uri>. For - example, <literal>nix-build channel:nixos-15.09 -A hello</literal> - will build the GNU Hello package from the - <literal>nixos-15.09</literal> channel.</para> - </listitem> - - <listitem> - <para>When <option>--no-build-output</option> is given, the last - 10 lines of the build log will be shown if a build - fails.</para> - </listitem> - - <listitem> - <para><function>builtins.fetchGit</function>. - (38539b943a060d9cdfc24d6e5d997c0885b8aa2f)</para> - </listitem> - - <listitem> - <para><literal><nix/fetchurl.nix></literal> now uses the - content-addressable tarball cache at - <uri>http://tarballs.nixos.org/</uri>, just like - <function>fetchurl</function> in - Nixpkgs. 
(f2682e6e18a76ecbfb8a12c17e3a0ca15c084197)</para> - </listitem> - - <listitem> - <para>Chroot Nix stores: allow the “physical” location of the Nix - store (e.g. <filename>/home/alice/nix/store</filename>) to differ - from its “logical” location (typically - <filename>/nix/store</filename>). This allows non-root users to - use Nix while still getting the benefits from prebuilt binaries - from - <uri>cache.nixos.org</uri>. (4494000e04122f24558e1436e66d20d89028b4bd, - 3eb621750848e0e6b30e5a79f76afbb096bb6c8a)</para> - </listitem> - - <listitem> - <para>On Linux, builds are now executed in a user - namespace with uid 1000 and gid 100.</para> - </listitem> - - <listitem> - <para><function>builtins.fetchurl</function> and - <function>builtins.fetchTarball</function> now support - <varname>sha256</varname> and <varname>name</varname> - attributes.</para> - </listitem> - - <listitem> - <para><literal>HttpBinaryCacheStore</literal> (the replacement of - <command>download-from-binary-cache</command>) now retries - automatically on certain HTTP error codes.</para> - </listitem> - - <listitem> - <para>Derivation attributes can now reference the outputs of the - derivation using the <function>placeholder</function> builtin - function. For example, the attribute - -<programlisting> -configureFlags = "--prefix=${placeholder "out"} --includedir=${placeholder "dev"}"; -</programlisting> - - will cause the <envar>configureFlags</envar> environment variable - to contain the actual store paths corresponding to the - <literal>out</literal> and <literal>dev</literal> outputs. TODO: - add docs.</para> - </listitem> - - <listitem> - <para>Support for HTTP/2. This makes binary cache lookups much - more efficient. (90ad02bf626b885a5dd8967894e2eafc953bdf92)</para> - </listitem> - - <listitem> - <para>The <option>build-sandbox-paths</option> configuration - option can now specify optional paths by appending a - <literal>?</literal>, e.g. <literal>/dev/nvidiactl?</literal> will - bind-mount <varname>/dev/nvidiactl</varname> only if it - exists.</para> - </listitem> - - <listitem> - <para>More support for testing build reproducibility: when - <option>enforce-determinism</option> is set to - <literal>false</literal>, it’s no longer a fatal error build - rounds produce different output - (8bdf83f936adae6f2c907a6d2541e80d4120f051); add a hook to run - diffoscope when build rounds produce different output - (9a313469a4bdea2d1e8df24d16289dc2a172a169w).</para> - </listitem> - - <listitem> - <para>Kill builds as soon as stdout/stderr is closed. This fixes a - bug that allowed builds to hang Nix indefinitely (regardless of - timeouts). (21948deed99a3295e4d5666e027a6ca42dc00b40)</para> - </listitem> - - <listitem> - <para>Add support for passing structured data to builders. TODO: - document. (6de33a9c675b187437a2e1abbcb290981a89ecb1)</para> - </listitem> - - <listitem> - <para><varname>exportReferencesGraph</varname>: Export more - complete info in JSON - format. (c2b0d8749f7e77afc1c4b3e8dd36b7ee9720af4a)</para> - </listitem> - - <listitem> - <para>Support for - netrc. (e6e74f987f0fa284d220432d426eb965269a97d6, - 302386f775eea309679654e5ea7c972fb6e7b9af)</para> - </listitem> - - <listitem> - <para>Support <uri>s3://</uri> URIs in all places where Nix allows - URIs. (9ff9c3f2f80ba4108e9c945bbfda2c64735f987b)</para> - </listitem> - - <listitem> - <para>The <option>build-max-jobs</option> option can be set to - <literal>auto</literal> to use the number of CPUs in the - system. 
(7251d048fa812d2551b7003bc9f13a8f5d4c95a5)</para> - </listitem> - - <listitem> - <para>Add support for Brotli compression. - <uri>cache.nixos.org</uri> compresses build logs using - Brotli.</para> - </listitem> - - <listitem> - <para>Substitutions from binary caches now require signatures by - default. This was already the case on - NixOS. (ecbc3fedd3d5bdc5a0e1a0a51b29062f2874ac8b)</para> - </listitem> - - <listitem> - <para><command>nix-env</command> now ignores packages with bad - derivation names (in particular those starting with a digit or - containing a - dot). (b0cb11722626e906a73f10dd9a0c9eea29faf43a)</para> - </listitem> - - <listitem> - <para>Renamed various configuration options. (TODO: in progress)</para> - </listitem> - - <listitem> - <para>Remote machines can now be specified on the command - line. TODO: - document. (1a68710d4dff609bbaf61db3e17a2573f0aadf17)</para> - </listitem> - - <listitem> - <para>In Linux sandbox builds, we now use - <filename>/build</filename> instead of <filename>/tmp</filename> - as the temporary build directory. This fixes potential security - problems when a build accidentally stores its - <envar>TMPDIR</envar> in some critical place, such as an - RPATH. (eba840c8a13b465ace90172ff76a0db2899ab11b)</para> - </listitem> - - <listitem> - <para>In Linux sandbox builds, we now provide a default - <filename>/bin/sh</filename> (namely <filename>ash</filename> from - BusyBox). (a2d92bb20e82a0957067ede60e91fab256948b41)</para> - </listitem> - - <listitem> - <para>Make all configuration options available as command line - flags (b8283773bd64d7da6859ed520ee19867742a03ba).</para> - </listitem> - - <listitem> - <para>Support base-64 - hashes. (c0015e87af70f539f24d2aa2bc224a9d8b84276b)</para> - </listitem> - - <listitem> - <para><command>nix-shell</command> now uses - <varname>bashInteractive</varname> from Nixpkgs, rather than the - <command>bash</command> command that happens to be in the caller’s - <envar>PATH</envar>. This is especially important on macOS where - the <command>bash</command> provided by the system is seriously - outdated and cannot execute <literal>stdenv</literal>’s setup - script.</para> - </listitem> - - <listitem> - <para>New builtin functions: <function>builtins.split</function> - (b8867a0239b1930a16f9ef3f7f3e864b01416dff), - <function>builtins.partition</function>.</para> - </listitem> - - <listitem> - <para>Automatic garbage collection.</para> - </listitem> - - <listitem> - <para><command>nix-store -q --roots</command> and - <command>nix-store --gc --print-roots</command> now show temporary - and in-memory roots.</para> - </listitem> - - <listitem> - <para>Builders can now communicate what build phase they are in by - writing messages to the file descriptor specified in - <envar>NIX_LOG_FD</envar>. (88e6bb76de5564b3217be9688677d1c89101b2a3) - </para> - </listitem> - -</itemizedlist> - -<para>Some features were removed:</para> - -<itemizedlist> - - <listitem> - <para>“Nested” log output. As a result, - <command>nix-log2xml</command> was also removed.</para> - </listitem> - - <listitem> - <para>OpenSSL-based signing. (f435f8247553656774dd1b2c88e9de5d59cab203)</para> - </listitem> - - <listitem> - <para>Caching of failed - builds. (8cffec84859cec8b610a2a22ab0c4d462a9351ff)</para> - </listitem> - - <listitem> - <para><filename>nix-mode.el</filename> has been removed from - Nix. 
It is now a separate repository in - <uri>https://github.com/NixOS/nix-mode</uri> and can be installed - through the MELPA package repository.</para> - </listitem> - - <listitem> - <para>In restricted evaluation mode - (<option>--restrict-eval</option>), builtin functions that - download from the network (such as <function>fetchGit</function>) - are permitted to fetch underneath the list of URI prefixes - specified in the option <option>allowed-uris</option>.</para> - </listitem> - -</itemizedlist> - -<para>This release has contributions from TBD.</para> - -</section> diff --git a/doc/manual/release-notes/rl-2.0.xml b/doc/manual/release-notes/rl-2.0.xml new file mode 100644 index 000000000000..fc9a77b08b60 --- /dev/null +++ b/doc/manual/release-notes/rl-2.0.xml @@ -0,0 +1,1012 @@ +<section xmlns="http://docbook.org/ns/docbook" + xmlns:xlink="http://www.w3.org/1999/xlink" + xmlns:xi="http://www.w3.org/2001/XInclude" + version="5.0" + xml:id="ssec-relnotes-2.0"> + +<title>Release 2.0 (2018-02-22)</title> + +<para>The following incompatible changes have been made:</para> + +<itemizedlist> + + <listitem> + <para>The manifest-based substituter mechanism + (<command>download-using-manifests</command>) has been <link + xlink:href="https://github.com/NixOS/nix/commit/867967265b80946dfe1db72d40324b4f9af988ed">removed</link>. It + has been superseded by the binary cache substituter mechanism + since several years. As a result, the following programs have been + removed: + + <itemizedlist> + <listitem><para><command>nix-pull</command></para></listitem> + <listitem><para><command>nix-generate-patches</command></para></listitem> + <listitem><para><command>bsdiff</command></para></listitem> + <listitem><para><command>bspatch</command></para></listitem> + </itemizedlist> + </para> + </listitem> + + <listitem> + <para>The “copy from other stores” substituter mechanism + (<command>copy-from-other-stores</command> and the + <envar>NIX_OTHER_STORES</envar> environment variable) has been + removed. It was primarily used by the NixOS installer to copy + available paths from the installation medium. The replacement is + to use a chroot store as a substituter + (e.g. <literal>--substituters /mnt</literal>), or to build into a + chroot store (e.g. <literal>--store /mnt --substituters /</literal>).</para> + </listitem> + + <listitem> + <para>The command <command>nix-push</command> has been removed as + part of the effort to eliminate Nix's dependency on Perl. You can + use <command>nix copy</command> instead, e.g. <literal>nix copy + --to file:///tmp/my-binary-cache <replaceable>paths…</replaceable></literal></para> + </listitem> + + <listitem> + <para>The “nested” log output feature (<option>--log-type + pretty</option>) has been removed. As a result, + <command>nix-log2xml</command> was also removed.</para> + </listitem> + + <listitem> + <para>OpenSSL-based signing has been <link + xlink:href="https://github.com/NixOS/nix/commit/f435f8247553656774dd1b2c88e9de5d59cab203">removed</link>. This + feature was never well-supported. A better alternative is provided + by the <option>secret-key-files</option> and + <option>trusted-public-keys</option> options.</para> + </listitem> + + <listitem> + <para>Failed build caching has been <link + xlink:href="https://github.com/NixOS/nix/commit/8cffec84859cec8b610a2a22ab0c4d462a9351ff">removed</link>. 
This + feature was introduced to support the Hydra continuous build + system, but Hydra no longer uses it.</para> + </listitem> + + <listitem> + <para><filename>nix-mode.el</filename> has been removed from + Nix. It is now <link + xlink:href="https://github.com/NixOS/nix-mode">a separate + repository</link> and can be installed through the MELPA package + repository.</para> + </listitem> + +</itemizedlist> + +<para>This release has the following new features:</para> + +<itemizedlist> + + <listitem> + <para>It introduces a new command named <command>nix</command>, + which is intended to eventually replace all + <command>nix-*</command> commands with a more consistent and + better designed user interface. It currently provides replacements + for some (but not all) of the functionality provided by + <command>nix-store</command>, <command>nix-build</command>, + <command>nix-shell -p</command>, <command>nix-env -qa</command>, + <command>nix-instantiate --eval</command>, + <command>nix-push</command> and + <command>nix-copy-closure</command>. It has the following major + features:</para> + + <itemizedlist> + + <listitem> + <para>Unlike the legacy commands, it has a consistent way to + refer to packages and package-like arguments (like store + paths). For example, the following commands all copy the GNU + Hello package to a remote machine: + + <screen>nix copy --to ssh://machine nixpkgs.hello</screen> + <screen>nix copy --to ssh://machine /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10</screen> + <screen>nix copy --to ssh://machine '(with import <nixpkgs> {}; hello)'</screen> + + By contrast, <command>nix-copy-closure</command> only accepted + store paths as arguments.</para> + </listitem> + + <listitem> + <para>It is self-documenting: <option>--help</option> shows + all available command-line arguments. If + <option>--help</option> is given after a subcommand, it shows + examples for that subcommand. <command>nix + --help-config</command> shows all configuration + options.</para> + </listitem> + + <listitem> + <para>It is much less verbose. By default, it displays a + single-line progress indicator that shows how many packages + are left to be built or downloaded, and (if there are running + builds) the most recent line of builder output. If a build + fails, it shows the last few lines of builder output. The full + build log can be retrieved using <command>nix + log</command>.</para> + </listitem> + + <listitem> + <para>It <link + xlink:href="https://github.com/NixOS/nix/commit/b8283773bd64d7da6859ed520ee19867742a03ba">provides</link> + all <filename>nix.conf</filename> configuration options as + command line flags. For example, instead of <literal>--option + http-connections 100</literal> you can write + <literal>--http-connections 100</literal>. Boolean options can + be written as + <literal>--<replaceable>foo</replaceable></literal> or + <literal>--no-<replaceable>foo</replaceable></literal> + (e.g. 
<option>--no-auto-optimise-store</option>).</para> + </listitem> + + <listitem> + <para>Many subcommands have a <option>--json</option> flag to + write results to stdout in JSON format.</para> + </listitem> + + </itemizedlist> + + <warning><para>Please note that the <command>nix</command> command + is a work in progress and the interface is subject to + change.</para></warning> + + <para>It provides the following high-level (“porcelain”) + subcommands:</para> + + <itemizedlist> + + <listitem> + <para><command>nix build</command> is a replacement for + <command>nix-build</command>.</para> + </listitem> + + <listitem> + <para><command>nix run</command> executes a command in an + environment in which the specified packages are available. It + is (roughly) a replacement for <command>nix-shell + -p</command>. Unlike that command, it does not execute the + command in a shell, and has a flag (<command>-c</command>) + that specifies the unquoted command line to be + executed.</para> + + <para>It is particularly useful in conjunction with chroot + stores, allowing Linux users who do not have permission to + install Nix in <command>/nix/store</command> to still use + binary substitutes that assume + <command>/nix/store</command>. For example, + + <screen>nix run --store ~/my-nix nixpkgs.hello -c hello --greeting 'Hi everybody!'</screen> + + downloads (or if not substitutes are available, builds) the + GNU Hello package into + <filename>~/my-nix/nix/store</filename>, then runs + <command>hello</command> in a mount namespace where + <filename>~/my-nix/nix/store</filename> is mounted onto + <command>/nix/store</command>.</para> + </listitem> + + <listitem> + <para><command>nix search</command> replaces <command>nix-env + -qa</command>. It searches the available packages for + occurrences of a search string in the attribute name, package + name or description. Unlike <command>nix-env -qa</command>, it + has a cache to speed up subsequent searches.</para> + </listitem> + + <listitem> + <para><command>nix copy</command> copies paths between + arbitrary Nix stores, generalising + <command>nix-copy-closure</command> and + <command>nix-push</command>.</para> + </listitem> + + <listitem> + <para><command>nix repl</command> replaces the external + program <command>nix-repl</command>. It provides an + interactive environment for evaluating and building Nix + expressions. Note that it uses <literal>linenoise-ng</literal> + instead of GNU Readline.</para> + </listitem> + + <listitem> + <para><command>nix upgrade-nix</command> upgrades Nix to the + latest stable version. This requires that Nix is installed in + a profile. (Thus it won’t work on NixOS, or if it’s installed + outside of the Nix store.)</para> + </listitem> + + <listitem> + <para><command>nix verify</command> checks whether store paths + are unmodified and/or “trusted” (see below). It replaces + <command>nix-store --verify</command> and <command>nix-store + --verify-path</command>.</para> + </listitem> + + <listitem> + <para><command>nix log</command> shows the build log of a + package or path. 
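For example, a command such as

<screen>nix log nixpkgs.hello</screen>

          prints the log of the build that produced the
          <literal>hello</literal> package.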
If the build log is not available locally, it + will try to obtain it from the configured substituters (such + as <uri>cache.nixos.org</uri>, which now provides build + logs).</para> + </listitem> + + <listitem> + <para><command>nix edit</command> opens the source code of a + package in your editor.</para> + </listitem> + + <listitem> + <para><command>nix eval</command> replaces + <command>nix-instantiate --eval</command>.</para> + </listitem> + + <listitem> + <para><command + xlink:href="https://github.com/NixOS/nix/commit/d41c5eb13f4f3a37d80dbc6d3888644170c3b44a">nix + why-depends</command> shows why one store path has another in + its closure. This is primarily useful to finding the causes of + closure bloat. For example, + + <screen>nix why-depends nixpkgs.vlc nixpkgs.libdrm.dev</screen> + + shows a chain of files and fragments of file contents that + cause the VLC package to have the “dev” output of + <literal>libdrm</literal> in its closure — an undesirable + situation.</para> + </listitem> + + <listitem> + <para><command>nix path-info</command> shows information about + store paths, replacing <command>nix-store -q</command>. A + useful feature is the option <option>--closure-size</option> + (<option>-S</option>). For example, the following command show + the closure sizes of every path in the current NixOS system + closure, sorted by size: + + <screen>nix path-info -rS /run/current-system | sort -nk2</screen> + + </para> + </listitem> + + <listitem> + <para><command>nix optimise-store</command> replaces + <command>nix-store --optimise</command>. The main difference + is that it has a progress indicator.</para> + </listitem> + + </itemizedlist> + + <para>A number of low-level (“plumbing”) commands are also + available:</para> + + <itemizedlist> + + <listitem> + <para><command>nix ls-store</command> and <command>nix + ls-nar</command> list the contents of a store path or NAR + file. The former is primarily useful in conjunction with + remote stores, e.g. + + <screen>nix ls-store --store https://cache.nixos.org/ -lR /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10</screen> + + lists the contents of path in a binary cache.</para> + </listitem> + + <listitem> + <para><command>nix cat-store</command> and <command>nix + cat-nar</command> allow extracting a file from a store path or + NAR file.</para> + </listitem> + + <listitem> + <para><command>nix dump-path</command> writes the contents of + a store path to stdout in NAR format. This replaces + <command>nix-store --dump</command>.</para> + </listitem> + + <listitem> + <para><command + xlink:href="https://github.com/NixOS/nix/commit/e8d6ee7c1b90a2fe6d824f1a875acc56799ae6e2">nix + show-derivation</command> displays a store derivation in JSON + format. 
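For instance, a hypothetical invocation could be

<screen>nix show-derivation /nix/store/<replaceable>hash</replaceable>-hello-2.10.drv</screen>
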
This is an alternative to + <command>pp-aterm</command>.</para> + </listitem> + + <listitem> + <para><command + xlink:href="https://github.com/NixOS/nix/commit/970366266b8df712f5f9cedb45af183ef5a8357f">nix + add-to-store</command> replaces <command>nix-store + --add</command>.</para> + </listitem> + + <listitem> + <para><command>nix sign-paths</command> signs store + paths.</para> + </listitem> + + <listitem> + <para><command>nix copy-sigs</command> copies signatures from + one store to another.</para> + </listitem> + + <listitem> + <para><command>nix show-config</command> shows all + configuration options and their current values.</para> + </listitem> + + </itemizedlist> + + </listitem> + + <listitem> + <para>The store abstraction that Nix has had for a long time to + support store access via the Nix daemon has been extended + significantly. In particular, substituters (which used to be + external programs such as + <command>download-from-binary-cache</command>) are now subclasses + of the abstract <classname>Store</classname> class. This allows + many Nix commands to operate on such store types. For example, + <command>nix path-info</command> shows information about paths in + your local Nix store, while <command>nix path-info --store + https://cache.nixos.org/</command> shows information about paths + in the specified binary cache. Similarly, + <command>nix-copy-closure</command>, <command>nix-push</command> + and substitution are all instances of the general notion of + copying paths between different kinds of Nix stores.</para> + + <para>Stores are specified using an URI-like syntax, + e.g. <uri>https://cache.nixos.org/</uri> or + <uri>ssh://machine</uri>. The following store types are supported: + + <itemizedlist> + + <listitem> + + <para><classname>LocalStore</classname> (stori URI + <literal>local</literal> or an absolute path) and the misnamed + <classname>RemoteStore</classname> (<literal>daemon</literal>) + provide access to a local Nix store, the latter via the Nix + daemon. You can use <literal>auto</literal> or the empty + string to auto-select a local or daemon store depending on + whether you have write permission to the Nix store. It is no + longer necessary to set the <envar>NIX_REMOTE</envar> + environment variable to use the Nix daemon.</para> + + <para>As noted above, <classname>LocalStore</classname> now + supports chroot builds, allowing the “physical” location of + the Nix store + (e.g. <filename>/home/alice/nix/store</filename>) to differ + from its “logical” location (typically + <filename>/nix/store</filename>). This allows non-root users + to use Nix while still getting the benefits from prebuilt + binaries from <uri>cache.nixos.org</uri>.</para> + + </listitem> + + <listitem> + + <para><classname>BinaryCacheStore</classname> is the abstract + superclass of all binary cache stores. It supports writing + build logs and NAR content listings in JSON format.</para> + + </listitem> + + <listitem> + + <para><classname>HttpBinaryCacheStore</classname> + (<literal>http://</literal>, <literal>https://</literal>) + supports binary caches via HTTP or HTTPS. 
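For example, querying a path in the public cache over HTTPS might
          look like

<screen>nix path-info --store https://cache.nixos.org/ /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10</screen>
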
If the server + supports <literal>PUT</literal> requests, it supports + uploading store paths via commands such as <command>nix + copy</command>.</para> + + </listitem> + + <listitem> + + <para><classname>LocalBinaryCacheStore</classname> + (<literal>file://</literal>) supports binary caches in the + local filesystem.</para> + + </listitem> + + <listitem> + + <para><classname>S3BinaryCacheStore</classname> + (<literal>s3://</literal>) supports binary caches stored in + Amazon S3, if enabled at compile time.</para> + + </listitem> + + <listitem> + + <para><classname>LegacySSHStore</classname> (<literal>ssh://</literal>) + is used to implement remote builds and + <command>nix-copy-closure</command>.</para> + + </listitem> + + <listitem> + + <para><classname>SSHStore</classname> + (<literal>ssh-ng://</literal>) supports arbitrary Nix + operations on a remote machine via the same protocol used by + <command>nix-daemon</command>.</para> + + </listitem> + + </itemizedlist> + + </para> + + </listitem> + + <listitem> + + <para>Security has been improved in various ways: + + <itemizedlist> + + <listitem> + <para>Nix now stores signatures for local store + paths. When paths are copied between stores (e.g., copied from + a binary cache to a local store), signatures are + propagated.</para> + + <para>Locally-built paths are signed automatically using the + secret keys specified by the <option>secret-key-files</option> + store option. Secret/public key pairs can be generated using + <command>nix-store + --generate-binary-cache-key</command>.</para> + + <para>In addition, locally-built store paths are marked as + “ultimately trusted”, but this bit is not propagated when + paths are copied between stores.</para> + </listitem> + + <listitem> + <para>Content-addressable store paths no longer require + signatures — they can be imported into a store by unprivileged + users even if they lack signatures.</para> + </listitem> + + <listitem> + <para>The command <command>nix verify</command> checks whether + the specified paths are trusted, i.e., have a certain number + of trusted signatures, are ultimately trusted, or are + content-addressed.</para> + </listitem> + + <listitem> + <para>Substitutions from binary caches <link + xlink:href="https://github.com/NixOS/nix/commit/ecbc3fedd3d5bdc5a0e1a0a51b29062f2874ac8b">now</link> + require signatures by default. This was already the case on + NixOS.</para> + </listitem> + + <listitem> + <para>In Linux sandbox builds, we <link + xlink:href="https://github.com/NixOS/nix/commit/eba840c8a13b465ace90172ff76a0db2899ab11b">now</link> + use <filename>/build</filename> instead of + <filename>/tmp</filename> as the temporary build + directory. This fixes potential security problems when a build + accidentally stores its <envar>TMPDIR</envar> in some + security-sensitive place, such as an RPATH.</para> + </listitem> + + </itemizedlist> + + </para> + + </listitem> + + <listitem> + <para><emphasis>Pure evaluation mode</emphasis>. This is a variant + of the existing restricted evaluation mode. In pure mode, the Nix + evaluator forbids access to anything that could cause different + evaluations of the same command line arguments to produce a + different result. This includes builtin functions such as + <function>builtins.getEnv</function>, but more importantly, + <emphasis>all</emphasis> filesystem or network access unless a + content hash or commit hash is specified. 
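A pinned fetch therefore looks something like the following sketch
    (the URL is illustrative and the hash is a placeholder):

<programlisting>
builtins.fetchGit {
  url = https://github.com/NixOS/nixpkgs.git;
  rev = "<replaceable>commit-hash</replaceable>";
}
</programlisting>
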
For example, calls to + <function>builtins.fetchGit</function> are only allowed if a + <varname>rev</varname> attribute is specified.</para> + + <para>The goal of this feature is to enable true reproducibility + and traceability of builds (including NixOS system configurations) + at the evaluation level. For example, in the future, + <command>nixos-rebuild</command> might build configurations from a + Nix expression in a Git repository in pure mode. That expression + might fetch other repositories such as Nixpkgs via + <function>builtins.fetchGit</function>. The commit hash of the + top-level repository then uniquely identifies a running system, + and, in conjunction with that repository, allows it to be + reproduced or modified.</para> + + </listitem> + + <listitem> + <para>There are several new features to support binary + reproducibility (i.e. to help ensure that multiple builds of the + same derivation produce exactly the same output). When + <option>enforce-determinism</option> is set to + <literal>false</literal>, it’s <link + xlink:href="https://github.com/NixOS/nix/commit/8bdf83f936adae6f2c907a6d2541e80d4120f051">no + longer</link> a fatal error if build rounds produce different + output. Also, a hook named <option>diff-hook</option> is <link + xlink:href="https://github.com/NixOS/nix/commit/9a313469a4bdea2d1e8df24d16289dc2a172a169">provided</link> + to allow you to run tools such as <command>diffoscope</command> + when build rounds produce different output.</para> + </listitem> + + <listitem> + <para>Configuring remote builds is a lot easier now. Provided you + are not using the Nix daemon, you can now just specify a remote + build machine on the command line, e.g. <literal>--option builders + 'ssh://my-mac x86_64-darwin'</literal>. The environment variable + <envar>NIX_BUILD_HOOK</envar> has been removed and is no longer + needed. The environment variable <envar>NIX_REMOTE_SYSTEMS</envar> + is still supported for compatibility, but it is also possible to + specify builders in <command>nix.conf</command> by setting the + option <literal>builders = + @<replaceable>path</replaceable></literal>.</para> + </listitem> + + <listitem> + <para>If a fixed-output derivation produces a result with an + incorrect hash, the output path is moved to the location + corresponding to the actual hash and registered as valid. Thus, a + subsequent build of the fixed-output derivation with the correct + hash is unnecessary.</para> + </listitem> + + <listitem> + <para><command>nix-shell</command> <link + xlink:href="https://github.com/NixOS/nix/commit/ea59f39326c8e9dc42dfed4bcbf597fbce58797c">now</link> + sets the <varname>IN_NIX_SHELL</varname> environment variable + during evaluation and in the shell itself. This can be used to + perform different actions depending on whether you’re in a Nix + shell or in a regular build. Nixpkgs provides + <varname>lib.inNixShell</varname> to check this variable during + evaluation.</para> + </listitem> + + <listitem> + <para><envar>NIX_PATH</envar> is now lazy, so URIs in the path are + only downloaded if they are needed for evaluation.</para> + </listitem> + + <listitem> + <para>You can now use + <uri>channel:<replaceable>channel-name</replaceable></uri> as a + short-hand for + <uri>https://nixos.org/channels/<replaceable>channel-name</replaceable>/nixexprs.tar.xz</uri>. For + example, <literal>nix-build channel:nixos-15.09 -A hello</literal> + will build the GNU Hello package from the + <literal>nixos-15.09</literal> channel. 
In the future, this may + use Git to fetch updates more efficiently.</para> + </listitem> + + <listitem> + <para>When <option>--no-build-output</option> is given, the last + 10 lines of the build log will be shown if a build + fails.</para> + </listitem> + + <listitem> + <para>Networking has been improved: + + <itemizedlist> + + <listitem> + <para>HTTP/2 is now supported. This makes binary cache lookups + <link + xlink:href="https://github.com/NixOS/nix/commit/90ad02bf626b885a5dd8967894e2eafc953bdf92">much + more efficient</link>.</para> + </listitem> + + <listitem> + <para>We now retry downloads on many HTTP errors, making + binary caches substituters more resilient to temporary + failures.</para> + </listitem> + + <listitem> + <para>HTTP credentials can now be configured via the standard + <filename>netrc</filename> mechanism.</para> + </listitem> + + <listitem> + <para>If S3 support is enabled at compile time, + <uri>s3://</uri> URIs are <link + xlink:href="https://github.com/NixOS/nix/commit/9ff9c3f2f80ba4108e9c945bbfda2c64735f987b">supported</link> + in all places where Nix allows URIs.</para> + </listitem> + + <listitem> + <para>Brotli compression is now supported. In particular, + <uri>cache.nixos.org</uri> build logs are now compressed using + Brotli.</para> + </listitem> + + </itemizedlist> + + </para> + + </listitem> + + <listitem> + <para><command>nix-env</command> <link + xlink:href="https://github.com/NixOS/nix/commit/b0cb11722626e906a73f10dd9a0c9eea29faf43a">now</link> + ignores packages with bad derivation names (in particular those + starting with a digit or containing a dot).</para> + </listitem> + + <listitem> + <para>Many configuration options have been renamed, either because + they were unnecessarily verbose + (e.g. <option>build-use-sandbox</option> is now just + <option>sandbox</option>) or to reflect generalised behaviour + (e.g. <option>binary-caches</option> is now + <option>substituters</option> because it allows arbitrary store + URIs). The old names are still supported for compatibility.</para> + </listitem> + + <listitem> + <para>The <option>max-jobs</option> option can <link + xlink:href="https://github.com/NixOS/nix/commit/7251d048fa812d2551b7003bc9f13a8f5d4c95a5">now</link> + be set to <literal>auto</literal> to use the number of CPUs in the + system.</para> + </listitem> + + <listitem> + <para>Hashes can <link + xlink:href="https://github.com/NixOS/nix/commit/c0015e87af70f539f24d2aa2bc224a9d8b84276b">now</link> + be specified in base-64 format, in addition to base-16 and the + non-standard base-32.</para> + </listitem> + + <listitem> + <para><command>nix-shell</command> now uses + <varname>bashInteractive</varname> from Nixpkgs, rather than the + <command>bash</command> command that happens to be in the caller’s + <envar>PATH</envar>. This is especially important on macOS where + the <command>bash</command> provided by the system is seriously + outdated and cannot execute <literal>stdenv</literal>’s setup + script.</para> + </listitem> + + <listitem> + <para>Nix can now automatically trigger a garbage collection if + free disk space drops below a certain level during a build. This + is configured using the <option>min-free</option> and + <option>max-free</option> options.</para> + </listitem> + + <listitem> + <para><command>nix-store -q --roots</command> and + <command>nix-store --gc --print-roots</command> now show temporary + and in-memory roots.</para> + </listitem> + + <listitem> + <para> + Nix can now be extended with plugins. 
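As a sketch (the library path here is made up), a plugin is loaded
      by pointing the <option>plugin-files</option> option at a shared
      object in <filename>nix.conf</filename>:

<programlisting>
plugin-files = /path/to/libnix-extra-builtins.so
</programlisting>
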
See the documentation of + the <option>plugin-files</option> option for more details. + </para> + </listitem> + +</itemizedlist> + +<para>The Nix language has the following new features: + +<itemizedlist> + + <listitem> + <para>It supports floating point numbers. They are based on the + C++ <literal>float</literal> type and are supported by the + existing numerical operators. Export and import to and from JSON + and XML works, too.</para> + </listitem> + + <listitem> + <para>Derivation attributes can now reference the outputs of the + derivation using the <function>placeholder</function> builtin + function. For example, the attribute + +<programlisting> +configureFlags = "--prefix=${placeholder "out"} --includedir=${placeholder "dev"}"; +</programlisting> + + will cause the <envar>configureFlags</envar> environment variable + to contain the actual store paths corresponding to the + <literal>out</literal> and <literal>dev</literal> outputs.</para> + </listitem> + +</itemizedlist> + +</para> + +<para>The following builtin functions are new or extended: + +<itemizedlist> + + <listitem> + <para><function + xlink:href="https://github.com/NixOS/nix/commit/38539b943a060d9cdfc24d6e5d997c0885b8aa2f">builtins.fetchGit</function> + allows Git repositories to be fetched at evaluation time. Thus it + differs from the <function>fetchgit</function> function in + Nixpkgs, which fetches at build time and cannot be used to fetch + Nix expressions during evaluation. A typical use case is to import + external NixOS modules from your configuration, e.g. + + <programlisting>imports = [ (builtins.fetchGit https://github.com/edolstra/dwarffs + "/module.nix") ];</programlisting> + + </para> + </listitem> + + <listitem> + <para>Similarly, <function>builtins.fetchMercurial</function> + allows you to fetch Mercurial repositories.</para> + </listitem> + + <listitem> + <para><function>builtins.path</function> generalises + <function>builtins.filterSource</function> and path literals + (e.g. <literal>./foo</literal>). It allows specifying a store path + name that differs from the source path name + (e.g. <literal>builtins.path { path = ./foo; name = "bar"; + }</literal>) and also supports filtering out unwanted + files.</para> + </listitem> + + <listitem> + <para><function>builtins.fetchurl</function> and + <function>builtins.fetchTarball</function> now support + <varname>sha256</varname> and <varname>name</varname> + attributes.</para> + </listitem> + + <listitem> + <para><function + xlink:href="https://github.com/NixOS/nix/commit/b8867a0239b1930a16f9ef3f7f3e864b01416dff">builtins.split</function> + splits a string using a POSIX extended regular expression as the + separator.</para> + </listitem> + + <listitem> + <para><function + xlink:href="https://github.com/NixOS/nix/commit/26d92017d3b36cff940dcb7d1611c42232edb81a">builtins.partition</function> + partitions the elements of a list into two lists, depending on a + Boolean predicate.</para> + </listitem> + + <listitem> + <para><literal><nix/fetchurl.nix></literal> now uses the + content-addressable tarball cache at + <uri>http://tarballs.nixos.org/</uri>, just like + <function>fetchurl</function> in + Nixpkgs. 
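<para>A minimal usage sketch (my own illustration; the URL and hash are the
    stock GNU Hello example and should be replaced with real values):

<programlisting>
# Sketch: fetch a fixed-output tarball via the built-in fetchurl expression,
# which now goes through the content-addressable tarball cache.
import <nix/fetchurl.nix> {
  url = "https://ftp.gnu.org/gnu/hello/hello-2.10.tar.gz";
  sha256 = "0ssi1wpaf7plaswqqjwigppsg5fyh99vdlb9kzl7c9lng89ndq1i";
}
</programlisting>
</para>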
(f2682e6e18a76ecbfb8a12c17e3a0ca15c084197)</para> + </listitem> + + <listitem> + <para>In restricted and pure evaluation mode, builtin functions + that download from the network (such as + <function>fetchGit</function>) are permitted to fetch underneath a + list of URI prefixes specified in the option + <option>allowed-uris</option>.</para> + </listitem> + +</itemizedlist> + +</para> + +<para>The Nix build environment has the following changes: + +<itemizedlist> + + <listitem> + <para>Values such as Booleans, integers, (nested) lists and + attribute sets can <link + xlink:href="https://github.com/NixOS/nix/commit/6de33a9c675b187437a2e1abbcb290981a89ecb1">now</link> + be passed to builders in a non-lossy way. If the special attribute + <varname>__structuredAttrs</varname> is set to + <literal>true</literal>, the other derivation attributes are + serialised in JSON format and made available to the builder via + the file <envar>.attrs.json</envar> in the builder’s temporary + directory. This obviates the need for + <varname>passAsFile</varname> since JSON files have no size + restrictions, unlike process environments.</para> + + <para><link + xlink:href="https://github.com/NixOS/nix/commit/2d5b1b24bf70a498e4c0b378704cfdb6471cc699">As + a convenience to Bash builders</link>, Nix writes a script named + <envar>.attrs.sh</envar> to the builder’s directory that + initialises shell variables corresponding to all attributes that + are representable in Bash. This includes non-nested (associative) + arrays. For example, the attribute <literal>hardening.format = + true</literal> ends up as the Bash associative array element + <literal>${hardening[format]}</literal>.</para> + </listitem> + + <listitem> + <para>Builders can <link + xlink:href="https://github.com/NixOS/nix/commit/88e6bb76de5564b3217be9688677d1c89101b2a3">now</link> + communicate what build phase they are in by writing messages to + the file descriptor specified in <envar>NIX_LOG_FD</envar>. The + current phase is shown by the <command>nix</command> progress + indicator. + </para> + </listitem> + + <listitem> + <para>In Linux sandbox builds, we <link + xlink:href="https://github.com/NixOS/nix/commit/a2d92bb20e82a0957067ede60e91fab256948b41">now</link> + provide a default <filename>/bin/sh</filename> (namely + <filename>ash</filename> from BusyBox).</para> + </listitem> + + <listitem> + <para>In structured attribute mode, + <varname>exportReferencesGraph</varname> <link + xlink:href="https://github.com/NixOS/nix/commit/c2b0d8749f7e77afc1c4b3e8dd36b7ee9720af4a">exports</link> + extended information about closures in JSON format. In particular, + it includes the sizes and hashes of paths. This is primarily + useful for NixOS image builders.</para> + </listitem> + + <listitem> + <para>Builds are <link + xlink:href="https://github.com/NixOS/nix/commit/21948deed99a3295e4d5666e027a6ca42dc00b40">now</link> + killed as soon as Nix receives EOF on the builder’s stdout or + stderr. This fixes a bug that allowed builds to hang Nix + indefinitely, regardless of + timeouts.</para> + </listitem> + + <listitem> + <para>The <option>sandbox-paths</option> configuration + option can now specify optional paths by appending a + <literal>?</literal>, e.g. 
<literal>/dev/nvidiactl?</literal> will + bind-mount <varname>/dev/nvidiactl</varname> only if it + exists.</para> + </listitem> + + <listitem> + <para>On Linux, builds are now executed in a user + namespace with UID 1000 and GID 100.</para> + </listitem> + +</itemizedlist> + +</para> + +<para>A number of significant internal changes were made: + +<itemizedlist> + + <listitem> + <para>Nix no longer depends on Perl and all Perl components have + been rewritten in C++ or removed. The Perl bindings that used to + be part of Nix have been moved to a separate package, + <literal>nix-perl</literal>.</para> + </listitem> + + <listitem> + <para>All <classname>Store</classname> classes are now + thread-safe. <classname>RemoteStore</classname> supports multiple + concurrent connections to the daemon. This is primarily useful in + multi-threaded programs such as + <command>hydra-queue-runner</command>.</para> + </listitem> + +</itemizedlist> + +</para> + +<para>This release has contributions from + +Adrien Devresse, +Alexander Ried, +Alex Cruice, +Alexey Shmalko, +AmineChikhaoui, +Andy Wingo, +Aneesh Agrawal, +Anthony Cowley, +Armijn Hemel, +aszlig, +Ben Gamari, +Benjamin Hipple, +Benjamin Staffin, +Benno Fünfstück, +Bjørn Forsman, +Brian McKenna, +Charles Strahan, +Chase Adams, +Chris Martin, +Christian Theune, +Chris Warburton, +Daiderd Jordan, +Dan Connolly, +Daniel Peebles, +Dan Peebles, +davidak, +David McFarland, +Dmitry Kalinkin, +Domen Kožar, +Eelco Dolstra, +Emery Hemingway, +Eric Litak, +Eric Wolf, +Fabian Schmitthenner, +Frederik Rietdijk, +Gabriel Gonzalez, +Giorgio Gallo, +Graham Christensen, +Guillaume Maudoux, +Harmen, +Iavael, +James Broadhead, +James Earl Douglas, +Janus Troelsen, +Jeremy Shaw, +Joachim Schiele, +Joe Hermaszewski, +Joel Moberg, +Johannes 'fish' Ziemke, +Jörg Thalheim, +Jude Taylor, +kballou, +Keshav Kini, +Kjetil Orbekk, +Langston Barrett, +Linus Heckemann, +Ludovic Courtès, +Manav Rathi, +Marc Scholten, +Markus Hauck, +Matt Audesse, +Matthew Bauer, +Matthias Beyer, +Matthieu Coudron, +N1X, +Nathan Zadoks, +Neil Mayhew, +Nicolas B. Pierron, +Niklas Hambüchen, +Nikolay Amiantov, +Ole Jørgen Brønner, +Orivej Desh, +Peter Simons, +Peter Stuart, +Pyry Jahkola, +regnat, +Renzo Carbonara, +Rhys, +Robert Vollmert, +Scott Olson, +Scott R. Parish, +Sergei Trofimovich, +Shea Levy, +Sheena Artrip, +Spencer Baugh, +Stefan Junker, +Susan Potter, +Thomas Tuegel, +Timothy Allen, +Tristan Hume, +Tuomas Tynkkynen, +tv, +Tyson Whitehead, +Vladimír Čunát, +Will Dietz, +wmertens, +Wout Mertens, +zimbatm and +Zoran Plesivčak. 
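<para>As a closing illustration (my own sketch, not part of the notes above),
the new pure builtins described earlier can be exercised together in a single
expression:

<programlisting>
# Hypothetical sketch of the new list/string builtins.
let
  # splits at dots, dashes and digit/non-digit boundaries
  components = builtins.splitVersion "2.0.1pre5";
  # partition returns an attribute set { right = ...; wrong = ...; }
  small      = builtins.partition (x: x < 3) [ 1 5 2 8 ];
  # split interleaves unmatched text with lists of regex captures
  words      = builtins.split "[[:space:]]+" "nix 2.0 release";
in { inherit components small words; }
</programlisting>
</para>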
+</para> + +</section> diff --git a/doc/manual/style.css b/doc/manual/style.css index 53fd9d5709c3..592583ab086a 100644 --- a/doc/manual/style.css +++ b/doc/manual/style.css @@ -96,7 +96,6 @@ div.example margin-right: 1.5em; background: #f4f4f8; border-radius: 0.4em; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; } div.example p.title @@ -106,7 +105,6 @@ div.example p.title div.example pre { - box-shadow: none; } @@ -116,15 +114,12 @@ div.example pre pre.screen, pre.programlisting { - border: 1px solid #b0b0b0; - padding: 3px 3px; + padding: 6px 6px; margin-left: 1.5em; margin-right: 1.5em; color: #600000; background: #f4f4f8; font-family: monospace; - border-radius: 0.4em; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; } div.example pre.programlisting @@ -149,7 +144,6 @@ div.example pre.programlisting padding: 0.3em 0.3em 0.3em 0.3em; background: #fffff5; border-radius: 0.4em; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; } div.note, div.warning @@ -256,16 +250,14 @@ span.command strong div.calloutlist table { - box-shadow: none; } table { border-collapse: collapse; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; } div.affiliation { font-style: italic; -} \ No newline at end of file +} diff --git a/doc/manual/troubleshooting/collisions-nixenv.xml b/doc/manual/troubleshooting/collisions-nixenv.xml deleted file mode 100644 index 23cc43faf088..000000000000 --- a/doc/manual/troubleshooting/collisions-nixenv.xml +++ /dev/null @@ -1,38 +0,0 @@ -<section xmlns="http://docbook.org/ns/docbook" - xmlns:xlink="http://www.w3.org/1999/xlink" - xmlns:xi="http://www.w3.org/2001/XInclude" - version="5.0" - xml:id="sec-collisions-nixenv"> - -<title>Collisions in <command>nix-env</command></title> - -<para>Symptom: when installing or upgrading, you get an error message such as - -<screen> -$ nix-env -i docbook-xml -... -adding /nix/store/s5hyxgm62gk2...-docbook-xml-4.2 -collision between `/nix/store/s5hyxgm62gk2...-docbook-xml-4.2/xml/dtd/docbook/calstblx.dtd' - and `/nix/store/06h377hr4b33...-docbook-xml-4.3/xml/dtd/docbook/calstblx.dtd' - at /nix/store/...-builder.pl line 62.</screen> - -</para> - -<para>The cause is that two installed packages in the user environment -have overlapping filenames (e.g., -<filename>xml/dtd/docbook/calstblx.dtd</filename>. This usually -happens when you accidentally try to install two versions of the same -package. For instance, in the example above, the Nix Packages -collection contains two versions of <literal>docbook-xml</literal>, so -<command>nix-env -i</command> will try to install both. 
The default -user environment builder has no way to way to resolve such conflicts, -so it just gives up.</para> - -<para>Solution: remove one of the offending packages from the user -environment (if already installed) using <command>nix-env --e</command>, or specify exactly which version should be installed -(e.g., <literal>nix-env -i docbook-xml-4.2</literal>).</para> - -<!-- FIXME: describe priorities --> - -</section> diff --git a/doc/manual/troubleshooting/links-nix-store.xml b/doc/manual/troubleshooting/links-nix-store.xml deleted file mode 100644 index c768889567d0..000000000000 --- a/doc/manual/troubleshooting/links-nix-store.xml +++ /dev/null @@ -1,43 +0,0 @@ -<section xmlns="http://docbook.org/ns/docbook" - xmlns:xlink="http://www.w3.org/1999/xlink" - xmlns:xi="http://www.w3.org/2001/XInclude" - version="5.0" - xml:id="sec-links-nix-store"> - -<title><quote>Too many links</quote> Error in the Nix store</title> - - -<para>Symptom: when building something, you get an error message such as - -<screen> -... -<literal>mkdir: cannot create directory `/nix/store/<replaceable>name</replaceable>': Too many links</literal></screen> - -</para> - -<para>This is usually because you have more than 32,000 subdirectories -in <filename>/nix/store</filename>, as can be seen using <command>ls --l</command>: - -<screen> -$ ls -ld /nix/store -drwxrwxrwt 32000 nix nix 4620288 Sep 8 15:08 store</screen> - -The <literal>ext2</literal> file system is limited to an inode link -count of 32,000 (each subdirectory increasing the count by one). -Furthermore, the <literal>st_nlink</literal> field of the -<function>stat</function> system call is a 16-bit value.</para> - -<para>This only happens on very large Nix installations (such as build -machines).</para> - -<para>Quick solution: run the garbage collector. You may want to use -the <option>--max-links</option> option.</para> - -<para>Real solution: put the Nix store on a file system that supports -more than 32,000 subdirectories per directory, such as ext4. (This -doesn’t solve the <literal>st_nlink</literal> limit, but ext4 lies to -the kernel by reporting a link count of 1 if it exceeds the -limit.)</para> - -</section> diff --git a/doc/manual/troubleshooting/troubleshooting.xml b/doc/manual/troubleshooting/troubleshooting.xml deleted file mode 100644 index 1e973a192b18..000000000000 --- a/doc/manual/troubleshooting/troubleshooting.xml +++ /dev/null @@ -1,16 +0,0 @@ -<appendix xmlns="http://docbook.org/ns/docbook" - xmlns:xlink="http://www.w3.org/1999/xlink" - xmlns:xi="http://www.w3.org/2001/XInclude" - version="5.0" - xml:id="ch-troubleshooting"> - -<title>Troubleshooting</title> - -<para>This section provides solutions for some common problems. 
See -the <link xlink:href="https://github.com/NixOS/nix/issues">Nix bug -tracker</link> for a list of currently known issues.</para> - -<xi:include href="collisions-nixenv.xml" /> -<xi:include href="links-nix-store.xml" /> - -</appendix> diff --git a/local.mk b/local.mk index 0a225423741d..40a910991a48 100644 --- a/local.mk +++ b/local.mk @@ -1,6 +1,5 @@ ifeq ($(MAKECMDGOALS), dist) - # Make sure we are in repo root with `--git-dir` - dist-files += $(shell git --git-dir=.git ls-files || find * -type f) + dist-files += $(shell cat .dist-files) endif dist-files += configure config.h.in nix.spec perl/configure diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl index 313ff93d4c86..aa7633a709c1 100755 --- a/maintainers/upload-release.pl +++ b/maintainers/upload-release.pl @@ -6,6 +6,7 @@ use Data::Dumper; use File::Basename; use File::Path; use File::Slurp; +use File::Copy; use JSON::PP; use LWP::UserAgent; @@ -54,7 +55,7 @@ sub downloadFile { my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json')); - my $srcFile = $buildInfo->{buildproducts}->{$productNr}->{path} or die; + my $srcFile = $buildInfo->{buildproducts}->{$productNr}->{path} or die "job '$jobName' lacks product $productNr\n"; $dstName //= basename($srcFile); my $dstFile = "$releaseDir/" . $dstName; @@ -78,9 +79,8 @@ sub downloadFile { return ($dstFile, $sha256_expected); } -downloadFile("tarball", "2"); # PDF -downloadFile("tarball", "3"); # .tar.bz2 -my ($tarball, $tarballHash) = downloadFile("tarball", "4"); # .tar.xz +downloadFile("tarball", "2"); # .tar.bz2 +my ($tarball, $tarballHash) = downloadFile("tarball", "3"); # .tar.xz my ($tarball_i686_linux, $tarball_i686_linux_hash) = downloadFile("binaryTarball.i686-linux", "1"); my ($tarball_x86_64_linux, $tarball_x86_64_linux_hash) = downloadFile("binaryTarball.x86_64-linux", "1"); my ($tarball_aarch64_linux, $tarball_aarch64_linux_hash) = downloadFile("binaryTarball.aarch64-linux", "1"); @@ -152,4 +152,9 @@ write_file("$siteDir/nix-release.tt", system("cd $siteDir && nix-shell --run 'make nix/install nix/install.sig'") == 0 or die; +copy("$siteDir/nix/install", "$siteDir/nix/install-$version") or die; +copy("$siteDir/nix/install.sig", "$siteDir/nix/install-$version.sig") or die; + +system("cd $siteDir && git add nix/install-$version nix/install-$version.sig") == 0 or die; + system("cd $siteDir && git commit -a -m 'Nix $version released'") == 0 or die; diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in index 66fcd155ee9b..549619a57d56 100644 --- a/misc/launchd/org.nixos.nix-daemon.plist.in +++ b/misc/launchd/org.nixos.nix-daemon.plist.in @@ -4,6 +4,8 @@ <dict> <key>Label</key> <string>org.nixos.nix-daemon</string> + <key>KeepAlive</key> + <true/> <key>RunAtLoad</key> <true/> <key>Program</key> diff --git a/mk/libraries.mk b/mk/libraries.mk index 3cd7a53107bd..14c95fa91cf6 100644 --- a/mk/libraries.mk +++ b/mk/libraries.mk @@ -45,6 +45,11 @@ endif # - $(1)_INSTALL_DIR: the directory where the library will be # installed. Defaults to $(libdir). # +# - $(1)_EXCLUDE_FROM_LIBRARY_LIST: if defined, the library will not +# be automatically marked as a dependency of the top-level all +# target andwill not be listed in the make help output. This is +# useful for libraries built solely for testing, for example. +# # - BUILD_SHARED_LIBS: if equal to ‘1’, a dynamic library will be # built, otherwise a static library. 
define build-library @@ -149,7 +154,9 @@ define build-library $(1)_DEPS := $$(foreach fn, $$($(1)_OBJS), $$(call filename-to-dep, $$(fn))) -include $$($(1)_DEPS) + ifndef $(1)_EXCLUDE_FROM_LIBRARY_LIST libs-list += $$($(1)_PATH) + endif clean-files += $$(_d)/*.a $$(_d)/*.$(SO_EXT) $$(_d)/*.o $$(_d)/.*.dep $$($(1)_DEPS) $$($(1)_OBJS) dist-files += $$(_srcs) endef diff --git a/mk/tests.mk b/mk/tests.mk index e353d46a0d02..70c30661b95f 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -39,7 +39,7 @@ installcheck: echo "$${red}$$failed out of $$total tests failed $$normal"; \ exit 1; \ else \ - echo "$${green}All tests succeeded"; \ + echo "$${green}All tests succeeded$$normal"; \ fi .PHONY: check installcheck diff --git a/release-common.nix b/release-common.nix index 4553118e1f56..f9df6fab9ff6 100644 --- a/release-common.nix +++ b/release-common.nix @@ -1,17 +1,31 @@ { pkgs }: rec { - sh = pkgs.busybox.override { + # Use "busybox-sandbox-shell" if present, + # if not (legacy) fallback and hope it's sufficient. + sh = pkgs.busybox-sandbox-shell or (pkgs.busybox.override { useMusl = true; enableStatic = true; enableMinimal = true; extraConfig = '' + CONFIG_FEATURE_FANCY_ECHO y + CONFIG_FEATURE_SH_MATH y + CONFIG_FEATURE_SH_MATH_64 y + CONFIG_ASH y + CONFIG_ASH_OPTIMIZE_FOR_SIZE y + + CONFIG_ASH_ALIAS y + CONFIG_ASH_BASH_COMPAT y + CONFIG_ASH_CMDCMD y CONFIG_ASH_ECHO y + CONFIG_ASH_GETOPTS y + CONFIG_ASH_INTERNAL_GLOB y + CONFIG_ASH_JOB_CONTROL y + CONFIG_ASH_PRINTF y CONFIG_ASH_TEST y - CONFIG_ASH_OPTIMIZE_FOR_SIZE y ''; - }; + }); configureFlags = [ "--disable-init-state" diff --git a/release.nix b/release.nix index c5c2170f78d0..3f8d5da4721e 100644 --- a/release.nix +++ b/release.nix @@ -1,12 +1,12 @@ -{ nix ? { outPath = ./.; revCount = 1234; shortRev = "abcdef"; } -, nixpkgs ? { outPath = <nixpkgs>; revCount = 1234; shortRev = "abcdef"; } +{ nix ? builtins.fetchGit ./. +, nixpkgs ? builtins.fetchGit { url = https://github.com/NixOS/nixpkgs.git; ref = "nix-2.0"; } , officialRelease ? false , systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ] }: let - pkgs = import <nixpkgs> {}; + pkgs = import nixpkgs { system = builtins.currentSystem or "x86_64-linux"; }; jobs = rec { @@ -27,16 +27,13 @@ let pkgconfig sqlite libsodium boehmgc docbook5 docbook5_xsl autoconf-archive - git ] ++ lib.optional stdenv.isLinux libseccomp; configureFlags = "--enable-gc"; postUnpack = '' - # Clean up when building from a working tree. - if [[ -d $sourceRoot/.git ]]; then - git -C $sourceRoot clean -fd - fi + (cd source && find . -type f) | cut -c3- > source/.dist-files + cat source/.dist-files ''; preConfigure = '' @@ -62,7 +59,7 @@ let build = pkgs.lib.genAttrs systems (system: - with import <nixpkgs> { inherit system; }; + with import nixpkgs { inherit system; }; with import ./release-common.nix { inherit pkgs; }; @@ -105,7 +102,7 @@ let perlBindings = pkgs.lib.genAttrs systems (system: - let pkgs = import <nixpkgs> { inherit system; }; in with pkgs; + let pkgs = import nixpkgs { inherit system; }; in with pkgs; releaseTools.nixBuild { name = "nix-perl"; @@ -130,8 +127,7 @@ let binaryTarball = pkgs.lib.genAttrs systems (system: - # FIXME: temporarily use a different branch for the Darwin build. 
- with import <nixpkgs> { inherit system; }; + with import nixpkgs { inherit system; }; let toplevel = builtins.getAttr system jobs.build; @@ -140,7 +136,7 @@ let runCommand "nix-binary-tarball-${version}" { exportReferencesGraph = [ "closure1" toplevel "closure2" cacert ]; - buildInputs = [ perl shellcheck ]; + buildInputs = [ perl ] ++ lib.optional (system != "aarch64-linux") shellcheck; meta.description = "Distribution-independent Nix bootstrap binaries for ${system}"; } '' @@ -153,8 +149,10 @@ let --subst-var-by nix ${toplevel} \ --subst-var-by cacert ${cacert} - shellcheck -e SC1090 $TMPDIR/install - shellcheck -e SC1091,SC2002 $TMPDIR/install-darwin-multi-user + if type -p shellcheck; then + shellcheck -e SC1090 $TMPDIR/install + shellcheck -e SC1091,SC2002 $TMPDIR/install-darwin-multi-user + fi chmod +x $TMPDIR/install chmod +x $TMPDIR/install-darwin-multi-user @@ -174,7 +172,7 @@ let coverage = - with import <nixpkgs> { system = "x86_64-linux"; }; + with import nixpkgs { system = "x86_64-linux"; }; releaseTools.coverageAnalysis { name = "nix-build"; @@ -218,20 +216,25 @@ let # System tests. tests.remoteBuilds = (import ./tests/remote-builds.nix rec { + inherit nixpkgs; nix = build.x86_64-linux; system = "x86_64-linux"; }); tests.nix-copy-closure = (import ./tests/nix-copy-closure.nix rec { + inherit nixpkgs; nix = build.x86_64-linux; system = "x86_64-linux"; }); - tests.setuid = pkgs.lib.genAttrs (pkgs.lib.filter (pkgs.lib.hasSuffix "-linux") systems) (system: - import ./tests/setuid.nix rec { - nix = build.${system}; inherit system; - }); + tests.setuid = pkgs.lib.genAttrs + ["i686-linux" "x86_64-linux"] + (system: + import ./tests/setuid.nix rec { + inherit nixpkgs; + nix = build.${system}; inherit system; + }); tests.binaryTarball = - with import <nixpkgs> { system = "x86_64-linux"; }; + with import nixpkgs { system = "x86_64-linux"; }; vmTools.runInLinuxImage (runCommand "nix-binary-tarball-test" { diskImage = vmTools.diskImages.ubuntu1204x86_64; } @@ -250,7 +253,7 @@ let ''); # */ tests.evalNixpkgs = - import <nixpkgs/pkgs/top-level/make-tarball.nix> { + import (nixpkgs + "/pkgs/top-level/make-tarball.nix") { inherit nixpkgs; inherit pkgs; nix = build.x86_64-linux; @@ -304,7 +307,7 @@ let makeRPM = system: diskImageFun: extraPackages: - with import <nixpkgs> { inherit system; }; + with import nixpkgs { inherit system; }; releaseTools.rpmBuild rec { name = "nix-rpm"; @@ -313,7 +316,8 @@ let { extraPackages = [ "sqlite" "sqlite-devel" "bzip2-devel" "libcurl-devel" "openssl-devel" "xz-devel" "libseccomp-devel" ] ++ extraPackages; }; - memSize = 1024; + # At most 2047MB can be simulated in qemu-system-i386 + memSize = 2047; meta.schedulingPriority = 50; postRPMInstall = "cd /tmp/rpmout/BUILD/nix-* && make installcheck"; #enableParallelBuilding = true; @@ -326,7 +330,7 @@ let makeDeb = system: diskImageFun: extraPackages: extraDebPackages: - with import <nixpkgs> { inherit system; }; + with import nixpkgs { inherit system; }; releaseTools.debBuild { name = "nix-deb"; diff --git a/scripts/install-darwin-multi-user.sh b/scripts/install-darwin-multi-user.sh index b6b3305bac71..716b6e9bc9a3 100644 --- a/scripts/install-darwin-multi-user.sh +++ b/scripts/install-darwin-multi-user.sh @@ -24,7 +24,7 @@ readonly YELLOW='\033[38;33m' readonly YELLOW_UL='\033[38;4;33m' readonly CORES=$(sysctl -n hw.ncpu) -readonly NIX_USER_COUNT="$CORES" +readonly NIX_USER_COUNT="32" readonly NIX_BUILD_GROUP_ID="30000" readonly NIX_BUILD_GROUP_NAME="nixbld" readonly NIX_FIRST_BUILD_UID="30001" @@ -33,7 +33,7 
@@ readonly NIX_FIRST_BUILD_UID="30001" readonly NIX_ROOT="/nix" readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist -readonly PROFILE_TARGETS=("/etc/profile" "/etc/bashrc" "/etc/zshrc") +readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/zshrc") readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix" readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" @@ -153,7 +153,7 @@ subheader() { } row() { - printf "$BOLD%s$ESC:\t%s\n" "$1" "$2" + printf "$BOLD%s$ESC:\\t%s\\n" "$1" "$2" } task() { @@ -218,7 +218,7 @@ __sudo() { echo "I am executing:" echo "" - printf " $ sudo %s\n" "$cmd" + printf " $ sudo %s\\n" "$cmd" echo "" echo "$expl" echo "" @@ -647,7 +647,7 @@ chat_about_sudo() { cat <<EOF This script is going to call sudo a lot. Normally, it would show you exactly what commands it is running and why. However, the script is -run in a headless fashion, like this: +run in a headless fashion, like this: $ curl https://nixos.org/nix/install | sh @@ -695,7 +695,7 @@ install_from_extracted_nix() { cd "$EXTRACTED_NIX_PATH" _sudo "to copy the basic Nix files to the new store at $NIX_ROOT/store" \ - rsync -rlpt "$(pwd)/store/" "$NIX_ROOT/store/" + rsync -rlpt ./store/* "$NIX_ROOT/store/" if [ -d "$NIX_INSTALLED_NIX" ]; then echo " Alright! We have our first nix at $NIX_INSTALLED_NIX" diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in index ab95c09c8305..a5f52274fc70 100644 --- a/scripts/nix-profile.sh.in +++ b/scripts/nix-profile.sh.in @@ -60,12 +60,6 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then # This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_USER_PROFILE_DIR" - for i in $NIX_PROFILES; do - if [ -d "$i/lib/aspell" ]; then - export ASPELL_CONF="dict-dir $i/lib/aspell" - fi - done - # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. if [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt @@ -81,7 +75,7 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then export NIX_SSL_CERT_FILE="$NIX_LINK/etc/ca-bundle.crt" fi - if [ -n ${MANPATH} ]; then + if [ -n "${MANPATH}" ]; then export MANPATH="$NIX_LINK/share/man:$MANPATH" fi diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc index 445006b327fd..dbf8fe1b8f8a 100644 --- a/src/build-remote/build-remote.cc +++ b/src/build-remote/build-remote.cc @@ -64,6 +64,8 @@ int main (int argc, char * * argv) settings.maxBuildJobs.set("1"); // hack to make tests with local?root= work + initPlugins(); + auto store = openStore().cast<LocalStore>(); /* It would be more appropriate to use $XDG_RUNTIME_DIR, since @@ -218,9 +220,11 @@ connected: signal(SIGALRM, old); } + auto substitute = settings.buildersUseSubstitutes ? 
Substitute : NoSubstitute; + { Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri)); - copyPaths(store, ref<Store>(sshStore), inputs, NoRepair, NoCheckSigs); + copyPaths(store, ref<Store>(sshStore), inputs, NoRepair, NoCheckSigs, substitute); } uploadLock = -1; @@ -239,8 +243,8 @@ connected: if (!missing.empty()) { Activity act(*logger, lvlTalkative, actUnknown, fmt("copying outputs from '%s'", storeUri)); - setenv("NIX_HELD_LOCKS", concatStringsSep(" ", missing).c_str(), 1); /* FIXME: ugly */ - copyPaths(ref<Store>(sshStore), store, missing, NoRepair, NoCheckSigs); + store->locksHeld.insert(missing.begin(), missing.end()); /* FIXME: ugly */ + copyPaths(ref<Store>(sshStore), store, missing, NoRepair, NoCheckSigs, substitute); } return; diff --git a/src/buildenv/buildenv.cc b/src/buildenv/buildenv.cc index eddb9fdaa8d2..2afad913ac6b 100644 --- a/src/buildenv/buildenv.cc +++ b/src/buildenv/buildenv.cc @@ -74,10 +74,10 @@ static void createLinks(const Path & srcDir, const Path & dstDir, int priority) auto prevPriority = priorities[dstFile]; if (prevPriority == priority) throw Error(format( - "Packages '%1%' and '%2%' have the same priority '%3%'" + "packages '%1%' and '%2%' have the same priority %3%; " "use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' " "to change the priority of one of the conflicting packages" - " ('0' being the highest priority)" + " (0 being the highest priority)" ) % srcFile % target % priority); if (prevPriority < priority) continue; diff --git a/src/libexpr/attr-set.cc b/src/libexpr/attr-set.cc index 910428c02686..b284daa3c2f7 100644 --- a/src/libexpr/attr-set.cc +++ b/src/libexpr/attr-set.cc @@ -7,13 +7,14 @@ namespace nix { +/* Note: Various places expect the allocated memory to be zeroed. */ static void * allocBytes(size_t n) { void * p; #if HAVE_BOEHMGC p = GC_malloc(n); #else - p = malloc(n); + p = calloc(n, 1); #endif if (!p) throw std::bad_alloc(); return p; diff --git a/src/libexpr/attr-set.hh b/src/libexpr/attr-set.hh index e1fc2bf6d796..3119a1848af2 100644 --- a/src/libexpr/attr-set.hh +++ b/src/libexpr/attr-set.hh @@ -83,7 +83,7 @@ public: for (size_t n = 0; n < size_; n++) res.emplace_back(&attrs[n]); std::sort(res.begin(), res.end(), [](const Attr * a, const Attr * b) { - return (string) a->name < (string) b->name; + return (const string &) a->name < (const string &) b->name; }); return res; } diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 63de2d60a147..f94c23ea72bb 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -43,13 +43,14 @@ static char * dupString(const char * s) } +/* Note: Various places expect the allocated memory to be zeroed. 
*/ static void * allocBytes(size_t n) { void * p; #if HAVE_BOEHMGC p = GC_malloc(n); #else - p = malloc(n); + p = calloc(n, 1); #endif if (!p) throw std::bad_alloc(); return p; @@ -293,6 +294,10 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store) , sWrong(symbols.create("wrong")) , sStructuredAttrs(symbols.create("__structuredAttrs")) , sBuilder(symbols.create("builder")) + , sArgs(symbols.create("args")) + , sOutputHash(symbols.create("outputHash")) + , sOutputHashAlgo(symbols.create("outputHashAlgo")) + , sOutputHashMode(symbols.create("outputHashMode")) , repair(NoRepair) , store(store) , baseEnv(allocEnv(128)) @@ -300,15 +305,24 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store) { countCalls = getEnv("NIX_COUNT_CALLS", "0") != "0"; - restricted = settings.restrictEval; - assert(gcInitialised); /* Initialise the Nix expression search path. */ - Strings paths = parseNixPath(getEnv("NIX_PATH", "")); - for (auto & i : _searchPath) addToSearchPath(i); - for (auto & i : paths) addToSearchPath(i); - addToSearchPath("nix=" + settings.nixDataDir + "/nix/corepkgs"); + if (!settings.pureEval) { + Strings paths = parseNixPath(getEnv("NIX_PATH", "")); + for (auto & i : _searchPath) addToSearchPath(i); + for (auto & i : paths) addToSearchPath(i); + } + addToSearchPath("nix=" + canonPath(settings.nixDataDir + "/nix/corepkgs", true)); + + if (settings.restrictEval || settings.pureEval) { + allowedPaths = PathSet(); + for (auto & i : searchPath) { + auto r = resolveSearchPathElem(i); + if (!r.first) continue; + allowedPaths->insert(r.second); + } + } clearValue(vEmptySet); vEmptySet.type = tAttrs; @@ -326,38 +340,36 @@ EvalState::~EvalState() Path EvalState::checkSourcePath(const Path & path_) { - if (!restricted) return path_; + if (!allowedPaths) return path_; + + bool found = false; + + for (auto & i : *allowedPaths) { + if (isDirOrInDir(path_, i)) { + found = true; + break; + } + } + + if (!found) + throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path_); /* Resolve symlinks. */ debug(format("checking access to '%s'") % path_); Path path = canonPath(path_, true); - for (auto & i : searchPath) { - auto r = resolveSearchPathElem(i); - if (!r.first) continue; - if (path == r.second || isInDir(path, r.second)) + for (auto & i : *allowedPaths) { + if (isDirOrInDir(path, i)) return path; } - /* To support import-from-derivation, allow access to anything in - the store. FIXME: only allow access to paths that have been - constructed by this evaluation. */ - if (store->isInStore(path)) return path; - -#if 0 - /* Hack to support the chroot dependencies of corepkgs (see - corepkgs/config.nix.in). */ - if (path == settings.nixPrefix && isStorePath(settings.nixPrefix)) - return path; -#endif - - throw RestrictedPathError(format("access to path '%1%' is forbidden in restricted mode") % path_); + throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path); } void EvalState::checkURI(const std::string & uri) { - if (!restricted) return; + if (!settings.restrictEval) return; /* 'uri' should be equal to a prefix, or in a subdirectory of a prefix. Thus, the prefix https://github.co does not permit @@ -371,11 +383,33 @@ void EvalState::checkURI(const std::string & uri) && (prefix[prefix.size() - 1] == '/' || uri[prefix.size()] == '/'))) return; + /* If the URI is a path, then check it against allowedPaths as + well. 
*/ + if (hasPrefix(uri, "/")) { + checkSourcePath(uri); + return; + } + + if (hasPrefix(uri, "file://")) { + checkSourcePath(std::string(uri, 7)); + return; + } + throw RestrictedPathError("access to URI '%s' is forbidden in restricted mode", uri); } -void EvalState::addConstant(const string & name, Value & v) +Path EvalState::toRealPath(const Path & path, const PathSet & context) +{ + // FIXME: check whether 'path' is in 'context'. + return + !context.empty() && store->isInStore(path) + ? store->toRealPath(path) + : path; +}; + + +Value * EvalState::addConstant(const string & name, Value & v) { Value * v2 = allocValue(); *v2 = v; @@ -383,12 +417,18 @@ void EvalState::addConstant(const string & name, Value & v) baseEnv.values[baseEnvDispl++] = v2; string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name; baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v2)); + return v2; } -void EvalState::addPrimOp(const string & name, +Value * EvalState::addPrimOp(const string & name, unsigned int arity, PrimOpFun primOp) { + if (arity == 0) { + Value v; + primOp(*this, noPos, nullptr, v); + return addConstant(name, v); + } Value * v = allocValue(); string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name; Symbol sym = symbols.create(name2); @@ -397,6 +437,7 @@ void EvalState::addPrimOp(const string & name, staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl; baseEnv.values[baseEnvDispl++] = v; baseEnv.values[0]->attrs->push_back(Attr(sym, v)); + return v; } @@ -546,9 +587,7 @@ Env & EvalState::allocEnv(unsigned int size) Env * env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *)); env->size = size; - /* Clear the values because maybeThunk() and lookupVar fromWith expect this. */ - for (unsigned i = 0; i < size; ++i) - env->values[i] = 0; + /* We assume that env->values has been cleared by the allocator; maybeThunk() and lookupVar fromWith expect this. */ return *env; } @@ -649,8 +688,10 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) } -void EvalState::evalFile(const Path & path, Value & v) +void EvalState::evalFile(const Path & path_, Value & v) { + auto path = checkSourcePath(path_); + FileEvalCache::iterator i; if ((i = fileEvalCache.find(path)) != fileEvalCache.end()) { v = i->second; @@ -1546,7 +1587,7 @@ string EvalState::copyPathToStore(PathSet & context, const Path & path) dstPath = srcToStore[path]; else { dstPath = settings.readOnlyMode - ? store->computeStorePathForPath(checkSourcePath(path)).first + ? 
store->computeStorePathForPath(baseNameOf(path), checkSourcePath(path)).first : store->addToStore(baseNameOf(path), checkSourcePath(path), true, htSHA256, defaultPathFilter, repair); srcToStore[path] = dstPath; printMsg(lvlChatty, format("copied source '%1%' -> '%2%'") @@ -1668,10 +1709,13 @@ void EvalState::printStats() printMsg(v, format(" time elapsed: %1%") % cpuTime); printMsg(v, format(" size of a value: %1%") % sizeof(Value)); printMsg(v, format(" size of an attr: %1%") % sizeof(Attr)); - printMsg(v, format(" environments allocated: %1% (%2% bytes)") % nrEnvs % bEnvs); - printMsg(v, format(" list elements: %1% (%2% bytes)") % nrListElems % bLists); + printMsg(v, format(" environments allocated count: %1%") % nrEnvs); + printMsg(v, format(" environments allocated bytes: %1%") % bEnvs); + printMsg(v, format(" list elements count: %1%") % nrListElems); + printMsg(v, format(" list elements bytes: %1%") % bLists); printMsg(v, format(" list concatenations: %1%") % nrListConcats); - printMsg(v, format(" values allocated: %1% (%2% bytes)") % nrValues % bValues); + printMsg(v, format(" values allocated count: %1%") % nrValues); + printMsg(v, format(" values allocated bytes: %1%") % bValues); printMsg(v, format(" sets allocated: %1% (%2% bytes)") % nrAttrsets % bAttrsets); printMsg(v, format(" right-biased unions: %1%") % nrOpUpdates); printMsg(v, format(" values copied in right-biased unions: %1%") % nrOpUpdateValuesCopied); diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index f0ab1435bff3..9d8799b7906b 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -69,16 +69,17 @@ public: const Symbol sWith, sOutPath, sDrvPath, sType, sMeta, sName, sValue, sSystem, sOverrides, sOutputs, sOutputName, sIgnoreNulls, sFile, sLine, sColumn, sFunctor, sToString, - sRight, sWrong, sStructuredAttrs, sBuilder; + sRight, sWrong, sStructuredAttrs, sBuilder, sArgs, + sOutputHash, sOutputHashAlgo, sOutputHashMode; Symbol sDerivationNix; /* If set, force copying files to the Nix store even if they already exist there. */ RepairFlag repair; - /* If set, don't allow access to files outside of the Nix search - path or to environment variables. */ - bool restricted; + /* The allowed filesystem paths in restricted or pure evaluation + mode. */ + std::experimental::optional<PathSet> allowedPaths; Value vEmptySet; @@ -112,6 +113,15 @@ public: void checkURI(const std::string & uri); + /* When using a diverted store and 'path' is in the Nix store, map + 'path' to the diverted location (e.g. /nix/store/foo is mapped + to /home/alice/my-nix/nix/store/foo). However, this is only + done if the context is not empty, since otherwise we're + probably trying to read from the actual /nix/store. This is + intended to distinguish between import-from-derivation and + sources stored in the actual /nix/store. */ + Path toRealPath(const Path & path, const PathSet & context); + /* Parse a Nix expression from the specified file. 
*/ Expr * parseExprFromFile(const Path & path); Expr * parseExprFromFile(const Path & path, StaticEnv & staticEnv); @@ -201,9 +211,9 @@ private: void createBaseEnv(); - void addConstant(const string & name, Value & v); + Value * addConstant(const string & name, Value & v); - void addPrimOp(const string & name, + Value * addPrimOp(const string & name, unsigned int arity, PrimOpFun primOp); public: diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc index 9380de3a66b3..8b1404595548 100644 --- a/src/libexpr/json-to-value.cc +++ b/src/libexpr/json-to-value.cc @@ -106,10 +106,16 @@ static void parseJSON(EvalState & state, const char * & s, Value & v) tmp_number += *s++; } - if (number_type == tFloat) - mkFloat(v, stod(tmp_number)); - else - mkInt(v, stoi(tmp_number)); + try { + if (number_type == tFloat) + mkFloat(v, stod(tmp_number)); + else + mkInt(v, stoi(tmp_number)); + } catch (std::invalid_argument e) { + throw JSONParseError("invalid JSON number"); + } catch (std::out_of_range e) { + throw JSONParseError("out-of-range JSON number"); + } } else if (strncmp(s, "true", 4) == 0) { diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l index 28a0a6a87896..e5e01fb5831a 100644 --- a/src/libexpr/lexer.l +++ b/src/libexpr/lexer.l @@ -49,9 +49,10 @@ static void adjustLoc(YYLTYPE * loc, const char * s, size_t len) } -static Expr * unescapeStr(SymbolTable & symbols, const char * s) +static Expr * unescapeStr(SymbolTable & symbols, const char * s, size_t length) { string t; + t.reserve(length); char c; while ((c = *s++)) { if (c == '\\') { @@ -150,7 +151,7 @@ or { return OR_KW; } /* It is impossible to match strings ending with '$' with one regex because trailing contexts are only valid at the end of a rule. (A sane but undocumented limitation.) */ - yylval->e = unescapeStr(data->symbols, yytext); + yylval->e = unescapeStr(data->symbols, yytext, yyleng); return STR; } <STRING>\$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; } @@ -178,7 +179,7 @@ or { return OR_KW; } return IND_STR; } <IND_STRING>\'\'\\. { - yylval->e = unescapeStr(data->symbols, yytext + 2); + yylval->e = unescapeStr(data->symbols, yytext + 2, yyleng - 2); return IND_STR; } <IND_STRING>\$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; } diff --git a/src/libexpr/names.cc b/src/libexpr/names.cc index 6d78d2116121..382088c78872 100644 --- a/src/libexpr/names.cc +++ b/src/libexpr/names.cc @@ -41,7 +41,7 @@ bool DrvName::matches(DrvName & n) } -static string nextComponent(string::const_iterator & p, +string nextComponent(string::const_iterator & p, const string::const_iterator end) { /* Skip any dots and dashes (component separators). */ diff --git a/src/libexpr/names.hh b/src/libexpr/names.hh index 9667fc96fd0f..13c3093e77b0 100644 --- a/src/libexpr/names.hh +++ b/src/libexpr/names.hh @@ -24,6 +24,8 @@ private: typedef list<DrvName> DrvNames; +string nextComponent(string::const_iterator & p, + const string::const_iterator end); int compareVersions(const string & v1, const string & v2); DrvNames drvNamesFromArgs(const Strings & opArgs); diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index e3b5dfb420b4..6778023f506d 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -39,7 +39,7 @@ std::pair<string, string> decodeContext(const string & s) size_t index = s.find("!", 1); return std::pair<string, string>(string(s, index + 1), string(s, 1, index - 1)); } else - return std::pair<string, string>(s.at(0) == '/' ? 
s: string(s, 1), ""); + return std::pair<string, string>(s.at(0) == '/' ? s : string(s, 1), ""); } @@ -49,24 +49,38 @@ InvalidPathError::InvalidPathError(const Path & path) : void EvalState::realiseContext(const PathSet & context) { PathSet drvs; + for (auto & i : context) { std::pair<string, string> decoded = decodeContext(i); Path ctx = decoded.first; assert(store->isStorePath(ctx)); if (!store->isValidPath(ctx)) throw InvalidPathError(ctx); - if (!decoded.second.empty() && nix::isDerivation(ctx)) + if (!decoded.second.empty() && nix::isDerivation(ctx)) { drvs.insert(decoded.first + "!" + decoded.second); + + /* Add the output of this derivation to the allowed + paths. */ + if (allowedPaths) { + auto drv = store->derivationFromPath(decoded.first); + DerivationOutputs::iterator i = drv.outputs.find(decoded.second); + if (i == drv.outputs.end()) + throw Error("derivation '%s' does not have an output named '%s'", decoded.first, decoded.second); + allowedPaths->insert(i->second.path); + } + } } - if (!drvs.empty()) { - if (!settings.enableImportFromDerivation) - throw EvalError(format("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin())); - /* For performance, prefetch all substitute info. */ - PathSet willBuild, willSubstitute, unknown; - unsigned long long downloadSize, narSize; - store->queryMissing(drvs, willBuild, willSubstitute, unknown, downloadSize, narSize); - store->buildPaths(drvs); - } + + if (drvs.empty()) return; + + if (!settings.enableImportFromDerivation) + throw EvalError(format("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin())); + + /* For performance, prefetch all substitute info. */ + PathSet willBuild, willSubstitute, unknown; + unsigned long long downloadSize, narSize; + store->queryMissing(drvs, willBuild, willSubstitute, unknown, downloadSize, narSize); + store->buildPaths(drvs); } @@ -84,10 +98,10 @@ static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args % path % e.path % pos); } - path = state.checkSourcePath(path); + Path realPath = state.checkSourcePath(state.toRealPath(path, context)); if (state.store->isStorePath(path) && state.store->isValidPath(path) && isDerivation(path)) { - Derivation drv = readDerivation(path); + Derivation drv = readDerivation(realPath); Value & w = *state.allocValue(); state.mkAttrs(w, 3 + drv.outputs.size()); Value * v2 = state.allocAttr(w, state.sDrvPath); @@ -114,7 +128,7 @@ static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args } else { state.forceAttrs(*args[0]); if (args[0]->attrs->empty()) - state.evalFile(path, v); + state.evalFile(realPath, v); else { Env * env = &state.allocEnv(args[0]->attrs->size()); env->up = &state.baseEnv; @@ -127,8 +141,8 @@ static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args env->values[displ++] = attr.value; } - printTalkative("evaluating file '%1%'", path); - Expr * e = state.parseExprFromFile(resolveExprPath(path), staticEnv); + printTalkative("evaluating file '%1%'", realPath); + Expr * e = state.parseExprFromFile(resolveExprPath(realPath), staticEnv); e->eval(state, *env, v); } @@ -439,7 +453,7 @@ static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Val static void prim_getEnv(EvalState & state, const Pos & pos, Value * * args, Value & v) { string name = state.forceStringNoCtx(*args[0], pos); - mkString(v, state.restricted ? 
"" : getEnv(name)); + mkString(v, settings.restrictEval || settings.pureEval ? "" : getEnv(name)); } @@ -539,7 +553,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * for (auto & i : args[0]->attrs->lexicographicOrder()) { if (i->name == state.sIgnoreNulls) continue; - string key = i->name; + const string & key = i->name; vomit("processing attribute '%1%'", key); auto handleHashMode = [&](const std::string & s) { @@ -575,7 +589,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * /* The `args' attribute is special: it supplies the command-line arguments to the builder. */ - if (key == "args") { + if (i->name == state.sArgs) { state.forceList(*i->value, pos); for (unsigned int n = 0; n < i->value->listSize(); ++n) { string s = state.coerceToString(posDrvName, *i->value->listElems()[n], context, true); @@ -598,15 +612,13 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * drv.builder = state.forceString(*i->value, context, posDrvName); else if (i->name == state.sSystem) drv.platform = state.forceStringNoCtx(*i->value, posDrvName); - else if (i->name == state.sName) - drvName = state.forceStringNoCtx(*i->value, posDrvName); - else if (key == "outputHash") + else if (i->name == state.sOutputHash) outputHash = state.forceStringNoCtx(*i->value, posDrvName); - else if (key == "outputHashAlgo") + else if (i->name == state.sOutputHashAlgo) outputHashAlgo = state.forceStringNoCtx(*i->value, posDrvName); - else if (key == "outputHashMode") + else if (i->name == state.sOutputHashMode) handleHashMode(state.forceStringNoCtx(*i->value, posDrvName)); - else if (key == "outputs") { + else if (i->name == state.sOutputs) { /* Require ‘outputs’ to be a list of strings. */ state.forceList(*i->value, posDrvName); Strings ss; @@ -620,14 +632,10 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * drv.env.emplace(key, s); if (i->name == state.sBuilder) drv.builder = s; else if (i->name == state.sSystem) drv.platform = s; - else if (i->name == state.sName) { - drvName = s; - printMsg(lvlVomit, format("derivation name is '%1%'") % drvName); - } - else if (key == "outputHash") outputHash = s; - else if (key == "outputHashAlgo") outputHashAlgo = s; - else if (key == "outputHashMode") handleHashMode(s); - else if (key == "outputs") + else if (i->name == state.sOutputHash) outputHash = s; + else if (i->name == state.sOutputHashAlgo) outputHashAlgo = s; + else if (i->name == state.sOutputHashMode) handleHashMode(s); + else if (i->name == state.sOutputs) handleOutputs(tokenizeString<Strings>(s)); } @@ -863,7 +871,7 @@ static void prim_readFile(EvalState & state, const Pos & pos, Value * * args, Va throw EvalError(format("cannot read '%1%', since path '%2%' is not valid, at %3%") % path % e.path % pos); } - string s = readFile(state.checkSourcePath(path)); + string s = readFile(state.checkSourcePath(state.toRealPath(path, context))); if (s.find((char) 0) != string::npos) throw Error(format("the contents of the file '%1%' cannot be represented as a Nix string") % path); mkString(v, s.c_str()); @@ -1009,20 +1017,13 @@ static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Valu } -static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args, Value & v) +static void addPath(EvalState & state, const Pos & pos, const string & name, const Path & path_, + Value * filterFun, bool recursive, const Hash & expectedHash, Value & v) { - PathSet context; - Path path = 
state.coerceToPath(pos, *args[1], context); - if (!context.empty()) - throw EvalError(format("string '%1%' cannot refer to other paths, at %2%") % path % pos); - - state.forceValue(*args[0]); - if (args[0]->type != tLambda) - throw TypeError(format("first argument in call to 'filterSource' is not a function but %1%, at %2%") % showType(*args[0]) % pos); - - path = state.checkSourcePath(path); - - PathFilter filter = [&](const Path & path) { + const auto path = settings.pureEval && expectedHash ? + path_ : + state.checkSourcePath(path_); + PathFilter filter = filterFun ? ([&](const Path & path) { auto st = lstat(path); /* Call the filter function. The first argument is the path, @@ -1031,7 +1032,7 @@ static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args mkString(arg1, path); Value fun2; - state.callFunction(*args[0], arg1, fun2, noPos); + state.callFunction(*filterFun, arg1, fun2, noPos); Value arg2; mkString(arg2, @@ -1044,16 +1045,79 @@ static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args state.callFunction(fun2, arg2, res, noPos); return state.forceBool(res, pos); - }; + }) : defaultPathFilter; - Path dstPath = settings.readOnlyMode - ? state.store->computeStorePathForPath(path, true, htSHA256, filter).first - : state.store->addToStore(baseNameOf(path), path, true, htSHA256, filter, state.repair); + Path expectedStorePath; + if (expectedHash) { + expectedStorePath = + state.store->makeFixedOutputPath(recursive, expectedHash, name); + } + Path dstPath; + if (!expectedHash || !state.store->isValidPath(expectedStorePath)) { + dstPath = settings.readOnlyMode + ? state.store->computeStorePathForPath(name, path, recursive, htSHA256, filter).first + : state.store->addToStore(name, path, recursive, htSHA256, filter, state.repair); + if (expectedHash && expectedStorePath != dstPath) { + throw Error(format("store path mismatch in (possibly filtered) path added from '%1%'") % path); + } + } else + dstPath = expectedStorePath; mkString(v, dstPath, {dstPath}); } +static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + PathSet context; + Path path = state.coerceToPath(pos, *args[1], context); + if (!context.empty()) + throw EvalError(format("string '%1%' cannot refer to other paths, at %2%") % path % pos); + + state.forceValue(*args[0]); + if (args[0]->type != tLambda) + throw TypeError(format("first argument in call to 'filterSource' is not a function but %1%, at %2%") % showType(*args[0]) % pos); + + addPath(state, pos, baseNameOf(path), path, args[0], true, Hash(), v); +} + +static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + state.forceAttrs(*args[0], pos); + Path path; + string name; + Value * filterFun = nullptr; + auto recursive = true; + Hash expectedHash; + + for (auto & attr : *args[0]->attrs) { + const string & n(attr.name); + if (n == "path") { + PathSet context; + path = state.coerceToPath(*attr.pos, *attr.value, context); + if (!context.empty()) + throw EvalError(format("string '%1%' cannot refer to other paths, at %2%") % path % *attr.pos); + } else if (attr.name == state.sName) + name = state.forceStringNoCtx(*attr.value, *attr.pos); + else if (n == "filter") { + state.forceValue(*attr.value); + filterFun = attr.value; + } else if (n == "recursive") + recursive = state.forceBool(*attr.value, *attr.pos); + else if (n == "sha256") + expectedHash = Hash(state.forceStringNoCtx(*attr.value, *attr.pos), htSHA256); + else + throw EvalError(format("unsupported 
argument '%1%' to 'addPath', at %2%") % attr.name % *attr.pos); + } + if (path.empty()) + throw EvalError(format("'path' required, at %1%") % pos); + if (name.empty()) + name = baseNameOf(path); + + addPath(state, pos, name, path, filterFun, recursive, expectedHash, v); +} + + /************************************************************* * Sets *************************************************************/ @@ -1068,8 +1132,11 @@ static void prim_attrNames(EvalState & state, const Pos & pos, Value * * args, V state.mkList(v, args[0]->attrs->size()); size_t n = 0; - for (auto & i : args[0]->attrs->lexicographicOrder()) - mkString(*(v.listElems()[n++] = state.allocValue()), i->name); + for (auto & i : *args[0]->attrs) + mkString(*(v.listElems()[n++] = state.allocValue()), i.name); + + std::sort(v.listElems(), v.listElems() + n, + [](Value * v1, Value * v2) { return strcmp(v1->string.s, v2->string.s) < 0; }); } @@ -1653,6 +1720,14 @@ static void prim_unsafeDiscardStringContext(EvalState & state, const Pos & pos, } +static void prim_hasContext(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + PathSet context; + state.forceString(*args[0], context, pos); + mkBool(v, !context.empty()); +} + + /* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a builder without causing the derivation to be built (for instance, in the derivation that builds NARs in nix-push, when doing @@ -1838,21 +1913,32 @@ static void prim_replaceStrings(EvalState & state, const Pos & pos, Value * * ar auto s = state.forceString(*args[2], context, pos); string res; - for (size_t p = 0; p < s.size(); ) { + // Loops one past last character to handle the case where 'from' contains an empty string. + for (size_t p = 0; p <= s.size(); ) { bool found = false; auto i = from.begin(); auto j = to.begin(); for (; i != from.end(); ++i, ++j) if (s.compare(p, i->size(), *i) == 0) { found = true; - p += i->size(); res += j->first; + if (i->empty()) { + if (p < s.size()) + res += s[p]; + p++; + } else { + p += i->size(); + } for (auto& path : j->second) context.insert(path); j->second.clear(); break; } - if (!found) res += s[p++]; + if (!found) { + if (p < s.size()) + res += s[p]; + p++; + } } mkString(v, res, context); @@ -1883,6 +1969,26 @@ static void prim_compareVersions(EvalState & state, const Pos & pos, Value * * a } +static void prim_splitVersion(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + string version = state.forceStringNoCtx(*args[0], pos); + auto iter = version.cbegin(); + Strings components; + while (iter != version.cend()) { + auto component = nextComponent(iter, version.cend()); + if (component.empty()) + break; + components.emplace_back(std::move(component)); + } + state.mkList(v, components.size()); + unsigned int n = 0; + for (auto & component : components) { + auto listElem = v.listElems()[n++] = state.allocValue(); + mkString(*listElem, std::move(component)); + } +} + + /************************************************************* * Networking *************************************************************/ @@ -1921,7 +2027,14 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v, state.checkURI(url); + if (settings.pureEval && !expectedHash) + throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", who); + Path res = getDownloader()->downloadCached(state.store, url, unpack, name, expectedHash); + + if (state.allowedPaths) + state.allowedPaths->insert(res); + mkString(v, res, PathSet({res})); } @@ -1973,11 +2086,24 @@ void 
EvalState::createBaseEnv() mkNull(v); addConstant("null", v); - mkInt(v, time(0)); - addConstant("__currentTime", v); + auto vThrow = addPrimOp("throw", 1, prim_throw); - mkString(v, settings.thisSystem); - addConstant("__currentSystem", v); + auto addPurityError = [&](const std::string & name) { + Value * v2 = allocValue(); + mkString(*v2, fmt("'%s' is not allowed in pure evaluation mode", name)); + mkApp(v, *vThrow, *v2); + addConstant(name, v); + }; + + if (!settings.pureEval) { + mkInt(v, time(0)); + addConstant("__currentTime", v); + } + + if (!settings.pureEval) { + mkString(v, settings.thisSystem); + addConstant("__currentSystem", v); + } mkString(v, nixVersion); addConstant("__nixVersion", v); @@ -1993,10 +2119,10 @@ void EvalState::createBaseEnv() addConstant("__langVersion", v); // Miscellaneous - addPrimOp("scopedImport", 2, prim_scopedImport); + auto vScopedImport = addPrimOp("scopedImport", 2, prim_scopedImport); Value * v2 = allocValue(); mkAttrs(*v2, 0); - mkApp(v, *baseEnv.values[baseEnvDispl - 1], *v2); + mkApp(v, *vScopedImport, *v2); forceValue(v); addConstant("import", v); if (settings.enableNativeCode) { @@ -2012,7 +2138,6 @@ void EvalState::createBaseEnv() addPrimOp("__isBool", 1, prim_isBool); addPrimOp("__genericClosure", 1, prim_genericClosure); addPrimOp("abort", 1, prim_abort); - addPrimOp("throw", 1, prim_throw); addPrimOp("__addErrorContext", 2, prim_addErrorContext); addPrimOp("__tryEval", 1, prim_tryEval); addPrimOp("__getEnv", 1, prim_getEnv); @@ -2027,7 +2152,10 @@ void EvalState::createBaseEnv() // Paths addPrimOp("__toPath", 1, prim_toPath); - addPrimOp("__storePath", 1, prim_storePath); + if (settings.pureEval) + addPurityError("__storePath"); + else + addPrimOp("__storePath", 1, prim_storePath); addPrimOp("__pathExists", 1, prim_pathExists); addPrimOp("baseNameOf", 1, prim_baseNameOf); addPrimOp("dirOf", 1, prim_dirOf); @@ -2041,6 +2169,7 @@ void EvalState::createBaseEnv() addPrimOp("__fromJSON", 1, prim_fromJSON); addPrimOp("__toFile", 2, prim_toFile); addPrimOp("__filterSource", 2, prim_filterSource); + addPrimOp("__path", 1, prim_path); // Sets addPrimOp("__attrNames", 1, prim_attrNames); @@ -2083,6 +2212,7 @@ void EvalState::createBaseEnv() addPrimOp("toString", 1, prim_toString); addPrimOp("__substring", 3, prim_substring); addPrimOp("__stringLength", 1, prim_stringLength); + addPrimOp("__hasContext", 1, prim_hasContext); addPrimOp("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext); addPrimOp("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency); addPrimOp("__hashString", 2, prim_hashString); @@ -2094,6 +2224,7 @@ void EvalState::createBaseEnv() // Versions addPrimOp("__parseDrvName", 1, prim_parseDrvName); addPrimOp("__compareVersions", 2, prim_compareVersions); + addPrimOp("__splitVersion", 1, prim_splitVersion); // Derivations addPrimOp("derivationStrict", 1, prim_derivationStrict); @@ -2105,7 +2236,7 @@ void EvalState::createBaseEnv() /* Add a wrapper around the derivation primop that computes the `drvPath' and `outPath' attributes lazily. 
*/ - string path = settings.nixDataDir + "/nix/corepkgs/derivation.nix"; + string path = canonPath(settings.nixDataDir + "/nix/corepkgs/derivation.nix", true); sDerivationNix = symbols.create(path); evalFile(path, v); addConstant("derivation", v); diff --git a/src/libexpr/primops.hh b/src/libexpr/primops.hh index 39d23b04a5ce..31bf3f84f6c7 100644 --- a/src/libexpr/primops.hh +++ b/src/libexpr/primops.hh @@ -9,6 +9,9 @@ struct RegisterPrimOp { typedef std::vector<std::tuple<std::string, size_t, PrimOpFun>> PrimOps; static PrimOps * primOps; + /* You can register a constant by passing an arity of 0. fun + will get called during EvalState initialization, so there + may be primops not yet added and builtins is not yet sorted. */ RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun); }; diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index e92e0638031f..9fc0d46626dd 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -22,10 +22,15 @@ struct GitInfo uint64_t revCount = 0; }; +std::regex revRegex("^[0-9a-fA-F]{40}$"); + GitInfo exportGit(ref<Store> store, const std::string & uri, - std::experimental::optional<std::string> ref, const std::string & rev, + std::experimental::optional<std::string> ref, std::string rev, const std::string & name) { + if (settings.pureEval && rev == "") + throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision"); + if (!ref && rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.git")) { bool clean = true; @@ -68,20 +73,20 @@ GitInfo exportGit(ref<Store> store, const std::string & uri, return gitInfo; } + + // clean working tree, but no ref or rev specified. Use 'HEAD'. + rev = chomp(runProgram("git", true, { "-C", uri, "rev-parse", "HEAD" })); + ref = "HEAD"s; } - if (!ref) ref = "master"s; + if (!ref) ref = "HEAD"s; - if (rev != "") { - std::regex revRegex("^[0-9a-fA-F]{40}$"); - if (!std::regex_match(rev, revRegex)) - throw Error("invalid Git revision '%s'", rev); - } + if (rev != "" && !std::regex_match(rev, revRegex)) + throw Error("invalid Git revision '%s'", rev); Path cacheDir = getCacheDir() + "/nix/git"; if (!pathExists(cacheDir)) { - createDirs(cacheDir); runProgram("git", true, { "init", "--bare", cacheDir }); } @@ -228,6 +233,9 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va mkString(*state.allocAttr(v, state.symbols.create("shortRev")), gitInfo.shortRev); mkInt(*state.allocAttr(v, state.symbols.create("revCount")), gitInfo.revCount); v.attrs->sort(); + + if (state.allowedPaths) + state.allowedPaths->insert(gitInfo.storePath); } static RegisterPrimOp r("fetchGit", 1, prim_fetchGit); diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index a317476c5829..5517d83df824 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -27,6 +27,9 @@ std::regex commitHashRegex("^[0-9a-fA-F]{40}$"); HgInfo exportMercurial(ref<Store> store, const std::string & uri, std::string rev, const std::string & name) { + if (settings.pureEval && rev == "") + throw Error("in pure evaluation mode, 'fetchMercurial' requires a Mercurial revision"); + if (rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.hg")) { bool clean = runProgram("hg", true, { "status", "-R", uri, "--modified", "--added", "--removed" }) == ""; @@ -196,6 +199,9 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar mkString(*state.allocAttr(v, 
state.symbols.create("shortRev")), std::string(hgInfo.rev, 0, 12)); mkInt(*state.allocAttr(v, state.symbols.create("revCount")), hgInfo.revCount); v.attrs->sort(); + + if (state.allowedPaths) + state.allowedPaths->insert(hgInfo.storePath); } static RegisterPrimOp r("fetchMercurial", 1, prim_fetchMercurial); diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc index d3aac6aba1ff..bcc05c2cdad6 100644 --- a/src/libmain/common-args.cc +++ b/src/libmain/common-args.cc @@ -37,6 +37,10 @@ MixCommonArgs::MixCommonArgs(const string & programName) std::string cat = "config"; settings.convertToArgs(*this, cat); + + // Backward compatibility hack: nix-env already had a --system flag. + if (programName == "nix-env") longFlags.erase("system"); + hiddenCategories.insert(cat); } diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 85d3c077ba5e..7d888202bbf1 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -193,9 +193,6 @@ LegacyArgs::LegacyArgs(const std::string & programName, mkFlag(0, "readonly-mode", "do not write to the Nix store", &settings.readOnlyMode); - mkFlag(0, "show-trace", "show Nix expression stack trace in evaluation errors", - &settings.showTrace); - mkFlag(0, "no-gc-warning", "disable warning about not using '--add-root'", &gcWarning, false); @@ -265,6 +262,7 @@ void printVersion(const string & programName) void showManPage(const string & name) { restoreSignals(); + setenv("MANPATH", settings.nixManDir.c_str(), 1); execlp("man", "man", name.c_str(), NULL); throw SysError(format("command 'man %1%' failed") % name.c_str()); } diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh index 1dcc4f0ac942..8e4861232db5 100644 --- a/src/libmain/shared.hh +++ b/src/libmain/shared.hh @@ -22,6 +22,7 @@ public: int handleExceptions(const string & programName, std::function<void()> fun); +/* Don't forget to call initPlugins() after settings are initialized! */ void initNix(); void parseCmdLine(int argc, char * * argv, diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index ab971dd8b6d9..d1b278b8efbe 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -149,7 +149,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str /* Compress the NAR. 
*/ narInfo->compression = compression; auto now1 = std::chrono::steady_clock::now(); - auto narCompressed = compress(compression, *nar); + auto narCompressed = compress(compression, *nar, parallelCompression); auto now2 = std::chrono::steady_clock::now(); narInfo->fileHash = hashString(htSHA256, *narCompressed); narInfo->fileSize = narCompressed->size(); diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh index 8492ff600eba..e20b968442b7 100644 --- a/src/libstore/binary-cache-store.hh +++ b/src/libstore/binary-cache-store.hh @@ -19,6 +19,8 @@ public: const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"}; const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"}; const Setting<Path> localNarCache{this, "", "local-nar-cache", "path to a local cache of NARs"}; + const Setting<bool> parallelCompression{this, false, "parallel-compression", + "enable multi-threading compression, available for xz only currently"}; private: diff --git a/src/libstore/build.cc b/src/libstore/build.cc index d4bd650baf22..1d611ffbaba5 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -6,6 +6,7 @@ #include "archive.hh" #include "affinity.hh" #include "builtins.hh" +#include "download.hh" #include "finally.hh" #include "compression.hh" #include "json.hh" @@ -48,7 +49,9 @@ #include <sys/param.h> #include <sys/mount.h> #include <sys/syscall.h> +#if HAVE_SECCOMP #include <seccomp.h> +#endif #define pivot_root(new_root, put_old) (syscall(SYS_pivot_root, new_root, put_old)) #endif @@ -1123,11 +1126,6 @@ void DerivationGoal::haveDerivation() return; } - /* Reject doing a hash build of anything other than a fixed-output - derivation. */ - if (buildMode == bmHash && !drv->isFixedOutput()) - throw Error("cannot do a hash build of non-fixed-output derivation '%1%'", drvPath); - /* We are first going to try to create the invalid output paths through substitutes. If that doesn't work, we'll build them. */ @@ -1319,9 +1317,7 @@ void DerivationGoal::inputsRealised() allPaths.insert(inputPaths.begin(), inputPaths.end()); /* Is this a fixed-output derivation? */ - fixedOutput = true; - for (auto & i : drv->outputs) - if (i.second.hash == "") fixedOutput = false; + fixedOutput = drv->isFixedOutput(); /* Don't repeat fixed-output derivations since they're already verified by their output hash.*/ @@ -1341,19 +1337,6 @@ void DerivationGoal::tryToBuild() { trace("trying to build"); - /* Check for the possibility that some other goal in this process - has locked the output since we checked in haveDerivation(). - (It can't happen between here and the lockPaths() call below - because we're not allowing multi-threading.) If so, put this - goal to sleep until another goal finishes, then try again. */ - for (auto & i : drv->outputs) - if (pathIsLockedByMe(worker.store.toRealPath(i.second.path))) { - debug(format("putting derivation '%1%' to sleep because '%2%' is locked by another goal") - % drvPath % i.second.path); - worker.waitForAnyGoal(shared_from_this()); - return; - } - /* Obtain locks on all output paths. The locks are automatically released when we exit this function or Nix crashes. 
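The #if HAVE_SECCOMP guard above makes libseccomp an optional dependency; when it is missing, the sandbox's seccomp setup refuses to run unless filter-syscalls is disabled, rather than silently dropping the syscall filter. For readers unfamiliar with the library, here is a minimal self-contained filter in the same style; it is not Nix's actual rule set, just an illustration of the API the guard protects:

#include <seccomp.h>
#include <errno.h>
#include <stdexcept>

// Allow everything by default, but make setxattr() fail with EPERM.
// (Nix's real filter also handles setuid/setgid file modes and more.)
static void installExampleFilter()
{
    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
    if (!ctx)
        throw std::runtime_error("unable to initialise seccomp filter");

    if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(setxattr), 0) != 0)
        throw std::runtime_error("unable to add seccomp rule");

    if (seccomp_load(ctx) != 0)
        throw std::runtime_error("unable to load seccomp BPF program");

    seccomp_release(ctx);   // frees userspace state only; the loaded filter stays active
}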
If we can't acquire the lock, then continue; hopefully some other @@ -1777,6 +1760,19 @@ PathSet exportReferences(Store & store, PathSet storePaths) return paths; } +static std::once_flag dns_resolve_flag; + +static void preloadNSS() { + /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of + one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already + been loaded in the parent. So we force a download of an invalid URL to force the NSS machinery to + load its lookup libraries in the parent before any child gets a chance to. */ + std::call_once(dns_resolve_flag, []() { + DownloadRequest request("http://this.pre-initializes.the.dns.resolvers.invalid"); + request.tries = 1; // We only need to do it once, and this also suppresses an annoying warning + try { getDownloader()->download(request); } catch (...) {} + }); +} void DerivationGoal::startBuilder() { @@ -1787,6 +1783,9 @@ void DerivationGoal::startBuilder() % drv->platform % settings.thisSystem % drvPath); } + if (drv->isBuiltin()) + preloadNSS(); + #if __APPLE__ additionalSandboxProfile = get(drv->env, "__sandboxProfile"); #endif @@ -1810,8 +1809,13 @@ void DerivationGoal::startBuilder() useChroot = !fixedOutput && get(drv->env, "__noChroot") != "1"; } - if (worker.store.storeDir != worker.store.realStoreDir) - useChroot = true; + if (worker.store.storeDir != worker.store.realStoreDir) { + #if __linux__ + useChroot = true; + #else + throw Error("building using a diverted store is not supported on this platform"); + #endif + } /* If `build-users-group' is not empty, then we have to build as one of the members of that group. */ @@ -2469,7 +2473,7 @@ void setupSeccomp() { #if __linux__ if (!settings.filterSyscalls) return; - +#if HAVE_SECCOMP scmp_filter_ctx ctx; if (!(ctx = seccomp_init(SCMP_ACT_ALLOW))) @@ -2515,6 +2519,11 @@ void setupSeccomp() if (seccomp_load(ctx) != 0) throw SysError("unable to load seccomp BPF program"); +#else + throw Error( + "seccomp is not supported on this platform; " + "you can bypass this error by setting the option 'filter-syscalls' to false, but note that untrusted builds can then create setuid binaries!"); +#endif #endif } @@ -2928,8 +2937,13 @@ void DerivationGoal::runChild() if (drv->isBuiltin()) { try { logger = makeJSONLogger(*logger); + + BasicDerivation drv2(*drv); + for (auto & e : drv2.env) + e.second = rewriteStrings(e.second, inputRewrites); + if (drv->builder == "builtin:fetchurl") - builtinFetchurl(*drv, netrcData); + builtinFetchurl(drv2, netrcData); else throw Error(format("unsupported builtin function '%1%'") % string(drv->builder, 8)); _exit(0); @@ -2992,6 +3006,8 @@ void DerivationGoal::registerOutputs() bool runDiffHook = settings.runDiffHook; bool keepPreviousRound = settings.keepFailed || runDiffHook; + std::exception_ptr delayedException; + /* Check whether the output paths were created, and grep each output path to determine what other paths it references. Also make all output paths read-only. */ @@ -3066,7 +3082,7 @@ void DerivationGoal::registerOutputs() /* Check that fixed-output derivations produced the right outputs (i.e., the content hash should match the specified hash). */ - if (i.second.hash != "") { + if (fixedOutput) { bool recursive; Hash h; i.second.parseHashInfo(recursive, h); @@ -3082,27 +3098,34 @@ void DerivationGoal::registerOutputs() /* Check the hash. In hash mode, move the path produced by the derivation to its content-addressed location. */ Hash h2 = recursive ? 
hashPath(h.type, actualPath).first : hashFile(h.type, actualPath); - if (buildMode == bmHash) { - Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]); - printError(format("build produced path '%1%' with %2% hash '%3%'") - % dest % printHashType(h.type) % printHash16or32(h2)); - if (worker.store.isValidPath(dest)) - return; + + Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]); + + if (h != h2) { + + /* Throw an error after registering the path as + valid. */ + delayedException = std::make_exception_ptr( + BuildError("fixed-output derivation produced path '%s' with %s hash '%s' instead of the expected hash '%s'", + dest, printHashType(h.type), printHash16or32(h2), printHash16or32(h))); + Path actualDest = worker.store.toRealPath(dest); + + if (worker.store.isValidPath(dest)) + std::rethrow_exception(delayedException); + if (actualPath != actualDest) { PathLocks outputLocks({actualDest}); deletePath(actualDest); if (rename(actualPath.c_str(), actualDest.c_str()) == -1) throw SysError(format("moving '%1%' to '%2%'") % actualPath % dest); } + path = dest; actualPath = actualDest; - } else { - if (h != h2) - throw BuildError( - format("output path '%1%' has %2% hash '%3%' when '%4%' was expected") - % path % i.second.hashAlgo % printHash16or32(h2) % printHash16or32(h)); } + else + assert(path == dest); info.ca = makeFixedOutputCA(recursive, h2); } @@ -3279,6 +3302,11 @@ void DerivationGoal::registerOutputs() paths referenced by each of them. If there are cycles in the outputs, this will fail. */ worker.store.registerValidPaths(infos); + + /* In case of a fixed-output derivation hash mismatch, throw an + exception now that we have registered the output as valid. */ + if (delayedException) + std::rethrow_exception(delayedException); } @@ -3394,7 +3422,7 @@ void DerivationGoal::flushLine() else { if (settings.verboseBuild && (settings.printRepeatedBuilds || curRound == 1)) - printError(filterANSIEscapes(currentLogLine, true)); + printError(currentLogLine); else { logTail.push_back(currentLogLine); if (logTail.size() > settings.logLines) logTail.pop_front(); @@ -3636,7 +3664,7 @@ void SubstitutionGoal::tryNext() /* Update the total expected download size. */ auto narInfo = std::dynamic_pointer_cast<const NarInfo>(info); - maintainExpectedNar = std::make_unique<MaintainCount<uint64_t>>(worker.expectedNarSize, narInfo->narSize); + maintainExpectedNar = std::make_unique<MaintainCount<uint64_t>>(worker.expectedNarSize, info->narSize); maintainExpectedDownload = narInfo && narInfo->fileSize @@ -3650,9 +3678,12 @@ void SubstitutionGoal::tryNext() /* Bail out early if this substituter lacks a valid signature. LocalStore::addToStore() also checks for this, but only after we've downloaded the path. */ - if (worker.store.requireSigs && !info->checkSignatures(worker.store, worker.store.publicKeys)) { - printInfo(format("warning: substituter '%s' does not have a valid signature for path '%s'") - % sub->getUri() % storePath); + if (worker.store.requireSigs + && !sub->isTrusted + && !info->checkSignatures(worker.store, worker.store.publicKeys)) + { + printError("warning: substituter '%s' does not have a valid signature for path '%s'", + sub->getUri(), storePath); tryNext(); return; } @@ -3702,6 +3733,17 @@ void SubstitutionGoal::tryToRun() return; } + /* If the store path is already locked (probably by a + DerivationGoal), then put this goal to sleep. 
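The fixed-output hash-mismatch handling above uses a pattern worth calling out: the error is captured with std::make_exception_ptr, the content-addressed output is still moved and registered as valid, and only afterwards is the exception rethrown. A stripped-down sketch of that capture-now, throw-later idiom, with the bookkeeping reduced to a comment:

#include <exception>
#include <stdexcept>

struct BuildError : std::runtime_error { using std::runtime_error::runtime_error; };

static void finishOutputs(bool hashMismatch)
{
    std::exception_ptr delayedException;

    if (hashMismatch)
        // Record the failure, but keep going so registration still happens.
        delayedException = std::make_exception_ptr(
            BuildError("fixed-output derivation produced a path with an unexpected hash"));

    /* ... register the produced path(s) as valid here ... */

    if (delayedException)
        std::rethrow_exception(delayedException);
}

// Usage: try { finishOutputs(true); } catch (BuildError & e) { /* report e.what() */ }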
Note: we don't + acquire a lock here since that breaks addToStore(), so below we + handle an AlreadyLocked exception from addToStore(). The check + here is just an optimisation to prevent having to redo a + download due to a locked path. */ + if (pathIsLockedByMe(worker.store.toRealPath(storePath))) { + worker.waitForAWhile(shared_from_this()); + return; + } + maintainRunningSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.runningSubstitutions); worker.updateProgress(); @@ -3718,7 +3760,7 @@ void SubstitutionGoal::tryToRun() PushActivity pact(act.id); copyStorePath(ref<Store>(sub), ref<Store>(worker.store.shared_from_this()), - storePath, repair); + storePath, repair, sub->isTrusted ? NoCheckSigs : CheckSigs); promise.set_value(); } catch (...) { @@ -3741,8 +3783,14 @@ void SubstitutionGoal::finished() try { promise.get_future().get(); + } catch (AlreadyLocked & e) { + /* Probably a DerivationGoal is already building this store + path. Sleep for a while and try again. */ + state = &SubstitutionGoal::init; + worker.waitForAWhile(shared_from_this()); + return; } catch (Error & e) { - printInfo(e.msg()); + printError(e.msg()); /* Try the next substitute. */ state = &SubstitutionGoal::tryNext; diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 4474dfd4b968..5ab625f42288 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -17,11 +17,13 @@ #include <curl/curl.h> -#include <queue> -#include <iostream> -#include <thread> +#include <algorithm> #include <cmath> +#include <cstring> +#include <iostream> +#include <queue> #include <random> +#include <thread> using namespace std::string_literals; @@ -91,6 +93,8 @@ struct CurlDownloader : public Downloader { if (!request.expectedETag.empty()) requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str()); + if (!request.mimeType.empty()) + requestHeaders = curl_slist_append(requestHeaders, ("Content-Type: " + request.mimeType).c_str()); } ~DownloadItem() @@ -185,6 +189,22 @@ struct CurlDownloader : public Downloader return 0; } + size_t readOffset = 0; + int readCallback(char *buffer, size_t size, size_t nitems) + { + if (readOffset == request.data->length()) + return 0; + auto count = std::min(size * nitems, request.data->length() - readOffset); + memcpy(buffer, request.data->data() + readOffset, count); + readOffset += count; + return count; + } + + static int readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp) + { + return ((DownloadItem *) userp)->readCallback(buffer, size, nitems); + } + long lowSpeedTimeout = 300; void init() @@ -225,6 +245,13 @@ struct CurlDownloader : public Downloader if (request.head) curl_easy_setopt(req, CURLOPT_NOBODY, 1); + if (request.data) { + curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); + curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper); + curl_easy_setopt(req, CURLOPT_READDATA, this); + curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length()); + } + if (request.verifyTLS) { if (settings.caFile != "") curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str()); @@ -265,7 +292,7 @@ struct CurlDownloader : public Downloader } if (code == CURLE_OK && - (httpStatus == 200 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */)) + (httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */)) { result.cached = httpStatus == 304; done = 
true; @@ -303,6 +330,7 @@ struct CurlDownloader : public Downloader // Don't bother retrying on certain cURL errors either switch (code) { case CURLE_FAILED_INIT: + case CURLE_URL_MALFORMAT: case CURLE_NOT_BUILT_IN: case CURLE_REMOTE_ACCESS_DENIED: case CURLE_FILE_COULDNT_READ_FILE: @@ -311,10 +339,10 @@ struct CurlDownloader : public Downloader case CURLE_BAD_FUNCTION_ARGUMENT: case CURLE_INTERFACE_FAILED: case CURLE_UNKNOWN_OPTION: - err = Misc; - break; + err = Misc; + break; default: // Shut up warnings - break; + break; } } @@ -689,7 +717,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa Path tmpDir = createTempDir(); AutoDelete autoDelete(tmpDir, true); // FIXME: this requires GNU tar for decompression. - runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"}); + runProgram("tar", true, {"xf", store->toRealPath(storePath), "-C", tmpDir, "--strip-components", "1"}); unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, NoRepair); } replaceSymlink(unpackedStorePath, unpackedLink); @@ -699,7 +727,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa if (expectedStorePath != "" && storePath != expectedStorePath) throw nix::Error("store path mismatch in file downloaded from '%s'", url); - return storePath; + return store->toRealPath(storePath); } diff --git a/src/libstore/download.hh b/src/libstore/download.hh index f2d65ad8d61d..d9d525d4e65f 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -18,6 +18,8 @@ struct DownloadRequest unsigned int baseRetryTimeMs = 250; ActivityId parentAct; bool decompress = true; + std::shared_ptr<std::string> data; + std::string mimeType; DownloadRequest(const std::string & uri) : uri(uri), parentAct(curActivity) { } diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index ab2c5ca0274c..943b16c28fa3 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -324,10 +324,8 @@ Roots LocalStore::findRootsNoTemp() { Roots roots; - /* Process direct roots in {gcroots,manifests,profiles}. */ + /* Process direct roots in {gcroots,profiles}. */ findRoots(stateDir + "/" + gcRootsDir, DT_UNKNOWN, roots); - if (pathExists(stateDir + "/manifests")) - findRoots(stateDir + "/manifests", DT_UNKNOWN, roots); findRoots(stateDir + "/profiles", DT_UNKNOWN, roots); /* Add additional roots returned by the program specified by the diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index d3c96ddd6e61..f46e8326235f 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -6,6 +6,7 @@ #include <algorithm> #include <map> #include <thread> +#include <dlfcn.h> namespace nix { @@ -37,6 +38,7 @@ Settings::Settings() , nixConfDir(canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR))) , nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR))) , nixBinDir(canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR))) + , nixManDir(canonPath(NIX_MAN_DIR)) , nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH)) { buildUsersGroup = getuid() == 0 ? 
"nixbld" : ""; @@ -137,4 +139,46 @@ void MaxBuildJobsSetting::set(const std::string & str) throw UsageError("configuration setting '%s' should be 'auto' or an integer", name); } + +void initPlugins() +{ + for (const auto & pluginFile : settings.pluginFiles.get()) { + Paths pluginFiles; + try { + auto ents = readDirectory(pluginFile); + for (const auto & ent : ents) + pluginFiles.emplace_back(pluginFile + "/" + ent.name); + } catch (SysError & e) { + if (e.errNo != ENOTDIR) + throw; + pluginFiles.emplace_back(pluginFile); + } + for (const auto & file : pluginFiles) { + /* handle is purposefully leaked as there may be state in the + DSO needed by the action of the plugin. */ + void *handle = + dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL); + if (!handle) + throw Error("could not dynamically open plugin file '%s%': %s%", file, dlerror()); + } + } + /* We handle settings registrations here, since plugins can add settings */ + if (RegisterSetting::settingRegistrations) { + for (auto & registration : *RegisterSetting::settingRegistrations) + settings.addSetting(registration); + delete RegisterSetting::settingRegistrations; + } + settings.handleUnknownSettings(); +} + +RegisterSetting::SettingRegistrations * RegisterSetting::settingRegistrations; + +RegisterSetting::RegisterSetting(AbstractSetting * s) +{ + if (!settingRegistrations) + settingRegistrations = new SettingRegistrations; + settingRegistrations->emplace_back(s); +} + + } diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 5c857cbb6a9c..dd01f832df0c 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -82,6 +82,9 @@ public: /* The directory where the main programs are stored. */ Path nixBinDir; + /* The directory where the man pages are stored. */ + Path nixManDir; + /* File name of the socket the daemon listens to. */ Path nixDaemonSocketFile; @@ -138,6 +141,11 @@ public: Setting<std::string> builders{this, "@" + nixConfDir + "/machines", "builders", "A semicolon-separated list of build machines, in the format of nix.machines."}; + Setting<bool> buildersUseSubstitutes{this, false, "builders-use-substitutes", + "Whether build machines should use their own substitutes for obtaining " + "build dependencies if possible, rather than waiting for this host to " + "upload them."}; + Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space", "Amount of reserved disk space for the garbage collector."}; @@ -150,7 +158,7 @@ public: Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering", "Whether to call sync() before registering a path as valid."}; - Setting<bool> useSubstitutes{this, true, "use-substitutes", + Setting<bool> useSubstitutes{this, true, "substitute", "Whether to use substitutes.", {"build-use-substitutes"}}; @@ -206,7 +214,8 @@ public: bool lockCPU; /* Whether to show a stack trace if Nix evaluation fails. 
*/ - bool showTrace = false; + Setting<bool> showTrace{this, false, "show-trace", + "Whether to show a stack trace on evaluation errors."}; Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation", "Whether builtin functions that allow executing native code should be enabled."}; @@ -227,6 +236,9 @@ public: "Whether to restrict file system access to paths in $NIX_PATH, " "and network access to the URI prefixes listed in 'allowed-uris'."}; + Setting<bool> pureEval{this, false, "pure-eval", + "Whether to restrict file system and network access to files specified by cryptographic hash."}; + Setting<size_t> buildRepeat{this, 0, "repeat", "The number of times to repeat a build in order to verify determinism.", {"build-repeat"}}; @@ -278,10 +290,7 @@ public: Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl", "How soon to expire files fetched by builtins.fetchTarball and builtins.fetchurl."}; - Setting<std::string> signedBinaryCaches{this, "*", "signed-binary-caches", - "Obsolete."}; - - Setting<bool> requireSigs{this, signedBinaryCaches == "*", "require-sigs", + Setting<bool> requireSigs{this, true, "require-sigs", "Whether to check that any non-content-addressed path added to the " "Nix store has a valid signature (that is, one signed using a key " "listed in 'trusted-public-keys'."}; @@ -361,14 +370,28 @@ public: Setting<Strings> allowedUris{this, {}, "allowed-uris", "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."}; + + Setting<Paths> pluginFiles{this, {}, "plugin-files", + "Plugins to dynamically load at nix initialization time."}; }; // FIXME: don't use a global variable. extern Settings settings; +/* This should be called after settings are initialized, but before + anything else */ +void initPlugins(); + extern const string nixVersion; +struct RegisterSetting +{ + typedef std::vector<AbstractSetting *> SettingRegistrations; + static SettingRegistrations * settingRegistrations; + RegisterSetting(AbstractSetting * s); +}; + } diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 057337685791..b9e9cd5daba5 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -38,7 +38,7 @@ public: try { BinaryCacheStore::init(); } catch (UploadToHTTP &) { - throw Error(format("'%s' does not appear to be a binary cache") % cacheUri); + throw Error("'%s' does not appear to be a binary cache", cacheUri); } diskCache->createCache(cacheUri, storeDir, wantMassQuery_, priority); } @@ -67,7 +67,14 @@ protected: const std::string & data, const std::string & mimeType) override { - throw UploadToHTTP("uploading to an HTTP binary cache is not supported"); + auto req = DownloadRequest(cacheUri + "/" + path); + req.data = std::make_shared<string>(data); // FIXME: inefficient + req.mimeType = mimeType; + try { + getDownloader()->download(req); + } catch (DownloadError & e) { + throw UploadToHTTP(format("uploading to HTTP binary cache at %1% not supported: %2%") % cacheUri % e.msg()); + } } void getFile(const std::string & path, diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 7afecc1cfc62..4afe51ea91ec 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -992,8 +992,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> & /* Lock the output path. But don't lock if we're being called from a build hook (whose parent process already acquired a lock on this path). 
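The plugin-files setting above is loaded by initPlugins(), which dlopen()s each file and deliberately never closes the handle, because the plugin's static registrations (settings via RegisterSetting, primops via RegisterPrimOp, store backends) must stay alive. A plugin, in turn, is just a shared object whose static constructors run at load time. A sketch of what one might look like; the primop name, behaviour, headers and build flags are assumptions for illustration, not part of this patch set:

// hypothetical upcase-plugin.cc, built as a shared object against the Nix headers
#include "primops.hh"
#include "eval.hh"

#include <cctype>
#include <string>

using namespace nix;

// builtins.upcase "foo" -> "FOO" (toy example, not a real Nix primop)
static void prim_upcase(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    std::string s = state.forceStringNoCtx(*args[0], pos);
    for (auto & c : s) c = std::toupper(static_cast<unsigned char>(c));
    mkString(v, s);
}

static RegisterPrimOp r("upcase", 1, prim_upcase);

Loading it would then be something along the lines of nix-instantiate --option plugin-files ./upcase-plugin.so --eval -E 'builtins.upcase "nix"' (hypothetical invocation).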
*/ - Strings locksHeld = tokenizeString<Strings>(getEnv("NIX_HELD_LOCKS")); - if (find(locksHeld.begin(), locksHeld.end(), info.path) == locksHeld.end()) + if (!locksHeld.count(info.path)) outputLock.lockPaths({realPath}); if (repair || !isValidPath(info.path)) { diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index d35cd1a949eb..bbd50e1c1451 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -19,7 +19,7 @@ namespace nix { /* Nix store and database schema version. Version 1 (or 0) was Nix <= 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10. Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is - Nix 1.0. Version 7 is Nix 1.3. Version 10 is 1.12. */ + Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0. */ const int nixSchemaVersion = 10; @@ -104,6 +104,9 @@ private: public: + // Hack for build-remote.cc. + PathSet locksHeld = tokenizeString<PathSet>(getEnv("NIX_HELD_LOCKS")); + /* Initialise the local store, upgrading the schema if necessary. */ LocalStore(const Params & params); diff --git a/src/libstore/local.mk b/src/libstore/local.mk index 50c46ce6fe99..e11efa5c2b54 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -9,6 +9,9 @@ libstore_SOURCES := $(wildcard $(d)/*.cc) libstore_LIBS = libutil libformat libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread +ifneq ($(OS), FreeBSD) + libstore_LDFLAGS += -ldl +endif libstore_FILES = sandbox-defaults.sb sandbox-minimal.sb sandbox-network.sb @@ -22,7 +25,7 @@ ifeq ($(OS), SunOS) libstore_LDFLAGS += -lsocket endif -ifeq ($(OS), Linux) +ifeq ($(HAVE_SECCOMP), 1) libstore_LDFLAGS += -lseccomp endif @@ -35,6 +38,7 @@ libstore_CXXFLAGS = \ -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \ -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \ -DNIX_BIN_DIR=\"$(bindir)\" \ + -DNIX_MAN_DIR=\"$(mandir)\" \ -DSANDBOX_SHELL="\"$(sandbox_shell)\"" \ -DLSOF=\"$(lsof)\" diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh index 1903355a236e..2871199de16e 100644 --- a/src/libstore/nar-accessor.hh +++ b/src/libstore/nar-accessor.hh @@ -1,5 +1,7 @@ #pragma once +#include <functional> + #include "fs-accessor.hh" namespace nix { diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc index 587f29598851..08d1efdbeb01 100644 --- a/src/libstore/pathlocks.cc +++ b/src/libstore/pathlocks.cc @@ -113,8 +113,10 @@ bool PathLocks::lockPaths(const PathSet & _paths, { auto lockedPaths(lockedPaths_.lock()); - if (lockedPaths->count(lockPath)) - throw Error("deadlock: trying to re-acquire self-held lock '%s'", lockPath); + if (lockedPaths->count(lockPath)) { + if (!wait) return false; + throw AlreadyLocked("deadlock: trying to re-acquire self-held lock '%s'", lockPath); + } lockedPaths->insert(lockPath); } diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh index 2a7de611446e..db51f950a320 100644 --- a/src/libstore/pathlocks.hh +++ b/src/libstore/pathlocks.hh @@ -2,10 +2,8 @@ #include "util.hh" - namespace nix { - /* Open (possibly create) a lock file and return the file descriptor. -1 is returned if create is false and the lock could not be opened because it doesn't exist. Any other error throws an exception. */ @@ -18,6 +16,7 @@ enum LockType { ltRead, ltWrite, ltNone }; bool lockFile(int fd, LockType lockType, bool wait); +MakeError(AlreadyLocked, Error); class PathLocks { @@ -38,9 +37,6 @@ public: void setDeletion(bool deletePaths); }; - -// FIXME: not thread-safe! 
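The PathLocks change above replaces the hard "deadlock" error with either a false return (when not waiting) or a recoverable AlreadyLocked exception, so a SubstitutionGoal can back off when a DerivationGoal in the same process already holds the path. A minimal sketch of such an in-process lock registry; Nix's real implementation additionally takes locks on lock files on disk:

#include <mutex>
#include <set>
#include <stdexcept>
#include <string>

struct AlreadyLocked : std::runtime_error { using std::runtime_error::runtime_error; };

static std::mutex lockedPathsMutex;
static std::set<std::string> lockedPaths;   // paths currently locked by this process

// Returns false if 'path' is already held and wait == false; throws if the
// caller insisted on waiting, which would deadlock against ourselves.
static bool lockPath(const std::string & path, bool wait = true)
{
    std::lock_guard<std::mutex> guard(lockedPathsMutex);
    if (lockedPaths.count(path)) {
        if (!wait) return false;
        throw AlreadyLocked("trying to re-acquire self-held lock '" + path + "'");
    }
    lockedPaths.insert(path);
    return true;
}

static void unlockPath(const std::string & path)
{
    std::lock_guard<std::mutex> guard(lockedPathsMutex);
    lockedPaths.erase(path);
}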
bool pathIsLockedByMe(const Path & path); - } diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 77b41b6bf8a8..8f0b65557ac4 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -78,9 +78,22 @@ UDSRemoteStore::UDSRemoteStore(const Params & params) } +UDSRemoteStore::UDSRemoteStore(std::string socket_path, const Params & params) + : Store(params) + , LocalFSStore(params) + , RemoteStore(params) + , path(socket_path) +{ +} + + std::string UDSRemoteStore::getUri() { - return "daemon"; + if (path) { + return std::string("unix://") + *path; + } else { + return "daemon"; + } } @@ -98,7 +111,7 @@ ref<RemoteStore::Connection> UDSRemoteStore::openConnection() throw SysError("cannot create Unix domain socket"); closeOnExec(conn->fd.get()); - string socketPath = settings.nixDaemonSocketFile; + string socketPath = path ? *path : settings.nixDaemonSocketFile; struct sockaddr_un addr; addr.sun_family = AF_UNIX; @@ -721,5 +734,14 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source) } } +static std::string uriScheme = "unix://"; + +static RegisterStoreImplementation regStore([]( + const std::string & uri, const Store::Params & params) + -> std::shared_ptr<Store> +{ + if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0; + return std::make_shared<UDSRemoteStore>(std::string(uri, uriScheme.size()), params); +}); } diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 30c6beae6ff2..7f36e206416b 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -134,6 +134,7 @@ class UDSRemoteStore : public LocalFSStore, public RemoteStore public: UDSRemoteStore(const Params & params); + UDSRemoteStore(std::string path, const Params & params); std::string getUri() override; @@ -145,6 +146,7 @@ private: }; ref<RemoteStore::Connection> openConnection() override; + std::experimental::optional<std::string> path; }; diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 0079da1becfb..23af452094cf 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -10,6 +10,7 @@ #include "istringstream_nocopy.hh" #include <aws/core/Aws.h> +#include <aws/core/VersionConfig.h> #include <aws/core/auth/AWSCredentialsProvider.h> #include <aws/core/auth/AWSCredentialsProviderChain.h> #include <aws/core/client/ClientConfiguration.h> @@ -87,7 +88,14 @@ S3Helper::S3Helper(const std::string & profile, const std::string & region) std::make_shared<Aws::Auth::DefaultAWSCredentialsProviderChain>()) : std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>( std::make_shared<Aws::Auth::ProfileConfigFileAWSCredentialsProvider>(profile.c_str())), - *config, true, false)) + *config, + // FIXME: https://github.com/aws/aws-sdk-cpp/issues/759 +#if AWS_VERSION_MAJOR == 1 && AWS_VERSION_MINOR < 3 + false, +#else + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, +#endif + false)) { } diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 77ab87ef728e..8830edcc3449 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -222,11 +222,10 @@ Path Store::makeTextPath(const string & name, const Hash & hash, } -std::pair<Path, Hash> Store::computeStorePathForPath(const Path & srcPath, - bool recursive, HashType hashAlgo, PathFilter & filter) const +std::pair<Path, Hash> Store::computeStorePathForPath(const string & name, + const Path & srcPath, bool recursive, HashType hashAlgo, PathFilter & filter) 
const { Hash h = recursive ? hashPath(hashAlgo, srcPath, filter).first : hashFile(hashAlgo, srcPath); - string name = baseNameOf(srcPath); Path dstPath = makeFixedOutputPath(recursive, h, name); return std::pair<Path, Hash>(dstPath, h); } @@ -840,7 +839,7 @@ ref<Store> openStore(const std::string & uri_, for (auto fun : *RegisterStoreImplementation::implementations) { auto store = fun(uri, params); if (store) { - store->warnUnknownSettings(); + store->handleUnknownSettings(); return ref<Store>(store); } } @@ -897,7 +896,11 @@ std::list<ref<Store>> getDefaultSubstituters() auto addStore = [&](const std::string & uri) { if (done.count(uri)) return; done.insert(uri); - stores.push_back(openStore(uri)); + try { + stores.push_back(openStore(uri)); + } catch (Error & e) { + printError("warning: %s", e.what()); + } }; for (auto uri : settings.substituters.get()) diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index d1e1b5d6f452..563aa566bd37 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -192,7 +192,7 @@ struct ValidPathInfo typedef list<ValidPathInfo> ValidPathInfos; -enum BuildMode { bmNormal, bmRepair, bmCheck, bmHash }; +enum BuildMode { bmNormal, bmRepair, bmCheck }; struct BuildResult @@ -248,6 +248,8 @@ public: const Setting<int> pathInfoCacheSize{this, 65536, "path-info-cache-size", "size of the in-memory store path information cache"}; + const Setting<bool> isTrusted{this, false, "trusted", "whether paths from this store can be used as substitutes even when they lack trusted signatures"}; + protected: struct State @@ -305,9 +307,9 @@ public: /* This is the preparatory part of addToStore(); it computes the store path to which srcPath is to be copied. Returns the store path and the cryptographic hash of the contents of srcPath. */ - std::pair<Path, Hash> computeStorePathForPath(const Path & srcPath, - bool recursive = true, HashType hashAlgo = htSHA256, - PathFilter & filter = defaultPathFilter) const; + std::pair<Path, Hash> computeStorePathForPath(const string & name, + const Path & srcPath, bool recursive = true, + HashType hashAlgo = htSHA256, PathFilter & filter = defaultPathFilter) const; /* Preparatory part of addTextToStore(). @@ -597,6 +599,11 @@ public: "nix-cache-info" file. Lower value means higher priority. */ virtual int getPriority() { return 0; } + virtual Path toRealPath(const Path & storePath) + { + return storePath; + } + protected: Stats stats; @@ -639,9 +646,10 @@ public: virtual Path getRealStoreDir() { return storeDir; } - Path toRealPath(const Path & storePath) + Path toRealPath(const Path & storePath) override { - return getRealStoreDir() + "/" + baseNameOf(storePath); + assert(isInStore(storePath)); + return getRealStoreDir() + "/" + std::string(storePath, storeDir.size() + 1); } std::shared_ptr<std::string> getBuildLog(const Path & path) override; @@ -699,6 +707,9 @@ void removeTempRoots(); * ‘daemon’: The Nix store accessed via a Unix domain socket connection to nix-daemon. + * ‘unix://<path>’: The Nix store accessed via a Unix domain socket + connection to nix-daemon, with the socket located at <path>. + * ‘auto’ or ‘’: Equivalent to ‘local’ or ‘daemon’ depending on whether the user has write access to the local Nix store/database. 
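The new unix://<path> store URI documented above works through the same self-registration mechanism as the other store backends: a static RegisterStoreImplementation object adds a factory that inspects the URI prefix and either returns a store or declines. A generic sketch of that pattern, detached from the Nix classes:

#include <functional>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

struct Store { virtual ~Store() = default; virtual std::string uri() const = 0; };

using StoreFactory = std::function<std::shared_ptr<Store>(const std::string & uri)>;

static std::vector<StoreFactory> & factories()
{
    static std::vector<StoreFactory> fs;   // created on first registration
    return fs;
}

struct RegisterStore { RegisterStore(StoreFactory f) { factories().push_back(std::move(f)); } };

struct UnixSocketStore : Store
{
    std::string path;
    UnixSocketStore(std::string p) : path(std::move(p)) { }
    std::string uri() const override { return "unix://" + path; }
};

// Each backend registers a factory that declines URIs it doesn't recognise.
static RegisterStore regUnix([](const std::string & uri) -> std::shared_ptr<Store> {
    const std::string scheme = "unix://";
    if (uri.compare(0, scheme.size(), scheme) != 0) return nullptr;
    return std::make_shared<UnixSocketStore>(uri.substr(scheme.size()));
});

static std::shared_ptr<Store> openStore(const std::string & uri)
{
    for (auto & f : factories())
        if (auto store = f(uri)) return store;
    throw std::runtime_error("don't know how to open store '" + uri + "'");
}

The sketch uses a function-local static to dodge static-initialisation-order problems; the Nix code uses the same idea with a heap-allocated list created by the first registration.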
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index 2b3dff3a5ea1..470c925ed7a6 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -1,12 +1,18 @@ #include "compression.hh" #include "util.hh" #include "finally.hh" +#include "logging.hh" #include <lzma.h> #include <bzlib.h> #include <cstdio> #include <cstring> +#if HAVE_BROTLI +#include <brotli/decode.h> +#include <brotli/encode.h> +#endif // HAVE_BROTLI + #include <iostream> namespace nix { @@ -94,14 +100,62 @@ static ref<std::string> decompressBzip2(const std::string & in) static ref<std::string> decompressBrotli(const std::string & in) { - // FIXME: use libbrotli - return make_ref<std::string>(runProgram(BRO, true, {"-d"}, {in})); +#if !HAVE_BROTLI + return make_ref<std::string>(runProgram(BROTLI, true, {"-d"}, {in})); +#else + auto *s = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr); + if (!s) + throw CompressionError("unable to initialize brotli decoder"); + + Finally free([s]() { BrotliDecoderDestroyInstance(s); }); + + uint8_t outbuf[BUFSIZ]; + ref<std::string> res = make_ref<std::string>(); + const uint8_t *next_in = (uint8_t *)in.c_str(); + size_t avail_in = in.size(); + uint8_t *next_out = outbuf; + size_t avail_out = sizeof(outbuf); + + while (true) { + checkInterrupt(); + + auto ret = BrotliDecoderDecompressStream(s, + &avail_in, &next_in, + &avail_out, &next_out, + nullptr); + + switch (ret) { + case BROTLI_DECODER_RESULT_ERROR: + throw CompressionError("error while decompressing brotli file"); + case BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: + throw CompressionError("incomplete or corrupt brotli file"); + case BROTLI_DECODER_RESULT_SUCCESS: + if (avail_in != 0) + throw CompressionError("unexpected input after brotli decompression"); + break; + case BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: + // I'm not sure if this can happen, but abort if this happens with empty buffer + if (avail_out == sizeof(outbuf)) + throw CompressionError("brotli decompression requires larger buffer"); + break; + } + + // Always ensure we have full buffer for next invocation + if (avail_out < sizeof(outbuf)) { + res->append((char*)outbuf, sizeof(outbuf) - avail_out); + next_out = outbuf; + avail_out = sizeof(outbuf); + } + + if (ret == BROTLI_DECODER_RESULT_SUCCESS) return res; + } +#endif // HAVE_BROTLI } -ref<std::string> compress(const std::string & method, const std::string & in) +ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel) { StringSink ssink; - auto sink = makeCompressionSink(method, ssink); + auto sink = makeCompressionSink(method, ssink, parallel); (*sink)(in); sink->finish(); return ssink.s; @@ -136,10 +190,9 @@ struct XzSink : CompressionSink lzma_stream strm = LZMA_STREAM_INIT; bool finished = false; - XzSink(Sink & nextSink) : nextSink(nextSink) - { - lzma_ret ret = lzma_easy_encoder( - &strm, 6, LZMA_CHECK_CRC64); + template <typename F> + XzSink(Sink & nextSink, F&& initEncoder) : nextSink(nextSink) { + lzma_ret ret = initEncoder(); if (ret != LZMA_OK) throw CompressionError("unable to initialise lzma encoder"); // FIXME: apply the x86 BCJ filter? 
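The streaming BrotliDecoder code above replaces shelling out to bro/brotli whenever libbrotli is available. For quick experiments the library also offers a one-shot API; a small round-trip sketch, assuming HAVE_BROTLI and with buffer sizing kept deliberately simple:

#include <brotli/decode.h>
#include <brotli/encode.h>

#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

static std::string brotliRoundTrip(const std::string & in)
{
    // One-shot compression; the streaming API above is what Nix uses for NARs.
    std::vector<uint8_t> compressed(BrotliEncoderMaxCompressedSize(in.size()));
    size_t compressedSize = compressed.size();
    if (!BrotliEncoderCompress(BROTLI_DEFAULT_QUALITY, BROTLI_DEFAULT_WINDOW, BROTLI_DEFAULT_MODE,
            in.size(), (const uint8_t *) in.data(), &compressedSize, compressed.data()))
        throw std::runtime_error("brotli compression failed");

    // We know the original size here, so a single output buffer suffices.
    std::vector<uint8_t> decompressed(in.size());
    size_t decompressedSize = decompressed.size();
    if (BrotliDecoderDecompress(compressedSize, compressed.data(),
            &decompressedSize, decompressed.data()) != BROTLI_DECODER_RESULT_SUCCESS)
        throw std::runtime_error("brotli decompression failed");

    return std::string((const char *) decompressed.data(), decompressedSize);
}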
@@ -147,6 +200,9 @@ struct XzSink : CompressionSink strm.next_out = outbuf; strm.avail_out = sizeof(outbuf); } + XzSink(Sink & nextSink) : XzSink(nextSink, [this]() { + return lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64); + }) {} ~XzSink() { @@ -200,6 +256,27 @@ struct XzSink : CompressionSink } }; +#ifdef HAVE_LZMA_MT +struct ParallelXzSink : public XzSink +{ + ParallelXzSink(Sink &nextSink) : XzSink(nextSink, [this]() { + lzma_mt mt_options = {}; + mt_options.flags = 0; + mt_options.timeout = 300; // Using the same setting as the xz cmd line + mt_options.preset = LZMA_PRESET_DEFAULT; + mt_options.filters = NULL; + mt_options.check = LZMA_CHECK_CRC64; + mt_options.threads = lzma_cputhreads(); + mt_options.block_size = 0; + if (mt_options.threads == 0) + mt_options.threads = 1; + // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the + // number of threads. + return lzma_stream_encoder_mt(&strm, &mt_options); + }) {} +}; +#endif + struct BzipSink : CompressionSink { Sink & nextSink; @@ -270,36 +347,142 @@ struct BzipSink : CompressionSink } }; -struct BrotliSink : CompressionSink +struct LambdaCompressionSink : CompressionSink { Sink & nextSink; std::string data; + using CompressFnTy = std::function<std::string(const std::string&)>; + CompressFnTy compressFn; + LambdaCompressionSink(Sink& nextSink, CompressFnTy compressFn) + : nextSink(nextSink) + , compressFn(std::move(compressFn)) + { + }; + + void finish() override + { + flush(); + nextSink(compressFn(data)); + } + + void write(const unsigned char * data, size_t len) override + { + checkInterrupt(); + this->data.append((const char *) data, len); + } +}; + +struct BrotliCmdSink : LambdaCompressionSink +{ + BrotliCmdSink(Sink& nextSink) + : LambdaCompressionSink(nextSink, [](const std::string& data) { + return runProgram(BROTLI, true, {}, data); + }) + { + } +}; + +#if HAVE_BROTLI +struct BrotliSink : CompressionSink +{ + Sink & nextSink; + uint8_t outbuf[BUFSIZ]; + BrotliEncoderState *state; + bool finished = false; BrotliSink(Sink & nextSink) : nextSink(nextSink) { + state = BrotliEncoderCreateInstance(nullptr, nullptr, nullptr); + if (!state) + throw CompressionError("unable to initialise brotli encoder"); } ~BrotliSink() { + BrotliEncoderDestroyInstance(state); } - // FIXME: use libbrotli - void finish() override { flush(); - nextSink(runProgram(BRO, true, {}, data)); + assert(!finished); + + const uint8_t *next_in = nullptr; + size_t avail_in = 0; + uint8_t *next_out = outbuf; + size_t avail_out = sizeof(outbuf); + while (!finished) { + checkInterrupt(); + + if (!BrotliEncoderCompressStream(state, + BROTLI_OPERATION_FINISH, + &avail_in, &next_in, + &avail_out, &next_out, + nullptr)) + throw CompressionError("error while finishing brotli file"); + + finished = BrotliEncoderIsFinished(state); + if (avail_out == 0 || finished) { + nextSink(outbuf, sizeof(outbuf) - avail_out); + next_out = outbuf; + avail_out = sizeof(outbuf); + } + } } void write(const unsigned char * data, size_t len) override { - checkInterrupt(); - this->data.append((const char *) data, len); + assert(!finished); + + // Don't feed brotli too much at once + const size_t CHUNK_SIZE = sizeof(outbuf) << 2; + while (len) { + size_t n = std::min(CHUNK_SIZE, len); + writeInternal(data, n); + data += n; + len -= n; + } + } + private: + void writeInternal(const unsigned char * data, size_t len) + { + assert(!finished); + + const uint8_t *next_in = data; + size_t avail_in = len; + uint8_t *next_out = outbuf; + size_t avail_out = sizeof(outbuf); + + while 
(avail_in > 0) { + checkInterrupt(); + + if (!BrotliEncoderCompressStream(state, + BROTLI_OPERATION_PROCESS, + &avail_in, &next_in, + &avail_out, &next_out, + nullptr)) + throw CompressionError("error while compressing brotli file"); + + if (avail_out < sizeof(outbuf) || avail_in == 0) { + nextSink(outbuf, sizeof(outbuf) - avail_out); + next_out = outbuf; + avail_out = sizeof(outbuf); + } + } } }; +#endif // HAVE_BROTLI -ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink) +ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel) { + if (parallel) { +#ifdef HAVE_LZMA_MT + if (method == "xz") + return make_ref<ParallelXzSink>(nextSink); +#endif + printMsg(lvlError, format("Warning: parallel compression requested but not supported for method '%1%', falling back to single-threaded compression") % method); + } + if (method == "none") return make_ref<NoneSink>(nextSink); else if (method == "xz") @@ -307,7 +490,11 @@ ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & next else if (method == "bzip2") return make_ref<BzipSink>(nextSink); else if (method == "br") +#if HAVE_BROTLI return make_ref<BrotliSink>(nextSink); +#else + return make_ref<BrotliCmdSink>(nextSink); +#endif else throw UnknownCompressionMethod(format("unknown compression method '%s'") % method); } diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh index e3e6f5a99303..a0d7530d74fc 100644 --- a/src/libutil/compression.hh +++ b/src/libutil/compression.hh @@ -8,7 +8,7 @@ namespace nix { -ref<std::string> compress(const std::string & method, const std::string & in); +ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel = false); ref<std::string> decompress(const std::string & method, const std::string & in); @@ -17,7 +17,7 @@ struct CompressionSink : BufferedSink virtual void finish() = 0; }; -ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink); +ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel = false); MakeError(UnknownCompressionMethod, Error); diff --git a/src/libutil/config.cc b/src/libutil/config.cc index d46ca65a3863..ce6858f0d65a 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -7,10 +7,12 @@ namespace nix { void Config::set(const std::string & name, const std::string & value) { auto i = _settings.find(name); - if (i == _settings.end()) - throw UsageError("unknown setting '%s'", name); - i->second.setting->set(value); - i->second.setting->overriden = true; + if (i == _settings.end()) { + extras.emplace(name, value); + } else { + i->second.setting->set(value); + i->second.setting->overriden = true; + } } void Config::addSetting(AbstractSetting * setting) @@ -21,34 +23,34 @@ void Config::addSetting(AbstractSetting * setting) bool set = false; - auto i = initials.find(setting->name); - if (i != initials.end()) { + auto i = extras.find(setting->name); + if (i != extras.end()) { setting->set(i->second); setting->overriden = true; - initials.erase(i); + extras.erase(i); set = true; } for (auto & alias : setting->aliases) { - auto i = initials.find(alias); - if (i != initials.end()) { + auto i = extras.find(alias); + if (i != extras.end()) { if (set) warn("setting '%s' is set, but it's an alias of '%s' which is also set", alias, setting->name); else { setting->set(i->second); setting->overriden = true; - initials.erase(i); + extras.erase(i); set = true; } } } } 
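ParallelXzSink above is the consumer of the new HAVE_LZMA_MT check: it initialises the multi-threaded encoder instead of lzma_easy_encoder and reuses XzSink's write loop, and makeCompressionSink only honours parallel=true for xz, warning and falling back otherwise. A standalone sketch of the same initialisation plus a minimal compression loop; the options mirror the patch, but this is not Nix's sink plumbing:

#include <lzma.h>

#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <string>

static std::string xzCompressParallel(const std::string & in)
{
    lzma_stream strm = LZMA_STREAM_INIT;

    lzma_mt options = {};
    options.preset = LZMA_PRESET_DEFAULT;
    options.check = LZMA_CHECK_CRC64;
    options.threads = lzma_cputhreads();
    if (options.threads == 0) options.threads = 1;

    if (lzma_stream_encoder_mt(&strm, &options) != LZMA_OK)
        throw std::runtime_error("unable to initialise multi-threaded xz encoder");

    std::string out;
    uint8_t outbuf[BUFSIZ];
    strm.next_in = (const uint8_t *) in.data();
    strm.avail_in = in.size();

    lzma_ret ret;
    do {
        strm.next_out = outbuf;
        strm.avail_out = sizeof(outbuf);
        ret = lzma_code(&strm, LZMA_FINISH);   // all input is in memory, so finish in one pass
        if (ret != LZMA_OK && ret != LZMA_STREAM_END)
            throw std::runtime_error("error while compressing xz file");
        out.append((const char *) outbuf, sizeof(outbuf) - strm.avail_out);
    } while (ret != LZMA_STREAM_END);

    lzma_end(&strm);
    return out;
}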
-void Config::warnUnknownSettings() +void Config::handleUnknownSettings() { - for (auto & i : initials) - warn("unknown setting '%s'", i.first); + for (auto & s : extras) + warn("unknown setting '%s'", s.first); } StringMap Config::getSettings(bool overridenOnly) @@ -60,7 +62,7 @@ StringMap Config::getSettings(bool overridenOnly) return res; } -void Config::applyConfigFile(const Path & path, bool fatal) +void Config::applyConfigFile(const Path & path) { try { string contents = readFile(path); @@ -80,7 +82,31 @@ void Config::applyConfigFile(const Path & path, bool fatal) vector<string> tokens = tokenizeString<vector<string> >(line); if (tokens.empty()) continue; - if (tokens.size() < 2 || tokens[1] != "=") + if (tokens.size() < 2) + throw UsageError("illegal configuration line '%1%' in '%2%'", line, path); + + auto include = false; + auto ignoreMissing = false; + if (tokens[0] == "include") + include = true; + else if (tokens[0] == "!include") { + include = true; + ignoreMissing = true; + } + + if (include) { + if (tokens.size() != 2) + throw UsageError("illegal configuration line '%1%' in '%2%'", line, path); + auto p = absPath(tokens[1], dirOf(path)); + if (pathExists(p)) { + applyConfigFile(p); + } else if (!ignoreMissing) { + throw Error("file '%1%' included from '%2%' not found", p, path); + } + continue; + } + + if (tokens[1] != "=") throw UsageError("illegal configuration line '%1%' in '%2%'", line, path); string name = tokens[0]; @@ -88,12 +114,7 @@ void Config::applyConfigFile(const Path & path, bool fatal) vector<string>::iterator i = tokens.begin(); advance(i, 2); - try { - set(name, concatStringsSep(" ", Strings(i, tokens.end()))); // FIXME: slow - } catch (UsageError & e) { - if (fatal) throw; - warn("in configuration file '%s': %s", path, e.what()); - } + set(name, concatStringsSep(" ", Strings(i, tokens.end()))); // FIXME: slow }; } catch (SysError &) { } } diff --git a/src/libutil/config.hh b/src/libutil/config.hh index 9a32af528ec7..d2e7faf17434 100644 --- a/src/libutil/config.hh +++ b/src/libutil/config.hh @@ -48,25 +48,25 @@ private: Settings _settings; - StringMap initials; + StringMap extras; public: Config(const StringMap & initials) - : initials(initials) + : extras(initials) { } void set(const std::string & name, const std::string & value); void addSetting(AbstractSetting * setting); - void warnUnknownSettings(); + void handleUnknownSettings(); StringMap getSettings(bool overridenOnly = false); const Settings & _getSettings() { return _settings; } - void applyConfigFile(const Path & path, bool fatal = false); + void applyConfigFile(const Path & path); void resetOverriden(); diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index 11e3c9dca58a..75e4767550f7 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -189,7 +189,8 @@ Hash::Hash(const std::string & s, HashType type) else if (size == base64Len()) { auto d = base64Decode(std::string(s, pos)); - assert(d.size() == hashSize); + if (d.size() != hashSize) + throw BadHash("invalid base-64 hash '%s'", s); memcpy(hash, d.data(), hashSize); } diff --git a/src/libutil/local.mk b/src/libutil/local.mk index 0721b21c2089..5fc2aab569da 100644 --- a/src/libutil/local.mk +++ b/src/libutil/local.mk @@ -6,8 +6,8 @@ libutil_DIR := $(d) libutil_SOURCES := $(wildcard $(d)/*.cc) -libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) +libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) libutil_LIBS = libformat -libutil_CXXFLAGS = -DBRO=\"$(bro)\" +libutil_CXXFLAGS = 
-DBROTLI=\"$(brotli)\" diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 6924e0080475..27a631a37d10 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -44,7 +44,7 @@ public: prefix = std::string("<") + c + ">"; } - writeToStderr(prefix + (tty ? fs.s : filterANSIEscapes(fs.s)) + "\n"); + writeToStderr(prefix + filterANSIEscapes(fs.s) + "\n"); } void startActivity(ActivityId act, Verbosity lvl, ActivityType type, diff --git a/src/libutil/monitor-fd.hh b/src/libutil/monitor-fd.hh index e0ec66c01803..5ee0b88ef50f 100644 --- a/src/libutil/monitor-fd.hh +++ b/src/libutil/monitor-fd.hh @@ -21,13 +21,29 @@ public: MonitorFdHup(int fd) { thread = std::thread([fd]() { - /* Wait indefinitely until a POLLHUP occurs. */ - struct pollfd fds[1]; - fds[0].fd = fd; - fds[0].events = 0; - if (poll(fds, 1, -1) == -1) abort(); // can't happen - assert(fds[0].revents & POLLHUP); - triggerInterrupt(); + while (true) { + /* Wait indefinitely until a POLLHUP occurs. */ + struct pollfd fds[1]; + fds[0].fd = fd; + /* This shouldn't be necessary, but macOS doesn't seem to + like a zeroed out events field. + See rdar://37537852. + */ + fds[0].events = POLLHUP; + auto count = poll(fds, 1, -1); + if (count == -1) abort(); // can't happen + /* This shouldn't happen, but can on macOS due to a bug. + See rdar://37550628. + + This may eventually need a delay or further + coordination with the main thread if spinning proves + too harmful. + */ + if (count == 0) continue; + assert(fds[0].revents & POLLHUP); + triggerInterrupt(); + break; + } }); }; diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 950e6362a245..9e2a502afaf8 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -67,7 +67,8 @@ void FdSink::write(const unsigned char * data, size_t len) try { writeFull(fd, data, len); } catch (SysError & e) { - _good = true; + _good = false; + throw; } } diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 197df0c44aa0..2391e14a94bd 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -73,6 +73,13 @@ std::map<std::string, std::string> getEnv() } +void clearEnv() +{ + for (auto & name : getEnv()) + unsetenv(name.first.c_str()); +} + + Path absPath(Path path, Path dir) { if (path[0] != '/') { @@ -192,6 +199,12 @@ bool isInDir(const Path & path, const Path & dir) } +bool isDirOrInDir(const Path & path, const Path & dir) +{ + return path == dir or isInDir(path, dir); +} + + struct stat lstat(const Path & path) { struct stat st; @@ -1172,36 +1185,51 @@ void ignoreException() } -string filterANSIEscapes(const string & s, bool nixOnly) +std::string filterANSIEscapes(const std::string & s, unsigned int width) { - string t, r; - enum { stTop, stEscape, stCSI } state = stTop; - for (auto c : s) { - if (state == stTop) { - if (c == '\e') { - state = stEscape; - r = c; - } else - t += c; - } else if (state == stEscape) { - r += c; - if (c == '[') - state = stCSI; - else { - t += r; - state = stTop; + std::string t, e; + size_t w = 0; + auto i = s.begin(); + + while (w < (size_t) width && i != s.end()) { + + if (*i == '\e') { + std::string e; + e += *i++; + char last = 0; + + if (i != s.end() && *i == '[') { + e += *i++; + // eat parameter bytes + while (i != s.end() && *i >= 0x30 && *i <= 0x3f) e += *i++; + // eat intermediate bytes + while (i != s.end() && *i >= 0x20 && *i <= 0x2f) e += *i++; + // eat final byte + if (i != s.end() && *i >= 0x40 && *i <= 0x7e) e += last = *i++; + } else { + if (i != s.end() && *i >= 0x40 && *i <= 0x5f) e += *i++; } - } 
else { - r += c; - if (c >= 0x40 && c <= 0x7e) { - if (nixOnly && (c != 'p' && c != 'q' && c != 's' && c != 'a' && c != 'b')) - t += r; - state = stTop; - r.clear(); + + if (last == 'm') + t += e; + } + + else if (*i == '\t') { + i++; t += ' '; w++; + while (w < (size_t) width && w % 8) { + t += ' '; w++; } } + + else if (*i == '\r') + // do nothing for now + i++; + + else { + t += *i++; w++; + } } - t += r; + return t; } diff --git a/src/libutil/util.hh b/src/libutil/util.hh index a3494e09b09b..c5c537ee63d8 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -32,6 +32,9 @@ string getEnv(const string & key, const string & def = ""); /* Get the entire environment. */ std::map<std::string, std::string> getEnv(); +/* Clear the environment. */ +void clearEnv(); + /* Return an absolutized path, resolving paths relative to the specified directory, or the current directory otherwise. The path is also canonicalised. */ @@ -53,10 +56,12 @@ Path dirOf(const Path & path); following the final `/'. */ string baseNameOf(const Path & path); -/* Check whether a given path is a descendant of the given - directory. */ +/* Check whether 'path' is a descendant of 'dir'. */ bool isInDir(const Path & path, const Path & dir); +/* Check whether 'path' is equal to 'dir' or a descendant of 'dir'. */ +bool isDirOrInDir(const Path & path, const Path & dir); + /* Get status of `path'. */ struct stat lstat(const Path & path); @@ -386,10 +391,12 @@ void ignoreException(); #define ANSI_BLUE "\e[34;1m" -/* Filter out ANSI escape codes from the given string. If ‘nixOnly’ is - set, only filter escape codes generated by Nixpkgs' stdenv (used to - denote nesting etc.). */ -string filterANSIEscapes(const string & s, bool nixOnly = false); +/* Truncate a string to 'width' printable characters. Certain ANSI + escape sequences (such as colour setting) are copied but not + included in the character count. Other ANSI escape sequences are + filtered. Also, tabs are expanded to spaces. */ +std::string filterANSIEscapes(const std::string & s, + unsigned int width = std::numeric_limits<unsigned int>::max()); /* Base64 encoding/decoding. */ diff --git a/src/linenoise/linenoise.cpp b/src/linenoise/linenoise.cpp index 8ee8984d6948..c57505d2fa97 100644 --- a/src/linenoise/linenoise.cpp +++ b/src/linenoise/linenoise.cpp @@ -2587,13 +2587,6 @@ int InputBuffer::getInputLine(PromptBase& pi) { // ctrl-I/tab, command completion, needs to be before switch statement if (c == ctrlChar('I') && completionCallback) { - if (pos == 0) // SERVER-4967 -- in earlier versions, you could paste - // previous output - continue; // back into the shell ... this output may have leading - // tabs. - // This hack (i.e. what the old code did) prevents command completion - // on an empty line but lets users paste text with leading tabs. 
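The rewritten filterANSIEscapes above changes its contract: instead of the old ad-hoc 'nixOnly' filter it now truncates to a printable width, keeps SGR (colour) sequences without counting them, drops other escapes, expands tabs and swallows carriage returns. A usage sketch against the new signature declared in util.hh (assuming the function lives in namespace nix, as the rest of util.hh does):

#include "util.hh"

#include <iostream>
#include <string>

int main()
{
    std::string line = "\e[32mbuilding\e[0m\t/nix/store/...-example.drv\r";

    // Keep at most 80 printable columns; the colour codes survive but do not
    // count towards the width, the tab is expanded to spaces, the '\r' is dropped.
    std::cout << nix::filterANSIEscapes(line, 80) << std::endl;

    // Default width is effectively unlimited, so this only normalises
    // escapes, tabs and carriage returns without truncating.
    std::cout << nix::filterANSIEscapes(line) << std::endl;
}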
- killRing.lastAction = KillRing::actionOther; historyRecallMostRecent = false; diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index 58366daa6e86..99f773451ffe 100755 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -141,7 +141,7 @@ void mainWrapped(int argc, char * * argv) else if (*arg == "--version") printVersion(myName); - else if (*arg == "--add-drv-link") + else if (*arg == "--add-drv-link" || *arg == "--indirect") ; // obsolete else if (*arg == "--no-out-link" || *arg == "--no-link") @@ -167,9 +167,6 @@ void mainWrapped(int argc, char * * argv) buildMode = bmRepair; } - else if (*arg == "--hash") - buildMode = bmHash; - else if (*arg == "--run-env") // obsolete runEnv = true; @@ -235,6 +232,8 @@ void mainWrapped(int argc, char * * argv) myArgs.parseCmdline(args); + initPlugins(); + if (packages && fromArgs) throw UsageError("'-p' and '-E' are mutually exclusive"); @@ -279,8 +278,8 @@ void mainWrapped(int argc, char * * argv) else /* If we're in a #! script, interpret filenames relative to the script. */ - exprs.push_back(state.parseExprFromFile(resolveExprPath(lookupFileArg(state, - inShebang && !packages ? absPath(i, absPath(dirOf(script))) : i)))); + exprs.push_back(state.parseExprFromFile(resolveExprPath(state.checkSourcePath(lookupFileArg(state, + inShebang && !packages ? absPath(i, absPath(dirOf(script))) : i))))); } /* Evaluate them into derivations. */ diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index 370f216abccd..ec9a7174ecb9 100755 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -213,6 +213,9 @@ int main(int argc, char ** argv) } return true; }); + + initPlugins(); + switch (cmd) { case cNone: throw UsageError("no command specified"); diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index cc663a96924d..37fe22f48134 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -77,6 +77,8 @@ int main(int argc, char * * argv) return true; }); + initPlugins(); + auto profilesDir = settings.nixStateDir + "/profiles"; if (removeOld) removeOldGenerations(profilesDir); diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc index 861fc2e5cd64..dfb1b8fc5dc4 100755 --- a/src/nix-copy-closure/nix-copy-closure.cc +++ b/src/nix-copy-closure/nix-copy-closure.cc @@ -44,6 +44,8 @@ int main(int argc, char ** argv) return true; }); + initPlugins(); + if (sshHost.empty()) throw UsageError("no host name specified"); diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc index 5629cc64b96e..890bffa19aa5 100644 --- a/src/nix-daemon/nix-daemon.cc +++ b/src/nix-daemon/nix-daemon.cc @@ -411,7 +411,7 @@ static void performOp(TunnelLogger * logger, ref<LocalStore> store, /* Repairing is not atomic, so disallowed for "untrusted" clients. 
*/ if (mode == bmRepair && !trusted) - throw Error("repairing is not supported when building through the Nix daemon"); + throw Error("repairing is not allowed because you are not in 'trusted-users'"); } logger->startWork(); store->buildPaths(drvs, mode); @@ -994,7 +994,7 @@ static void daemonLoop(char * * argv) if (matchUser(user, group, trustedUsers)) trusted = true; - if (!trusted && !matchUser(user, group, allowedUsers)) + if ((!trusted && !matchUser(user, group, allowedUsers)) || group == settings.buildUsersGroup) throw Error(format("user '%1%' is not allowed to connect to the Nix daemon") % user); printInfo(format((string) "accepted connection from pid %1%, user %2%" + (trusted ? " (trusted)" : "")) @@ -1060,6 +1060,8 @@ int main(int argc, char * * argv) return true; }); + initPlugins(); + if (stdio) { if (getStoreType() == tDaemon) { /* Forward on this connection to the real daemon */ diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 016caf6d2346..97e66cbd937e 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -1393,6 +1393,8 @@ int main(int argc, char * * argv) myArgs.parseCmdline(argvToStrings(argc, argv)); + initPlugins(); + if (!op) throw UsageError("no operation specified"); auto store = openStore(); diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index 55ac007e8682..dd262bea0918 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -151,6 +151,8 @@ int main(int argc, char * * argv) myArgs.parseCmdline(argvToStrings(argc, argv)); + initPlugins(); + if (evalOnly && !wantsReadWrite) settings.readOnlyMode = true; @@ -182,7 +184,7 @@ int main(int argc, char * * argv) for (auto & i : files) { Expr * e = fromArgs ? state.parseExprFromString(i, absPath(".")) - : state.parseExprFromFile(resolveExprPath(lookupFileArg(state, i))); + : state.parseExprFromFile(resolveExprPath(state.checkSourcePath(lookupFileArg(state, i)))); processExpr(state, attrPaths, parseOnly, strict, autoArgs, evalOnly, outputKind, xmlOutputSourceLocation, e); } diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc index fef3eaa45538..fa7ee254500c 100644 --- a/src/nix-prefetch-url/nix-prefetch-url.cc +++ b/src/nix-prefetch-url/nix-prefetch-url.cc @@ -89,6 +89,8 @@ int main(int argc, char * * argv) myArgs.parseCmdline(argvToStrings(argc, argv)); + initPlugins(); + if (args.size() > 2) throw UsageError("too many arguments"); diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index f6f276dd1798..e1e27ceef94d 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -122,7 +122,6 @@ static void opRealise(Strings opFlags, Strings opArgs) if (i == "--dry-run") dryRun = true; else if (i == "--repair") buildMode = bmRepair; else if (i == "--check") buildMode = bmCheck; - else if (i == "--hash") buildMode = bmHash; else if (i == "--ignore-unknown") ignoreUnknown = true; else throw UsageError(format("unknown flag '%1%'") % i); @@ -632,6 +631,7 @@ static void opDump(Strings opFlags, Strings opArgs) FdSink sink(STDOUT_FILENO); string path = *opArgs.begin(); dumpPath(path, sink); + sink.flush(); } @@ -657,6 +657,7 @@ static void opExport(Strings opFlags, Strings opArgs) FdSink sink(STDOUT_FILENO); store->exportPaths(opArgs, sink); + sink.flush(); } @@ -1053,6 +1054,8 @@ int main(int argc, char * * argv) return true; }); + initPlugins(); + if (!op) throw UsageError("no operation specified"); if (op != opDump && op != opRestore) /* 
!!! hack */ diff --git a/src/nix/build.cc b/src/nix/build.cc index f7c99f12dbbf..b329ac38ac2b 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -50,7 +50,9 @@ struct CmdBuild : MixDryRun, InstallablesCommand void run(ref<Store> store) override { - auto buildables = toBuildables(store, dryRun ? DryRun : Build, installables); + auto buildables = build(store, dryRun ? DryRun : Build, installables); + + if (dryRun) return; for (size_t i = 0; i < buildables.size(); ++i) { auto & b(buildables[i]); diff --git a/src/nix/command.hh b/src/nix/command.hh index 6b34e3881e79..97a6fee7fd27 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -5,6 +5,8 @@ namespace nix { +extern std::string programPath; + struct Value; class Bindings; class EvalState; @@ -196,7 +198,7 @@ std::shared_ptr<Installable> parseInstallable( SourceExprCommand & cmd, ref<Store> store, const std::string & installable, bool useDefaultInstallables); -Buildables toBuildables(ref<Store> store, RealiseMode mode, +Buildables build(ref<Store> store, RealiseMode mode, std::vector<std::shared_ptr<Installable>> installables); PathSet toStorePaths(ref<Store> store, RealiseMode mode, diff --git a/src/nix/copy.cc b/src/nix/copy.cc index 2ddea9e70a6a..f29429c1ac49 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -57,15 +57,15 @@ struct CmdCopy : StorePathsCommand return { Example{ "To copy Firefox from the local store to a binary cache in file:///tmp/cache:", - "nix copy --to file:///tmp/cache -r $(type -p firefox)" + "nix copy --to file:///tmp/cache $(type -p firefox)" }, Example{ "To copy the entire current NixOS system closure to another machine via SSH:", - "nix copy --to ssh://server -r /run/current-system" + "nix copy --to ssh://server /run/current-system" }, Example{ "To copy a closure from another machine via SSH:", - "nix copy --from ssh://server -r /nix/store/a6cnl93nk1wxnq84brbbwr6hxw9gp2w9-blender-2.79-rc2" + "nix copy --from ssh://server /nix/store/a6cnl93nk1wxnq84brbbwr6hxw9gp2w9-blender-2.79-rc2" }, }; } diff --git a/src/nix/dump-path.cc b/src/nix/dump-path.cc index 1a1866437b07..f411c0cb7c89 100644 --- a/src/nix/dump-path.cc +++ b/src/nix/dump-path.cc @@ -29,6 +29,7 @@ struct CmdDumpPath : StorePathCommand { FdSink sink(STDOUT_FILENO); store->narFromPath(storePath, sink); + sink.flush(); } }; diff --git a/src/nix/edit.cc b/src/nix/edit.cc index 127be321eee2..7eaa86e2f914 100644 --- a/src/nix/edit.cc +++ b/src/nix/edit.cc @@ -52,7 +52,12 @@ struct CmdEdit : InstallableCommand throw Error("cannot parse meta.position attribute '%s'", pos); std::string filename(pos, 0, colon); - int lineno = std::stoi(std::string(pos, colon + 1)); + int lineno; + try { + lineno = std::stoi(std::string(pos, colon + 1)); + } catch (std::invalid_argument e) { + throw Error("cannot parse line number '%s'", pos); + } auto editor = getEnv("EDITOR", "cat"); diff --git a/src/nix/eval.cc b/src/nix/eval.cc index 0fbeca1c121d..b7058361cbec 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -5,10 +5,11 @@ #include "eval.hh" #include "json.hh" #include "value-to-json.hh" +#include "progress-bar.hh" using namespace nix; -struct CmdEval : MixJSON, InstallablesCommand +struct CmdEval : MixJSON, InstallableCommand { bool raw = false; @@ -56,20 +57,19 @@ struct CmdEval : MixJSON, InstallablesCommand auto state = getEvalState(); - auto jsonOut = json ? 
std::make_unique<JSONList>(std::cout) : nullptr; + auto v = installable->toValue(*state); + PathSet context; - for (auto & i : installables) { - auto v = i->toValue(*state); - PathSet context; - if (raw) { - std::cout << state->coerceToString(noPos, *v, context); - } else if (json) { - auto jsonElem = jsonOut->placeholder(); - printValueAsJSON(*state, true, *v, jsonElem, context); - } else { - state->forceValueDeep(*v); - std::cout << *v << "\n"; - } + stopProgressBar(); + + if (raw) { + std::cout << state->coerceToString(noPos, *v, context); + } else if (json) { + JSONPlaceholder jsonOut(std::cout); + printValueAsJSON(*state, true, *v, jsonOut, context); + } else { + state->forceValueDeep(*v); + std::cout << *v << "\n"; } } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index ae93c4ef649e..a3fdd8a2808d 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -30,10 +30,8 @@ Value * SourceExprCommand::getSourceExpr(EvalState & state) vSourceExpr = state.allocValue(); - if (file != "") { - Expr * e = state.parseExprFromFile(resolveExprPath(lookupFileArg(state, file))); - state.eval(e, *vSourceExpr); - } + if (file != "") + state.evalFile(lookupFileArg(state, file), *vSourceExpr); else { @@ -255,7 +253,7 @@ std::shared_ptr<Installable> parseInstallable( return installables.front(); } -Buildables toBuildables(ref<Store> store, RealiseMode mode, +Buildables build(ref<Store> store, RealiseMode mode, std::vector<std::shared_ptr<Installable>> installables) { if (mode != Build) @@ -293,7 +291,7 @@ PathSet toStorePaths(ref<Store> store, RealiseMode mode, { PathSet outPaths; - for (auto & b : toBuildables(store, mode, installables)) + for (auto & b : build(store, mode, installables)) for (auto & output : b.outputs) outPaths.insert(output.second); diff --git a/src/nix/local.mk b/src/nix/local.mk index bddd53b168d3..f76da194467c 100644 --- a/src/nix/local.mk +++ b/src/nix/local.mk @@ -6,4 +6,6 @@ nix_SOURCES := $(wildcard $(d)/*.cc) $(wildcard src/linenoise/*.cpp) nix_LIBS = libexpr libmain libstore libutil libformat +nix_LDFLAGS = -pthread + $(eval $(call install-symlink, nix, $(bindir)/nix-hash)) diff --git a/src/nix/log.cc b/src/nix/log.cc index 966ad8b65087..f07ec4e93a16 100644 --- a/src/nix/log.cc +++ b/src/nix/log.cc @@ -50,6 +50,7 @@ struct CmdLog : InstallableCommand auto b = installable->toBuildable(); + RunPager pager; for (auto & sub : subs) { auto log = b.drvPath != "" ? 
sub->getBuildLog(b.drvPath) : nullptr; for (auto & output : b.outputs) { diff --git a/src/nix/ls.cc b/src/nix/ls.cc index 69620595d8ca..e99622faf472 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -90,6 +90,16 @@ struct CmdLsStore : StoreCommand, MixLs expectArg("path", &path); } + Examples examples() override + { + return { + Example{ + "To list the contents of a store path in a binary cache:", + "nix ls-store --store https://cache.nixos.org/ -lR /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10" + }, + }; + } + std::string name() override { return "ls-store"; @@ -116,6 +126,16 @@ struct CmdLsNar : Command, MixLs expectArg("path", &path); } + Examples examples() override + { + return { + Example{ + "To list a specific file in a NAR:", + "nix ls-nar -l hello.nar /bin/hello" + }, + }; + } + std::string name() override { return "ls-nar"; diff --git a/src/nix/main.cc b/src/nix/main.cc index 06bb8a1c3043..bb107ec7d3f6 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -16,6 +16,8 @@ void chrootHelper(int argc, char * * argv); namespace nix { +std::string programPath; + struct NixArgs : virtual MultiCommand, virtual MixCommonArgs { NixArgs() : MultiCommand(*RegisterCommand::commands), MixCommonArgs("nix") @@ -78,7 +80,8 @@ void mainWrapped(int argc, char * * argv) initNix(); initGC(); - string programName = baseNameOf(argv[0]); + programPath = argv[0]; + string programName = baseNameOf(programPath); { auto legacy = (*RegisterLegacyCommand::commands)[programName]; @@ -89,6 +92,8 @@ void mainWrapped(int argc, char * * argv) args.parseCmdline(argvToStrings(argc, argv)); + initPlugins(); + if (!args.command) args.showHelpAndExit(); Finally f([]() { stopProgressBar(); }); diff --git a/src/nix/ping-store.cc b/src/nix/ping-store.cc new file mode 100644 index 000000000000..310942574a2a --- /dev/null +++ b/src/nix/ping-store.cc @@ -0,0 +1,35 @@ +#include "command.hh" +#include "shared.hh" +#include "store-api.hh" + +using namespace nix; + +struct CmdPingStore : StoreCommand +{ + std::string name() override + { + return "ping-store"; + } + + std::string description() override + { + return "test whether a store can be opened"; + } + + Examples examples() override + { + return { + Example{ + "To test whether connecting to a remote Nix store via SSH works:", + "nix ping-store --store ssh://mac1" + }, + }; + } + + void run(ref<Store> store) override + { + store->connect(); + } +}; + +static RegisterCommand r1(make_ref<CmdPingStore>()); diff --git a/src/nix/progress-bar.cc b/src/nix/progress-bar.cc index fb9955190b40..e6553c06f4ae 100644 --- a/src/nix/progress-bar.cc +++ b/src/nix/progress-bar.cc @@ -3,8 +3,9 @@ #include "sync.hh" #include "store-api.hh" -#include <map> #include <atomic> +#include <map> +#include <thread> namespace nix { @@ -22,44 +23,6 @@ static uint64_t getI(const std::vector<Logger::Field> & fields, size_t n) return fields[n].i; } -/* Truncate a string to 'width' printable characters. ANSI escape - sequences are copied but not included in the character count. Also, - tabs are expanded to spaces. 
*/ -static std::string ansiTruncate(const std::string & s, int width) -{ - if (width <= 0) return s; - - std::string t; - size_t w = 0; - auto i = s.begin(); - - while (w < (size_t) width && i != s.end()) { - if (*i == '\e') { - t += *i++; - if (i != s.end() && *i == '[') { - t += *i++; - while (i != s.end() && (*i < 0x40 || *i > 0x7e)) { - t += *i++; - } - if (i != s.end()) t += *i++; - } - } - - else if (*i == '\t') { - t += ' '; w++; - while (w < (size_t) width && w & 8) { - t += ' '; w++; - } - } - - else { - t += *i++; w++; - } - } - - return t; -} - class ProgressBar : public Logger { private: @@ -101,15 +64,28 @@ private: Sync<State> state_; + std::thread updateThread; + + std::condition_variable quitCV, updateCV; + public: ProgressBar() { + updateThread = std::thread([&]() { + auto state(state_.lock()); + while (state->active) { + state.wait(updateCV); + draw(*state); + state.wait_for(quitCV, std::chrono::milliseconds(50)); + } + }); } ~ProgressBar() { stop(); + updateThread.join(); } void stop() @@ -121,6 +97,8 @@ public: writeToStderr("\r\e[K"); if (status != "") writeToStderr("[" + status + "]\n"); + updateCV.notify_one(); + quitCV.notify_one(); } void log(Verbosity lvl, const FormatOrString & fs) override @@ -132,7 +110,7 @@ public: void log(State & state, Verbosity lvl, const std::string & s) { writeToStderr("\r\e[K" + s + ANSI_NORMAL "\n"); - update(state); + draw(state); } void startActivity(ActivityId act, Verbosity lvl, ActivityType type, @@ -167,7 +145,12 @@ public: if (type == actSubstitute) { auto name = storePathToName(getS(fields, 0)); - i->s = fmt("fetching " ANSI_BOLD "%s" ANSI_NORMAL " from %s", name, getS(fields, 1)); + auto sub = getS(fields, 1); + i->s = fmt( + hasPrefix(sub, "local") + ? "copying " ANSI_BOLD "%s" ANSI_NORMAL " from %s" + : "fetching " ANSI_BOLD "%s" ANSI_NORMAL " from %s", + name, sub); } if (type == actQueryPathInfo) { @@ -180,7 +163,7 @@ public: || (type == actCopyPath && hasAncestor(*state, actSubstitute, parent))) i->visible = false; - update(*state); + update(); } /* Check whether an activity has an ancestore with the specified @@ -215,7 +198,7 @@ public: state->its.erase(i); } - update(*state); + update(); } void result(ActivityId act, ResultType type, const std::vector<Field> & fields) override @@ -225,7 +208,7 @@ public: if (type == resFileLinked) { state->filesLinked++; state->bytesLinked += getI(fields, 0); - update(*state); + update(); } else if (type == resBuildLogLine) { @@ -238,25 +221,25 @@ public: info.lastLine = lastLine; state->activities.emplace_back(info); i->second = std::prev(state->activities.end()); - update(*state); + update(); } } else if (type == resUntrustedPath) { state->untrustedPaths++; - update(*state); + update(); } else if (type == resCorruptedPath) { state->corruptedPaths++; - update(*state); + update(); } else if (type == resSetPhase) { auto i = state->its.find(act); assert(i != state->its.end()); i->second->phase = getS(fields, 0); - update(*state); + update(); } else if (type == resProgress) { @@ -267,7 +250,7 @@ public: actInfo.expected = getI(fields, 1); actInfo.running = getI(fields, 2); actInfo.failed = getI(fields, 3); - update(*state); + update(); } else if (type == resSetExpected) { @@ -279,17 +262,16 @@ public: state->activitiesByType[type].expected -= j; j = getI(fields, 1); state->activitiesByType[type].expected += j; - update(*state); + update(); } } void update() { - auto state(state_.lock()); - update(*state); + updateCV.notify_one(); } - void update(State & state) + void draw(State & state) { 
if (!state.active) return; @@ -323,7 +305,10 @@ public: } } - writeToStderr("\r" + ansiTruncate(line, getWindowSize().second) + "\e[K"); + auto width = getWindowSize().second; + if (width <= 0) std::numeric_limits<decltype(width)>::max(); + + writeToStderr("\r" + filterANSIEscapes(line, width) + "\e[K"); } std::string getStatus(State & state) diff --git a/src/nix/repl.cc b/src/nix/repl.cc index 1adb816c5bf0..9216209173d9 100644 --- a/src/nix/repl.cc +++ b/src/nix/repl.cc @@ -186,7 +186,16 @@ bool NixRepl::getLine(string & input, const std::string &prompt) { char * s = linenoise(prompt.c_str()); Finally doFree([&]() { free(s); }); - if (!s) return false; + if (!s) { + switch (auto type = linenoiseKeyType()) { + case 1: // ctrl-C + return true; + case 2: // ctrl-D + return false; + default: + throw Error(format("Unexpected linenoise keytype: %1%") % type); + } + } input += s; return true; } diff --git a/src/nix/run.cc b/src/nix/run.cc index ade87e63a49c..d04e106e037b 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -16,8 +16,6 @@ using namespace nix; std::string chrootHelperName = "__run_in_chroot"; -extern char * * environ; - struct CmdRun : InstallablesCommand { std::vector<std::string> command = { "bash" }; @@ -85,6 +83,10 @@ struct CmdRun : InstallablesCommand "To run GNU Hello:", "nix run nixpkgs.hello -c hello --greeting 'Hi everybody!'" }, + Example{ + "To run GNU Hello in a chroot store:", + "nix run --store ~/my-nix nixpkgs.hello -c hello" + }, }; } @@ -105,7 +107,7 @@ struct CmdRun : InstallablesCommand if (s) kept[var] = s; } - environ = nullptr; + clearEnv(); for (auto & var : kept) setenv(var.first.c_str(), var.second.c_str(), 1); diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc new file mode 100644 index 000000000000..758bbbc688bc --- /dev/null +++ b/src/nix/upgrade-nix.cc @@ -0,0 +1,131 @@ +#include "command.hh" +#include "store-api.hh" +#include "download.hh" +#include "eval.hh" +#include "attr-path.hh" + +using namespace nix; + +struct CmdUpgradeNix : StoreCommand +{ + Path profileDir; + + CmdUpgradeNix() + { + mkFlag() + .longName("profile") + .shortName('p') + .labels({"profile-dir"}) + .description("the Nix profile to upgrade") + .dest(&profileDir); + } + + std::string name() override + { + return "upgrade-nix"; + } + + std::string description() override + { + return "upgrade Nix to the latest stable version"; + } + + Examples examples() override + { + return { + Example{ + "To upgrade Nix to the latest stable version:", + "nix upgrade-nix" + }, + Example{ + "To upgrade Nix in a specific profile:", + "nix upgrade-nix -p /nix/var/nix/profiles/per-user/alice/profile" + }, + }; + } + + void run(ref<Store> store) override + { + settings.pureEval = true; + + if (profileDir == "") + profileDir = getProfileDir(store); + + printInfo("upgrading Nix in profile '%s'", profileDir); + + Path storePath; + { + Activity act(*logger, lvlInfo, actUnknown, "querying latest Nix version"); + storePath = getLatestNix(store); + } + + { + Activity act(*logger, lvlInfo, actUnknown, fmt("downloading '%s'...", storePath)); + store->ensurePath(storePath); + } + + { + Activity act(*logger, lvlInfo, actUnknown, fmt("verifying that '%s' works...", storePath)); + auto program = storePath + "/bin/nix-env"; + auto s = runProgram(program, false, {"--version"}); + if (s.find("Nix") == std::string::npos) + throw Error("could not verify that '%s' works", program); + } + + { + Activity act(*logger, lvlInfo, actUnknown, fmt("installing '%s' into profile '%s'...", storePath, profileDir)); + 
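The 'nix run' hunk above replaces the bare `environ = nullptr` with the new clearEnv() helper from libutil, which unsets every variable returned by getEnv(); the command first saves a whitelist of variables and re-exports them afterwards. The following is a standalone sketch of that keep/clear/restore pattern on a POSIX system; the whitelist is only an example, not the set 'nix run' actually keeps.

    // Sketch of the keep/clear/restore environment pattern used by 'nix run'.
    // Standalone and illustrative; the variable whitelist is an example only.
    #include <cstdio>
    #include <cstdlib>
    #include <map>
    #include <string>

    extern char * * environ;

    int main()
    {
        const char * keepVars[] = { "HOME", "USER", "TERM", "PATH" }; // example whitelist

        // 1. Remember the variables that should survive.
        std::map<std::string, std::string> kept;
        for (auto var : keepVars)
            if (auto s = getenv(var)) kept[var] = s;

        // 2. Clear everything. This mirrors what libutil's new clearEnv() does:
        //    collect the names first, then unsetenv() each one, so we never
        //    iterate over 'environ' while modifying it.
        std::map<std::string, std::string> all;
        for (char * * e = environ; *e; ++e) {
            std::string kv = *e;
            all[kv.substr(0, kv.find('='))] = "";
        }
        for (auto & kv : all) unsetenv(kv.first.c_str());

        // 3. Re-export the whitelist.
        for (auto & kv : kept) setenv(kv.first.c_str(), kv.second.c_str(), 1);

        // 4. Show the scrubbed environment.
        for (char * * e = environ; *e; ++e) std::printf("%s\n", *e);
    }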
runProgram(settings.nixBinDir + "/nix-env", false, + {"--profile", profileDir, "-i", storePath, "--no-sandbox"}); + } + } + + /* Return the profile in which Nix is installed. */ + Path getProfileDir(ref<Store> store) + { + Path where; + + for (auto & dir : tokenizeString<Strings>(getEnv("PATH"), ":")) + if (pathExists(dir + "/nix-env")) { + where = dir; + break; + } + + if (where == "") + throw Error("couldn't figure out how Nix is installed, so I can't upgrade it"); + + printInfo("found Nix in '%s'", where); + + if (hasPrefix(where, "/run/current-system")) + throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'"); + + Path profileDir; + Path userEnv; + + if (baseNameOf(where) != "bin" || + !hasSuffix(userEnv = canonPath(profileDir = dirOf(where), true), "user-environment")) + throw Error("directory '%s' does not appear to be part of a Nix profile", where); + + if (!store->isValidPath(userEnv)) + throw Error("directory '%s' is not in the Nix store", userEnv); + + return profileDir; + } + + /* Return the store path of the latest stable Nix. */ + Path getLatestNix(ref<Store> store) + { + // FIXME: use nixos.org? + auto req = DownloadRequest("https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix"); + auto res = getDownloader()->download(req); + + EvalState state(Strings(), store); + auto v = state.allocValue(); + state.eval(state.parseExprFromString(*res.data, "/no-such-path"), *v); + Bindings & bindings(*state.allocBindings(0)); + auto v2 = findAlongAttrPath(state, settings.thisSystem, bindings, *v); + + return state.forceString(*v2); + } +}; + +static RegisterCommand r1(make_ref<CmdUpgradeNix>()); diff --git a/src/nlohmann/json.hpp b/src/nlohmann/json.hpp index 9754e464c76b..5b0b0ea5b301 100644 --- a/src/nlohmann/json.hpp +++ b/src/nlohmann/json.hpp @@ -1,7 +1,7 @@ /* __ _____ _____ _____ __| | __| | | | JSON for Modern C++ -| | |__ | | | | | | version 2.1.1 +| | |__ | | | | | | version 3.0.1 |_____|_____|_____|_|___| https://github.com/nlohmann/json Licensed under the MIT License <http://opensource.org/licenses/MIT>. @@ -109,7 +109,7 @@ SOFTWARE. #define JSON_UNLIKELY(x) x #endif -// cpp language standard detection +// C++ language standard detection #if (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 #define JSON_HAS_CPP_17 #define JSON_HAS_CPP_14 @@ -128,20 +128,18 @@ template<typename = void, typename = void> struct adl_serializer; // forward declaration of basic_json (required to split the class) -template<template<typename U, typename V, typename... Args> class ObjectType = - std::map, - template<typename U, typename... Args> class ArrayType = std::vector, +template<template<typename, typename, typename...> class ObjectType = std::map, + template<typename, typename...> class ArrayType = std::vector, class StringType = std::string, class BooleanType = bool, class NumberIntegerType = std::int64_t, class NumberUnsignedType = std::uint64_t, class NumberFloatType = double, - template<typename U> class AllocatorType = std::allocator, - template<typename T, typename SFINAE = void> class JSONSerializer = - adl_serializer> + template<typename> class AllocatorType = std::allocator, + template<typename, typename = void> class JSONSerializer = adl_serializer> class basic_json; -// Ugly macros to avoid uglier copy-paste when specializing basic_json -// This is only temporary and will be removed in 3.0 +// Ugly macros to avoid uglier copy-paste when specializing basic_json. 
They +// may be removed in the future once the class is split. #define NLOHMANN_BASIC_JSON_TPL_DECLARATION \ template<template<typename, typename, typename...> class ObjectType, \ @@ -227,7 +225,7 @@ class exception : public std::exception /*! @brief exception indicating a parse error -This excpetion is thrown by the library when a parse error occurs. Parse errors +This exception is thrown by the library when a parse error occurs. Parse errors can occur during the deserialization of JSON text, CBOR, MessagePack, as well as when using JSON Patch. @@ -243,12 +241,12 @@ json.exception.parse_error.102 | parse error at 14: missing or wrong low surroga json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid. json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects. json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors. -json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number wihtout a leading `0`. +json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`. json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character. json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences. json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number. json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read. -json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xf8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read. +json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read. json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read. @note For an input with n bytes, 1 is the index of the first character and n+1 @@ -378,6 +376,7 @@ json.exception.type_error.312 | cannot use update() with string | The @ref updat json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined. 
json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers. json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive. +json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. | @liveexample{The following code shows how a `type_error` exception can be caught.,type_error} @@ -457,7 +456,6 @@ Exceptions have ids 5xx. name / id | example message | description ------------------------------ | --------------- | ------------------------- json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed. -json.exception.other_error.502 | invalid object size for conversion | Some conversions to user-defined types impose constraints on the object size (e.g. std::pair) @sa @ref exception for the base class of the library exceptions @sa @ref parse_error for exceptions indicating a parse error @@ -540,20 +538,14 @@ Returns an ordering that is similar to Python: inline bool operator<(const value_t lhs, const value_t rhs) noexcept { static constexpr std::array<uint8_t, 8> order = {{ - 0, // null - 3, // object - 4, // array - 5, // string - 1, // boolean - 2, // integer - 2, // unsigned - 2, // float + 0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */, + 1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */ } }; const auto l_index = static_cast<std::size_t>(lhs); const auto r_index = static_cast<std::size_t>(rhs); - return (l_index < order.size() and r_index < order.size() and order[l_index] < order[r_index]); + return l_index < order.size() and r_index < order.size() and order[l_index] < order[r_index]; } @@ -591,17 +583,15 @@ struct merge_and_renumber; template<std::size_t... I1, std::size_t... I2> struct merge_and_renumber<index_sequence<I1...>, index_sequence<I2...>> - : index_sequence < I1..., (sizeof...(I1) + I2)... > - {}; + : index_sequence < I1..., (sizeof...(I1) + I2)... > {}; template<std::size_t N> struct make_index_sequence : merge_and_renumber < typename make_index_sequence < N / 2 >::type, - typename make_index_sequence < N - N / 2 >::type > -{}; + typename make_index_sequence < N - N / 2 >::type > {}; -template<> struct make_index_sequence<0> : index_sequence<> { }; -template<> struct make_index_sequence<1> : index_sequence<0> { }; +template<> struct make_index_sequence<0> : index_sequence<> {}; +template<> struct make_index_sequence<1> : index_sequence<0> {}; template<typename... Ts> using index_sequence_for = make_index_sequence<sizeof...(Ts)>; @@ -624,7 +614,7 @@ template<class B1> struct conjunction<B1> : B1 {}; template<class B1, class... 
Bn> struct conjunction<B1, Bn...> : std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {}; -template<class B> struct negation : std::integral_constant < bool, !B::value > {}; +template<class B> struct negation : std::integral_constant<bool, not B::value> {}; // dispatch utility (taken from ranges-v3) template<unsigned N> struct priority_tag : priority_tag < N - 1 > {}; @@ -725,8 +715,7 @@ struct external_constructor<value_t::array> } template<typename BasicJsonType, typename CompatibleArrayType, - enable_if_t<not std::is_same<CompatibleArrayType, - typename BasicJsonType::array_t>::value, + enable_if_t<not std::is_same<CompatibleArrayType, typename BasicJsonType::array_t>::value, int> = 0> static void construct(BasicJsonType& j, const CompatibleArrayType& arr) { @@ -743,7 +732,7 @@ struct external_constructor<value_t::array> j.m_type = value_t::array; j.m_value = value_t::array; j.m_value.array->reserve(arr.size()); - for (bool x : arr) + for (const bool x : arr) { j.m_value.array->push_back(x); } @@ -782,8 +771,7 @@ struct external_constructor<value_t::object> } template<typename BasicJsonType, typename CompatibleObjectType, - enable_if_t<not std::is_same<CompatibleObjectType, - typename BasicJsonType::object_t>::value, int> = 0> + enable_if_t<not std::is_same<CompatibleObjectType, typename BasicJsonType::object_t>::value, int> = 0> static void construct(BasicJsonType& j, const CompatibleObjectType& obj) { using std::begin; @@ -896,7 +884,7 @@ struct is_compatible_integer_type is_compatible_integer_type_impl < std::is_integral<CompatibleNumberIntegerType>::value and not std::is_same<bool, CompatibleNumberIntegerType>::value, - RealIntegerType, CompatibleNumberIntegerType > ::value; + RealIntegerType, CompatibleNumberIntegerType >::value; }; @@ -922,10 +910,8 @@ template<typename BasicJsonType, typename T> struct has_non_default_from_json { private: - template < - typename U, - typename = enable_if_t<std::is_same< - T, decltype(uncvref_t<U>::from_json(std::declval<BasicJsonType>()))>::value >> + template<typename U, typename = + enable_if_t<std::is_same<T, decltype(uncvref_t<U>::from_json(std::declval<BasicJsonType>()))>::value>> static int detect(U&&); static void detect(...); @@ -954,22 +940,21 @@ struct has_to_json // to_json // ///////////// -template<typename BasicJsonType, typename T, enable_if_t< - std::is_same<T, typename BasicJsonType::boolean_t>::value, int> = 0> +template<typename BasicJsonType, typename T, + enable_if_t<std::is_same<T, typename BasicJsonType::boolean_t>::value, int> = 0> void to_json(BasicJsonType& j, T b) noexcept { external_constructor<value_t::boolean>::construct(j, b); } template<typename BasicJsonType, typename CompatibleString, - enable_if_t<std::is_constructible<typename BasicJsonType::string_t, - CompatibleString>::value, int> = 0> + enable_if_t<std::is_constructible<typename BasicJsonType::string_t, CompatibleString>::value, int> = 0> void to_json(BasicJsonType& j, const CompatibleString& s) { external_constructor<value_t::string>::construct(j, s); } -template <typename BasicJsonType> +template<typename BasicJsonType> void to_json(BasicJsonType& j, typename BasicJsonType::string_t&& s) { external_constructor<value_t::string>::construct(j, std::move(s)); @@ -982,19 +967,15 @@ void to_json(BasicJsonType& j, FloatType val) noexcept external_constructor<value_t::number_float>::construct(j, static_cast<typename BasicJsonType::number_float_t>(val)); } -template < - typename BasicJsonType, typename CompatibleNumberUnsignedType, - 
enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_unsigned_t, - CompatibleNumberUnsignedType>::value, int> = 0 > +template<typename BasicJsonType, typename CompatibleNumberUnsignedType, + enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_unsigned_t, CompatibleNumberUnsignedType>::value, int> = 0> void to_json(BasicJsonType& j, CompatibleNumberUnsignedType val) noexcept { external_constructor<value_t::number_unsigned>::construct(j, static_cast<typename BasicJsonType::number_unsigned_t>(val)); } -template < - typename BasicJsonType, typename CompatibleNumberIntegerType, - enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_integer_t, - CompatibleNumberIntegerType>::value, int> = 0 > +template<typename BasicJsonType, typename CompatibleNumberIntegerType, + enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_integer_t, CompatibleNumberIntegerType>::value, int> = 0> void to_json(BasicJsonType& j, CompatibleNumberIntegerType val) noexcept { external_constructor<value_t::number_integer>::construct(j, static_cast<typename BasicJsonType::number_integer_t>(val)); @@ -1014,49 +995,43 @@ void to_json(BasicJsonType& j, const std::vector<bool>& e) external_constructor<value_t::array>::construct(j, e); } -template < - typename BasicJsonType, typename CompatibleArrayType, - enable_if_t < - is_compatible_array_type<BasicJsonType, CompatibleArrayType>::value or - std::is_same<typename BasicJsonType::array_t, CompatibleArrayType>::value, - int > = 0 > +template<typename BasicJsonType, typename CompatibleArrayType, + enable_if_t<is_compatible_array_type<BasicJsonType, CompatibleArrayType>::value or + std::is_same<typename BasicJsonType::array_t, CompatibleArrayType>::value, + int> = 0> void to_json(BasicJsonType& j, const CompatibleArrayType& arr) { external_constructor<value_t::array>::construct(j, arr); } -template <typename BasicJsonType, typename T, - enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0> +template<typename BasicJsonType, typename T, + enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0> void to_json(BasicJsonType& j, std::valarray<T> arr) { external_constructor<value_t::array>::construct(j, std::move(arr)); } -template <typename BasicJsonType> +template<typename BasicJsonType> void to_json(BasicJsonType& j, typename BasicJsonType::array_t&& arr) { external_constructor<value_t::array>::construct(j, std::move(arr)); } -template < - typename BasicJsonType, typename CompatibleObjectType, - enable_if_t<is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value, - int> = 0 > +template<typename BasicJsonType, typename CompatibleObjectType, + enable_if_t<is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value, int> = 0> void to_json(BasicJsonType& j, const CompatibleObjectType& obj) { external_constructor<value_t::object>::construct(j, obj); } -template <typename BasicJsonType> +template<typename BasicJsonType> void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj) { external_constructor<value_t::object>::construct(j, std::move(obj)); } template<typename BasicJsonType, typename T, std::size_t N, - enable_if_t<not std::is_constructible< - typename BasicJsonType::string_t, T (&)[N]>::value, - int> = 0> + enable_if_t<not std::is_constructible<typename BasicJsonType::string_t, T (&)[N]>::value, int> = 0> void to_json(BasicJsonType& j, T (&arr)[N]) { external_constructor<value_t::array>::construct(j, arr); @@ -1087,8 +1062,7 @@ void to_json(BasicJsonType& j, 
const std::tuple<Args...>& t) // overloads for basic_json template parameters template<typename BasicJsonType, typename ArithmeticType, enable_if_t<std::is_arithmetic<ArithmeticType>::value and - not std::is_same<ArithmeticType, - typename BasicJsonType::boolean_t>::value, + not std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value, int> = 0> void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val) { @@ -1351,6 +1325,13 @@ struct to_json_fn { static_assert(sizeof(BasicJsonType) == 0, "could not find to_json() method in T's namespace"); + +#ifdef _MSC_VER + // MSVC does not show a stacktrace for the above assert + using decayed = uncvref_t<T>; + static_assert(sizeof(typename decayed::force_msvc_stacktrace) == 0, + "forcing MSVC stacktrace to show which T we're talking about."); +#endif } public: @@ -1378,6 +1359,12 @@ struct from_json_fn { static_assert(sizeof(BasicJsonType) == 0, "could not find from_json() method in T's namespace"); +#ifdef _MSC_VER + // MSVC does not show a stacktrace for the above assert + using decayed = uncvref_t<T>; + static_assert(sizeof(typename decayed::force_msvc_stacktrace) == 0, + "forcing MSVC stacktrace to show which T we're talking about."); +#endif } public: @@ -1448,7 +1435,7 @@ class input_stream_adapter : public input_adapter_protocol explicit input_stream_adapter(std::istream& i) : is(i), sb(*i.rdbuf()) { - // ignore Byte Order Mark at start of input + // skip byte order mark std::char_traits<char>::int_type c; if ((c = get_character()) == 0xEF) { @@ -1472,7 +1459,7 @@ class input_stream_adapter : public input_adapter_protocol } else if (c != std::char_traits<char>::eof()) { - is.unget(); // Not BOM. Process as usual. + is.unget(); // no byte order mark; process as usual } } @@ -1481,8 +1468,8 @@ class input_stream_adapter : public input_adapter_protocol input_stream_adapter& operator=(input_stream_adapter&) = delete; // std::istream/std::streambuf use std::char_traits<char>::to_int_type, to - // ensure that std::char_traits<char>::eof() and the character 0xff do not - // end up as the same value, eg. 0xffffffff. + // ensure that std::char_traits<char>::eof() and the character 0xFF do not + // end up as the same value, eg. 0xFFFFFFFF. 
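The input_stream_adapter comment above explains why bytes are widened with std::char_traits<char>::to_int_type: a raw char holding 0xFF sign-extends to -1 on platforms where char is signed, which is exactly the usual value of char_traits<char>::eof(). A small standalone demonstration of the difference:

    // Why input adapters return char_traits<char>::int_type instead of char:
    // on a signed-char platform the byte 0xFF would otherwise be
    // indistinguishable from EOF. Standalone demonstration.
    #include <iostream>
    #include <string>

    int main()
    {
        char byte = static_cast<char>(0xFF);

        // Naive widening: typically sign-extends to -1 on signed-char platforms.
        int naive = byte;

        // Correct widening, as std::istream/std::streambuf do internally.
        auto proper = std::char_traits<char>::to_int_type(byte);

        std::cout << "naive:  " << naive  << "\n"                              // often -1
                  << "proper: " << proper << "\n"                              // 255
                  << "eof():  " << std::char_traits<char>::eof() << "\n";      // -1
    }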
std::char_traits<char>::int_type get_character() override { return sb.sbumpc(); @@ -1561,8 +1548,7 @@ class input_adapter template<typename CharT, typename std::enable_if< std::is_pointer<CharT>::value and - std::is_integral< - typename std::remove_pointer<CharT>::type>::value and + std::is_integral<typename std::remove_pointer<CharT>::type>::value and sizeof(typename std::remove_pointer<CharT>::type) == 1, int>::type = 0> input_adapter(CharT b, std::size_t l) @@ -1574,8 +1560,7 @@ class input_adapter template<typename CharT, typename std::enable_if< std::is_pointer<CharT>::value and - std::is_integral< - typename std::remove_pointer<CharT>::type>::value and + std::is_integral<typename std::remove_pointer<CharT>::type>::value and sizeof(typename std::remove_pointer<CharT>::type) == 1, int>::type = 0> input_adapter(CharT b) @@ -1585,8 +1570,7 @@ class input_adapter /// input adapter for iterator range with contiguous storage template<class IteratorType, typename std::enable_if< - std::is_same<typename std::iterator_traits<IteratorType>::iterator_category, - std::random_access_iterator_tag>::value, + std::is_same<typename std::iterator_traits<IteratorType>::iterator_category, std::random_access_iterator_tag>::value, int>::type = 0> input_adapter(IteratorType first, IteratorType last) { @@ -1624,13 +1608,10 @@ class input_adapter : input_adapter(std::begin(array), std::end(array)) {} /// input adapter for contiguous container - template < - class ContiguousContainer, - typename std::enable_if < - not std::is_pointer<ContiguousContainer>::value and - std::is_base_of<std::random_access_iterator_tag, - typename std::iterator_traits<decltype(std::begin(std::declval<ContiguousContainer const>()))>::iterator_category>::value, - int >::type = 0 > + template<class ContiguousContainer, typename + std::enable_if<not std::is_pointer<ContiguousContainer>::value and + std::is_base_of<std::random_access_iterator_tag, typename std::iterator_traits<decltype(std::begin(std::declval<ContiguousContainer const>()))>::iterator_category>::value, + int>::type = 0> input_adapter(const ContiguousContainer& c) : input_adapter(std::begin(c), std::end(c)) {} @@ -1804,6 +1785,12 @@ class lexer checks if it is inside the range. If a violation was detected, set up an error message and return false. Otherwise, return true. + @param[in] ranges list of integers; interpreted as list of pairs of + inclusive lower and upper bound, respectively + + @pre The passed list @a ranges must have 2, 4, or 6 elements; that is, + 1, 2, or 3 pairs. This precondition is enforced by an assertion. 
+ @return true if and only if no range violation was detected */ bool next_byte_in_range(std::initializer_list<int> ranges) @@ -1970,19 +1957,19 @@ class lexer // result of the above calculation yields a proper codepoint assert(0x00 <= codepoint and codepoint <= 0x10FFFF); - // translate code point to bytes + // translate codepoint into bytes if (codepoint < 0x80) { // 1-byte characters: 0xxxxxxx (ASCII) add(codepoint); } - else if (codepoint <= 0x7ff) + else if (codepoint <= 0x7FF) { // 2-byte characters: 110xxxxx 10xxxxxx add(0xC0 | (codepoint >> 6)); add(0x80 | (codepoint & 0x3F)); } - else if (codepoint <= 0xffff) + else if (codepoint <= 0xFFFF) { // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx add(0xE0 | (codepoint >> 12)); @@ -2021,12 +2008,12 @@ class lexer case 0x07: case 0x08: case 0x09: - case 0x0a: - case 0x0b: - case 0x0c: - case 0x0d: - case 0x0e: - case 0x0f: + case 0x0A: + case 0x0B: + case 0x0C: + case 0x0D: + case 0x0E: + case 0x0F: case 0x10: case 0x11: case 0x12: @@ -2037,12 +2024,12 @@ class lexer case 0x17: case 0x18: case 0x19: - case 0x1a: - case 0x1b: - case 0x1c: - case 0x1d: - case 0x1e: - case 0x1f: + case 0x1A: + case 0x1B: + case 0x1C: + case 0x1D: + case 0x1E: + case 0x1F: { error_message = "invalid string: control character must be escaped"; return token_type::parse_error; @@ -2058,12 +2045,12 @@ class lexer case 0x27: case 0x28: case 0x29: - case 0x2a: - case 0x2b: - case 0x2c: - case 0x2d: - case 0x2e: - case 0x2f: + case 0x2A: + case 0x2B: + case 0x2C: + case 0x2D: + case 0x2E: + case 0x2F: case 0x30: case 0x31: case 0x32: @@ -2074,12 +2061,12 @@ class lexer case 0x37: case 0x38: case 0x39: - case 0x3a: - case 0x3b: - case 0x3c: - case 0x3d: - case 0x3e: - case 0x3f: + case 0x3A: + case 0x3B: + case 0x3C: + case 0x3D: + case 0x3E: + case 0x3F: case 0x40: case 0x41: case 0x42: @@ -2090,12 +2077,12 @@ class lexer case 0x47: case 0x48: case 0x49: - case 0x4a: - case 0x4b: - case 0x4c: - case 0x4d: - case 0x4e: - case 0x4f: + case 0x4A: + case 0x4B: + case 0x4C: + case 0x4D: + case 0x4E: + case 0x4F: case 0x50: case 0x51: case 0x52: @@ -2106,11 +2093,11 @@ class lexer case 0x57: case 0x58: case 0x59: - case 0x5a: - case 0x5b: - case 0x5d: - case 0x5e: - case 0x5f: + case 0x5A: + case 0x5B: + case 0x5D: + case 0x5E: + case 0x5F: case 0x60: case 0x61: case 0x62: @@ -2121,12 +2108,12 @@ class lexer case 0x67: case 0x68: case 0x69: - case 0x6a: - case 0x6b: - case 0x6c: - case 0x6d: - case 0x6e: - case 0x6f: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: case 0x70: case 0x71: case 0x72: @@ -2137,48 +2124,48 @@ class lexer case 0x77: case 0x78: case 0x79: - case 0x7a: - case 0x7b: - case 0x7c: - case 0x7d: - case 0x7e: - case 0x7f: + case 0x7A: + case 0x7B: + case 0x7C: + case 0x7D: + case 0x7E: + case 0x7F: { add(current); break; } // U+0080..U+07FF: bytes C2..DF 80..BF - case 0xc2: - case 0xc3: - case 0xc4: - case 0xc5: - case 0xc6: - case 0xc7: - case 0xc8: - case 0xc9: - case 0xca: - case 0xcb: - case 0xcc: - case 0xcd: - case 0xce: - case 0xcf: - case 0xd0: - case 0xd1: - case 0xd2: - case 0xd3: - case 0xd4: - case 0xd5: - case 0xd6: - case 0xd7: - case 0xd8: - case 0xd9: - case 0xda: - case 0xdb: - case 0xdc: - case 0xdd: - case 0xde: - case 0xdf: + case 0xC2: + case 0xC3: + case 0xC4: + case 0xC5: + case 0xC6: + case 0xC7: + case 0xC8: + case 0xC9: + case 0xCA: + case 0xCB: + case 0xCC: + case 0xCD: + case 0xCE: + case 0xCF: + case 0xD0: + case 0xD1: + case 0xD2: + case 0xD3: + case 0xD4: + case 0xD5: + case 0xD6: + case 0xD7: 
+ case 0xD8: + case 0xD9: + case 0xDA: + case 0xDB: + case 0xDC: + case 0xDD: + case 0xDE: + case 0xDF: { if (JSON_UNLIKELY(not next_byte_in_range({0x80, 0xBF}))) { @@ -2188,7 +2175,7 @@ class lexer } // U+0800..U+0FFF: bytes E0 A0..BF 80..BF - case 0xe0: + case 0xE0: { if (JSON_UNLIKELY(not (next_byte_in_range({0xA0, 0xBF, 0x80, 0xBF})))) { @@ -2199,20 +2186,20 @@ class lexer // U+1000..U+CFFF: bytes E1..EC 80..BF 80..BF // U+E000..U+FFFF: bytes EE..EF 80..BF 80..BF - case 0xe1: - case 0xe2: - case 0xe3: - case 0xe4: - case 0xe5: - case 0xe6: - case 0xe7: - case 0xe8: - case 0xe9: - case 0xea: - case 0xeb: - case 0xec: - case 0xee: - case 0xef: + case 0xE1: + case 0xE2: + case 0xE3: + case 0xE4: + case 0xE5: + case 0xE6: + case 0xE7: + case 0xE8: + case 0xE9: + case 0xEA: + case 0xEB: + case 0xEC: + case 0xEE: + case 0xEF: { if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0xBF, 0x80, 0xBF})))) { @@ -2222,7 +2209,7 @@ class lexer } // U+D000..U+D7FF: bytes ED 80..9F 80..BF - case 0xed: + case 0xED: { if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0x9F, 0x80, 0xBF})))) { @@ -2232,7 +2219,7 @@ class lexer } // U+10000..U+3FFFF F0 90..BF 80..BF 80..BF - case 0xf0: + case 0xF0: { if (JSON_UNLIKELY(not (next_byte_in_range({0x90, 0xBF, 0x80, 0xBF, 0x80, 0xBF})))) { @@ -2242,9 +2229,9 @@ class lexer } // U+40000..U+FFFFF F1..F3 80..BF 80..BF 80..BF - case 0xf1: - case 0xf2: - case 0xf3: + case 0xF1: + case 0xF2: + case 0xF3: { if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0xBF, 0x80, 0xBF, 0x80, 0xBF})))) { @@ -2254,7 +2241,7 @@ class lexer } // U+100000..U+10FFFF F4 80..8F 80..BF 80..BF - case 0xf4: + case 0xF4: { if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0x8F, 0x80, 0xBF, 0x80, 0xBF})))) { @@ -2772,9 +2759,9 @@ scan_number_done: { // escape control characters std::string result; - for (auto c : token_string) + for (const auto c : token_string) { - if ('\x00' <= c and c <= '\x1f') + if ('\x00' <= c and c <= '\x1F') { // escape control characters std::stringstream ss; @@ -2877,10 +2864,10 @@ scan_number_done: std::size_t chars_read = 0; /// raw input token string (for error messages) - std::vector<char> token_string { }; + std::vector<char> token_string {}; /// buffer for variable-length tokens (numbers, strings) - std::string yytext { }; + std::string yytext {}; /// a description of occurred lexer errors const char* error_message = ""; @@ -3281,7 +3268,7 @@ class parser } /*! - @brief the acutal acceptor + @brief the actual acceptor @invariant 1. The last token is not yet processed. Therefore, the caller of this function must make sure a token has been read. 
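The lexer hunk earlier in this patch translates the decoded \uXXXX codepoint into UTF-8 bytes by range: one byte up to U+007F, two up to U+07FF, three up to U+FFFF, four up to U+10FFFF. A standalone sketch of the same translation, with a worked example:

    // Standalone sketch of the codepoint-to-UTF-8 translation performed by the
    // lexer above (same ranges, same bit layout). Illustrative only.
    #include <cstdio>
    #include <string>

    std::string encodeUTF8(unsigned int cp)
    {
        std::string out;
        if (cp < 0x80) {                       // 0xxxxxxx
            out += static_cast<char>(cp);
        } else if (cp <= 0x7FF) {              // 110xxxxx 10xxxxxx
            out += static_cast<char>(0xC0 | (cp >> 6));
            out += static_cast<char>(0x80 | (cp & 0x3F));
        } else if (cp <= 0xFFFF) {             // 1110xxxx 10xxxxxx 10xxxxxx
            out += static_cast<char>(0xE0 | (cp >> 12));
            out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
            out += static_cast<char>(0x80 | (cp & 0x3F));
        } else {                               // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
            out += static_cast<char>(0xF0 | (cp >> 18));
            out += static_cast<char>(0x80 | ((cp >> 12) & 0x3F));
            out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
            out += static_cast<char>(0x80 | (cp & 0x3F));
        }
        return out;
    }

    int main()
    {
        for (unsigned char c : encodeUTF8(0x20AC))   // U+20AC EURO SIGN
            std::printf("%02X ", c);                 // prints: E2 82 AC
        std::printf("\n");
    }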
@@ -3539,7 +3526,7 @@ class primitive_iterator_t return *this; } - primitive_iterator_t operator++(int) + primitive_iterator_t const operator++(int) { auto result = *this; m_it++; @@ -3552,7 +3539,7 @@ class primitive_iterator_t return *this; } - primitive_iterator_t operator--(int) + primitive_iterator_t const operator--(int) { auto result = *this; m_it--; @@ -3618,7 +3605,7 @@ This class implements a both iterators (iterator and const_iterator) for the iterators in version 3.0.0 (see https://github.com/nlohmann/json/issues/593) */ template<typename BasicJsonType> -class iter_impl : public std::iterator<std::bidirectional_iterator_tag, BasicJsonType> +class iter_impl { /// allow basic_json to access private members friend iter_impl<typename std::conditional<std::is_const<BasicJsonType>::value, typename std::remove_const<BasicJsonType>::type, const BasicJsonType>::type>; @@ -3632,6 +3619,14 @@ class iter_impl : public std::iterator<std::bidirectional_iterator_tag, BasicJso "iter_impl only accepts (const) basic_json"); public: + + /// The std::iterator class template (used as a base class to provide typedefs) is deprecated in C++17. + /// The C++ Standard has never required user-defined iterators to derive from std::iterator. + /// A user-defined iterator should provide publicly accessible typedefs named + /// iterator_category, value_type, difference_type, pointer, and reference. + /// Note that value_type is required to be non-const, even for constant iterators. + using iterator_category = std::bidirectional_iterator_tag; + /// the type of the values when the iterator is dereferenced using value_type = typename BasicJsonType::value_type; /// a type to represent differences between iterators @@ -3855,7 +3850,7 @@ class iter_impl : public std::iterator<std::bidirectional_iterator_tag, BasicJso @brief post-increment (it++) @pre The iterator is initialized; i.e. `m_object != nullptr`. */ - iter_impl operator++(int) + iter_impl const operator++(int) { auto result = *this; ++(*this); @@ -3898,7 +3893,7 @@ class iter_impl : public std::iterator<std::bidirectional_iterator_tag, BasicJso @brief post-decrement (it--) @pre The iterator is initialized; i.e. `m_object != nullptr`. 
*/ - iter_impl operator--(int) + iter_impl const operator--(int) { auto result = *this; --(*this); @@ -4291,7 +4286,7 @@ class json_reverse_iterator : public std::reverse_iterator<Base> { public: using difference_type = std::ptrdiff_t; - /// shortcut to the reverse iterator adaptor + /// shortcut to the reverse iterator adapter using base_iterator = std::reverse_iterator<Base>; /// the reference type for the pointed-to element using reference = typename Base::reference; @@ -4304,7 +4299,7 @@ class json_reverse_iterator : public std::reverse_iterator<Base> json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {} /// post-increment (it++) - json_reverse_iterator operator++(int) + json_reverse_iterator const operator++(int) { return static_cast<json_reverse_iterator>(base_iterator::operator++(1)); } @@ -4316,7 +4311,7 @@ class json_reverse_iterator : public std::reverse_iterator<Base> } /// post-decrement (it--) - json_reverse_iterator operator--(int) + json_reverse_iterator const operator--(int) { return static_cast<json_reverse_iterator>(base_iterator::operator--(1)); } @@ -4576,12 +4571,12 @@ class binary_reader case 0x07: case 0x08: case 0x09: - case 0x0a: - case 0x0b: - case 0x0c: - case 0x0d: - case 0x0e: - case 0x0f: + case 0x0A: + case 0x0B: + case 0x0C: + case 0x0D: + case 0x0E: + case 0x0F: case 0x10: case 0x11: case 0x12: @@ -4598,10 +4593,10 @@ class binary_reader case 0x19: // Unsigned integer (two-byte uint16_t follows) return get_number<uint16_t>(); - case 0x1a: // Unsigned integer (four-byte uint32_t follows) + case 0x1A: // Unsigned integer (four-byte uint32_t follows) return get_number<uint32_t>(); - case 0x1b: // Unsigned integer (eight-byte uint64_t follows) + case 0x1B: // Unsigned integer (eight-byte uint64_t follows) return get_number<uint64_t>(); // Negative integer -1-0x00..-1-0x17 (-1..-24) @@ -4615,12 +4610,12 @@ class binary_reader case 0x27: case 0x28: case 0x29: - case 0x2a: - case 0x2b: - case 0x2c: - case 0x2d: - case 0x2e: - case 0x2f: + case 0x2A: + case 0x2B: + case 0x2C: + case 0x2D: + case 0x2E: + case 0x2F: case 0x30: case 0x31: case 0x32: @@ -4642,12 +4637,12 @@ class binary_reader return static_cast<number_integer_t>(-1) - get_number<uint16_t>(); } - case 0x3a: // Negative integer -1-n (four-byte uint32_t follows) + case 0x3A: // Negative integer -1-n (four-byte uint32_t follows) { return static_cast<number_integer_t>(-1) - get_number<uint32_t>(); } - case 0x3b: // Negative integer -1-n (eight-byte uint64_t follows) + case 0x3B: // Negative integer -1-n (eight-byte uint64_t follows) { return static_cast<number_integer_t>(-1) - static_cast<number_integer_t>(get_number<uint64_t>()); @@ -4664,12 +4659,12 @@ class binary_reader case 0x67: case 0x68: case 0x69: - case 0x6a: - case 0x6b: - case 0x6c: - case 0x6d: - case 0x6e: - case 0x6f: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: case 0x70: case 0x71: case 0x72: @@ -4680,9 +4675,9 @@ class binary_reader case 0x77: case 0x78: // UTF-8 string (one-byte uint8_t for n follows) case 0x79: // UTF-8 string (two-byte uint16_t for n follow) - case 0x7a: // UTF-8 string (four-byte uint32_t for n follow) - case 0x7b: // UTF-8 string (eight-byte uint64_t for n follow) - case 0x7f: // UTF-8 string (indefinite length) + case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) + case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) + case 0x7F: // UTF-8 string (indefinite length) { return get_cbor_string(); } @@ -4698,12 +4693,12 @@ class binary_reader 
case 0x87: case 0x88: case 0x89: - case 0x8a: - case 0x8b: - case 0x8c: - case 0x8d: - case 0x8e: - case 0x8f: + case 0x8A: + case 0x8B: + case 0x8C: + case 0x8D: + case 0x8E: + case 0x8F: case 0x90: case 0x91: case 0x92: @@ -4713,7 +4708,7 @@ class binary_reader case 0x96: case 0x97: { - return get_cbor_array(current & 0x1f); + return get_cbor_array(current & 0x1F); } case 0x98: // array (one-byte uint8_t for n follows) @@ -4726,20 +4721,20 @@ class binary_reader return get_cbor_array(get_number<uint16_t>()); } - case 0x9a: // array (four-byte uint32_t for n follow) + case 0x9A: // array (four-byte uint32_t for n follow) { return get_cbor_array(get_number<uint32_t>()); } - case 0x9b: // array (eight-byte uint64_t for n follow) + case 0x9B: // array (eight-byte uint64_t for n follow) { return get_cbor_array(get_number<uint64_t>()); } - case 0x9f: // array (indefinite length) + case 0x9F: // array (indefinite length) { BasicJsonType result = value_t::array; - while (get() != 0xff) + while (get() != 0xFF) { result.push_back(parse_cbor_internal(false)); } @@ -4747,58 +4742,58 @@ class binary_reader } // map (0x00..0x17 pairs of data items follow) - case 0xa0: - case 0xa1: - case 0xa2: - case 0xa3: - case 0xa4: - case 0xa5: - case 0xa6: - case 0xa7: - case 0xa8: - case 0xa9: - case 0xaa: - case 0xab: - case 0xac: - case 0xad: - case 0xae: - case 0xaf: - case 0xb0: - case 0xb1: - case 0xb2: - case 0xb3: - case 0xb4: - case 0xb5: - case 0xb6: - case 0xb7: - { - return get_cbor_object(current & 0x1f); - } - - case 0xb8: // map (one-byte uint8_t for n follows) + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + { + return get_cbor_object(current & 0x1F); + } + + case 0xB8: // map (one-byte uint8_t for n follows) { return get_cbor_object(get_number<uint8_t>()); } - case 0xb9: // map (two-byte uint16_t for n follow) + case 0xB9: // map (two-byte uint16_t for n follow) { return get_cbor_object(get_number<uint16_t>()); } - case 0xba: // map (four-byte uint32_t for n follow) + case 0xBA: // map (four-byte uint32_t for n follow) { return get_cbor_object(get_number<uint32_t>()); } - case 0xbb: // map (eight-byte uint64_t for n follow) + case 0xBB: // map (eight-byte uint64_t for n follow) { return get_cbor_object(get_number<uint64_t>()); } - case 0xbf: // map (indefinite length) + case 0xBF: // map (indefinite length) { BasicJsonType result = value_t::object; - while (get() != 0xff) + while (get() != 0xFF) { auto key = get_cbor_string(); result[key] = parse_cbor_internal(); @@ -4806,22 +4801,22 @@ class binary_reader return result; } - case 0xf4: // false + case 0xF4: // false { return false; } - case 0xf5: // true + case 0xF5: // true { return true; } - case 0xf6: // null + case 0xF6: // null { return value_t::null; } - case 0xf9: // Half-Precision Float (two-byte IEEE 754) + case 0xF9: // Half-Precision Float (two-byte IEEE 754) { const int byte1 = get(); check_eof(); @@ -4837,8 +4832,8 @@ class binary_reader // half-precision floating-point numbers in the C language // is shown in Fig. 3. 
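The CBOR 0xF9 case that continues just below unpacks the two bytes as an IEEE 754 binary16 value: 5 exponent bits, 10 mantissa bits, scaled with ldexp, following the C pseudo-code in RFC 7049 Fig. 3. A standalone decoder of the same layout with two worked examples, for cross-checking:

    // Standalone decoder for the CBOR half-precision (binary16) case handled
    // below, with worked examples. Illustrative only.
    #include <cmath>
    #include <cstdio>

    double decodeHalf(unsigned int byte1, unsigned int byte2)
    {
        const int half = (byte1 << 8) + byte2;
        const int exp  = (half >> 10) & 0x1F;   // 5 exponent bits
        const int mant = half & 0x3FF;          // 10 mantissa bits

        double val;
        if (exp == 0)       val = std::ldexp(mant, -24);              // subnormal
        else if (exp != 31) val = std::ldexp(mant + 1024, exp - 25);  // normal
        else                val = mant == 0 ? INFINITY : NAN;         // inf / NaN

        return (half & 0x8000) != 0 ? -val : val;
    }

    int main()
    {
        std::printf("%g\n", decodeHalf(0x3C, 0x00));  // 0x3C00 encodes 1.0
        std::printf("%g\n", decodeHalf(0xC0, 0x00));  // 0xC000 encodes -2.0
    }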
const int half = (byte1 << 8) + byte2; - const int exp = (half >> 10) & 0x1f; - const int mant = half & 0x3ff; + const int exp = (half >> 10) & 0x1F; + const int mant = half & 0x3FF; double val; if (exp == 0) { @@ -4856,12 +4851,12 @@ class binary_reader return (half & 0x8000) != 0 ? -val : val; } - case 0xfa: // Single-Precision Float (four-byte IEEE 754) + case 0xFA: // Single-Precision Float (four-byte IEEE 754) { return get_number<float>(); } - case 0xfb: // Double-Precision Float (eight-byte IEEE 754) + case 0xFB: // Double-Precision Float (eight-byte IEEE 754) { return get_number<double>(); } @@ -4869,7 +4864,7 @@ class binary_reader default: // anything else (0xFF is handled inside the other types) { std::stringstream ss; - ss << std::setw(2) << std::setfill('0') << std::hex << current; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << current; JSON_THROW(parse_error::create(112, chars_read, "error reading CBOR; last byte: 0x" + ss.str())); } } @@ -4894,12 +4889,12 @@ class binary_reader case 0x07: case 0x08: case 0x09: - case 0x0a: - case 0x0b: - case 0x0c: - case 0x0d: - case 0x0e: - case 0x0f: + case 0x0A: + case 0x0B: + case 0x0C: + case 0x0D: + case 0x0E: + case 0x0F: case 0x10: case 0x11: case 0x12: @@ -4910,12 +4905,12 @@ class binary_reader case 0x17: case 0x18: case 0x19: - case 0x1a: - case 0x1b: - case 0x1c: - case 0x1d: - case 0x1e: - case 0x1f: + case 0x1A: + case 0x1B: + case 0x1C: + case 0x1D: + case 0x1E: + case 0x1F: case 0x20: case 0x21: case 0x22: @@ -4926,12 +4921,12 @@ class binary_reader case 0x27: case 0x28: case 0x29: - case 0x2a: - case 0x2b: - case 0x2c: - case 0x2d: - case 0x2e: - case 0x2f: + case 0x2A: + case 0x2B: + case 0x2C: + case 0x2D: + case 0x2E: + case 0x2F: case 0x30: case 0x31: case 0x32: @@ -4942,12 +4937,12 @@ class binary_reader case 0x37: case 0x38: case 0x39: - case 0x3a: - case 0x3b: - case 0x3c: - case 0x3d: - case 0x3e: - case 0x3f: + case 0x3A: + case 0x3B: + case 0x3C: + case 0x3D: + case 0x3E: + case 0x3F: case 0x40: case 0x41: case 0x42: @@ -4958,12 +4953,12 @@ class binary_reader case 0x47: case 0x48: case 0x49: - case 0x4a: - case 0x4b: - case 0x4c: - case 0x4d: - case 0x4e: - case 0x4f: + case 0x4A: + case 0x4B: + case 0x4C: + case 0x4D: + case 0x4E: + case 0x4F: case 0x50: case 0x51: case 0x52: @@ -4974,12 +4969,12 @@ class binary_reader case 0x57: case 0x58: case 0x59: - case 0x5a: - case 0x5b: - case 0x5c: - case 0x5d: - case 0x5e: - case 0x5f: + case 0x5A: + case 0x5B: + case 0x5C: + case 0x5D: + case 0x5E: + case 0x5F: case 0x60: case 0x61: case 0x62: @@ -4990,12 +4985,12 @@ class binary_reader case 0x67: case 0x68: case 0x69: - case 0x6a: - case 0x6b: - case 0x6c: - case 0x6d: - case 0x6e: - case 0x6f: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: case 0x70: case 0x71: case 0x72: @@ -5006,12 +5001,12 @@ class binary_reader case 0x77: case 0x78: case 0x79: - case 0x7a: - case 0x7b: - case 0x7c: - case 0x7d: - case 0x7e: - case 0x7f: + case 0x7A: + case 0x7B: + case 0x7C: + case 0x7D: + case 0x7E: + case 0x7F: return static_cast<number_unsigned_t>(current); // fixmap @@ -5025,14 +5020,14 @@ class binary_reader case 0x87: case 0x88: case 0x89: - case 0x8a: - case 0x8b: - case 0x8c: - case 0x8d: - case 0x8e: - case 0x8f: + case 0x8A: + case 0x8B: + case 0x8C: + case 0x8D: + case 0x8E: + case 0x8F: { - return get_msgpack_object(current & 0x0f); + return get_msgpack_object(current & 0x0F); } // fixarray @@ -5046,154 +5041,154 @@ class binary_reader case 0x97: case 0x98: 
case 0x99: - case 0x9a: - case 0x9b: - case 0x9c: - case 0x9d: - case 0x9e: - case 0x9f: + case 0x9A: + case 0x9B: + case 0x9C: + case 0x9D: + case 0x9E: + case 0x9F: { - return get_msgpack_array(current & 0x0f); + return get_msgpack_array(current & 0x0F); } // fixstr - case 0xa0: - case 0xa1: - case 0xa2: - case 0xa3: - case 0xa4: - case 0xa5: - case 0xa6: - case 0xa7: - case 0xa8: - case 0xa9: - case 0xaa: - case 0xab: - case 0xac: - case 0xad: - case 0xae: - case 0xaf: - case 0xb0: - case 0xb1: - case 0xb2: - case 0xb3: - case 0xb4: - case 0xb5: - case 0xb6: - case 0xb7: - case 0xb8: - case 0xb9: - case 0xba: - case 0xbb: - case 0xbc: - case 0xbd: - case 0xbe: - case 0xbf: + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + case 0xB8: + case 0xB9: + case 0xBA: + case 0xBB: + case 0xBC: + case 0xBD: + case 0xBE: + case 0xBF: return get_msgpack_string(); - case 0xc0: // nil + case 0xC0: // nil return value_t::null; - case 0xc2: // false + case 0xC2: // false return false; - case 0xc3: // true + case 0xC3: // true return true; - case 0xca: // float 32 + case 0xCA: // float 32 return get_number<float>(); - case 0xcb: // float 64 + case 0xCB: // float 64 return get_number<double>(); - case 0xcc: // uint 8 + case 0xCC: // uint 8 return get_number<uint8_t>(); - case 0xcd: // uint 16 + case 0xCD: // uint 16 return get_number<uint16_t>(); - case 0xce: // uint 32 + case 0xCE: // uint 32 return get_number<uint32_t>(); - case 0xcf: // uint 64 + case 0xCF: // uint 64 return get_number<uint64_t>(); - case 0xd0: // int 8 + case 0xD0: // int 8 return get_number<int8_t>(); - case 0xd1: // int 16 + case 0xD1: // int 16 return get_number<int16_t>(); - case 0xd2: // int 32 + case 0xD2: // int 32 return get_number<int32_t>(); - case 0xd3: // int 64 + case 0xD3: // int 64 return get_number<int64_t>(); - case 0xd9: // str 8 - case 0xda: // str 16 - case 0xdb: // str 32 + case 0xD9: // str 8 + case 0xDA: // str 16 + case 0xDB: // str 32 return get_msgpack_string(); - case 0xdc: // array 16 + case 0xDC: // array 16 { return get_msgpack_array(get_number<uint16_t>()); } - case 0xdd: // array 32 + case 0xDD: // array 32 { return get_msgpack_array(get_number<uint32_t>()); } - case 0xde: // map 16 + case 0xDE: // map 16 { return get_msgpack_object(get_number<uint16_t>()); } - case 0xdf: // map 32 + case 0xDF: // map 32 { return get_msgpack_object(get_number<uint32_t>()); } // positive fixint - case 0xe0: - case 0xe1: - case 0xe2: - case 0xe3: - case 0xe4: - case 0xe5: - case 0xe6: - case 0xe7: - case 0xe8: - case 0xe9: - case 0xea: - case 0xeb: - case 0xec: - case 0xed: - case 0xee: - case 0xef: - case 0xf0: - case 0xf1: - case 0xf2: - case 0xf3: - case 0xf4: - case 0xf5: - case 0xf6: - case 0xf7: - case 0xf8: - case 0xf9: - case 0xfa: - case 0xfb: - case 0xfc: - case 0xfd: - case 0xfe: - case 0xff: + case 0xE0: + case 0xE1: + case 0xE2: + case 0xE3: + case 0xE4: + case 0xE5: + case 0xE6: + case 0xE7: + case 0xE8: + case 0xE9: + case 0xEA: + case 0xEB: + case 0xEC: + case 0xED: + case 0xEE: + case 0xEF: + case 0xF0: + case 0xF1: + case 0xF2: + case 0xF3: + case 0xF4: + case 0xF5: + case 0xF6: + case 0xF7: + case 0xF8: + case 0xF9: + case 0xFA: + case 0xFB: + case 0xFC: + case 0xFD: + case 0xFE: + case 0xFF: return static_cast<int8_t>(current); default: // 
anything else { std::stringstream ss; - ss << std::setw(2) << std::setfill('0') << std::hex << current; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << current; JSON_THROW(parse_error::create(112, chars_read, "error reading MessagePack; last byte: 0x" + ss.str())); } @@ -5309,12 +5304,12 @@ class binary_reader case 0x67: case 0x68: case 0x69: - case 0x6a: - case 0x6b: - case 0x6c: - case 0x6d: - case 0x6e: - case 0x6f: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: case 0x70: case 0x71: case 0x72: @@ -5324,7 +5319,7 @@ class binary_reader case 0x76: case 0x77: { - return get_string(current & 0x1f); + return get_string(current & 0x1F); } case 0x78: // UTF-8 string (one-byte uint8_t for n follows) @@ -5337,20 +5332,20 @@ class binary_reader return get_string(get_number<uint16_t>()); } - case 0x7a: // UTF-8 string (four-byte uint32_t for n follow) + case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) { return get_string(get_number<uint32_t>()); } - case 0x7b: // UTF-8 string (eight-byte uint64_t for n follow) + case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) { return get_string(get_number<uint64_t>()); } - case 0x7f: // UTF-8 string (indefinite length) + case 0x7F: // UTF-8 string (indefinite length) { std::string result; - while (get() != 0xff) + while (get() != 0xFF) { check_eof(); result.push_back(static_cast<char>(current)); @@ -5361,7 +5356,7 @@ class binary_reader default: { std::stringstream ss; - ss << std::setw(2) << std::setfill('0') << std::hex << current; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << current; JSON_THROW(parse_error::create(113, chars_read, "expected a CBOR string; last byte: 0x" + ss.str())); } } @@ -5412,53 +5407,53 @@ class binary_reader switch (current) { // fixstr - case 0xa0: - case 0xa1: - case 0xa2: - case 0xa3: - case 0xa4: - case 0xa5: - case 0xa6: - case 0xa7: - case 0xa8: - case 0xa9: - case 0xaa: - case 0xab: - case 0xac: - case 0xad: - case 0xae: - case 0xaf: - case 0xb0: - case 0xb1: - case 0xb2: - case 0xb3: - case 0xb4: - case 0xb5: - case 0xb6: - case 0xb7: - case 0xb8: - case 0xb9: - case 0xba: - case 0xbb: - case 0xbc: - case 0xbd: - case 0xbe: - case 0xbf: - { - return get_string(current & 0x1f); - } - - case 0xd9: // str 8 + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + case 0xB8: + case 0xB9: + case 0xBA: + case 0xBB: + case 0xBC: + case 0xBD: + case 0xBE: + case 0xBF: + { + return get_string(current & 0x1F); + } + + case 0xD9: // str 8 { return get_string(get_number<uint8_t>()); } - case 0xda: // str 16 + case 0xDA: // str 16 { return get_string(get_number<uint16_t>()); } - case 0xdb: // str 32 + case 0xDB: // str 32 { return get_string(get_number<uint32_t>()); } @@ -5466,7 +5461,7 @@ class binary_reader default: { std::stringstream ss; - ss << std::setw(2) << std::setfill('0') << std::hex << current; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << current; JSON_THROW(parse_error::create(113, chars_read, "expected a MessagePack string; last byte: 0x" + ss.str())); } @@ -5562,15 +5557,15 @@ class binary_writer { case value_t::null: { - oa->write_character(static_cast<CharType>(0xf6)); + oa->write_character(static_cast<CharType>(0xF6)); break; } 
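            // CBOR packs each item's major type into the top three bits of the initial
            // byte and an "additional information" value into the low five bits:
            // 0..23 is stored inline, while 24, 25, 26 and 27 announce a following
            // 1-, 2-, 4- or 8-byte argument. The simple values false, true and null
            // are major type 7 with arguments 20..22, hence the constants 0xF4..0xF6
            // used here.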
case value_t::boolean: { oa->write_character(j.m_value.boolean - ? static_cast<CharType>(0xf5) - : static_cast<CharType>(0xf4)); + ? static_cast<CharType>(0xF5) + : static_cast<CharType>(0xF4)); break; } @@ -5597,12 +5592,12 @@ class binary_writer } else if (j.m_value.number_integer <= (std::numeric_limits<uint32_t>::max)()) { - oa->write_character(static_cast<CharType>(0x1a)); + oa->write_character(static_cast<CharType>(0x1A)); write_number(static_cast<uint32_t>(j.m_value.number_integer)); } else { - oa->write_character(static_cast<CharType>(0x1b)); + oa->write_character(static_cast<CharType>(0x1B)); write_number(static_cast<uint64_t>(j.m_value.number_integer)); } } @@ -5627,12 +5622,12 @@ class binary_writer } else if (positive_number <= (std::numeric_limits<uint32_t>::max)()) { - oa->write_character(static_cast<CharType>(0x3a)); + oa->write_character(static_cast<CharType>(0x3A)); write_number(static_cast<uint32_t>(positive_number)); } else { - oa->write_character(static_cast<CharType>(0x3b)); + oa->write_character(static_cast<CharType>(0x3B)); write_number(static_cast<uint64_t>(positive_number)); } } @@ -5657,12 +5652,12 @@ class binary_writer } else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)()) { - oa->write_character(static_cast<CharType>(0x1a)); + oa->write_character(static_cast<CharType>(0x1A)); write_number(static_cast<uint32_t>(j.m_value.number_unsigned)); } else { - oa->write_character(static_cast<CharType>(0x1b)); + oa->write_character(static_cast<CharType>(0x1B)); write_number(static_cast<uint64_t>(j.m_value.number_unsigned)); } break; @@ -5670,7 +5665,7 @@ class binary_writer case value_t::number_float: // Double-Precision Float { - oa->write_character(static_cast<CharType>(0xfb)); + oa->write_character(static_cast<CharType>(0xFB)); write_number(j.m_value.number_float); break; } @@ -5683,25 +5678,25 @@ class binary_writer { write_number(static_cast<uint8_t>(0x60 + N)); } - else if (N <= 0xff) + else if (N <= 0xFF) { oa->write_character(static_cast<CharType>(0x78)); write_number(static_cast<uint8_t>(N)); } - else if (N <= 0xffff) + else if (N <= 0xFFFF) { oa->write_character(static_cast<CharType>(0x79)); write_number(static_cast<uint16_t>(N)); } - else if (N <= 0xffffffff) + else if (N <= 0xFFFFFFFF) { - oa->write_character(static_cast<CharType>(0x7a)); + oa->write_character(static_cast<CharType>(0x7A)); write_number(static_cast<uint32_t>(N)); } // LCOV_EXCL_START - else if (N <= 0xffffffffffffffff) + else if (N <= 0xFFFFFFFFFFFFFFFF) { - oa->write_character(static_cast<CharType>(0x7b)); + oa->write_character(static_cast<CharType>(0x7B)); write_number(static_cast<uint64_t>(N)); } // LCOV_EXCL_STOP @@ -5721,25 +5716,25 @@ class binary_writer { write_number(static_cast<uint8_t>(0x80 + N)); } - else if (N <= 0xff) + else if (N <= 0xFF) { oa->write_character(static_cast<CharType>(0x98)); write_number(static_cast<uint8_t>(N)); } - else if (N <= 0xffff) + else if (N <= 0xFFFF) { oa->write_character(static_cast<CharType>(0x99)); write_number(static_cast<uint16_t>(N)); } - else if (N <= 0xffffffff) + else if (N <= 0xFFFFFFFF) { - oa->write_character(static_cast<CharType>(0x9a)); + oa->write_character(static_cast<CharType>(0x9A)); write_number(static_cast<uint32_t>(N)); } // LCOV_EXCL_START - else if (N <= 0xffffffffffffffff) + else if (N <= 0xFFFFFFFFFFFFFFFF) { - oa->write_character(static_cast<CharType>(0x9b)); + oa->write_character(static_cast<CharType>(0x9B)); write_number(static_cast<uint64_t>(N)); } // LCOV_EXCL_STOP @@ -5758,27 +5753,27 @@ class 
binary_writer const auto N = j.m_value.object->size(); if (N <= 0x17) { - write_number(static_cast<uint8_t>(0xa0 + N)); + write_number(static_cast<uint8_t>(0xA0 + N)); } - else if (N <= 0xff) + else if (N <= 0xFF) { - oa->write_character(static_cast<CharType>(0xb8)); + oa->write_character(static_cast<CharType>(0xB8)); write_number(static_cast<uint8_t>(N)); } - else if (N <= 0xffff) + else if (N <= 0xFFFF) { - oa->write_character(static_cast<CharType>(0xb9)); + oa->write_character(static_cast<CharType>(0xB9)); write_number(static_cast<uint16_t>(N)); } - else if (N <= 0xffffffff) + else if (N <= 0xFFFFFFFF) { - oa->write_character(static_cast<CharType>(0xba)); + oa->write_character(static_cast<CharType>(0xBA)); write_number(static_cast<uint32_t>(N)); } // LCOV_EXCL_START - else if (N <= 0xffffffffffffffff) + else if (N <= 0xFFFFFFFFFFFFFFFF) { - oa->write_character(static_cast<CharType>(0xbb)); + oa->write_character(static_cast<CharType>(0xBB)); write_number(static_cast<uint64_t>(N)); } // LCOV_EXCL_STOP @@ -5806,15 +5801,15 @@ class binary_writer { case value_t::null: // nil { - oa->write_character(static_cast<CharType>(0xc0)); + oa->write_character(static_cast<CharType>(0xC0)); break; } case value_t::boolean: // true and false { oa->write_character(j.m_value.boolean - ? static_cast<CharType>(0xc3) - : static_cast<CharType>(0xc2)); + ? static_cast<CharType>(0xC3) + : static_cast<CharType>(0xC2)); break; } @@ -5833,25 +5828,25 @@ class binary_writer else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)()) { // uint 8 - oa->write_character(static_cast<CharType>(0xcc)); + oa->write_character(static_cast<CharType>(0xCC)); write_number(static_cast<uint8_t>(j.m_value.number_integer)); } else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)()) { // uint 16 - oa->write_character(static_cast<CharType>(0xcd)); + oa->write_character(static_cast<CharType>(0xCD)); write_number(static_cast<uint16_t>(j.m_value.number_integer)); } else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)()) { // uint 32 - oa->write_character(static_cast<CharType>(0xce)); + oa->write_character(static_cast<CharType>(0xCE)); write_number(static_cast<uint32_t>(j.m_value.number_integer)); } else if (j.m_value.number_unsigned <= (std::numeric_limits<uint64_t>::max)()) { // uint 64 - oa->write_character(static_cast<CharType>(0xcf)); + oa->write_character(static_cast<CharType>(0xCF)); write_number(static_cast<uint64_t>(j.m_value.number_integer)); } } @@ -5866,28 +5861,28 @@ class binary_writer j.m_value.number_integer <= (std::numeric_limits<int8_t>::max)()) { // int 8 - oa->write_character(static_cast<CharType>(0xd0)); + oa->write_character(static_cast<CharType>(0xD0)); write_number(static_cast<int8_t>(j.m_value.number_integer)); } else if (j.m_value.number_integer >= (std::numeric_limits<int16_t>::min)() and j.m_value.number_integer <= (std::numeric_limits<int16_t>::max)()) { // int 16 - oa->write_character(static_cast<CharType>(0xd1)); + oa->write_character(static_cast<CharType>(0xD1)); write_number(static_cast<int16_t>(j.m_value.number_integer)); } else if (j.m_value.number_integer >= (std::numeric_limits<int32_t>::min)() and j.m_value.number_integer <= (std::numeric_limits<int32_t>::max)()) { // int 32 - oa->write_character(static_cast<CharType>(0xd2)); + oa->write_character(static_cast<CharType>(0xD2)); write_number(static_cast<int32_t>(j.m_value.number_integer)); } else if (j.m_value.number_integer >= (std::numeric_limits<int64_t>::min)() and j.m_value.number_integer 
<= (std::numeric_limits<int64_t>::max)()) { // int 64 - oa->write_character(static_cast<CharType>(0xd3)); + oa->write_character(static_cast<CharType>(0xD3)); write_number(static_cast<int64_t>(j.m_value.number_integer)); } } @@ -5904,25 +5899,25 @@ class binary_writer else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)()) { // uint 8 - oa->write_character(static_cast<CharType>(0xcc)); + oa->write_character(static_cast<CharType>(0xCC)); write_number(static_cast<uint8_t>(j.m_value.number_integer)); } else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)()) { // uint 16 - oa->write_character(static_cast<CharType>(0xcd)); + oa->write_character(static_cast<CharType>(0xCD)); write_number(static_cast<uint16_t>(j.m_value.number_integer)); } else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)()) { // uint 32 - oa->write_character(static_cast<CharType>(0xce)); + oa->write_character(static_cast<CharType>(0xCE)); write_number(static_cast<uint32_t>(j.m_value.number_integer)); } else if (j.m_value.number_unsigned <= (std::numeric_limits<uint64_t>::max)()) { // uint 64 - oa->write_character(static_cast<CharType>(0xcf)); + oa->write_character(static_cast<CharType>(0xCF)); write_number(static_cast<uint64_t>(j.m_value.number_integer)); } break; @@ -5930,7 +5925,7 @@ class binary_writer case value_t::number_float: // float 64 { - oa->write_character(static_cast<CharType>(0xcb)); + oa->write_character(static_cast<CharType>(0xCB)); write_number(j.m_value.number_float); break; } @@ -5942,24 +5937,24 @@ class binary_writer if (N <= 31) { // fixstr - write_number(static_cast<uint8_t>(0xa0 | N)); + write_number(static_cast<uint8_t>(0xA0 | N)); } else if (N <= 255) { // str 8 - oa->write_character(static_cast<CharType>(0xd9)); + oa->write_character(static_cast<CharType>(0xD9)); write_number(static_cast<uint8_t>(N)); } else if (N <= 65535) { // str 16 - oa->write_character(static_cast<CharType>(0xda)); + oa->write_character(static_cast<CharType>(0xDA)); write_number(static_cast<uint16_t>(N)); } else if (N <= 4294967295) { // str 32 - oa->write_character(static_cast<CharType>(0xdb)); + oa->write_character(static_cast<CharType>(0xDB)); write_number(static_cast<uint32_t>(N)); } @@ -5979,16 +5974,16 @@ class binary_writer // fixarray write_number(static_cast<uint8_t>(0x90 | N)); } - else if (N <= 0xffff) + else if (N <= 0xFFFF) { // array 16 - oa->write_character(static_cast<CharType>(0xdc)); + oa->write_character(static_cast<CharType>(0xDC)); write_number(static_cast<uint16_t>(N)); } - else if (N <= 0xffffffff) + else if (N <= 0xFFFFFFFF) { // array 32 - oa->write_character(static_cast<CharType>(0xdd)); + oa->write_character(static_cast<CharType>(0xDD)); write_number(static_cast<uint32_t>(N)); } @@ -6007,18 +6002,18 @@ class binary_writer if (N <= 15) { // fixmap - write_number(static_cast<uint8_t>(0x80 | (N & 0xf))); + write_number(static_cast<uint8_t>(0x80 | (N & 0xF))); } else if (N <= 65535) { // map 16 - oa->write_character(static_cast<CharType>(0xde)); + oa->write_character(static_cast<CharType>(0xDE)); write_number(static_cast<uint16_t>(N)); } else if (N <= 4294967295) { // map 32 - oa->write_character(static_cast<CharType>(0xdf)); + oa->write_character(static_cast<CharType>(0xDF)); write_number(static_cast<uint32_t>(N)); } @@ -6363,9 +6358,9 @@ class serializer case 0x05: case 0x06: case 0x07: - case 0x0b: - case 0x0e: - case 0x0f: + case 0x0B: + case 0x0E: + case 0x0F: case 0x10: case 0x11: case 0x12: @@ -6376,12 +6371,12 @@ class serializer case 
0x17: case 0x18: case 0x19: - case 0x1a: - case 0x1b: - case 0x1c: - case 0x1d: - case 0x1e: - case 0x1f: + case 0x1A: + case 0x1B: + case 0x1C: + case 0x1D: + case 0x1E: + case 0x1F: { // from c (1 byte) to \uxxxx (6 bytes) res += 5; @@ -6393,12 +6388,8 @@ class serializer if (ensure_ascii and (s[i] & 0x80 or s[i] == 0x7F)) { const auto bytes = bytes_following(static_cast<uint8_t>(s[i])); - if (bytes == std::string::npos) - { - // invalid characters are treated as is, so no - // additional space will be used - break; - } + // invalid characters will be detected by throw_if_invalid_utf8 + assert (bytes != std::string::npos); if (bytes == 3) { @@ -6492,6 +6483,8 @@ class serializer */ void dump_escaped(const string_t& s, const bool ensure_ascii) const { + throw_if_invalid_utf8(s); + const auto space = extra_space(s, ensure_ascii); if (space == 0) { @@ -6514,7 +6507,7 @@ class serializer break; } - case '\\': // reverse solidus (0x5c) + case '\\': // reverse solidus (0x5C) { // nothing to change pos += 2; @@ -6528,21 +6521,21 @@ class serializer break; } - case '\f': // formfeed (0x0c) + case '\f': // formfeed (0x0C) { result[pos + 1] = 'f'; pos += 2; break; } - case '\n': // newline (0x0a) + case '\n': // newline (0x0A) { result[pos + 1] = 'n'; pos += 2; break; } - case '\r': // carriage return (0x0d) + case '\r': // carriage return (0x0D) { result[pos + 1] = 'r'; pos += 2; @@ -6564,21 +6557,18 @@ class serializer (ensure_ascii and (s[i] & 0x80 or s[i] == 0x7F))) { const auto bytes = bytes_following(static_cast<uint8_t>(s[i])); - if (bytes == std::string::npos) - { - // copy invalid character as is - result[pos++] = s[i]; - break; - } + // invalid characters will be detected by throw_if_invalid_utf8 + assert (bytes != std::string::npos); // check that the additional bytes are present assert(i + bytes < s.size()); - // to use \uxxxx escaping, we first need to caluclate + // to use \uxxxx escaping, we first need to calculate // the codepoint from the UTF-8 bytes int codepoint = 0; - assert(0 <= bytes and bytes <= 3); + // bytes is unsigned type: + assert(bytes <= 3); switch (bytes) { case 0: @@ -6641,11 +6631,10 @@ class serializer @param[in] x integer number (signed or unsigned) to dump @tparam NumberType either @a number_integer_t or @a number_unsigned_t */ - template < - typename NumberType, - detail::enable_if_t<std::is_same<NumberType, number_unsigned_t>::value or - std::is_same<NumberType, number_integer_t>::value, - int> = 0 > + template<typename NumberType, detail::enable_if_t< + std::is_same<NumberType, number_unsigned_t>::value or + std::is_same<NumberType, number_integer_t>::value, + int> = 0> void dump_integer(NumberType x) { // special case for "0" @@ -6743,6 +6732,87 @@ class serializer } } + /*! + @brief check whether a string is UTF-8 encoded + + The function checks each byte of a string whether it is UTF-8 encoded. The + result of the check is stored in the @a state parameter. The function must + be called initially with state 0 (accept). State 1 means the string must + be rejected, because the current byte is not allowed. If the string is + completely processed, but the state is non-zero, the string ended + prematurely; that is, the last byte indicated more bytes should have + followed. + + @param[in,out] state the state of the decoding + @param[in] byte next byte to decode + + @note The function has been edited: a std::array is used and the code + point is not calculated. 
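    @note The first 256 table entries classify each input byte into a character
    class; the remaining entries form the state-transition table, indexed as
    256 + 16 * state + type. State 0 means accept, state 1 means reject.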
+ + @copyright Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de> + @sa http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + */ + static void decode(uint8_t& state, const uint8_t byte) + { + static const std::array<uint8_t, 400> utf8d = + { + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7F + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9F + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // A0..BF + 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0..DF + 0xA, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // E0..EF + 0xB, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // F0..FF + 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 + 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 + 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 + 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // s7..s8 + } + }; + + const uint8_t type = utf8d[byte]; + state = utf8d[256u + state * 16u + type]; + } + + /*! 
+ @brief throw an exception if a string is not UTF-8 encoded + + @param[in] str UTF-8 string to check + @throw type_error.316 if passed string is not UTF-8 encoded + + @since version 3.0.0 + */ + static void throw_if_invalid_utf8(const std::string& str) + { + // start with state 0 (= accept) + uint8_t state = 0; + + for (size_t i = 0; i < str.size(); ++i) + { + const auto byte = static_cast<uint8_t>(str[i]); + decode(state, byte); + if (state == 1) + { + // state 1 means reject + std::stringstream ss; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << static_cast<int>(byte); + JSON_THROW(type_error::create(316, "invalid UTF-8 byte at index " + std::to_string(i) + ": 0x" + ss.str())); + } + } + + if (state != 0) + { + // we finish reading, but do not accept: string was incomplete + std::stringstream ss; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << static_cast<int>(static_cast<uint8_t>(str.back())); + JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + ss.str())); + } + } + private: /// the output of the serializer output_adapter_t<char> o = nullptr; @@ -6771,27 +6841,20 @@ class json_ref using value_type = BasicJsonType; json_ref(value_type&& value) - : owned_value(std::move(value)), - value_ref(&owned_value), - is_rvalue(true) + : owned_value(std::move(value)), value_ref(&owned_value), is_rvalue(true) {} json_ref(const value_type& value) - : value_ref(const_cast<value_type*>(&value)), - is_rvalue(false) + : value_ref(const_cast<value_type*>(&value)), is_rvalue(false) {} json_ref(std::initializer_list<json_ref> init) - : owned_value(init), - value_ref(&owned_value), - is_rvalue(true) + : owned_value(init), value_ref(&owned_value), is_rvalue(true) {} - template <class... Args> + template<class... Args> json_ref(Args&& ... args) - : owned_value(std::forward<Args>(args)...), - value_ref(&owned_value), - is_rvalue(true) + : owned_value(std::forward<Args>(args)...), value_ref(&owned_value), is_rvalue(true) {} // class should be movable only @@ -6949,6 +7012,27 @@ class json_pointer return to_string(); } + /*! + @param[in] s reference token to be converted into an array index + + @return integer representation of @a s + + @throw out_of_range.404 if string @a s could not be converted to an integer + */ + static int array_index(const std::string& s) + { + size_t processed_chars = 0; + const int res = std::stoi(s, &processed_chars); + + // check if the string was completely read + if (JSON_UNLIKELY(processed_chars != s.size())) + { + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'")); + } + + return res; + } + private: /*! @brief remove and return last reference pointer @@ -6984,7 +7068,6 @@ class json_pointer return result; } - /*! 
@brief create and return a reference to the pointed to value @@ -7320,11 +7403,11 @@ class basic_json public: using value_t = detail::value_t; - // forward declarations + /// @copydoc nlohmann::json_pointer using json_pointer = ::nlohmann::json_pointer; template<typename T, typename SFINAE> using json_serializer = JSONSerializer<T, SFINAE>; - + /// helper type for initializer lists of basic_json values using initializer_list_t = std::initializer_list<detail::json_ref<basic_json>>; //////////////// @@ -7436,7 +7519,7 @@ class basic_json result["url"] = "https://github.com/nlohmann/json"; result["version"] = { - {"string", "2.1.1"}, {"major", 2}, {"minor", 1}, {"patch", 1} + {"string", "3.0.1"}, {"major", 3}, {"minor", 0}, {"patch", 1} }; #ifdef _WIN32 @@ -7489,6 +7572,14 @@ class basic_json /// the template arguments passed to class @ref basic_json. /// @{ +#if defined(JSON_HAS_CPP_14) + // Use transparent comparator if possible, combined with perfect forwarding + // on find() and count() calls prevents unnecessary string construction. + using object_comparator_t = std::less<>; +#else + using object_comparator_t = std::less<StringType>; +#endif + /*! @brief a type for an object @@ -7572,14 +7663,6 @@ class basic_json 7159](http://rfc7159.net/rfc7159), because any order implements the specified "unordered" nature of JSON objects. */ - -#if defined(JSON_HAS_CPP_14) - // Use transparent comparator if possible, combined with perfect forwarding - // on find() and count() calls prevents unnecessary string construction. - using object_comparator_t = std::less<>; -#else - using object_comparator_t = std::less<StringType>; -#endif using object_t = ObjectType<StringType, basic_json, object_comparator_t, @@ -7931,12 +8014,14 @@ class basic_json static T* create(Args&& ... 
args) { AllocatorType<T> alloc; + using AllocatorTraits = std::allocator_traits<AllocatorType<T>>; + auto deleter = [&](T * object) { - alloc.deallocate(object, 1); + AllocatorTraits::deallocate(alloc, object, 1); }; - std::unique_ptr<T, decltype(deleter)> object(alloc.allocate(1), deleter); - alloc.construct(object.get(), std::forward<Args>(args)...); + std::unique_ptr<T, decltype(deleter)> object(AllocatorTraits::allocate(alloc, 1), deleter); + AllocatorTraits::construct(alloc, object.get(), std::forward<Args>(args)...); assert(object != nullptr); return object.release(); } @@ -8054,7 +8139,7 @@ class basic_json object = nullptr; // silence warning, see #821 if (JSON_UNLIKELY(t == value_t::null)) { - JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 2.1.1")); // LCOV_EXCL_LINE + JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.0.1")); // LCOV_EXCL_LINE } break; } @@ -8104,24 +8189,24 @@ class basic_json case value_t::object: { AllocatorType<object_t> alloc; - alloc.destroy(object); - alloc.deallocate(object, 1); + std::allocator_traits<decltype(alloc)>::destroy(alloc, object); + std::allocator_traits<decltype(alloc)>::deallocate(alloc, object, 1); break; } case value_t::array: { AllocatorType<array_t> alloc; - alloc.destroy(array); - alloc.deallocate(array, 1); + std::allocator_traits<decltype(alloc)>::destroy(alloc, array); + std::allocator_traits<decltype(alloc)>::deallocate(alloc, array, 1); break; } case value_t::string: { AllocatorType<string_t> alloc; - alloc.destroy(string); - alloc.deallocate(string, 1); + std::allocator_traits<decltype(alloc)>::destroy(alloc, string); + std::allocator_traits<decltype(alloc)>::deallocate(alloc, string, 1); break; } @@ -8154,6 +8239,21 @@ class basic_json // JSON parser callback // ////////////////////////// + /*! + @brief parser event types + + The parser callback distinguishes the following events: + - `object_start`: the parser read `{` and started to process a JSON object + - `key`: the parser read a key of a value in an object + - `object_end`: the parser read `}` and finished processing a JSON object + - `array_start`: the parser read `[` and started to process a JSON array + - `array_end`: the parser read `]` and finished processing a JSON array + - `value`: the parser finished reading a JSON value + + @image html callback_events.png "Example when certain parse events are triggered" + + @sa @ref parser_callback_t for more information and examples + */ using parse_event_t = typename parser::parse_event_t; /*! @@ -8280,7 +8380,7 @@ class basic_json @brief create a JSON value This is a "catch all" constructor for all compatible JSON types; that is, - types for which a `to_json()` method exsits. The constructor forwards the + types for which a `to_json()` method exists. The constructor forwards the parameter @a val to that method (to `json_serializer<U>::to_json` method with `U = uncvref_t<CompatibleType>`, to be exact). @@ -8952,11 +9052,14 @@ class basic_json @param[in] indent_char The character to use for indentation if @a indent is greater than `0`. The default is ` ` (space). @param[in] ensure_ascii If @a ensure_ascii is true, all non-ASCII characters - in the output are escaped with \uXXXX sequences, and the result consists + in the output are escaped with `\uXXXX` sequences, and the result consists of ASCII characters only. 
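    A small sketch of the @a ensure_ascii flag (assuming the default `json`
    specialization):

    @code{cpp}
    json j = "Zürich";
    j.dump();              // serialized as "Zürich", UTF-8 bytes kept as-is
    j.dump(-1, ' ', true); // every non-ASCII character written as a \uXXXX escape
    @endcode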
@return string containing the serialization of the JSON value + @throw type_error.316 if a string stored inside the JSON value is not + UTF-8 encoded + @complexity Linear. @exceptionsafety Strong guarantee: if an exception is thrown, there are no @@ -8968,8 +9071,8 @@ class basic_json @see https://docs.python.org/2/library/json.html#json.dump - @since version 1.0.0; indentation character @a indent_char and option - @a ensure_ascii added in version 3.0.0 + @since version 1.0.0; indentation character @a indent_char, option + @a ensure_ascii and exceptions added in version 3.0.0 */ string_t dump(const int indent = -1, const char indent_char = ' ', const bool ensure_ascii = false) const @@ -9003,7 +9106,7 @@ class basic_json string | value_t::string number (integer) | value_t::number_integer number (unsigned integer) | value_t::number_unsigned - number (foating-point) | value_t::number_float + number (floating-point) | value_t::number_float object | value_t::object array | value_t::array discarded | value_t::discarded @@ -9507,11 +9610,9 @@ class basic_json @since version 2.1.0 */ - template < - typename BasicJsonType, - detail::enable_if_t<std::is_same<typename std::remove_const<BasicJsonType>::type, - basic_json_t>::value, - int> = 0 > + template<typename BasicJsonType, detail::enable_if_t< + std::is_same<typename std::remove_const<BasicJsonType>::type, basic_json_t>::value, + int> = 0> basic_json get() const { return *this; @@ -9556,14 +9657,12 @@ class basic_json @since version 2.1.0 */ - template < - typename ValueTypeCV, - typename ValueType = detail::uncvref_t<ValueTypeCV>, - detail::enable_if_t < - not std::is_same<basic_json_t, ValueType>::value and - detail::has_from_json<basic_json_t, ValueType>::value and - not detail::has_non_default_from_json<basic_json_t, ValueType>::value, - int > = 0 > + template<typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>, + detail::enable_if_t < + not std::is_same<basic_json_t, ValueType>::value and + detail::has_from_json<basic_json_t, ValueType>::value and + not detail::has_non_default_from_json<basic_json_t, ValueType>::value, + int> = 0> ValueType get() const noexcept(noexcept( JSONSerializer<ValueType>::from_json(std::declval<const basic_json_t&>(), std::declval<ValueType&>()))) { @@ -9611,12 +9710,10 @@ class basic_json @since version 2.1.0 */ - template < - typename ValueTypeCV, - typename ValueType = detail::uncvref_t<ValueTypeCV>, - detail::enable_if_t<not std::is_same<basic_json_t, ValueType>::value and - detail::has_non_default_from_json<basic_json_t, - ValueType>::value, int> = 0 > + template<typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>, + detail::enable_if_t<not std::is_same<basic_json_t, ValueType>::value and + detail::has_non_default_from_json<basic_json_t, ValueType>::value, + int> = 0> ValueType get() const noexcept(noexcept( JSONSerializer<ValueTypeCV>::from_json(std::declval<const basic_json_t&>()))) { @@ -10110,7 +10207,7 @@ class basic_json @return const reference to the element at index @a idx - @throw type_error.305 if the JSON value is not an array; in that cases, + @throw type_error.305 if the JSON value is not an array; in that case, using the [] operator with an index makes no sense. @complexity Constant. @@ -10193,7 +10290,7 @@ class basic_json @pre The element with key @a key must exist. 
**This precondition is enforced with an assertion.** - @throw type_error.305 if the JSON value is not an object; in that cases, + @throw type_error.305 if the JSON value is not an object; in that case, using the [] operator with a key makes no sense. @complexity Logarithmic in the size of the container. @@ -10282,7 +10379,7 @@ class basic_json @pre The element with key @a key must exist. **This precondition is enforced with an assertion.** - @throw type_error.305 if the JSON value is not an object; in that cases, + @throw type_error.305 if the JSON value is not an object; in that case, using the [] operator with a key makes no sense. @complexity Logarithmic in the size of the container. @@ -10342,7 +10439,7 @@ class basic_json @return copy of the element at key @a key or @a default_value if @a key is not found - @throw type_error.306 if the JSON value is not an objec; in that cases, + @throw type_error.306 if the JSON value is not an object; in that case, using `value()` with a key makes no sense. @complexity Logarithmic in the size of the container. @@ -10415,7 +10512,7 @@ class basic_json @return copy of the element at key @a key or @a default_value if @a key is not found - @throw type_error.306 if the JSON value is not an objec; in that cases, + @throw type_error.306 if the JSON value is not an objec; in that case, using `value()` with a key makes no sense. @complexity Logarithmic in the size of the container. @@ -10619,8 +10716,8 @@ class basic_json if (is_string()) { AllocatorType<string_t> alloc; - alloc.destroy(m_value.string); - alloc.deallocate(m_value.string, 1); + std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.string); + std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.string, 1); m_value.string = nullptr; } @@ -10725,8 +10822,8 @@ class basic_json if (is_string()) { AllocatorType<string_t> alloc; - alloc.destroy(m_value.string); - alloc.deallocate(m_value.string, 1); + std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.string); + std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.string, 1); m_value.string = nullptr; } @@ -11220,22 +11317,62 @@ class basic_json reference to the JSON values is returned, so there is no access to the underlying iterator. + For loop without iterator_wrapper: + + @code{cpp} + for (auto it = j_object.begin(); it != j_object.end(); ++it) + { + std::cout << "key: " << it.key() << ", value:" << it.value() << '\n'; + } + @endcode + + Range-based for loop without iterator proxy: + + @code{cpp} + for (auto it : j_object) + { + // "it" is of type json::reference and has no key() member + std::cout << "value: " << it << '\n'; + } + @endcode + + Range-based for loop with iterator proxy: + + @code{cpp} + for (auto it : json::iterator_wrapper(j_object)) + { + std::cout << "key: " << it.key() << ", value:" << it.value() << '\n'; + } + @endcode + + @note When iterating over an array, `key()` will return the index of the + element as string (see example). + + @param[in] ref reference to a JSON value + @return iteration proxy object wrapping @a ref with an interface to use in + range-based for loops + @liveexample{The following code shows how the wrapper is used,iterator_wrapper} + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Constant. + @note The name of this function is not yet final and may change in the future. 
*/ - static iteration_proxy<iterator> iterator_wrapper(reference cont) + static iteration_proxy<iterator> iterator_wrapper(reference ref) { - return iteration_proxy<iterator>(cont); + return iteration_proxy<iterator>(ref); } /*! @copydoc iterator_wrapper(reference) */ - static iteration_proxy<const_iterator> iterator_wrapper(const_reference cont) + static iteration_proxy<const_iterator> iterator_wrapper(const_reference ref) { - return iteration_proxy<const_iterator>(cont); + return iteration_proxy<const_iterator>(ref); } /// @} @@ -12120,7 +12257,7 @@ class basic_json JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(j.type_name()))); } - for (auto it = j.begin(); it != j.end(); ++it) + for (auto it = j.cbegin(); it != j.cend(); ++it) { m_value.object->operator[](it.key()) = it.value(); } @@ -12341,7 +12478,7 @@ class basic_json [comparison function](https://github.com/mariokonrad/marnav/blob/master/src/marnav/math/floatingpoint.hpp#L34-#L39) could be used, for instance @code {.cpp} - template <typename T, typename = typename std::enable_if<std::is_floating_point<T>::value, T>::type> + template<typename T, typename = typename std::enable_if<std::is_floating_point<T>::value, T>::type> inline bool is_same(T a, T b, T epsilon = std::numeric_limits<T>::epsilon()) noexcept { return std::abs(a - b) <= epsilon; @@ -12769,7 +12906,7 @@ class basic_json `std::setw(4)` on @a o sets the indentation level to `4` and the serialization result is the same as calling `dump(4)`. - - The indentation characrer can be controlled with the member variable + - The indentation character can be controlled with the member variable `fill` of the output stream @a o. For instance, the manipulator `std::setfill('\\t')` sets indentation to use a tab character rather than the default space character. @@ -12779,12 +12916,15 @@ class basic_json @return the stream @a o + @throw type_error.316 if a string stored inside the JSON value is not + UTF-8 encoded + @complexity Linear. 
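    A minimal sketch of the new exception (assuming the default `json`
    specialization and a string member that is not valid UTF-8):

    @code{cpp}
    json j = "\xFF";             // a lone 0xFF byte is not valid UTF-8
    try
    {
        std::cout << j << '\n';  // serialization validates the string
    }
    catch (json::type_error& e)
    {
        std::cout << e.what() << '\n';   // reports type_error.316
    }
    @endcode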
@liveexample{The example below shows the serialization with different parameters to `width` to adjust the indentation level.,operator_serialize} - @since version 1.0.0; indentaction character added in version 3.0.0 + @since version 1.0.0; indentation character added in version 3.0.0 */ friend std::ostream& operator<<(std::ostream& o, const basic_json& j) { @@ -13124,40 +13264,40 @@ class basic_json JSON value type | value/range | CBOR type | first byte --------------- | ------------------------------------------ | ---------------------------------- | --------------- - null | `null` | Null | 0xf6 - boolean | `true` | True | 0xf5 - boolean | `false` | False | 0xf4 - number_integer | -9223372036854775808..-2147483649 | Negative integer (8 bytes follow) | 0x3b - number_integer | -2147483648..-32769 | Negative integer (4 bytes follow) | 0x3a + null | `null` | Null | 0xF6 + boolean | `true` | True | 0xF5 + boolean | `false` | False | 0xF4 + number_integer | -9223372036854775808..-2147483649 | Negative integer (8 bytes follow) | 0x3B + number_integer | -2147483648..-32769 | Negative integer (4 bytes follow) | 0x3A number_integer | -32768..-129 | Negative integer (2 bytes follow) | 0x39 number_integer | -128..-25 | Negative integer (1 byte follow) | 0x38 number_integer | -24..-1 | Negative integer | 0x20..0x37 number_integer | 0..23 | Integer | 0x00..0x17 number_integer | 24..255 | Unsigned integer (1 byte follow) | 0x18 number_integer | 256..65535 | Unsigned integer (2 bytes follow) | 0x19 - number_integer | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1a - number_integer | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1b + number_integer | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A + number_integer | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B number_unsigned | 0..23 | Integer | 0x00..0x17 number_unsigned | 24..255 | Unsigned integer (1 byte follow) | 0x18 number_unsigned | 256..65535 | Unsigned integer (2 bytes follow) | 0x19 - number_unsigned | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1a - number_unsigned | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1b - number_float | *any value* | Double-Precision Float | 0xfb + number_unsigned | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A + number_unsigned | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B + number_float | *any value* | Double-Precision Float | 0xFB string | *length*: 0..23 | UTF-8 string | 0x60..0x77 string | *length*: 23..255 | UTF-8 string (1 byte follow) | 0x78 string | *length*: 256..65535 | UTF-8 string (2 bytes follow) | 0x79 - string | *length*: 65536..4294967295 | UTF-8 string (4 bytes follow) | 0x7a - string | *length*: 4294967296..18446744073709551615 | UTF-8 string (8 bytes follow) | 0x7b + string | *length*: 65536..4294967295 | UTF-8 string (4 bytes follow) | 0x7A + string | *length*: 4294967296..18446744073709551615 | UTF-8 string (8 bytes follow) | 0x7B array | *size*: 0..23 | array | 0x80..0x97 array | *size*: 23..255 | array (1 byte follow) | 0x98 array | *size*: 256..65535 | array (2 bytes follow) | 0x99 - array | *size*: 65536..4294967295 | array (4 bytes follow) | 0x9a - array | *size*: 4294967296..18446744073709551615 | array (8 bytes follow) | 0x9b - object | *size*: 0..23 | map | 0xa0..0xb7 - object | *size*: 23..255 | map (1 byte follow) | 0xb8 - object | *size*: 256..65535 | map (2 bytes follow) | 0xb9 - object | *size*: 65536..4294967295 
| map (4 bytes follow) | 0xba - object | *size*: 4294967296..18446744073709551615 | map (8 bytes follow) | 0xbb + array | *size*: 65536..4294967295 | array (4 bytes follow) | 0x9A + array | *size*: 4294967296..18446744073709551615 | array (8 bytes follow) | 0x9B + object | *size*: 0..23 | map | 0xA0..0xB7 + object | *size*: 23..255 | map (1 byte follow) | 0xB8 + object | *size*: 256..65535 | map (2 bytes follow) | 0xB9 + object | *size*: 65536..4294967295 | map (4 bytes follow) | 0xBA + object | *size*: 4294967296..18446744073709551615 | map (8 bytes follow) | 0xBB @note The mapping is **complete** in the sense that any JSON value type can be converted to a CBOR value. @@ -13167,20 +13307,20 @@ class basic_json function which serializes NaN or Infinity to `null`. @note The following CBOR types are not used in the conversion: - - byte strings (0x40..0x5f) - - UTF-8 strings terminated by "break" (0x7f) - - arrays terminated by "break" (0x9f) - - maps terminated by "break" (0xbf) - - date/time (0xc0..0xc1) - - bignum (0xc2..0xc3) - - decimal fraction (0xc4) - - bigfloat (0xc5) - - tagged items (0xc6..0xd4, 0xd8..0xdb) - - expected conversions (0xd5..0xd7) - - simple values (0xe0..0xf3, 0xf8) - - undefined (0xf7) - - half and single-precision floats (0xf9-0xfa) - - break (0xff) + - byte strings (0x40..0x5F) + - UTF-8 strings terminated by "break" (0x7F) + - arrays terminated by "break" (0x9F) + - maps terminated by "break" (0xBF) + - date/time (0xC0..0xC1) + - bignum (0xC2..0xC3) + - decimal fraction (0xC4) + - bigfloat (0xC5) + - tagged items (0xC6..0xD4, 0xD8..0xDB) + - expected conversions (0xD5..0xD7) + - simple values (0xE0..0xF3, 0xF8) + - undefined (0xF7) + - half and single-precision floats (0xF9-0xFA) + - break (0xFF) @param[in] j JSON value to serialize @return MessagePack serialization as byte vector @@ -13226,35 +13366,35 @@ class basic_json JSON value type | value/range | MessagePack type | first byte --------------- | --------------------------------- | ---------------- | ---------- - null | `null` | nil | 0xc0 - boolean | `true` | true | 0xc3 - boolean | `false` | false | 0xc2 - number_integer | -9223372036854775808..-2147483649 | int64 | 0xd3 - number_integer | -2147483648..-32769 | int32 | 0xd2 - number_integer | -32768..-129 | int16 | 0xd1 - number_integer | -128..-33 | int8 | 0xd0 - number_integer | -32..-1 | negative fixint | 0xe0..0xff - number_integer | 0..127 | positive fixint | 0x00..0x7f - number_integer | 128..255 | uint 8 | 0xcc - number_integer | 256..65535 | uint 16 | 0xcd - number_integer | 65536..4294967295 | uint 32 | 0xce - number_integer | 4294967296..18446744073709551615 | uint 64 | 0xcf - number_unsigned | 0..127 | positive fixint | 0x00..0x7f - number_unsigned | 128..255 | uint 8 | 0xcc - number_unsigned | 256..65535 | uint 16 | 0xcd - number_unsigned | 65536..4294967295 | uint 32 | 0xce - number_unsigned | 4294967296..18446744073709551615 | uint 64 | 0xcf - number_float | *any value* | float 64 | 0xcb - string | *length*: 0..31 | fixstr | 0xa0..0xbf - string | *length*: 32..255 | str 8 | 0xd9 - string | *length*: 256..65535 | str 16 | 0xda - string | *length*: 65536..4294967295 | str 32 | 0xdb - array | *size*: 0..15 | fixarray | 0x90..0x9f - array | *size*: 16..65535 | array 16 | 0xdc - array | *size*: 65536..4294967295 | array 32 | 0xdd - object | *size*: 0..15 | fix map | 0x80..0x8f - object | *size*: 16..65535 | map 16 | 0xde - object | *size*: 65536..4294967295 | map 32 | 0xdf + null | `null` | nil | 0xC0 + boolean | `true` | true | 0xC3 + boolean | 
`false` | false | 0xC2 + number_integer | -9223372036854775808..-2147483649 | int64 | 0xD3 + number_integer | -2147483648..-32769 | int32 | 0xD2 + number_integer | -32768..-129 | int16 | 0xD1 + number_integer | -128..-33 | int8 | 0xD0 + number_integer | -32..-1 | negative fixint | 0xE0..0xFF + number_integer | 0..127 | positive fixint | 0x00..0x7F + number_integer | 128..255 | uint 8 | 0xCC + number_integer | 256..65535 | uint 16 | 0xCD + number_integer | 65536..4294967295 | uint 32 | 0xCE + number_integer | 4294967296..18446744073709551615 | uint 64 | 0xCF + number_unsigned | 0..127 | positive fixint | 0x00..0x7F + number_unsigned | 128..255 | uint 8 | 0xCC + number_unsigned | 256..65535 | uint 16 | 0xCD + number_unsigned | 65536..4294967295 | uint 32 | 0xCE + number_unsigned | 4294967296..18446744073709551615 | uint 64 | 0xCF + number_float | *any value* | float 64 | 0xCB + string | *length*: 0..31 | fixstr | 0xA0..0xBF + string | *length*: 32..255 | str 8 | 0xD9 + string | *length*: 256..65535 | str 16 | 0xDA + string | *length*: 65536..4294967295 | str 32 | 0xDB + array | *size*: 0..15 | fixarray | 0x90..0x9F + array | *size*: 16..65535 | array 16 | 0xDC + array | *size*: 65536..4294967295 | array 32 | 0xDD + object | *size*: 0..15 | fix map | 0x80..0x8F + object | *size*: 16..65535 | map 16 | 0xDE + object | *size*: 65536..4294967295 | map 32 | 0xDF @note The mapping is **complete** in the sense that any JSON value type can be converted to a MessagePack value. @@ -13265,10 +13405,10 @@ class basic_json - objects with more than 4294967295 elements @note The following MessagePack types are not used in the conversion: - - bin 8 - bin 32 (0xc4..0xc6) - - ext 8 - ext 32 (0xc7..0xc9) - - float 32 (0xca) - - fixext 1 - fixext 16 (0xd4..0xd8) + - bin 8 - bin 32 (0xC4..0xC6) + - ext 8 - ext 32 (0xC7..0xC9) + - float 32 (0xCA) + - fixext 1 - fixext 16 (0xD4..0xD8) @note Any MessagePack output created @ref to_msgpack can be successfully parsed by @ref from_msgpack. 
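A short round-trip sketch of the mapping above (the include path is an assumption
for this single-header copy; 0x82 is the fixmap byte for a two-element object per
the table):

    #include <cassert>
    #include <cstdint>
    #include <vector>
    #include "nlohmann/json.hpp"

    using nlohmann::json;

    int main()
    {
        const json j = {{"compact", true}, {"schema", 0}};

        // a two-element object starts with fixmap 0x80 | 2 == 0x82
        const std::vector<uint8_t> v = json::to_msgpack(j);
        assert(v.at(0) == 0x82);

        // any to_msgpack output parses back with from_msgpack
        const json k = json::from_msgpack(v);
        assert(j == k);
        return 0;
    }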
@@ -13322,51 +13462,51 @@ class basic_json Integer | number_unsigned | 0x00..0x17 Unsigned integer | number_unsigned | 0x18 Unsigned integer | number_unsigned | 0x19 - Unsigned integer | number_unsigned | 0x1a - Unsigned integer | number_unsigned | 0x1b + Unsigned integer | number_unsigned | 0x1A + Unsigned integer | number_unsigned | 0x1B Negative integer | number_integer | 0x20..0x37 Negative integer | number_integer | 0x38 Negative integer | number_integer | 0x39 - Negative integer | number_integer | 0x3a - Negative integer | number_integer | 0x3b + Negative integer | number_integer | 0x3A + Negative integer | number_integer | 0x3B Negative integer | number_integer | 0x40..0x57 UTF-8 string | string | 0x60..0x77 UTF-8 string | string | 0x78 UTF-8 string | string | 0x79 - UTF-8 string | string | 0x7a - UTF-8 string | string | 0x7b - UTF-8 string | string | 0x7f + UTF-8 string | string | 0x7A + UTF-8 string | string | 0x7B + UTF-8 string | string | 0x7F array | array | 0x80..0x97 array | array | 0x98 array | array | 0x99 - array | array | 0x9a - array | array | 0x9b - array | array | 0x9f - map | object | 0xa0..0xb7 - map | object | 0xb8 - map | object | 0xb9 - map | object | 0xba - map | object | 0xbb - map | object | 0xbf - False | `false` | 0xf4 - True | `true` | 0xf5 - Nill | `null` | 0xf6 - Half-Precision Float | number_float | 0xf9 - Single-Precision Float | number_float | 0xfa - Double-Precision Float | number_float | 0xfb + array | array | 0x9A + array | array | 0x9B + array | array | 0x9F + map | object | 0xA0..0xB7 + map | object | 0xB8 + map | object | 0xB9 + map | object | 0xBA + map | object | 0xBB + map | object | 0xBF + False | `false` | 0xF4 + True | `true` | 0xF5 + Nill | `null` | 0xF6 + Half-Precision Float | number_float | 0xF9 + Single-Precision Float | number_float | 0xFA + Double-Precision Float | number_float | 0xFB @warning The mapping is **incomplete** in the sense that not all CBOR types can be converted to a JSON value. The following CBOR types are not supported and will yield parse errors (parse_error.112): - - byte strings (0x40..0x5f) - - date/time (0xc0..0xc1) - - bignum (0xc2..0xc3) - - decimal fraction (0xc4) - - bigfloat (0xc5) - - tagged items (0xc6..0xd4, 0xd8..0xdb) - - expected conversions (0xd5..0xd7) - - simple values (0xe0..0xf3, 0xf8) - - undefined (0xf7) + - byte strings (0x40..0x5F) + - date/time (0xC0..0xC1) + - bignum (0xC2..0xC3) + - decimal fraction (0xC4) + - bigfloat (0xC5) + - tagged items (0xC6..0xD4, 0xD8..0xDB) + - expected conversions (0xD5..0xD7) + - simple values (0xE0..0xF3, 0xF8) + - undefined (0xF7) @warning CBOR allows map keys of any type, whereas JSON only allows strings as keys in object values. 
Therefore, CBOR maps with keys @@ -13426,38 +13566,38 @@ class basic_json MessagePack type | JSON value type | first byte ---------------- | --------------- | ---------- - positive fixint | number_unsigned | 0x00..0x7f - fixmap | object | 0x80..0x8f - fixarray | array | 0x90..0x9f - fixstr | string | 0xa0..0xbf - nil | `null` | 0xc0 - false | `false` | 0xc2 - true | `true` | 0xc3 - float 32 | number_float | 0xca - float 64 | number_float | 0xcb - uint 8 | number_unsigned | 0xcc - uint 16 | number_unsigned | 0xcd - uint 32 | number_unsigned | 0xce - uint 64 | number_unsigned | 0xcf - int 8 | number_integer | 0xd0 - int 16 | number_integer | 0xd1 - int 32 | number_integer | 0xd2 - int 64 | number_integer | 0xd3 - str 8 | string | 0xd9 - str 16 | string | 0xda - str 32 | string | 0xdb - array 16 | array | 0xdc - array 32 | array | 0xdd - map 16 | object | 0xde - map 32 | object | 0xdf - negative fixint | number_integer | 0xe0-0xff + positive fixint | number_unsigned | 0x00..0x7F + fixmap | object | 0x80..0x8F + fixarray | array | 0x90..0x9F + fixstr | string | 0xA0..0xBF + nil | `null` | 0xC0 + false | `false` | 0xC2 + true | `true` | 0xC3 + float 32 | number_float | 0xCA + float 64 | number_float | 0xCB + uint 8 | number_unsigned | 0xCC + uint 16 | number_unsigned | 0xCD + uint 32 | number_unsigned | 0xCE + uint 64 | number_unsigned | 0xCF + int 8 | number_integer | 0xD0 + int 16 | number_integer | 0xD1 + int 32 | number_integer | 0xD2 + int 64 | number_integer | 0xD3 + str 8 | string | 0xD9 + str 16 | string | 0xDA + str 32 | string | 0xDB + array 16 | array | 0xDC + array 32 | array | 0xDD + map 16 | object | 0xDE + map 32 | object | 0xDF + negative fixint | number_integer | 0xE0-0xFF @warning The mapping is **incomplete** in the sense that not all MessagePack types can be converted to a JSON value. The following MessagePack types are not supported and will yield parse errors: - - bin 8 - bin 32 (0xc4..0xc6) - - ext 8 - ext 32 (0xc7..0xc9) - - fixext 1 - fixext 16 (0xd4..0xd8) + - bin 8 - bin 32 (0xC4..0xC6) + - ext 8 - ext 32 (0xC7..0xC9) + - fixext 1 - fixext 16 (0xD4..0xD8) @note Any MessagePack output created @ref to_msgpack can be successfully parsed by @ref from_msgpack. @@ -13601,6 +13741,9 @@ class basic_json pointer @a ptr. As `at` provides checked access (and no elements are implicitly inserted), the index '-' is always invalid. See example below. + @throw out_of_range.403 if the JSON pointer describes a key of an object + which cannot be found. See example below. + @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved. See example below. @@ -13641,6 +13784,9 @@ class basic_json pointer @a ptr. As `at` provides checked access (and no elements are implicitly inserted), the index '-' is always invalid. See example below. + @throw out_of_range.403 if the JSON pointer describes a key of an object + which cannot be found. See example below. + @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved. See example below. 
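A brief sketch of the checked access documented above (include path assumed; the
trailing-character case reflects the new json_pointer::array_index helper):

    #include <iostream>
    #include "nlohmann/json.hpp"

    using nlohmann::json;

    int main()
    {
        const json j = {{"numbers", {1, 2, 3}}};

        // resolvable pointer: prints 2
        std::cout << j.at(json::json_pointer("/numbers/1")) << '\n';

        try
        {
            // key that cannot be found -> out_of_range.403
            j.at(json::json_pointer("/missing"));
        }
        catch (json::out_of_range& e)
        {
            std::cout << e.what() << '\n';
        }

        try
        {
            // with array_index, a token with trailing characters raises
            // out_of_range.404 instead of being silently truncated by std::stoi
            j.at(json::json_pointer("/numbers/1x"));
        }
        catch (json::out_of_range& e)
        {
            std::cout << e.what() << '\n';
        }
        return 0;
    }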
@@ -13856,7 +14002,7 @@ class basic_json } else { - const auto idx = std::stoi(last_path); + const auto idx = json_pointer::array_index(last_path); if (JSON_UNLIKELY(static_cast<size_type>(idx) > parent.size())) { // avoid undefined behavior @@ -13904,7 +14050,7 @@ class basic_json else if (parent.is_array()) { // note erase performs range check - parent.erase(static_cast<size_type>(std::stoi(last_path))); + parent.erase(static_cast<size_type>(json_pointer::array_index(last_path))); } }; @@ -13999,7 +14145,12 @@ class basic_json const json_pointer from_ptr(from_path); // the "from" location must exist - use at() - result[ptr] = result.at(from_ptr); + basic_json v = result.at(from_ptr); + + // The copy is functionally identical to an "add" + // operation at the target location using the value + // specified in the "from" member. + operation_add(ptr, v); break; } @@ -14141,7 +14292,7 @@ class basic_json case value_t::object: { // first pass: traverse this object's elements - for (auto it = source.begin(); it != source.end(); ++it) + for (auto it = source.cbegin(); it != source.cend(); ++it) { // escape the key name to be used in a JSON patch const auto key = json_pointer::escape(it.key()); @@ -14163,7 +14314,7 @@ class basic_json } // second pass: traverse other object's elements - for (auto it = target.begin(); it != target.end(); ++it) + for (auto it = target.cbegin(); it != target.cend(); ++it) { if (source.find(it.key()) == source.end()) { @@ -14256,7 +14407,7 @@ json_pointer::get_and_create(NLOHMANN_BASIC_JSON_TPL& j) const // create an entry in the array JSON_TRY { - result = &result->operator[](static_cast<size_type>(std::stoi(reference_token))); + result = &result->operator[](static_cast<size_type>(array_index(reference_token))); } JSON_CATCH(std::invalid_argument&) { @@ -14333,7 +14484,7 @@ json_pointer::get_unchecked(NLOHMANN_BASIC_JSON_TPL* ptr) const JSON_TRY { ptr = &ptr->operator[]( - static_cast<size_type>(std::stoi(reference_token))); + static_cast<size_type>(array_index(reference_token))); } JSON_CATCH(std::invalid_argument&) { @@ -14388,7 +14539,7 @@ json_pointer::get_checked(NLOHMANN_BASIC_JSON_TPL* ptr) const // note: at performs range check JSON_TRY { - ptr = &ptr->at(static_cast<size_type>(std::stoi(reference_token))); + ptr = &ptr->at(static_cast<size_type>(array_index(reference_token))); } JSON_CATCH(std::invalid_argument&) { @@ -14443,7 +14594,7 @@ json_pointer::get_unchecked(const NLOHMANN_BASIC_JSON_TPL* ptr) const JSON_TRY { ptr = &ptr->operator[]( - static_cast<size_type>(std::stoi(reference_token))); + static_cast<size_type>(array_index(reference_token))); } JSON_CATCH(std::invalid_argument&) { @@ -14497,7 +14648,7 @@ json_pointer::get_checked(const NLOHMANN_BASIC_JSON_TPL* ptr) const // note: at performs range check JSON_TRY { - ptr = &ptr->at(static_cast<size_type>(std::stoi(reference_token))); + ptr = &ptr->at(static_cast<size_type>(array_index(reference_token))); } JSON_CATCH(std::invalid_argument&) { diff --git a/tests/brotli.sh b/tests/brotli.sh new file mode 100644 index 000000000000..645dd4214ec6 --- /dev/null +++ b/tests/brotli.sh @@ -0,0 +1,28 @@ +source common.sh + + +# Only test if we found brotli libraries +# (CLI tool is likely unavailable if libraries are missing) +if [ -n "$HAVE_BROTLI" ]; then + +clearStore +clearCache + +cacheURI="file://$cacheDir?compression=br" + +outPath=$(nix-build dependencies.nix --no-out-link) + +nix copy --to $cacheURI $outPath + +HASH=$(nix hash-path $outPath) + +clearStore +clearCacheCache + +nix copy --from $cacheURI 
$outPath --no-check-sigs + +HASH2=$(nix hash-path $outPath) + +[[ $HASH = $HASH2 ]] + +fi # HAVE_BROTLI diff --git a/tests/build-dry.sh b/tests/build-dry.sh new file mode 100644 index 000000000000..610e6070c5d7 --- /dev/null +++ b/tests/build-dry.sh @@ -0,0 +1,52 @@ +source common.sh + +################################################### +# Check that --dry-run isn't confused with read-only mode +# https://github.com/NixOS/nix/issues/1795 + +clearStore +clearCache + +# Ensure this builds successfully first +nix build -f dependencies.nix + +clearStore +clearCache + +# Try --dry-run using old command first +nix-build dependencies.nix --dry-run 2>&1 | grep "will be built" +# Now new command: +nix build -f dependencies.nix --dry-run 2>&1 | grep "will be built" + +# TODO: XXX: FIXME: #1793 +# Disable this part of the test until the problem is resolved: +if [ -n "$ISSUE_1795_IS_FIXED" ]; then +clearStore +clearCache + +# Try --dry-run using new command first +nix build -f dependencies.nix --dry-run 2>&1 | grep "will be built" +# Now old command: +nix-build dependencies.nix --dry-run 2>&1 | grep "will be built" +fi + +################################################### +# Check --dry-run doesn't create links with --dry-run +# https://github.com/NixOS/nix/issues/1849 +clearStore +clearCache + +RESULT=$TEST_ROOT/result-link +rm -f $RESULT + +nix-build dependencies.nix -o $RESULT --dry-run + +[[ ! -h $RESULT ]] || fail "nix-build --dry-run created output link" + +nix build -f dependencies.nix -o $RESULT --dry-run + +[[ ! -h $RESULT ]] || fail "nix build --dry-run created output link" + +nix build -f dependencies.nix -o $RESULT + +[[ -h $RESULT ]] diff --git a/tests/build-remote.sh b/tests/build-remote.sh index cf3bb4633183..9bca0f4a3856 100644 --- a/tests/build-remote.sh +++ b/tests/build-remote.sh @@ -2,7 +2,7 @@ source common.sh clearStore -if [[ $(uname) != Linux ]]; then exit; fi +if ! canUseSandbox; then exit; fi if [[ ! $SHELL =~ /nix/store ]]; then exit; fi chmod -R u+w $TEST_ROOT/store0 || true diff --git a/tests/check.nix b/tests/check.nix new file mode 100644 index 000000000000..08aac2fb0a77 --- /dev/null +++ b/tests/check.nix @@ -0,0 +1,17 @@ +with import ./config.nix; + +{ + nondeterministic = mkDerivation { + name = "nondeterministic"; + buildCommand = + '' + mkdir $out + date +%s.%N > $out/date + ''; + }; + + fetchurl = import <nix/fetchurl.nix> { + url = "file://" + toString ./lang/eval-okay-xml.exp.xml; + sha256 = "0kg4sla7ihm8ijr8cb3117fhl99zrc2bwy1jrngsfmkh8bav4m0v"; + }; +} diff --git a/tests/check.sh b/tests/check.sh new file mode 100644 index 000000000000..b05e40ffbeea --- /dev/null +++ b/tests/check.sh @@ -0,0 +1,32 @@ +source common.sh + +clearStore + +nix-build dependencies.nix --no-out-link +nix-build dependencies.nix --no-out-link --check + +nix-build check.nix -A nondeterministic --no-out-link +(! nix-build check.nix -A nondeterministic --no-out-link --check 2> $TEST_ROOT/log) +grep 'may not be deterministic' $TEST_ROOT/log + +clearStore + +nix-build dependencies.nix --no-out-link --repeat 3 + +(! nix-build check.nix -A nondeterministic --no-out-link --repeat 1 2> $TEST_ROOT/log) +grep 'differs from previous round' $TEST_ROOT/log + +path=$(nix-build check.nix -A fetchurl --no-out-link --hashed-mirrors '') + +chmod +w $path +echo foo > $path +chmod -w $path + +nix-build check.nix -A fetchurl --no-out-link --check --hashed-mirrors '' + +# Note: "check" doesn't repair anything, it just compares to the hash stored in the database. 
+[[ $(cat $path) = foo ]] + +nix-build check.nix -A fetchurl --no-out-link --repair --hashed-mirrors '' + +[[ $(cat $path) != foo ]] diff --git a/tests/common.sh.in b/tests/common.sh.in index 09f2949141a4..195205988afb 100644 --- a/tests/common.sh.in +++ b/tests/common.sh.in @@ -11,7 +11,6 @@ export NIX_LOCALSTATE_DIR=$TEST_ROOT/var export NIX_LOG_DIR=$TEST_ROOT/var/log/nix export NIX_STATE_DIR=$TEST_ROOT/var/nix export NIX_CONF_DIR=$TEST_ROOT/etc -export NIX_MANIFESTS_DIR=$TEST_ROOT/var/nix/manifests export _NIX_TEST_SHARED=$TEST_ROOT/shared if [[ -n $NIX_STORE ]]; then export _NIX_TEST_NO_SANDBOX=1 @@ -32,6 +31,7 @@ export xmllint="@xmllint@" export SHELL="@bash@" export PAGER=cat export HAVE_SODIUM="@HAVE_SODIUM@" +export HAVE_BROTLI="@HAVE_BROTLI@" export version=@PACKAGE_VERSION@ export system=@system@ @@ -86,6 +86,24 @@ killDaemon() { trap "" EXIT } +canUseSandbox() { + if [[ $(uname) != Linux ]]; then return 1; fi + + if [ ! -L /proc/self/ns/user ]; then + echo "Kernel doesn't support user namespaces, skipping this test..." + return 1 + fi + + if [ -e /proc/sys/kernel/unprivileged_userns_clone ]; then + if [ "$(cat /proc/sys/kernel/unprivileged_userns_clone)" != 1 ]; then + echo "Unprivileged user namespaces disabled by sysctl, skipping this test..." + return 1 + fi + fi + + return 0 +} + fail() { echo "$1" exit 1 diff --git a/tests/export.sh b/tests/export.sh index ec7560f19728..2238539bcca9 100644 --- a/tests/export.sh +++ b/tests/export.sh @@ -8,6 +8,11 @@ nix-store --export $outPath > $TEST_ROOT/exp nix-store --export $(nix-store -qR $outPath) > $TEST_ROOT/exp_all +if nix-store --export $outPath >/dev/full ; then + echo "exporting to a bad file descriptor should fail" + exit 1 +fi + clearStore diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh index 09e4f742668e..530ac7bb813c 100644 --- a/tests/fetchGit.sh +++ b/tests/fetchGit.sh @@ -29,10 +29,17 @@ rev2=$(git -C $repo rev-parse HEAD) path=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") [[ $(cat $path/hello) = world ]] +# In pure eval mode, fetchGit without a revision should fail. +[[ $(nix eval --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") = world ]] +(! nix eval --pure-eval --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") + # Fetch using an explicit revision hash. path2=$(nix eval --raw "(builtins.fetchGit { url = file://$repo; rev = \"$rev2\"; }).outPath") [[ $path = $path2 ]] +# In pure eval mode, fetchGit with a revision should succeed. +[[ $(nix eval --pure-eval --raw "(builtins.readFile (fetchGit { url = file://$repo; rev = \"$rev2\"; } + \"/hello\"))") = world ]] + # Fetch again. This should be cached. 
mv $repo ${repo}-tmp path2=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") @@ -93,3 +100,42 @@ git -C $repo add hello git -C $repo commit -m 'Bla4' rev3=$(git -C $repo rev-parse HEAD) nix eval --tarball-ttl 3600 "(builtins.fetchGit { url = $repo; rev = \"$rev3\"; })" >/dev/null + +# Update 'path' to reflect latest master +path=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") + +# Check behavior when non-master branch is used +git -C $repo checkout $rev2 -b dev +echo dev > $repo/hello + +# File URI uses 'master' unless specified otherwise +path2=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") +[[ $path = $path2 ]] + +# Using local path with branch other than 'master' should work when clean or dirty +path3=$(nix eval --raw "(builtins.fetchGit $repo).outPath") +# (check dirty-tree handling was used) +[[ $(nix eval --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] + +# Committing shouldn't change store path, or switch to using 'master' +git -C $repo commit -m 'Bla5' -a +path4=$(nix eval --raw "(builtins.fetchGit $repo).outPath") +[[ $(cat $path4/hello) = dev ]] +[[ $path3 = $path4 ]] + +# Confirm same as 'dev' branch +path5=$(nix eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") +[[ $path3 = $path5 ]] + + +# Nuke the cache +rm -rf $TEST_HOME/.cache/nix/git + +# Try again, but without 'git' on PATH +NIX=$(command -v nix) +# This should fail +(! PATH= $NIX eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath" ) + +# Try again, with 'git' available. This should work. +path5=$(nix eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") +[[ $path3 = $path5 ]] diff --git a/tests/fetchMercurial.sh b/tests/fetchMercurial.sh index 271350ecd171..4088dbd39796 100644 --- a/tests/fetchMercurial.sh +++ b/tests/fetchMercurial.sh @@ -29,10 +29,17 @@ rev2=$(hg log --cwd $repo -r tip --template '{node}') path=$(nix eval --raw "(builtins.fetchMercurial file://$repo).outPath") [[ $(cat $path/hello) = world ]] +# In pure eval mode, fetchGit without a revision should fail. +[[ $(nix eval --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") = world ]] +(! nix eval --pure-eval --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") + # Fetch using an explicit revision hash. path2=$(nix eval --raw "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev2\"; }).outPath") [[ $path = $path2 ]] +# In pure eval mode, fetchGit with a revision should succeed. +[[ $(nix eval --pure-eval --raw "(builtins.readFile (fetchMercurial { url = file://$repo; rev = \"$rev2\"; } + \"/hello\"))") = world ]] + # Fetch again. This should be cached. mv $repo ${repo}-tmp path2=$(nix eval --raw "(builtins.fetchMercurial file://$repo).outPath") diff --git a/tests/fixed.sh b/tests/fixed.sh index cac3f0be91b0..8f51403a7071 100644 --- a/tests/fixed.sh +++ b/tests/fixed.sh @@ -5,15 +5,22 @@ clearStore export IMPURE_VAR1=foo export IMPURE_VAR2=bar +path=$(nix-store -q $(nix-instantiate fixed.nix -A good.0)) + +echo 'testing bad...' +nix-build fixed.nix -A bad --no-out-link && fail "should fail" + +# Building with the bad hash should produce the "good" output path as +# a side-effect. +[[ -e $path ]] +nix path-info --json $path | grep fixed:md5:2qk15sxzzjlnpjk9brn7j8ppcd + echo 'testing good...' nix-build fixed.nix -A good --no-out-link echo 'testing good2...' nix-build fixed.nix -A good2 --no-out-link -echo 'testing bad...' 
-nix-build fixed.nix -A bad --no-out-link && fail "should fail" - echo 'testing reallyBad...' nix-instantiate fixed.nix -A reallyBad && fail "should fail" diff --git a/tests/init.sh b/tests/init.sh index 41cca047d8fb..e5353598bcc4 100644 --- a/tests/init.sh +++ b/tests/init.sh @@ -16,7 +16,12 @@ mkdir "$NIX_CONF_DIR" cat > "$NIX_CONF_DIR"/nix.conf <<EOF build-users-group = keep-derivations = false +include nix.conf.extra +EOF + +cat > "$NIX_CONF_DIR"/nix.conf.extra <<EOF fsync-metadata = false +!include nix.conf.extra.not-there EOF # Initialise the database. diff --git a/tests/lang/data b/tests/lang/data new file mode 100644 index 000000000000..257cc5642cb1 --- /dev/null +++ b/tests/lang/data @@ -0,0 +1 @@ +foo diff --git a/tests/lang/eval-okay-path.nix b/tests/lang/eval-okay-path.nix new file mode 100644 index 000000000000..e67168cf3edf --- /dev/null +++ b/tests/lang/eval-okay-path.nix @@ -0,0 +1,7 @@ +builtins.path + { path = ./.; + filter = path: _: baseNameOf path == "data"; + recursive = true; + sha256 = "1yhm3gwvg5a41yylymgblsclk95fs6jy72w0wv925mmidlhcq4sw"; + name = "output"; + } diff --git a/tests/lang/eval-okay-replacestrings.exp b/tests/lang/eval-okay-replacestrings.exp index a2add1b7b140..72e8274d8c58 100644 --- a/tests/lang/eval-okay-replacestrings.exp +++ b/tests/lang/eval-okay-replacestrings.exp @@ -1 +1 @@ -[ "faabar" "fbar" "fubar" "faboor" "fubar" ] +[ "faabar" "fbar" "fubar" "faboor" "fubar" "XaXbXcX" "X" "a_b" ] diff --git a/tests/lang/eval-okay-replacestrings.nix b/tests/lang/eval-okay-replacestrings.nix index 6284a0e660ae..bd8031fc004e 100644 --- a/tests/lang/eval-okay-replacestrings.nix +++ b/tests/lang/eval-okay-replacestrings.nix @@ -5,4 +5,7 @@ with builtins; (replaceStrings ["oo"] ["u"] "foobar") (replaceStrings ["oo" "a"] ["a" "oo"] "foobar") (replaceStrings ["oo" "oo"] ["u" "i"] "foobar") + (replaceStrings [""] ["X"] "abc") + (replaceStrings [""] ["X"] "") + (replaceStrings ["-"] ["_"] "a-b") ] diff --git a/tests/lang/eval-okay-splitversion.exp b/tests/lang/eval-okay-splitversion.exp new file mode 100644 index 000000000000..153ceb8186a0 --- /dev/null +++ b/tests/lang/eval-okay-splitversion.exp @@ -0,0 +1 @@ +[ "1" "2" "3" ] diff --git a/tests/lang/eval-okay-splitversion.nix b/tests/lang/eval-okay-splitversion.nix new file mode 100644 index 000000000000..9e5c99d2e7f6 --- /dev/null +++ b/tests/lang/eval-okay-splitversion.nix @@ -0,0 +1 @@ +builtins.splitVersion "1.2.3" diff --git a/tests/linux-sandbox.sh b/tests/linux-sandbox.sh index 4a686bb59a3b..acfd46c54170 100644 --- a/tests/linux-sandbox.sh +++ b/tests/linux-sandbox.sh @@ -2,7 +2,7 @@ source common.sh clearStore -if [[ $(uname) != Linux ]]; then exit; fi +if ! canUseSandbox; then exit; fi # Note: we need to bind-mount $SHELL into the chroot. 
Currently we # only support the case where $SHELL is in the Nix store, because diff --git a/tests/local.mk b/tests/local.mk index baf74224bb12..ec7ebfb0dedc 100644 --- a/tests/local.mk +++ b/tests/local.mk @@ -13,13 +13,18 @@ nix_tests = \ check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \ placeholders.sh nix-shell.sh \ linux-sandbox.sh \ + build-dry.sh \ build-remote.sh \ nar-access.sh \ structured-attrs.sh \ fetchGit.sh \ fetchMercurial.sh \ signing.sh \ - run.sh + run.sh \ + brotli.sh \ + pure-eval.sh \ + check.sh \ + plugins.sh # parallel.sh install-tests += $(foreach x, $(nix_tests), tests/$(x)) @@ -28,4 +33,4 @@ tests-environment = NIX_REMOTE= $(bash) -e clean-files += $(d)/common.sh -installcheck: $(d)/common.sh +installcheck: $(d)/common.sh $(d)/plugins/libplugintest.$(SO_EXT) diff --git a/tests/misc.sh b/tests/misc.sh index 6d0ab3adcec8..eda0164167f2 100644 --- a/tests/misc.sh +++ b/tests/misc.sh @@ -16,4 +16,4 @@ nix-env --foo 2>&1 | grep "no operation" nix-env -q --foo 2>&1 | grep "unknown flag" # Eval Errors. -nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 | grep "infinite recursion encountered, at (string):1:15$" +nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 | grep "infinite recursion encountered, at .*(string).*:1:15$" diff --git a/tests/nar-access.sh b/tests/nar-access.sh index bd849cbfab1d..553d6ca89d7d 100644 --- a/tests/nar-access.sh +++ b/tests/nar-access.sh @@ -36,3 +36,9 @@ diff -u baz.cat-nar $storePath/foo/baz # Test missing files. nix ls-store --json -R $storePath/xyzzy 2>&1 | grep 'does not exist in NAR' nix ls-store $storePath/xyzzy 2>&1 | grep 'does not exist' + +# Test failure to dump. +if nix-store --dump $storePath >/dev/full ; then + echo "dumping to /dev/full should fail" + exit -1 +fi diff --git a/tests/nix-copy-closure.nix b/tests/nix-copy-closure.nix index 44126dd64e47..0dc147fb34e9 100644 --- a/tests/nix-copy-closure.nix +++ b/tests/nix-copy-closure.nix @@ -1,8 +1,8 @@ # Test ‘nix-copy-closure’. -{ system, nix }: +{ nixpkgs, system, nix }: -with import <nixpkgs/nixos/lib/testing.nix> { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in { @@ -29,10 +29,10 @@ makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in { startAll; # Create an SSH key on the client. - my $key = `${pkgs.openssh}/bin/ssh-keygen -t dsa -f key -N ""`; + my $key = `${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f key -N ""`; $client->succeed("mkdir -m 700 /root/.ssh"); - $client->copyFileFromHost("key", "/root/.ssh/id_dsa"); - $client->succeed("chmod 600 /root/.ssh/id_dsa"); + $client->copyFileFromHost("key", "/root/.ssh/id_ed25519"); + $client->succeed("chmod 600 /root/.ssh/id_ed25519"); # Install the SSH key on the server. 
$server->succeed("mkdir -m 700 /root/.ssh"); diff --git a/tests/plugins.sh b/tests/plugins.sh new file mode 100644 index 000000000000..4b1baeddce32 --- /dev/null +++ b/tests/plugins.sh @@ -0,0 +1,7 @@ +source common.sh + +set -o pipefail + +res=$(nix eval '(builtins.anotherNull)' --option setting-set true --option plugin-files $PWD/plugins/libplugintest*) + +[ "$res"x = "nullx" ] diff --git a/tests/plugins/local.mk b/tests/plugins/local.mk new file mode 100644 index 000000000000..1d2bac052fd2 --- /dev/null +++ b/tests/plugins/local.mk @@ -0,0 +1,9 @@ +libraries += libplugintest + +libplugintest_DIR := $(d) + +libplugintest_SOURCES := $(d)/plugintest.cc + +libplugintest_ALLOW_UNDEFINED := 1 + +libplugintest_EXCLUDE_FROM_LIBRARY_LIST := 1 diff --git a/tests/plugins/plugintest.cc b/tests/plugins/plugintest.cc new file mode 100644 index 000000000000..8da15ebabd7d --- /dev/null +++ b/tests/plugins/plugintest.cc @@ -0,0 +1,19 @@ +#include "globals.hh" +#include "primops.hh" + +using namespace nix; + +static BaseSetting<bool> settingSet{false, "setting-set", + "Whether the plugin-defined setting was set"}; + +static RegisterSetting rs(&settingSet); + +static void prim_anotherNull (EvalState & state, const Pos & pos, Value ** args, Value & v) +{ + if (settingSet) + mkNull(v); + else + mkBool(v, false); +} + +static RegisterPrimOp rp("anotherNull", 0, prim_anotherNull); diff --git a/tests/pure-eval.nix b/tests/pure-eval.nix new file mode 100644 index 000000000000..ed25b3d45637 --- /dev/null +++ b/tests/pure-eval.nix @@ -0,0 +1,3 @@ +{ + x = 123; +} diff --git a/tests/pure-eval.sh b/tests/pure-eval.sh new file mode 100644 index 000000000000..49c8564487c3 --- /dev/null +++ b/tests/pure-eval.sh @@ -0,0 +1,18 @@ +source common.sh + +clearStore + +nix eval --pure-eval '(assert 1 + 2 == 3; true)' + +[[ $(nix eval '(builtins.readFile ./pure-eval.sh)') =~ clearStore ]] + +(! nix eval --pure-eval '(builtins.readFile ./pure-eval.sh)') + +(! nix eval --pure-eval '(builtins.currentTime)') +(! nix eval --pure-eval '(builtins.currentSystem)') + +(! nix-instantiate --pure-eval ./simple.nix) + +[[ $(nix eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") == 123 ]] +(! nix eval --pure-eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") +nix eval --pure-eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; sha256 = \"$(nix hash-file pure-eval.nix --type sha256)\"; })).x)" diff --git a/tests/remote-builds.nix b/tests/remote-builds.nix index 58a26d8b6182..d7a4b21989e5 100644 --- a/tests/remote-builds.nix +++ b/tests/remote-builds.nix @@ -1,8 +1,8 @@ # Test Nix's remote build feature. -{ system, nix }: +{ nixpkgs, system, nix }: -with import <nixpkgs/nixos/lib/testing.nix> { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; makeTest ( @@ -46,13 +46,13 @@ in nix.buildMachines = [ { hostName = "slave1"; sshUser = "root"; - sshKey = "/root/.ssh/id_dsa"; + sshKey = "/root/.ssh/id_ed25519"; system = "i686-linux"; maxJobs = 1; } { hostName = "slave2"; sshUser = "root"; - sshKey = "/root/.ssh/id_dsa"; + sshKey = "/root/.ssh/id_ed25519"; system = "i686-linux"; maxJobs = 1; } @@ -70,10 +70,10 @@ in startAll; # Create an SSH key on the client. 
- my $key = `${pkgs.openssh}/bin/ssh-keygen -t dsa -f key -N ""`; + my $key = `${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f key -N ""`; $client->succeed("mkdir -p -m 700 /root/.ssh"); - $client->copyFileFromHost("key", "/root/.ssh/id_dsa"); - $client->succeed("chmod 600 /root/.ssh/id_dsa"); + $client->copyFileFromHost("key", "/root/.ssh/id_ed25519"); + $client->succeed("chmod 600 /root/.ssh/id_ed25519"); # Install the SSH key on the slaves. $client->waitForUnit("network.target"); diff --git a/tests/restricted.nix b/tests/restricted.nix new file mode 100644 index 000000000000..e0ef5840209c --- /dev/null +++ b/tests/restricted.nix @@ -0,0 +1 @@ +1 + 2 diff --git a/tests/restricted.sh b/tests/restricted.sh index c063c8693d55..0605383cc86a 100644 --- a/tests/restricted.sh +++ b/tests/restricted.sh @@ -3,7 +3,8 @@ source common.sh clearStore nix-instantiate --restrict-eval --eval -E '1 + 2' -(! nix-instantiate --restrict-eval ./simple.nix) +(! nix-instantiate --restrict-eval ./restricted.nix) +(! nix-instantiate --eval --restrict-eval <(echo '1 + 2')) nix-instantiate --restrict-eval ./simple.nix -I src=. nix-instantiate --restrict-eval ./simple.nix -I src1=simple.nix -I src2=config.nix -I src3=./simple.builder.sh @@ -28,3 +29,12 @@ nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --restrict-eval (! nix eval --raw "(builtins.fetchurl https://github.com/NixOS/patchelf/archive/master.tar.gz)" --restrict-eval) (! nix eval --raw "(builtins.fetchTarball https://github.com/NixOS/patchelf/archive/master.tar.gz)" --restrict-eval) (! nix eval --raw "(fetchGit git://github.com/NixOS/patchelf.git)" --restrict-eval) + +ln -sfn $(pwd)/restricted.nix $TEST_ROOT/restricted.nix +[[ $(nix-instantiate --eval $TEST_ROOT/restricted.nix) == 3 ]] +(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix) +(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I $TEST_ROOT) +(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I .) +nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I $TEST_ROOT -I . + +[[ $(nix eval --raw --restrict-eval -I . '(builtins.readFile "${import ./simple.nix}/hello")') == 'Hello World!' ]] diff --git a/tests/run.sh b/tests/run.sh index 784d29183cf3..d1dbfd6bd4a6 100644 --- a/tests/run.sh +++ b/tests/run.sh @@ -6,24 +6,23 @@ clearCache nix run -f run.nix hello -c hello | grep 'Hello World' nix run -f run.nix hello -c hello NixOS | grep 'Hello NixOS' -if [[ $(uname) = Linux ]]; then +if ! canUseSandbox; then exit; fi - chmod -R u+w $TEST_ROOT/store0 || true - rm -rf $TEST_ROOT/store0 +chmod -R u+w $TEST_ROOT/store0 || true +rm -rf $TEST_ROOT/store0 - clearStore +clearStore - path=$(nix eval --raw -f run.nix hello) +path=$(nix eval --raw -f run.nix hello) - # Note: we need the sandbox paths to ensure that the shell is - # visible in the sandbox. - nix run --sandbox-build-dir /build-tmp \ - --sandbox-paths '/nix? /bin? /lib? /usr?' \ - --store $TEST_ROOT/store0 -f run.nix hello -c hello | grep 'Hello World' +# Note: we need the sandbox paths to ensure that the shell is +# visible in the sandbox. +nix run --sandbox-build-dir /build-tmp \ + --sandbox-paths '/nix? /bin? /lib? /lib64? /usr?' \ + --store $TEST_ROOT/store0 -f run.nix hello -c hello | grep 'Hello World' - path2=$(nix run --sandbox-paths '/nix? /bin? /lib? /usr?' --store $TEST_ROOT/store0 -f run.nix hello -c $SHELL -c 'type -p hello') +path2=$(nix run --sandbox-paths '/nix? /bin? /lib? /lib64? /usr?' 
--store $TEST_ROOT/store0 -f run.nix hello -c $SHELL -c 'type -p hello') - [[ $path/bin/hello = $path2 ]] +[[ $path/bin/hello = $path2 ]] - [[ -e $TEST_ROOT/store0/nix/store/$(basename $path)/bin/hello ]] -fi +[[ -e $TEST_ROOT/store0/nix/store/$(basename $path)/bin/hello ]] diff --git a/tests/setuid.nix b/tests/setuid.nix index c982d9cf0366..77e83c8d6c2c 100644 --- a/tests/setuid.nix +++ b/tests/setuid.nix @@ -1,8 +1,8 @@ # Verify that Linux builds cannot create setuid or setgid binaries. -{ system, nix }: +{ nixpkgs, system, nix }: -with import <nixpkgs/nixos/lib/testing.nix> { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; makeTest { diff --git a/tests/shell.shebang.sh b/tests/shell.shebang.sh index c8e55ca9b90c..f7132043de44 100755 --- a/tests/shell.shebang.sh +++ b/tests/shell.shebang.sh @@ -1,4 +1,4 @@ #! @ENV_PROG@ nix-shell -#! nix-shell -I nixpkgs=shell.nix --no-use-substitutes +#! nix-shell -I nixpkgs=shell.nix --no-substitute #! nix-shell --pure -i bash -p foo bar echo "$(foo) $(bar) $@" diff --git a/tests/user-envs.sh b/tests/user-envs.sh index c4192fdc59b2..ba63923113d8 100644 --- a/tests/user-envs.sh +++ b/tests/user-envs.sh @@ -24,6 +24,9 @@ rm -f $HOME/.nix-defexpr ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr nix-env -qa '*' --description | grep -q silly +# Query the system. +nix-env -qa '*' --system | grep -q $system + # Install "foo-1.0". nix-env -i foo-1.0 diff --git a/version b/version index 35d51f33b34f..415b19fc3623 100644 --- a/version +++ b/version @@ -1 +1 @@ -1.12 \ No newline at end of file +2.0 \ No newline at end of file |
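
For reference, a minimal C++ sketch (not part of the patch) of the behaviour documented in the json.hpp hunks above: the CBOR first-byte mapping, checked JSON-pointer access with out_of_range errors, and JSON Patch array indexing. It assumes the single-header nlohmann/json library is available as <nlohmann/json.hpp>; in this tree the header is vendored, so the actual include path may differ, and the error codes in the comments simply follow the out_of_range documentation added above.

#include <cassert>
#include <iostream>
#include <nlohmann/json.hpp>   // assumed include path for the single-header library

using json = nlohmann::json;

int main()
{
    // CBOR serialisation follows the first-byte table above,
    // e.g. `true` is encoded as the single byte 0xF5.
    std::vector<std::uint8_t> cbor = json::to_cbor(json(true));
    assert(cbor.size() == 1 && cbor[0] == 0xF5);

    json j = {{"users", {"alice", "bob"}}};

    // Checked access via a JSON pointer: at() never inserts missing elements.
    assert(j.at(json::json_pointer("/users/1")) == "bob");

    try {
        j.at(json::json_pointer("/groups/0"));   // "groups" is not a key of j
    } catch (json::out_of_range& e) {
        // reported as out_of_range.403 (key not found), per the documentation above
        std::cout << e.what() << '\n';
    }

    // JSON Patch array indices resolve through the same pointer machinery;
    // "add" at index 1 inserts before the current element at index 1.
    json doc = {{"a", {1, 2, 3}}};
    json p = json::parse(R"([{ "op": "add", "path": "/a/1", "value": 42 }])");
    assert(doc.patch(p) == json({{"a", {1, 42, 2, 3}}}));
}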